author    River Riddle <riverriddle@google.com>  2020-01-13 13:12:37 -0800
committer River Riddle <riverriddle@google.com>  2020-01-13 13:24:39 -0800
commit    4268e4f4b84b85266426e99050d31ec63f3ce8aa (patch)
tree      d3a6b4cc366310f7ef6108d029155b06295c5495 /mlir
parent    26c7a4ed101fae85d2041ee1c8e8483b96e4460e (diff)
[mlir] Change the syntax of AffineMapAttr and IntegerSetAttr to avoid conflicts with function types.
Summary: The current syntax for AffineMapAttr and IntegerSetAttr conflicts with function types, making it impossible to round-trip function types (and, e.g., FuncOp) in the IR. This revision changes the syntax for these attributes by wrapping them in a keyword: AffineMapAttr is wrapped with `affine_map<>` and IntegerSetAttr with `affine_set<>`.

Reviewed By: nicolasvasilache, ftynse

Differential Revision: https://reviews.llvm.org/D72429
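To make the change concrete, here is a minimal sketch of the old and new attribute syntax (the alias names and shapes are illustrative, not taken from the patch); the documentation and test updates below follow the same pattern, both for attribute aliases and for maps/sets used inline in ops and memref types:

```mlir
// Old syntax (before this patch): bare maps and sets, ambiguous with function types.
//   #map = (d0, d1)[s0] -> (d0 + s0, d1)
//   #set = (d0)[s0] : (d0 >= 0, s0 - d0 - 1 >= 0)

// New syntax: the attributes are wrapped in a keyword.
#map = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
#set = affine_set<(d0)[s0] : (d0 >= 0, s0 - d0 - 1 >= 0)>

// The wrapped form is also used inline, e.g. in affine.apply and memref layouts.
func @example(%i: index, %n: index) {
  %0 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)>(%i)[%n]
  %m = alloc()[%n] : memref<4x4xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>>
  return
}
```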
Diffstat (limited to 'mlir')
-rw-r--r--  mlir/docs/Dialects/Affine.md | 34
-rw-r--r--  mlir/docs/Dialects/Standard.md | 16
-rw-r--r--  mlir/docs/LangRef.md | 22
-rw-r--r--  mlir/include/mlir/IR/DialectImplementation.h | 6
-rw-r--r--  mlir/include/mlir/IR/OpImplementation.h | 6
-rw-r--r--  mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp | 20
-rw-r--r--  mlir/lib/IR/AsmPrinter.cpp | 8
-rw-r--r--  mlir/lib/Parser/Parser.cpp | 64
-rw-r--r--  mlir/lib/Parser/TokenKinds.def | 2
-rw-r--r--  mlir/test/AffineOps/canonicalize.mlir | 236
-rw-r--r--  mlir/test/AffineOps/dma.mlir | 28
-rw-r--r--  mlir/test/AffineOps/inlining.mlir | 4
-rw-r--r--  mlir/test/AffineOps/invalid.mlir | 24
-rw-r--r--  mlir/test/AffineOps/load-store-invalid.mlir | 8
-rw-r--r--  mlir/test/AffineOps/load-store.mlir | 24
-rw-r--r--  mlir/test/AffineOps/memref-stride-calculation.mlir | 60
-rw-r--r--  mlir/test/AffineOps/ops.mlir | 24
-rw-r--r--  mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir | 6
-rw-r--r--  mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir | 24
-rw-r--r--  mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir | 8
-rw-r--r--  mlir/test/Conversion/VectorToLoops/vector-to-loops.mlir | 28
-rw-r--r--  mlir/test/Dialect/Linalg/affine.mlir | 6
-rw-r--r--  mlir/test/Dialect/Linalg/fusion.mlir | 18
-rw-r--r--  mlir/test/Dialect/Linalg/invalid.mlir | 122
-rw-r--r--  mlir/test/Dialect/Linalg/llvm.mlir | 22
-rw-r--r--  mlir/test/Dialect/Linalg/loops.mlir | 24
-rw-r--r--  mlir/test/Dialect/Linalg/promote.mlir | 14
-rw-r--r--  mlir/test/Dialect/Linalg/roundtrip.mlir | 96
-rw-r--r--  mlir/test/Dialect/Linalg/tile.mlir | 48
-rw-r--r--  mlir/test/Dialect/Linalg/tile_conv.mlir | 8
-rw-r--r--  mlir/test/Dialect/Linalg/tile_indexed_generic.mlir | 6
-rw-r--r--  mlir/test/Dialect/Linalg/transform-patterns.mlir | 30
-rw-r--r--  mlir/test/Dialect/SPIRV/composite-ops.mlir | 4
-rw-r--r--  mlir/test/Dialect/VectorOps/invalid.mlir | 100
-rw-r--r--  mlir/test/Dialect/VectorOps/ops.mlir | 30
-rw-r--r--  mlir/test/Dialect/VectorOps/vector-transforms.mlir | 28
-rw-r--r--  mlir/test/EDSC/builder-api-test.cpp | 98
-rw-r--r--  mlir/test/IR/affine-map.mlir | 228
-rw-r--r--  mlir/test/IR/core-ops.mlir | 68
-rw-r--r--  mlir/test/IR/invalid-affinemap.mlir | 66
-rw-r--r--  mlir/test/IR/invalid-ops.mlir | 58
-rw-r--r--  mlir/test/IR/invalid.mlir | 52
-rw-r--r--  mlir/test/IR/locations.mlir | 2
-rw-r--r--  mlir/test/IR/memory-ops.mlir | 24
-rw-r--r--  mlir/test/IR/opaque_locations.mlir | 2
-rw-r--r--  mlir/test/IR/parser.mlir | 128
-rw-r--r--  mlir/test/IR/pretty-locations.mlir | 2
-rw-r--r--  mlir/test/IR/print-op-local-scope.mlir | 4
-rw-r--r--  mlir/test/Transforms/Vectorize/compose_maps.mlir | 82
-rw-r--r--  mlir/test/Transforms/Vectorize/normalize_maps.mlir | 42
-rw-r--r--  mlir/test/Transforms/Vectorize/vectorize_1d.mlir | 14
-rw-r--r--  mlir/test/Transforms/Vectorize/vectorize_2d.mlir | 26
-rw-r--r--  mlir/test/Transforms/Vectorize/vectorize_3d.mlir | 2
-rw-r--r--  mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir | 2
-rw-r--r--  mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir | 2
-rw-r--r--  mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir | 2
-rw-r--r--  mlir/test/Transforms/affine-data-copy.mlir | 8
-rw-r--r--  mlir/test/Transforms/affine-loop-invariant-code-motion.mlir | 28
-rw-r--r--  mlir/test/Transforms/canonicalize.mlir | 86
-rw-r--r--  mlir/test/Transforms/constant-fold.mlir | 6
-rw-r--r--  mlir/test/Transforms/cse.mlir | 4
-rw-r--r--  mlir/test/Transforms/dma-generate.mlir | 86
-rw-r--r--  mlir/test/Transforms/loop-fusion-slice-computation.mlir | 4
-rw-r--r--  mlir/test/Transforms/loop-fusion.mlir | 144
-rw-r--r--  mlir/test/Transforms/loop-invariant-code-motion.mlir | 16
-rw-r--r--  mlir/test/Transforms/loop-tiling.mlir | 30
-rw-r--r--  mlir/test/Transforms/lower-affine.mlir | 34
-rw-r--r--  mlir/test/Transforms/memref-bound-check.mlir | 94
-rw-r--r--  mlir/test/Transforms/memref-dataflow-opt.mlir | 22
-rw-r--r--  mlir/test/Transforms/memref-dependence-check.mlir | 148
-rw-r--r--  mlir/test/Transforms/memref-normalize.mlir | 44
-rw-r--r--  mlir/test/Transforms/pipeline-data-transfer.mlir | 34
-rw-r--r--  mlir/test/Transforms/simplify-affine-structures.mlir | 79
-rw-r--r--  mlir/test/Transforms/slicing-utils.mlir | 2
-rw-r--r--  mlir/test/Transforms/strip-debuginfo.mlir | 2
-rw-r--r--  mlir/test/Transforms/unroll-jam.mlir | 20
-rw-r--r--  mlir/test/Transforms/unroll.mlir | 104
-rw-r--r--  mlir/test/mlir-cpu-runner/linalg_integration_test.mlir | 4
78 files changed, 1592 insertions, 1519 deletions
diff --git a/mlir/docs/Dialects/Affine.md b/mlir/docs/Dialects/Affine.md
index c5dcf6a6790..245ba33fed6 100644
--- a/mlir/docs/Dialects/Affine.md
+++ b/mlir/docs/Dialects/Affine.md
@@ -22,7 +22,7 @@ Examples:
```mlir
// A 2d to 3d affine mapping.
// d0/d1 are dimensions, s0 is a symbol
-#affine_map2to3 = (d0, d1)[s0] -> (d0, d1 + s0, d1 - s0)
+#affine_map2to3 = affine_map<(d0, d1)[s0] -> (d0, d1 + s0, d1 - s0)>
```
Dimensional identifiers correspond to the dimensions of the underlying structure
@@ -52,7 +52,7 @@ SSA values bound to dimensions and symbols must always have 'index' type.
Example:
```mlir
-#affine_map2to3 = (d0, d1)[s0] -> (d0, d1 + s0, d1 - s0)
+#affine_map2to3 = affine_map<(d0, d1)[s0] -> (d0, d1 + s0, d1 - s0)>
// Binds %N to the s0 symbol in affine_map2to3.
%x = alloc()[%N] : memref<40x50xf32, #affine_map2to3>
```
@@ -177,14 +177,14 @@ Examples:
```mlir
// Affine map out-of-line definition and usage example.
-#affine_map42 = (d0, d1)[s0] -> (d0, d0 + d1 + s0 floordiv 2)
+#affine_map42 = affine_map<(d0, d1)[s0] -> (d0, d0 + d1 + s0 floordiv 2)>
// Use an affine mapping definition in an alloc operation, binding the
// SSA value %N to the symbol s0.
%a = alloc()[%N] : memref<4x4xf32, #affine_map42>
// Same thing with an inline affine mapping definition.
-%b = alloc()[%N] : memref<4x4xf32, (d0, d1)[s0] -> (d0, d0 + d1 + s0 floordiv 2)>
+%b = alloc()[%N] : memref<4x4xf32, affine_map<(d0, d1)[s0] -> (d0, d0 + d1 + s0 floordiv 2)>>
```
### Semi-affine maps
@@ -280,8 +280,8 @@ Example:
```mlir
// A example two-dimensional integer set with two symbols.
-#set42 = (d0, d1)[s0, s1]
- : (d0 >= 0, -d0 + s0 - 1 >= 0, d1 >= 0, -d1 + s1 - 1 >= 0)
+#set42 = affine_set<(d0, d1)[s0, s1]
+ : (d0 >= 0, -d0 + s0 - 1 >= 0, d1 >= 0, -d1 + s1 - 1 >= 0)>
// Inside a Region
affine.if #set42(%i, %j)[%M, %N] {
@@ -299,7 +299,7 @@ affine.if #set42(%i, %j)[%M, %N] {
Syntax:
```
-operation ::= ssa-id `=` `affine.apply` affine-map dim-and-symbol-use-list
+operation ::= ssa-id `=` `affine.apply` affine-map-attribute dim-and-symbol-use-list
```
The `affine.apply` operation applies an
@@ -312,12 +312,12 @@ value. The input operands and result must all have 'index' type.
Example:
```mlir
-#map10 = (d0, d1) -> (d0 floordiv 8 + d1 floordiv 128)
+#map10 = affine_map<(d0, d1) -> (d0 floordiv 8 + d1 floordiv 128)>
...
%1 = affine.apply #map10 (%s, %t)
// Inline example.
-%2 = affine.apply (i)[s0] -> (i+s0) (%42)[%n]
+%2 = affine.apply affine_map<(i)[s0] -> (i+s0)> (%42)[%n]
```
#### 'affine.for' operation
@@ -328,8 +328,8 @@ Syntax:
operation ::= `affine.for` ssa-id `=` lower-bound `to` upper-bound
(`step` integer-literal)? `{` op* `}`
-lower-bound ::= `max`? affine-map dim-and-symbol-use-list | shorthand-bound
-upper-bound ::= `min`? affine-map dim-and-symbol-use-list | shorthand-bound
+lower-bound ::= `max`? affine-map-attribute dim-and-symbol-use-list | shorthand-bound
+upper-bound ::= `min`? affine-map-attribute dim-and-symbol-use-list | shorthand-bound
shorthand-bound ::= ssa-id | `-`? integer-literal
```
@@ -366,7 +366,7 @@ nullary mapping function that returns the constant value (e.g. `()->(-42)()`).
Example showing reverse iteration of the inner loop:
```mlir
-#map57 = (d0)[s0] -> (s0 - d0 - 1)
+#map57 = affine_map<(d0)[s0] -> (s0 - d0 - 1)>
func @simple_example(%A: memref<?x?xf32>, %B: memref<?x?xf32>) {
%N = dim %A, 0 : memref<?x?xf32>
@@ -387,7 +387,7 @@ Syntax:
```
operation ::= `affine.if` if-op-cond `{` op* `}` (`else` `{` op* `}`)?
-if-op-cond ::= integer-set dim-and-symbol-use-list
+if-op-cond ::= integer-set-attr dim-and-symbol-use-list
```
The `affine.if` operation restricts execution to a subset of the loop iteration
@@ -410,8 +410,8 @@ blocks must not have any arguments.
Example:
```mlir
-#set = (d0, d1)[s0]: (d0 - 10 >= 0, s0 - d0 - 9 >= 0,
- d1 - 10 >= 0, s0 - d1 - 9 >= 0)
+#set = affine_set<(d0, d1)[s0]: (d0 - 10 >= 0, s0 - d0 - 9 >= 0,
+ d1 - 10 >= 0, s0 - d1 - 9 >= 0)>
func @reduced_domain_example(%A, %X, %N) : (memref<10xi32>, i32, i32) {
affine.for %i = 0 to %N {
affine.for %j = 0 to %N {
@@ -571,7 +571,7 @@ Example:
Syntax:
```
-operation ::= ssa-id `=` `affine.min` affine-map dim-and-symbol-use-list
+operation ::= ssa-id `=` `affine.min` affine-map-attribute dim-and-symbol-use-list
```
The `affine.min` operation applies an
@@ -585,7 +585,7 @@ Example:
```mlir
-%0 = affine.min (d0)[s0] -> (1000, d0 + 512, s0) (%arg0)[%arg1]
+%0 = affine.min affine_map<(d0)[s0] -> (1000, d0 + 512, s0)> (%arg0)[%arg1]
```
diff --git a/mlir/docs/Dialects/Standard.md b/mlir/docs/Dialects/Standard.md
index f84a2c94e92..0d30296b4c2 100644
--- a/mlir/docs/Dialects/Standard.md
+++ b/mlir/docs/Dialects/Standard.md
@@ -259,12 +259,12 @@ Example:
```mlir
%size = constant 32 : index
-%tag = alloc() : memref<1 x i32, (d0) -> (d0), 4>
+%tag = alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 4>
%idx = constant 0 : index
dma_start %src[%i, %j], %dst[%k, %l], %size, %tag[%idx] :
- memref<40 x 8 x vector<16xf32>, (d0, d1) -> (d0, d1), 0>,
- memref<2 x 4 x vector<16xf32>, (d0, d1) -> (d0, d1), 2>,
- memref<1 x i32>, (d0) -> (d0), 4>
+ memref<40 x 8 x vector<16xf32>, affine_map<(d0, d1) -> (d0, d1)>, 0>,
+ memref<2 x 4 x vector<16xf32>, affine_map<(d0, d1) -> (d0, d1)>, 2>,
+ memref<1 x i32>, affine_map<(d0) -> (d0)>, 4>
```
### 'dma_wait' operation
@@ -284,7 +284,7 @@ load/store indices.
Example:
```mlir
-dma_wait %tag[%idx], %size : memref<1 x i32, (d0) -> (d0), 4>
+dma_wait %tag[%idx], %size : memref<1 x i32, affine_map<(d0) -> (d0)>, 4>
```
### 'extract_element' operation
@@ -334,12 +334,12 @@ values or the recursively result of such an `affine.apply` operation.
Example:
```mlir
-%1 = affine.apply (d0, d1) -> (3*d0) (%i, %j)
-%2 = affine.apply (d0, d1) -> (d1+1) (%i, %j)
+%1 = affine.apply affine_map<(d0, d1) -> (3*d0)> (%i, %j)
+%2 = affine.apply affine_map<(d0, d1) -> (d1+1)> (%i, %j)
%12 = load %A[%1, %2] : memref<8x?xi32, #layout, memspace0>
// Example of an indirect load (treated as non-affine)
-%3 = affine.apply (d0) -> (2*d0 + 1)(%12)
+%3 = affine.apply affine_map<(d0) -> (2*d0 + 1)>(%12)
%13 = load %A[%3, %2] : memref<4x?xi32, #layout, memspace0>
```
diff --git a/mlir/docs/LangRef.md b/mlir/docs/LangRef.md
index 82b740e50ea..cfb2955e8d4 100644
--- a/mlir/docs/LangRef.md
+++ b/mlir/docs/LangRef.md
@@ -852,20 +852,20 @@ Examples of memref static type
```mlir
// Identity index/layout map
-#identity = (d0, d1) -> (d0, d1)
+#identity = affine_map<(d0, d1) -> (d0, d1)>
// Column major layout.
-#col_major = (d0, d1, d2) -> (d2, d1, d0)
+#col_major = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
// A 2-d tiled layout with tiles of size 128 x 256.
-#tiled_2d_128x256 = (d0, d1) -> (d0 div 128, d1 div 256, d0 mod 128, d1 mod 256)
+#tiled_2d_128x256 = affine_map<(d0, d1) -> (d0 div 128, d1 div 256, d0 mod 128, d1 mod 256)>
// A tiled data layout with non-constant tile sizes.
-#tiled_dynamic = (d0, d1)[s0, s1] -> (d0 floordiv s0, d1 floordiv s1,
- d0 mod s0, d1 mod s1)
+#tiled_dynamic = affine_map<(d0, d1)[s0, s1] -> (d0 floordiv s0, d1 floordiv s1,
+ d0 mod s0, d1 mod s1)>
// A layout that yields a padding on two at either end of the minor dimension.
-#padded = (d0, d1) -> (d0, (d1 + 2) floordiv 2, (d1 + 2) mod 2)
+#padded = affine_map<(d0, d1) -> (d0, (d1 + 2) floordiv 2, (d1 + 2) mod 2)>
// The dimension list "16x32" defines the following 2D index space:
@@ -897,7 +897,7 @@ memref<16x32xf32, #identity, memspace0>
%P = alloc() : memref<16x64xf32, #padded>
// Affine map with symbol 's0' used as offset for the first dimension.
-#imapS = (d0, d1) [s0] -> (d0 + s0, d1)
+#imapS = affine_map<(d0, d1) [s0] -> (d0 + s0, d1)>
// Allocate memref and bind the following symbols:
// '%n' is bound to the dynamic second dimension of the memref type.
// '%o' is bound to the symbol 's0' in the affine map of the memref type.
@@ -1188,10 +1188,10 @@ These aliases *must* be defined before their uses. Alias names may not contain a
Example:
```mlir
-#map = (d0) -> (d0 + 10)
+#map = affine_map<(d0) -> (d0 + 10)>
// Using the original attribute.
-%b = affine.apply (d0) -> (d0 + 10) (%a)
+%b = affine.apply affine_map<(d0) -> (d0 + 10)> (%a)
// Using the attribute alias.
%b = affine.apply #map(%a)
@@ -1262,7 +1262,7 @@ standard-attribute ::= affine-map-attribute
Syntax:
```
-affine-map-attribute ::= affine-map
+affine-map-attribute ::= `affine_map` `<` affine-map `>`
```
An affine-map attribute is an attribute that represents a affine-map object.
@@ -1419,7 +1419,7 @@ is not specified, is a 64-bit integer.
Syntax:
```
-integer-set-attribute ::= affine-map
+integer-set-attribute ::= `affine_set` `<` integer-set `>`
```
An integer-set attribute is an attribute that represents an integer-set object.
diff --git a/mlir/include/mlir/IR/DialectImplementation.h b/mlir/include/mlir/IR/DialectImplementation.h
index 1eada8f264b..ec8e39cf35b 100644
--- a/mlir/include/mlir/IR/DialectImplementation.h
+++ b/mlir/include/mlir/IR/DialectImplementation.h
@@ -291,6 +291,12 @@ public:
return success();
}
+ /// Parse an affine map instance into 'map'.
+ virtual ParseResult parseAffineMap(AffineMap &map) = 0;
+
+ /// Parse an integer set instance into 'set'.
+ virtual ParseResult printIntegerSet(IntegerSet &set) = 0;
+
//===--------------------------------------------------------------------===//
// Type Parsing
//===--------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h
index 98ce1015886..97e3b97d696 100644
--- a/mlir/include/mlir/IR/OpImplementation.h
+++ b/mlir/include/mlir/IR/OpImplementation.h
@@ -379,6 +379,12 @@ public:
virtual ParseResult
parseOptionalAttrDictWithKeyword(SmallVectorImpl<NamedAttribute> &result) = 0;
+ /// Parse an affine map instance into 'map'.
+ virtual ParseResult parseAffineMap(AffineMap &map) = 0;
+
+ /// Parse an integer set instance into 'set'.
+ virtual ParseResult printIntegerSet(IntegerSet &set) = 0;
+
//===--------------------------------------------------------------------===//
// Identifier Parsing
//===--------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index e244542f9b4..835afceb8f2 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -691,16 +691,18 @@ static void print(OpAsmPrinter &p, TransposeOp op) {
static ParseResult parseTransposeOp(OpAsmParser &parser,
OperationState &result) {
OpAsmParser::OperandType view;
- AffineMapAttr permutation;
+ AffineMap permutation;
MemRefType type;
- return failure(parser.parseOperand(view) ||
- parser.parseAttribute(permutation,
- TransposeOp::getPermutationAttrName(),
- result.attributes) ||
- parser.parseOptionalAttrDict(result.attributes) ||
- parser.parseColonType(type) ||
- parser.resolveOperand(view, type, result.operands) ||
- parser.addTypeToList(type, result.types));
+ if (parser.parseOperand(view) || parser.parseAffineMap(permutation) ||
+ parser.parseOptionalAttrDict(result.attributes) ||
+ parser.parseColonType(type) ||
+ parser.resolveOperand(view, type, result.operands) ||
+ parser.addTypeToList(type, result.types))
+ return failure();
+
+ result.addAttribute(TransposeOp::getPermutationAttrName(),
+ AffineMapAttr::get(permutation));
+ return success();
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
index 53ded9d7268..49830aae485 100644
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -1218,13 +1218,19 @@ void ModulePrinter::printAttribute(Attribute attr, bool mayElideType) {
os << ']';
break;
case StandardAttributes::AffineMap:
+ os << "affine_map<";
attr.cast<AffineMapAttr>().getValue().print(os);
+ os << '>';
// AffineMap always elides the type.
return;
case StandardAttributes::IntegerSet:
+ os << "affine_set<";
attr.cast<IntegerSetAttr>().getValue().print(os);
- break;
+ os << '>';
+
+ // IntegerSet always elides the type.
+ return;
case StandardAttributes::Type:
printType(attr.cast<TypeAttr>().getValue());
break;
diff --git a/mlir/lib/Parser/Parser.cpp b/mlir/lib/Parser/Parser.cpp
index 13196c34d66..9b56f71b812 100644
--- a/mlir/lib/Parser/Parser.cpp
+++ b/mlir/lib/Parser/Parser.cpp
@@ -334,8 +334,11 @@ public:
// Affine Parsing
//===--------------------------------------------------------------------===//
+ /// Parse a reference to either an affine map, or an integer set.
ParseResult parseAffineMapOrIntegerSetReference(AffineMap &map,
IntegerSet &set);
+ ParseResult parseAffineMapReference(AffineMap &map);
+ ParseResult parseIntegerSetReference(IntegerSet &set);
/// Parse an AffineMap where the dim and symbol identifiers are SSA ids.
ParseResult
@@ -643,6 +646,16 @@ public:
return success(static_cast<bool>(result));
}
+ /// Parse an affine map instance into 'map'.
+ ParseResult parseAffineMap(AffineMap &map) override {
+ return parser.parseAffineMapReference(map);
+ }
+
+ /// Parse an integer set instance into 'set'.
+ ParseResult printIntegerSet(IntegerSet &set) override {
+ return parser.parseIntegerSetReference(set);
+ }
+
//===--------------------------------------------------------------------===//
// Type Parsing
//===--------------------------------------------------------------------===//
@@ -1446,15 +1459,24 @@ static std::string extractSymbolReference(Token tok) {
Attribute Parser::parseAttribute(Type type) {
switch (getToken().getKind()) {
// Parse an AffineMap or IntegerSet attribute.
- case Token::l_paren: {
- // Try to parse an affine map or an integer set reference.
+ case Token::kw_affine_map: {
+ consumeToken(Token::kw_affine_map);
+
AffineMap map;
+ if (parseToken(Token::less, "expected '<' in affine map") ||
+ parseAffineMapReference(map) ||
+ parseToken(Token::greater, "expected '>' in affine map"))
+ return Attribute();
+ return AffineMapAttr::get(map);
+ }
+ case Token::kw_affine_set: {
+ consumeToken(Token::kw_affine_set);
+
IntegerSet set;
- if (parseAffineMapOrIntegerSetReference(map, set))
- return nullptr;
- if (map)
- return AffineMapAttr::get(map);
- assert(set);
+ if (parseToken(Token::less, "expected '<' in integer set") ||
+ parseIntegerSetReference(set) ||
+ parseToken(Token::greater, "expected '>' in integer set"))
+ return Attribute();
return IntegerSetAttr::get(set);
}
@@ -3034,6 +3056,24 @@ ParseResult Parser::parseAffineMapOrIntegerSetReference(AffineMap &map,
IntegerSet &set) {
return AffineParser(state).parseAffineMapOrIntegerSetInline(map, set);
}
+ParseResult Parser::parseAffineMapReference(AffineMap &map) {
+ llvm::SMLoc curLoc = getToken().getLoc();
+ IntegerSet set;
+ if (parseAffineMapOrIntegerSetReference(map, set))
+ return failure();
+ if (set)
+ return emitError(curLoc, "expected AffineMap, but got IntegerSet");
+ return success();
+}
+ParseResult Parser::parseIntegerSetReference(IntegerSet &set) {
+ llvm::SMLoc curLoc = getToken().getLoc();
+ AffineMap map;
+ if (parseAffineMapOrIntegerSetReference(map, set))
+ return failure();
+ if (map)
+ return emitError(curLoc, "expected IntegerSet, but got AffineMap");
+ return success();
+}
/// Parse an AffineMap of SSA ids. The callback 'parseElement' is used to
/// parse SSA value uses encountered while parsing affine expressions.
@@ -3956,6 +3996,16 @@ public:
return parser.parseAttributeDict(result);
}
+ /// Parse an affine map instance into 'map'.
+ ParseResult parseAffineMap(AffineMap &map) override {
+ return parser.parseAffineMapReference(map);
+ }
+
+ /// Parse an integer set instance into 'set'.
+ ParseResult printIntegerSet(IntegerSet &set) override {
+ return parser.parseIntegerSetReference(set);
+ }
+
//===--------------------------------------------------------------------===//
// Identifier Parsing
//===--------------------------------------------------------------------===//
diff --git a/mlir/lib/Parser/TokenKinds.def b/mlir/lib/Parser/TokenKinds.def
index fc9f7821f1a..9221dd774ac 100644
--- a/mlir/lib/Parser/TokenKinds.def
+++ b/mlir/lib/Parser/TokenKinds.def
@@ -82,6 +82,8 @@ TOK_OPERATOR(star, "*")
// NOTE: Please key these alphabetized to make it easier to find something in
// this list and to cater to OCD.
+TOK_KEYWORD(affine_map)
+TOK_KEYWORD(affine_set)
TOK_KEYWORD(attributes)
TOK_KEYWORD(bf16)
TOK_KEYWORD(ceildiv)
diff --git a/mlir/test/AffineOps/canonicalize.mlir b/mlir/test/AffineOps/canonicalize.mlir
index 98b78f101c9..47477fd5b9f 100644
--- a/mlir/test/AffineOps/canonicalize.mlir
+++ b/mlir/test/AffineOps/canonicalize.mlir
@@ -1,46 +1,46 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s
// Affine maps for test case: compose_affine_maps_1dto2d_no_symbols
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 - 1)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0) -> (d0 + 1)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
// Affine maps for test case: compose_affine_maps_1dto2d_with_symbols
-// CHECK-DAG: [[MAP4:#map[0-9]+]] = (d0) -> (d0 - 4)
-// CHECK-DAG: [[MAP4b:#map[0-9]+]] = (d0) -> (d0 - 7)
-// CHECK-DAG: [[MAP7:#map[0-9]+]] = (d0) -> (d0 * 2 - 3)
-// CHECK-DAG: [[MAP7a:#map[0-9]+]] = (d0) -> (d0 * 2 + 1)
+// CHECK-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0) -> (d0 - 4)>
+// CHECK-DAG: [[MAP4b:#map[0-9]+]] = affine_map<(d0) -> (d0 - 7)>
+// CHECK-DAG: [[MAP7:#map[0-9]+]] = affine_map<(d0) -> (d0 * 2 - 3)>
+// CHECK-DAG: [[MAP7a:#map[0-9]+]] = affine_map<(d0) -> (d0 * 2 + 1)>
// Affine map for test case: compose_affine_maps_d2_tile
-// CHECK-DAG: [[MAP8:#map[0-9]+]] = (d0, d1) -> (d1 + (d0 ceildiv 4) * 4 - (d1 floordiv 4) * 4)
-// CHECK-DAG: [[MAP8a:#map[0-9]+]] = (d0, d1) -> (d1 + (d0 ceildiv 8) * 8 - (d1 floordiv 8) * 8)
+// CHECK-DAG: [[MAP8:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + (d0 ceildiv 4) * 4 - (d1 floordiv 4) * 4)>
+// CHECK-DAG: [[MAP8a:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + (d0 ceildiv 8) * 8 - (d1 floordiv 8) * 8)>
// Affine maps for test case: compose_affine_maps_dependent_loads
-// CHECK-DAG: [[MAP9:#map[0-9]+]] = (d0) -> (d0 + 3)
-// CHECK-DAG: [[MAP10:#map[0-9]+]] = (d0) -> (d0 * 3)
-// CHECK-DAG: [[MAP11:#map[0-9]+]] = (d0) -> ((d0 + 7) ceildiv 3)
-// CHECK-DAG: [[MAP12:#map[0-9]+]] = (d0) -> (d0 * 7 - 49)
+// CHECK-DAG: [[MAP9:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// CHECK-DAG: [[MAP10:#map[0-9]+]] = affine_map<(d0) -> (d0 * 3)>
+// CHECK-DAG: [[MAP11:#map[0-9]+]] = affine_map<(d0) -> ((d0 + 7) ceildiv 3)>
+// CHECK-DAG: [[MAP12:#map[0-9]+]] = affine_map<(d0) -> (d0 * 7 - 49)>
// Affine maps for test case: compose_affine_maps_diamond_dependency
-// CHECK-DAG: [[MAP13A:#map[0-9]+]] = (d0) -> ((d0 + 6) ceildiv 8)
-// CHECK-DAG: [[MAP13B:#map[0-9]+]] = (d0) -> ((d0 * 4 - 4) floordiv 3)
+// CHECK-DAG: [[MAP13A:#map[0-9]+]] = affine_map<(d0) -> ((d0 + 6) ceildiv 8)>
+// CHECK-DAG: [[MAP13B:#map[0-9]+]] = affine_map<(d0) -> ((d0 * 4 - 4) floordiv 3)>
// Affine maps for test case: partial_fold_map
-// CHECK-DAG: [[MAP15:#map[0-9]+]] = ()[s0] -> (s0 - 42)
+// CHECK-DAG: [[MAP15:#map[0-9]+]] = affine_map<()[s0] -> (s0 - 42)>
// Affine maps for test cases: symbolic_composition_*
-// CHECK-DAG: [[map_symbolic_composition_a:#map[0-9]+]] = ()[s0] -> (s0 * 512)
-// CHECK-DAG: [[map_symbolic_composition_b:#map[0-9]+]] = ()[s0] -> (s0 * 4)
-// CHECK-DAG: [[map_symbolic_composition_c:#map[0-9]+]] = ()[s0, s1] -> (s0 * 3 + s1)
-// CHECK-DAG: [[map_symbolic_composition_d:#map[0-9]+]] = ()[s0, s1] -> (s1 * 3 + s0)
+// CHECK-DAG: [[map_symbolic_composition_a:#map[0-9]+]] = affine_map<()[s0] -> (s0 * 512)>
+// CHECK-DAG: [[map_symbolic_composition_b:#map[0-9]+]] = affine_map<()[s0] -> (s0 * 4)>
+// CHECK-DAG: [[map_symbolic_composition_c:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0 * 3 + s1)>
+// CHECK-DAG: [[map_symbolic_composition_d:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 * 3 + s0)>
// Affine maps for test cases: map_mix_dims_and_symbols_*
-// CHECK-DAG: [[map_mix_dims_and_symbols_b:#map[0-9]+]] = ()[s0, s1] -> (s1 + s0 * 42 + 6)
-// CHECK-DAG: [[map_mix_dims_and_symbols_c:#map[0-9]+]] = ()[s0, s1] -> (s1 * 4 + s0 * 168 - 4)
-// CHECK-DAG: [[map_mix_dims_and_symbols_d:#map[0-9]+]] = ()[s0, s1] -> ((s1 + s0 * 42 + 6) ceildiv 8)
-// CHECK-DAG: [[map_mix_dims_and_symbols_e:#map[0-9]+]] = ()[s0, s1] -> ((s1 * 4 + s0 * 168 - 4) floordiv 3)
+// CHECK-DAG: [[map_mix_dims_and_symbols_b:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 + s0 * 42 + 6)>
+// CHECK-DAG: [[map_mix_dims_and_symbols_c:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 * 4 + s0 * 168 - 4)>
+// CHECK-DAG: [[map_mix_dims_and_symbols_d:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s1 + s0 * 42 + 6) ceildiv 8)>
+// CHECK-DAG: [[map_mix_dims_and_symbols_e:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s1 * 4 + s0 * 168 - 4) floordiv 3)>
// Affine maps for test case: symbolic_semi_affine
-// CHECK-DAG: [[symbolic_semi_affine:#map[0-9]+]] = (d0)[s0] -> (d0 floordiv (s0 + 1))
+// CHECK-DAG: [[symbolic_semi_affine:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 floordiv (s0 + 1))>
// CHECK-LABEL: func @compose_affine_maps_1dto2d_no_symbols() {
func @compose_affine_maps_1dto2d_no_symbols() {
@@ -49,33 +49,33 @@ func @compose_affine_maps_1dto2d_no_symbols() {
affine.for %i0 = 0 to 15 {
// Test load[%x, %x]
- %x0 = affine.apply (d0) -> (d0 - 1) (%i0)
- %x1_0 = affine.apply (d0, d1) -> (d0) (%x0, %x0)
- %x1_1 = affine.apply (d0, d1) -> (d1) (%x0, %x0)
+ %x0 = affine.apply affine_map<(d0) -> (d0 - 1)> (%i0)
+ %x1_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%x0, %x0)
+ %x1_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%x0, %x0)
// CHECK: [[I0A:%[0-9]+]] = affine.apply [[MAP0]](%{{.*}})
// CHECK-NEXT: load %0{{\[}}[[I0A]], [[I0A]]{{\]}}
%v0 = load %0[%x1_0, %x1_1] : memref<4x4xf32>
// Test load[%y, %y]
- %y0 = affine.apply (d0) -> (d0 + 1) (%i0)
- %y1_0 = affine.apply (d0, d1) -> (d0) (%y0, %y0)
- %y1_1 = affine.apply (d0, d1) -> (d1) (%y0, %y0)
+ %y0 = affine.apply affine_map<(d0) -> (d0 + 1)> (%i0)
+ %y1_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%y0, %y0)
+ %y1_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%y0, %y0)
// CHECK-NEXT: [[I1A:%[0-9]+]] = affine.apply [[MAP1]](%{{.*}})
// CHECK-NEXT: load %0{{\[}}[[I1A]], [[I1A]]{{\]}}
%v1 = load %0[%y1_0, %y1_1] : memref<4x4xf32>
// Test load[%x, %y]
- %xy_0 = affine.apply (d0, d1) -> (d0) (%x0, %y0)
- %xy_1 = affine.apply (d0, d1) -> (d1) (%x0, %y0)
+ %xy_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%x0, %y0)
+ %xy_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%x0, %y0)
// CHECK-NEXT: load %0{{\[}}[[I0A]], [[I1A]]{{\]}}
%v2 = load %0[%xy_0, %xy_1] : memref<4x4xf32>
// Test load[%y, %x]
- %yx_0 = affine.apply (d0, d1) -> (d0) (%y0, %x0)
- %yx_1 = affine.apply (d0, d1) -> (d1) (%y0, %x0)
+ %yx_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%y0, %x0)
+ %yx_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%y0, %x0)
// CHECK-NEXT: load %0{{\[}}[[I1A]], [[I0A]]{{\]}}
%v3 = load %0[%yx_0, %yx_1] : memref<4x4xf32>
}
@@ -89,29 +89,29 @@ func @compose_affine_maps_1dto2d_with_symbols() {
affine.for %i0 = 0 to 15 {
// Test load[%x0, %x0] with symbol %c4
%c4 = constant 4 : index
- %x0 = affine.apply (d0)[s0] -> (d0 - s0) (%i0)[%c4]
+ %x0 = affine.apply affine_map<(d0)[s0] -> (d0 - s0)> (%i0)[%c4]
// CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP4]](%{{.*}})
// CHECK-NEXT: load %{{[0-9]+}}{{\[}}[[I0]], [[I0]]{{\]}}
%v0 = load %0[%x0, %x0] : memref<4x4xf32>
// Test load[%x0, %x1] with symbol %c4 captured by '%x0' map.
- %x1 = affine.apply (d0) -> (d0 + 1) (%i0)
- %y1 = affine.apply (d0, d1) -> (d0+d1) (%x0, %x1)
+ %x1 = affine.apply affine_map<(d0) -> (d0 + 1)> (%i0)
+ %y1 = affine.apply affine_map<(d0, d1) -> (d0+d1)> (%x0, %x1)
// CHECK-NEXT: [[I1:%[0-9]+]] = affine.apply [[MAP7]](%{{.*}})
// CHECK-NEXT: load %{{[0-9]+}}{{\[}}[[I1]], [[I1]]{{\]}}
%v1 = load %0[%y1, %y1] : memref<4x4xf32>
// Test load[%x1, %x0] with symbol %c4 captured by '%x0' map.
- %y2 = affine.apply (d0, d1) -> (d0 + d1) (%x1, %x0)
+ %y2 = affine.apply affine_map<(d0, d1) -> (d0 + d1)> (%x1, %x0)
// CHECK-NEXT: [[I2:%[0-9]+]] = affine.apply [[MAP7]](%{{.*}})
// CHECK-NEXT: load %{{[0-9]+}}{{\[}}[[I2]], [[I2]]{{\]}}
%v2 = load %0[%y2, %y2] : memref<4x4xf32>
// Test load[%x2, %x0] with symbol %c4 from '%x0' and %c5 from '%x2'
%c5 = constant 5 : index
- %x2 = affine.apply (d0)[s0] -> (d0 + s0) (%i0)[%c5]
- %y3 = affine.apply (d0, d1) -> (d0 + d1) (%x2, %x0)
+ %x2 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)> (%i0)[%c5]
+ %y3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)> (%x2, %x0)
// CHECK: [[I3:%[0-9]+]] = affine.apply [[MAP7a]](%{{.*}})
// CHECK-NEXT: load %{{[0-9]+}}{{\[}}[[I3]], [[I3]]{{\]}}
%v3 = load %0[%y3, %y3] : memref<4x4xf32>
@@ -128,18 +128,18 @@ func @compose_affine_maps_2d_tile() {
%c8 = constant 8 : index
affine.for %i0 = 0 to 3 {
- %x0 = affine.apply (d0)[s0] -> (d0 ceildiv s0) (%i0)[%c4]
+ %x0 = affine.apply affine_map<(d0)[s0] -> (d0 ceildiv s0)> (%i0)[%c4]
affine.for %i1 = 0 to 3 {
- %x1 = affine.apply (d0)[s0] -> (d0 ceildiv s0) (%i1)[%c8]
+ %x1 = affine.apply affine_map<(d0)[s0] -> (d0 ceildiv s0)> (%i1)[%c8]
affine.for %i2 = 0 to 3 {
- %x2 = affine.apply (d0)[s0] -> (d0 mod s0) (%i2)[%c4]
+ %x2 = affine.apply affine_map<(d0)[s0] -> (d0 mod s0)> (%i2)[%c4]
affine.for %i3 = 0 to 3 {
- %x3 = affine.apply (d0)[s0] -> (d0 mod s0) (%i3)[%c8]
+ %x3 = affine.apply affine_map<(d0)[s0] -> (d0 mod s0)> (%i3)[%c8]
- %x40 = affine.apply (d0, d1, d2, d3)[s0, s1] ->
- ((d0 * s0) + d2) (%x0, %x1, %x2, %x3)[%c4, %c8]
- %x41 = affine.apply (d0, d1, d2, d3)[s0, s1] ->
- ((d1 * s1) + d3) (%x0, %x1, %x2, %x3)[%c4, %c8]
+ %x40 = affine.apply affine_map<(d0, d1, d2, d3)[s0, s1] ->
+ ((d0 * s0) + d2)> (%x0, %x1, %x2, %x3)[%c4, %c8]
+ %x41 = affine.apply affine_map<(d0, d1, d2, d3)[s0, s1] ->
+ ((d1 * s1) + d3)> (%x0, %x1, %x2, %x3)[%c4, %c8]
// CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP8]](%{{.*}}, %{{.*}})
// CHECK: [[I1:%[0-9]+]] = affine.apply [[MAP8a]](%{{.*}}, %{{.*}})
// CHECK-NEXT: [[L0:%[0-9]+]] = load %{{[0-9]+}}{{\[}}[[I0]], [[I1]]{{\]}}
@@ -165,11 +165,11 @@ func @compose_affine_maps_dependent_loads() {
%c3 = constant 3 : index
%c7 = constant 7 : index
- %x00 = affine.apply (d0, d1, d2)[s0, s1] -> (d0 + s0)
+ %x00 = affine.apply affine_map<(d0, d1, d2)[s0, s1] -> (d0 + s0)>
(%i0, %i1, %i2)[%c3, %c7]
- %x01 = affine.apply (d0, d1, d2)[s0, s1] -> (d1 - s1)
+ %x01 = affine.apply affine_map<(d0, d1, d2)[s0, s1] -> (d1 - s1)>
(%i0, %i1, %i2)[%c3, %c7]
- %x02 = affine.apply (d0, d1, d2)[s0, s1] -> (d2 * s0)
+ %x02 = affine.apply affine_map<(d0, d1, d2)[s0, s1] -> (d2 * s0)>
(%i0, %i1, %i2)[%c3, %c7]
// CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP9]](%{{.*}})
@@ -186,9 +186,9 @@ func @compose_affine_maps_dependent_loads() {
%v2 = load %0[%x01, %x00] : memref<16x32xf32>
// Swizzle %x00, %x01 and %c3, %c7
- %x10 = affine.apply (d0, d1)[s0, s1] -> (d0 * s1)
+ %x10 = affine.apply affine_map<(d0, d1)[s0, s1] -> (d0 * s1)>
(%x01, %x00)[%c7, %c3]
- %x11 = affine.apply (d0, d1)[s0, s1] -> (d1 ceildiv s0)
+ %x11 = affine.apply affine_map<(d0, d1)[s0, s1] -> (d1 ceildiv s0)>
(%x01, %x00)[%c7, %c3]
// CHECK-NEXT: [[I2A:%[0-9]+]] = affine.apply [[MAP12]](%{{.*}})
@@ -206,11 +206,11 @@ func @compose_affine_maps_diamond_dependency() {
%0 = alloc() : memref<4x4xf32>
affine.for %i0 = 0 to 15 {
- %a = affine.apply (d0) -> (d0 - 1) (%i0)
- %b = affine.apply (d0) -> (d0 + 7) (%a)
- %c = affine.apply (d0) -> (d0 * 4) (%a)
- %d0 = affine.apply (d0, d1) -> (d0 ceildiv 8) (%b, %c)
- %d1 = affine.apply (d0, d1) -> (d1 floordiv 3) (%b, %c)
+ %a = affine.apply affine_map<(d0) -> (d0 - 1)> (%i0)
+ %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
+ %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
+ %d0 = affine.apply affine_map<(d0, d1) -> (d0 ceildiv 8)> (%b, %c)
+ %d1 = affine.apply affine_map<(d0, d1) -> (d1 floordiv 3)> (%b, %c)
// CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP13A]](%{{.*}})
// CHECK: [[I1:%[0-9]+]] = affine.apply [[MAP13B]](%{{.*}})
// CHECK-NEXT: load %{{[0-9]+}}{{\[}}[[I0]], [[I1]]{{\]}}
@@ -227,9 +227,9 @@ func @arg_used_as_dim_and_symbol(%arg0: memref<100x100xf32>, %arg1: index) {
%2 = alloc() : memref<1xi32>
affine.for %i0 = 0 to 100 {
affine.for %i1 = 0 to 100 {
- %3 = affine.apply (d0, d1)[s0, s1] -> (d1 + s0 + s1)
+ %3 = affine.apply affine_map<(d0, d1)[s0, s1] -> (d1 + s0 + s1)>
(%i0, %i1)[%arg1, %c9]
- %4 = affine.apply (d0, d1, d3) -> (d3 - (d0 + d1))
+ %4 = affine.apply affine_map<(d0, d1, d3) -> (d3 - (d0 + d1))>
(%arg1, %c9, %3)
// CHECK: load %{{[0-9]+}}{{\[}}%{{.*}}, %{{.*}}{{\]}}
%5 = load %1[%4, %arg1] : memref<100x100xf32, 1>
@@ -246,11 +246,11 @@ func @trivial_maps() {
%c0 = constant 0 : index
%cst = constant 0.000000e+00 : f32
affine.for %i1 = 0 to 10 {
- %1 = affine.apply ()[s0] -> (s0)()[%c0]
+ %1 = affine.apply affine_map<()[s0] -> (s0)>()[%c0]
store %cst, %0[%1] : memref<10xf32>
%2 = load %0[%c0] : memref<10xf32>
- %3 = affine.apply ()[] -> (0)()[]
+ %3 = affine.apply affine_map<()[] -> (0)>()[]
store %cst, %0[%3] : memref<10xf32>
%4 = load %0[%c0] : memref<10xf32>
}
@@ -261,42 +261,42 @@ func @trivial_maps() {
func @partial_fold_map(%arg1: index, %arg2: index) -> index {
// TODO: Constant fold one index into affine.apply
%c42 = constant 42 : index
- %2 = affine.apply (d0, d1) -> (d0 - d1) (%arg1, %c42)
+ %2 = affine.apply affine_map<(d0, d1) -> (d0 - d1)> (%arg1, %c42)
// CHECK: [[X:%[0-9]+]] = affine.apply [[MAP15]]()[%{{.*}}]
return %2 : index
}
// CHECK-LABEL: func @symbolic_composition_a(%{{.*}}: index, %{{.*}}: index) -> index {
func @symbolic_composition_a(%arg0: index, %arg1: index) -> index {
- %0 = affine.apply (d0) -> (d0 * 4)(%arg0)
- %1 = affine.apply ()[s0, s1] -> (8 * s0)()[%0, %arg0]
- %2 = affine.apply ()[s0, s1] -> (16 * s1)()[%arg1, %1]
+ %0 = affine.apply affine_map<(d0) -> (d0 * 4)>(%arg0)
+ %1 = affine.apply affine_map<()[s0, s1] -> (8 * s0)>()[%0, %arg0]
+ %2 = affine.apply affine_map<()[s0, s1] -> (16 * s1)>()[%arg1, %1]
// CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_a]]()[%{{.*}}]
return %2 : index
}
// CHECK-LABEL: func @symbolic_composition_b(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
func @symbolic_composition_b(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
- %0 = affine.apply (d0) -> (d0)(%arg0)
- %1 = affine.apply ()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)()[%0, %0, %0, %0]
+ %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
+ %1 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %0]
// CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_b]]()[%{{.*}}]
return %1 : index
}
// CHECK-LABEL: func @symbolic_composition_c(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
func @symbolic_composition_c(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
- %0 = affine.apply (d0) -> (d0)(%arg0)
- %1 = affine.apply (d0) -> (d0)(%arg1)
- %2 = affine.apply ()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)()[%0, %0, %0, %1]
+ %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
+ %1 = affine.apply affine_map<(d0) -> (d0)>(%arg1)
+ %2 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %1]
// CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_c]]()[%{{.*}}, %{{.*}}]
return %2 : index
}
// CHECK-LABEL: func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
- %0 = affine.apply (d0) -> (d0)(%arg0)
- %1 = affine.apply ()[s0] -> (s0)()[%arg1]
- %2 = affine.apply ()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)()[%0, %0, %0, %1]
+ %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
+ %1 = affine.apply affine_map<()[s0] -> (s0)>()[%arg1]
+ %2 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %1]
// CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_d]]()[%{{.*}}, %{{.*}}]
return %2 : index
}
@@ -304,8 +304,8 @@ func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: in
// CHECK-LABEL: func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
- %a = affine.apply (d0)[s0] -> (d0 - 1 + 42 * s0) (%arg0)[%arg1]
- %b = affine.apply (d0) -> (d0 + 7) (%a)
+ %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
+ %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
// CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_b]]()[%{{.*}}, %{{.*}}]
return %b : index
@@ -313,42 +313,42 @@ func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
// CHECK-LABEL: func @mix_dims_and_symbols_c(%arg0: index, %arg1: index) -> index {
func @mix_dims_and_symbols_c(%arg0: index, %arg1: index) -> index {
- %a = affine.apply (d0)[s0] -> (d0 - 1 + 42 * s0) (%arg0)[%arg1]
- %b = affine.apply (d0) -> (d0 + 7) (%a)
- %c = affine.apply (d0) -> (d0 * 4) (%a)
+ %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
+ %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
+ %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
// CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_c]]()[%{{.*}}, %{{.*}}]
return %c : index
}
// CHECK-LABEL: func @mix_dims_and_symbols_d(%arg0: index, %arg1: index) -> index {
func @mix_dims_and_symbols_d(%arg0: index, %arg1: index) -> index {
- %a = affine.apply (d0)[s0] -> (d0 - 1 + 42 * s0) (%arg0)[%arg1]
- %b = affine.apply (d0) -> (d0 + 7) (%a)
- %c = affine.apply (d0) -> (d0 * 4) (%a)
- %d = affine.apply ()[s0] -> (s0 ceildiv 8) ()[%b]
+ %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
+ %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
+ %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
+ %d = affine.apply affine_map<()[s0] -> (s0 ceildiv 8)> ()[%b]
// CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_d]]()[%{{.*}}, %{{.*}}]
return %d : index
}
// CHECK-LABEL: func @mix_dims_and_symbols_e(%arg0: index, %arg1: index) -> index {
func @mix_dims_and_symbols_e(%arg0: index, %arg1: index) -> index {
- %a = affine.apply (d0)[s0] -> (d0 - 1 + 42 * s0) (%arg0)[%arg1]
- %b = affine.apply (d0) -> (d0 + 7) (%a)
- %c = affine.apply (d0) -> (d0 * 4) (%a)
- %d = affine.apply ()[s0] -> (s0 ceildiv 8) ()[%b]
- %e = affine.apply (d0) -> (d0 floordiv 3) (%c)
+ %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
+ %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
+ %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
+ %d = affine.apply affine_map<()[s0] -> (s0 ceildiv 8)> ()[%b]
+ %e = affine.apply affine_map<(d0) -> (d0 floordiv 3)> (%c)
// CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_e]]()[%{{.*}}, %{{.*}}]
return %e : index
}
// CHECK-LABEL: func @mix_dims_and_symbols_f(%arg0: index, %arg1: index) -> index {
func @mix_dims_and_symbols_f(%arg0: index, %arg1: index) -> index {
- %a = affine.apply (d0)[s0] -> (d0 - 1 + 42 * s0) (%arg0)[%arg1]
- %b = affine.apply (d0) -> (d0 + 7) (%a)
- %c = affine.apply (d0) -> (d0 * 4) (%a)
- %d = affine.apply ()[s0] -> (s0 ceildiv 8) ()[%b]
- %e = affine.apply (d0) -> (d0 floordiv 3) (%c)
- %f = affine.apply (d0, d1)[s0, s1] -> (d0 - s1 + d1 - s0) (%d, %e)[%e, %d]
+ %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
+ %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
+ %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
+ %d = affine.apply affine_map<()[s0] -> (s0 ceildiv 8)> ()[%b]
+ %e = affine.apply affine_map<(d0) -> (d0 floordiv 3)> (%c)
+ %f = affine.apply affine_map<(d0, d1)[s0, s1] -> (d0 - s1 + d1 - s0)> (%d, %e)[%e, %d]
// CHECK: {{.*}} = constant 0 : index
return %f : index
@@ -356,10 +356,10 @@ func @mix_dims_and_symbols_f(%arg0: index, %arg1: index) -> index {
// CHECK-LABEL: func @mix_dims_and_symbols_g(%arg0: index, %arg1: index) -> (index, index, index) {
func @mix_dims_and_symbols_g(%M: index, %N: index) -> (index, index, index) {
- %K = affine.apply (d0) -> (4*d0) (%M)
- %res1 = affine.apply ()[s0, s1] -> (4 * s0)()[%N, %K]
- %res2 = affine.apply ()[s0, s1] -> (s1)()[%N, %K]
- %res3 = affine.apply ()[s0, s1] -> (1024)()[%N, %K]
+ %K = affine.apply affine_map<(d0) -> (4*d0)> (%M)
+ %res1 = affine.apply affine_map<()[s0, s1] -> (4 * s0)>()[%N, %K]
+ %res2 = affine.apply affine_map<()[s0, s1] -> (s1)>()[%N, %K]
+ %res3 = affine.apply affine_map<()[s0, s1] -> (1024)>()[%N, %K]
// CHECK-DAG: {{.*}} = constant 1024 : index
// CHECK-DAG: {{.*}} = affine.apply [[map_symbolic_composition_b]]()[%{{.*}}]
// CHECK-DAG: {{.*}} = affine.apply [[map_symbolic_composition_b]]()[%{{.*}}]
@@ -370,8 +370,8 @@ func @mix_dims_and_symbols_g(%M: index, %N: index) -> (index, index, index) {
func @symbolic_semi_affine(%M: index, %N: index, %A: memref<?xf32>) {
%f1 = constant 1.0 : f32
affine.for %i0 = 1 to 100 {
- %1 = affine.apply ()[s0] -> (s0 + 1) ()[%M]
- %2 = affine.apply (d0)[s0] -> (d0 floordiv s0) (%i0)[%1]
+ %1 = affine.apply affine_map<()[s0] -> (s0 + 1)> ()[%M]
+ %2 = affine.apply affine_map<(d0)[s0] -> (d0 floordiv s0)> (%i0)[%1]
// CHECK-DAG: {{.*}} = affine.apply [[symbolic_semi_affine]](%{{.*}})[%{{.*}}]
store %f1, %A[%2] : memref<?xf32>
}
@@ -380,8 +380,8 @@ func @symbolic_semi_affine(%M: index, %N: index, %A: memref<?xf32>) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = ()[s0] -> (0, s0)
-// CHECK: [[MAP1:#map[0-9]+]] = ()[s0] -> (100, s0)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
+// CHECK: [[MAP1:#map[0-9]+]] = affine_map<()[s0] -> (100, s0)>
// CHECK-LABEL: func @constant_fold_bounds(%arg0: index) {
func @constant_fold_bounds(%N : index) {
@@ -390,23 +390,23 @@ func @constant_fold_bounds(%N : index) {
%c9 = constant 9 : index
%c1 = constant 1 : index
%c2 = constant 2 : index
- %c3 = affine.apply (d0, d1) -> (d0 + d1) (%c1, %c2)
+ %c3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)> (%c1, %c2)
%l = "foo"() : () -> index
// CHECK: affine.for %{{.*}} = 5 to 7 {
- affine.for %i = max (d0, d1) -> (0, d0 + d1)(%c2, %c3) to min (d0, d1) -> (d0 - 2, 32*d1) (%c9, %c1) {
+ affine.for %i = max affine_map<(d0, d1) -> (0, d0 + d1)> (%c2, %c3) to min affine_map<(d0, d1) -> (d0 - 2, 32*d1)> (%c9, %c1) {
"foo"(%i, %c3) : (index, index) -> ()
}
// Bound takes a non-constant argument but can still be folded.
// CHECK: affine.for %{{.*}} = 1 to 7 {
- affine.for %j = max (d0) -> (0, 1)(%N) to min (d0, d1) -> (7, 9)(%N, %l) {
+ affine.for %j = max affine_map<(d0) -> (0, 1)> (%N) to min affine_map<(d0, d1) -> (7, 9)> (%N, %l) {
"foo"(%j, %c3) : (index, index) -> ()
}
// None of the bounds can be folded.
// CHECK: affine.for %{{.*}} = max [[MAP0]]()[%{{.*}}] to min [[MAP1]]()[%{{.*}}] {
- affine.for %k = max ()[s0] -> (0, s0) ()[%l] to min ()[s0] -> (100, s0)()[%N] {
+ affine.for %k = max affine_map<()[s0] -> (0, s0)> ()[%l] to min affine_map<()[s0] -> (100, s0)> ()[%N] {
"foo"(%k, %c3) : (index, index) -> ()
}
return
@@ -425,7 +425,7 @@ func @fold_empty_loop() {
// -----
-// CHECK-DAG: [[SET:#set[0-9]+]] = (d0, d1)[s0] : (d0 >= 0, -d0 + 1022 >= 0, d1 >= 0, -d1 + s0 - 2 >= 0)
+// CHECK-DAG: [[SET:#set[0-9]+]] = affine_set<(d0, d1)[s0] : (d0 >= 0, -d0 + 1022 >= 0, d1 >= 0, -d1 + s0 - 2 >= 0)>
// CHECK-LABEL: func @canonicalize_affine_if
// CHECK-SAME: [[M:%.*]]: index,
@@ -436,7 +436,7 @@ func @canonicalize_affine_if(%M : index, %N : index) {
affine.for %i = 0 to 1024 {
affine.for %j = 0 to %N {
// CHECK: affine.if [[SET]](%{{.*}}, %{{.*}}){{\[}}[[N]]{{\]}}
- affine.if (d0, d1, d2, d3)[s0] : (d1 >= 0, d0 - d1 >= 0, d2 >= 0, d3 - d2 - 2 >= 0) (%c1022, %i, %j, %N)[%M] {
+ affine.if affine_set<(d0, d1, d2, d3)[s0] : (d1 >= 0, d0 - d1 >= 0, d2 >= 0, d3 - d2 - 2 >= 0)> (%c1022, %i, %j, %N)[%M] {
"foo"() : () -> ()
}
"bar"() : () -> ()
@@ -447,8 +447,8 @@ func @canonicalize_affine_if(%M : index, %N : index) {
// -----
-// CHECK-DAG: [[LBMAP:#map[0-9]+]] = ()[s0] -> (0, s0)
-// CHECK-DAG: [[UBMAP:#map[0-9]+]] = ()[s0] -> (1024, s0 + s0)
+// CHECK-DAG: [[LBMAP:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
+// CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<()[s0] -> (1024, s0 + s0)>
// CHECK-LABEL: func @canonicalize_bounds
// CHECK-SAME: [[M:%.*]]: index,
@@ -459,17 +459,17 @@ func @canonicalize_bounds(%M : index, %N : index) {
// Drop unused operand %N, drop duplicate operand %M, propagate %c1024, and
// promote %M to a symbolic one.
// CHECK: affine.for %{{.*}} = 0 to min [[UBMAP]](){{\[}}[[M]]{{\]}}
- affine.for %i = 0 to min (d0, d1, d2, d3) -> (d0, d1 + d2) (%c1024, %M, %M, %N) {
+ affine.for %i = 0 to min affine_map<(d0, d1, d2, d3) -> (d0, d1 + d2)> (%c1024, %M, %M, %N) {
"foo"() : () -> ()
}
// Promote %M to symbolic position.
// CHECK: affine.for %{{.*}} = 0 to #map{{[0-9]+}}(){{\[}}[[M]]{{\]}}
- affine.for %i = 0 to (d0) -> (4 * d0) (%M) {
+ affine.for %i = 0 to affine_map<(d0) -> (4 * d0)> (%M) {
"foo"() : () -> ()
}
// Lower bound canonicalize.
// CHECK: affine.for %{{.*}} = max [[LBMAP]](){{\[}}[[N]]{{\]}} to [[M]]
- affine.for %i = max (d0, d1) -> (d0, d1) (%c0, %N) to %M {
+ affine.for %i = max affine_map<(d0, d1) -> (d0, d1)> (%c0, %N) to %M {
"foo"() : () -> ()
}
return
@@ -479,7 +479,7 @@ func @canonicalize_bounds(%M : index, %N : index) {
// Compose maps into affine load and store ops.
-// CHECK-DAG: #map{{[0-9]+}} = (d0) -> (d0 + 1)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0) -> (d0 + 1)>
// CHECK-LABEL: @compose_into_affine_load_store
func @compose_into_affine_load_store(%A : memref<1024xf32>, %u : index) {
@@ -487,14 +487,14 @@ func @compose_into_affine_load_store(%A : memref<1024xf32>, %u : index) {
// CHECK: affine.for %[[IV:.*]] = 0 to 1024
affine.for %i = 0 to 1024 {
// Make sure the unused operand (%u below) gets dropped as well.
- %idx = affine.apply (d0, d1) -> (d0 + 1) (%i, %u)
+ %idx = affine.apply affine_map<(d0, d1) -> (d0 + 1)> (%i, %u)
affine.load %A[%idx] : memref<1024xf32>
affine.store %cf1, %A[%idx] : memref<1024xf32>
// CHECK-NEXT: affine.load %{{.*}}[%[[IV]] + 1]
// CHECK-NEXT: affine.store %cst, %{{.*}}[%[[IV]] + 1]
// Map remains the same, but operand changes on composition.
- %copy = affine.apply (d0) -> (d0) (%i)
+ %copy = affine.apply affine_map<(d0) -> (d0)> (%i)
affine.load %A[%copy] : memref<1024xf32>
// CHECK-NEXT: affine.load %{{.*}}[%[[IV]]]
}
@@ -506,7 +506,7 @@ func @compose_into_affine_load_store(%A : memref<1024xf32>, %u : index) {
func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
%c511 = constant 511 : index
%c1 = constant 0 : index
- %0 = affine.min (d0)[s0] -> (1000, d0 + 512, s0 + 1) (%c1)[%c511]
+ %0 = affine.min affine_map<(d0)[s0] -> (1000, d0 + 512, s0 + 1)> (%c1)[%c511]
"op0"(%0) : (index) -> ()
// CHECK: %[[CST:.*]] = constant 512 : index
// CHECK-NEXT: "op0"(%[[CST]]) : (index) -> ()
@@ -519,7 +519,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
%c3 = constant 3 : index
%c20 = constant 20 : index
- %0 = affine.min (d0)[s0] -> (1000, d0 floordiv 4, (s0 mod 5) + 1) (%c20)[%c3]
+ %0 = affine.min affine_map<(d0)[s0] -> (1000, d0 floordiv 4, (s0 mod 5) + 1)> (%c20)[%c3]
"op0"(%0) : (index) -> ()
// CHECK: %[[CST:.*]] = constant 4 : index
// CHECK-NEXT: "op0"(%[[CST]]) : (index) -> ()
diff --git a/mlir/test/AffineOps/dma.mlir b/mlir/test/AffineOps/dma.mlir
index 785d46aa89a..28f41025853 100644
--- a/mlir/test/AffineOps/dma.mlir
+++ b/mlir/test/AffineOps/dma.mlir
@@ -2,12 +2,12 @@
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0, d1)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
// Test with loop IVs.
func @test0(%arg0 : index, %arg1 : index) {
%0 = alloc() : memref<100x100xf32>
- %1 = alloc() : memref<100x100xf32, (d0, d1) -> (d0, d1), 2>
+ %1 = alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
%2 = alloc() : memref<1xi32>
%c0 = constant 0 : index
%c64 = constant 64 : index
@@ -25,12 +25,12 @@ func @test0(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0, d1)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
// Test with loop IVs and optional stride arguments.
func @test1(%arg0 : index, %arg1 : index) {
%0 = alloc() : memref<100x100xf32>
- %1 = alloc() : memref<100x100xf32, (d0, d1) -> (d0, d1), 2>
+ %1 = alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
%2 = alloc() : memref<1xi32>
%c0 = constant 0 : index
%c64 = constant 64 : index
@@ -50,13 +50,13 @@ func @test1(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2) -> (d0, d1 + d2 + 5)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2) -> (d0 + d1, d2)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d1 + d2 + 5)>
+// CHECK: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 + d1, d2)>
// Test with loop IVs and symbols (without symbol keyword).
func @test2(%arg0 : index, %arg1 : index) {
%0 = alloc() : memref<100x100xf32>
- %1 = alloc() : memref<100x100xf32, (d0, d1) -> (d0, d1), 2>
+ %1 = alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
%2 = alloc() : memref<1xi32>
%c0 = constant 0 : index
%c64 = constant 64 : index
@@ -75,14 +75,14 @@ func @test2(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1)[s0] -> (d0, d1 + s0 + 7)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1)[s0] -> (d0 + s0, d1)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1) -> (d0 + d1 + 11)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0, d1 + s0 + 7)>
+// CHECK: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
+// CHECK: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1 + 11)>
// Test with loop IVs and symbols (with symbol keyword).
func @test3(%arg0 : index, %arg1 : index) {
%0 = alloc() : memref<100x100xf32>
- %1 = alloc() : memref<100x100xf32, (d0, d1) -> (d0, d1), 2>
+ %1 = alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
%2 = alloc() : memref<1xi32>
%c0 = constant 0 : index
%c64 = constant 64 : index
@@ -102,9 +102,9 @@ func @test3(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1)[s0] -> (d0, (d1 + s0) mod 9 + 7)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1)[s0] -> ((d0 + s0) floordiv 3, d1)
-// CHECK: [[MAP2:#map[0-9]+]] = (d0, d1) -> (d0 + d1 + 11)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0, (d1 + s0) mod 9 + 7)>
+// CHECK: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1)[s0] -> ((d0 + s0) floordiv 3, d1)>
+// CHECK: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1 + 11)>
// Test with loop IVs, symbols and constants in nested affine expressions.
func @test4(%arg0 : index, %arg1 : index) {
diff --git a/mlir/test/AffineOps/inlining.mlir b/mlir/test/AffineOps/inlining.mlir
index 6710663489b..43b6c245207 100644
--- a/mlir/test/AffineOps/inlining.mlir
+++ b/mlir/test/AffineOps/inlining.mlir
@@ -4,8 +4,8 @@
func @func_with_affine_ops(%N: index) {
%c = constant 200 : index
affine.for %i = 1 to 10 {
- affine.if (i)[N] : (i - 2 >= 0, 4 - i >= 0)(%i)[%c] {
- %w = affine.apply (d0,d1)[s0] -> (d0+d1+s0) (%i, %i) [%N]
+ affine.if affine_set<(i)[N] : (i - 2 >= 0, 4 - i >= 0)>(%i)[%c] {
+ %w = affine.apply affine_map<(d0,d1)[s0] -> (d0+d1+s0)> (%i, %i) [%N]
}
}
return
diff --git a/mlir/test/AffineOps/invalid.mlir b/mlir/test/AffineOps/invalid.mlir
index 2ade76c194c..b36af2cd60a 100644
--- a/mlir/test/AffineOps/invalid.mlir
+++ b/mlir/test/AffineOps/invalid.mlir
@@ -6,7 +6,7 @@ func @affine_apply_operand_non_index(%arg0 : i32) {
// Custom parser automatically assigns all arguments the `index` so we must
// use the generic syntax here to exercise the verifier.
// expected-error@+1 {{operands must be of type 'index'}}
- %0 = "affine.apply"(%arg0) {map = (d0) -> (d0)} : (i32) -> (index)
+ %0 = "affine.apply"(%arg0) {map = affine_map<(d0) -> (d0)>} : (i32) -> (index)
return
}
@@ -16,13 +16,13 @@ func @affine_apply_resul_non_index(%arg0 : index) {
// Custom parser automatically assigns `index` as the result type so we must
// use the generic syntax here to exercise the verifier.
// expected-error@+1 {{result must be of type 'index'}}
- %0 = "affine.apply"(%arg0) {map = (d0) -> (d0)} : (index) -> (i32)
+ %0 = "affine.apply"(%arg0) {map = affine_map<(d0) -> (d0)>} : (index) -> (i32)
return
}
// -----
-#map = (d0)[s0] -> (d0 + s0)
+#map = affine_map<(d0)[s0] -> (d0 + s0)>
func @affine_for_lower_bound_invalid_dim(%arg : index) {
affine.for %n0 = 0 to 7 {
@@ -37,7 +37,7 @@ func @affine_for_lower_bound_invalid_dim(%arg : index) {
// -----
-#map = (d0)[s0] -> (d0 + s0)
+#map = affine_map<(d0)[s0] -> (d0 + s0)>
func @affine_for_upper_bound_invalid_dim(%arg : index) {
affine.for %n0 = 0 to 7 {
@@ -65,7 +65,7 @@ func @affine_load_invalid_dim(%M : memref<10xi32>) {
// -----
-#map0 = (d0)[s0] -> (d0 + s0)
+#map0 = affine_map<(d0)[s0] -> (d0 + s0)>
func @affine_for_lower_bound_invalid_sym() {
affine.for %i0 = 0 to 7 {
@@ -78,7 +78,7 @@ func @affine_for_lower_bound_invalid_sym() {
// -----
-#map0 = (d0)[s0] -> (d0 + s0)
+#map0 = affine_map<(d0)[s0] -> (d0 + s0)>
func @affine_for_upper_bound_invalid_sym() {
affine.for %i0 = 0 to 7 {
@@ -91,7 +91,7 @@ func @affine_for_upper_bound_invalid_sym() {
// -----
-#set0 = (i)[N] : (i >= 0, N - i >= 0)
+#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
func @affine_if_invalid_dim(%arg : index) {
affine.for %n0 = 0 to 7 {
@@ -105,7 +105,7 @@ func @affine_if_invalid_dim(%arg : index) {
// -----
-#set0 = (i)[N] : (i >= 0, N - i >= 0)
+#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
func @affine_if_invalid_sym() {
affine.for %i0 = 0 to 7 {
@@ -117,7 +117,7 @@ func @affine_if_invalid_sym() {
// -----
-#set0 = (i)[N] : (i >= 0, N - i >= 0)
+#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
func @affine_if_invalid_dimop_dim(%arg0: index, %arg1: index, %arg2: index, %arg3: index) {
affine.for %n0 = 0 to 7 {
@@ -144,7 +144,7 @@ func @affine_store_missing_l_square(%C: memref<4096x4096xf32>) {
// CHECK-LABEL: @affine_min
func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-error@+1 {{operand count and affine map dimension and symbol count must match}}
- %0 = affine.min (d0) -> (d0) (%arg0, %arg1)
+ %0 = affine.min affine_map<(d0) -> (d0)> (%arg0, %arg1)
return
}
@@ -154,7 +154,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-LABEL: @affine_min
func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-error@+1 {{operand count and affine map dimension and symbol count must match}}
- %0 = affine.min ()[s0] -> (s0) (%arg0, %arg1)
+ %0 = affine.min affine_map<()[s0] -> (s0)> (%arg0, %arg1)
return
}
@@ -164,7 +164,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-LABEL: @affine_min
func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-error@+1 {{operand count and affine map dimension and symbol count must match}}
- %0 = affine.min (d0) -> (d0) ()
+ %0 = affine.min affine_map<(d0) -> (d0)> ()
return
}
diff --git a/mlir/test/AffineOps/load-store-invalid.mlir b/mlir/test/AffineOps/load-store-invalid.mlir
index 9168a40f5ee..5bc31a2184f 100644
--- a/mlir/test/AffineOps/load-store-invalid.mlir
+++ b/mlir/test/AffineOps/load-store-invalid.mlir
@@ -10,7 +10,7 @@ func @load_too_many_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %arg2: inde
func @load_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index, %arg3: index) {
// expected-error@+1 {{op expects as many subscripts as affine map inputs}}
"affine.load"(%arg0, %arg1, %arg2, %arg3)
- {map = (i, j) -> (i, j) } : (memref<?x?xf32>, index, index, index) -> f32
+ {map = affine_map<(i, j) -> (i, j)> } : (memref<?x?xf32>, index, index, index) -> f32
}
// -----
@@ -25,7 +25,7 @@ func @load_too_few_subscripts(%arg0: memref<?x?xf32>, %arg1: index) {
func @load_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index) {
// expected-error@+1 {{op expects as many subscripts as affine map inputs}}
"affine.load"(%arg0, %arg1)
- {map = (i, j) -> (i, j) } : (memref<?x?xf32>, index) -> f32
+ {map = affine_map<(i, j) -> (i, j)> } : (memref<?x?xf32>, index) -> f32
}
// -----
@@ -42,7 +42,7 @@ func @store_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2:
%arg3: index, %val: f32) {
// expected-error@+1 {{op expects as many subscripts as affine map inputs}}
"affine.store"(%val, %arg0, %arg1, %arg2, %arg3)
- {map = (i, j) -> (i, j) } : (f32, memref<?x?xf32>, index, index, index) -> ()
+ {map = affine_map<(i, j) -> (i, j)> } : (f32, memref<?x?xf32>, index, index, index) -> ()
}
// -----
@@ -57,7 +57,7 @@ func @store_too_few_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %val: f32)
func @store_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %val: f32) {
// expected-error@+1 {{op expects as many subscripts as affine map inputs}}
"affine.store"(%val, %arg0, %arg1)
- {map = (i, j) -> (i, j) } : (f32, memref<?x?xf32>, index) -> ()
+ {map = affine_map<(i, j) -> (i, j)> } : (f32, memref<?x?xf32>, index) -> ()
}
// -----
diff --git a/mlir/test/AffineOps/load-store.mlir b/mlir/test/AffineOps/load-store.mlir
index ebf632c65f1..54e753d17fe 100644
--- a/mlir/test/AffineOps/load-store.mlir
+++ b/mlir/test/AffineOps/load-store.mlir
@@ -2,7 +2,7 @@
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0, d1)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
// Test with just loop IVs.
func @test0(%arg0 : index, %arg1 : index) {
@@ -18,7 +18,7 @@ func @test0(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0 + 3, d1 + 7)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3, d1 + 7)>
// Test with loop IVs and constants.
func @test1(%arg0 : index, %arg1 : index) {
@@ -36,7 +36,7 @@ func @test1(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2, d3) -> (d0 + d1, d2 + d3)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1, d2, d3) -> (d0 + d1, d2 + d3)>
// Test with loop IVs and function args without 'symbol' keyword (should
// be parsed as dim identifiers).
@@ -55,7 +55,7 @@ func @test2(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1)[s0, s1] -> (d0 + s0, d1 + s1)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 + s0, d1 + s1)>
// Test with loop IVs and function args with 'symbol' keyword (should
// be parsed as symbol identifiers).
@@ -76,7 +76,7 @@ func @test3(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1)[s0, s1] -> ((d0 + s0) floordiv 3 + 11, (d1 + s1) mod 4 + 7)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> ((d0 + s0) floordiv 3 + 11, (d1 + s1) mod 4 + 7)>
// Test with loop IVs, symbols and constants in nested affine expressions.
func @test4(%arg0 : index, %arg1 : index) {
@@ -96,7 +96,7 @@ func @test4(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2) -> (d0, d1, d2)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// Test with swizzled loop IVs.
func @test5(%arg0 : index, %arg1 : index) {
@@ -116,7 +116,7 @@ func @test5(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2, d3, d4) -> (d0 + d1, d2 + d3, d3 + d1 + d4)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0 + d1, d2 + d3, d3 + d1 + d4)>
// Test with swizzled loop IVs, duplicate args, and function args used as dims.
// Dim identifiers are assigned in parse order:
@@ -140,7 +140,7 @@ func @test6(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2)[s0, s1] -> (d0 + s0, d1 + d2, d2 + s0 + s1)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 + s0, d1 + d2, d2 + s0 + s1)>
// Test with swizzled loop IVs, duplicate args, and function args used as syms.
// Dim and symbol identifiers are assigned in parse order:
@@ -169,13 +169,13 @@ func @test6(%arg0 : index, %arg1 : index) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0) -> (d0 + 1)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
// Test with operands without special SSA name.
func @test7() {
%0 = alloc() : memref<10xf32>
affine.for %i0 = 0 to 10 {
- %1 = affine.apply (d1) -> (d1 + 1)(%i0)
+ %1 = affine.apply affine_map<(d1) -> (d1 + 1)>(%i0)
%2 = affine.load %0[%1] : memref<10xf32>
affine.store %2, %0[%1] : memref<10xf32>
// CHECK: affine.load %{{.*}}[%{{.*}}] : memref<10xf32>
@@ -197,8 +197,8 @@ func @zero_dim(%arg0 : memref<i32>, %arg1 : memref<i32>) {
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0 + 3, d1 + 7)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1) -> (d0 + 3, d1 + 11)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3, d1 + 7)>
+// CHECK: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3, d1 + 11)>
// Test with loop IVs and constants.
func @test_prefetch(%arg0 : index, %arg1 : index) {
diff --git a/mlir/test/AffineOps/memref-stride-calculation.mlir b/mlir/test/AffineOps/memref-stride-calculation.mlir
index aacd0c776f3..a52de3d858d 100644
--- a/mlir/test/AffineOps/memref-stride-calculation.mlir
+++ b/mlir/test/AffineOps/memref-stride-calculation.mlir
@@ -15,66 +15,66 @@ func @f(%0: index) {
%6 = alloc(%0, %0, %0) : memref<?x?x?xf32>
// CHECK: MemRefType offset: 0 strides: ?, ?, 1
- %11 = alloc() : memref<3x4x5xf32, (i, j, k)->(i, j, k)>
+ %11 = alloc() : memref<3x4x5xf32, affine_map<(i, j, k)->(i, j, k)>>
// CHECK: MemRefType offset: 0 strides: 20, 5, 1
%b11 = alloc() : memref<3x4x5xf32, offset: 0, strides: [20, 5, 1]>
// CHECK: MemRefType offset: 0 strides: 20, 5, 1
- %12 = alloc(%0) : memref<3x4x?xf32, (i, j, k)->(i, j, k)>
+ %12 = alloc(%0) : memref<3x4x?xf32, affine_map<(i, j, k)->(i, j, k)>>
// CHECK: MemRefType offset: 0 strides: ?, ?, 1
- %13 = alloc(%0) : memref<3x?x5xf32, (i, j, k)->(i, j, k)>
+ %13 = alloc(%0) : memref<3x?x5xf32, affine_map<(i, j, k)->(i, j, k)>>
// CHECK: MemRefType offset: 0 strides: ?, 5, 1
- %14 = alloc(%0) : memref<?x4x5xf32, (i, j, k)->(i, j, k)>
+ %14 = alloc(%0) : memref<?x4x5xf32, affine_map<(i, j, k)->(i, j, k)>>
// CHECK: MemRefType offset: 0 strides: 20, 5, 1
- %15 = alloc(%0, %0) : memref<?x4x?xf32, (i, j, k)->(i, j, k)>
+ %15 = alloc(%0, %0) : memref<?x4x?xf32, affine_map<(i, j, k)->(i, j, k)>>
// CHECK: MemRefType offset: 0 strides: ?, ?, 1
- %16 = alloc(%0, %0, %0) : memref<?x?x?xf32, (i, j, k)->(i, j, k)>
+ %16 = alloc(%0, %0, %0) : memref<?x?x?xf32, affine_map<(i, j, k)->(i, j, k)>>
// CHECK: MemRefType offset: 0 strides: ?, ?, 1
- %21 = alloc()[%0] : memref<3x4x5xf32, (i, j, k)[M]->(32 * i + 16 * j + M * k + 1)>
+ %21 = alloc()[%0] : memref<3x4x5xf32, affine_map<(i, j, k)[M]->(32 * i + 16 * j + M * k + 1)>>
// CHECK: MemRefType offset: 1 strides: 32, 16, ?
- %22 = alloc()[%0] : memref<3x4x5xf32, (i, j, k)[M]->(32 * i + M * j + 16 * k + 3)>
+ %22 = alloc()[%0] : memref<3x4x5xf32, affine_map<(i, j, k)[M]->(32 * i + M * j + 16 * k + 3)>>
// CHECK: MemRefType offset: 3 strides: 32, ?, 16
%b22 = alloc(%0)[%0, %0] : memref<3x4x?xf32, offset: 0, strides: [?, ?, 1]>
// CHECK: MemRefType offset: 0 strides: ?, ?, 1
- %23 = alloc(%0)[%0] : memref<3x?x5xf32, (i, j, k)[M]->(M * i + 32 * j + 16 * k + 7)>
+ %23 = alloc(%0)[%0] : memref<3x?x5xf32, affine_map<(i, j, k)[M]->(M * i + 32 * j + 16 * k + 7)>>
// CHECK: MemRefType offset: 7 strides: ?, 32, 16
%b23 = alloc(%0)[%0] : memref<3x?x5xf32, offset: 0, strides: [?, 5, 1]>
// CHECK: MemRefType offset: 0 strides: ?, 5, 1
- %24 = alloc(%0)[%0] : memref<3x?x5xf32, (i, j, k)[M]->(M * i + 32 * j + 16 * k + M)>
+ %24 = alloc(%0)[%0] : memref<3x?x5xf32, affine_map<(i, j, k)[M]->(M * i + 32 * j + 16 * k + M)>>
// CHECK: MemRefType offset: ? strides: ?, 32, 16
%b24 = alloc(%0)[%0, %0] : memref<3x?x5xf32, offset: ?, strides: [?, 32, 16]>
// CHECK: MemRefType offset: ? strides: ?, 32, 16
- %25 = alloc(%0, %0)[%0, %0] : memref<?x?x16xf32, (i, j, k)[M, N]->(M * i + N * j + k + 1)>
+ %25 = alloc(%0, %0)[%0, %0] : memref<?x?x16xf32, affine_map<(i, j, k)[M, N]->(M * i + N * j + k + 1)>>
// CHECK: MemRefType offset: 1 strides: ?, ?, 1
%b25 = alloc(%0, %0)[%0, %0] : memref<?x?x16xf32, offset: 1, strides: [?, ?, 1]>
// CHECK: MemRefType offset: 1 strides: ?, ?, 1
- %26 = alloc(%0)[] : memref<?xf32, (i)[M]->(i)>
+ %26 = alloc(%0)[] : memref<?xf32, affine_map<(i)[M]->(i)>>
// CHECK: MemRefType offset: 0 strides: 1
- %27 = alloc()[%0] : memref<5xf32, (i)[M]->(M)>
-// CHECK: MemRefType memref<5xf32, (d0)[s0] -> (s0)> cannot be converted to strided form
- %28 = alloc()[%0] : memref<5xf32, (i)[M]->(123)>
-// CHECK: MemRefType memref<5xf32, (d0)[s0] -> (123)> cannot be converted to strided form
- %29 = alloc()[%0] : memref<f32, ()[M]->(M)>
+ %27 = alloc()[%0] : memref<5xf32, affine_map<(i)[M]->(M)>>
+// CHECK: MemRefType memref<5xf32, affine_map<(d0)[s0] -> (s0)>> cannot be converted to strided form
+ %28 = alloc()[%0] : memref<5xf32, affine_map<(i)[M]->(123)>>
+// CHECK: MemRefType memref<5xf32, affine_map<(d0)[s0] -> (123)>> cannot be converted to strided form
+ %29 = alloc()[%0] : memref<f32, affine_map<()[M]->(M)>>
// CHECK: MemRefType offset: ? strides:
- %30 = alloc()[%0] : memref<f32, ()[M]->(123)>
+ %30 = alloc()[%0] : memref<f32, affine_map<()[M]->(123)>>
// CHECK: MemRefType offset: 123 strides:
- %100 = alloc(%0, %0)[%0, %0] : memref<?x?x16xf32, (i, j, k)[M, N]->(i + j, j, k), (i, j, k)[M, N]->(M * i + N * j + k + 1)>
-// CHECK: MemRefType memref<?x?x16xf32, (d0, d1, d2)[s0, s1] -> (d0 + d1, d1, d2), (d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2 + 1)> cannot be converted to strided form
- %101 = alloc() : memref<3x4x5xf32, (i, j, k)->(i floordiv 4 + j + k)>
-// CHECK: MemRefType memref<3x4x5xf32, (d0, d1, d2) -> (d0 floordiv 4 + d1 + d2)> cannot be converted to strided form
- %102 = alloc() : memref<3x4x5xf32, (i, j, k)->(i ceildiv 4 + j + k)>
-// CHECK: MemRefType memref<3x4x5xf32, (d0, d1, d2) -> (d0 ceildiv 4 + d1 + d2)> cannot be converted to strided form
- %103 = alloc() : memref<3x4x5xf32, (i, j, k)->(i mod 4 + j + k)>
-// CHECK: MemRefType memref<3x4x5xf32, (d0, d1, d2) -> (d0 mod 4 + d1 + d2)> cannot be converted to strided form
+ %100 = alloc(%0, %0)[%0, %0] : memref<?x?x16xf32, affine_map<(i, j, k)[M, N]->(i + j, j, k)>, affine_map<(i, j, k)[M, N]->(M * i + N * j + k + 1)>>
+// CHECK: MemRefType memref<?x?x16xf32, affine_map<(d0, d1, d2)[s0, s1] -> (d0 + d1, d1, d2)>, affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2 + 1)>> cannot be converted to strided form
+ %101 = alloc() : memref<3x4x5xf32, affine_map<(i, j, k)->(i floordiv 4 + j + k)>>
+// CHECK: MemRefType memref<3x4x5xf32, affine_map<(d0, d1, d2) -> (d0 floordiv 4 + d1 + d2)>> cannot be converted to strided form
+ %102 = alloc() : memref<3x4x5xf32, affine_map<(i, j, k)->(i ceildiv 4 + j + k)>>
+// CHECK: MemRefType memref<3x4x5xf32, affine_map<(d0, d1, d2) -> (d0 ceildiv 4 + d1 + d2)>> cannot be converted to strided form
+ %103 = alloc() : memref<3x4x5xf32, affine_map<(i, j, k)->(i mod 4 + j + k)>>
+// CHECK: MemRefType memref<3x4x5xf32, affine_map<(d0, d1, d2) -> (d0 mod 4 + d1 + d2)>> cannot be converted to strided form
- %200 = alloc()[%0, %0, %0] : memref<3x4x5xf32, (i, j, k)[M, N, K]->(M * i + N * i + N * j + K * k - (M + N - 20)* i)>
+ %200 = alloc()[%0, %0, %0] : memref<3x4x5xf32, affine_map<(i, j, k)[M, N, K]->(M * i + N * i + N * j + K * k - (M + N - 20)* i)>>
// CHECK: MemRefType offset: 0 strides: 20, ?, ?
- %201 = alloc()[%0, %0, %0] : memref<3x4x5xf32, (i, j, k)[M, N, K]->(M * i + N * i + N * K * j + K * K * k - (M + N - 20) * (i + 1))>
+ %201 = alloc()[%0, %0, %0] : memref<3x4x5xf32, affine_map<(i, j, k)[M, N, K]->(M * i + N * i + N * K * j + K * K * k - (M + N - 20) * (i + 1))>>
// CHECK: MemRefType offset: ? strides: 20, ?, ?
- %202 = alloc()[%0, %0, %0] : memref<3x4x5xf32, (i, j, k)[M, N, K]->(M * (i + 1) + j + k - M)>
+ %202 = alloc()[%0, %0, %0] : memref<3x4x5xf32, affine_map<(i, j, k)[M, N, K]->(M * (i + 1) + j + k - M)>>
// CHECK: MemRefType offset: 0 strides: ?, 1, 1
- %203 = alloc()[%0, %0, %0] : memref<3x4x5xf32, (i, j, k)[M, N, K]->(M + M * (i + N * (j + K * k)))>
+ %203 = alloc()[%0, %0, %0] : memref<3x4x5xf32, affine_map<(i, j, k)[M, N, K]->(M + M * (i + N * (j + K * k)))>>
// CHECK: MemRefType offset: ? strides: ?, ?, ?
return
diff --git a/mlir/test/AffineOps/ops.mlir b/mlir/test/AffineOps/ops.mlir
index d78ddd2d76f..48b21fa9e0e 100644
--- a/mlir/test/AffineOps/ops.mlir
+++ b/mlir/test/AffineOps/ops.mlir
@@ -22,7 +22,7 @@ func @empty() {
// GENERIC-NEXT: "affine.terminator"() : () -> ()
// GENERIC-NEXT: }, {
// GENERIC-NEXT: })
- affine.if () : () () {
+ affine.if affine_set<() : ()> () {
} {some_attr = true}
// CHECK: } else {
@@ -34,7 +34,7 @@ func @empty() {
// GENERIC-NEXT: "foo"() : () -> ()
// GENERIC-NEXT: "affine.terminator"() : () -> ()
// GENERIC-NEXT: })
- affine.if () : () () {
+ affine.if affine_set<() : ()> () {
} else {
"foo"() : () -> ()
} {some_attr = true}
@@ -61,21 +61,21 @@ func @affine_terminator() {
// -----
-// CHECK-DAG: #[[MAP0:map[0-9]+]] = (d0)[s0] -> (1000, d0 + 512, s0)
-// CHECK-DAG: #[[MAP1:map[0-9]+]] = (d0, d1)[s0] -> (d0 - d1, s0 + 512)
-// CHECK-DAG: #[[MAP2:map[0-9]+]] = ()[s0, s1] -> (s0 - s1, 11)
-// CHECK-DAG: #[[MAP3:map[0-9]+]] = () -> (77, 78, 79)
+// CHECK-DAG: #[[MAP0:map[0-9]+]] = affine_map<(d0)[s0] -> (1000, d0 + 512, s0)>
+// CHECK-DAG: #[[MAP1:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 - d1, s0 + 512)>
+// CHECK-DAG: #[[MAP2:map[0-9]+]] = affine_map<()[s0, s1] -> (s0 - s1, 11)>
+// CHECK-DAG: #[[MAP3:map[0-9]+]] = affine_map<() -> (77, 78, 79)>
// CHECK-LABEL: @affine_min
func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: affine.min #[[MAP0]](%arg0)[%arg1]
- %0 = affine.min (d0)[s0] -> (1000, d0 + 512, s0) (%arg0)[%arg1]
+ %0 = affine.min affine_map<(d0)[s0] -> (1000, d0 + 512, s0)> (%arg0)[%arg1]
// CHECK: affine.min #[[MAP1]](%arg0, %arg1)[%arg2]
- %1 = affine.min (d0, d1)[s0] -> (d0 - d1, s0 + 512) (%arg0, %arg1)[%arg2]
+ %1 = affine.min affine_map<(d0, d1)[s0] -> (d0 - d1, s0 + 512)> (%arg0, %arg1)[%arg2]
// CHECK: affine.min #[[MAP2]]()[%arg1, %arg2]
- %2 = affine.min ()[s0, s1] -> (s0 - s1, 11) ()[%arg1, %arg2]
+ %2 = affine.min affine_map<()[s0, s1] -> (s0 - s1, 11)> ()[%arg1, %arg2]
// CHECK: affine.min #[[MAP3]]()
- %3 = affine.min ()[] -> (77, 78, 79) ()[]
+ %3 = affine.min affine_map<()[] -> (77, 78, 79)> ()[]
return
}
@@ -90,8 +90,8 @@ func @valid_symbols(%arg0: index, %arg1: index, %arg2: index) {
affine.for %arg4 = 0 to %13 step 264 {
%18 = dim %0, 0 : memref<?x?xf32>
%20 = std.subview %0[%c0, %c0][%18,%arg4][%c1,%c1] : memref<?x?xf32>
- to memref<?x?xf32, (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
- %24 = dim %20, 0 : memref<?x?xf32, (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
+ to memref<?x?xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>>
+ %24 = dim %20, 0 : memref<?x?xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>>
affine.for %arg5 = 0 to %24 step 768 {
"foo"() : () -> ()
}
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir
index 706e2de2691..358cf40fd46 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir
@@ -8,9 +8,9 @@ func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref<?x?xf32>, %m
// CHECK-LABEL: func @check_strided_memref_arguments(
// CHECK-COUNT-3: !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">
-func @check_strided_memref_arguments(%static: memref<10x20xf32, (i,j)->(20 * i + j + 1)>,
- %dynamic : memref<?x?xf32, (i,j)[M]->(M * i + j + 1)>,
- %mixed : memref<10x?xf32, (i,j)[M]->(M * i + j + 1)>) {
+func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)->(20 * i + j + 1)>>,
+ %dynamic : memref<?x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>,
+ %mixed : memref<10x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>) {
return
}
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
index 25054bb340e..45147235d58 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
@@ -637,7 +637,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: llvm.mul %{{.*}}, %[[ARG1]]
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%1 = view %0[%arg2][%arg0, %arg1]
- : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>
+ : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>>
// Test two dynamic sizes and static offset.
// CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -653,7 +653,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: llvm.mul %{{.*}}, %[[ARG1]]
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%2 = view %0[][%arg0, %arg1]
- : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * s0 + d1)>
+ : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>
// Test one dynamic size and dynamic offset.
// CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -669,7 +669,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: llvm.mul %{{.*}}, %[[ARG1]]
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%3 = view %0[%arg2][%arg1]
- : memref<2048xi8> to memref<4x?xf32, (d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>
+ : memref<2048xi8> to memref<4x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>>
// Test one dynamic size and static offset.
// CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -686,7 +686,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%4 = view %0[][%arg0]
- : memref<2048xi8> to memref<?x16xf32, (d0, d1) -> (d0 * 4 + d1)>
+ : memref<2048xi8> to memref<?x16xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>
// Test static sizes and static offset.
// CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -704,7 +704,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: llvm.mlir.constant(4 : index) : !llvm.i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%5 = view %0[][]
- : memref<2048xi8> to memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>
+ : memref<2048xi8> to memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>
// Test dynamic everything.
// CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -719,14 +719,14 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: llvm.mul %[[STRIDE_1]], %[[ARG1]] : !llvm.i64
// CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%6 = view %0[%arg2][%arg0, %arg1]
- : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>
+ : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>>
return
}
// CHECK-LABEL: func @subview(
// CHECK: %[[MEMREFPTR:.*]]: !llvm<{{.*}}>, %[[ARG0:.*]]: !llvm.i64, %[[ARG1:.*]]: !llvm.i64, %[[ARG2:.*]]: !llvm.i64
-func @subview(%0 : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func @subview(%0 : memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>, %arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: %[[MEMREF:.*]] = llvm.load %[[MEMREFPTR]] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">
// CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
// CHECK: %[[DESC0:.*]] = llvm.insertvalue %{{.*}}, %[[DESC]][0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -746,13 +746,13 @@ func @subview(%0 : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>, %arg0 : index, %
// CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
// CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] :
- memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)> to memref<?x?xf32, (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
+ memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>> to memref<?x?xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>>
return
}
// CHECK-LABEL: func @subview_const_size(
// CHECK: %[[MEMREFPTR:.*]]: !llvm<{{.*}}>, %[[ARG0:.*]]: !llvm.i64, %[[ARG1:.*]]: !llvm.i64, %[[ARG2:.*]]: !llvm.i64
-func @subview_const_size(%0 : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func @subview_const_size(%0 : memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>, %arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: %[[MEMREF:.*]] = llvm.load %[[MEMREFPTR]] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">
// CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
// CHECK: %[[DESC0:.*]] = llvm.insertvalue %{{.*}}, %[[DESC]][0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -774,13 +774,13 @@ func @subview_const_size(%0 : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>, %arg0
// CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
// CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%1 = subview %0[%arg0, %arg1][][%arg0, %arg1] :
- memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)> to memref<4x2xf32, (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
+ memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>> to memref<4x2xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>>
return
}
// CHECK-LABEL: func @subview_const_stride(
// CHECK: %[[MEMREFPTR:.*]]: !llvm<{{.*}}>, %[[ARG0:.*]]: !llvm.i64, %[[ARG1:.*]]: !llvm.i64, %[[ARG2:.*]]: !llvm.i64
-func @subview_const_stride(%0 : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func @subview_const_stride(%0 : memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>, %arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK: %[[MEMREF:.*]] = llvm.load %[[MEMREFPTR]] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">
// CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
// CHECK: %[[DESC0:.*]] = llvm.insertvalue %{{.*}}, %[[DESC]][0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
@@ -800,7 +800,7 @@ func @subview_const_stride(%0 : memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>, %ar
// CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
// CHECK: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
%1 = subview %0[%arg0, %arg1][%arg0, %arg1][] :
- memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 * 2 + s0)>
+ memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 * 2 + s0)>>
return
}
diff --git a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
index 50b3e4cdcfb..e020c8f6931 100644
--- a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
@@ -3,17 +3,17 @@
// CHECK-LABEL: func @address_space(
// CHECK: %{{.*}}: !llvm<"{ float addrspace(7)*, float addrspace(7)*, i64, [1 x i64], [1 x i64] }*">)
// CHECK: llvm.load %{{.*}} : !llvm<"{ float addrspace(7)*, float addrspace(7)*, i64, [1 x i64], [1 x i64] }*">
-func @address_space(%arg0 : memref<32xf32, (d0) -> (d0), 7>) {
- %0 = alloc() : memref<32xf32, (d0) -> (d0), 5>
+func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
+ %0 = alloc() : memref<32xf32, affine_map<(d0) -> (d0)>, 5>
%1 = constant 7 : index
// CHECK: llvm.load %{{.*}} : !llvm<"float addrspace(5)*">
- %2 = load %0[%1] : memref<32xf32, (d0) -> (d0), 5>
+ %2 = load %0[%1] : memref<32xf32, affine_map<(d0) -> (d0)>, 5>
std.return
}
// CHECK-LABEL: func @strided_memref(
func @strided_memref(%ind: index) {
- %0 = alloc()[%ind] : memref<32x64xf32, (i, j)[M] -> (32 + M * i + j)>
+ %0 = alloc()[%ind] : memref<32x64xf32, affine_map<(i, j)[M] -> (32 + M * i + j)>>
std.return
}
diff --git a/mlir/test/Conversion/VectorToLoops/vector-to-loops.mlir b/mlir/test/Conversion/VectorToLoops/vector-to-loops.mlir
index e73e658a139..87004c2b61c 100644
--- a/mlir/test/Conversion/VectorToLoops/vector-to-loops.mlir
+++ b/mlir/test/Conversion/VectorToLoops/vector-to-loops.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -test-convert-vector-to-loops | FileCheck %s
-// CHECK: #[[ADD:map[0-9]+]] = (d0, d1) -> (d0 + d1)
-// CHECK: #[[SUB:map[0-9]+]] = ()[s0] -> (s0 - 1)
+// CHECK: #[[ADD:map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
+// CHECK: #[[SUB:map[0-9]+]] = affine_map<()[s0] -> (s0 - 1)>
// CHECK-LABEL: func @materialize_read_1d() {
func @materialize_read_1d() {
@@ -9,13 +9,13 @@ func @materialize_read_1d() {
%A = alloc () : memref<7x42xf32>
affine.for %i0 = 0 to 7 step 4 {
affine.for %i1 = 0 to 42 step 4 {
- %f1 = vector.transfer_read %A[%i0, %i1], %f0 {permutation_map = (d0, d1) -> (d0)} : memref<7x42xf32>, vector<4xf32>
- %ip1 = affine.apply (d0) -> (d0 + 1) (%i1)
- %f2 = vector.transfer_read %A[%i0, %ip1], %f0 {permutation_map = (d0, d1) -> (d0)} : memref<7x42xf32>, vector<4xf32>
- %ip2 = affine.apply (d0) -> (d0 + 2) (%i1)
- %f3 = vector.transfer_read %A[%i0, %ip2], %f0 {permutation_map = (d0, d1) -> (d0)} : memref<7x42xf32>, vector<4xf32>
- %ip3 = affine.apply (d0) -> (d0 + 3) (%i1)
- %f4 = vector.transfer_read %A[%i0, %ip3], %f0 {permutation_map = (d0, d1) -> (d0)} : memref<7x42xf32>, vector<4xf32>
+ %f1 = vector.transfer_read %A[%i0, %i1], %f0 {permutation_map = affine_map<(d0, d1) -> (d0)>} : memref<7x42xf32>, vector<4xf32>
+ %ip1 = affine.apply affine_map<(d0) -> (d0 + 1)> (%i1)
+ %f2 = vector.transfer_read %A[%i0, %ip1], %f0 {permutation_map = affine_map<(d0, d1) -> (d0)>} : memref<7x42xf32>, vector<4xf32>
+ %ip2 = affine.apply affine_map<(d0) -> (d0 + 2)> (%i1)
+ %f3 = vector.transfer_read %A[%i0, %ip2], %f0 {permutation_map = affine_map<(d0, d1) -> (d0)>} : memref<7x42xf32>, vector<4xf32>
+ %ip3 = affine.apply affine_map<(d0) -> (d0 + 3)> (%i1)
+ %f4 = vector.transfer_read %A[%i0, %ip3], %f0 {permutation_map = affine_map<(d0, d1) -> (d0)>} : memref<7x42xf32>, vector<4xf32>
// Both accesses in the load must be clipped otherwise %i1 + 2 and %i1 + 3 will go out of bounds.
// CHECK: {{.*}} = select
// CHECK: %[[FILTERED1:.*]] = select
@@ -36,9 +36,9 @@ func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %d
affine.for %i2 = 0 to %dyn2 {
affine.for %i3 = 0 to 42 step 2 {
affine.for %i4 = 0 to %dyn4 {
- %f1 = vector.transfer_read %A[%i0, %i1, %i2, %i3, %i4], %f0 {permutation_map = (d0, d1, d2, d3, d4) -> (d3)} : memref<7x?x?x42x?xf32>, vector<4xf32>
- %i3p1 = affine.apply (d0) -> (d0 + 1) (%i3)
- %f2 = vector.transfer_read %A[%i0, %i1, %i2, %i3p1, %i4], %f0 {permutation_map = (d0, d1, d2, d3, d4) -> (d3)} : memref<7x?x?x42x?xf32>, vector<4xf32>
+ %f1 = vector.transfer_read %A[%i0, %i1, %i2, %i3, %i4], %f0 {permutation_map = affine_map<(d0, d1, d2, d3, d4) -> (d3)>} : memref<7x?x?x42x?xf32>, vector<4xf32>
+ %i3p1 = affine.apply affine_map<(d0) -> (d0 + 1)> (%i3)
+ %f2 = vector.transfer_read %A[%i0, %i1, %i2, %i3p1, %i4], %f0 {permutation_map = affine_map<(d0, d1, d2, d3, d4) -> (d3)>} : memref<7x?x?x42x?xf32>, vector<4xf32>
}
}
}
@@ -116,7 +116,7 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
affine.for %i1 = 0 to %N {
affine.for %i2 = 0 to %O {
affine.for %i3 = 0 to %P step 5 {
- %f = vector.transfer_read %A[%i0, %i1, %i2, %i3], %f0 {permutation_map = (d0, d1, d2, d3) -> (d3, 0, d0)} : memref<?x?x?x?xf32>, vector<5x4x3xf32>
+ %f = vector.transfer_read %A[%i0, %i1, %i2, %i3], %f0 {permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, 0, d0)>} : memref<?x?x?x?xf32>, vector<5x4x3xf32>
}
}
}
@@ -193,7 +193,7 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
affine.for %i1 = 0 to %N step 4 {
affine.for %i2 = 0 to %O {
affine.for %i3 = 0 to %P step 5 {
- vector.transfer_write %f1, %A[%i0, %i1, %i2, %i3] {permutation_map = (d0, d1, d2, d3) -> (d3, d1, d0)} : vector<5x4x3xf32>, memref<?x?x?x?xf32>
+ vector.transfer_write %f1, %A[%i0, %i1, %i2, %i3] {permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d1, d0)>} : vector<5x4x3xf32>, memref<?x?x?x?xf32>
}
}
}
diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir
index 0591a0c9972..70457825ce4 100644
--- a/mlir/test/Dialect/Linalg/affine.mlir
+++ b/mlir/test/Dialect/Linalg/affine.mlir
@@ -3,10 +3,10 @@
// Test that we can lower all the way to LLVM without crashing, don't check results here.
// RUN: mlir-opt %s --convert-linalg-to-llvm -o=/dev/null 2>&1
-// CHECK-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// CHECK-DAG: #[[strided3D:.*]] = (d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)
+// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[stride2Dilation1:.*]] = (d0, d1) -> (d0 * 2 + d1)
+// CHECK-DAG: #[[stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
%c0 = constant 0 : index
diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
index ba74813f566..34c8a205509 100644
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -1,13 +1,13 @@
// RUN: mlir-opt %s -linalg-fusion | FileCheck %s
-#map0 = (d0) -> (d0 + 2)
-#map1 = (d0) -> (d0 + 4)
-#map2 = (d0) -> (d0 + 3)
-#map3 = (d0)[s0, s1] -> (d0 * s1 + s0)
-#map4 = (d0) -> (d0)
-#map5 = (d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)
-#map6 = (d0, d1) -> (d0, d1)
-// CHECK-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)
+#map0 = affine_map<(d0) -> (d0 + 2)>
+#map1 = affine_map<(d0) -> (d0 + 4)>
+#map2 = affine_map<(d0) -> (d0 + 3)>
+#map3 = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+#map4 = affine_map<(d0) -> (d0)>
+#map5 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#map6 = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
func @f1(%A: memref<?x?xf32, offset: 0, strides: [?, 1]>, %B: memref<?x?xf32, offset: 0, strides: [?, 1]>, %C: memref<?x?xf32, offset: 0, strides: [?, 1]>, %D: memref<?x?xf32, offset: 0, strides: [?, 1]>, %E: memref<?x?xf32, offset: 0, strides: [?, 1]>) -> memref<?x?xf32, offset: 0, strides: [?, 1]> {
%c0 = constant 0 : index
@@ -306,7 +306,7 @@ func @f8(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>, %B: memref<?x?xf32, of
// CHECK: linalg.matmul
// CHECK-NOT: linalg.matmul
-#id_2d = (i, j) -> (i, j)
+#id_2d = affine_map<(i, j) -> (i, j)>
#pointwise_2d_trait = {
args_in = 2,
args_out = 1,
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index f4d11dad986..748ab053a24 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -8,18 +8,18 @@ func @load_number_of_indices(%v : memref<f32>) {
// -----
-func @slice_number_of_indexings(%arg0: memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>) {
+func @slice_number_of_indexings(%arg0: memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
// expected-error @+2 {{expected 2 indexings, got 1}}
%c0 = constant 0: index
- %0 = linalg.slice %arg0[%c0] : memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>, index, memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>
+ %0 = linalg.slice %arg0[%c0] : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>, index, memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>
}
// -----
-func @slice_rank_vs_range_indices(%arg0: memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>) {
+func @slice_rank_vs_range_indices(%arg0: memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
// expected-error @+2 {{op expected rank of the view(1) to be the number of ranges(0)}}
%c0 = constant 0: index
- %0 = linalg.slice %arg0[%c0, %c0] : memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>, index, index, memref<?xf32, (i)[off]->(off + i)>
+ %0 = linalg.slice %arg0[%c0, %c0] : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>, index, index, memref<?xf32, affine_map<(i)[off]->(off + i)>>
}
// -----
@@ -33,23 +33,23 @@ func @store_number_of_indices(%v : memref<f32>) {
// -----
-func @transpose_not_permutation(%v : memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>) {
+func @transpose_not_permutation(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
// expected-error @+1 {{expected a permutation map}}
- linalg.transpose %v (i, j) -> (i, i) : memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>
+ linalg.transpose %v (i, j) -> (i, i) : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>
}
// -----
-func @transpose_bad_rank(%v : memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>) {
+func @transpose_bad_rank(%v : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>) {
// expected-error @+1 {{expected a permutation map of same rank as the view}}
- linalg.transpose %v (i) -> (i) : memref<?x?xf32, (i, j)[off, M]->(off + M * i + j)>
+ linalg.transpose %v (i) -> (i) : memref<?x?xf32, affine_map<(i, j)[off, M]->(off + M * i + j)>>
}
// -----
-func @yield_parent(%arg0: memref<?xf32, (i)[off]->(off + i)>) {
+func @yield_parent(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
// expected-error @+1 {{op expected 'linalg.generic' or 'linalg.indexed_generic' parent op}}
- linalg.yield %arg0: memref<?xf32, (i)[off]->(off + i)>
+ linalg.yield %arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>
}
// -----
@@ -60,7 +60,7 @@ func @generic_at_least_2_operands(%arg0: memref<f32>) {
args_in = 1,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0: memref<f32>
}
@@ -73,7 +73,7 @@ func @generic_exactly_2_views(%arg0: memref<f32>) {
args_in = 1,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0, %arg0, %arg0: memref<f32>, memref<f32>, memref<f32>
}
@@ -86,7 +86,7 @@ func @generic_undefined_fun(%arg0: memref<f32>) {
args_in = 1,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0, %arg0: memref<f32>, memref<f32>
}
@@ -101,7 +101,7 @@ func @generic_mismatched_num_arguments(%arg0: memref<f32>) {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0: memref<f32>
}
@@ -116,7 +116,7 @@ func @generic_mismatched_num_returns(%arg0: memref<f32>) {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0: memref<f32>
}
@@ -131,7 +131,7 @@ func @generic_symbol_in_map(%arg0: memref<i32>) {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ ()[N] -> (0) ],
+ indexing_maps = [ affine_map<()[N] -> (0)> ],
iterator_types = ["parallel"]
} %arg0: memref<i32>
}
@@ -146,7 +146,7 @@ func @generic_wrong_dim_in_map(%arg0: memref<i32>) {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = ["parallel"]
} %arg0: memref<i32>
}
@@ -161,7 +161,7 @@ func @generic_zero_d_view(%arg0: memref<i32>) {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (1) ],
+ indexing_maps = [ affine_map<() -> (1)> ],
iterator_types = []
} %arg0: memref<i32>
}
@@ -170,15 +170,15 @@ func @generic_zero_d_view(%arg0: memref<i32>) {
func @foo(%0: f32) -> f32 { return %0: f32 }
-func @generic_one_d_view(%arg0: memref<?xf32, (i)[off]->(off + i)>) {
- // expected-error @+1 {{op expected indexing_map #0 results to match view rank: 'memref<?xf32, (d0)[s0] -> (d0 + s0)>'}}
+func @generic_one_d_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
+ // expected-error @+1 {{op expected indexing_map #0 results to match view rank: 'memref<?xf32, affine_map<(d0)[s0] -> (d0 + s0)>>'}}
linalg.generic {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0, 0) ],
+ indexing_maps = [ affine_map<() -> (0, 0)> ],
iterator_types = []
- } %arg0: memref<?xf32, (i)[off]->(off + i)>
+ } %arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>
}
// -----
@@ -188,15 +188,15 @@ func @foo(%0: i32) -> f32 {
return %1: f32
}
-func @generic_fun_arg_0_element_type(%arg0: memref<?xf32, (i)[off]->(off + i)>) {
+func @generic_fun_arg_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
// expected-error @+1 {{op expected fun argument 0 of the same type as elemental type 'f32' of view 0}}
linalg.generic {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
- } %arg0: memref<?xf32, (i)[off]->(off + i)>
+ } %arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>
}
// -----
@@ -206,33 +206,33 @@ func @foo(%0: f32) -> i4 {
return %1: i4
}
-func @generic_fun_result_0_element_type(%arg0: memref<?xf32, (i)[off]->(off + i)>) {
+func @generic_fun_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
// expected-error @+1 {{op expected fun result 0 of the same type as elemental type 'f32' of view 0}}
linalg.generic {
args_in = 0,
args_out = 1,
fun = @foo,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
- } %arg0: memref<?xf32, (i)[off]->(off + i)>
+ } %arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>
}
// -----
func @foo(%0: f32, %1: f32) -> f32 { return %1: f32 }
-func @generic_singular_maps(%arg0: memref<?xf32, (i)[off]->(off + i)>, %arg1: memref<?xf32, (i)[off]->(off + i)>) {
+func @generic_singular_maps(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>, %arg1: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
// expected-error @+1 {{op expected the concatenation of maps in indexing_map to be invertible}}
linalg.generic {
args_in = 1,
args_out = 1,
fun = @foo,
indexing_maps = [
- (i, j) -> (i + j) ,
- (i, j) -> (i + j)
+ affine_map<(i, j) -> (i + j)>,
+ affine_map<(i, j) -> (i + j)>
],
iterator_types = ["parallel","parallel"]
- } %arg0, %arg1: memref<?xf32, (i)[off]->(off + i)>, memref<?xf32, (i)[off]->(off + i)>
+ } %arg0, %arg1: memref<?xf32, affine_map<(i)[off]->(off + i)>>, memref<?xf32, affine_map<(i)[off]->(off + i)>>
}
////////////////////////////////////////////////////////////////////////////////
@@ -246,7 +246,7 @@ func @generic_empty_region(%arg0: memref<f32>) {
linalg.generic {
args_in = 1,
args_out = 1,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0, %arg0 {
^bb1:
@@ -261,7 +261,7 @@ func @generic_mismatched_num_arguments(%arg0: memref<f32>) {
linalg.generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0 {
^bb:
@@ -275,7 +275,7 @@ func @generic_block_arg_type(%arg0: memref<f32>) {
linalg.generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ () -> (0) ],
+ indexing_maps = [ affine_map<() -> (0)> ],
iterator_types = []
} %arg0 {
^bb(%i: i1):
@@ -289,7 +289,7 @@ func @indexed_generic_block_arg_count(%arg0: memref<f32>) {
linalg.indexed_generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (d0) -> (d0) ],
+ indexing_maps = [ affine_map<(d0) -> (d0)> ],
iterator_types = ["parallel"]
} %arg0 {
^bb(%f: f32):
@@ -303,7 +303,7 @@ func @indexed_generic_block_induction_var_arg_type(%arg0: memref<f32>) {
linalg.indexed_generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (d0) -> (d0) ],
+ indexing_maps = [ affine_map<(d0) -> (d0)> ],
iterator_types = ["parallel"]
} %arg0 {
^bb(%i: f64, %f: f32):
@@ -317,7 +317,7 @@ func @indexed_generic_block_arg_type(%arg0: memref<f32>) {
linalg.indexed_generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (d0) -> (d0) ],
+ indexing_maps = [ affine_map<(d0) -> (d0)> ],
iterator_types = ["parallel"]
} %arg0 {
^bb(%i: index, %f: i1):
@@ -334,7 +334,7 @@ func @indexed_generic_fun_arg_count(%arg0: memref<f32>) {
linalg.indexed_generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (d0) -> (d0) ],
+ indexing_maps = [ affine_map<(d0) -> (d0)> ],
iterator_types = ["parallel"],
fun = @foo
} %arg0: memref<f32>
@@ -351,7 +351,7 @@ func @indexed_generic_fun_induction_var_arg_type(%arg0: memref<f32>) {
args_in = 0,
args_out = 1,
iterator_types = ["parallel"],
- indexing_maps = [ (i) -> (i) ],
+ indexing_maps = [ affine_map<(i) -> (i)> ],
fun = @foo
} %arg0 : memref<f32>
}
@@ -366,7 +366,7 @@ func @indexed_generic_fun_arg_type(%arg0: memref<f32>) {
linalg.indexed_generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (d0) -> (d0) ],
+ indexing_maps = [ affine_map<(d0) -> (d0)> ],
iterator_types = ["parallel"],
fun = @foo
} %arg0: memref<f32>
@@ -382,7 +382,7 @@ func @indexed_generic_fun_result_count(%arg0: memref<f32>) {
linalg.indexed_generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (d0) -> (d0) ],
+ indexing_maps = [ affine_map<(d0) -> (d0)> ],
iterator_types = ["parallel"],
fun = @foo
} %arg0: memref<f32>
@@ -399,7 +399,7 @@ func @indexed_generic_fun_result_count(%arg0: memref<i32>) {
linalg.indexed_generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (d0) -> (d0) ],
+ indexing_maps = [ affine_map<(d0) -> (d0)> ],
iterator_types = ["parallel"],
fun = @foo
} %arg0: memref<i32>
@@ -407,48 +407,48 @@ func @indexed_generic_fun_result_count(%arg0: memref<i32>) {
// -----
-func @generic_fun_result_0_element_type(%arg0: memref<?xf32, (i)[off]->(off + i)>) {
+func @generic_fun_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
// expected-error @+9 {{type of return operand 0 ('i1') doesn't match view element type ('f32')}}
linalg.generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (i) -> (i) ],
+ indexing_maps = [ affine_map<(i) -> (i)> ],
iterator_types = ["parallel"]
} %arg0 {
^bb(%i: f32):
%0 = constant 0: i1
linalg.yield %0: i1
- }: memref<?xf32, (i)[off]->(off + i)>
+ }: memref<?xf32, affine_map<(i)[off]->(off + i)>>
}
// -----
-func @generic_result_tensor_type(%arg0: memref<?xf32, (i)[off]->(off + i)>) {
+func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
// expected-error @+1 {{op result #0 must be ranked tensor of any type values, but got 'f32'}}
%0 = linalg.generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (i) -> (i) ],
+ indexing_maps = [ affine_map<(i) -> (i)> ],
iterator_types = ["parallel"]
} %arg0 {
^bb(%i: f32):
linalg.yield %i: f32
- }: memref<?xf32, (i)[off]->(off + i)> -> f32
+ }: memref<?xf32, affine_map<(i)[off]->(off + i)>> -> f32
}
// -----
-func @generic_result_tensor_count(%arg0: memref<?xf32, (i)[off]->(off + i)>) {
+func @generic_result_tensor_count(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
// expected-error @+1 {{op expected #output tensor operands (0) to match #results (1)}}
%0 = linalg.generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (i) -> (i) ],
+ indexing_maps = [ affine_map<(i) -> (i)> ],
iterator_types = ["parallel"]
} %arg0 {
^bb(%i: f32):
linalg.yield %i: f32
- }: memref<?xf32, (i)[off]->(off + i)> -> tensor<?xf32>
+ }: memref<?xf32, affine_map<(i)[off]->(off + i)>> -> tensor<?xf32>
}
// -----
@@ -458,7 +458,7 @@ func @generic_result_tensor_type(%arg0: tensor<?xf32>) {
%0 = linalg.generic {
args_in = 0,
args_out = 1,
- indexing_maps = [ (i) -> (i) ],
+ indexing_maps = [ affine_map<(i) -> (i)> ],
iterator_types = ["parallel"]
} %arg0 {
^bb(%i: f32):
@@ -487,21 +487,21 @@ func @generic_fun_result_0_element_type(%arg0: memref<?xf32>) {
func @reshape(%arg0: memref<f32>) {
// expected-error @+1 {{expected non-zero memref ranks}}
- %0 = linalg.reshape %arg0 [()->(0)] : memref<f32> into memref<f32>
+ %0 = linalg.reshape %arg0 [affine_map<()->(0)>] : memref<f32> into memref<f32>
}
// -----
func @reshape(%arg0: memref<?xf32>) {
// expected-error @+1 {{expected to collapse or expand dims}}
- %0 = linalg.reshape %arg0 [(i)->(i)] : memref<?xf32> into memref<?xf32>
+ %0 = linalg.reshape %arg0 [affine_map<(i)->(i)>] : memref<?xf32> into memref<?xf32>
}
// -----
func @reshape(%arg0: memref<?x?x?xf32>) {
// expected-error @+1 {{expected rank of the collapsed view(2) to be the number of reassociation maps(1)}}
- %0 = linalg.reshape %arg0 [(i, j, k) -> (i, j)] :
+ %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>] :
memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
}
@@ -509,7 +509,7 @@ func @reshape(%arg0: memref<?x?x?xf32>) {
func @reshape(%arg0: memref<?x?x?xf32>) {
// expected-error @+1 {{expected reassociation map #0 of same rank as expanded memref(3), but got 1}}
- %0 = linalg.reshape %arg0 [(i) -> (i), (i, j, k) -> (k)] :
+ %0 = linalg.reshape %arg0 [affine_map<(i) -> (i)>, affine_map<(i, j, k) -> (k)>] :
memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
}
@@ -517,14 +517,14 @@ func @reshape(%arg0: memref<?x?x?xf32>) {
func @reshape(%arg0: memref<?x?x?xf32>) {
// expected-error @+1 {{expected reassociation map #1 to be valid and contiguous}}
- %0 = linalg.reshape %arg0 [(i, j, k) -> (i, j), (i, j, k) -> (k, j)] :
+ %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (k, j)>] :
memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
}
// -----
func @reshape(%arg0: memref<?x?x?xf32>) {
- // expected-error @+1 {{expected collapsed type to be 'memref<?x?xf32>', but got 'memref<?x?xf32, (d0, d1)[s0] -> (d0 * s0 + d1)>'}}
- %0 = linalg.reshape %arg0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
- memref<?x?x?xf32> into memref<?x?xf32, (d0, d1)[s0] -> (d0 * s0 + d1)>
+ // expected-error @+1 {{expected collapsed type to be 'memref<?x?xf32>', but got 'memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>'}}
+ %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (k)>] :
+ memref<?x?x?xf32> into memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>
}
diff --git a/mlir/test/Dialect/Linalg/llvm.mlir b/mlir/test/Dialect/Linalg/llvm.mlir
index d70ee8c0271..7c1d02b3fc2 100644
--- a/mlir/test/Dialect/Linalg/llvm.mlir
+++ b/mlir/test/Dialect/Linalg/llvm.mlir
@@ -102,8 +102,8 @@ func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
// CHECK: llvm.insertvalue {{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
func @copy_transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
- linalg.copy(%arg0, %arg1) {inputPermutation = (i, j, k) -> (i, k, j),
- outputPermutation = (i, j, k) -> (k, j, i)}
+ linalg.copy(%arg0, %arg1) {inputPermutation = affine_map<(i, j, k) -> (i, k, j)>,
+ outputPermutation = affine_map<(i, j, k) -> (k, j, i)>}
: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
return
}
@@ -133,9 +133,9 @@ func @copy_transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %a
// CHECK: llvm.call @linalg_copy_viewsxsxsxf32_viewsxsxsxf32(%{{.*}}, %{{.*}}) : (!llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }*">, !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }*">) -> ()
#matmul_accesses = [
- (m, n, k) -> (m, k),
- (m, n, k) -> (k, n),
- (m, n, k) -> (m, n)
+ affine_map<(m, n, k) -> (m, k)>,
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>
]
#matmul_trait = {
args_in = 2,
@@ -199,13 +199,13 @@ func @matmul_vec_indexed(%A: !matrix_type_A,
func @reshape_static(%arg0: memref<3x4x5xf32>) {
// Reshapes that expand and collapse back a contiguous tensor with some 1's.
- %0 = linalg.reshape %arg0 [(i, j, k, l, m) -> (i, j),
- (i, j, k, l, m) -> (k),
- (i, j, k, l, m) -> (l, m)] :
+ %0 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
+ affine_map<(i, j, k, l, m) -> (k)>,
+ affine_map<(i, j, k, l, m) -> (l, m)>] :
memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
- %r0 = linalg.reshape %0 [(i, j, k, l, m) -> (i, j),
- (i, j, k, l, m) -> (k),
- (i, j, k, l, m) -> (l, m)] :
+ %r0 = linalg.reshape %0 [affine_map<(i, j, k, l, m) -> (i, j)>,
+ affine_map<(i, j, k, l, m) -> (k)>,
+ affine_map<(i, j, k, l, m) -> (l, m)>] :
memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
return
}
diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index 1425b4ed3a4..260f602e4ed 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -3,14 +3,14 @@
// Test that we can lower all the way to LLVM without crashing, don't check results here.
// RUN: mlir-opt %s --convert-linalg-to-llvm -o=/dev/null 2>&1
-// CHECK-DAG: #[[strided1D:.*]] = (d0)[s0] -> (d0 + s0)
-// CHECK-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// CHECK-DAG: #[[strided3D:.*]] = (d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)
-// CHECK-DAG: #[[strided4D:.*]] = (d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)
+// CHECK-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
-// CHECK-DAG: #[[Stride2Dilation1:.*]] = (d0, d1) -> (d0 * 2 + d1)
-// CHECK-DAG: #[[Stride2Dilation4:.*]] = (d0, d1) -> (d0 * 2 + d1 * 4)
-// CHECK-DAG: #[[Stride3Dilation5:.*]] = (d0, d1) -> (d0 * 3 + d1 * 5)
+// CHECK-DAG: #[[Stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
+// CHECK-DAG: #[[Stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
+// CHECK-DAG: #[[Stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
@@ -146,8 +146,8 @@ func @copy_view0(%arg0: memref<f32>, %arg1: memref<f32>) {
// CHECK: store %{{.*}}, %{{.*}}[] : memref<f32>
func @copy_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
- linalg.copy(%arg0, %arg1) {inputPermutation = (i, j, k) -> (i, k, j),
- outputPermutation = (i, j, k) -> (k, j, i)} :
+ linalg.copy(%arg0, %arg1) {inputPermutation = affine_map<(i, j, k) -> (i, k, j)>,
+ outputPermutation = affine_map<(i, j, k) -> (k, j, i)>} :
memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
return
}
@@ -217,9 +217,9 @@ func @foo(%0: f32, %1: f32, %2: f32) -> (f32, f32) {
return %f0, %f0 : f32, f32
}
#accesses = [
- (i, j, k) -> (i, j),
- (i, j, k) -> (i, j, k),
- (i, j, k) -> (i, k, j)
+ affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (i, j, k)>,
+ affine_map<(i, j, k) -> (i, k, j)>
]
#trait = {
args_in = 1,
diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 51261fcc37b..81acb859d23 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -1,14 +1,14 @@
// RUN: mlir-opt %s -linalg-promote-subviews | FileCheck %s
// RUN: mlir-opt %s -linalg-promote-subviews -test-linalg-promote-dynamic | FileCheck %s --check-prefix=DYNAMIC
-#map0 = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-#map1 = (d0) -> (d0 + 2)
-#map2 = (d0) -> (d0 + 4)
-#map3 = (d0) -> (d0 + 3)
+#map0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+#map1 = affine_map<(d0) -> (d0 + 2)>
+#map2 = affine_map<(d0) -> (d0 + 4)>
+#map3 = affine_map<(d0) -> (d0 + 3)>
-// CHECK-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// CHECK-DAG: #[[strided2DnoOffset:.*]] = (d0, d1)[s0] -> (d0 * s0 + d1)
-// CHECK-DAG: #[[strided2D_dynamic:.*]] = (d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)
+// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[strided2DnoOffset:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
+// CHECK-DAG: #[[strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
module {
func @matmul(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index c55c0ab2113..f9dbab9ac53 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -5,24 +5,24 @@
// Test that we can lower all the way to LLVM without crashing, don't check results here.
// DISABLED: mlir-opt %s --convert-linalg-to-llvm -o=/dev/null 2>&1
-// CHECK-DAG: #[[strided1D:.*]] = (d0)[s0] -> (d0 + s0)
-// CHECK-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// CHECK-DAG: #[[strided2DOFF0:.*]] = (d0, d1)[s0] -> (d0 * s0 + d1)
-// CHECK-DAG: #[[strided3D:.*]] = (d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)
-// CHECK-DAG: #[[strided3DOFF0:.*]] = (d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)
-// CHECK-DAG: #[[strided6D:.*]] = (d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)
+// CHECK-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
+// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>
+// CHECK-DAG: #[[strided6D:.*]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)>
-// CHECK-DAG: #[[map0:.*]] = (d0, d1, d2) -> (d0, d2, d1)
-// CHECK-DAG: #[[map1:.*]] = (d0, d1, d2) -> (d2, d1, d0)
+// CHECK-DAG: #[[map0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
+// CHECK-DAG: #[[map1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
-// CHECK-DAG: #[[reshapeD01:.*]] = (d0, d1, d2) -> (d0, d1)
-// CHECK-DAG: #[[reshapeD2:.*]] = (d0, d1, d2) -> (d2)
-// CHECK-DAG: #[[reshapeD0:.*]] = (d0, d1, d2) -> (d0)
-// CHECK-DAG: #[[reshapeD12:.*]] = (d0, d1, d2) -> (d1, d2)
-// CHECK-DAG: #[[reshapeD012:.*]] = (d0, d1, d2) -> (d0, d1, d2)
-// CHECK-DAG: #[[reshape5D01:.*]] = (d0, d1, d2, d3, d4) -> (d0, d1)
-// CHECK-DAG: #[[reshape5D2:.*]] = (d0, d1, d2, d3, d4) -> (d2)
-// CHECK-DAG: #[[reshape5D34:.*]] = (d0, d1, d2, d3, d4) -> (d3, d4)
+// CHECK-DAG: #[[reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// CHECK-DAG: #[[reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
+// CHECK-DAG: #[[reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
+// CHECK-DAG: #[[reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
+// CHECK-DAG: #[[reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+// CHECK-DAG: #[[reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
+// CHECK-DAG: #[[reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
+// CHECK-DAG: #[[reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
func @range(%arg0: index, %arg1: index, %arg2: index) {
%0 = linalg.range %arg0:%arg1:%arg2 : !linalg.range
@@ -101,8 +101,8 @@ func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf
// CHECK: linalg.copy(%{{.*}}, %{{.*}}) : memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>
func @copy_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
- linalg.copy(%arg0, %arg1) {inputPermutation = (i, j, k) -> (i, k, j),
- outputPermutation = (i, j, k) -> (k, j, i)} :
+ linalg.copy(%arg0, %arg1) {inputPermutation = affine_map<(i, j, k) -> (i, k, j)>,
+ outputPermutation = affine_map<(i, j, k) -> (k, j, i)>} :
memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
return
}
@@ -127,8 +127,8 @@ func @conv_view6(%arg0: memref<?x?x?x?x?x?xf32, offset: ?, strides: [?, ?, ?, ?,
// CHECK: linalg.conv(%{{.*}}, %{{.*}}, %{{.*}}) {dilations = [4, 4, 5, 5], strides = [2, 2, 3, 3]} : memref<?x?x?x?x?x?xf32, #[[strided6D]]>, memref<?x?x?x?x?x?xf32, #[[strided6D]]>, memref<?x?x?x?x?x?xf32, #[[strided6D]]>
#accesses = [
- (i, j, k) -> (j, i),
- (i, j, k) -> (i, k, i + j)
+ affine_map<(i, j, k) -> (j, i)>,
+ affine_map<(i, j, k) -> (i, k, i + j)>
]
#trait = {
args_in = 1,
@@ -208,30 +208,30 @@ func @indexed_generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?,
func @reshape_static(%arg0: memref<3x4x5xf32>) {
// Reshapes that collapse and expand back a contiguous tensor.
- %0 = linalg.reshape %arg0 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<3x4x5xf32> into memref<12x5xf32>
- %r0 = linalg.reshape %0 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %r0 = linalg.reshape %0 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<12x5xf32> into memref<3x4x5xf32>
- %1 = linalg.reshape %arg0 [(i, j, k) -> (i),
- (i, j, k) -> (j, k)] :
+ %1 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i)>,
+ affine_map<(i, j, k) -> (j, k)>] :
memref<3x4x5xf32> into memref<3x20xf32>
- %r1 = linalg.reshape %1 [(i, j, k) -> (i),
- (i, j, k) -> (j, k)] :
+ %r1 = linalg.reshape %1 [affine_map<(i, j, k) -> (i)>,
+ affine_map<(i, j, k) -> (j, k)>] :
memref<3x20xf32> into memref<3x4x5xf32>
- %2 = linalg.reshape %arg0 [(i, j, k) -> (i, j, k)] :
+ %2 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j, k)>] :
memref<3x4x5xf32> into memref<60xf32>
- %r2 = linalg.reshape %2 [(i, j, k) -> (i, j, k)] :
+ %r2 = linalg.reshape %2 [affine_map<(i, j, k) -> (i, j, k)>] :
memref<60xf32> into memref<3x4x5xf32>
// Reshapes that expand and collapse back a contiguous tensor with some 1's.
- %3 = linalg.reshape %arg0 [(i, j, k, l, m) -> (i, j),
- (i, j, k, l, m) -> (k),
- (i, j, k, l, m) -> (l, m)] :
+ %3 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
+ affine_map<(i, j, k, l, m) -> (k)>,
+ affine_map<(i, j, k, l, m) -> (l, m)>] :
memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
- %r3 = linalg.reshape %3 [(i, j, k, l, m) -> (i, j),
- (i, j, k, l, m) -> (k),
- (i, j, k, l, m) -> (l, m)] :
+ %r3 = linalg.reshape %3 [affine_map<(i, j, k, l, m) -> (i, j)>,
+ affine_map<(i, j, k, l, m) -> (k)>,
+ affine_map<(i, j, k, l, m) -> (l, m)>] :
memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
return
}
@@ -256,26 +256,26 @@ func @reshape_static(%arg0: memref<3x4x5xf32>) {
func @reshape_dynamic(%arg0: memref<?x?x?xf32>,
%arg1: memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]>,
%arg2: memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]>) {
- %0 = linalg.reshape %arg0 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<?x?x?xf32> into memref<?x?xf32>
- %r0 = linalg.reshape %0 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %r0 = linalg.reshape %0 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<?x?xf32> into memref<?x?x?xf32>
- %1 = linalg.reshape %arg1 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %1 = linalg.reshape %arg1 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]> into
memref<?x?xf32, offset : 0, strides : [?, 1]>
- %r1 = linalg.reshape %1 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %r1 = linalg.reshape %1 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<?x?xf32, offset : 0, strides : [?, 1]> into
memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]>
- %2 = linalg.reshape %arg2 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %2 = linalg.reshape %arg2 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]> into
memref<?x?xf32, offset : ?, strides : [?, 1]>
- %r2 = linalg.reshape %2 [(i, j, k) -> (i, j),
- (i, j, k) -> (k)] :
+ %r2 = linalg.reshape %2 [affine_map<(i, j, k) -> (i, j)>,
+ affine_map<(i, j, k) -> (k)>] :
memref<?x?xf32, offset : ?, strides : [?, 1]> into
memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]>
return
diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index 763b33b7973..561591d9087 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -3,29 +3,29 @@
// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=0,0,2 | FileCheck %s -check-prefix=TILE-002
// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,4 | FileCheck %s -check-prefix=TILE-234
-// TILE-2-DAG: #[[strided1D:.*]] = (d0)[s0] -> (d0 + s0)
-// TILE-02-DAG: #[[strided1D:.*]] = (d0)[s0] -> (d0 + s0)
-// TILE-002-DAG: #[[strided1D:.*]] = (d0)[s0] -> (d0 + s0)
-// TILE-234-DAG: #[[strided1D:.*]] = (d0)[s0] -> (d0 + s0)
-
-// TILE-2-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// TILE-02-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// TILE-002-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// TILE-234-DAG: #[[strided2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-
-// TILE-2-DAG: #[[strided1D_dynamic:.*]] = (d0)[s0, s1] -> (d0 * s1 + s0)
-// TILE-02-DAG: #[[strided1D_dynamic:.*]] = (d0)[s0, s1] -> (d0 * s1 + s0)
-// T_ILE-002-DAG: #[[strided1D_dynamic:.*]] = (d0)[s0, s1] -> (d0 * s1 + s0)
-// TILE-234-DAG: #[[strided1D_dynamic:.*]] = (d0)[s0, s1] -> (d0 * s1 + s0)
-
-// TILE-2-DAG: #[[strided2D_dynamic:.*]] = (d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)
-// TILE-02-DAG: #[[strided2D_dynamic:.*]] = (d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)
-// TILE-002-DAG: #[[strided2D_dynamic:.*]] = (d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)
-// TILE-234-DAG: #[[strided2D_dynamic:.*]] = (d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)
-
-// REACTIVATE_ME_TILE-2-DAG: #[[stride_99_1_layout_map:.*]] = (d0, d1)[s0] -> (d0 * 99 + s0 + d1)
-// REACTIVATE_ME_TILE-02-DAG: #[[stride_99_1_layout_map:.*]] = (d0, d1)[s0] -> (d0 * 99 + s0 + d1)
-// REACTIVATE_ME_TILE-234-DAG: #[[stride_99_1_layout_map:.*]] = (d0, d1)[s0] -> (d0 * 99 + s0 + d1)
+// TILE-2-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// TILE-02-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// TILE-002-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// TILE-234-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+
+// TILE-2-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// TILE-02-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// TILE-002-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// TILE-234-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+
+// TILE-2-DAG: #[[strided1D_dynamic:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+// TILE-02-DAG: #[[strided1D_dynamic:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+// T_ILE-002-DAG: #[[strided1D_dynamic:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+// TILE-234-DAG: #[[strided1D_dynamic:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+
+// TILE-2-DAG: #[[strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// TILE-02-DAG: #[[strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// TILE-002-DAG: #[[strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// TILE-234-DAG: #[[strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+
+// REACTIVATE_ME_TILE-2-DAG: #[[stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
+// REACTIVATE_ME_TILE-02-DAG: #[[stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
+// REACTIVATE_ME_TILE-234-DAG: #[[stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg2: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
linalg.matmul(%arg0, %arg1, %arg2) : memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>
@@ -211,7 +211,7 @@ func @fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: f32) {
// TILE-234-NOT: for
// TILE-234: fill{{.*}} f32
-#id_2d = (i, j) -> (i, j)
+#id_2d = affine_map<(i, j) -> (i, j)>
#pointwise_2d_trait = {
args_in = 2,
args_out = 1,
diff --git a/mlir/test/Dialect/Linalg/tile_conv.mlir b/mlir/test/Dialect/Linalg/tile_conv.mlir
index 64c55cb1e45..55abb32a8f5 100644
--- a/mlir/test/Dialect/Linalg/tile_conv.mlir
+++ b/mlir/test/Dialect/Linalg/tile_conv.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,0,0,4 | FileCheck %s -check-prefix=TILE-23004
-// TILE-23004-DAG: #[[D0x30pS0x10:.*]] = (d0) -> (d0 * 30)
-// TILE-23004-DAG: #[[S0x10p90:.*]] = ()[s0] -> (s0 * 10 + 90)
-// TILE-23004-DAG: #[[strided4D:.*]] = (d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)
-// TILE-23004-DAG: #[[strided4D_dynamic:.*]] = (d0, d1, d2, d3)[s0, s1, s2, s3, s4] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4)
+// TILE-23004-DAG: #[[D0x30pS0x10:.*]] = affine_map<(d0) -> (d0 * 30)>
+// TILE-23004-DAG: #[[S0x10p90:.*]] = affine_map<()[s0] -> (s0 * 10 + 90)>
+// TILE-23004-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// TILE-23004-DAG: #[[strided4D_dynamic:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4)>
func @conv(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg2: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>) {
linalg.conv(%arg0, %arg1, %arg2) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>
diff --git a/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir b/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir
index c7cd61b76e3..24619bf404b 100644
--- a/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir
+++ b/mlir/test/Dialect/Linalg/tile_indexed_generic.mlir
@@ -2,7 +2,7 @@
// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=25,0 | FileCheck %s -check-prefix=TILE-25n0
// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=0,25 | FileCheck %s -check-prefix=TILE-0n25
-#id_1d = (i) -> (i)
+#id_1d = affine_map<(i) -> (i)>
#pointwise_1d_trait = {
args_in = 1,
args_out = 1,
@@ -47,8 +47,8 @@ func @indexed_generic_vector(%operand: memref<50xf32>, %result: memref<50xf32>)
args_in = 1,
args_out = 1,
indexing_maps = [
- (i, j) -> (j, i + j),
- (i, j) -> (i, j)
+ affine_map<(i, j) -> (j, i + j)>,
+ affine_map<(i, j) -> (i, j)>
],
iterator_types = ["parallel", "parallel"]
}
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index 8a08bf850ff..7a57a4ae2a1 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -1,12 +1,12 @@
// RUN: mlir-opt %s -test-linalg-transform-patterns | FileCheck %s
-// CHECK-DAG: #[[STRIDED_1D:.*]] = (d0)[s0] -> (d0 + s0)
-// CHECK-DAG: #[[STRIDED_2D:.*]] = (d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)
-// CHECK-DAG: #[[mk:.*]] = (d0, d1, d2) -> (d0, d2)
-// CHECK-DAG: #[[kn:.*]] = (d0, d1, d2) -> (d2, d1)
-// CHECK-DAG: #[[mn:.*]] = (d0, d1, d2) -> (d0, d1)
-// CHECK-DAG: #[[nm:.*]] = (d0, d1, d2) -> (d1, d0)
-// CHECK-DAG: #[[km:.*]] = (d0, d1, d2) -> (d2, d0)
+// CHECK-DAG: #[[STRIDED_1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[STRIDED_2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[mk:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
+// CHECK-DAG: #[[kn:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
+// CHECK-DAG: #[[mn:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// CHECK-DAG: #[[nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
+// CHECK-DAG: #[[km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
%y: memref<?xf32, offset: ?, strides: [1]>,
@@ -86,8 +86,8 @@ func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
args_in = 1,
args_out = 1,
indexing_maps = [
- (i, j) -> (i, j),
- (i, j) -> (i, j)
+ affine_map<(i, j) -> (i, j)>,
+ affine_map<(i, j) -> (i, j)>
],
iterator_types = ["parallel", "parallel"]
}
@@ -168,9 +168,9 @@ func @fusion_test(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
args_in = 2,
args_out = 1,
indexing_maps = [
- (m, n, k) -> (m, k),
- (m, n, k) -> (k, n),
- (m, n, k) -> (m, n)
+ affine_map<(m, n, k) -> (m, k)>,
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>
],
iterator_types = ["parallel", "parallel", "reduction"],
__internal_linalg_transform__ = "_marked_matmul_"
@@ -201,9 +201,9 @@ func @fma(%a: f32, %b: f32, %c: f32) -> f32 {
return %e: f32
}
#matmul_accesses = [
- (m, n, k) -> (m, k),
- (m, n, k) -> (k, n),
- (m, n, k) -> (m, n)
+ affine_map<(m, n, k) -> (m, k)>,
+ affine_map<(m, n, k) -> (k, n)>,
+ affine_map<(m, n, k) -> (m, n)>
]
#generic_matmul_trait = {
args_in = 2,
diff --git a/mlir/test/Dialect/SPIRV/composite-ops.mlir b/mlir/test/Dialect/SPIRV/composite-ops.mlir
index 4ce89748a09..556bed82315 100644
--- a/mlir/test/Dialect/SPIRV/composite-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/composite-ops.mlir
@@ -108,8 +108,8 @@ func @composite_extract_invalid_index_type_2(%arg0 : !spv.array<4x!spv.array<4xf
// -----
func @composite_extract_invalid_index_identifier(%arg0 : !spv.array<4x!spv.array<4xf32>>) -> () {
- // expected-error @+1 {{expected bare identifier}}
- %0 = spv.CompositeExtract %arg0(1 : i32) : !spv.array<4x!spv.array<4xf32>>
+ // expected-error @+1 {{expected non-function type}}
+ %0 = spv.CompositeExtract %arg0 ]1 : i32) : !spv.array<4x!spv.array<4xf32>>
return
}
diff --git a/mlir/test/Dialect/VectorOps/invalid.mlir b/mlir/test/Dialect/VectorOps/invalid.mlir
index ef02497710b..a41d45e6dda 100644
--- a/mlir/test/Dialect/VectorOps/invalid.mlir
+++ b/mlir/test/Dialect/VectorOps/invalid.mlir
@@ -239,7 +239,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{two types required}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %cst { permutation_map = ()->(0) } : memref<?x?xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %cst { permutation_map = affine_map<()->(0)> } : memref<?x?xf32>
}
// -----
@@ -248,7 +248,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{requires 2 indices}}
- %0 = vector.transfer_read %arg0[%c3, %c3, %c3], %cst { permutation_map = ()->(0) } : memref<?x?xf32>, vector<128xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3, %c3], %cst { permutation_map = affine_map<()->(0)> } : memref<?x?xf32>, vector<128xf32>
}
// -----
@@ -257,7 +257,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{requires attribute 'permutation_map'}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %cst {perm = (d0)->(d0)} : memref<?x?xf32>, vector<128xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %cst {perm = affine_map<(d0)->(d0)>} : memref<?x?xf32>, vector<128xf32>
}
// -----
@@ -266,7 +266,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{requires a permutation_map with input dims of the same rank as the memref type}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = (d0)->(d0)} : memref<?x?xf32>, vector<128xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = affine_map<(d0)->(d0)>} : memref<?x?xf32>, vector<128xf32>
}
// -----
@@ -275,7 +275,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{requires a permutation_map with result dims of the same rank as the vector type}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = (d0, d1)->(d0, d1)} : memref<?x?xf32>, vector<128xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = affine_map<(d0, d1)->(d0, d1)>} : memref<?x?xf32>, vector<128xf32>
}
// -----
@@ -284,7 +284,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{requires a projected permutation_map (at most one dim or the zero constant can appear in each result)}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = (d0, d1)->(d0 + d1)} : memref<?x?xf32>, vector<128xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = affine_map<(d0, d1)->(d0 + d1)>} : memref<?x?xf32>, vector<128xf32>
}
// -----
@@ -293,7 +293,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{requires a projected permutation_map (at most one dim or the zero constant can appear in each result)}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = (d0, d1)->(d0 + 1)} : memref<?x?xf32>, vector<128xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = affine_map<(d0, d1)->(d0 + 1)>} : memref<?x?xf32>, vector<128xf32>
}
// -----
@@ -302,7 +302,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?x?xf32>) {
%c3 = constant 3 : index
%cst = constant 3.0 : f32
// expected-error@+1 {{requires a permutation_map that is a permutation (found one dim used more than once)}}
- %0 = vector.transfer_read %arg0[%c3, %c3, %c3], %cst {permutation_map = (d0, d1, d2)->(d0, d0)} : memref<?x?x?xf32>, vector<3x7xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3, %c3], %cst {permutation_map = affine_map<(d0, d1, d2)->(d0, d0)>} : memref<?x?x?xf32>, vector<3x7xf32>
}
// -----
@@ -312,7 +312,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xvector<4x3xf32>>) {
%f0 = constant 0.0 : f32
%vf0 = splat %f0 : vector<4x3xf32>
// expected-error@+1 {{requires memref and vector types of the same elemental type}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %vf0 {permutation_map = (d0, d1)->(d0, d1)} : memref<?x?xvector<4x3xf32>>, vector<1x1x4x3xi32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %vf0 {permutation_map = affine_map<(d0, d1)->(d0, d1)>} : memref<?x?xvector<4x3xf32>>, vector<1x1x4x3xi32>
}
// -----
@@ -322,7 +322,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xvector<4x3xf32>>) {
%f0 = constant 0.0 : f32
%vf0 = splat %f0 : vector<4x3xf32>
// expected-error@+1 {{requires memref vector element and vector result ranks to match}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %vf0 {permutation_map = (d0, d1)->(d0, d1)} : memref<?x?xvector<4x3xf32>>, vector<3xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %vf0 {permutation_map = affine_map<(d0, d1)->(d0, d1)>} : memref<?x?xvector<4x3xf32>>, vector<3xf32>
}
// -----
@@ -332,7 +332,7 @@ func @test_vector.transfer_read(%arg0: memref<?x?xvector<4x3xf32>>) {
%f0 = constant 0.0 : f32
%vf0 = splat %f0 : vector<4x3xf32>
// expected-error@+1 {{ requires memref vector element shape to match suffix of vector result shape}}
- %0 = vector.transfer_read %arg0[%c3, %c3], %vf0 {permutation_map = (d0, d1)->(d0, d1)} : memref<?x?xvector<4x3xf32>>, vector<1x1x2x3xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %vf0 {permutation_map = affine_map<(d0, d1)->(d0, d1)>} : memref<?x?xvector<4x3xf32>>, vector<1x1x2x3xf32>
}
// -----
@@ -341,7 +341,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<128 x f32>
// expected-error@+1 {{expected 5 operand types but had 4}}
- %0 = "vector.transfer_write"(%cst, %arg0, %c3, %c3, %c3) {permutation_map = ()->(0)} : (vector<128xf32>, memref<?x?xf32>, index, index) -> ()
+ %0 = "vector.transfer_write"(%cst, %arg0, %c3, %c3, %c3) {permutation_map = affine_map<()->(0)>} : (vector<128xf32>, memref<?x?xf32>, index, index) -> ()
}
// -----
@@ -350,7 +350,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<128 x f32>
// expected-error@+1 {{requires 2 indices}}
- vector.transfer_write %cst, %arg0[%c3, %c3, %c3] {permutation_map = ()->(0)} : vector<128xf32>, memref<?x?xf32>
+ vector.transfer_write %cst, %arg0[%c3, %c3, %c3] {permutation_map = affine_map<()->(0)>} : vector<128xf32>, memref<?x?xf32>
}
// -----
@@ -359,7 +359,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<128 x f32>
// expected-error@+1 {{requires attribute 'permutation_map'}}
- vector.transfer_write %cst, %arg0[%c3, %c3] {perm = (d0)->(d0)} : vector<128xf32>, memref<?x?xf32>
+ vector.transfer_write %cst, %arg0[%c3, %c3] {perm = affine_map<(d0)->(d0)>} : vector<128xf32>, memref<?x?xf32>
}
// -----
@@ -368,7 +368,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<128 x f32>
// expected-error@+1 {{requires a permutation_map with input dims of the same rank as the memref type}}
- vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = (d0)->(d0)} : vector<128xf32>, memref<?x?xf32>
+ vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = affine_map<(d0)->(d0)>} : vector<128xf32>, memref<?x?xf32>
}
// -----
@@ -377,7 +377,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<128 x f32>
// expected-error@+1 {{requires a permutation_map with result dims of the same rank as the vector type}}
- vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = (d0, d1)->(d0, d1)} : vector<128xf32>, memref<?x?xf32>
+ vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = affine_map<(d0, d1)->(d0, d1)>} : vector<128xf32>, memref<?x?xf32>
}
// -----
@@ -386,7 +386,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<128 x f32>
// expected-error@+1 {{requires a projected permutation_map (at most one dim or the zero constant can appear in each result)}}
- vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = (d0, d1)->(d0 + d1)} : vector<128xf32>, memref<?x?xf32>
+ vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = affine_map<(d0, d1)->(d0 + d1)>} : vector<128xf32>, memref<?x?xf32>
}
// -----
@@ -395,7 +395,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<128 x f32>
// expected-error@+1 {{requires a projected permutation_map (at most one dim or the zero constant can appear in each result)}}
- vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = (d0, d1)->(d0 + 1)} : vector<128xf32>, memref<?x?xf32>
+ vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map = affine_map<(d0, d1)->(d0 + 1)>} : vector<128xf32>, memref<?x?xf32>
}
// -----
@@ -404,7 +404,7 @@ func @test_vector.transfer_write(%arg0: memref<?x?x?xf32>) {
%c3 = constant 3 : index
%cst = constant dense<3.0> : vector<3 x 7 x f32>
// expected-error@+1 {{requires a permutation_map that is a permutation (found one dim used more than once)}}
- vector.transfer_write %cst, %arg0[%c3, %c3, %c3] {permutation_map = (d0, d1, d2)->(d0, d0)} : vector<3x7xf32>, memref<?x?x?xf32>
+ vector.transfer_write %cst, %arg0[%c3, %c3, %c3] {permutation_map = affine_map<(d0, d1, d2)->(d0, d0)>} : vector<3x7xf32>, memref<?x?x?xf32>
}
// -----
@@ -515,10 +515,10 @@ func @strided_slice(%arg0: vector<4x8x16xf32>) {
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, c0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, c0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -535,9 +535,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, c0, c1, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, c0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, c0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, c0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -554,9 +554,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1)[s0] -> (b0, s0, c0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1)[s0] -> (b0, s0, c0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -573,9 +573,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, c0, f1),
- (b0, f0, f1, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, c0, f1)>,
+ affine_map<(b0, f0, f1, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -592,9 +592,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -611,9 +611,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, b1, b2) -> (b1, b0, b2, f0),
- (b0, f0, f1, b1, b2) -> (b0, b2, b1, f1),
- (b0, f0, f1, b1, b2) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, b1, b2) -> (b1, b0, b2, f0)>,
+ affine_map<(b0, f0, f1, b1, b2) -> (b0, b2, b1, f1)>,
+ affine_map<(b0, f0, f1, b1, b2) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -630,9 +630,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c1, b0, c0, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, c0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c1, b0, c0, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, c0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -649,9 +649,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1) -> (f1, c1, c0, b0),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (f1, c1, c0, b0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -668,9 +668,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, c0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, c0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
@@ -687,9 +687,9 @@ func @contraction(%arg0: vector<7x8x16x15xf32>, %arg1: vector<8x16x7x5xf32>,
// -----
#contraction_accesses = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, c0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, c0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait = {
indexing_maps = #contraction_accesses,
diff --git a/mlir/test/Dialect/VectorOps/ops.mlir b/mlir/test/Dialect/VectorOps/ops.mlir
index 31113bdd479..ac2f0c36e87 100644
--- a/mlir/test/Dialect/VectorOps/ops.mlir
+++ b/mlir/test/Dialect/VectorOps/ops.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s | mlir-opt | FileCheck %s
-// CHECK-DAG: #[[MAP0:map[0-9]+]] = (d0, d1) -> (d0, d1)
+// CHECK-DAG: #[[MAP0:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @vector_transfer_ops(
func @vector_transfer_ops(%arg0: memref<?x?xf32>,
@@ -13,22 +13,22 @@ func @vector_transfer_ops(%arg0: memref<?x?xf32>,
//
// CHECK: vector.transfer_read
- %0 = vector.transfer_read %arg0[%c3, %c3], %f0 {permutation_map = (d0, d1)->(d0)} : memref<?x?xf32>, vector<128xf32>
+ %0 = vector.transfer_read %arg0[%c3, %c3], %f0 {permutation_map = affine_map<(d0, d1)->(d0)>} : memref<?x?xf32>, vector<128xf32>
// CHECK: vector.transfer_read
- %1 = vector.transfer_read %arg0[%c3, %c3], %f0 {permutation_map = (d0, d1)->(d1, d0)} : memref<?x?xf32>, vector<3x7xf32>
+ %1 = vector.transfer_read %arg0[%c3, %c3], %f0 {permutation_map = affine_map<(d0, d1)->(d1, d0)>} : memref<?x?xf32>, vector<3x7xf32>
// CHECK: vector.transfer_read
- %2 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = (d0, d1)->(d0)} : memref<?x?xf32>, vector<128xf32>
+ %2 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = affine_map<(d0, d1)->(d0)>} : memref<?x?xf32>, vector<128xf32>
// CHECK: vector.transfer_read
- %3 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = (d0, d1)->(d1)} : memref<?x?xf32>, vector<128xf32>
+ %3 = vector.transfer_read %arg0[%c3, %c3], %cst {permutation_map = affine_map<(d0, d1)->(d1)>} : memref<?x?xf32>, vector<128xf32>
// CHECK: vector.transfer_read %{{.*}}[%[[C3]], %[[C3]]], %{{.*}} {permutation_map = #[[MAP0]]} : memref<?x?xvector<4x3xf32>>, vector<1x1x4x3xf32>
- %4 = vector.transfer_read %arg1[%c3, %c3], %vf0 {permutation_map = (d0, d1)->(d0, d1)} : memref<?x?xvector<4x3xf32>>, vector<1x1x4x3xf32>
+ %4 = vector.transfer_read %arg1[%c3, %c3], %vf0 {permutation_map = affine_map<(d0, d1)->(d0, d1)>} : memref<?x?xvector<4x3xf32>>, vector<1x1x4x3xf32>
// CHECK: vector.transfer_write
- vector.transfer_write %0, %arg0[%c3, %c3] {permutation_map = (d0, d1)->(d0)} : vector<128xf32>, memref<?x?xf32>
+ vector.transfer_write %0, %arg0[%c3, %c3] {permutation_map = affine_map<(d0, d1)->(d0)>} : vector<128xf32>, memref<?x?xf32>
// CHECK: vector.transfer_write
- vector.transfer_write %1, %arg0[%c3, %c3] {permutation_map = (d0, d1)->(d1, d0)} : vector<3x7xf32>, memref<?x?xf32>
+ vector.transfer_write %1, %arg0[%c3, %c3] {permutation_map = affine_map<(d0, d1)->(d1, d0)>} : vector<3x7xf32>, memref<?x?xf32>
// CHECK: vector.transfer_write %{{.*}}, %{{.*}}[%[[C3]], %[[C3]]] {permutation_map = #[[MAP0]]} : vector<1x1x4x3xf32>, memref<?x?xvector<4x3xf32>>
- vector.transfer_write %4, %arg1[%c3, %c3] {permutation_map = (d0, d1)->(d0, d1)} : vector<1x1x4x3xf32>, memref<?x?xvector<4x3xf32>>
+ vector.transfer_write %4, %arg1[%c3, %c3] {permutation_map = affine_map<(d0, d1)->(d0, d1)>} : vector<1x1x4x3xf32>, memref<?x?xvector<4x3xf32>>
return
}
@@ -128,18 +128,18 @@ func @strided_slice(%arg0: vector<4x8x16xf32>) -> vector<2x2x16xf32> {
}
#contraction_accesses0 = [
- (b0, f0, f1, c0, c1) -> (c0, b0, c1, f0),
- (b0, f0, f1, c0, c1) -> (b0, c1, c0, f1),
- (b0, f0, f1, c0, c1) -> (b0, f0, f1)
+ affine_map<(b0, f0, f1, c0, c1) -> (c0, b0, c1, f0)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, c1, c0, f1)>,
+ affine_map<(b0, f0, f1, c0, c1) -> (b0, f0, f1)>
]
#contraction_trait0 = {
indexing_maps = #contraction_accesses0,
iterator_types = ["parallel", "parallel", "parallel", "reduction", "reduction"]
}
#contraction_accesses1 = [
- (f0, f1, f2, f3, c0, c1) -> (c0, f0, c1, f2),
- (f0, f1, f2, f3, c0, c1) -> (f1, c1, c0, f3),
- (f0, f1, f2, f3, c0, c1) -> (f0, f1, f2, f3)
+ affine_map<(f0, f1, f2, f3, c0, c1) -> (c0, f0, c1, f2)>,
+ affine_map<(f0, f1, f2, f3, c0, c1) -> (f1, c1, c0, f3)>,
+ affine_map<(f0, f1, f2, f3, c0, c1) -> (f0, f1, f2, f3)>
]
#contraction_trait1 = {
indexing_maps = #contraction_accesses1,
diff --git a/mlir/test/Dialect/VectorOps/vector-transforms.mlir b/mlir/test/Dialect/VectorOps/vector-transforms.mlir
index b5fcbaba91c..a796aaa62d2 100644
--- a/mlir/test/Dialect/VectorOps/vector-transforms.mlir
+++ b/mlir/test/Dialect/VectorOps/vector-transforms.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -test-vector-to-vector-conversion | FileCheck %s
-// CHECK-DAG: #[[MAP0:map[0-9]+]] = (d0, d1) -> (d0, d1)
+// CHECK-DAG: #[[MAP0:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @add4x2
// CHECK: %[[ES1:.*]] = vector.extract_slices %{{.*}}, [2, 2], [1, 1] : vector<4x2xf32> into tuple<vector<2x2xf32>, vector<2x2xf32>>
@@ -65,9 +65,9 @@ func @add4x4(%0: vector<4x4xf32>, %1: vector<4x4xf32>) -> vector<4x4xf32> {
}
#contraction_accesses0 = [
- (i, j, k) -> (i, k),
- (i, j, k) -> (k, j),
- (i, j, k) -> (i, j)
+ affine_map<(i, j, k) -> (i, k)>,
+ affine_map<(i, j, k) -> (k, j)>,
+ affine_map<(i, j, k) -> (i, j)>
]
#contraction_trait0 = {
indexing_maps = #contraction_accesses0,
@@ -159,9 +159,9 @@ func @contraction4x4_ijk(%arg0 : vector<4x6xf32>, %arg1 : vector<6x4xf32>,
}
#contraction_accesses1 = [
- (i, k, j) -> (i, k),
- (i, k, j) -> (k, j),
- (i, k, j) -> (i, j)
+ affine_map<(i, k, j) -> (i, k)>,
+ affine_map<(i, k, j) -> (k, j)>,
+ affine_map<(i, k, j) -> (i, j)>
]
#contraction_trait1 = {
indexing_maps = #contraction_accesses1,
@@ -259,22 +259,22 @@ func @contraction4x4_ikj_xfer_read(%arg0 : memref<4x2xf32>,
%cf0 = constant 0.0 : f32
%0 = vector.transfer_read %arg0[%c0, %c0], %cf0
- { permutation_map = (d0, d1) -> (d0, d1) }
+ { permutation_map = affine_map<(d0, d1) -> (d0, d1)> }
: memref<4x2xf32>, vector<4x2xf32>
%1 = vector.transfer_read %arg1[%c0, %c0], %cf0
- { permutation_map = (d0, d1) -> (d0, d1) }
+ { permutation_map = affine_map<(d0, d1) -> (d0, d1)> }
: memref<2x4xf32>, vector<2x4xf32>
%2 = vector.transfer_read %arg2[%c0, %c0], %cf0
- { permutation_map = (d0, d1) -> (d0, d1) }
+ { permutation_map = affine_map<(d0, d1) -> (d0, d1)> }
: memref<4x4xf32>, vector<4x4xf32>
%3 = vector.contract #contraction_trait1 %0, %1, %2
: vector<4x2xf32>, vector<2x4xf32> into vector<4x4xf32>
vector.transfer_write %3, %arg2[%c0, %c0]
- {permutation_map = (d0, d1) -> (d0, d1)}
+ {permutation_map = affine_map<(d0, d1) -> (d0, d1)>}
: vector<4x4xf32>, memref<4x4xf32>
return
}
@@ -294,10 +294,10 @@ func @vector_transfers(%arg0: index, %arg1: index) {
%cst_1 = constant 2.000000e+00 : f32
affine.for %arg2 = 0 to %arg0 step 4 {
affine.for %arg3 = 0 to %arg1 step 4 {
- %4 = vector.transfer_read %0[%arg2, %arg3], %cst {permutation_map = (d0, d1) -> (d0, d1)} : memref<?x?xf32>, vector<4x4xf32>
- %5 = vector.transfer_read %1[%arg2, %arg3], %cst {permutation_map = (d0, d1) -> (d0, d1)} : memref<?x?xf32>, vector<4x4xf32>
+ %4 = vector.transfer_read %0[%arg2, %arg3], %cst {permutation_map = affine_map<(d0, d1) -> (d0, d1)>} : memref<?x?xf32>, vector<4x4xf32>
+ %5 = vector.transfer_read %1[%arg2, %arg3], %cst {permutation_map = affine_map<(d0, d1) -> (d0, d1)>} : memref<?x?xf32>, vector<4x4xf32>
%6 = addf %4, %5 : vector<4x4xf32>
- vector.transfer_write %6, %2[%arg2, %arg3] {permutation_map = (d0, d1) -> (d0, d1)} : vector<4x4xf32>, memref<?x?xf32>
+ vector.transfer_write %6, %2[%arg2, %arg3] {permutation_map = affine_map<(d0, d1) -> (d0, d1)>} : vector<4x4xf32>, memref<?x?xf32>
}
}
return
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 7ddfe50130c..d9911883786 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -79,14 +79,14 @@ TEST_FUNC(builder_dynamic_for_func_args) {
// clang-format off
// CHECK-LABEL: func @builder_dynamic_for_func_args(%{{.*}}: index, %{{.*}}: index) {
- // CHECK: affine.for %{{.*}} = (d0) -> (d0)(%{{.*}}) to (d0) -> (d0)(%{{.*}}) step 3 {
- // CHECK: {{.*}} = affine.apply ()[s0] -> (s0 * 3)()[%{{.*}}]
- // CHECK: {{.*}} = affine.apply ()[s0, s1] -> (s1 + s0 * 3)()[%{{.*}}, %{{.*}}]
- // CHECK: {{.*}} = affine.apply ()[s0] -> (s0 + 3)()[%{{.*}}]
- // CHECK: affine.for %{{.*}} = (d0) -> (d0)(%{{.*}}) to (d0) -> (d0)(%{{.*}}) step 2 {
- // CHECK: {{.*}} = affine.apply (d0, d1) -> ((d0 + d1 * 3) floordiv 32)(%{{.*}}, %{{.*}})
- // CHECK: {{.*}} = affine.apply (d0, d1) -> (((d0 + d1 * 3) floordiv 32) * 31)(%{{.*}}, %{{.*}})
- // CHECK: {{.*}} = affine.apply (d0, d1) -> ((((d0 + d1 * 3) floordiv 32) * 31) ceildiv 32)(%{{.*}}, %{{.*}})
+ // CHECK: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%{{.*}}) to affine_map<(d0) -> (d0)>(%{{.*}}) step 3 {
+ // CHECK: {{.*}} = affine.apply affine_map<()[s0] -> (s0 * 3)>()[%{{.*}}]
+ // CHECK: {{.*}} = affine.apply affine_map<()[s0, s1] -> (s1 + s0 * 3)>()[%{{.*}}, %{{.*}}]
+ // CHECK: {{.*}} = affine.apply affine_map<()[s0] -> (s0 + 3)>()[%{{.*}}]
+ // CHECK: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%{{.*}}) to affine_map<(d0) -> (d0)>(%{{.*}}) step 2 {
+ // CHECK: {{.*}} = affine.apply affine_map<(d0, d1) -> ((d0 + d1 * 3) floordiv 32)>(%{{.*}}, %{{.*}})
+ // CHECK: {{.*}} = affine.apply affine_map<(d0, d1) -> (((d0 + d1 * 3) floordiv 32) * 31)>(%{{.*}}, %{{.*}})
+ // CHECK: {{.*}} = affine.apply affine_map<(d0, d1) -> ((((d0 + d1 * 3) floordiv 32) * 31) ceildiv 32)>(%{{.*}}, %{{.*}})
// CHECK-DAG: [[rf1:%[0-9]+]] = addf {{.*}}, {{.*}} : f32
// CHECK-DAG: [[rf2:%[0-9]+]] = divf [[rf1]], {{.*}} : f32
// CHECK-DAG: [[rf3:%[0-9]+]] = remf [[rf2]], {{.*}} : f32
@@ -118,9 +118,9 @@ TEST_FUNC(builder_dynamic_for) {
// clang-format off
// CHECK-LABEL: func @builder_dynamic_for(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
- // CHECK-DAG: [[r0:%[0-9]+]] = affine.apply ()[s0, s1] -> (s0 - s1)()[%{{.*}}, %{{.*}}]
- // CHECK-DAG: [[r1:%[0-9]+]] = affine.apply ()[s0, s1] -> (s0 + s1)()[%{{.*}}, %{{.*}}]
- // CHECK-NEXT: affine.for %{{.*}} = (d0) -> (d0)([[r0]]) to (d0) -> (d0)([[r1]]) step 2 {
+ // CHECK-DAG: [[r0:%[0-9]+]] = affine.apply affine_map<()[s0, s1] -> (s0 - s1)>()[%{{.*}}, %{{.*}}]
+ // CHECK-DAG: [[r1:%[0-9]+]] = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%{{.*}}, %{{.*}}]
+ // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>([[r0]]) to affine_map<(d0) -> (d0)>([[r1]]) step 2 {
// clang-format on
f.print(llvm::outs());
f.erase();
@@ -142,8 +142,8 @@ TEST_FUNC(builder_loop_for) {
// clang-format off
// CHECK-LABEL: func @builder_loop_for(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
- // CHECK-DAG: [[r0:%[0-9]+]] = affine.apply ()[s0, s1] -> (s0 - s1)()[%{{.*}}, %{{.*}}]
- // CHECK-DAG: [[r1:%[0-9]+]] = affine.apply ()[s0, s1] -> (s0 + s1)()[%{{.*}}, %{{.*}}]
+ // CHECK-DAG: [[r0:%[0-9]+]] = affine.apply affine_map<()[s0, s1] -> (s0 - s1)>()[%{{.*}}, %{{.*}}]
+ // CHECK-DAG: [[r1:%[0-9]+]] = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%{{.*}}, %{{.*}}]
// CHECK-NEXT: loop.for %{{.*}} = [[r0]] to [[r1]] step {{.*}} {
// clang-format on
f.print(llvm::outs());
@@ -167,7 +167,7 @@ TEST_FUNC(builder_max_min_for) {
// clang-format off
// CHECK-LABEL: func @builder_max_min_for(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
- // CHECK: affine.for %{{.*}} = max (d0, d1) -> (d0, d1)(%{{.*}}, %{{.*}}) to min (d0, d1) -> (d0, d1)(%{{.*}}, %{{.*}}) {
+ // CHECK: affine.for %{{.*}} = max affine_map<(d0, d1) -> (d0, d1)>(%{{.*}}, %{{.*}}) to min affine_map<(d0, d1) -> (d0, d1)>(%{{.*}}, %{{.*}}) {
// CHECK: return
// clang-format on
f.print(llvm::outs());
@@ -373,16 +373,16 @@ TEST_FUNC(builder_helpers) {
});
// CHECK-LABEL: @builder_helpers
- // CHECK: affine.for %{{.*}} = (d0) -> (d0)({{.*}}) to (d0) -> (d0)({{.*}}) {
- // CHECK-NEXT: affine.for %{{.*}} = (d0) -> (d0)({{.*}}) to (d0) -> (d0)({{.*}}) {
- // CHECK-NEXT: affine.for %{{.*}} = (d0) -> (d0)({{.*}}) to (d0) -> (d0)({{.*}}) {
+ // CHECK: affine.for %{{.*}} = affine_map<(d0) -> (d0)>({{.*}}) to affine_map<(d0) -> (d0)>({{.*}}) {
+ // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>({{.*}}) to affine_map<(d0) -> (d0)>({{.*}}) {
+ // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>({{.*}}) to affine_map<(d0) -> (d0)>({{.*}}) {
// CHECK-DAG: [[a:%.*]] = affine.load %arg0[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-DAG: [[b:%.*]] = addf {{.*}}, [[a]] : f32
// CHECK-DAG: [[c:%.*]] = affine.load %arg1[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-DAG: [[d:%.*]] = addf [[b]], [[c]] : f32
// CHECK-NEXT: affine.store [[d]], %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-NEXT: }
- // CHECK-NEXT: affine.for %{{.*}} = (d0) -> (d0)(%{{.*}}) to (d0) -> (d0)(%{{.*}}) {
+ // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%{{.*}}) to affine_map<(d0) -> (d0)>(%{{.*}}) {
// CHECK-DAG: [[a:%.*]] = affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-DAG: [[b:%.*]] = affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-DAG: [[c:%.*]] = addf [[b]], [[a]] : f32
@@ -638,13 +638,13 @@ TEST_FUNC(tile_2d) {
// CHECK: %[[M:[0-9]+]] = dim %arg2, 0 : memref<?x?x?xf32>
// CHECK-NEXT: %[[N:[0-9]+]] = dim %arg2, 1 : memref<?x?x?xf32>
// CHECK-NEXT: %[[P:[0-9]+]] = dim %arg2, 2 : memref<?x?x?xf32>
- // CHECK: affine.for %{{.*}} = (d0) -> (d0)(%[[ZERO]]) to (d0) -> (d0)(%[[M]]) step 512 {
- // CHECK-NEXT: affine.for %{{.*}} = (d0) -> (d0)(%[[ZERO]]) to (d0) -> (d0)(%[[N]]) step 1024 {
- // CHECK-NEXT: affine.for %{{.*}} = (d0) -> (d0)(%[[ZERO]]) to (d0) -> (d0)(%[[P]]) {
- // CHECK-NEXT: affine.for %{{.*}} = max (d0) -> (0, d0)(%{{.*}}) to min (d0)[s0] -> (s0, d0 + 512)(%{{.*}})[%[[M]]] step 16 {
- // CHECK-NEXT: affine.for %{{.*}} = max (d0) -> (0, d0)(%{{.*}}) to min (d0)[s0] -> (s0, d0 + 1024)(%{{.*}})[%[[N]]] step 32 {
- // CHECK-NEXT: affine.for %{{.*}} = max (d0, d1) -> (0, d0, d1)(%{{.*}}, %{{.*}}) to min (d0, d1)[s0] -> (s0, d0 + 1024, d1 + 32)(%{{.*}}, %{{.*}})[%[[N]]] {
- // CHECK-NEXT: affine.for %{{.*}} = max (d0, d1) -> (0, d0, d1)(%{{.*}}, %{{.*}}) to min (d0, d1)[s0] -> (s0, d0 + 512, d1 + 16)(%{{.*}}, %{{.*}})[%[[M]]] {
+ // CHECK: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[M]]) step 512 {
+ // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[N]]) step 1024 {
+ // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[P]]) {
+ // CHECK-NEXT: affine.for %{{.*}} = max affine_map<(d0) -> (0, d0)>(%{{.*}}) to min affine_map<(d0)[s0] -> (s0, d0 + 512)>(%{{.*}})[%[[M]]] step 16 {
+ // CHECK-NEXT: affine.for %{{.*}} = max affine_map<(d0) -> (0, d0)>(%{{.*}}) to min affine_map<(d0)[s0] -> (s0, d0 + 1024)>(%{{.*}})[%[[N]]] step 32 {
+ // CHECK-NEXT: affine.for %{{.*}} = max affine_map<(d0, d1) -> (0, d0, d1)>(%{{.*}}, %{{.*}}) to min affine_map<(d0, d1)[s0] -> (s0, d0 + 1024, d1 + 32)>(%{{.*}}, %{{.*}})[%[[N]]] {
+ // CHECK-NEXT: affine.for %{{.*}} = max affine_map<(d0, d1) -> (0, d0, d1)>(%{{.*}}, %{{.*}}) to min affine_map<(d0, d1)[s0] -> (s0, d0 + 512, d1 + 16)>(%{{.*}}, %{{.*}})[%[[M]]] {
// CHECK-NEXT: {{.*}} = affine.load {{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-NEXT: {{.*}} = affine.load {{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-NEXT: {{.*}} = addf {{.*}}, {{.*}} : f32
@@ -654,9 +654,9 @@ TEST_FUNC(tile_2d) {
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: }
- // CHECK-NEXT: affine.for %{{.*}} = (d0) -> (d0)(%[[ZERO]]) to (d0) -> (d0)(%[[P]]) {
- // CHECK-NEXT: affine.for %{{.*}} = max (d0) -> (0, d0)(%{{.*}}) to min (d0)[s0] -> (s0, d0 + 512)(%{{.*}})[%[[M]]] {
- // CHECK-NEXT: affine.for %{{.*}} = max (d0) -> (0, d0)(%{{.*}}) to min (d0)[s0] -> (s0, d0 + 1024)(%{{.*}})[%[[N]]] {
+ // CHECK-NEXT: affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[P]]) {
+ // CHECK-NEXT: affine.for %{{.*}} = max affine_map<(d0) -> (0, d0)>(%{{.*}}) to min affine_map<(d0)[s0] -> (s0, d0 + 512)>(%{{.*}})[%[[M]]] {
+ // CHECK-NEXT: affine.for %{{.*}} = max affine_map<(d0) -> (0, d0)>(%{{.*}}) to min affine_map<(d0)[s0] -> (s0, d0 + 1024)>(%{{.*}})[%[[N]]] {
// CHECK-NEXT: {{.*}} = affine.load {{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-NEXT: {{.*}} = affine.load {{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32>
// CHECK-NEXT: {{.*}}= addf {{.*}}, {{.*}} : f32
@@ -704,15 +704,15 @@ TEST_FUNC(vectorize_2d) {
// xCHECK-NEXT: affine.for %{{.*}} = 0 to (d0) -> (d0)(%[[N]]) step 4 {
// xCHECK-NEXT: affine.for %{{.*}} = 0 to (d0) -> (d0)(%[[P]]) step 4 {
// xCHECK-NEXT: %[[vA:.*]] = "vector.transfer_read"(%{{.*}}, %{{.*}},
-%{{.*}}, %i2) {permutation_map = (d0, d1, d2) -> (d1, d2)} : (memref<?x?x?xf32>,
-index, index, index) -> vector<4x4xf32>
+%{{.*}}, %i2) {permutation_map = affine_map<(d0, d1, d2) -> (d1, d2)>} :
+(memref<?x?x?xf32>, index, index, index) -> vector<4x4xf32>
// xCHECK-NEXT: %[[vB:.*]] = "vector.transfer_read"(%{{.*}}, %{{.*}},
-%{{.*}}, %i2) {permutation_map = (d0, d1, d2) -> (d1, d2)} :
+%{{.*}}, %i2) {permutation_map = affine_map<(d0, d1, d2) -> (d1, d2)>} :
(memref<?x?x?xf32>, index, index, index) -> vector<4x4xf32>
// xCHECK-NEXT: %[[vRES:.*]] = addf %[[vB]], %[[vA]] : vector<4x4xf32>
// xCHECK-NEXT: "vector.transfer_write"(%[[vRES:.*]], %{{.*}}, %{{.*}},
-%{{.*}}, %i2) {permutation_map = (d0, d1, d2) -> (d1, d2)} : (vector<4x4xf32>,
-memref<?x?x?xf32>, index, index, index) -> ()
+%{{.*}}, %i2) {permutation_map = affine_map<(d0, d1, d2) -> (d1, d2)>} :
+(vector<4x4xf32>, memref<?x?x?xf32>, index, index, index) -> ()
// clang-format on
mlir::PassManager pm;
@@ -795,10 +795,10 @@ TEST_FUNC(empty_map_load_store) {
}
// CHECK-LABEL: func @affine_if_op
-// CHECK: affine.if ([[d0:.*]], [[d1:.*]]){{\[}}[[s0:.*]], [[s1:.*]]{{\]}}
+// CHECK: affine.if affine_set<([[d0:.*]], [[d1:.*]]){{\[}}[[s0:.*]], [[s1:.*]]{{\]}}
// CHECK-NOT: else
-// CHECK: affine.if ([[d0:.*]], [[d1:.*]]){{\[}}[[s0:.*]], [[s1:.*]]{{\]}}
-// CHECK-NEXT: } else {
+// CHECK: affine.if affine_set<([[d0:.*]], [[d1:.*]]){{\[}}[[s0:.*]], [[s1:.*]]{{\]}}
+// CHECK-NEXT: } else {
TEST_FUNC(affine_if_op) {
using namespace edsc;
using namespace edsc::intrinsics;
@@ -832,18 +832,18 @@ TEST_FUNC(affine_if_op) {
// clang-format off
// CHECK-LABEL: func @linalg_pointwise
// CHECK: linalg.generic {args_in = 2 : i64, args_out = 1 : i64,
-// CHECK-SAME: indexing_maps = [(d0, d1) -> (d0, d1), (d0, d1) -> (d0, d1), (d0, d1) -> (d0, d1)],
+// CHECK-SAME: indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>],
// CHECK-SAME: iterator_types = ["parallel", "parallel"]}
// CHECK: addf
// CHECK: }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
// CHECK: linalg.generic {args_in = 2 : i64, args_out = 1 : i64,
-// CHECK-SAME: indexing_maps = [(d0, d1) -> (d0, d1), (d0, d1) -> (d0, d1), (d0, d1) -> (d0, d1)],
+// CHECK-SAME: indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>],
// CHECK-SAME: iterator_types = ["parallel", "parallel"]}
// CHECK: cmpf "ogt"
// CHECK: select
// CHECK: }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
// CHECK: linalg.generic {args_in = 1 : i64, args_out = 1 : i64,
-// CHECK-SAME: indexing_maps = [(d0, d1) -> (d0, d1), (d0, d1) -> (d0, d1)],
+// CHECK-SAME: indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>],
// CHECK-SAME: iterator_types = ["parallel", "parallel"]}
// CHECK: tanh
// CHECK: }: memref<?x?xf32>, memref<?x?xf32>
@@ -874,7 +874,7 @@ TEST_FUNC(linalg_pointwise_test) {
// clang-format off
// CHECK-LABEL: func @linalg_matmul
// CHECK: linalg.generic {args_in = 2 : i64, args_out = 1 : i64,
-// CHECK-SAME: indexing_maps = [(d0, d1, d2) -> (d0, d2), (d0, d1, d2) -> (d2, d1), (d0, d1, d2) -> (d0, d1)],
+// CHECK-SAME: indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
// CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"]}
/// CHECK: ^bb1(%[[a0:.*]]: f32, %[[a1:.*]]: f32, %[[a2:.*]]: f32):
// CHECK: %[[a3:.*]] = mulf %[[a0]], %[[a1]] : f32
@@ -902,9 +902,9 @@ TEST_FUNC(linalg_matmul_test) {
// clang-format off
// CHECK-LABEL: func @linalg_conv_nhwc
// CHECK: linalg.generic {args_in = 2 : i64, args_out = 1 : i64,
-// CHECK-SAME: indexing_maps = [(d0, d1, d2, d3, d4, d5, d6) -> (d0, d2 * 3 + d4 * 5, d3 * 4 + d5 * 6, d6),
-// CHECK-SAME: (d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d1),
-// CHECK-SAME: (d0, d1, d2, d3, d4, d5, d6) -> (d0, d2, d3, d1)],
+// CHECK-SAME: indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d2 * 3 + d4 * 5, d3 * 4 + d5 * 6, d6)>,
+// CHECK-SAME: affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d1)>,
+// CHECK-SAME: affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d2, d3, d1)>],
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction"]}
/// CHECK: ^bb1(%[[a0:.*]]: f32, %[[a1:.*]]: f32, %[[a2:.*]]: f32):
// CHECK: %[[a3:.*]] = mulf %[[a0]], %[[a1]] : f32
@@ -933,11 +933,11 @@ TEST_FUNC(linalg_conv_nhwc) {
// clang-format off
// CHECK-LABEL: func @linalg_dilated_conv_nhwc
// CHECK: linalg.generic {args_in = 2 : i64, args_out = 1 : i64,
-// CHECK-SAME: indexing_maps = [(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3 * 3 + d5 * 5, d4 * 4 + d6 * 6, d2),
-// CHECK-SAME: (d0, d1, d2, d3, d4, d5, d6) -> (d5, d6, d2, d1),
-// CHECK-SAME: (d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d4, d1 + d2 * 7)],
+// CHECK-SAME: indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3 * 3 + d5 * 5, d4 * 4 + d6 * 6, d2)>,
+// CHECK-SAME: affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d5, d6, d2, d1)>,
+// CHECK-SAME: affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d4, d1 + d2 * 7)>],
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "reduction", "reduction"]}
-/// CHECK: ^bb1(%[[a0:.*]]: f32, %[[a1:.*]]: f32, %[[a2:.*]]: f32):
+// CHECK: ^bb1(%[[a0:.*]]: f32, %[[a1:.*]]: f32, %[[a2:.*]]: f32):
// CHECK: %[[a3:.*]] = mulf %[[a0]], %[[a1]] : f32
// CHECK: %[[a4:.*]] = addf %[[a2]], %[[a3]] : f32
// CHECK: linalg.yield %[[a4]] : f32
@@ -965,8 +965,8 @@ TEST_FUNC(linalg_dilated_conv_nhwc) {
// clang-format off
// CHECK-LABEL: func @linalg_metadata_ops
-// CHECK: linalg.reshape {{.*}} [(d0, d1, d2) -> (d0, d1), (d0, d1, d2) -> (d2)] : memref<4x8x16xf32> into memref<32x16xf32>
-// CHECK: linalg.reshape {{.*}} [(d0, d1, d2) -> (d0, d1), (d0, d1, d2) -> (d2)] : memref<32x16xf32> into memref<4x8x16xf32>
+// CHECK: linalg.reshape {{.*}} [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] : memref<4x8x16xf32> into memref<32x16xf32>
+// CHECK: linalg.reshape {{.*}} [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] : memref<32x16xf32> into memref<4x8x16xf32>
// clang-format on
TEST_FUNC(linalg_metadata_ops) {
using namespace edsc;
diff --git a/mlir/test/IR/affine-map.mlir b/mlir/test/IR/affine-map.mlir
index ebbd4735635..9ce4d8c0bfc 100644
--- a/mlir/test/IR/affine-map.mlir
+++ b/mlir/test/IR/affine-map.mlir
@@ -1,187 +1,187 @@
// RUN: mlir-opt %s | FileCheck %s
// Identity maps used in trivial compositions in MemRefs are optimized away.
-// CHECK-NOT: #map{{[0-9]+}} = (d0, d1) -> (d0, d1)
-#map0 = (i, j) -> (i, j)
+// CHECK-NOT: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0, d1)>
+#map0 = affine_map<(i, j) -> (i, j)>
-// CHECK-NOT: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0, d1)
-#map1 = (i, j)[s0] -> (i, j)
+// CHECK-NOT: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0, d1)>
+#map1 = affine_map<(i, j)[s0] -> (i, j)>
-// CHECK: #map{{[0-9]+}} = () -> (0)
+// CHECK: #map{{[0-9]+}} = affine_map<() -> (0)>
// A map may have 0 inputs.
// However, an affine.apply always takes at least one input.
-#map2 = () -> (0)
+#map2 = affine_map<() -> (0)>
// All the maps in the following block are equivalent and are unique'd as one
// map. Therefore there should be only one output and we explicitly CHECK-NOT
// for the others.
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d0 + 1, d1 * 4 + 2)
-#map3 = (i, j) -> (i+1, 4*j + 2)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0 + 1, d1 * 4 + 2)>
+#map3 = affine_map<(i, j) -> (i+1, 4*j + 2)>
// CHECK-NOT: #map3{{[a-z]}}
-#map3a = (i, j) -> (1+i, 4*j + 2)
-#map3b = (i, j) -> (2 + 3 - 2*2 + i, 4*j + 2)
-#map3c = (i, j) -> (i +1 + 0, 4*j + 2)
-#map3d = (i, j) -> (i + 3 + 2 - 4, 4*j + 2)
-#map3e = (i, j) -> (1*i+3*2-2*2-1, 4*j + 2)
-#map3f = (i, j) -> (i + 1, 4*j*1 + 2)
-#map3g = (i, j) -> (i + 1, 2*2*j + 2)
-#map3h = (i, j) -> (i + 1, 2*j*2 + 2)
-#map3i = (i, j) -> (i + 1, j*2*2 + 2)
-#map3j = (i, j) -> (i + 1, j*1*4 + 2)
-#map3k = (i, j) -> (i + 1, j*4*1 + 2)
+#map3a = affine_map<(i, j) -> (1+i, 4*j + 2)>
+#map3b = affine_map<(i, j) -> (2 + 3 - 2*2 + i, 4*j + 2)>
+#map3c = affine_map<(i, j) -> (i +1 + 0, 4*j + 2)>
+#map3d = affine_map<(i, j) -> (i + 3 + 2 - 4, 4*j + 2)>
+#map3e = affine_map<(i, j) -> (1*i+3*2-2*2-1, 4*j + 2)>
+#map3f = affine_map<(i, j) -> (i + 1, 4*j*1 + 2)>
+#map3g = affine_map<(i, j) -> (i + 1, 2*2*j + 2)>
+#map3h = affine_map<(i, j) -> (i + 1, 2*j*2 + 2)>
+#map3i = affine_map<(i, j) -> (i + 1, j*2*2 + 2)>
+#map3j = affine_map<(i, j) -> (i + 1, j*1*4 + 2)>
+#map3k = affine_map<(i, j) -> (i + 1, j*4*1 + 2)>
// The following reduction should be unique'd out too but such expression
// simplification is not performed for IR parsing, but only through analyses
// and transforms.
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d1 - d0 + (d0 - d1 + 1) * 2 + d1 - 1, d1 + d1 + d1 + d1 + 2)
-#map3l = (i, j) -> ((j - i) + 2*(i - j + 1) + j - 1 + 0, j + j + 1 + j + j + 1)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d1 - d0 + (d0 - d1 + 1) * 2 + d1 - 1, d1 + d1 + d1 + d1 + 2)>
+#map3l = affine_map<(i, j) -> ((j - i) + 2*(i - j + 1) + j - 1 + 0, j + j + 1 + j + j + 1)>
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d0 + 2, d1)
-#map4 = (i, j) -> (3+3-2*2+i, j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0 + 2, d1)>
+#map4 = affine_map<(i, j) -> (3+3-2*2+i, j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + s0, d1)
-#map5 = (i, j)[s0] -> (i + s0, j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
+#map5 = affine_map<(i, j)[s0] -> (i + s0, j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + s0, d1 + 5)
-#map6 = (i, j)[s0] -> (i + s0, j + 5)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + s0, d1 + 5)>
+#map6 = affine_map<(i, j)[s0] -> (i + s0, j + 5)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + d1 + s0, d1)
-#map7 = (i, j)[s0] -> (i + j + s0, j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + d1 + s0, d1)>
+#map7 = affine_map<(i, j)[s0] -> (i + j + s0, j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + d1 + s0 + 5, d1)
-#map8 = (i, j)[s0] -> (5 + i + j + s0, j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + d1 + s0 + 5, d1)>
+#map8 = affine_map<(i, j)[s0] -> (5 + i + j + s0, j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + d1 + 5, d1)
-#map9 = (i, j)[s0] -> ((i + j) + 5, j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + d1 + 5, d1)>
+#map9 = affine_map<(i, j)[s0] -> ((i + j) + 5, j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + d1 + 5, d1)
-#map10 = (i, j)[s0] -> (i + (j + 5), j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + d1 + 5, d1)>
+#map10 = affine_map<(i, j)[s0] -> (i + (j + 5), j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 * 2, d1 * 3)
-#map11 = (i, j)[s0] -> (2*i, 3*j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 * 2, d1 * 3)>
+#map11 = affine_map<(i, j)[s0] -> (2*i, 3*j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + (d1 + s0 * 3) * 5 + 12, d1)
-#map12 = (i, j)[s0] -> (i + 2*6 + 5*(j+s0*3), j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + (d1 + s0 * 3) * 5 + 12, d1)>
+#map12 = affine_map<(i, j)[s0] -> (i + 2*6 + 5*(j+s0*3), j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 * 5 + d1, d1)
-#map13 = (i, j)[s0] -> (5*i + j, j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 * 5 + d1, d1)>
+#map13 = affine_map<(i, j)[s0] -> (5*i + j, j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + d1, d1)
-#map14 = (i, j)[s0] -> ((i + j), (j))
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + d1, d1)>
+#map14 = affine_map<(i, j)[s0] -> ((i + j), (j))>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + d1 + 7, d1 + 3)
-#map15 = (i, j)[s0] -> ((i + j + 2) + 5, (j)+3)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + d1 + 7, d1 + 3)>
+#map15 = affine_map<(i, j)[s0] -> ((i + j + 2) + 5, (j)+3)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0, 0)
-#map16 = (i, j)[s1] -> (i, 0)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0, 0)>
+#map16 = affine_map<(i, j)[s1] -> (i, 0)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0, d1 * s0)
-#map17 = (i, j)[s0] -> (i, s0*j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0, d1 * s0)>
+#map17 = affine_map<(i, j)[s0] -> (i, s0*j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d0, d0 * 3 + d1)
-#map19 = (i, j) -> (i, 3*i + j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0, d0 * 3 + d1)>
+#map19 = affine_map<(i, j) -> (i, 3*i + j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d0, d0 + d1 * 3)
-#map20 = (i, j) -> (i, i + 3*j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0, d0 + d1 * 3)>
+#map20 = affine_map<(i, j) -> (i, i + 3*j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0, d0 * ((s0 * s0) * 9) + 3)
-#map18 = (i, j)[N] -> (i, 2 + N*N*9*i + 1)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0, d0 * ((s0 * s0) * 9) + 3)>
+#map18 = affine_map<(i, j)[N] -> (i, 2 + N*N*9*i + 1)>
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (1, d0 + d1 * 3 + 5)
-#map21 = (i, j) -> (1, i + 3*j + 5)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (1, d0 + d1 * 3 + 5)>
+#map21 = affine_map<(i, j) -> (1, i + 3*j + 5)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (s0 * 5, d0 + d1 * 3 + d0 * 5)
-#map22 = (i, j)[s0] -> (5*s0, i + 3*j + 5*i)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (s0 * 5, d0 + d1 * 3 + d0 * 5)>
+#map22 = affine_map<(i, j)[s0] -> (5*s0, i + 3*j + 5*i)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0 * (s0 * s1), d1)
-#map23 = (i, j)[s0, s1] -> (i*(s0*s1), j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0 * (s0 * s1), d1)>
+#map23 = affine_map<(i, j)[s0, s1] -> (i*(s0*s1), j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0, d1 mod 5)
-#map24 = (i, j)[s0, s1] -> (i, j mod 5)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0, d1 mod 5)>
+#map24 = affine_map<(i, j)[s0, s1] -> (i, j mod 5)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0, d1 floordiv 5)
-#map25 = (i, j)[s0, s1] -> (i, j floordiv 5)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0, d1 floordiv 5)>
+#map25 = affine_map<(i, j)[s0, s1] -> (i, j floordiv 5)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0, d1 ceildiv 5)
-#map26 = (i, j)[s0, s1] -> (i, j ceildiv 5)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0, d1 ceildiv 5)>
+#map26 = affine_map<(i, j)[s0, s1] -> (i, j ceildiv 5)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0, d0 - d1 - 5)
-#map29 = (i, j)[s0, s1] -> (i, i - j - 5)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0, d0 - d1 - 5)>
+#map29 = affine_map<(i, j)[s0, s1] -> (i, i - j - 5)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0, d0 - d1 * s1 + 2)
-#map30 = (i, j)[M, N] -> (i, i - N*j + 2)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0, d0 - d1 * s1 + 2)>
+#map30 = affine_map<(i, j)[M, N] -> (i, i - N*j + 2)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0 * -5, d1 * -3, -2, -(d0 + d1), -s0)
-#map32 = (i, j)[s0, s1] -> (-5*i, -3*j, -2, -1*(i+j), -1*s0)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0 * -5, d1 * -3, -2, -(d0 + d1), -s0)>
+#map32 = affine_map<(i, j)[s0, s1] -> (-5*i, -3*j, -2, -1*(i+j), -1*s0)>
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (-4, -d0)
-#map33 = (i, j) -> (-2+-5-(-3), -1*i)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (-4, -d0)>
+#map33 = affine_map<(i, j) -> (-2+-5-(-3), -1*i)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0, d1 floordiv s0, d1 mod s0)
-#map34 = (i, j)[s0, s1] -> (i, j floordiv s0, j mod s0)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0, d1 floordiv s0, d1 mod s0)>
+#map34 = affine_map<(i, j)[s0, s1] -> (i, j floordiv s0, j mod s0)>
-// CHECK: #map{{[0-9]+}} = (d0, d1, d2)[s0, s1, s2] -> ((d0 * s1) * s2 + d1 * s1 + d2)
-#map35 = (i, j, k)[s0, s1, s2] -> (i*s1*s2 + j*s1 + k)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1, d2)[s0, s1, s2] -> ((d0 * s1) * s2 + d1 * s1 + d2)>
+#map35 = affine_map<(i, j, k)[s0, s1, s2] -> (i*s1*s2 + j*s1 + k)>
// Constant folding.
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (8, 4, 1, 3, 2, 4)
-#map36 = (i, j) -> (5+3, 2*2, 8-7, 100 floordiv 32, 5 mod 3, 10 ceildiv 3)
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (4, 11, 512, 15)
-#map37 = (i, j) -> (5 mod 3 + 2, 5*3 - 4, 128 * (500 ceildiv 128), 40 floordiv 7 * 3)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (8, 4, 1, 3, 2, 4)>
+#map36 = affine_map<(i, j) -> (5+3, 2*2, 8-7, 100 floordiv 32, 5 mod 3, 10 ceildiv 3)>
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (4, 11, 512, 15)>
+#map37 = affine_map<(i, j) -> (5 mod 3 + 2, 5*3 - 4, 128 * (500 ceildiv 128), 40 floordiv 7 * 3)>
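// Note: 5 mod 3 + 2 = 4, 5*3 - 4 = 11, 500 ceildiv 128 = 4 so 128 * 4 = 512,
// and 40 floordiv 7 = 5 so 5 * 3 = 15, matching the folded results above.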
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d0 * 2 + 1, d1 + 2)
-#map38 = (i, j) -> (1 + i*2, 2 + j)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0 * 2 + 1, d1 + 2)>
+#map38 = affine_map<(i, j) -> (1 + i*2, 2 + j)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0, s1] -> (d0 * s0, d0 + s0, d0 + 2, d1 * 2, s1 * 2, s0 + 2)
-#map39 = (i, j)[M, N] -> (i*M, M + i, 2+i, j*2, N*2, 2 + M)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0, s1] -> (d0 * s0, d0 + s0, d0 + 2, d1 * 2, s1 * 2, s0 + 2)>
+#map39 = affine_map<(i, j)[M, N] -> (i*M, M + i, 2+i, j*2, N*2, 2 + M)>
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> ((d0 * 5) floordiv 4, (d1 ceildiv 7) mod s0)
-#map43 = (i, j) [s0] -> ( i * 5 floordiv 4, j ceildiv 7 mod s0)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> ((d0 * 5) floordiv 4, (d1 ceildiv 7) mod s0)>
+#map43 = affine_map<(i, j) [s0] -> ( i * 5 floordiv 4, j ceildiv 7 mod s0)>
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d0 - d1 * 2, (d1 * 6) floordiv 4)
-#map44 = (i, j) -> (i - 2*j, j * 6 floordiv 4)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0 - d1 * 2, (d1 * 6) floordiv 4)>
+#map44 = affine_map<(i, j) -> (i - 2*j, j * 6 floordiv 4)>
// Simplifications
-// CHECK: #map{{[0-9]+}} = (d0, d1, d2)[s0] -> (d0 + d1 + d2 + 1, d2 + d1, (d0 * s0) * 8)
-#map45 = (i, j, k) [N] -> (1 + i + 3 + j - 3 + k, k + 5 + j - 5, 2*i*4*N)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1, d2)[s0] -> (d0 + d1 + d2 + 1, d2 + d1, (d0 * s0) * 8)>
+#map45 = affine_map<(i, j, k) [N] -> (1 + i + 3 + j - 3 + k, k + 5 + j - 5, 2*i*4*N)>
-// CHECK: #map{{[0-9]+}} = (d0, d1, d2) -> (0, d1, d0 * 2, 0)
-#map46 = (i, j, k) -> (i*0, 1*j, i * 128 floordiv 64, j * 0 floordiv 64)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1, d2) -> (0, d1, d0 * 2, 0)>
+#map46 = affine_map<(i, j, k) -> (i*0, 1*j, i * 128 floordiv 64, j * 0 floordiv 64)>
-// CHECK: #map{{[0-9]+}} = (d0, d1, d2) -> (d0, d0 * 4, 0, 0, 0)
-#map47 = (i, j, k) -> (i * 64 ceildiv 64, i * 512 ceildiv 128, 4 * j mod 4, 4*j*4 mod 8, k mod 1)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1, d2) -> (d0, d0 * 4, 0, 0, 0)>
+#map47 = affine_map<(i, j, k) -> (i * 64 ceildiv 64, i * 512 ceildiv 128, 4 * j mod 4, 4*j*4 mod 8, k mod 1)>
// floordiv should resolve similarly to ceildiv and be unique'd out.
// CHECK-NOT: #map48{{[a-z]}}
-#map48 = (i, j, k) -> (i * 64 floordiv 64, i * 512 floordiv 128, 4 * j mod 4, 4*j*4 mod 8)
+#map48 = affine_map<(i, j, k) -> (i * 64 floordiv 64, i * 512 floordiv 128, 4 * j mod 4, 4*j*4 mod 8)>
// Simplifications for mod using known GCDs of the LHS expression.
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (0, 0, 0, 1)
-#map49 = (i, j)[s0] -> ( (i * 4 + 8) mod 4, 32 * j * s0 * 8 mod 256, (4*i + (j * (s0 * 2))) mod 2, (4*i + 3) mod 2)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (0, 0, 0, 1)>
+#map49 = affine_map<(i, j)[s0] -> ( (i * 4 + 8) mod 4, 32 * j * s0 * 8 mod 256, (4*i + (j * (s0 * 2))) mod 2, (4*i + 3) mod 2)>
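// Note: in the first three results every term of the LHS is a multiple of the
// modulus, so the mod folds to 0; in the last one 4*i folds away, leaving 3 mod 2 = 1.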
// Floordiv, ceildiv divide by one.
-// CHECK: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 * 2 + 1, d1 + s0)
-#map50 = (i, j)[s0] -> ( (i * 2 + 1) ceildiv 1, (j + s0) floordiv 1)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 * 2 + 1, d1 + s0)>
+#map50 = affine_map<(i, j)[s0] -> ( (i * 2 + 1) ceildiv 1, (j + s0) floordiv 1)>
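// Note: floordiv 1 and ceildiv 1 are identities, so the dividends print unchanged.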
// floordiv, ceildiv, and mod where LHS is negative.
-// CHECK: #map{{[0-9]+}} = (d0) -> (-2, 1, -1)
-#map51 = (i) -> (-5 floordiv 3, -5 mod 3, -5 ceildiv 3)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0) -> (-2, 1, -1)>
+#map51 = affine_map<(i) -> (-5 floordiv 3, -5 mod 3, -5 ceildiv 3)>
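// Note: floordiv rounds toward negative infinity and mod is non-negative for a
// positive RHS, so -5 floordiv 3 = -2, -5 mod 3 = -5 - 3 * (-2) = 1, and
// -5 ceildiv 3 = -1.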
// Parenthesis elision.
-// CHECK: #map{{[0-9]+}} = (d0) -> (d0 * 16 - (d0 + 1) + 15)
-#map52 = (d0) -> (16*d0 + ((d0 + 1) * -1) + 15)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0) -> (d0 * 16 - (d0 + 1) + 15)>
+#map52 = affine_map<(d0) -> (16*d0 + ((d0 + 1) * -1) + 15)>
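// Note: multiplication by -1 is printed as a subtraction, so ((d0 + 1) * -1)
// appears as "- (d0 + 1)" and the redundant outer parentheses are elided.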
-// CHECK: #map{{[0-9]+}} = (d0) -> (d0 - (d0 + 1))
-#map53 = (d0) -> (d0 - (d0 + 1))
+// CHECK: #map{{[0-9]+}} = affine_map<(d0) -> (d0 - (d0 + 1))>
+#map53 = affine_map<(d0) -> (d0 - (d0 + 1))>
-// CHECK: #map{{[0-9]+}} = (d0)[s0] -> ((-s0) floordiv 4, d0 floordiv -1)
-#map54 = (d0)[s0] -> (-s0 floordiv 4, d0 floordiv -1)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0)[s0] -> ((-s0) floordiv 4, d0 floordiv -1)>
+#map54 = affine_map<(d0)[s0] -> (-s0 floordiv 4, d0 floordiv -1)>
-// CHECK: #map{{[0-9]+}} = () -> ()
-#map55 = () -> ()
+// CHECK: #map{{[0-9]+}} = affine_map<() -> ()>
+#map55 = affine_map<() -> ()>
-// CHECK: #map{{[0-9]+}} = (d0, d1) -> (d0, d0 * 2 + d1 * 4 + 2, 1, 2, (d0 * 4) mod 8)
-#map56 = (d0, d1) -> ((4*d0 + 2) floordiv 4, (4*d0 + 8*d1 + 5) floordiv 2, (2*d0 + 4*d1 + 3) mod 2, (3*d0 - 4) mod 3, (4*d0 + 8*d1) mod 8)
+// CHECK: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0, d0 * 2 + d1 * 4 + 2, 1, 2, (d0 * 4) mod 8)>
+#map56 = affine_map<(d0, d1) -> ((4*d0 + 2) floordiv 4, (4*d0 + 8*d1 + 5) floordiv 2, (2*d0 + 4*d1 + 3) mod 2, (3*d0 - 4) mod 3, (4*d0 + 8*d1) mod 8)>
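// Note: terms that are exact multiples of the divisor split out of floordiv and
// drop out of mod, e.g. (4*d0 + 2) floordiv 4 = d0 and (3*d0 - 4) mod 3 = 2.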
// Single identity maps are removed.
// CHECK: func @f0(memref<2x4xi8, 1>)
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index aac0dfc03a3..3590a28cd16 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -4,26 +4,26 @@
// Verify the generic form can be parsed.
// RUN: mlir-opt -mlir-print-op-generic %s | mlir-opt | FileCheck %s
-// CHECK: #map0 = (d0) -> (d0 + 1)
+// CHECK: #map0 = affine_map<(d0) -> (d0 + 1)>
-// CHECK: #map1 = ()[s0] -> (s0 + 1)
+// CHECK: #map1 = affine_map<()[s0] -> (s0 + 1)>
-// CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = (d0, d1) -> (d0 * 4 + d1)
-// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)
-// CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = (d0, d1)[s0] -> (d0 * s0 + d1)
+// CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
+// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
+// CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
-// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)
-// CHECK-DAG: #[[BASE_MAP3:map[0-9]+]] = (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)
-// CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)
+// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+// CHECK-DAG: #[[BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
+// CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
-// CHECK-DAG: #[[BASE_MAP1:map[0-9]+]] = (d0)[s0] -> (d0 + s0)
-// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = (d0)[s0, s1] -> (d0 * s1 + s0)
+// CHECK-DAG: #[[BASE_MAP1:map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-// CHECK-DAG: #[[BASE_MAP2:map[0-9]+]] = (d0, d1) -> (d0 * 22 + d1)
-// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)
-// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = (d0, d1, d2) -> (d0 * 16 + d1 * 4 + d2 + 8)
-// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = (d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)
-// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = (d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)
+// CHECK-DAG: #[[BASE_MAP2:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 22 + d1)>
+// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
+// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 16 + d1 * 4 + d2 + 8)>
+// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
// CHECK-LABEL: func @func_with_ops(%arg0: f32) {
func @func_with_ops(f32) {
@@ -503,11 +503,11 @@ func @affine_apply() {
%j = "std.constant"() {value = 1: index} : () -> index
// CHECK: affine.apply #map0(%c0)
- %a = "affine.apply" (%i) { map = (d0) -> (d0 + 1) } :
+ %a = "affine.apply" (%i) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
// CHECK: affine.apply #map1()[%c0]
- %b = affine.apply ()[x] -> (x+1)()[%i]
+ %b = affine.apply affine_map<()[x] -> (x+1)>()[%i]
return
}
@@ -631,27 +631,27 @@ func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// Test two dynamic sizes and dynamic offset.
// CHECK: %{{.*}} = std.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP2]]>
%1 = view %0[%arg2][%arg0, %arg1]
- : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
+ : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>>
// Test two dynamic sizes and static offset.
// CHECK: %{{.*}} = std.view %0[][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP3]]>
%2 = view %0[][%arg0, %arg1]
- : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * s0 + d1)>
+ : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>
// Test one dynamic size and dynamic offset.
// CHECK: %{{.*}} = std.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32, #[[VIEW_MAP2]]>
%3 = view %0[%arg2][%arg1]
- : memref<2048xi8> to memref<4x?xf32, (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
+ : memref<2048xi8> to memref<4x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>>
// Test one dynamic size and static offset.
// CHECK: %{{.*}} = std.view %0[][%arg0] : memref<2048xi8> to memref<?x4xf32, #[[VIEW_MAP1]]>
%4 = view %0[][%arg0]
- : memref<2048xi8> to memref<?x4xf32, (d0, d1) -> (d0 * 4 + d1)>
+ : memref<2048xi8> to memref<?x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>
// Test static sizes and static offset.
// CHECK: %{{.*}} = std.view %0[][] : memref<2048xi8> to memref<64x4xf32, #[[VIEW_MAP1]]>
%5 = view %0[][]
- : memref<2048xi8> to memref<64x4xf32, (d0, d1) -> (d0 * 4 + d1)>
+ : memref<2048xi8> to memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>
return
}
@@ -660,29 +660,29 @@ func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+ %0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
// CHECK: std.subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP0]]>
%1 = subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
- %2 = alloc()[%arg2] : memref<64xf32, (d0)[s0] -> (d0 + s0)>
+ %2 = alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
// CHECK: std.subview %2[%c1][%arg0][%c1] : memref<64xf32, #[[BASE_MAP1]]> to memref<?xf32, #[[SUBVIEW_MAP1]]>
%3 = subview %2[%c1][%arg0][%c1]
- : memref<64xf32, (d0)[s0] -> (d0 + s0)> to
- memref<?xf32, (d0)[s0, s1] -> (d0 * s1 + s0)>
+ : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to
+ memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
- %4 = alloc() : memref<64x22xf32, (d0, d1) -> (d0 * 22 + d1)>
+ %4 = alloc() : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>>
// CHECK: std.subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0] : memref<64x22xf32, #[[BASE_MAP2]]> to memref<?x?xf32, #[[SUBVIEW_MAP2]]>
%5 = subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0]
- : memref<64x22xf32, (d0, d1) -> (d0 * 22 + d1)> to
- memref<?x?xf32, (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
+ : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>> to
+ memref<?x?xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>>
// CHECK: std.subview %0[][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<4x4x4xf32, #[[SUBVIEW_MAP3]]>
%6 = subview %0[][][]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
- memref<4x4x4xf32, (d0, d1, d2) -> (d0 * 16 + d1 * 4 + d2 + 8)>
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
+ memref<4x4x4xf32, affine_map<(d0, d1, d2) -> (d0 * 16 + d1 * 4 + d2 + 8)>>
%7 = alloc(%arg1, %arg2) : memref<?x?xf32>
// CHECK: std.subview {{%.*}}[][][] : memref<?x?xf32> to memref<4x4xf32, #[[SUBVIEW_MAP4]]>
@@ -704,7 +704,7 @@ func @test_dimop(%arg0: tensor<4x4x?xf32>) {
// CHECK: %0 = dim %arg0, 2 : tensor<4x4x?xf32>
%0 = dim %arg0, 2 : tensor<4x4x?xf32>
// use dim as an index to ensure type correctness
- %1 = affine.apply (d0) -> (d0)(%0)
+ %1 = affine.apply affine_map<(d0) -> (d0)>(%0)
return
}
diff --git a/mlir/test/IR/invalid-affinemap.mlir b/mlir/test/IR/invalid-affinemap.mlir
index f13636238e3..741ad9a3dfd 100644
--- a/mlir/test/IR/invalid-affinemap.mlir
+++ b/mlir/test/IR/invalid-affinemap.mlir
@@ -2,102 +2,102 @@
// Check different error cases.
// -----
-#hello_world = (i, j) -> ((), j) // expected-error {{no expression inside parentheses}}
+#hello_world = affine_map<(i, j) -> ((), j)> // expected-error {{no expression inside parentheses}}
// -----
-#hello_world = (i, j) -> (->, j) // expected-error {{expected affine expression}}
+#hello_world = affine_map<(i, j) -> (->, j) // expected-error {{expected affine expression}}
// -----
-#hello_world = (i, j) -> (:) // expected-error {{expected affine expression}}
+#hello_world = affine_map<(i, j) -> (:) // expected-error {{expected affine expression}}
// -----
-#hello_world = (i, j) -> (, j) // expected-error {{expected affine expression}}
+#hello_world = affine_map<(i, j) -> (, j)> // expected-error {{expected affine expression}}
// -----
-#hello_world (i, j) [s0] -> (i, j) // expected-error {{expected '=' in attribute alias definition}}
+#hello_world affine_map<(i, j) [s0] -> (i, j)> // expected-error {{expected '=' in attribute alias definition}}
// -----
-#hello_world = (i, j) [s0] -> (2*i*, 3*j*i*2 + 5) // expected-error {{missing right operand of binary op}}
+#hello_world = affine_map<(i, j) [s0] -> (2*i*, 3*j*i*2 + 5)> // expected-error {{missing right operand of binary op}}
// -----
-#hello_world = (i, j) [s0] -> (i+, i+j+2 + 5) // expected-error {{missing right operand of binary op}}
+#hello_world = affine_map<(i, j) [s0] -> (i+, i+j+2 + 5)> // expected-error {{missing right operand of binary op}}
// -----
-#hello_world = (i, j) [s0] -> ((s0 + i, j) // expected-error {{expected ')'}}
+#hello_world = affine_map<(i, j) [s0] -> ((s0 + i, j)> // expected-error {{expected ')'}}
// -----
-#hello_world = (i, j) [s0] -> (((s0 + (i + j) + 5), j) // expected-error {{expected ')'}}
+#hello_world = affine_map<(i, j) [s0] -> (((s0 + (i + j) + 5), j)> // expected-error {{expected ')'}}
// -----
-#hello_world = (i, j) [s0] -> i + s0, j) // expected-error {{expected '(' at start of affine map range}}
+#hello_world = affine_map<(i, j) [s0] -> i + s0, j)> // expected-error {{expected '(' at start of affine map range}}
// -----
-#hello_world = (i, j) [s0] -> (x) // expected-error {{use of undeclared identifier}}
+#hello_world = affine_map<(i, j) [s0] -> (x)> // expected-error {{use of undeclared identifier}}
// -----
-#hello_world = (i, j, i) [s0] -> (i) // expected-error {{redefinition of identifier 'i'}}
+#hello_world = affine_map<(i, j, i) [s0] -> (i)> // expected-error {{redefinition of identifier 'i'}}
// -----
-#hello_world = (i, j) [s0, s1, s0] -> (i) // expected-error {{redefinition of identifier 's0'}}
+#hello_world = affine_map<(i, j) [s0, s1, s0] -> (i)> // expected-error {{redefinition of identifier 's0'}}
// -----
-#hello_world = (i, j) [i, s0] -> (j) // expected-error {{redefinition of identifier 'i'}}
+#hello_world = affine_map<(i, j) [i, s0] -> (j)> // expected-error {{redefinition of identifier 'i'}}
// -----
-#hello_world = (i, j) [s0, s1] -> (+i, j) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (+i, j)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, *j) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, *j)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (floordiv i 2, j) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (floordiv i 2, j)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (ceildiv i 2, j) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (ceildiv i 2, j)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (mod i 2, j) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (mod i 2, j)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (-(), j)
+#hello_world = affine_map<(i, j) [s0, s1] -> (-(), j)>
// expected-error@-1 {{no expression inside parentheses}}
// expected-error@-2 {{missing operand of negation}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, *j+5) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, *j+5)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, floordiv j+5) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, floordiv j+5)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, ceildiv j+5) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, ceildiv j+5)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, mod j+5) // expected-error {{missing left operand of binary op}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, mod j+5)> // expected-error {{missing left operand of binary op}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i*j, j) // expected-error {{non-affine expression: at least one of the multiply operands has to be either a constant or symbolic}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i*j, j)> // expected-error {{non-affine expression: at least one of the multiply operands has to be either a constant or symbolic}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, j + j ceildiv 128 mod 16 * i - 4) // expected-error {{non-affine expression: at least one of the multiply operands has to be either a constant or symbolic}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, j + j ceildiv 128 mod 16 * i - 4)> // expected-error {{non-affine expression: at least one of the multiply operands has to be either a constant or symbolic}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, j floordiv i) // expected-error {{non-affine expression: right operand of floordiv has to be either a constant or symbolic}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, j floordiv i)> // expected-error {{non-affine expression: right operand of floordiv has to be either a constant or symbolic}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, i*2 ceildiv j*5) // expected-error {{non-affine expression: right operand of ceildiv has to be either a constant or symbolic}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, i*2 ceildiv j*5)> // expected-error {{non-affine expression: right operand of ceildiv has to be either a constant or symbolic}}
// -----
-#hello_world = (i, j) [s0, s1] -> (i, i mod (2+i)) // expected-error {{non-affine expression: right operand of mod has to be either a constant or symbolic}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (i, i mod (2+i))> // expected-error {{non-affine expression: right operand of mod has to be either a constant or symbolic}}
// -----
-#hello_world = (i, j) [s0, s1] -> (-1*i j, j) // expected-error {{expected ',' or ')'}}
+#hello_world = affine_map<(i, j) [s0, s1] -> (-1*i j, j)> // expected-error {{expected ',' or ')'}}
// -----
-#hello_world = (i, j) -> (i, 3*d0 + ) // expected-error {{use of undeclared identifier}}
+#hello_world = affine_map<(i, j) -> (i, 3*d0 + )> // expected-error {{use of undeclared identifier}}
// TODO(bondhugula): Add more tests; coverage of the emitted error messages is not yet complete.
// -----
-#ABC = (i,j) -> (i+j)
-#ABC = (i,j) -> (i+j) // expected-error {{redefinition of attribute alias id 'ABC'}}
+#ABC = affine_map<(i,j) -> (i+j)>
+#ABC = affine_map<(i,j) -> (i+j)> // expected-error {{redefinition of attribute alias id 'ABC'}}
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index 67b8d4acb55..d96f646e60a 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -67,7 +67,7 @@ func @affine_apply_no_map() {
func @affine_apply_wrong_operand_count() {
^bb0:
%i = constant 0 : index
- %x = "affine.apply" (%i) {map = (d0, d1) -> ((d0 + 1), (d1 + 2))} : (index) -> (index) // expected-error {{'affine.apply' op operand count and affine map dimension and symbol count must match}}
+ %x = "affine.apply" (%i) {map = affine_map<(d0, d1) -> ((d0 + 1), (d1 + 2))>} : (index) -> (index) // expected-error {{'affine.apply' op operand count and affine map dimension and symbol count must match}}
return
}
@@ -77,7 +77,7 @@ func @affine_apply_wrong_result_count() {
^bb0:
%i = constant 0 : index
%j = constant 1 : index
- %x = "affine.apply" (%i, %j) {map = (d0, d1) -> ((d0 + 1), (d1 + 2))} : (index,index) -> (index) // expected-error {{'affine.apply' op mapping must produce one value}}
+ %x = "affine.apply" (%i, %j) {map = affine_map<(d0, d1) -> ((d0 + 1), (d1 + 2))>} : (index,index) -> (index) // expected-error {{'affine.apply' op mapping must produce one value}}
return
}
@@ -103,7 +103,7 @@ func @bad_alloc_wrong_dynamic_dim_count() {
^bb0:
%0 = constant 7 : index
// Test alloc with wrong number of dynamic dimensions.
- %1 = alloc(%0)[%1] : memref<2x4xf32, (d0, d1)[s0] -> ((d0 + s0), d1), 1> // expected-error {{op 'std.alloc' dimension operand count does not equal memref dynamic dimension count}}
+ %1 = alloc(%0)[%1] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1> // expected-error {{op 'std.alloc' dimension operand count does not equal memref dynamic dimension count}}
return
}
@@ -113,7 +113,7 @@ func @bad_alloc_wrong_symbol_count() {
^bb0:
%0 = constant 7 : index
// Test alloc with wrong number of symbols
- %1 = alloc(%0) : memref<2x?xf32, (d0, d1)[s0] -> ((d0 + s0), d1), 1> // expected-error {{operand count does not equal dimension plus symbol operand count}}
+ %1 = alloc(%0) : memref<2x?xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1> // expected-error {{operand count does not equal dimension plus symbol operand count}}
return
}
@@ -121,12 +121,12 @@ func @bad_alloc_wrong_symbol_count() {
func @test_store_zero_results() {
^bb0:
- %0 = alloc() : memref<1024x64xf32, (d0, d1) -> (d0, d1), 1>
+ %0 = alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
%1 = constant 0 : index
%2 = constant 1 : index
- %3 = load %0[%1, %2] : memref<1024x64xf32, (d0, d1) -> (d0, d1), 1>
+ %3 = load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
// Test that store returns zero results.
- %4 = store %3, %0[%1, %2] : memref<1024x64xf32, (d0, d1) -> (d0, d1), 1> // expected-error {{cannot name an operation with no results}}
+ %4 = store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> // expected-error {{cannot name an operation with no results}}
return
}
@@ -141,7 +141,7 @@ func @test_store_zero_results2(%x: i32, %p: memref<i32>) {
func @test_alloc_memref_map_rank_mismatch() {
^bb0:
- %0 = alloc() : memref<1024x64xf32, (d0) -> (d0), 1> // expected-error {{memref affine map dimension mismatch}}
+ %0 = alloc() : memref<1024x64xf32, affine_map<(d0) -> (d0)>, 1> // expected-error {{memref affine map dimension mismatch}}
return
}
@@ -729,7 +729,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = alloc() : memref<2048xi8>
// expected-error@+1 {{incorrect number of operands for type}}
%1 = view %0[][%arg0, %arg1]
- : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
+ : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>>
return
}
@@ -739,7 +739,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = alloc() : memref<2048xi8>
// expected-error@+1 {{is not strided}}
%1 = view %0[][%arg0, %arg1]
- : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0, d1, s0)>
+ : memref<2048xi8> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0, d1, s0)>>
return
}
@@ -749,18 +749,18 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = alloc() : memref<2048xf32>
// expected-error@+1 {{must be 1D memref of 8-bit integer values}}
%1 = view %0[][%arg0, %arg1]
- : memref<2048xf32> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
+ : memref<2048xf32> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>>
return
}
// -----
func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
- %0 = alloc() : memref<2048xi8, (d0) -> (d0 floordiv 8, d0 mod 8)>
+ %0 = alloc() : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>>
// expected-error@+1 {{unsupported map for base memref}}
%1 = view %0[][%arg0, %arg1]
- : memref<2048xi8, (d0) -> (d0 floordiv 8, d0 mod 8)> to
- memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
+ : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> to
+ memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>>
return
}
@@ -771,7 +771,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-error@+1 {{different memory spaces}}
%1 = view %0[][%arg0, %arg1]
: memref<2048xi8, 2> to
- memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0), 1>
+ memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>, 1>
return
}
@@ -782,7 +782,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-error@+1 {{incorrect dynamic strides}}
%1 = view %0[][%arg0, %arg1]
: memref<2048xi8> to
- memref<?x?x4xf32, (d0, d1, d2) -> (d0 * 777 + d1 * 4 + d2)>
+ memref<?x?x4xf32, affine_map<(d0, d1, d2) -> (d0 * 777 + d1 * 4 + d2)>>
return
}
@@ -793,7 +793,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
// expected-error@+1 {{incorrect dynamic strides}}
%1 = view %0[%arg0][]
: memref<2048xi8> to
- memref<16x4x?xf32, (d0, d1, d2) -> (d0 * 777 + d1 * 4 + d2)>
+ memref<16x4x?xf32, affine_map<(d0, d1, d2) -> (d0 * 777 + d1 * 4 + d2)>>
return
}
@@ -804,40 +804,40 @@ func @multiple_offsets(%arg0: index) {
// expected-error@+1 {{expects 0 or 1 offset operand}}
%1 = view %0[%arg0, %arg0][%arg0]
: memref<2048xi8> to
- memref<?x?x4xf32, (d0, d1, d2) -> (d0 * 777 + d1 * 4 + d2)>
+ memref<?x?x4xf32, affine_map<(d0, d1, d2) -> (d0 * 777 + d1 * 4 + d2)>>
return
}
// -----
func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
- %0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2), 2>
+ %0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>, 2>
// expected-error@+1 {{different memory spaces}}
%1 = subview %0[][%arg2][]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2), 2> to
- memref<8x?x4xf32, (d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>, 2> to
+ memref<8x?x4xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>>
return
}
// -----
func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
- %0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+ %0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
// expected-error@+1 {{is not strided}}
%1 = subview %0[][%arg2][]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
- memref<8x?x4xf32, (d0, d1, d2)[s0] -> (d0 + s0, d1, d2)>
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
+ memref<8x?x4xf32, affine_map<(d0, d1, d2)[s0] -> (d0 + s0, d1, d2)>>
return
}
// -----
func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
- %0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>
+ %0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>>
// expected-error@+1 {{is not strided}}
%1 = subview %0[][%arg2][]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 + d1, d1 + d2, d2)> to
- memref<8x?x4xf32, (d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>> to
+ memref<8x?x4xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>>
return
}
@@ -966,7 +966,7 @@ func @invalid_subview(%arg0 : index, %arg1 : memref<?x8x?xf32>) {
// -----
func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
- // expected-error@+1{{operand type 'memref<12x4x16xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>' and result type 'memref<12x4x16xf32, (d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>' are cast incompatible}}
+ // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>>' are cast incompatible}}
%0 = memref_cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:0, strides:[128, 32, 2]>
return
}
@@ -974,7 +974,7 @@ func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16,
// -----
func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) {
- // expected-error@+1{{operand type 'memref<12x4x16xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>' and result type 'memref<12x4x16xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2 + 16)>' are cast incompatible}}
+ // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2 + 16)>>' are cast incompatible}}
%0 = memref_cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:16, strides:[64, 16, 1]>
return
}
diff --git a/mlir/test/IR/invalid.mlir b/mlir/test/IR/invalid.mlir
index d0714d55a26..7d8ee567628 100644
--- a/mlir/test/IR/invalid.mlir
+++ b/mlir/test/IR/invalid.mlir
@@ -41,19 +41,19 @@ func @memrefs(memref<2x4xi8, i8>) // expected-error {{expected affine map in mem
// -----
// Test non-existent map in map composition of memref type.
-#map0 = (d0, d1) -> (d0, d1)
+#map0 = affine_map<(d0, d1) -> (d0, d1)>
func @memrefs(memref<2x4xi8, #map0, #map8>) // expected-error {{undefined symbol alias id 'map8'}}
// -----
// Test multiple memory space error.
-#map0 = (d0, d1) -> (d0, d1)
+#map0 = affine_map<(d0, d1) -> (d0, d1)>
func @memrefs(memref<2x4xi8, #map0, 1, 2>) // expected-error {{multiple memory spaces specified in memref type}}
// -----
// Test affine map after memory space.
-#map0 = (d0, d1) -> (d0, d1)
-#map1 = (d0, d1) -> (d0, d1)
+#map0 = affine_map<(d0, d1) -> (d0, d1)>
+#map1 = affine_map<(d0, d1) -> (d0, d1)>
func @memrefs(memref<2x4xi8, #map0, 1, #map1>) // expected-error {{expected memory space to be last in memref type}}
@@ -61,13 +61,13 @@ func @memrefs(memref<2x4xi8, #map0, 1, #map1>) // expected-error {{expected memo
// Test dimension mismatch between memref and layout map.
// The error must be emitted even for the trivial identity layout maps that are
// dropped in type creation.
-#map0 = (d0, d1) -> (d0, d1)
+#map0 = affine_map<(d0, d1) -> (d0, d1)>
func @memrefs(memref<42xi8, #map0>) // expected-error {{memref affine map dimension mismatch}}
// -----
-#map0 = (d0, d1) -> (d0, d1)
-#map1 = (d0) -> (d0)
+#map0 = affine_map<(d0, d1) -> (d0, d1)>
+#map1 = affine_map<(d0) -> (d0)>
func @memrefs(memref<42x42xi8, #map0, #map1>) // expected-error {{memref affine map dimension mismatch}}
// -----
@@ -227,7 +227,7 @@ func @incomplete_for() {
// -----
-#map0 = (d0) -> (d0 floordiv 4)
+#map0 = affine_map<(d0) -> (d0 floordiv 4)>
func @reference_to_iv_in_bound() {
// expected-error@+2 {{region entry argument '%i0' is already in use}}
@@ -257,7 +257,7 @@ func @non_operation() {
func @invalid_if_conditional2() {
affine.for %i = 1 to 10 {
- affine.if (i)[N] : (i >= ) // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
+ affine.if affine_set<(i)[N] : (i >= )> // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
}
}
@@ -265,7 +265,7 @@ func @invalid_if_conditional2() {
func @invalid_if_conditional3() {
affine.for %i = 1 to 10 {
- affine.if (i)[N] : (i == 1) // expected-error {{expected '0' after '=='}}
+ affine.if affine_set<(i)[N] : (i == 1)> // expected-error {{expected '0' after '=='}}
}
}
@@ -273,7 +273,7 @@ func @invalid_if_conditional3() {
func @invalid_if_conditional4() {
affine.for %i = 1 to 10 {
- affine.if (i)[N] : (i >= 2) // expected-error {{expected '0' after '>='}}
+ affine.if affine_set<(i)[N] : (i >= 2)> // expected-error {{expected '0' after '>='}}
}
}
@@ -281,7 +281,7 @@ func @invalid_if_conditional4() {
func @invalid_if_conditional5() {
affine.for %i = 1 to 10 {
- affine.if (i)[N] : (i <= 0 ) // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
+ affine.if affine_set<(i)[N] : (i <= 0)> // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
}
}
@@ -289,7 +289,7 @@ func @invalid_if_conditional5() {
func @invalid_if_conditional6() {
affine.for %i = 1 to 10 {
- affine.if (i) : (i) // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
+ affine.if affine_set<(i) : (i)> // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
}
}
@@ -297,13 +297,13 @@ func @invalid_if_conditional6() {
// TODO: support affine.if (1)?
func @invalid_if_conditional7() {
affine.for %i = 1 to 10 {
- affine.if (i) : (1) // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
+ affine.if affine_set<(i) : (1)> // expected-error {{expected '== 0' or '>= 0' at end of affine constraint}}
}
}
// -----
-#map = (d0) -> (% // expected-error {{invalid SSA name}}
+#map = affine_map<(d0) -> (% // expected-error {{invalid SSA name}}
// -----
@@ -524,7 +524,7 @@ func @undefined_function() {
// -----
-#map1 = (i)[j] -> (i+j)
+#map1 = affine_map<(i)[j] -> (i+j)>
func @bound_symbol_mismatch(%N : index) {
affine.for %i = #map1(%N) to 100 {
@@ -535,7 +535,7 @@ func @bound_symbol_mismatch(%N : index) {
// -----
-#map1 = (i)[j] -> (i+j)
+#map1 = affine_map<(i)[j] -> (i+j)>
func @bound_dim_mismatch(%N : index) {
affine.for %i = #map1(%N, %N)[%N] to 100 {
@@ -556,7 +556,7 @@ func @large_bound() {
// -----
func @max_in_upper_bound(%N : index) {
- affine.for %i = 1 to max (i)->(N, 100) { //expected-error {{expected non-function type}}
+ affine.for %i = 1 to max affine_map<(i)->(N, 100)> { //expected-error {{expected non-function type}}
}
return
}
@@ -572,17 +572,17 @@ func @step_typo() {
// -----
func @invalid_bound_map(%N : i32) {
- affine.for %i = 1 to (i)->(j)(%N) { //expected-error {{use of undeclared identifier}}
+ affine.for %i = 1 to affine_map<(i)->(j)>(%N) { //expected-error {{use of undeclared identifier}}
}
return
}
// -----
-#set0 = (i)[N, M] : )i >= 0) // expected-error {{expected '(' at start of integer set constraint list}}
+#set0 = affine_set<(i)[N, M] : )i >= 0)> // expected-error {{expected '(' at start of integer set constraint list}}
// -----
-#set0 = (i)[N] : (i >= 0, N - i >= 0)
+#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
func @invalid_if_operands1(%N : index) {
affine.for %i = 1 to 10 {
@@ -590,7 +590,7 @@ func @invalid_if_operands1(%N : index) {
// expected-error@-1 {{symbol operand count and integer set symbol count must match}}
// -----
-#set0 = (i)[N] : (i >= 0, N - i >= 0)
+#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
func @invalid_if_operands2(%N : index) {
affine.for %i = 1 to 10 {
@@ -598,7 +598,7 @@ func @invalid_if_operands2(%N : index) {
// expected-error@-1 {{dim operand count and integer set dim count must match}}
// -----
-#set0 = (i)[N] : (i >= 0, N - i >= 0)
+#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
func @invalid_if_operands3(%N : index) {
affine.for %i = 1 to 10 {
@@ -842,7 +842,7 @@ func @invalid_tensor_literal() {
func @invalid_affine_structure() {
%c0 = constant 0 : index
- %idx = affine.apply (d0, d1) (%c0, %c0) // expected-error {{expected '->' or ':'}}
+ %idx = affine.apply affine_map<(d0, d1)> (%c0, %c0) // expected-error {{expected '->' or ':'}}
return
}
@@ -850,7 +850,7 @@ func @invalid_affine_structure() {
func @missing_for_max(%arg0: index, %arg1: index, %arg2: memref<100xf32>) {
// expected-error @+1 {{lower loop bound affine map with multiple results requires 'max' prefix}}
- affine.for %i0 = ()[s]->(0,s-1)()[%arg0] to %arg1 {
+ affine.for %i0 = affine_map<()[s]->(0,s-1)>()[%arg0] to %arg1 {
}
return
}
@@ -859,7 +859,7 @@ func @missing_for_max(%arg0: index, %arg1: index, %arg2: memref<100xf32>) {
func @missing_for_min(%arg0: index, %arg1: index, %arg2: memref<100xf32>) {
// expected-error @+1 {{upper loop bound affine map with multiple results requires 'min' prefix}}
- affine.for %i0 = %arg0 to ()[s]->(100,s+1)()[%arg1] {
+ affine.for %i0 = %arg0 to affine_map<()[s]->(100,s+1)>()[%arg1] {
}
return
}
diff --git a/mlir/test/IR/locations.mlir b/mlir/test/IR/locations.mlir
index 9a2017eec73..b0039dde15e 100644
--- a/mlir/test/IR/locations.mlir
+++ b/mlir/test/IR/locations.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -mlir-print-debuginfo | FileCheck %s
// This test verifies that debug locations are round-trippable.
-#set0 = (d0) : (1 == 0)
+#set0 = affine_set<(d0) : (1 == 0)>
// CHECK-LABEL: func @inline_notation
func @inline_notation() -> i32 {
diff --git a/mlir/test/IR/memory-ops.mlir b/mlir/test/IR/memory-ops.mlir
index 24ea180a9cd..c204bed16db 100644
--- a/mlir/test/IR/memory-ops.mlir
+++ b/mlir/test/IR/memory-ops.mlir
@@ -1,28 +1,28 @@
// RUN: mlir-opt %s | FileCheck %s
-// CHECK: #map0 = (d0, d1)[s0] -> (d0 + s0, d1)
+// CHECK: #map0 = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
// CHECK-LABEL: func @alloc() {
func @alloc() {
^bb0:
// Test simple alloc.
// CHECK: %0 = alloc() : memref<1024x64xf32, 1>
- %0 = alloc() : memref<1024x64xf32, (d0, d1) -> (d0, d1), 1>
+ %0 = alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
%c0 = "std.constant"() {value = 0: index} : () -> index
%c1 = "std.constant"() {value = 1: index} : () -> index
// Test alloc with dynamic dimensions.
// CHECK: %1 = alloc(%c0, %c1) : memref<?x?xf32, 1>
- %1 = alloc(%c0, %c1) : memref<?x?xf32, (d0, d1) -> (d0, d1), 1>
+ %1 = alloc(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
// Test alloc with no dynamic dimensions and one symbol.
// CHECK: %2 = alloc()[%c0] : memref<2x4xf32, #map0, 1>
- %2 = alloc()[%c0] : memref<2x4xf32, (d0, d1)[s0] -> ((d0 + s0), d1), 1>
+ %2 = alloc()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
// Test alloc with dynamic dimensions and one symbol.
// CHECK: %3 = alloc(%c1)[%c0] : memref<2x?xf32, #map0, 1>
- %3 = alloc(%c1)[%c0] : memref<2x?xf32, (d0, d1)[s0] -> (d0 + s0, d1), 1>
+ %3 = alloc(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
// Alloc with no mappings.
// b/116054838 Parser crash while parsing ill-formed AllocOp
@@ -37,10 +37,10 @@ func @alloc() {
func @dealloc() {
^bb0:
// CHECK: %0 = alloc() : memref<1024x64xf32>
- %0 = alloc() : memref<1024x64xf32, (d0, d1) -> (d0, d1), 0>
+ %0 = alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
// CHECK: dealloc %0 : memref<1024x64xf32>
- dealloc %0 : memref<1024x64xf32, (d0, d1) -> (d0, d1), 0>
+ dealloc %0 : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
return
}
@@ -48,16 +48,16 @@ func @dealloc() {
func @load_store() {
^bb0:
// CHECK: %0 = alloc() : memref<1024x64xf32, 1>
- %0 = alloc() : memref<1024x64xf32, (d0, d1) -> (d0, d1), 1>
+ %0 = alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
%1 = constant 0 : index
%2 = constant 1 : index
// CHECK: %1 = load %0[%c0, %c1] : memref<1024x64xf32, 1>
- %3 = load %0[%1, %2] : memref<1024x64xf32, (d0, d1) -> (d0, d1), 1>
+ %3 = load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
// CHECK: store %1, %0[%c0, %c1] : memref<1024x64xf32, 1>
- store %3, %0[%1, %2] : memref<1024x64xf32, (d0, d1) -> (d0, d1), 1>
+ store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
return
}
@@ -68,8 +68,8 @@ func @dma_ops() {
%stride = constant 32 : index
%elt_per_stride = constant 16 : index
- %A = alloc() : memref<256 x f32, (d0) -> (d0), 0>
- %Ah = alloc() : memref<256 x f32, (d0) -> (d0), 1>
+ %A = alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
+ %Ah = alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 1>
%tag = alloc() : memref<1 x f32>
%num_elements = constant 256 : index
diff --git a/mlir/test/IR/opaque_locations.mlir b/mlir/test/IR/opaque_locations.mlir
index 557534d558e..3718d345197 100644
--- a/mlir/test/IR/opaque_locations.mlir
+++ b/mlir/test/IR/opaque_locations.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -test-opaque-loc -mlir-print-debuginfo | FileCheck %s
// This test verifies that debug opaque locations can be printed.
-#set0 = (d0) : (1 == 0)
+#set0 = affine_set<(d0) : (1 == 0)>
// CHECK: MyLocation: 0: 'foo' op
// CHECK: nullptr: 'foo' op
diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir
index 41e6d5cefcb..c6636b30a02 100644
--- a/mlir/test/IR/parser.mlir
+++ b/mlir/test/IR/parser.mlir
@@ -1,50 +1,50 @@
// RUN: mlir-opt %s | FileCheck %s
-// CHECK-DAG: #map{{[0-9]+}} = (d0, d1, d2, d3, d4)[s0] -> (d0, d1, d2, d4, d3)
-#map0 = (d0, d1, d2, d3, d4)[s0] -> (d0, d1, d2, d4, d3)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0, d1, d2, d3, d4)[s0] -> (d0, d1, d2, d4, d3)>
+#map0 = affine_map<(d0, d1, d2, d3, d4)[s0] -> (d0, d1, d2, d4, d3)>
-// CHECK-DAG: #map{{[0-9]+}} = (d0) -> (d0)
-#map1 = (d0) -> (d0)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0) -> (d0)>
+#map1 = affine_map<(d0) -> (d0)>
-// CHECK-DAG: #map{{[0-9]+}} = (d0, d1, d2) -> (d0, d1, d2)
-#map2 = (d0, d1, d2) -> (d0, d1, d2)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG: #map{{[0-9]+}} = (d0, d1, d2) -> (d1, d0, d2)
-#map3 = (d0, d1, d2) -> (d1, d0, d2)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
+#map3 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
-// CHECK-DAG: #map{{[0-9]+}} = (d0, d1, d2) -> (d2, d1, d0)
-#map4 = (d0, d1, d2) -> (d2, d1, d0)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
+#map4 = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
-// CHECK-DAG: #map{{[0-9]+}} = ()[s0] -> (0, s0 - 1)
-#inline_map_minmax_loop1 = ()[s0] -> (0, s0 - 1)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<()[s0] -> (0, s0 - 1)>
+#inline_map_minmax_loop1 = affine_map<()[s0] -> (0, s0 - 1)>
-// CHECK-DAG: #map{{[0-9]+}} = ()[s0] -> (100, s0 + 1)
-#inline_map_minmax_loop2 = ()[s0] -> (100, s0 + 1)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<()[s0] -> (100, s0 + 1)>
+#inline_map_minmax_loop2 = affine_map<()[s0] -> (100, s0 + 1)>
-// CHECK-DAG: #map{{[0-9]+}} = (d0, d1)[s0] -> (d0 + d1 + s0)
-#bound_map1 = (i, j)[s] -> (i + j + s)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0, d1)[s0] -> (d0 + d1 + s0)>
+#bound_map1 = affine_map<(i, j)[s] -> (i + j + s)>
-// CHECK-DAG: #map{{[0-9]+}} = (d0, d1) -> (d0 + d1)
-#inline_map_loop_bounds2 = (d0, d1) -> (d0 + d1)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0, d1) -> (d0 + d1)>
+#inline_map_loop_bounds2 = affine_map<(d0, d1) -> (d0 + d1)>
-// CHECK-DAG: #map{{[0-9]+}} = (d0)[s0] -> (d0 + s0, d0 - s0)
-#bound_map2 = (i)[s] -> (i + s, i - s)
+// CHECK-DAG: #map{{[0-9]+}} = affine_map<(d0)[s0] -> (d0 + s0, d0 - s0)>
+#bound_map2 = affine_map<(i)[s] -> (i + s, i - s)>
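// Note: user-chosen identifiers such as 'i', 'j' and 's' are canonicalized to
// d<N>/s<N> when the maps are printed, which is why the CHECK lines differ from
// the definitions above.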
// All maps appear before all sets; within each group the order is arbitrary.
// CHECK-NOT: Placeholder
-// CHECK-DAG: #set{{[0-9]+}} = (d0)[s0, s1] : (d0 >= 0, -d0 + s0 >= 0, s0 - 5 == 0, -d0 + s1 + 1 >= 0)
-#set0 = (i)[N, M] : (i >= 0, -i + N >= 0, N - 5 == 0, -i + M + 1 >= 0)
+// CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0)[s0, s1] : (d0 >= 0, -d0 + s0 >= 0, s0 - 5 == 0, -d0 + s1 + 1 >= 0)>
+#set0 = affine_set<(i)[N, M] : (i >= 0, -i + N >= 0, N - 5 == 0, -i + M + 1 >= 0)>
-// CHECK-DAG: #set{{[0-9]+}} = (d0, d1)[s0] : (d0 >= 0, d1 >= 0)
-#set1 = (d0, d1)[s0] : (d0 >= 0, d1 >= 0)
+// CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0, d1)[s0] : (d0 >= 0, d1 >= 0)>
+#set1 = affine_set<(d0, d1)[s0] : (d0 >= 0, d1 >= 0)>
-// CHECK-DAG: #set{{[0-9]+}} = (d0) : (d0 - 1 == 0)
-#set2 = (d0) : (d0 - 1 == 0)
+// CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0) : (d0 - 1 == 0)>
+#set2 = affine_set<(d0) : (d0 - 1 == 0)>
-// CHECK-DAG: [[SET_TRUE:#set[0-9]+]] = () : (0 == 0)
+// CHECK-DAG: [[SET_TRUE:#set[0-9]+]] = affine_set<() : (0 == 0)>
-// CHECK-DAG: #set{{[0-9]+}} = (d0)[s0] : (d0 - 2 >= 0, -d0 + 4 >= 0)
+// CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0)[s0] : (d0 - 2 >= 0, -d0 + 4 >= 0)>
// CHECK: func @foo(i32, i64) -> f32
func @foo(i32, i64) -> f32
@@ -86,48 +86,48 @@ func @memrefs234(memref<2x4x8xi8, #map2, #map3, #map4, 3>)
// Test memref inline affine map compositions, minding that identity maps are removed.
// CHECK: func @memrefs3(memref<2x4x8xi8>)
-func @memrefs3(memref<2x4x8xi8, (d0, d1, d2) -> (d0, d1, d2)>)
+func @memrefs3(memref<2x4x8xi8, affine_map<(d0, d1, d2) -> (d0, d1, d2)>>)
// CHECK: func @memrefs33(memref<2x4x8xi8, #map{{[0-9]+}}, 1>)
-func @memrefs33(memref<2x4x8xi8, (d0, d1, d2) -> (d0, d1, d2), (d0, d1, d2) -> (d1, d0, d2), 1>)
+func @memrefs33(memref<2x4x8xi8, affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d1, d0, d2)>, 1>)
// CHECK: func @memrefs_drop_triv_id_inline(memref<2xi8>)
-func @memrefs_drop_triv_id_inline(memref<2xi8, (d0) -> (d0)>)
+func @memrefs_drop_triv_id_inline(memref<2xi8, affine_map<(d0) -> (d0)>>)
// CHECK: func @memrefs_drop_triv_id_inline0(memref<2xi8>)
-func @memrefs_drop_triv_id_inline0(memref<2xi8, (d0) -> (d0), 0>)
+func @memrefs_drop_triv_id_inline0(memref<2xi8, affine_map<(d0) -> (d0)>, 0>)
// CHECK: func @memrefs_drop_triv_id_inline1(memref<2xi8, 1>)
-func @memrefs_drop_triv_id_inline1(memref<2xi8, (d0) -> (d0), 1>)
+func @memrefs_drop_triv_id_inline1(memref<2xi8, affine_map<(d0) -> (d0)>, 1>)
// Identity maps should be dropped from the composition, but not the pair of
// "interchange" maps that, if composed, would be also an identity.
// CHECK: func @memrefs_drop_triv_id_composition(memref<2x2xi8, #map{{[0-9]+}}, #map{{[0-9]+}}>)
func @memrefs_drop_triv_id_composition(memref<2x2xi8,
- (d0, d1) -> (d1, d0),
- (d0, d1) -> (d0, d1),
- (d0, d1) -> (d1, d0),
- (d0, d1) -> (d0, d1),
- (d0, d1) -> (d0, d1)>)
+ affine_map<(d0, d1) -> (d1, d0)>,
+ affine_map<(d0, d1) -> (d0, d1)>,
+ affine_map<(d0, d1) -> (d1, d0)>,
+ affine_map<(d0, d1) -> (d0, d1)>,
+ affine_map<(d0, d1) -> (d0, d1)>>)
// CHECK: func @memrefs_drop_triv_id_trailing(memref<2x2xi8, #map{{[0-9]+}}>)
-func @memrefs_drop_triv_id_trailing(memref<2x2xi8, (d0, d1) -> (d1, d0),
- (d0, d1) -> (d0, d1)>)
+func @memrefs_drop_triv_id_trailing(memref<2x2xi8, affine_map<(d0, d1) -> (d1, d0)>,
+ affine_map<(d0, d1) -> (d0, d1)>>)
// CHECK: func @memrefs_drop_triv_id_middle(memref<2x2xi8, #map{{[0-9]+}}, #map{{[0-9]+}}>)
func @memrefs_drop_triv_id_middle(memref<2x2xi8,
- (d0, d1) -> (d0, d1 + 1),
- (d0, d1) -> (d0, d1),
- (d0, d1) -> (d0 + 1, d1)>)
+ affine_map<(d0, d1) -> (d0, d1 + 1)>,
+ affine_map<(d0, d1) -> (d0, d1)>,
+ affine_map<(d0, d1) -> (d0 + 1, d1)>>)
// CHECK: func @memrefs_drop_triv_id_multiple(memref<2xi8>)
-func @memrefs_drop_triv_id_multiple(memref<2xi8, (d0) -> (d0), (d0) -> (d0)>)
+func @memrefs_drop_triv_id_multiple(memref<2xi8, affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>>)
// These maps appeared before, so they must be uniqued and hoisted to the beginning.
// Identity map should be removed.
// CHECK: func @memrefs_compose_with_id(memref<2x2xi8, #map{{[0-9]+}}>)
-func @memrefs_compose_with_id(memref<2x2xi8, (d0, d1) -> (d0, d1),
- (d0, d1) -> (d1, d0)>)
+func @memrefs_compose_with_id(memref<2x2xi8, affine_map<(d0, d1) -> (d0, d1)>,
+ affine_map<(d0, d1) -> (d1, d0)>>)
// CHECK: func @complex_types(complex<i1>) -> complex<f32>
@@ -259,7 +259,7 @@ func @complex_loops() {
func @triang_loop(%arg0: index, %arg1: memref<?x?xi32>) {
%c = constant 0 : i32 // CHECK: %{{.*}} = constant 0 : i32
affine.for %i0 = 1 to %arg0 { // CHECK: affine.for %{{.*}} = 1 to %{{.*}} {
- affine.for %i1 = (d0)[]->(d0)(%i0)[] to %arg0 { // CHECK: affine.for %{{.*}} = #map{{[0-9]+}}(%{{.*}}) to %{{.*}} {
+ affine.for %i1 = affine_map<(d0)[]->(d0)>(%i0)[] to %arg0 { // CHECK: affine.for %{{.*}} = #map{{[0-9]+}}(%{{.*}}) to %{{.*}} {
store %c, %arg1[%i0, %i1] : memref<?x?xi32> // CHECK: store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}]
} // CHECK: }
} // CHECK: }
@@ -269,7 +269,7 @@ func @triang_loop(%arg0: index, %arg1: memref<?x?xi32>) {
// CHECK: func @minmax_loop(%{{.*}}: index, %{{.*}}: index, %{{.*}}: memref<100xf32>) {
func @minmax_loop(%arg0: index, %arg1: index, %arg2: memref<100xf32>) {
// CHECK: affine.for %{{.*}} = max #map{{.*}}()[%{{.*}}] to min #map{{.*}}()[%{{.*}}] {
- affine.for %i0 = max()[s]->(0,s-1)()[%arg0] to min()[s]->(100,s+1)()[%arg1] {
+ affine.for %i0 = max affine_map<()[s]->(0,s-1)>()[%arg0] to min affine_map<()[s]->(100,s+1)>()[%arg1] {
// CHECK: "foo"(%{{.*}}, %{{.*}}) : (memref<100xf32>, index) -> ()
"foo"(%arg2, %i0) : (memref<100xf32>, index) -> ()
} // CHECK: }
@@ -283,19 +283,19 @@ func @loop_bounds(%N : index) {
// CHECK: affine.for %{{.*}} = %{{.*}} to %{{.*}}
affine.for %i = %s to %N {
// CHECK: affine.for %{{.*}} = #map{{[0-9]+}}(%{{.*}}) to 0
- affine.for %j = (d0)[]->(d0)(%i)[] to 0 step 1 {
+ affine.for %j = affine_map<(d0)[]->(d0)>(%i)[] to 0 step 1 {
// CHECK: %{{.*}} = affine.apply #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}]
- %w1 = affine.apply(d0, d1)[s0] -> (d0+d1) (%i, %j) [%s]
+ %w1 = affine.apply affine_map<(d0, d1)[s0] -> (d0+d1)> (%i, %j) [%s]
// CHECK: %{{.*}} = affine.apply #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}]
- %w2 = affine.apply(d0, d1)[s0] -> (s0+1) (%i, %j) [%s]
+ %w2 = affine.apply affine_map<(d0, d1)[s0] -> (s0+1)> (%i, %j) [%s]
// CHECK: affine.for %{{.*}} = #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}] to #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}] {
- affine.for %k = #bound_map1 (%w1, %i)[%N] to (i, j)[s] -> (i + j + s) (%w2, %j)[%s] {
+ affine.for %k = #bound_map1 (%w1, %i)[%N] to affine_map<(i, j)[s] -> (i + j + s)> (%w2, %j)[%s] {
// CHECK: "foo"(%{{.*}}, %{{.*}}, %{{.*}}) : (index, index, index) -> ()
"foo"(%i, %j, %k) : (index, index, index)->()
// CHECK: %{{.*}} = constant 30 : index
%c = constant 30 : index
// CHECK: %{{.*}} = affine.apply #map{{.*}}(%{{.*}}, %{{.*}})
- %u = affine.apply (d0, d1)->(d0+d1) (%N, %c)
+ %u = affine.apply affine_map<(d0, d1)->(d0+d1)> (%N, %c)
// CHECK: affine.for %{{.*}} = max #map{{.*}}(%{{.*}})[%{{.*}}] to min #map{{.*}}(%{{.*}})[%{{.*}}] {
affine.for %l = max #bound_map2(%i)[%u] to min #bound_map2(%k)[%c] {
// CHECK: "bar"(%{{.*}}) : (index) -> ()
@@ -317,11 +317,11 @@ func @ifinst(%N: index) {
%y = "add"(%x, %i) : (i32, index) -> i32 // CHECK: %{{.*}} = "add"(%{{.*}}, %{{.*}}) : (i32, index) -> i32
%z = "mul"(%y, %y) : (i32, i32) -> i32 // CHECK: %{{.*}} = "mul"(%{{.*}}, %{{.*}}) : (i32, i32) -> i32
} else { // CHECK } else {
- affine.if (i)[N] : (i - 2 >= 0, 4 - i >= 0)(%i)[%N] { // CHECK affine.if (#set1(%{{.*}})[%{{.*}}]) {
+ affine.if affine_set<(i)[N] : (i - 2 >= 0, 4 - i >= 0)>(%i)[%N] { // CHECK affine.if (#set1(%{{.*}})[%{{.*}}]) {
// CHECK: %{{.*}} = constant 1 : index
%u = constant 1 : index
// CHECK: %{{.*}} = affine.apply #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}]
- %w = affine.apply (d0,d1)[s0] -> (d0+d1+s0) (%i, %i) [%u]
+ %w = affine.apply affine_map<(d0,d1)[s0] -> (d0+d1+s0)> (%i, %i) [%u]
} else { // CHECK } else {
%v = constant 3 : i32 // %c3_i32 = constant 3 : i32
}
@@ -356,7 +356,7 @@ func @attributes() {
"foo"() {map1 = #map1} : () -> ()
// CHECK: "foo"() {map2 = #map{{[0-9]+}}}
- "foo"() {map2 = (d0, d1, d2) -> (d0, d1, d2)} : () -> ()
+ "foo"() {map2 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>} : () -> ()
// CHECK: "foo"() {map12 = [#map{{[0-9]+}}, #map{{[0-9]+}}]}
"foo"() {map12 = [#map1, #map2]} : () -> ()
@@ -365,7 +365,7 @@ func @attributes() {
"foo"() {set1 = #set1} : () -> ()
// CHECK: "foo"() {set2 = #set{{[0-9]+}}}
- "foo"() {set2 = (d0, d1, d2) : (d0 >= 0, d1 >= 0, d2 - d1 == 0)} : () -> ()
+ "foo"() {set2 = affine_set<(d0, d1, d2) : (d0 >= 0, d1 >= 0, d2 - d1 == 0)>} : () -> ()
// CHECK: "foo"() {set12 = [#set{{[0-9]+}}, #set{{[0-9]+}}]}
"foo"() {set12 = [#set1, #set2]} : () -> ()
@@ -565,12 +565,12 @@ func @funcattrwithblock() -> ()
}
// CHECK-label func @funcsimplemap
-#map_simple0 = ()[] -> (10)
-#map_simple1 = ()[s0] -> (s0)
-#map_non_simple0 = (d0)[] -> (d0)
-#map_non_simple1 = (d0)[s0] -> (d0 + s0)
-#map_non_simple2 = ()[s0, s1] -> (s0 + s1)
-#map_non_simple3 = ()[s0] -> (s0 + 3)
+#map_simple0 = affine_map<()[] -> (10)>
+#map_simple1 = affine_map<()[s0] -> (s0)>
+#map_non_simple0 = affine_map<(d0)[] -> (d0)>
+#map_non_simple1 = affine_map<(d0)[s0] -> (d0 + s0)>
+#map_non_simple2 = affine_map<()[s0, s1] -> (s0 + s1)>
+#map_non_simple3 = affine_map<()[s0] -> (s0 + 3)>
func @funcsimplemap(%arg0: index, %arg1: index) -> () {
affine.for %i0 = 0 to #map_simple0()[] {
// CHECK: affine.for %{{.*}} = 0 to 10 {
@@ -789,7 +789,7 @@ func @type_alias() -> !i32_type_alias {
// CHECK-LABEL: func @no_integer_set_constraints(
func @no_integer_set_constraints() {
// CHECK: affine.if [[SET_TRUE]]() {
- affine.if () : () () {
+ affine.if affine_set<() : ()> () {
}
return
}
diff --git a/mlir/test/IR/pretty-locations.mlir b/mlir/test/IR/pretty-locations.mlir
index da76ab94c5f..116bfa51233 100644
--- a/mlir/test/IR/pretty-locations.mlir
+++ b/mlir/test/IR/pretty-locations.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -mlir-print-debuginfo -mlir-pretty-debuginfo | FileCheck %s
-#set0 = (d0) : (1 == 0)
+#set0 = affine_set<(d0) : (1 == 0)>
// CHECK-LABEL: func @inline_notation
func @inline_notation() -> i32 {
diff --git a/mlir/test/IR/print-op-local-scope.mlir b/mlir/test/IR/print-op-local-scope.mlir
index 8fef248f4f1..b6f36052d70 100644
--- a/mlir/test/IR/print-op-local-scope.mlir
+++ b/mlir/test/IR/print-op-local-scope.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s -mlir-print-local-scope | FileCheck %s --dump-input-on-failure
-// CHECK: "foo.op"() : () -> memref<?xf32, (d0) -> (d0 * 2)>
-"foo.op"() : () -> (memref<?xf32, (d0) -> (2*d0)>)
+// CHECK: "foo.op"() : () -> memref<?xf32, affine_map<(d0) -> (d0 * 2)>>
+"foo.op"() : () -> (memref<?xf32, affine_map<(d0) -> (2*d0)>>)
diff --git a/mlir/test/Transforms/Vectorize/compose_maps.mlir b/mlir/test/Transforms/Vectorize/compose_maps.mlir
index f1826f440f2..1e6a0436e4b 100644
--- a/mlir/test/Transforms/Vectorize/compose_maps.mlir
+++ b/mlir/test/Transforms/Vectorize/compose_maps.mlir
@@ -11,121 +11,121 @@
func @simple1() {
// CHECK: Composed map: (d0) -> (d0)
- "test_affine_map"() { affine_map = (d0) -> (d0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 + 1) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 1)> } : () -> ()
return
}
func @simple2() {
// CHECK: Composed map: (d0)[s0, s1] -> (d0 - s0 + s1)
- "test_affine_map"() { affine_map = (d0)[s0] -> (d0 + s0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0)[s0] -> (d0 - s0 + 1) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 + s0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 - s0 + 1)> } : () -> ()
return
}
func @simple3a() {
// CHECK: Composed map: (d0, d1)[s0, s1, s2, s3] -> ((d0 ceildiv s2) * s0, (d1 ceildiv s3) * s1)
- "test_affine_map"() { affine_map = (d0, d1)[s0, s1] -> (d0 ceildiv s0, d1 ceildiv s1) } : () -> ()
- "test_affine_map"() { affine_map = (d0, d1)[s0, s1] -> (d0 * s0, d1 * s1) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 ceildiv s0, d1 ceildiv s1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 * s0, d1 * s1)> } : () -> ()
return
}
func @simple3b() {
// CHECK: Composed map: (d0, d1)[s0, s1] -> (d0 mod s0, d1 mod s1)
- "test_affine_map"() { affine_map = (d0, d1)[s0, s1] -> (d0 mod s0, d1 mod s1) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 mod s0, d1 mod s1)> } : () -> ()
return
}
func @simple3c() {
// CHECK: Composed map: (d0, d1)[s0, s1, s2, s3, s4, s5] -> ((d0 ceildiv s4) * s4 + d0 mod s2, (d1 ceildiv s5) * s5 + d1 mod s3)
- "test_affine_map"() { affine_map = (d0, d1)[s0, s1] -> ((d0 ceildiv s0) * s0, (d1 ceildiv s1) * s1, d0, d1) } : () -> ()
- "test_affine_map"() { affine_map = (d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 + d2 mod s2, d1 + d3 mod s3) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> ((d0 ceildiv s0) * s0, (d1 ceildiv s1) * s1, d0, d1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 + d2 mod s2, d1 + d3 mod s3)> } : () -> ()
return
}
func @simple4() {
// CHECK: Composed map: (d0, d1)[s0, s1] -> (d1 * s1, d0 ceildiv s0)
- "test_affine_map"() { affine_map = (d0, d1) -> (d1, d0) } : () -> ()
- "test_affine_map"() { affine_map = (d0, d1)[s0, s1] -> (d0 * s1, d1 ceildiv s0) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1) -> (d1, d0)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 * s1, d1 ceildiv s0)> } : () -> ()
return
}
func @simple5a() {
// CHECK: Composed map: (d0) -> (d0 * 3 + 18)
- "test_affine_map"() { affine_map = (d0) -> (d0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 + 7) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 * 24) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 ceildiv 8) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 * 24)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 ceildiv 8)> } : () -> ()
return
}
func @simple5b() {
// CHECK: Composed map: (d0) -> ((d0 + 6) ceildiv 2)
- "test_affine_map"() { affine_map = (d0) -> (d0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 + 7) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 * 4) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 ceildiv 8) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 * 4)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 ceildiv 8)> } : () -> ()
return
}
func @simple5c() {
// CHECK: Composed map: (d0) -> (d0 * 8 + 48)
- "test_affine_map"() { affine_map = (d0) -> (d0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 + 7) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 * 24) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 floordiv 3) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 * 24)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 floordiv 3)> } : () -> ()
return
}
func @simple5d() {
// CHECK: Composed map: (d0) -> ((d0 * 4) floordiv 3 + 8)
- "test_affine_map"() { affine_map = (d0) -> (d0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 + 7) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 * 4) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 floordiv 3) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 * 4)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 floordiv 3)> } : () -> ()
return
}
func @simple5e() {
// CHECK: Composed map: (d0) -> ((d0 + 6) ceildiv 8)
- "test_affine_map"() { affine_map = (d0) -> (d0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 + 7) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 ceildiv 8) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 ceildiv 8)> } : () -> ()
return
}
func @simple5f() {
// CHECK: Composed map: (d0) -> ((d0 * 4 - 4) floordiv 3)
- "test_affine_map"() { affine_map = (d0) -> (d0 - 1) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 * 4) } : () -> ()
- "test_affine_map"() { affine_map = (d0) -> (d0 floordiv 3) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 * 4)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 floordiv 3)> } : () -> ()
return
}
func @perm_and_proj() {
// CHECK: Composed map: (d0, d1, d2, d3) -> (d1, d3, d0)
- "test_affine_map"() { affine_map = (d0, d1, d2, d3) -> (d3, d1, d2, d0) } : () -> ()
- "test_affine_map"() { affine_map = (d0, d1, d2, d3) -> (d1, d0, d3) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2, d0)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1, d2, d3) -> (d1, d0, d3)> } : () -> ()
return
}
func @symbols1() {
// CHECK: Composed map: (d0)[s0] -> (d0 + s0 + 1, d0 - s0 - 1)
- "test_affine_map"() { affine_map = (d0)[s0] -> (d0 + s0, d0 - s0) } : () -> ()
- "test_affine_map"() { affine_map = (d0, d1) -> (d0 + 1, d1 - 1) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 + s0, d0 - s0)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1) -> (d0 + 1, d1 - 1)> } : () -> ()
return
}
func @drop() {
// CHECK: Composed map: (d0, d1, d2)[s0, s1] -> (d0 * 2 + d1 + d2 + s1)
- "test_affine_map"() { affine_map = (d0, d1, d2)[s0, s1] -> (d0 + s1, d1 + s0, d0 + d1 + d2) } : () -> ()
- "test_affine_map"() { affine_map = (d0, d1, d2) -> (d0 + d2) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1, d2)[s0, s1] -> (d0 + s1, d1 + s0, d0 + d1 + d2)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1, d2) -> (d0 + d2)> } : () -> ()
return
}
func @multi_symbols() {
// CHECK: Composed map: (d0)[s0, s1, s2] -> (d0 + s1 + s2 + 1, d0 - s0 - s2 - 1)
- "test_affine_map"() { affine_map = (d0)[s0] -> (d0 + s0, d0 - s0) } : () -> ()
- "test_affine_map"() { affine_map = (d0, d1)[s0, s1] -> (d0 + 1 + s1, d1 - 1 - s0) } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 + s0, d0 - s0)> } : () -> ()
+ "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 + 1 + s1, d1 - 1 - s0)> } : () -> ()
return
}
diff --git a/mlir/test/Transforms/Vectorize/normalize_maps.mlir b/mlir/test/Transforms/Vectorize/normalize_maps.mlir
index 4854a622642..0d778595742 100644
--- a/mlir/test/Transforms/Vectorize/normalize_maps.mlir
+++ b/mlir/test/Transforms/Vectorize/normalize_maps.mlir
@@ -1,19 +1,19 @@
// RUN: mlir-opt %s -affine-vectorizer-test -normalize-maps | FileCheck %s
-// CHECK-DAG: #[[ZERO:[a-zA-Z0-9]+]] = () -> (0)
-// CHECK-DAG: #[[ID1:[a-zA-Z0-9]+]] = (d0) -> (d0)
-// CHECK-DAG: #[[D0TIMES2:[a-zA-Z0-9]+]] = (d0) -> (d0 * 2)
-// CHECK-DAG: #[[D0PLUSD1:[a-zA-Z0-9]+]] = (d0, d1) -> (d0 + d1)
-// CHECK-DAG: #[[MINSD0PLUSD1:[a-zA-Z0-9]+]] = (d0, d1) -> (-d0 + d1)
-// CHECK-DAG: #[[D0MINUSD1:[a-zA-Z0-9]+]] = (d0, d1) -> (d0 - d1)
+// CHECK-DAG: #[[ZERO:[a-zA-Z0-9]+]] = affine_map<() -> (0)>
+// CHECK-DAG: #[[ID1:[a-zA-Z0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[D0TIMES2:[a-zA-Z0-9]+]] = affine_map<(d0) -> (d0 * 2)>
+// CHECK-DAG: #[[D0PLUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
+// CHECK-DAG: #[[MINSD0PLUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (-d0 + d1)>
+// CHECK-DAG: #[[D0MINUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 - d1)>
// CHECK-LABEL: func @simple()
func @simple() {
affine.for %i0 = 0 to 7 {
- %0 = affine.apply (d0) -> (d0) (%i0)
- %1 = affine.apply (d0) -> (d0) (%0)
- %2 = affine.apply (d0, d1) -> (d0 + d1) (%0, %0)
- %3 = affine.apply (d0, d1) -> (d0 - d1) (%0, %0)
+ %0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
+ %1 = affine.apply affine_map<(d0) -> (d0)> (%0)
+ %2 = affine.apply affine_map<(d0, d1) -> (d0 + d1)> (%0, %0)
+ %3 = affine.apply affine_map<(d0, d1) -> (d0 - d1)> (%0, %0)
}
// CHECK-NEXT: affine.for %{{.*}} = 0 to 7
// CHECK-NEXT: {{.*}} affine.apply #[[ID1]](%{{.*}})
@@ -22,11 +22,11 @@ func @simple() {
affine.for %i1 = 0 to 7 {
affine.for %i2 = 0 to 42 {
- %20 = affine.apply (d0, d1) -> (d1) (%i1, %i2)
- %21 = affine.apply (d0, d1) -> (d0) (%i1, %i2)
- %22 = affine.apply (d0, d1) -> (d0 + d1) (%20, %21)
- %23 = affine.apply (d0, d1) -> (d0 - d1) (%20, %21)
- %24 = affine.apply (d0, d1) -> (-d0 + d1) (%20, %21)
+ %20 = affine.apply affine_map<(d0, d1) -> (d1)> (%i1, %i2)
+ %21 = affine.apply affine_map<(d0, d1) -> (d0)> (%i1, %i2)
+ %22 = affine.apply affine_map<(d0, d1) -> (d0 + d1)> (%20, %21)
+ %23 = affine.apply affine_map<(d0, d1) -> (d0 - d1)> (%20, %21)
+ %24 = affine.apply affine_map<(d0, d1) -> (-d0 + d1)> (%20, %21)
}
}
// CHECK: affine.for %{{.*}} = 0 to 7
@@ -38,12 +38,12 @@ func @simple() {
affine.for %i3 = 0 to 16 {
affine.for %i4 = 0 to 47 step 2 {
affine.for %i5 = 0 to 78 step 16 {
- %50 = affine.apply (d0) -> (d0) (%i3)
- %51 = affine.apply (d0) -> (d0) (%i4)
- %52 = affine.apply (d0) -> (d0) (%i5)
- %53 = affine.apply (d0, d1, d2) -> (d0) (%50, %51, %52)
- %54 = affine.apply (d0, d1, d2) -> (d1) (%50, %51, %52)
- %55 = affine.apply (d0, d1, d2) -> (d2) (%50, %51, %52)
+ %50 = affine.apply affine_map<(d0) -> (d0)> (%i3)
+ %51 = affine.apply affine_map<(d0) -> (d0)> (%i4)
+ %52 = affine.apply affine_map<(d0) -> (d0)> (%i5)
+ %53 = affine.apply affine_map<(d0, d1, d2) -> (d0)> (%50, %51, %52)
+ %54 = affine.apply affine_map<(d0, d1, d2) -> (d1)> (%50, %51, %52)
+ %55 = affine.apply affine_map<(d0, d1, d2) -> (d2)> (%50, %51, %52)
}
}
}
diff --git a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
index 83f783c3aef..7fbb6fe0b22 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
@@ -1,14 +1,14 @@
// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 128 --test-fastest-varying=0 | FileCheck %s
// Permutation maps used in vectorization.
-// CHECK: #[[map_proj_d0d1_0:map[0-9]+]] = (d0, d1) -> (0)
-// CHECK: #[[map_proj_d0d1_d1:map[0-9]+]] = (d0, d1) -> (d1)
+// CHECK: #[[map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
+// CHECK: #[[map_proj_d0d1_d1:map[0-9]+]] = affine_map<(d0, d1) -> (d1)>
-#map0 = (d0) -> (d0)
-#mapadd1 = (d0) -> (d0 + 1)
-#mapadd2 = (d0) -> (d0 + 2)
-#mapadd3 = (d0) -> (d0 + 3)
-#set0 = (i) : (i >= 0)
+#map0 = affine_map<(d0) -> (d0)>
+#mapadd1 = affine_map<(d0) -> (d0 + 1)>
+#mapadd2 = affine_map<(d0) -> (d0 + 2)>
+#mapadd3 = affine_map<(d0) -> (d0 + 3)>
+#set0 = affine_set<(i) : (i >= 0)>
// Maps introduced to vectorize fastest varying memory index.
// CHECK-LABEL: func @vec1d_1
diff --git a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
index a7553092505..8fa3842edea 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
@@ -2,14 +2,14 @@
// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=1 --test-fastest-varying=0 | FileCheck %s
// Permutation maps used in vectorization.
-// CHECK-DAG: #[[map_id1:map[0-9]+]] = (d0) -> (d0)
-// CHECK-DAG: #[[map_id2:map[0-9]+]] = (d0, d1) -> (d0, d1)
-// CHECK-DAG: #[[map_proj_d0d1_zerod1:map[0-9]+]] = (d0, d1) -> (0, d1)
-// CHECK-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = (d0, d1) -> (d0, 0)
-// VECT-DAG: #[[map_id1:map[0-9]+]] = (d0) -> (d0)
-// VECT-DAG: #[[map_id2:map[0-9]+]] = (d0, d1) -> (d0, d1)
-// VECT-DAG: #[[map_proj_d0d1_zerod1:map[0-9]+]] = (d0, d1) -> (0, d1)
-// VECT-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = (d0, d1) -> (d0, 0)
+// CHECK-DAG: #[[map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[map_id2:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: #[[map_proj_d0d1_zerod1:map[0-9]+]] = affine_map<(d0, d1) -> (0, d1)>
+// CHECK-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
+// VECT-DAG: #[[map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
+// VECT-DAG: #[[map_id2:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
+// VECT-DAG: #[[map_proj_d0d1_zerod1:map[0-9]+]] = affine_map<(d0, d1) -> (0, d1)>
+// VECT-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
func @vec2d(%A : memref<?x?x?xf32>) {
%M = dim %A, 0 : memref<?x?x?xf32>
@@ -111,8 +111,8 @@ func @vectorize_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: me
// VECT-NEXT: {{.*}} #[[map_id1]](%[[N]]) step 8 {
// VECT: %[[VC0:.*]] = constant dense<0.000000e+00> : vector<4x8xf32>
// VECT-NEXT: vector.transfer_write %[[VC0]], %{{.*}}[%{{.*}}, %{{.*}}] {permutation_map = #[[map_id2]]} : vector<4x8xf32>, memref<?x?xf32>
- affine.for %i0 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%M) {
- affine.for %i1 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%N) {
+ affine.for %i0 = affine_map<(d0) -> (d0)>(%c0) to affine_map<(d0) -> (d0)>(%M) {
+ affine.for %i1 = affine_map<(d0) -> (d0)>(%c0) to affine_map<(d0) -> (d0)>(%N) {
%cst = constant 0.000000e+00 : f32
affine.store %cst, %arg2[%i0, %i1] : memref<?x?xf32>
}
@@ -126,9 +126,9 @@ func @vectorize_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: me
// VECT-NEXT: %[[D:.*]] = vector.transfer_read %{{.*}}[%[[I2]], %[[I3]]], %{{.*}} {permutation_map = #[[map_id2]]} : memref<?x?xf32>, vector<4x8xf32>
// VECT-NEXT: %[[E:.*]] = addf %[[D]], %[[C]] : vector<4x8xf32>
// VECT-NEXT: vector.transfer_write %[[E]], %{{.*}}[%[[I2]], %[[I3]]] {permutation_map = #[[map_id2]]} : vector<4x8xf32>, memref<?x?xf32>
- affine.for %i2 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%M) {
- affine.for %i3 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%N) {
- affine.for %i4 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%K) {
+ affine.for %i2 = affine_map<(d0) -> (d0)>(%c0) to affine_map<(d0) -> (d0)>(%M) {
+ affine.for %i3 = affine_map<(d0) -> (d0)>(%c0) to affine_map<(d0) -> (d0)>(%N) {
+ affine.for %i4 = affine_map<(d0) -> (d0)>(%c0) to affine_map<(d0) -> (d0)>(%K) {
%6 = affine.load %arg1[%i4, %i3] : memref<?x?xf32>
%7 = affine.load %arg0[%i2, %i4] : memref<?x?xf32>
%8 = mulf %7, %6 : f32
diff --git a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
index df60806155a..b7355c6e3cf 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 64 -virtual-vector-size 256 --test-fastest-varying=2 --test-fastest-varying=1 --test-fastest-varying=0 | FileCheck %s
// Permutation maps used in vectorization.
-// CHECK: #[[map_proj_d0d1d2_d0d1d2:map[0-9]+]] = (d0, d1, d2) -> (d0, d1, d2)
+// CHECK: #[[map_proj_d0d1d2_d0d1d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
func @vec3d(%A : memref<?x?x?xf32>) {
%0 = dim %A, 0 : memref<?x?x?xf32>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
index e398144a222..39350c88610 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=2 --test-fastest-varying=0 | FileCheck %s
// Permutation maps used in vectorization.
-// CHECK: #[[map_proj_d0d1d2_d0d2:map[0-9]+]] = (d0, d1, d2) -> (d0, d2)
+// CHECK: #[[map_proj_d0d1d2_d0d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
func @vec2d(%A : memref<?x?x?xf32>) {
%M = dim %A, 0 : memref<?x?x?xf32>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
index d2de5f8d159..bac0c0cdb58 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=0 --test-fastest-varying=2 | FileCheck %s
// Permutation maps used in vectorization.
-// CHECK: #[[map_proj_d0d1d2_d2d0:map[0-9]+]] = (d0, d1, d2) -> (d2, d0)
+// CHECK: #[[map_proj_d0d1d2_d2d0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d0)>
func @vec2d(%A : memref<?x?x?xf32>) {
%M = dim %A, 0 : memref<?x?x?xf32>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
index 765cd07ce7d..d86ad1ccbde 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -affine-vectorize -virtual-vector-size 32 -virtual-vector-size 256 --test-fastest-varying=0 --test-fastest-varying=1 | FileCheck %s
// Permutation maps used in vectorization.
-// CHECK-DAG: #[[map_proj_d0d1d2_d2d1:map[0-9]+]] = (d0, d1, d2) -> (d2, d1)
+// CHECK-DAG: #[[map_proj_d0d1d2_d2d1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
func @vec2d(%A : memref<?x?x?xf32>) {
%M = dim %A, 0 : memref<?x?x?xf32>
diff --git a/mlir/test/Transforms/affine-data-copy.mlir b/mlir/test/Transforms/affine-data-copy.mlir
index 5a0b2eb058c..c83beb18302 100644
--- a/mlir/test/Transforms/affine-data-copy.mlir
+++ b/mlir/test/Transforms/affine-data-copy.mlir
@@ -7,13 +7,13 @@
// footprint -- so that one could write a definite test case and not have to
// update it each time something related to the cost functions change.
-#map0 = (d0) -> (d0)
-#map1 = (d0) -> (d0 + 128)
+#map0 = affine_map<(d0) -> (d0)>
+#map1 = affine_map<(d0) -> (d0 + 128)>
// Map used to index the original memref while copying.
-// CHECK-DAG: [[MEM_IDX_MAP:map[0-9]+]] = (d0, d1) -> (d0 + d1)
+// CHECK-DAG: [[MEM_IDX_MAP:map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
// Map used to index the buffer while computing.
-// CHECK-DAG: [[BUF_IDX_MAP:map[0-9]+]] = (d0, d1, d2, d3) -> (-d0 + d2, -d1 + d3)
+// CHECK-DAG: [[BUF_IDX_MAP:map[0-9]+]] = affine_map<(d0, d1, d2, d3) -> (-d0 + d2, -d1 + d3)>
// CHECK-LABEL: func @matmul
func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<4096x4096xf32>) -> memref<4096x4096xf32> {
diff --git a/mlir/test/Transforms/affine-loop-invariant-code-motion.mlir b/mlir/test/Transforms/affine-loop-invariant-code-motion.mlir
index f7143b7ad7d..4c9c0dbbf77 100644
--- a/mlir/test/Transforms/affine-loop-invariant-code-motion.mlir
+++ b/mlir/test/Transforms/affine-loop-invariant-code-motion.mlir
@@ -29,7 +29,7 @@ func @store_affine_apply() -> memref<10xf32> {
%cf7 = constant 7.0 : f32
%m = alloc() : memref<10xf32>
affine.for %arg0 = 0 to 10 {
- %t0 = affine.apply (d1) -> (d1 + 1)(%arg0)
+ %t0 = affine.apply affine_map<(d1) -> (d1 + 1)>(%arg0)
affine.store %cf7, %m[%t0] : memref<10xf32>
}
return %m : memref<10xf32>
@@ -87,8 +87,8 @@ func @invariant_code_inside_affine_if() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
- %t0 = affine.apply (d1) -> (d1 + 1)(%arg0)
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %t0) {
+ %t0 = affine.apply affine_map<(d1) -> (d1 + 1)>(%arg0)
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %t0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg0] : memref<10xf32>
@@ -224,7 +224,7 @@ func @invariant_affine_if() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg0] : memref<10xf32>
@@ -249,7 +249,7 @@ func @invariant_affine_if2() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg1] : memref<10xf32>
@@ -276,10 +276,10 @@ func @invariant_affine_nested_if() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg0] : memref<10xf32>
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
affine.store %cf9, %m[%arg1] : memref<10xf32>
}
}
@@ -308,10 +308,10 @@ func @invariant_affine_nested_if_else() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg0] : memref<10xf32>
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
affine.store %cf9, %m[%arg0] : memref<10xf32>
} else {
affine.store %cf9, %m[%arg1] : memref<10xf32>
@@ -345,10 +345,10 @@ func @invariant_affine_nested_if_else2() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
%tload1 = affine.load %m[%arg0] : memref<10xf32>
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
affine.store %cf9, %m2[%arg0] : memref<10xf32>
} else {
%tload2 = affine.load %m[%arg0] : memref<10xf32>
@@ -381,10 +381,10 @@ func @invariant_affine_nested_if2() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
%v1 = affine.load %m[%arg0] : memref<10xf32>
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%v2 = affine.load %m[%arg0] : memref<10xf32>
}
}
@@ -411,7 +411,7 @@ func @invariant_affine_for_inside_affine_if() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg0] : memref<10xf32>
affine.for %arg2 = 0 to 10 {
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index 16ee00923d2..e7be915159b 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -418,8 +418,8 @@ func @dyn_shape_fold(%L : index, %M : index) -> (memref<? x ? x i32>, memref<? x
return %c, %d : memref<? x ? x i32>, memref<? x ? x f32>
}
-#map1 = (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)
-#map2 = (d0, d1, d2)[s0, s1, s2] -> (d0 * s2 + d1 * s1 + d2 + s0)
+#map1 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>
+#map2 = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s2 + d1 * s1 + d2 + s0)>
// CHECK-LABEL: func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index,
func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>, %M : index, %N : index, %K : index) {
@@ -499,8 +499,8 @@ func @hoist_constant(%arg0: memref<8xi32>) {
func @const_fold_propagate() -> memref<?x?xf32> {
%VT_i = constant 512 : index
- %VT_i_s = affine.apply (d0) -> (d0 floordiv 8) (%VT_i)
- %VT_k_l = affine.apply (d0) -> (d0 floordiv 16) (%VT_i)
+ %VT_i_s = affine.apply affine_map<(d0) -> (d0 floordiv 8)> (%VT_i)
+ %VT_k_l = affine.apply affine_map<(d0) -> (d0 floordiv 16)> (%VT_i)
// CHECK: = alloc() : memref<64x32xf32>
%Av = alloc(%VT_i_s, %VT_k_l) : memref<?x?xf32>
@@ -568,7 +568,7 @@ func @indirect_call_folding() {
//
// IMPORTANT NOTE: the operations in this test are exactly those produced by
-// lowering affine.apply (i) -> (i mod 42) to standard operations. Please only
+// lowering affine.apply affine_map<(i) -> (i mod 42)> to standard operations. Please only
// change these operations together with the affine lowering pass tests.
//
// CHECK-LABEL: @lowered_affine_mod
@@ -594,7 +594,7 @@ func @lowered_affine_mod() -> (index, index) {
//
// IMPORTANT NOTE: the operations in this test are exactly those produced by
-// lowering affine.apply (i) -> (i mod 42) to standard operations. Please only
+// lowering affine.apply affine_map<(i) -> (i mod 42)> to standard operations. Please only
// change these operations together with the affine lowering pass tests.
//
// CHECK-LABEL: func @lowered_affine_floordiv
@@ -626,7 +626,7 @@ func @lowered_affine_floordiv() -> (index, index) {
//
// IMPORTANT NOTE: the operations in this test are exactly those produced by
-// lowering affine.apply (i) -> (i mod 42) to standard operations. Please only
+// lowering affine.apply affine_map<(i) -> (i mod 42)> to standard operations. Please only
// change these operations together with the affine lowering pass tests.
//
// CHECK-LABEL: func @lowered_affine_ceildiv
@@ -683,15 +683,15 @@ func @cast_values(%arg0: tensor<*xi32>, %arg1: memref<?xi32>) -> (tensor<2xi32>,
// -----
-#TEST_VIEW_MAP0 = (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)
-#TEST_VIEW_MAP1 = (d0, d1, d2)[s0, s1] -> (d0 * s1 + d1 * s0 + d2)
-#TEST_VIEW_MAP2 = (d0, d1)[s0] -> (d0 * 4 + d1 + s0)
+#TEST_VIEW_MAP0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
+#TEST_VIEW_MAP1 = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s1 + d1 * s0 + d2)>
+#TEST_VIEW_MAP2 = affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
-// CHECK-DAG: #[[VIEW_MAP0:map[0-9]+]] = (d0, d1) -> (d0 * 11 + d1 + 15)
-// CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = (d0, d1)[s0] -> (d0 * 11 + s0 + d1)
-// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = (d0, d1)[s0] -> (d0 * s0 + d1 + 15)
-// CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = (d0, d1, d2)[s0] -> (d0 * s0 + d1 * 7 + d2)
-// CHECK-DAG: #[[VIEW_MAP4:map[0-9]+]] = (d0, d1) -> (d0 * 4 + d1 + 15)
+// CHECK-DAG: #[[VIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 11 + d1 + 15)>
+// CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 11 + s0 + d1)>
+// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1 + 15)>
+// CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 7 + d2)>
+// CHECK-DAG: #[[VIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 15)>
// CHECK-LABEL: func @view
func @view(%arg0 : index) {
@@ -745,15 +745,15 @@ func @view(%arg0 : index) {
// -----
-// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)
-// CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = (d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)
-// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)
-// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = (d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)
-// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)
-// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = (d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)
-// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = (d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)
-// CHECK-DAG: #[[SUBVIEW_MAP6:map[0-9]+]] = (d0, d1)[s0] -> (d0 * 4 + s0 + d1)
-// CHECK-DAG: #[[SUBVIEW_MAP7:map[0-9]+]] = (d0, d1) -> (d0 * 4 + d1 + 12)
+// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+// CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)>
+// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)>
+// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)>
+// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
+// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)>
+// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)>
+// CHECK-DAG: #[[SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)>
+// CHECK-DAG: #[[SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 12)>
// CHECK-LABEL: func @subview
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
@@ -771,56 +771,56 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
%c15 = constant 15 : index
// CHECK: %[[ALLOC0:.*]] = alloc()
- %0 = alloc() : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+ %0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
// Test: subview with constant base memref and constant operands is folded.
// Note that the subview uses the base memrefs layout map because it used
// zero offset and unit stride arguments.
// CHECK: std.subview %[[ALLOC0]][][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[BASE_MAP0]]>
%1 = subview %0[%c0, %c0, %c0][%c7, %c11, %c2][%c1, %c1, %c1]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
load %1[%c0, %c0, %c0] : memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview with one dynamic operand should not be folded.
// CHECK: std.subview %[[ALLOC0]][%[[C0]], %[[ARG0]], %[[C0]]][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[SUBVIEW_MAP0]]>
%2 = subview %0[%c0, %arg0, %c0][%c7, %c11, %c15][%c1, %c1, %c1]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
load %2[%c0, %c0, %c0] : memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// CHECK: %[[ALLOC1:.*]] = alloc(%[[ARG0]])
- %3 = alloc(%arg0) : memref<?x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+ %3 = alloc(%arg0) : memref<?x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
// Test: subview with constant operands but dynamic base memref is folded as long as the strides and offset of the base memref are static.
// CHECK: std.subview %[[ALLOC1]][][][] : memref<?x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[BASE_MAP0]]>
%4 = subview %3[%c0, %c0, %c0][%c7, %c11, %c15][%c1, %c1, %c1]
- : memref<?x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
+ : memref<?x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
load %4[%c0, %c0, %c0] : memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview offset operands are folded correctly w.r.t. base strides.
// CHECK: std.subview %[[ALLOC0]][][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP1]]>
%5 = subview %0[%c1, %c2, %c7][%c7, %c11, %c2][%c1, %c1, %c1]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
load %5[%c0, %c0, %c0] : memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview stride operands are folded correctly w.r.t. base strides.
// CHECK: std.subview %[[ALLOC0]][][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP2]]>
%6 = subview %0[%c0, %c0, %c0][%c7, %c11, %c2][%c2, %c7, %c11]
- : memref<8x16x4xf32, (d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> to
+ : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
load %6[%c0, %c0, %c0] : memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview shape are folded, but offsets and strides are not even if base memref is static
// CHECK: std.subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]][][%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
@@ -870,9 +870,9 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: dim on subview is rewritten to size operand.
%7 = dim %4, 0 : memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
%8 = dim %4, 1 : memref<?x?x?xf32,
- (d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>
+ affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// CHECK: return %[[C7]], %[[C11]]
return %7, %8 : index, index
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
index a24aad2847a..b156ca91875 100644
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -170,13 +170,13 @@ func @affine_apply(%variable : index) -> (index, index, index) {
// CHECK:[[C1159:%.+]] = constant 1159 : index
// CHECK:[[C1152:%.+]] = constant 1152 : index
- %x0 = affine.apply (d0, d1)[S0] -> ( (d0 + 128 * S0) floordiv 128 + d1 mod 128)
+ %x0 = affine.apply affine_map<(d0, d1)[S0] -> ( (d0 + 128 * S0) floordiv 128 + d1 mod 128)>
(%c177, %c211)[%N]
- %x1 = affine.apply (d0, d1)[S0] -> (128 * (S0 ceildiv 128))
+ %x1 = affine.apply affine_map<(d0, d1)[S0] -> (128 * (S0 ceildiv 128))>
(%c177, %c211)[%N]
// CHECK:[[C42:%.+]] = constant 42 : index
- %y = affine.apply (d0) -> (42) (%variable)
+ %y = affine.apply affine_map<(d0) -> (42)> (%variable)
// CHECK: return [[C1159]], [[C1152]], [[C42]]
return %x0, %x1, %y : index, index, index
diff --git a/mlir/test/Transforms/cse.mlir b/mlir/test/Transforms/cse.mlir
index 8cc41e6a1a0..8e526176a02 100644
--- a/mlir/test/Transforms/cse.mlir
+++ b/mlir/test/Transforms/cse.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -pass-pipeline='func(cse)' | FileCheck %s
-// CHECK-DAG: #map0 = (d0) -> (d0 mod 2)
-#map0 = (d0) -> (d0 mod 2)
+// CHECK-DAG: #map0 = affine_map<(d0) -> (d0 mod 2)>
+#map0 = affine_map<(d0) -> (d0 mod 2)>
// CHECK-LABEL: @simple_constant
func @simple_constant() -> (i32, i32) {
diff --git a/mlir/test/Transforms/dma-generate.mlir b/mlir/test/Transforms/dma-generate.mlir
index 0ca34554287..9724f990f97 100644
--- a/mlir/test/Transforms/dma-generate.mlir
+++ b/mlir/test/Transforms/dma-generate.mlir
@@ -13,8 +13,8 @@
// -----
// Index of the buffer for the second DMA is remapped.
-// CHECK-DAG: [[MAP_PLUS_256:#map[0-9]+]] = (d0) -> (d0 + 256)
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0)
+// CHECK-DAG: [[MAP_PLUS_256:#map[0-9]+]] = affine_map<(d0) -> (d0 + 256)>
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0)>
// CHECK-LABEL: func @loop_nest_1d() {
func @loop_nest_1d() {
@@ -52,7 +52,7 @@ func @loop_nest_1d() {
// CHECK-NEXT: return
affine.for %i = 0 to 256 {
affine.load %A[%i] : memref<256 x f32>
- %idx = affine.apply (d0) -> (d0 + 256)(%i)
+ %idx = affine.apply affine_map<(d0) -> (d0 + 256)>(%i)
affine.load %B[%idx] : memref<512 x f32>
affine.load %F[%i] : memref<256 x f32, 2>
}
@@ -124,18 +124,18 @@ func @loop_nest_high_d(%A: memref<512 x 32 x f32>,
affine.for %kT = 0 to 32 {
affine.for %iT = 0 to 32 {
affine.for %kk = 0 to 16 { // k intratile
- %k = affine.apply (d0, d1) -> (16*d0 + d1) (%kT, %kk)
+ %k = affine.apply affine_map<(d0, d1) -> (16*d0 + d1)> (%kT, %kk)
%v0 = affine.load %B[%k, %jT] : memref<512 x 32 x f32>
"foo"(%v0) : (f32) -> ()
}
affine.for %ii = 0 to 16 { // i intratile.
- %i = affine.apply (d0, d1) -> (16*d0 + d1)(%iT, %ii)
+ %i = affine.apply affine_map<(d0, d1) -> (16*d0 + d1)>(%iT, %ii)
%v1 = affine.load %A[%i, %kT] : memref<512 x 32 x f32>
"bar"(%v1) : (f32) -> ()
}
affine.for %ii_ = 0 to 16 { // i intratile.
%v2 = "abc_compute"() : () -> f32
- %i_ = affine.apply (d0, d1) -> (16*d0 + d1)(%iT, %ii_)
+ %i_ = affine.apply affine_map<(d0, d1) -> (16*d0 + d1)>(%iT, %ii_)
%v3 = affine.load %C[%i_, %jT] : memref<512 x 32 x f32>
%v4 = "addf32"(%v2, %v3) : (f32, f32) -> (f32)
affine.store %v4, %C[%i_, %jT] : memref<512 x 32 x f32>
@@ -174,7 +174,7 @@ func @loop_nest_modulo() {
affine.for %i = 0 to 32 step 4 {
// DMAs will be performed at this level (%j is the first unit stride loop)
affine.for %j = 0 to 8 {
- %idx = affine.apply (d0) -> (d0 mod 2) (%j)
+ %idx = affine.apply affine_map<(d0) -> (d0 mod 2)> (%j)
// A buffer of size 32 x 2 will be allocated (original buffer was 256 x 8).
%v = affine.load %A[%i, %idx] : memref<256 x 8 x f32>
}
@@ -198,8 +198,8 @@ func @loop_nest_tiled() -> memref<256x1024xf32> {
// CHECK-NEXT: affine.dma_wait
// CHECK-NEXT: affine.for %{{.*}} = #map
// CHECK-NEXT: affine.for %{{.*}} = #map
- affine.for %i2 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 32)(%i0) {
- affine.for %i3 = (d0) -> (d0)(%i1) to (d0) -> (d0 + 32)(%i1) {
+ affine.for %i2 = affine_map<(d0) -> (d0)>(%i0) to affine_map<(d0) -> (d0 + 32)>(%i0) {
+ affine.for %i3 = affine_map<(d0) -> (d0)>(%i1) to affine_map<(d0) -> (d0 + 32)>(%i1) {
// CHECK: %{{.*}} = affine.load %{{.*}}[-%{{.*}} + %{{.*}}, -%{{.*}} + %{{.*}}] : memref<32x32xf32, 2>
%1 = affine.load %0[%i2, %i3] : memref<256x1024xf32>
} // CHECK-NEXT: }
@@ -221,7 +221,7 @@ func @dma_constant_dim_access(%A : memref<100x100xf32>) {
// CHECK: affine.dma_start %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}}], %{{.*}} : memref<100x100xf32>, memref<1x100xf32, 2>,
// CHECK-NEXT: affine.dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xi32>
affine.for %i = 0 to 100 {
- affine.for %j = 0 to ()[s0] -> (s0) ()[%N] {
+ affine.for %j = 0 to affine_map<()[s0] -> (s0)> ()[%N] {
// CHECK: %{{.*}} = affine.load %{{.*}}[0, %{{.*}}] : memref<1x100xf32, 2>
affine.load %A[%one, %j] : memref<100 x 100 x f32>
}
@@ -231,14 +231,14 @@ func @dma_constant_dim_access(%A : memref<100x100xf32>) {
// -----
-// CHECK-DAG: [[MAP_SYM_SHIFT:#map[0-9]+]] = (d0, d1)[s0, s1] -> (d1 + s0 + s1)
+// CHECK-DAG: [[MAP_SYM_SHIFT:#map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d1 + s0 + s1)>
// CHECK-LABEL: func @dma_with_symbolic_accesses
func @dma_with_symbolic_accesses(%A : memref<100x100xf32>, %M : index) {
%N = constant 9 : index
affine.for %i = 0 to 100 {
affine.for %j = 0 to 100 {
- %idy = affine.apply (d0, d1) [s0, s1] -> (d1 + s0 + s1)(%i, %j)[%M, %N]
+ %idy = affine.apply affine_map<(d0, d1) [s0, s1] -> (d1 + s0 + s1)>(%i, %j)[%M, %N]
affine.load %A[%i, %idy] : memref<100 x 100 x f32>
}
}
@@ -269,7 +269,7 @@ func @dma_with_symbolic_loop_bounds(%A : memref<100x100xf32>, %M : index, %N: in
// CHECK-NEXT: affine.dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xi32>
affine.for %i = 0 to 100 {
affine.for %j = %M to %N {
- %idy = affine.apply (d1) [s0] -> (d1 + s0)(%j)[%K]
+ %idy = affine.apply affine_map<(d1) [s0] -> (d1 + s0)>(%j)[%K]
affine.load %A[%i, %idy] : memref<100 x 100 x f32>
}
}
@@ -301,9 +301,9 @@ func @dma_memref_3d(%arg0: memref<1024x1024x1024xf32>) {
affine.for %i = 0 to 1024 {
affine.for %j = 0 to 1024 {
affine.for %k = 0 to 1024 {
- %idx = affine.apply (d0) -> (d0 mod 128)(%i)
- %idy = affine.apply (d0) -> (d0 mod 128)(%j)
- %idz = affine.apply (d0) -> (d0 mod 128)(%k)
+ %idx = affine.apply affine_map<(d0) -> (d0 mod 128)>(%i)
+ %idy = affine.apply affine_map<(d0) -> (d0 mod 128)>(%j)
+ %idz = affine.apply affine_map<(d0) -> (d0 mod 128)>(%k)
// DMA with nested striding (or emulating with loop around strided DMA)
// not yet implemented.
// CHECK: %{{.*}} = affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<1024x1024x1024xf32>
@@ -317,10 +317,10 @@ func @dma_memref_3d(%arg0: memref<1024x1024x1024xf32>) {
// -----
-// CHECK-DAG: [[MAP_PLUS_64:#map[0-9]+]] = (d0) -> (d0 + 64)
-// CHECK-DAG: [[MAP_PLUS_128:#map[0-9]+]] = (d0) -> (d0 + 128)
-// CHECK-DAG: [[MAP_PLUS_2:#map[0-9]+]] = (d0) -> (d0 + 2)
-// CHECK-DAG: [[MAP_PLUS_192:#map[0-9]+]] = (d0) -> (d0 + 192)
+// CHECK-DAG: [[MAP_PLUS_64:#map[0-9]+]] = affine_map<(d0) -> (d0 + 64)>
+// CHECK-DAG: [[MAP_PLUS_128:#map[0-9]+]] = affine_map<(d0) -> (d0 + 128)>
+// CHECK-DAG: [[MAP_PLUS_2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
+// CHECK-DAG: [[MAP_PLUS_192:#map[0-9]+]] = affine_map<(d0) -> (d0 + 192)>
// The first load accesses ([2,258), [128,384))
// The second load accesses ([64,320), [2,258))
@@ -334,16 +334,16 @@ func @multi_load_store_union() {
%A = alloc() : memref<512 x 512 x f32>
affine.for %i = 0 to 256 {
affine.for %j = 0 to 256 {
- %idx = affine.apply (d0) -> (d0 + 64)(%i)
- %idy = affine.apply (d0) -> (d0 + 128)(%j)
- %ishift = affine.apply (d0) -> (d0 + 2)(%i)
- %jshift = affine.apply (d0) -> (d0 + 2)(%j)
+ %idx = affine.apply affine_map<(d0) -> (d0 + 64)>(%i)
+ %idy = affine.apply affine_map<(d0) -> (d0 + 128)>(%j)
+ %ishift = affine.apply affine_map<(d0) -> (d0 + 2)>(%i)
+ %jshift = affine.apply affine_map<(d0) -> (d0 + 2)>(%j)
%u = affine.load %A[%ishift, %idy] : memref<512 x 512 x f32>
%v = affine.load %A[%idx, %jshift] : memref<512 x 512 x f32>
- %sidx = affine.apply (d0) -> (d0 + 128)(%i)
- %sidy = affine.apply (d0) -> (d0 + 192)(%j)
+ %sidx = affine.apply affine_map<(d0) -> (d0 + 128)>(%i)
+ %sidy = affine.apply affine_map<(d0) -> (d0 + 192)>(%j)
affine.store %u, %A[%ishift, %sidy] : memref<512 x 512 x f32>
affine.store %v, %A[%sidx, %jshift] : memref<512 x 512 x f32>
@@ -458,7 +458,7 @@ func @dma_mixed_loop_blocks() {
// CHECK-LABEL: func @relative_loop_bounds
func @relative_loop_bounds(%arg0: memref<1027xf32>) {
affine.for %i0 = 0 to 1024 {
- affine.for %i2 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 4)(%i0) {
+ affine.for %i2 = affine_map<(d0) -> (d0)>(%i0) to affine_map<(d0) -> (d0 + 4)>(%i0) {
%0 = constant 0.0 : f32
affine.store %0, %arg0[%i2] : memref<1027xf32>
}
@@ -478,8 +478,8 @@ func @relative_loop_bounds(%arg0: memref<1027xf32>) {
// -----
-// CHECK-DAG: [[MAP_READ_OFFSET:#map[0-9]+]] = (d0) -> (d0 + 100)
-// CHECK-DAG: [[MAP_WRITE_OFFSET:#map[0-9]+]] = (d0) -> (d0 + 25)
+// CHECK-DAG: [[MAP_READ_OFFSET:#map[0-9]+]] = affine_map<(d0) -> (d0 + 100)>
+// CHECK-DAG: [[MAP_WRITE_OFFSET:#map[0-9]+]] = affine_map<(d0) -> (d0 + 25)>
func @test_read_write_region_union() {
%0 = alloc() : memref<256xf32>
@@ -488,8 +488,8 @@ func @test_read_write_region_union() {
// read region: [100, 110)
// write region: [25, 35)
// union region: [25, 110)
- %a0 = affine.apply (d0) -> (d0 + 100)(%i0)
- %a1 = affine.apply (d0) -> (d0 + 25)(%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 + 100)>(%i0)
+ %a1 = affine.apply affine_map<(d0) -> (d0 + 25)>(%i0)
%1 = affine.load %0[%a0] : memref<256xf32>
affine.store %1, %0[%a1] : memref<256xf32>
}
@@ -515,9 +515,9 @@ func @test_read_write_region_union() {
// This should create a buffer of size 2 affine.for %arg2.
-#map_lb = (d0) -> (d0)
-#map_ub = (d0) -> (d0 + 3)
-#map_acc = (d0) -> (d0 floordiv 8)
+#map_lb = affine_map<(d0) -> (d0)>
+#map_ub = affine_map<(d0) -> (d0 + 3)>
+#map_acc = affine_map<(d0) -> (d0 floordiv 8)>
// CHECK-LABEL: func @test_analysis_util
func @test_analysis_util(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf32>, %arg2: memref<2xf32>) -> (memref<144x9xf32>, memref<2xf32>) {
%c0 = constant 0 : index
@@ -545,11 +545,11 @@ func @test_analysis_util(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf32>,
// ----
-#map3 = (d0) -> (d0)
-#map12 = (d0) -> (d0 + 3)
-#map14 = (d0, d1) -> ((d0 + d1 * 72) floordiv 2304 + ((((d0 + d1 * 72) mod 2304) mod 1152) mod 9) floordiv 3)
-#map15 = (d0, d1) -> ((d0 + d1 * 72) mod 2304 - (((d0 + d1 * 72) mod 2304) floordiv 1152) * 1151 - ((((d0 + d1 * 72) mod 2304) mod 1152) floordiv 9) * 9 - (((((d0 + d1 * 72) mod 2304) mod 1152) mod 9) floordiv 3) * 3)
-#map16 = (d0, d1) -> (((((d0 + d1 * 72) mod 2304) mod 1152) floordiv 9) floordiv 8)
+#map3 = affine_map<(d0) -> (d0)>
+#map12 = affine_map<(d0) -> (d0 + 3)>
+#map14 = affine_map<(d0, d1) -> ((d0 + d1 * 72) floordiv 2304 + ((((d0 + d1 * 72) mod 2304) mod 1152) mod 9) floordiv 3)>
+#map15 = affine_map<(d0, d1) -> ((d0 + d1 * 72) mod 2304 - (((d0 + d1 * 72) mod 2304) floordiv 1152) * 1151 - ((((d0 + d1 * 72) mod 2304) mod 1152) floordiv 9) * 9 - (((((d0 + d1 * 72) mod 2304) mod 1152) mod 9) floordiv 3) * 3)>
+#map16 = affine_map<(d0, d1) -> (((((d0 + d1 * 72) mod 2304) mod 1152) floordiv 9) floordiv 8)>
// Test for test case in b/128303048 #4.
func @test_memref_bounds(%arg0: memref<4x4x16x1xvector<8x128xf32>>, %arg1: memref<144x9xvector<8x128xf32>>, %arg2: memref<2xvector<8x128xf32>>) -> (memref<144x9xvector<8x128xf32>>, memref<2xvector<8x128xf32>>) {
%c0 = constant 0 : index
@@ -586,9 +586,9 @@ func @load_store_same_memref(%arg0: memref<256x1024xf32>) {
// FAST-MEM-16KB: affine.for %{{.*}}
affine.for %i1 = 0 to 1024 step 4 {
// FAST-MEM-16KB: affine.for %{{.*}}
- affine.for %i2 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 4)(%i0) {
+ affine.for %i2 = affine_map<(d0) -> (d0)>(%i0) to affine_map<(d0) -> (d0 + 4)>(%i0) {
// FAST-MEM-16KB: affine.for %{{.*}}
- affine.for %i3 = (d0) -> (d0)(%i1) to (d0) -> (d0 + 4)(%i1) {
+ affine.for %i3 = affine_map<(d0) -> (d0)>(%i1) to affine_map<(d0) -> (d0 + 4)>(%i1) {
%3 = affine.load %arg0[%i2, %i3] : memref<256x1024xf32>
%4 = mulf %3, %3 : f32
affine.store %4, %arg0[%i2, %i3] : memref<256x1024xf32>
@@ -610,8 +610,8 @@ func @load_store_same_memref(%arg0: memref<256x1024xf32>) {
// %arg0 and %arg1. So, its DMA can be hoisted one level up and placed under
// %j, while the DMAs for arg0 and arg1 appear right under the %k loop.
-#map0 = (d0) -> (d0)
-#map1 = (d0) -> (d0 + 4)
+#map0 = affine_map<(d0) -> (d0)>
+#map1 = affine_map<(d0) -> (d0 + 4)>
// FAST-MEM-16KB-LABEL: func @simple_matmul
func @simple_matmul(%arg0: memref<8x8xvector<64xf32>>, %arg1: memref<8x8xvector<64xf32>>, %arg2: memref<8x8xvector<64xf32>>) -> memref<8x8xvector<64xf32>> {
affine.for %i = 0 to 8 step 4 {
diff --git a/mlir/test/Transforms/loop-fusion-slice-computation.mlir b/mlir/test/Transforms/loop-fusion-slice-computation.mlir
index f6872c20131..dd1a8a339ca 100644
--- a/mlir/test/Transforms/loop-fusion-slice-computation.mlir
+++ b/mlir/test/Transforms/loop-fusion-slice-computation.mlir
@@ -28,12 +28,12 @@ func @slice_depth1_loop_nest_with_offsets() {
%cst = constant 7.000000e+00 : f32
affine.for %i0 = 0 to 16 {
// expected-remark@-1 {{slice ( src loop: 1, dst loop: 0, depth: 1 : insert point: (1, 2) loop bounds: [(d0) -> (d0 + 3), (d0) -> (d0 + 4)] )}}
- %a0 = affine.apply (d0) -> (d0 + 2)(%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 + 2)>(%i0)
affine.store %cst, %0[%a0] : memref<100xf32>
}
affine.for %i1 = 4 to 8 {
// expected-remark@-1 {{slice ( src loop: 0, dst loop: 1, depth: 1 : insert point: (1, 0) loop bounds: [(d0) -> (d0 - 3), (d0) -> (d0 - 2)] )}}
- %a1 = affine.apply (d0) -> (d0 - 1)(%i1)
+ %a1 = affine.apply affine_map<(d0) -> (d0 - 1)>(%i1)
%1 = affine.load %0[%a1] : memref<100xf32>
}
return
diff --git a/mlir/test/Transforms/loop-fusion.mlir b/mlir/test/Transforms/loop-fusion.mlir
index 339cc31f549..78b45d6a485 100644
--- a/mlir/test/Transforms/loop-fusion.mlir
+++ b/mlir/test/Transforms/loop-fusion.mlir
@@ -70,8 +70,8 @@ func @should_fuse_reduction_to_pointwise() {
// -----
-// CHECK-DAG: [[MAP_SHIFT_MINUS_ONE_R1:#map[0-9]+]] = (d0) -> (d0 - 1)
-// CHECK-DAG: [[MAP_SHIFT_BY_ONE:#map[0-9]+]] = (d0) -> (d0 + 1)
+// CHECK-DAG: [[MAP_SHIFT_MINUS_ONE_R1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
+// CHECK-DAG: [[MAP_SHIFT_BY_ONE:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
// CHECK-LABEL: func @should_fuse_loop_nests_with_shifts() {
func @should_fuse_loop_nests_with_shifts() {
@@ -80,8 +80,8 @@ func @should_fuse_loop_nests_with_shifts() {
affine.for %i0 = 0 to 9 {
affine.for %i1 = 0 to 9 {
- %idx = affine.apply (d0) -> (d0 + 1) (%i0)
- %idy = affine.apply (d0) -> (d0 + 1) (%i1)
+ %idx = affine.apply affine_map<(d0) -> (d0 + 1)> (%i0)
+ %idy = affine.apply affine_map<(d0) -> (d0 + 1)> (%i1)
affine.store %cf7, %a[%idx, %idy] : memref<10x10xf32>
}
}
@@ -413,7 +413,7 @@ func @should_fuse_no_top_level_access() {
// -----
-#set0 = (d0) : (1 == 0)
+#set0 = affine_set<(d0) : (1 == 0)>
// CHECK-LABEL: func @should_not_fuse_if_inst_at_top_level() {
func @should_not_fuse_if_inst_at_top_level() {
@@ -441,7 +441,7 @@ func @should_not_fuse_if_inst_at_top_level() {
// -----
-#set0 = (d0) : (1 == 0)
+#set0 = affine_set<(d0) : (1 == 0)>
// CHECK-LABEL: func @should_not_fuse_if_inst_in_loop_nest() {
func @should_not_fuse_if_inst_in_loop_nest() {
@@ -508,9 +508,9 @@ func @permute_and_fuse() {
// -----
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0 * 4 + d1)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0) -> (d0 floordiv 4)
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0) -> (d0 mod 4)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
+// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
// Reshape from a 64 x f32 to 16 x 4 x f32.
// CHECK-LABEL: func @fuse_reshape_64_16_4
@@ -519,8 +519,8 @@ func @fuse_reshape_64_16_4(%in : memref<64xf32>) {
affine.for %i0 = 0 to 64 {
%v = affine.load %in[%i0] : memref<64xf32>
- %idx = affine.apply (d0) -> (d0 floordiv 4) (%i0)
- %idy = affine.apply (d0) -> (d0 mod 4) (%i0)
+ %idx = affine.apply affine_map<(d0) -> (d0 floordiv 4)> (%i0)
+ %idy = affine.apply affine_map<(d0) -> (d0 mod 4)> (%i0)
affine.store %v, %out[%idx, %idy] : memref<16x4xf32>
}
@@ -540,9 +540,9 @@ func @fuse_reshape_64_16_4(%in : memref<64xf32>) {
}
// -----
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 floordiv 4)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0) -> (d0 mod 4)
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0, d1) -> (d0 * 4 + d1)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
+// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
// Reshape a 16x4xf32 to 64xf32.
// CHECK-LABEL: func @fuse_reshape_16_4_64
@@ -553,7 +553,7 @@ func @fuse_reshape_16_4_64() {
affine.for %i0 = 0 to 16 {
affine.for %i1 = 0 to 4 {
%v = affine.load %in[%i0, %i1] : memref<16x4xf32>
- %idx = affine.apply (d0, d1) -> (4*d0 + d1) (%i0, %i1)
+ %idx = affine.apply affine_map<(d0, d1) -> (4*d0 + d1)> (%i0, %i1)
affine.store %v, %out[%idx] : memref<64xf32>
}
}
@@ -604,13 +604,13 @@ func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
affine.for %ii = 0 to 64 {
affine.for %jj = 0 to 9 {
// Convert output coordinates to linear index.
- %a0 = affine.apply (d0, d1) -> (d0 * 9 + d1) (%ii, %jj)
- %0 = affine.apply (d0) -> (d0 floordiv (2 * 3 * 3 * 16 * 1))(%a0)
- %1 = affine.apply (d0) -> ((d0 mod 288) floordiv (3 * 3 * 16 * 1))(%a0)
- %2 = affine.apply (d0) -> (((d0 mod 288) mod 144) floordiv (3 * 16 * 1))(%a0)
- %3 = affine.apply (d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv (16 * 1))(%a0)
- %4 = affine.apply (d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)(%a0)
- %5 = affine.apply (d0) -> (((((d0 mod 144) mod 144) mod 48) mod 16) mod 1)(%a0)
+ %a0 = affine.apply affine_map<(d0, d1) -> (d0 * 9 + d1)> (%ii, %jj)
+ %0 = affine.apply affine_map<(d0) -> (d0 floordiv (2 * 3 * 3 * 16 * 1))>(%a0)
+ %1 = affine.apply affine_map<(d0) -> ((d0 mod 288) floordiv (3 * 3 * 16 * 1))>(%a0)
+ %2 = affine.apply affine_map<(d0) -> (((d0 mod 288) mod 144) floordiv (3 * 16 * 1))>(%a0)
+ %3 = affine.apply affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv (16 * 1))>(%a0)
+ %4 = affine.apply affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)>(%a0)
+ %5 = affine.apply affine_map<(d0) -> (((((d0 mod 144) mod 144) mod 48) mod 16) mod 1)>(%a0)
%v = affine.load %in[%0, %1, %2, %3, %4, %5] : memref<2x2x3x3x16x1xi32>
affine.store %v, %out[%ii, %jj] : memref<64x9xi32>
}
@@ -628,18 +628,18 @@ func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
// Everything above is fused to a single 2-d loop nest, and the 6-d tensor %in
// is eliminated if -memref-dataflow-opt is also supplied.
//
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> ((d0 * 9 + d1) floordiv 288)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0, d1) -> (((d0 * 9 + d1) mod 288) floordiv 144)
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0, d1) -> ((((d0 * 9 + d1) mod 288) mod 144) floordiv 48)
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = (d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) floordiv 16)
-// CHECK-DAG: [[MAP4:#map[0-9]+]] = (d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) mod 16)
-// CHECK-DAG: [[MAP11:#map[0-9]+]] = (d0, d1) -> (d0 * 9 + d1)
-// CHECK-DAG: [[MAP12:#map[0-9]+]] = (d0) -> (d0 floordiv 288)
-// CHECK-DAG: [[MAP13:#map[0-9]+]] = (d0) -> ((d0 mod 288) floordiv 144)
-// CHECK-DAG: [[MAP14:#map[0-9]+]] = (d0) -> (((d0 mod 288) mod 144) floordiv 48)
-// CHECK-DAG: [[MAP15:#map[0-9]+]] = (d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv 16)
-// CHECK-DAG: [[MAP16:#map[0-9]+]] = (d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)
-// CHECK-DAG: [[MAP17:#map[0-9]+]] = (d0) -> (0)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 9 + d1) floordiv 288)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 288) floordiv 144)>
+// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> ((((d0 * 9 + d1) mod 288) mod 144) floordiv 48)>
+// CHECK-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) floordiv 16)>
+// CHECK-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) mod 16)>
+// CHECK-DAG: [[MAP11:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 9 + d1)>
+// CHECK-DAG: [[MAP12:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 288)>
+// CHECK-DAG: [[MAP13:#map[0-9]+]] = affine_map<(d0) -> ((d0 mod 288) floordiv 144)>
+// CHECK-DAG: [[MAP14:#map[0-9]+]] = affine_map<(d0) -> (((d0 mod 288) mod 144) floordiv 48)>
+// CHECK-DAG: [[MAP15:#map[0-9]+]] = affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv 16)>
+// CHECK-DAG: [[MAP16:#map[0-9]+]] = affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)>
+// CHECK-DAG: [[MAP17:#map[0-9]+]] = affine_map<(d0) -> (0)>
//
// CHECK-LABEL: func @R6_to_R2_reshape
@@ -675,21 +675,21 @@ func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
// CHECK-LABEL: func @fuse_symbolic_bounds
func @fuse_symbolic_bounds(%M : index, %N : index) {
- %N_plus_5 = affine.apply (d0) -> (d0 + 5)(%N)
+ %N_plus_5 = affine.apply affine_map<(d0) -> (d0 + 5)>(%N)
%m = alloc(%M, %N_plus_5) : memref<? x ? x f32>
%c0 = constant 0.0 : f32
%s = constant 5 : index
affine.for %i0 = 0 to %M {
- affine.for %i1 = 0 to (d0) -> (d0 + 5) (%N) {
+ affine.for %i1 = 0 to affine_map<(d0) -> (d0 + 5)> (%N) {
affine.store %c0, %m[%i0, %i1] : memref<? x ? x f32>
}
}
affine.for %i2 = 0 to %M {
affine.for %i3 = 0 to %N {
- %idy = affine.apply (d0)[s0] -> (d0 + s0) (%i3)[%s]
+ %idy = affine.apply affine_map<(d0)[s0] -> (d0 + s0)> (%i3)[%s]
%v = affine.load %m[%i2, %idy] : memref<? x ? x f32>
}
}
@@ -790,7 +790,7 @@ func @should_fuse_at_src_depth1_and_dst_depth1() {
}
// -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0 * 10 + d1)
+// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 10 + d1)>
// CHECK-LABEL: func @should_fuse_src_depth1_at_dst_depth2
func @should_fuse_src_depth1_at_dst_depth2() {
@@ -803,7 +803,7 @@ func @should_fuse_src_depth1_at_dst_depth2() {
affine.for %i1 = 0 to 10 {
affine.for %i2 = 0 to 10 {
- %a0 = affine.apply (d0, d1) -> (d0 * 10 + d1) (%i1, %i2)
+ %a0 = affine.apply affine_map<(d0, d1) -> (d0 * 10 + d1)> (%i1, %i2)
%v0 = affine.load %a[%a0] : memref<100xf32>
}
}
@@ -1207,17 +1207,17 @@ func @R3_to_R2_reshape() {
affine.for %ii = 0 to 32 {
affine.for %jj = 0 to 3 {
- %a0 = affine.apply (d0, d1) -> (d0 * 3 + d1) (%ii, %jj)
- %idx = affine.apply (d0) -> (d0 floordiv (3 * 16)) (%a0)
+ %a0 = affine.apply affine_map<(d0, d1) -> (d0 * 3 + d1)> (%ii, %jj)
+ %idx = affine.apply affine_map<(d0) -> (d0 floordiv (3 * 16))> (%a0)
%v = affine.load %in[%idx, %jj, %c0]
: memref<2x3x16xi32>
}
}
return
}
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> ((d0 * 3 + d1) floordiv 48)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0, d1) -> (d0 * 3 + d1)
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0) -> (d0 floordiv 48)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 3 + d1) floordiv 48)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 3 + d1)>
+// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 48)>
// CHECK-LABEL: func @R3_to_R2_reshape()
// CHECK-DAG: %{{.*}} = alloc() : memref<1x1x1xi32>
@@ -1441,8 +1441,8 @@ func @should_fuse_and_preserve_dep_on_constant() {
// -----
-// CHECK: [[MAP2:#map[0-9]+]] = (d0, d1) -> (d0 * 16 - d1 + 15)
-// CHECK: [[MAP3:#map[0-9]+]] = (d0, d1) -> (d0 * 16 + d1)
+// CHECK: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 - d1 + 15)>
+// CHECK: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
// CHECK-LABEL: func @should_fuse_at_depth_above_loop_carried_dependence(%{{.*}}: memref<64x4xf32>, %{{.*}}: memref<64x4xf32>) {
func @should_fuse_at_depth_above_loop_carried_dependence(%arg0: memref<64x4xf32>, %arg1: memref<64x4xf32>) {
@@ -1456,19 +1456,19 @@ func @should_fuse_at_depth_above_loop_carried_dependence(%arg0: memref<64x4xf32>
affine.for %i2 = 0 to 4 {
affine.for %i3 = 0 to 4 {
affine.for %i4 = 0 to 16 {
- %1 = affine.apply (d0, d1) -> (d0 * 16 - d1 + 15)(%i3, %i4)
+ %1 = affine.apply affine_map<(d0, d1) -> (d0 * 16 - d1 + 15)>(%i3, %i4)
%2 = affine.load %arg1[%1, %i2] : memref<64x4xf32>
"op0"(%2) : (f32) -> ()
}
affine.for %i5 = 0 to 4 {
affine.for %i6 = 0 to 16 {
- %3 = affine.apply (d0, d1) -> (d0 * 16 - d1 + 15)(%i5, %i6)
+ %3 = affine.apply affine_map<(d0, d1) -> (d0 * 16 - d1 + 15)>(%i5, %i6)
%4 = affine.load %arg0[%3, %i3] : memref<64x4xf32>
"op1"(%4) : (f32) -> ()
}
affine.for %i7 = 0 to 16 {
%5 = "op2"() : () -> (f32)
- %6 = affine.apply (d0, d1) -> (d0 * 16 + d1)(%i5, %i7)
+ %6 = affine.apply affine_map<(d0, d1) -> (d0 * 16 + d1)>(%i5, %i7)
%7 = affine.load %out[%6, %i2] : memref<64x4xf32>
%8 = addf %7, %5 : f32
affine.store %8, %out[%6, %i2] : memref<64x4xf32>
@@ -1666,10 +1666,10 @@ func @should_fuse_live_out_writer(%arg0 : memref<10xf32>) -> memref<10xf32> {
// The fused slice has 16 iterations along %i0.
-// CHECK-DAG: [[MAP_LB:#map[0-9]+]] = (d0) -> (d0 * 16)
-// CHECK-DAG: [[MAP_UB:#map[0-9]+]] = (d0) -> (d0 * 16 + 16)
+// CHECK-DAG: [[MAP_LB:#map[0-9]+]] = affine_map<(d0) -> (d0 * 16)>
+// CHECK-DAG: [[MAP_UB:#map[0-9]+]] = affine_map<(d0) -> (d0 * 16 + 16)>
-#map = (d0, d1) -> (d0 * 16 + d1)
+#map = affine_map<(d0, d1) -> (d0 * 16 + d1)>
// CHECK-LABEL: slice_tile
func @slice_tile(%arg0: memref<128x8xf32>, %arg1: memref<32x8xf32>, %0 : f32) -> memref<32x8xf32> {
@@ -1732,9 +1732,9 @@ func @test_add_slice_bounds() {
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
affine.for %i2 = 0 to 10 {
- %a0 = affine.apply (d0) -> (d0) (%i0)
- %a1 = affine.apply (d0) -> (d0) (%i0)
- %a2 = affine.apply (d0, d1) -> (d0 - d1) (%a0, %a1)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%i0)
+ %a2 = affine.apply affine_map<(d0, d1) -> (d0 - d1)> (%a0, %a1)
affine.store %cf7, %a[%a2] : memref<10xf32>
}
}
@@ -1931,7 +1931,7 @@ func @should_not_slice_past_slice_barrier() {
// -----
-#map0 = (d0, d1) -> (d0 * 16 + d1)
+#map0 = affine_map<(d0, d1) -> (d0 * 16 + d1)>
func @fuse_across_dim_mismatch(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf32>, %arg2: memref<9xf32>) {
%1 = alloc() : memref<144x4xf32>
%2 = constant 0.0 : f32
@@ -1955,7 +1955,7 @@ func @fuse_across_dim_mismatch(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9x
}
return
}
-// MAXIMAL: #map0 = (d0, d1) -> (d0 * 16 + d1)
+// MAXIMAL: #map0 = affine_map<(d0, d1) -> (d0 * 16 + d1)>
// MAXIMAL-LABEL: func @fuse_across_dim_mismatch
// MAXIMAL: %{{.*}} = alloc() : memref<1x1xf32>
// MAXIMAL: affine.for %{{.*}} = 0 to 9 {
@@ -1973,14 +1973,14 @@ func @fuse_across_dim_mismatch(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9x
// -----
-#map3 = (d0, d1) -> ((d0 * 72 + d1) floordiv 2304)
-#map4 = (d0, d1) -> (((d0 * 72 + d1) mod 2304) floordiv 1152)
-#map5 = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)
-#map6 = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)
-#map7 = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) mod 3)
-#map10 = (d0, d1) -> (d0 * 16 + d1)
-#map11 = (d0, d1) -> (d0 * 16 + d1)
-#map12 = (d0, d1) -> (d0 * 16 - d1 + 15)
+#map3 = affine_map<(d0, d1) -> ((d0 * 72 + d1) floordiv 2304)>
+#map4 = affine_map<(d0, d1) -> (((d0 * 72 + d1) mod 2304) floordiv 1152)>
+#map5 = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)>
+#map6 = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)>
+#map7 = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) mod 3)>
+#map10 = affine_map<(d0, d1) -> (d0 * 16 + d1)>
+#map11 = affine_map<(d0, d1) -> (d0 * 16 + d1)>
+#map12 = affine_map<(d0, d1) -> (d0 * 16 - d1 + 15)>
func @fuse_across_varying_dims_complex(%arg0: f32) {
%c0 = constant 0 : index
%0 = alloc() : memref<2x2x3x3x16x1xf32>
@@ -2021,13 +2021,13 @@ func @fuse_across_varying_dims_complex(%arg0: f32) {
}
return
}
-// MAXIMAL-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> ((d0 * 72 + d1) floordiv 2304)
-// MAXIMAL-DAG: [[MAP1:#map[0-9]+]] = (d0, d1) -> (((d0 * 72 + d1) mod 2304) floordiv 1152)
-// MAXIMAL-DAG: [[MAP2:#map[0-9]+]] = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)
-// MAXIMAL-DAG: [[MAP3:#map[0-9]+]] = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)
-// MAXIMAL-DAG: [[MAP4:#map[0-9]+]] = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) mod 3)
-// MAXIMAL-DAG: [[MAP7:#map[0-9]+]] = (d0, d1) -> (d0 * 16 + d1)
-// MAXIMAL-DAG: [[MAP8:#map[0-9]+]] = (d0, d1) -> (d0 * 16 - d1 + 15)
+// MAXIMAL-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 72 + d1) floordiv 2304)>
+// MAXIMAL-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 72 + d1) mod 2304) floordiv 1152)>
+// MAXIMAL-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)>
+// MAXIMAL-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)>
+// MAXIMAL-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) mod 3)>
+// MAXIMAL-DAG: [[MAP7:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
+// MAXIMAL-DAG: [[MAP8:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 - d1 + 15)>
// MAXIMAL-LABEL: func @fuse_across_varying_dims_complex
// MAXIMAL-NEXT: %{{.*}} = alloc() : memref<64x1xf32>
// MAXIMAL-NEXT: %{{.*}} = constant 0 : index
diff --git a/mlir/test/Transforms/loop-invariant-code-motion.mlir b/mlir/test/Transforms/loop-invariant-code-motion.mlir
index 4d742acf246..1c39d56a28e 100644
--- a/mlir/test/Transforms/loop-invariant-code-motion.mlir
+++ b/mlir/test/Transforms/loop-invariant-code-motion.mlir
@@ -70,8 +70,8 @@ func @invariant_code_inside_affine_if() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
- %t0 = affine.apply (d1) -> (d1 + 1)(%arg0)
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %t0) {
+ %t0 = affine.apply affine_map<(d1) -> (d1 + 1)>(%arg0)
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %t0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg0] : memref<10xf32>
@@ -96,7 +96,7 @@ func @invariant_affine_if() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
}
}
@@ -117,7 +117,7 @@ func @invariant_affine_if2() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg1] : memref<10xf32>
}
@@ -142,9 +142,9 @@ func @invariant_affine_nested_if() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf10 = addf %cf9, %cf9 : f32
}
}
@@ -172,10 +172,10 @@ func @invariant_affine_nested_if_else() {
%cf8 = constant 8.0 : f32
affine.for %arg0 = 0 to 10 {
affine.for %arg1 = 0 to 10 {
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf9 = addf %cf8, %cf8 : f32
affine.store %cf9, %m[%arg0] : memref<10xf32>
- affine.if (d0, d1) : (d1 - d0 >= 0) (%arg0, %arg0) {
+ affine.if affine_set<(d0, d1) : (d1 - d0 >= 0)> (%arg0, %arg0) {
%cf10 = addf %cf9, %cf9 : f32
} else {
affine.store %cf9, %m[%arg1] : memref<10xf32>
diff --git a/mlir/test/Transforms/loop-tiling.mlir b/mlir/test/Transforms/loop-tiling.mlir
index cf0208faf4f..c0a583f52cf 100644
--- a/mlir/test/Transforms/loop-tiling.mlir
+++ b/mlir/test/Transforms/loop-tiling.mlir
@@ -3,9 +3,9 @@
// -----
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 + 32)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0) -> (d0 + 32, 50)
-// CHECK-DAG: [[IDENTITY:#map[0-9]+]] = (d0) -> (d0)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 32)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 32, 50)>
+// CHECK-DAG: [[IDENTITY:#map[0-9]+]] = affine_map<(d0) -> (d0)>
// CHECK-LABEL: func @loop_tiling()
// CHECK-NEXT: affine.for %{{.*}} = 0 to 256 step 32 {
@@ -55,18 +55,18 @@ func @loop_tiling() {
// -----
-// CHECK-DAG: [[IDENTITY:#map[0-9]+]] = (d0) -> (d0)
-// CHECK-DAG: [[LB:#map[0-9]+]] = ()[s0] -> (0, s0)
-// CHECK-DAG: [[UB:#map[0-9]+]] = ()[s0, s1] -> (s0, 4096 floordiv s1)
-// CHECK-DAG: [[UB_INTRA_TILE:#map[0-9]+]] = (d0)[s0, s1] -> (d0 + 32, s0, 4096 floordiv s1)
+// CHECK-DAG: [[IDENTITY:#map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[LB:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
+// CHECK-DAG: [[UB:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
+// CHECK-DAG: [[UB_INTRA_TILE:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0, 4096 floordiv s1)>
-#lb = ()[s0] -> (0, s0)
-#ub = ()[s0, s1] -> (s0, 4096 floordiv s1)
+#lb = affine_map<()[s0] -> (0, s0)>
+#ub = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
// CHECK-LABEL: func @loop_max_min_bound(%{{.*}}: memref<?xi32>, %{{.*}}: index, %{{.*}}: index) {
func @loop_max_min_bound(%A : memref<? x i32>, %L : index, %U : index) {
%M = dim %A, 0 : memref<? x i32>
affine.for %iTT = max #lb()[%L] to min #ub()[%M, %U] {
- %out = affine.apply (d0) -> (d0) (%iTT)
+ %out = affine.apply affine_map<(d0) -> (d0)> (%iTT)
}
return
// CHECK: affine.for %{{.*}} = max [[LB]]()[%{{.*}}] to min [[UB]]()[%{{.*}}, %{{.*}}] step 32 {
@@ -105,7 +105,7 @@ func @simple_matmul(%arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256
// -----
-// CHECK-DAG: [[UBMAP:#map[0-9]+]] = (d0)[s0] -> (d0 + 32, s0)
+// CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + 32, s0)>
func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
%cst = constant 0.000000e+00 : f32
@@ -148,13 +148,13 @@ func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref
// -----
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = ()[s0, s1] -> (s0 + s1)
-// CHECK-DAG: [[UBMAP:#map[0-9]+]] = (d0)[s0, s1] -> (d0 + 32, s0 + s1)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0 + s1)>
+// CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0 + s1)>
func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: index) {
%dim0 = dim %arg0, 0 : memref<?xf32>
- affine.for %i0 = 0 to ()[s0, s1] -> (s0 + s1) ()[%dim0, %limit] {
+ affine.for %i0 = 0 to affine_map<()[s0, s1] -> (s0 + s1)> ()[%dim0, %limit] {
%v0 = affine.load %arg0[%i0] : memref<?xf32>
}
return
diff --git a/mlir/test/Transforms/lower-affine.mlir b/mlir/test/Transforms/lower-affine.mlir
index 1c3de885adf..c422090194b 100644
--- a/mlir/test/Transforms/lower-affine.mlir
+++ b/mlir/test/Transforms/lower-affine.mlir
@@ -114,7 +114,7 @@ func @more_imperfectly_nested_loops() {
// CHECK-NEXT: }
func @affine_apply_loops_shorthand(%N : index) {
affine.for %i = 0 to %N {
- affine.for %j = (d0)[]->(d0)(%i)[] to 42 {
+ affine.for %j = affine_map<(d0)[]->(d0)>(%i)[] to 42 {
call @body2(%i, %j) : (index, index) -> ()
}
}
@@ -125,8 +125,8 @@ func @affine_apply_loops_shorthand(%N : index) {
func @get_idx() -> (index)
-#set1 = (d0) : (20 - d0 >= 0)
-#set2 = (d0) : (d0 - 10 >= 0)
+#set1 = affine_set<(d0) : (20 - d0 >= 0)>
+#set2 = affine_set<(d0) : (d0 - 10 >= 0)>
// CHECK-LABEL: func @if_only
// CHECK-NEXT: %[[v0:.*]] = call @get_idx() : () -> index
@@ -215,7 +215,7 @@ func @nested_ifs() {
return
}
-#setN = (d0)[N,M,K,L] : (N - d0 + 1 >= 0, N - 1 >= 0, M - 1 >= 0, K - 1 >= 0, L - 42 == 0)
+#setN = affine_set<(d0)[N,M,K,L] : (N - d0 + 1 >= 0, N - 1 >= 0, M - 1 >= 0, K - 1 >= 0, L - 42 == 0)>
// CHECK-LABEL: func @multi_cond
// CHECK-NEXT: %[[v0:.*]] = call @get_idx() : () -> index
@@ -311,8 +311,8 @@ func @if_for() {
return
}
-#lbMultiMap = (d0)[s0] -> (d0, s0 - d0)
-#ubMultiMap = (d0)[s0] -> (s0, d0 + 10)
+#lbMultiMap = affine_map<(d0)[s0] -> (d0, s0 - d0)>
+#ubMultiMap = affine_map<(d0)[s0] -> (s0, d0 + 10)>
// CHECK-LABEL: func @loop_min_max
// CHECK-NEXT: %[[c0:.*]] = constant 0 : index
@@ -344,7 +344,7 @@ func @loop_min_max(%N : index) {
return
}
-#map_7_values = (i) -> (i, i, i, i, i, i, i)
+#map_7_values = affine_map<(i) -> (i, i, i, i, i, i, i)>
// Check that the "min" (cmpi "slt" + select) reduction sequence is emitted
// correctly for an affine map with 7 results.
@@ -378,13 +378,13 @@ func @min_reduction_tree(%v : index) {
/////////////////////////////////////////////////////////////////////
-#map0 = () -> (0)
-#map1 = ()[s0] -> (s0)
-#map2 = (d0) -> (d0)
-#map3 = (d0)[s0] -> (d0 + s0 + 1)
-#map4 = (d0,d1,d2,d3)[s0,s1,s2] -> (d0 + 2*d1 + 3*d2 + 4*d3 + 5*s0 + 6*s1 + 7*s2)
-#map5 = (d0,d1,d2) -> (d0,d1,d2)
-#map6 = (d0,d1,d2) -> (d0 + d1 + d2)
+#map0 = affine_map<() -> (0)>
+#map1 = affine_map<()[s0] -> (s0)>
+#map2 = affine_map<(d0) -> (d0)>
+#map3 = affine_map<(d0)[s0] -> (d0 + s0 + 1)>
+#map4 = affine_map<(d0,d1,d2,d3)[s0,s1,s2] -> (d0 + 2*d1 + 3*d2 + 4*d3 + 5*s0 + 6*s1 + 7*s2)>
+#map5 = affine_map<(d0,d1,d2) -> (d0,d1,d2)>
+#map6 = affine_map<(d0,d1,d2) -> (d0 + d1 + d2)>
// CHECK-LABEL: func @affine_applies(
func @affine_applies(%arg0 : index) {
@@ -442,7 +442,7 @@ func @args_ret_affine_apply(index, index) -> (index, index) {
// applying constant folding transformation after affine lowering.
//===---------------------------------------------------------------------===//
-#mapmod = (i) -> (i mod 42)
+#mapmod = affine_map<(i) -> (i mod 42)>
// --------------------------------------------------------------------------//
// IMPORTANT NOTE: if you change this test, also change the @lowered_affine_mod
@@ -461,7 +461,7 @@ func @affine_apply_mod(%arg0 : index) -> (index) {
return %0 : index
}
-#mapfloordiv = (i) -> (i floordiv 42)
+#mapfloordiv = affine_map<(i) -> (i floordiv 42)>
// --------------------------------------------------------------------------//
// IMPORTANT NOTE: if you change this test, also change the @lowered_affine_mod
@@ -483,7 +483,7 @@ func @affine_apply_floordiv(%arg0 : index) -> (index) {
return %0 : index
}
-#mapceildiv = (i) -> (i ceildiv 42)
+#mapceildiv = affine_map<(i) -> (i ceildiv 42)>
// --------------------------------------------------------------------------//
// IMPORTANT NOTE: if you change this test, also change the @lowered_affine_mod
diff --git a/mlir/test/Transforms/memref-bound-check.mlir b/mlir/test/Transforms/memref-bound-check.mlir
index c81e44d1bfd..25ec800465e 100644
--- a/mlir/test/Transforms/memref-bound-check.mlir
+++ b/mlir/test/Transforms/memref-bound-check.mlir
@@ -13,8 +13,8 @@ func @test() {
affine.for %i = -1 to 10 {
affine.for %j = -1 to 10 {
- %idx0 = affine.apply (d0, d1) -> (d0)(%i, %j)
- %idx1 = affine.apply (d0, d1) -> (d1)(%i, %j)
+ %idx0 = affine.apply affine_map<(d0, d1) -> (d0)>(%i, %j)
+ %idx1 = affine.apply affine_map<(d0, d1) -> (d1)>(%i, %j)
// Out of bound access.
%x = affine.load %A[%idx0, %idx1] : memref<9 x 9 x i32>
// expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
@@ -22,7 +22,7 @@ func @test() {
// expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #2}}
// expected-error@-4 {{'affine.load' op memref out of lower bound access along dimension #2}}
// This will access 0 to 110 - hence an overflow.
- %idy = affine.apply (d0, d1) -> (10*d0 - d1 + 19)(%i, %j)
+ %idy = affine.apply affine_map<(d0, d1) -> (10*d0 - d1 + 19)>(%i, %j)
%y = affine.load %B[%idy] : memref<111 x i32>
}
}
@@ -45,16 +45,16 @@ func @test_mod_floordiv_ceildiv() {
affine.for %i = 0 to 256 {
affine.for %j = 0 to 256 {
- %idx0 = affine.apply (d0, d1, d2) -> (d0 mod 128 + 1)(%i, %j, %j)
- %idx1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4 + 1)(%i, %j, %j)
- %idx2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4)(%i, %j, %j)
+ %idx0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128 + 1)>(%i, %j, %j)
+ %idx1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4 + 1)>(%i, %j, %j)
+ %idx2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4)>(%i, %j, %j)
%x = affine.load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
// expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
// expected-error@-2 {{'affine.load' op memref out of upper bound access along dimension #2}}
// expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #3}}
- %idy0 = affine.apply (d0, d1, d2) -> (d0 mod 128)(%i, %j, %j)
- %idy1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4)(%i, %j, %j)
- %idy2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4 - 1)(%i, %j, %j)
+ %idy0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128)>(%i, %j, %j)
+ %idy1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4)>(%i, %j, %j)
+ %idy2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4 - 1)>(%i, %j, %j)
affine.store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'affine.store' op memref out of lower bound access along dimension #3}}
} // CHECK }
} // CHECK }
@@ -75,12 +75,12 @@ func @test_no_out_of_bounds() {
// CHECK-NEXT: %{{.*}} = affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<257x256xi32>
// CHECK-NEXT: %{{.*}} = affine.apply {{#map.*}}(%{{.*}}, %{{.*}})
// CHECK-NEXT: %{{.*}} = affine.load %{{.*}}[%{{.*}}] : memref<1xi32>
- %idx0 = affine.apply (d0, d1) -> ( 64 * (d0 ceildiv 64))(%i, %j)
+ %idx0 = affine.apply affine_map<(d0, d1) -> ( 64 * (d0 ceildiv 64))>(%i, %j)
// Without GCDTightenInequalities(), the upper bound on the region
// accessed along first memref dimension would have come out as d0 <= 318
// (instead of d0 <= 256), and led to a false positive out of bounds.
%x = affine.load %A[%idx0, %zero] : memref<257 x 256 x i32>
- %idy = affine.apply (d0, d1) -> (d0 floordiv 256)(%i, %i)
+ %idy = affine.apply affine_map<(d0, d1) -> (d0 floordiv 256)>(%i, %i)
%y = affine.load %B[%idy] : memref<1 x i32>
} // CHECK-NEXT }
}
@@ -94,16 +94,16 @@ func @mod_div() {
affine.for %i = 0 to 256 {
affine.for %j = 0 to 256 {
- %idx0 = affine.apply (d0, d1, d2) -> (d0 mod 128 + 1)(%i, %j, %j)
- %idx1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4 + 1)(%i, %j, %j)
- %idx2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4)(%i, %j, %j)
+ %idx0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128 + 1)>(%i, %j, %j)
+ %idx1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4 + 1)>(%i, %j, %j)
+ %idx2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4)>(%i, %j, %j)
%x = affine.load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
// expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
// expected-error@-2 {{'affine.load' op memref out of upper bound access along dimension #2}}
// expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #3}}
- %idy0 = affine.apply (d0, d1, d2) -> (d0 mod 128)(%i, %j, %j)
- %idy1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4)(%i, %j, %j)
- %idy2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4 - 1)(%i, %j, %j)
+ %idy0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128)>(%i, %j, %j)
+ %idy1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4)>(%i, %j, %j)
+ %idy2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4 - 1)>(%i, %j, %j)
affine.store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'affine.store' op memref out of lower bound access along dimension #3}}
}
}
@@ -116,8 +116,8 @@ func @mod_floordiv_nested() {
%A = alloc() : memref<256 x 256 x i32>
affine.for %i = 0 to 256 {
affine.for %j = 0 to 256 {
- %idx0 = affine.apply (d0, d1) -> ((d0 mod 1024) floordiv 4)(%i, %j)
- %idx1 = affine.apply (d0, d1) -> ((((d1 mod 128) mod 32) ceildiv 4) * 32)(%i, %j)
+ %idx0 = affine.apply affine_map<(d0, d1) -> ((d0 mod 1024) floordiv 4)>(%i, %j)
+ %idx1 = affine.apply affine_map<(d0, d1) -> ((((d1 mod 128) mod 32) ceildiv 4) * 32)>(%i, %j)
affine.load %A[%idx0, %idx1] : memref<256 x 256 x i32> // expected-error {{'affine.load' op memref out of upper bound access along dimension #2}}
}
}
@@ -128,7 +128,7 @@ func @mod_floordiv_nested() {
func @test_semi_affine_bailout(%N : index) {
%B = alloc() : memref<10 x i32>
affine.for %i = 0 to 10 {
- %idx = affine.apply (d0)[s0] -> (d0 * s0)(%i)[%N]
+ %idx = affine.apply affine_map<(d0)[s0] -> (d0 * s0)>(%i)[%N]
%y = affine.load %B[%idx] : memref<10 x i32>
// expected-error@-1 {{getMemRefRegion: compose affine map failed}}
}
@@ -139,8 +139,8 @@ func @test_semi_affine_bailout(%N : index) {
func @multi_mod_floordiv() {
%A = alloc() : memref<2x2xi32>
affine.for %ii = 0 to 64 {
- %idx0 = affine.apply (d0) -> ((d0 mod 147456) floordiv 1152) (%ii)
- %idx1 = affine.apply (d0) -> (((d0 mod 147456) mod 1152) floordiv 384) (%ii)
+ %idx0 = affine.apply affine_map<(d0) -> ((d0 mod 147456) floordiv 1152)> (%ii)
+ %idx1 = affine.apply affine_map<(d0) -> (((d0 mod 147456) mod 1152) floordiv 384)> (%ii)
%v = affine.load %A[%idx0, %idx1] : memref<2x2xi32>
}
return
@@ -155,20 +155,20 @@ func @delinearize_mod_floordiv() {
// Reshape '%in' into '%out'.
affine.for %ii = 0 to 64 {
affine.for %jj = 0 to 9 {
- %a0 = affine.apply (d0, d1) -> (d0 * (9 * 1024) + d1 * 128) (%ii, %jj)
- %a10 = affine.apply (d0) ->
- (d0 floordiv (2 * 3 * 3 * 128 * 128)) (%a0)
- %a11 = affine.apply (d0) ->
- ((d0 mod 294912) floordiv (3 * 3 * 128 * 128)) (%a0)
- %a12 = affine.apply (d0) ->
- ((((d0 mod 294912) mod 147456) floordiv 1152) floordiv 8) (%a0)
- %a13 = affine.apply (d0) ->
- ((((d0 mod 294912) mod 147456) mod 1152) floordiv 384) (%a0)
- %a14 = affine.apply (d0) ->
- (((((d0 mod 294912) mod 147456) mod 1152) mod 384) floordiv 128) (%a0)
- %a15 = affine.apply (d0) ->
+ %a0 = affine.apply affine_map<(d0, d1) -> (d0 * (9 * 1024) + d1 * 128)> (%ii, %jj)
+ %a10 = affine.apply affine_map<(d0) ->
+ (d0 floordiv (2 * 3 * 3 * 128 * 128))> (%a0)
+ %a11 = affine.apply affine_map<(d0) ->
+ ((d0 mod 294912) floordiv (3 * 3 * 128 * 128))> (%a0)
+ %a12 = affine.apply affine_map<(d0) ->
+ ((((d0 mod 294912) mod 147456) floordiv 1152) floordiv 8)> (%a0)
+ %a13 = affine.apply affine_map<(d0) ->
+ ((((d0 mod 294912) mod 147456) mod 1152) floordiv 384)> (%a0)
+ %a14 = affine.apply affine_map<(d0) ->
+ (((((d0 mod 294912) mod 147456) mod 1152) mod 384) floordiv 128)> (%a0)
+ %a15 = affine.apply affine_map<(d0) ->
((((((d0 mod 294912) mod 147456) mod 1152) mod 384) mod 128)
- floordiv 128) (%a0)
+ floordiv 128)> (%a0)
%v0 = affine.load %in[%a10, %a11, %a13, %a14, %a12, %a15]
: memref<2x2x3x3x16x1xi32>
}
@@ -190,7 +190,7 @@ func @out_of_bounds() {
%c9 = constant 9 : i32
affine.for %i0 = 10 to 11 {
- %idy = affine.apply (d0) -> (100 * d0 floordiv 1000) (%i0)
+ %idy = affine.apply affine_map<(d0) -> (100 * d0 floordiv 1000)> (%i0)
affine.store %c9, %in[%idy] : memref<1xi32> // expected-error {{'affine.store' op memref out of upper bound access along dimension #1}}
}
return
@@ -202,9 +202,9 @@ func @out_of_bounds() {
// trivially redundant constraints (those differing only in their constant
// term), the number of constraints here explodes, and this would return out of
// bounds errors conservatively due to FlatAffineConstraints::kExplosionFactor.
-#map3 = (d0, d1) -> ((d0 * 72 + d1) floordiv 2304 + ((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)
-#map4 = (d0, d1) -> ((d0 * 72 + d1) mod 2304 - (((d0 * 72 + d1) mod 2304) floordiv 1152) * 1151 - ((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) * 9 - (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3) * 3)
-#map5 = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)
+#map3 = affine_map<(d0, d1) -> ((d0 * 72 + d1) floordiv 2304 + ((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)>
+#map4 = affine_map<(d0, d1) -> ((d0 * 72 + d1) mod 2304 - (((d0 * 72 + d1) mod 2304) floordiv 1152) * 1151 - ((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) * 9 - (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3) * 3)>
+#map5 = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)>
// CHECK-LABEL: func @test_complex_mod_floordiv
func @test_complex_mod_floordiv(%arg0: memref<4x4x16x1xf32>) {
%c0 = constant 0 : index
@@ -223,8 +223,8 @@ func @test_complex_mod_floordiv(%arg0: memref<4x4x16x1xf32>) {
// -----
// The first load is within bounds, but not the second one.
-#map0 = (d0) -> (d0 mod 4)
-#map1 = (d0) -> (d0 mod 4 + 4)
+#map0 = affine_map<(d0) -> (d0 mod 4)>
+#map1 = affine_map<(d0) -> (d0 mod 4 + 4)>
// CHECK-LABEL: func @test_mod_bound
func @test_mod_bound() {
@@ -242,9 +242,9 @@ func @test_mod_bound() {
// -----
-#map0 = (d0) -> (d0 floordiv 4)
-#map1 = (d0) -> (d0 floordiv 4 + 4)
-#map2 = (d0) -> (4 * (d0 floordiv 4) + d0 mod 4)
+#map0 = affine_map<(d0) -> (d0 floordiv 4)>
+#map1 = affine_map<(d0) -> (d0 floordiv 4 + 4)>
+#map2 = affine_map<(d0) -> (4 * (d0 floordiv 4) + d0 mod 4)>
// CHECK-LABEL: func @test_floordiv_bound
func @test_floordiv_bound() {
@@ -271,13 +271,13 @@ func @test_floordiv_bound() {
// This should not give an out of bounds error. The result of the affine.apply
// is composed into the bound map during analysis.
-#map_lb = (d0) -> (d0)
-#map_ub = (d0) -> (d0 + 4)
+#map_lb = affine_map<(d0) -> (d0)>
+#map_ub = affine_map<(d0) -> (d0 + 4)>
// CHECK-LABEL: func @non_composed_bound_operand
func @non_composed_bound_operand(%arg0: memref<1024xf32>) {
affine.for %i0 = 4 to 1028 step 4 {
- %i1 = affine.apply (d0) -> (d0 - 4) (%i0)
+ %i1 = affine.apply affine_map<(d0) -> (d0 - 4)> (%i0)
affine.for %i2 = #map_lb(%i1) to #map_ub(%i1) {
%0 = affine.load %arg0[%i2] : memref<1024xf32>
}
diff --git a/mlir/test/Transforms/memref-dataflow-opt.mlir b/mlir/test/Transforms/memref-dataflow-opt.mlir
index a7f6f25b816..0fe8f715c0e 100644
--- a/mlir/test/Transforms/memref-dataflow-opt.mlir
+++ b/mlir/test/Transforms/memref-dataflow-opt.mlir
@@ -1,10 +1,10 @@
// RUN: mlir-opt %s -memref-dataflow-opt | FileCheck %s
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d1 + 1)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0, d1) -> (d0)
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0, d1) -> (d1)
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = (d0, d1) -> (d0 - 1)
-// CHECK-DAG: [[MAP4:#map[0-9]+]] = (d0) -> (d0 + 1)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + 1)>
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0)>
+// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 - 1)>
+// CHECK-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
// CHECK-LABEL: func @simple_store_load() {
func @simple_store_load() {
@@ -61,10 +61,10 @@ func @store_load_affine_apply() -> memref<10x10xf32> {
%m = alloc() : memref<10x10xf32>
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
- %t0 = affine.apply (d0, d1) -> (d1 + 1)(%i0, %i1)
- %t1 = affine.apply (d0, d1) -> (d0)(%i0, %i1)
- %idx0 = affine.apply (d0, d1) -> (d1) (%t0, %t1)
- %idx1 = affine.apply (d0, d1) -> (d0 - 1) (%t0, %t1)
+ %t0 = affine.apply affine_map<(d0, d1) -> (d1 + 1)>(%i0, %i1)
+ %t1 = affine.apply affine_map<(d0, d1) -> (d0)>(%i0, %i1)
+ %idx0 = affine.apply affine_map<(d0, d1) -> (d1)> (%t0, %t1)
+ %idx1 = affine.apply affine_map<(d0, d1) -> (d0 - 1)> (%t0, %t1)
affine.store %cf7, %m[%idx0, %idx1] : memref<10x10xf32>
// CHECK-NOT: affine.load %{{[0-9]+}}
%v0 = affine.load %m[%i0, %i1] : memref<10x10xf32>
@@ -228,7 +228,7 @@ func @store_load_store_nested_fwd(%N : index) -> f32 {
affine.for %i1 = 0 to %N {
%v0 = affine.load %m[%i0] : memref<10xf32>
%v1 = addf %v0, %v0 : f32
- %idx = affine.apply (d0) -> (d0 + 1) (%i0)
+ %idx = affine.apply affine_map<(d0) -> (d0 + 1)> (%i0)
affine.store %cf9, %m[%idx] : memref<10xf32>
}
}
@@ -260,7 +260,7 @@ func @should_not_fwd(%A: memref<100xf32>, %M : index, %N : index) -> f32 {
// Can store-forward to %A[%j, %i], but no forwarding to the load on %A[%i, %j]
// CHECK-LABEL: func @refs_not_known_to_be_equal
func @refs_not_known_to_be_equal(%A : memref<100 x 100 x f32>, %M : index) {
- %N = affine.apply (d0) -> (d0 + 1) (%M)
+ %N = affine.apply affine_map<(d0) -> (d0 + 1)> (%M)
%cf1 = constant 1.0 : f32
affine.for %i = 0 to 100 {
// CHECK: affine.for %[[I:.*]] =
diff --git a/mlir/test/Transforms/memref-dependence-check.mlir b/mlir/test/Transforms/memref-dependence-check.mlir
index 03b6c74654d..f48a63ef984 100644
--- a/mlir/test/Transforms/memref-dependence-check.mlir
+++ b/mlir/test/Transforms/memref-dependence-check.mlir
@@ -2,7 +2,7 @@
// -----
-#set0 = (d0) : (1 == 0)
+#set0 = affine_set<(d0) : (1 == 0)>
// CHECK-LABEL: func @store_may_execute_before_load() {
func @store_may_execute_before_load() {
@@ -164,11 +164,11 @@ func @store_load_diff_element_affine_apply_const() {
%m = alloc() : memref<100xf32>
%c1 = constant 1 : index
%c8 = constant 8.0 : f32
- %a0 = affine.apply (d0) -> (d0) (%c1)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%c1)
affine.store %c8, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
- %a1 = affine.apply (d0) -> (d0 + 1) (%c1)
+ %a1 = affine.apply affine_map<(d0) -> (d0 + 1)> (%c1)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 1 at depth 1 = false}}
@@ -182,11 +182,11 @@ func @store_load_same_element_affine_apply_const() {
%c7 = constant 7.0 : f32
%c9 = constant 9 : index
%c11 = constant 11 : index
- %a0 = affine.apply (d0) -> (d0 + 1) (%c9)
+ %a0 = affine.apply affine_map<(d0) -> (d0 + 1)> (%c9)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = true}}
- %a1 = affine.apply (d0) -> (d0 - 1) (%c11)
+ %a1 = affine.apply affine_map<(d0) -> (d0 - 1)> (%c11)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 1 at depth 1 = false}}
@@ -198,11 +198,11 @@ func @store_load_same_element_affine_apply_const() {
func @store_load_affine_apply_symbol(%arg0: index) {
%m = alloc() : memref<100xf32>
%c7 = constant 7.0 : f32
- %a0 = affine.apply (d0) -> (d0) (%arg0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%arg0)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = true}}
- %a1 = affine.apply (d0) -> (d0) (%arg0)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%arg0)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 1 at depth 1 = false}}
@@ -214,11 +214,11 @@ func @store_load_affine_apply_symbol(%arg0: index) {
func @store_load_affine_apply_symbol_offset(%arg0: index) {
%m = alloc() : memref<100xf32>
%c7 = constant 7.0 : f32
- %a0 = affine.apply (d0) -> (d0) (%arg0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%arg0)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
- %a1 = affine.apply (d0) -> (d0 + 1) (%arg0)
+ %a1 = affine.apply affine_map<(d0) -> (d0 + 1)> (%arg0)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 1 at depth 1 = false}}
@@ -232,13 +232,13 @@ func @store_range_load_after_range() {
%c7 = constant 7.0 : f32
%c10 = constant 10 : index
affine.for %i0 = 0 to 10 {
- %a0 = affine.apply (d0) -> (d0) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
- %a1 = affine.apply (d0) -> (d0) (%c10)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%c10)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -255,13 +255,13 @@ func @store_load_func_symbol(%arg0: index, %arg1: index) {
%c7 = constant 7.0 : f32
%c10 = constant 10 : index
affine.for %i0 = 0 to %arg1 {
- %a0 = affine.apply (d0) -> (d0) (%arg0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%arg0)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = [1, +inf]}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [1, +inf]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = true}}
- %a1 = affine.apply (d0) -> (d0) (%arg0)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%arg0)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = [1, +inf]}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -278,7 +278,7 @@ func @store_range_load_last_in_range() {
%c7 = constant 7.0 : f32
%c10 = constant 10 : index
affine.for %i0 = 0 to 10 {
- %a0 = affine.apply (d0) -> (d0) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
// For dependence from 0 to 1, we do not have a loop carried dependence
// because only the final write in the loop accesses the same element as the
// load, so this dependence appears only at depth 2 (loop independent).
@@ -287,7 +287,7 @@ func @store_range_load_last_in_range() {
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = true}}
- %a1 = affine.apply (d0) -> (d0 - 1) (%c10)
+ %a1 = affine.apply affine_map<(d0) -> (d0 - 1)> (%c10)
// For dependence from 1 to 0, we have write-after-read (WAR) dependences
// for all loads in the loop to the store on the last iteration.
%v0 = affine.load %m[%a1] : memref<100xf32>
@@ -306,13 +306,13 @@ func @store_range_load_before_range() {
%c7 = constant 7.0 : f32
%c0 = constant 0 : index
affine.for %i0 = 1 to 11 {
- %a0 = affine.apply (d0) -> (d0) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
- %a1 = affine.apply (d0) -> (d0) (%c0)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%c0)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -329,7 +329,7 @@ func @store_range_load_first_in_range() {
%c7 = constant 7.0 : f32
%c0 = constant 0 : index
affine.for %i0 = 1 to 11 {
- %a0 = affine.apply (d0) -> (d0) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
// Dependence from 0 to 1 at depth 1 is a range because all loads at
// constant index zero are reads after first store at index zero during
// first iteration of the loop.
@@ -338,7 +338,7 @@ func @store_range_load_first_in_range() {
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [1, 9]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = true}}
- %a1 = affine.apply (d0) -> (d0 + 1) (%c0)
+ %a1 = affine.apply affine_map<(d0) -> (d0 + 1)> (%c0)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -354,13 +354,13 @@ func @store_plus_3() {
%m = alloc() : memref<100xf32>
%c7 = constant 7.0 : f32
affine.for %i0 = 1 to 11 {
- %a0 = affine.apply (d0) -> (d0 + 3) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 + 3)> (%i0)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [3, 3]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
- %a1 = affine.apply (d0) -> (d0) (%i0)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%i0)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -376,13 +376,13 @@ func @load_minus_2() {
%m = alloc() : memref<100xf32>
%c7 = constant 7.0 : f32
affine.for %i0 = 2 to 11 {
- %a0 = affine.apply (d0) -> (d0) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
affine.store %c7, %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [2, 2]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
- %a1 = affine.apply (d0) -> (d0 - 2) (%i0)
+ %a1 = affine.apply affine_map<(d0) -> (d0 - 2)> (%i0)
%v0 = affine.load %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -400,8 +400,8 @@ func @perfectly_nested_loops_loop_independent() {
affine.for %i0 = 0 to 11 {
affine.for %i1 = 0 to 11 {
// Dependence from access 0 to 1 is loop independent at depth = 3.
- %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
- %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
+ %a00 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i1)
+ %a01 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i1)
affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -409,8 +409,8 @@ func @perfectly_nested_loops_loop_independent() {
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 3 = true}}
- %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
- %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
+ %a10 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i1)
+ %a11 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i1)
%v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -431,8 +431,8 @@ func @perfectly_nested_loops_loop_carried_at_depth1() {
affine.for %i0 = 0 to 9 {
affine.for %i1 = 0 to 9 {
// Dependence from access 0 to 1 is loop carried at depth 1.
- %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
- %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
+ %a00 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i1)
+ %a01 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i1)
affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -440,8 +440,8 @@ func @perfectly_nested_loops_loop_carried_at_depth1() {
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [2, 2][0, 0]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 3 = false}}
- %a10 = affine.apply (d0, d1) -> (d0 - 2) (%i0, %i1)
- %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
+ %a10 = affine.apply affine_map<(d0, d1) -> (d0 - 2)> (%i0, %i1)
+ %a11 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i1)
%v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -462,8 +462,8 @@ func @perfectly_nested_loops_loop_carried_at_depth2() {
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
// Dependence from access 0 to 1 is loop carried at depth 2.
- %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
- %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
+ %a00 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i1)
+ %a01 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i1)
affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -471,8 +471,8 @@ func @perfectly_nested_loops_loop_carried_at_depth2() {
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = [0, 0][3, 3]}}
// expected-remark@above {{dependence from 0 to 1 at depth 3 = false}}
- %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
- %a11 = affine.apply (d0, d1) -> (d1 - 3) (%i0, %i1)
+ %a10 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i1)
+ %a11 = affine.apply affine_map<(d0, d1) -> (d1 - 3)> (%i0, %i1)
%v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -493,8 +493,8 @@ func @one_common_loop() {
// There is a loop-independent dependence from access 0 to 1 at depth 2.
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
- %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
- %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
+ %a00 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i1)
+ %a01 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i1)
affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -503,8 +503,8 @@ func @one_common_loop() {
// expected-remark@above {{dependence from 0 to 1 at depth 2 = true}}
}
affine.for %i2 = 0 to 9 {
- %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i2)
- %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i2)
+ %a10 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i2)
+ %a11 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i2)
%v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -526,7 +526,7 @@ func @dependence_cycle() {
// *) loop-independent dependence from access 1 to 2 at depth 2.
// *) loop-carried dependence from access 3 to 0 at depth 1.
affine.for %i0 = 0 to 9 {
- %a0 = affine.apply (d0) -> (d0) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0)> (%i0)
%v0 = affine.load %m.a[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -536,7 +536,7 @@ func @dependence_cycle() {
// expected-remark@above {{dependence from 0 to 2 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 3 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 3 at depth 2 = false}}
- %a1 = affine.apply (d0) -> (d0) (%i0)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%i0)
affine.store %v0, %m.b[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -546,7 +546,7 @@ func @dependence_cycle() {
// expected-remark@above {{dependence from 1 to 2 at depth 2 = true}}
// expected-remark@above {{dependence from 1 to 3 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 3 at depth 2 = false}}
- %a2 = affine.apply (d0) -> (d0) (%i0)
+ %a2 = affine.apply affine_map<(d0) -> (d0)> (%i0)
%v1 = affine.load %m.b[%a2] : memref<100xf32>
// expected-remark@above {{dependence from 2 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 2 to 0 at depth 2 = false}}
@@ -556,7 +556,7 @@ func @dependence_cycle() {
// expected-remark@above {{dependence from 2 to 2 at depth 2 = false}}
// expected-remark@above {{dependence from 2 to 3 at depth 1 = false}}
// expected-remark@above {{dependence from 2 to 3 at depth 2 = false}}
- %a3 = affine.apply (d0) -> (d0 + 1) (%i0)
+ %a3 = affine.apply affine_map<(d0) -> (d0 + 1)> (%i0)
affine.store %v1, %m.a[%a3] : memref<100xf32>
// expected-remark@above {{dependence from 3 to 0 at depth 1 = [1, 1]}}
// expected-remark@above {{dependence from 3 to 0 at depth 2 = false}}
@@ -577,8 +577,8 @@ func @negative_and_positive_direction_vectors(%arg0: index, %arg1: index) {
%c7 = constant 7.0 : f32
affine.for %i0 = 0 to %arg0 {
affine.for %i1 = 0 to %arg1 {
- %a00 = affine.apply (d0, d1) -> (d0 - 1) (%i0, %i1)
- %a01 = affine.apply (d0, d1) -> (d1 + 1) (%i0, %i1)
+ %a00 = affine.apply affine_map<(d0, d1) -> (d0 - 1)> (%i0, %i1)
+ %a01 = affine.apply affine_map<(d0, d1) -> (d1 + 1)> (%i0, %i1)
%v0 = affine.load %m[%a00, %a01] : memref<10x10xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -586,8 +586,8 @@ func @negative_and_positive_direction_vectors(%arg0: index, %arg1: index) {
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 3 = false}}
- %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
- %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
+ %a10 = affine.apply affine_map<(d0, d1) -> (d0)> (%i0, %i1)
+ %a11 = affine.apply affine_map<(d0, d1) -> (d1)> (%i0, %i1)
affine.store %c7, %m[%a10, %a11] : memref<10x10xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = [1, 1][-1, -1]}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -607,7 +607,7 @@ func @war_raw_waw_deps() {
%c7 = constant 7.0 : f32
affine.for %i0 = 0 to 10 {
affine.for %i1 = 0 to 10 {
- %a0 = affine.apply (d0) -> (d0 + 1) (%i1)
+ %a0 = affine.apply affine_map<(d0) -> (d0 + 1)> (%i1)
%v0 = affine.load %m[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -615,7 +615,7 @@ func @war_raw_waw_deps() {
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [1, 9][1, 1]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = [0, 0][1, 1]}}
// expected-remark@above {{dependence from 0 to 1 at depth 3 = false}}
- %a1 = affine.apply (d0) -> (d0) (%i1)
+ %a1 = affine.apply affine_map<(d0) -> (d0)> (%i1)
affine.store %c7, %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = [1, 9][-1, -1]}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -634,7 +634,7 @@ func @mod_deps() {
%m = alloc() : memref<100xf32>
%c7 = constant 7.0 : f32
affine.for %i0 = 0 to 10 {
- %a0 = affine.apply (d0) -> (d0 mod 2) (%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 mod 2)> (%i0)
// Results are conservative here since we currently don't have a way to
// represent strided sets in FlatAffineConstraints.
%v0 = affine.load %m[%a0] : memref<100xf32>
@@ -642,7 +642,7 @@ func @mod_deps() {
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [1, 9]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
- %a1 = affine.apply (d0) -> ( (d0 + 1) mod 2) (%i0)
+ %a1 = affine.apply affine_map<(d0) -> ( (d0 + 1) mod 2)> (%i0)
affine.store %c7, %m[%a1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = [1, 9]}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -671,7 +671,7 @@ func @loop_nest_depth() {
affine.for %i3 = 0 to 8 {
affine.for %i4 = 0 to 8 {
affine.for %i5 = 0 to 16 {
- %8 = affine.apply (d0, d1) -> (d0 * 16 + d1)(%i4, %i5)
+ %8 = affine.apply affine_map<(d0, d1) -> (d0 * 16 + d1)>(%i4, %i5)
%9 = affine.load %0[%8, %i3] : memref<100x100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 1 at depth 1 = false}}
@@ -696,9 +696,9 @@ func @mod_div_3d() {
affine.for %i0 = 0 to 8 {
affine.for %i1 = 0 to 8 {
affine.for %i2 = 0 to 8 {
- %idx0 = affine.apply (d0, d1, d2) -> (d0 floordiv 4) (%i0, %i1, %i2)
- %idx1 = affine.apply (d0, d1, d2) -> (d1 mod 2) (%i0, %i1, %i2)
- %idx2 = affine.apply (d0, d1, d2) -> (d2 floordiv 4) (%i0, %i1, %i2)
+ %idx0 = affine.apply affine_map<(d0, d1, d2) -> (d0 floordiv 4)> (%i0, %i1, %i2)
+ %idx1 = affine.apply affine_map<(d0, d1, d2) -> (d1 mod 2)> (%i0, %i1, %i2)
+ %idx2 = affine.apply affine_map<(d0, d1, d2) -> (d2 floordiv 4)> (%i0, %i1, %i2)
affine.store %c0, %M[%idx0, %idx1, %idx2] : memref<2 x 2 x 2 x i32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = [1, 3][-7, 7][-3, 3]}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = [0, 0][2, 7][-3, 3]}}
@@ -744,20 +744,20 @@ func @delinearize_mod_floordiv() {
affine.for %ii = 0 to 64 {
affine.for %jj = 0 to 9 {
- %a0 = affine.apply (d0, d1) -> (d0 * (9 * 1024) + d1 * 128) (%ii, %jj)
- %a10 = affine.apply (d0) ->
- (d0 floordiv (2 * 3 * 3 * 128 * 128)) (%a0)
- %a11 = affine.apply (d0) ->
- ((d0 mod 294912) floordiv (3 * 3 * 128 * 128)) (%a0)
- %a12 = affine.apply (d0) ->
- ((((d0 mod 294912) mod 147456) floordiv 1152) floordiv 8) (%a0)
- %a13 = affine.apply (d0) ->
- ((((d0 mod 294912) mod 147456) mod 1152) floordiv 384) (%a0)
- %a14 = affine.apply (d0) ->
- (((((d0 mod 294912) mod 147456) mod 1152) mod 384) floordiv 128) (%a0)
- %a15 = affine.apply (d0) ->
+ %a0 = affine.apply affine_map<(d0, d1) -> (d0 * (9 * 1024) + d1 * 128)> (%ii, %jj)
+ %a10 = affine.apply affine_map<(d0) ->
+ (d0 floordiv (2 * 3 * 3 * 128 * 128))> (%a0)
+ %a11 = affine.apply affine_map<(d0) ->
+ ((d0 mod 294912) floordiv (3 * 3 * 128 * 128))> (%a0)
+ %a12 = affine.apply affine_map<(d0) ->
+ ((((d0 mod 294912) mod 147456) floordiv 1152) floordiv 8)> (%a0)
+ %a13 = affine.apply affine_map<(d0) ->
+ ((((d0 mod 294912) mod 147456) mod 1152) floordiv 384)> (%a0)
+ %a14 = affine.apply affine_map<(d0) ->
+ (((((d0 mod 294912) mod 147456) mod 1152) mod 384) floordiv 128)> (%a0)
+ %a15 = affine.apply affine_map<(d0) ->
((((((d0 mod 294912) mod 147456) mod 1152) mod 384) mod 128)
- floordiv 128) (%a0)
+ floordiv 128)> (%a0)
%v0 = affine.load %in[%a10, %a11, %a13, %a14, %a12, %a15] : memref<2x2x3x3x16x1xi32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 1 at depth 1 = false}}
@@ -813,7 +813,7 @@ func @strided_loop_with_no_dependence() {
%0 = alloc() : memref<10xf32>
%cf0 = constant 0.0 : f32
affine.for %i0 = 0 to 8 step 2 {
- %a0 = affine.apply (d0) -> (d0 + 1)(%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 + 1)>(%i0)
affine.store %cf0, %0[%a0] : memref<10xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -836,7 +836,7 @@ func @strided_loop_with_loop_carried_dependence_at_depth1() {
%0 = alloc() : memref<10xf32>
%cf0 = constant 0.0 : f32
affine.for %i0 = 0 to 8 step 2 {
- %a0 = affine.apply (d0) -> (d0 + 4)(%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 + 4)>(%i0)
affine.store %cf0, %0[%a0] : memref<10xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -860,13 +860,13 @@ func @test_dep_store_depth1_load_depth2() {
%0 = alloc() : memref<100xf32>
%cst = constant 7.000000e+00 : f32
affine.for %i0 = 0 to 10 {
- %a0 = affine.apply (d0) -> (d0 - 1)(%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 - 1)>(%i0)
affine.store %cst, %0[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
- affine.for %i1 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 1)(%i0) {
+ affine.for %i1 = affine_map<(d0) -> (d0)>(%i0) to affine_map<(d0) -> (d0 + 1)>(%i0) {
%1 = affine.load %0[%i1] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = [1, 1]}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
@@ -887,7 +887,7 @@ func @test_dep_store_depth2_load_depth1() {
%0 = alloc() : memref<100xf32>
%cst = constant 7.000000e+00 : f32
affine.for %i0 = 0 to 10 {
- affine.for %i1 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 1)(%i0) {
+ affine.for %i1 = affine_map<(d0) -> (d0)>(%i0) to affine_map<(d0) -> (d0 + 1)>(%i0) {
affine.store %cst, %0[%i1] : memref<100xf32>
// expected-remark@above {{dependence from 0 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 0 to 0 at depth 2 = false}}
@@ -895,7 +895,7 @@ func @test_dep_store_depth2_load_depth1() {
// expected-remark@above {{dependence from 0 to 1 at depth 1 = [2, 2]}}
// expected-remark@above {{dependence from 0 to 1 at depth 2 = false}}
}
- %a0 = affine.apply (d0) -> (d0 - 2)(%i0)
+ %a0 = affine.apply affine_map<(d0) -> (d0 - 2)>(%i0)
%1 = affine.load %0[%a0] : memref<100xf32>
// expected-remark@above {{dependence from 1 to 0 at depth 1 = false}}
// expected-remark@above {{dependence from 1 to 0 at depth 2 = false}}
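The same mechanical rewrite runs through all of the dependence tests above: an affine.apply that used to take a bare inline map now wraps it in the affine_map keyword. A minimal before/after sketch, with an illustrative function name not taken from the patch:

  func @apply_syntax_sketch(%i : index) {
    // Old form, no longer parseable after this change:
    //   %x = affine.apply (d0) -> (d0 + 1)(%i)
    // New form introduced by this patch:
    %x = affine.apply affine_map<(d0) -> (d0 + 1)>(%i)
    return
  }
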
diff --git a/mlir/test/Transforms/memref-normalize.mlir b/mlir/test/Transforms/memref-normalize.mlir
index 90b363219ee..2ed69da5fb0 100644
--- a/mlir/test/Transforms/memref-normalize.mlir
+++ b/mlir/test/Transforms/memref-normalize.mlir
@@ -2,13 +2,13 @@
// CHECK-LABEL: func @permute()
func @permute() {
- %A = alloc() : memref<64x256xf32, (d0, d1) -> (d1, d0)>
+ %A = alloc() : memref<64x256xf32, affine_map<(d0, d1) -> (d1, d0)>>
affine.for %i = 0 to 64 {
affine.for %j = 0 to 256 {
- affine.load %A[%i, %j] : memref<64x256xf32, (d0, d1) -> (d1, d0)>
+ affine.load %A[%i, %j] : memref<64x256xf32, affine_map<(d0, d1) -> (d1, d0)>>
}
}
- dealloc %A : memref<64x256xf32, (d0, d1) -> (d1, d0)>
+ dealloc %A : memref<64x256xf32, affine_map<(d0, d1) -> (d1, d0)>>
return
}
// The old memref alloc should disappear.
@@ -25,11 +25,11 @@ func @permute() {
// CHECK-LABEL: func @shift
func @shift(%idx : index) {
// CHECK-NEXT: alloc() : memref<65xf32>
- %A = alloc() : memref<64xf32, (d0) -> (d0 + 1)>
+ %A = alloc() : memref<64xf32, affine_map<(d0) -> (d0 + 1)>>
// CHECK-NEXT: affine.load %{{.*}}[symbol(%arg0) + 1] : memref<65xf32>
- affine.load %A[%idx] : memref<64xf32, (d0) -> (d0 + 1)>
+ affine.load %A[%idx] : memref<64xf32, affine_map<(d0) -> (d0 + 1)>>
affine.for %i = 0 to 64 {
- affine.load %A[%i] : memref<64xf32, (d0) -> (d0 + 1)>
+ affine.load %A[%i] : memref<64xf32, affine_map<(d0) -> (d0 + 1)>>
// CHECK: %{{.*}} = affine.load %{{.*}}[%arg{{.*}} + 1] : memref<65xf32>
}
return
@@ -38,14 +38,14 @@ func @shift(%idx : index) {
// CHECK-LABEL: func @high_dim_permute()
func @high_dim_permute() {
// CHECK-NOT: memref<64x128x256xf32,
- %A = alloc() : memref<64x128x256xf32, (d0, d1, d2) -> (d2, d0, d1)>
+ %A = alloc() : memref<64x128x256xf32, affine_map<(d0, d1, d2) -> (d2, d0, d1)>>
// CHECK: %[[I:arg[0-9]+]]
affine.for %i = 0 to 64 {
// CHECK: %[[J:arg[0-9]+]]
affine.for %j = 0 to 128 {
// CHECK: %[[K:arg[0-9]+]]
affine.for %k = 0 to 256 {
- affine.load %A[%i, %j, %k] : memref<64x128x256xf32, (d0, d1, d2) -> (d2, d0, d1)>
+ affine.load %A[%i, %j, %k] : memref<64x128x256xf32, affine_map<(d0, d1, d2) -> (d2, d0, d1)>>
// CHECK: %{{.*}} = affine.load %{{.*}}[%[[K]], %[[I]], %[[J]]] : memref<256x64x128xf32>
}
}
@@ -55,7 +55,7 @@ func @high_dim_permute() {
// CHECK-LABEL: func @invalid_map
func @invalid_map() {
- %A = alloc() : memref<64x128xf32, (d0, d1) -> (d0, -d1 - 10)>
+ %A = alloc() : memref<64x128xf32, affine_map<(d0, d1) -> (d0, -d1 - 10)>>
// CHECK: %{{.*}} = alloc() : memref<64x128xf32,
return
}
@@ -64,22 +64,22 @@ func @invalid_map() {
// CHECK-LABEL: func @data_tiling
func @data_tiling(%idx : index) {
// CHECK: alloc() : memref<8x32x8x16xf32>
- %A = alloc() : memref<64x512xf32, (d0, d1) -> (d0 floordiv 8, d1 floordiv 16, d0 mod 8, d1 mod 16)>
+ %A = alloc() : memref<64x512xf32, affine_map<(d0, d1) -> (d0 floordiv 8, d1 floordiv 16, d0 mod 8, d1 mod 16)>>
// CHECK: affine.load %{{.*}}[symbol(%arg0) floordiv 8, symbol(%arg0) floordiv 16, symbol(%arg0) mod 8, symbol(%arg0) mod 16]
- affine.load %A[%idx, %idx] : memref<64x512xf32, (d0, d1) -> (d0 floordiv 8, d1 floordiv 16, d0 mod 8, d1 mod 16)>
+ affine.load %A[%idx, %idx] : memref<64x512xf32, affine_map<(d0, d1) -> (d0 floordiv 8, d1 floordiv 16, d0 mod 8, d1 mod 16)>>
return
}
// Strides 2 and 4 along respective dimensions.
// CHECK-LABEL: func @strided
func @strided() {
- %A = alloc() : memref<64x128xf32, (d0, d1) -> (2*d0, 4*d1)>
+ %A = alloc() : memref<64x128xf32, affine_map<(d0, d1) -> (2*d0, 4*d1)>>
// CHECK: affine.for %[[IV0:.*]] =
affine.for %i = 0 to 64 {
// CHECK: affine.for %[[IV1:.*]] =
affine.for %j = 0 to 128 {
// CHECK: affine.load %{{.*}}[%[[IV0]] * 2, %[[IV1]] * 4] : memref<127x509xf32>
- affine.load %A[%i, %j] : memref<64x128xf32, (d0, d1) -> (2*d0, 4*d1)>
+ affine.load %A[%i, %j] : memref<64x128xf32, affine_map<(d0, d1) -> (2*d0, 4*d1)>>
}
}
return
@@ -88,13 +88,13 @@ func @strided() {
// Strided, but the strides are in the linearized space.
// CHECK-LABEL: func @strided_cumulative
func @strided_cumulative() {
- %A = alloc() : memref<2x5xf32, (d0, d1) -> (3*d0 + 17*d1)>
+ %A = alloc() : memref<2x5xf32, affine_map<(d0, d1) -> (3*d0 + 17*d1)>>
// CHECK: affine.for %[[IV0:.*]] =
affine.for %i = 0 to 2 {
// CHECK: affine.for %[[IV1:.*]] =
affine.for %j = 0 to 5 {
// CHECK: affine.load %{{.*}}[%[[IV0]] * 3 + %[[IV1]] * 17] : memref<72xf32>
- affine.load %A[%i, %j] : memref<2x5xf32, (d0, d1) -> (3*d0 + 17*d1)>
+ affine.load %A[%i, %j] : memref<2x5xf32, affine_map<(d0, d1) -> (3*d0 + 17*d1)>>
}
}
return
@@ -105,11 +105,11 @@ func @strided_cumulative() {
// CHECK-LABEL: func @symbolic_operands
func @symbolic_operands(%s : index) {
// CHECK: alloc() : memref<100xf32>
- %A = alloc()[%s] : memref<10x10xf32, (d0,d1)[s0] -> (10*d0 + d1)>
+ %A = alloc()[%s] : memref<10x10xf32, affine_map<(d0,d1)[s0] -> (10*d0 + d1)>>
affine.for %i = 0 to 10 {
affine.for %j = 0 to 10 {
// CHECK: affine.load %{{.*}}[%{{.*}} * 10 + %{{.*}}] : memref<100xf32>
- affine.load %A[%i, %j] : memref<10x10xf32, (d0,d1)[s0] -> (10*d0 + d1)>
+ affine.load %A[%i, %j] : memref<10x10xf32, affine_map<(d0,d1)[s0] -> (10*d0 + d1)>>
}
}
return
@@ -117,20 +117,20 @@ func @symbolic_operands(%s : index) {
// Memref escapes; no normalization.
// CHECK-LABEL: func @escaping() -> memref<64xf32, #map{{[0-9]+}}>
-func @escaping() -> memref<64xf32, (d0) -> (d0 + 2)> {
+func @escaping() -> memref<64xf32, affine_map<(d0) -> (d0 + 2)>> {
// CHECK: %{{.*}} = alloc() : memref<64xf32, #map{{[0-9]+}}>
- %A = alloc() : memref<64xf32, (d0) -> (d0 + 2)>
- return %A : memref<64xf32, (d0) -> (d0 + 2)>
+ %A = alloc() : memref<64xf32, affine_map<(d0) -> (d0 + 2)>>
+ return %A : memref<64xf32, affine_map<(d0) -> (d0 + 2)>>
}
// Semi-affine maps, normalization not implemented yet.
// CHECK-LABEL: func @semi_affine_layout_map
func @semi_affine_layout_map(%s0: index, %s1: index) {
- %A = alloc()[%s0, %s1] : memref<256x1024xf32, (d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>
+ %A = alloc()[%s0, %s1] : memref<256x1024xf32, affine_map<(d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>>
affine.for %i = 0 to 256 {
affine.for %j = 0 to 1024 {
// CHECK: memref<256x1024xf32, #map{{[0-9]+}}>
- affine.load %A[%i, %j] : memref<256x1024xf32, (d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>
+ affine.load %A[%i, %j] : memref<256x1024xf32, affine_map<(d0, d1)[s0, s1] -> (d0*s0 + d1*s1)>>
}
}
return
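As the normalization tests above show, the keyword also wraps layout maps that appear inside memref types. A small sketch of an allocation with a permuted layout, assuming the plain `alloc`/`dealloc` ops used throughout these tests (function name illustrative):

  func @layout_map_sketch() {
    // The layout map is an attribute on the memref type, so it takes the wrapper too.
    %A = alloc() : memref<64x256xf32, affine_map<(d0, d1) -> (d1, d0)>>
    dealloc %A : memref<64x256xf32, affine_map<(d0, d1) -> (d1, d0)>>
    return
  }
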
diff --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
index c4e17ce1682..8293120d50e 100644
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -2,14 +2,14 @@
// -----
-// CHECK-DAG: [[MOD_2:#map[0-9]+]] = (d0) -> (d0 mod 2)
-// CHECK-DAG: [[MAP_MINUS_1:#map[0-9]+]] = (d0) -> (d0 - 1)
+// CHECK-DAG: [[MOD_2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 2)>
+// CHECK-DAG: [[MAP_MINUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
// CHECK-LABEL: func @loop_nest_dma() {
func @loop_nest_dma() {
- %A = alloc() : memref<256 x f32, (d0) -> (d0), 0>
- %Ah = alloc() : memref<32 x f32, (d0) -> (d0), 1>
+ %A = alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
+ %Ah = alloc() : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
%tag = alloc() : memref<1 x f32>
@@ -19,15 +19,15 @@ func @loop_nest_dma() {
affine.for %i = 0 to 8 {
affine.dma_start %A[%i], %Ah[%i], %tag[%zero], %num_elts : memref<256 x f32>, memref<32 x f32, 1>, memref<1 x f32>
affine.dma_wait %tag[%zero], %num_elts : memref<1 x f32>
- %v = affine.load %Ah[%i] : memref<32 x f32, (d0) -> (d0), 1>
+ %v = affine.load %Ah[%i] : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
%r = "compute"(%v) : (f32) -> (f32)
- affine.store %r, %Ah[%i] : memref<32 x f32, (d0) -> (d0), 1>
+ affine.store %r, %Ah[%i] : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
affine.for %j = 0 to 32 {
"do_more_compute"(%i, %j) : (index, index) -> ()
}
}
dealloc %tag : memref<1 x f32>
- dealloc %Ah : memref<32 x f32, (d0) -> (d0), 1>
+ dealloc %Ah : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
return
}
// CHECK: %{{.*}} = alloc() : memref<256xf32>
@@ -64,8 +64,8 @@ func @loop_nest_dma() {
// -----
-// CHECK-DAG: [[FLOOR_MOD_2:#map[0-9]+]] = (d0) -> ((d0 floordiv 4) mod 2)
-// CHECK-DAG: [[REMAP_SHIFT_MINUS_4:#map[0-9]+]] = (d0) -> (d0 - 4)
+// CHECK-DAG: [[FLOOR_MOD_2:#map[0-9]+]] = affine_map<(d0) -> ((d0 floordiv 4) mod 2)>
+// CHECK-DAG: [[REMAP_SHIFT_MINUS_4:#map[0-9]+]] = affine_map<(d0) -> (d0 - 4)>
// CHECK-LABEL: @loop_step
func @loop_step(%arg0: memref<512xf32>,
@@ -105,8 +105,8 @@ func @loop_step(%arg0: memref<512xf32>,
// -----
-#map1 = (d0, d1) -> ((d0 * 2048 + d1 * 256) floordiv 32)
-#map2 = (d0) -> ((d0 * 2048) floordiv 32)
+#map1 = affine_map<(d0, d1) -> ((d0 * 2048 + d1 * 256) floordiv 32)>
+#map2 = affine_map<(d0) -> ((d0 * 2048) floordiv 32)>
// CHECK-LABEL: func @loop_dma_nested(%{{.*}}: memref<512x32xvector<8xf32>
func @loop_dma_nested(%arg0: memref<512x32xvector<8xf32>>, %arg1: memref<512x32xvector<8xf32>>, %arg2: memref<512x32xvector<8xf32>>) {
%num_elts = constant 256 : index
@@ -199,7 +199,7 @@ func @loop_dma_nested(%arg0: memref<512x32xvector<8xf32>>, %arg1: memref<512x32x
}
// -----
-#map2 = (d0) -> ((d0 * 2048) floordiv 32)
+#map2 = affine_map<(d0) -> ((d0 * 2048) floordiv 32)>
// CHECK: func @loop_dma_dependent
func @loop_dma_dependent(%arg2: memref<512x32xvector<8xf32>>) {
@@ -356,8 +356,8 @@ func @dynamic_shape_dma_buffer(%arg0: memref<512 x 32 x f32>) {
// before performing any replacement.
// CHECK-LABEL: func @escaping_and_indexed_use_mix
func @escaping_and_indexed_use_mix() {
- %A = alloc() : memref<256 x f32, (d0) -> (d0), 0>
- %Ah = alloc() : memref<32 x f32, (d0) -> (d0), 1>
+ %A = alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
+ %Ah = alloc() : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
%tag = alloc() : memref<1 x f32>
%zero = constant 0 : index
%num_elts = constant 32 : index
@@ -367,11 +367,11 @@ func @escaping_and_indexed_use_mix() {
affine.dma_start %A[%i], %Ah[%i], %tag[%zero], %num_elts : memref<256 x f32>, memref<32 x f32, 1>, memref<1 x f32>
affine.dma_wait %tag[%zero], %num_elts : memref<1 x f32>
"compute"(%Ah) : (memref<32 x f32, 1>) -> ()
- %v = affine.load %Ah[%i] : memref<32 x f32, (d0) -> (d0), 1>
+ %v = affine.load %Ah[%i] : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
"foo"(%v) : (f32) -> ()
}
- dealloc %A : memref<256 x f32, (d0) -> (d0), 0>
- dealloc %Ah : memref<32 x f32, (d0) -> (d0), 1>
+ dealloc %A : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
+ dealloc %Ah : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
return
}
// No replacement.
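Named map aliases and memory-space-qualified memrefs, as used in the DMA pipelining tests above, follow the same pattern. A short sketch; the alias and function names are hypothetical:

  #tile = affine_map<(d0) -> ((d0 * 2048) floordiv 32)>

  func @alias_and_memspace_sketch(%i : index) {
    // A named alias can stand in wherever a map literal is accepted.
    %0 = affine.apply #tile(%i)
    // The trailing memory-space id stays after the wrapped layout map.
    %Ah = alloc() : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
    dealloc %Ah : memref<32 x f32, affine_map<(d0) -> (d0)>, 1>
    return
  }
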
diff --git a/mlir/test/Transforms/simplify-affine-structures.mlir b/mlir/test/Transforms/simplify-affine-structures.mlir
index 9e5e7f966d2..89f37d0b6c9 100644
--- a/mlir/test/Transforms/simplify-affine-structures.mlir
+++ b/mlir/test/Transforms/simplify-affine-structures.mlir
@@ -1,39 +1,40 @@
// RUN: mlir-opt %s -simplify-affine-structures | FileCheck %s
-// CHECK-DAG: [[SET_EMPTY_2D:#set[0-9]+]] = (d0, d1) : (1 == 0)
-// CHECK-DAG: #set1 = (d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0, d1 + 101 >= 0)
-// CHECK-DAG: #set2 = (d0, d1)[s0, s1] : (1 == 0)
-// CHECK-DAG: #set3 = (d0, d1)[s0, s1] : (d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0, d0 * 5 - d1 * 11 + s0 * 7 + s1 == 0, d0 * 11 + d1 * 7 - s0 * 5 + s1 == 0, d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0)
-// CHECK-DAG: [[SET_EMPTY_1D:#set[0-9]+]] = (d0) : (1 == 0)
-// CHECK-DAG: [[SET_EMPTY_1D_2S:#set[0-9]+]] = (d0)[s0, s1] : (1 == 0)
-// CHECK-DAG: [[SET_EMPTY_3D:#set[0-9]+]] = (d0, d1, d2) : (1 == 0)
+// CHECK-DAG: [[SET_EMPTY_2D:#set[0-9]+]] = affine_set<(d0, d1) : (1 == 0)>
+// CHECK-DAG: #set1 = affine_set<(d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0, d1 + 101 >= 0)>
+// CHECK-DAG: #set2 = affine_set<(d0, d1)[s0, s1] : (1 == 0)>
+// CHECK-DAG: #set3 = affine_set<(d0, d1)[s0, s1] : (d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0, d0 * 5 - d1 * 11 + s0 * 7 + s1 == 0, d0 * 11 + d1 * 7 - s0 * 5 + s1 == 0, d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0)>
+// CHECK-DAG: [[SET_EMPTY_1D:#set[0-9]+]] = affine_set<(d0) : (1 == 0)>
+// CHECK-DAG: [[SET_EMPTY_1D_2S:#set[0-9]+]] = affine_set<(d0)[s0, s1] : (1 == 0)>
+// CHECK-DAG: [[SET_EMPTY_3D:#set[0-9]+]] = affine_set<(d0, d1, d2) : (1 == 0)>
// Set for test case: test_gaussian_elimination_non_empty_set2
-// #set2 = (d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0, d1 + 101 >= 0)
-#set2 = (d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0, d1 + 101 >= 0)
+// #set2 = affine_set<(d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0, d1 + 101 >= 0)>
+#set2 = affine_set<(d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0, d1 + 101 >= 0)>
// Set for test case: test_gaussian_elimination_empty_set3
-// #set3 = (d0, d1)[s0, s1] : (1 == 0)
-#set3 = (d0, d1)[s0, s1] : (d0 - s0 == 0, d0 + s0 == 0, s0 - 1 == 0)
+// #set3 = affine_set<(d0, d1)[s0, s1] : (1 == 0)>
+#set3 = affine_set<(d0, d1)[s0, s1] : (d0 - s0 == 0, d0 + s0 == 0, s0 - 1 == 0)>
// Set for test case: test_gaussian_elimination_non_empty_set4
-#set4 = (d0, d1)[s0, s1] : (d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0,
- d0 * 5 - d1 * 11 + s0 * 7 + s1 == 0,
- d0 * 11 + d1 * 7 - s0 * 5 + s1 == 0,
- d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0)
+#set4 = affine_set<(d0, d1)[s0, s1] : (d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0,
+ d0 * 5 - d1 * 11 + s0 * 7 + s1 == 0,
+ d0 * 11 + d1 * 7 - s0 * 5 + s1 == 0,
+ d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0)>
// Add invalid constraints to previous non-empty set to make it empty.
// Set for test case: test_gaussian_elimination_empty_set5
-#set5 = (d0, d1)[s0, s1] : (d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0,
- d0 * 5 - d1 * 11 + s0 * 7 + s1 == 0,
- d0 * 11 + d1 * 7 - s0 * 5 + s1 == 0,
- d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0,
- d0 - 1 == 0, d0 + 2 == 0)
+#set5 = affine_set<(d0, d1)[s0, s1] : (d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0,
+ d0 * 5 - d1 * 11 + s0 * 7 + s1 == 0,
+ d0 * 11 + d1 * 7 - s0 * 5 + s1 == 0,
+ d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0,
+ d0 - 1 == 0, d0 + 2 == 0)>
// This is an artificially created system to exercise the worst case behavior of
// FM elimination - as a safeguard against improperly constructed constraint
// systems or fuzz input.
-#set_fuzz_virus = (d0, d1, d2, d3, d4, d5) : ( 1089234*d0 + 203472*d1 + 82342 >= 0,
+#set_fuzz_virus = affine_set<(d0, d1, d2, d3, d4, d5) : (
+ 1089234*d0 + 203472*d1 + 82342 >= 0,
-55*d0 + 24*d1 + 238*d2 - 234*d3 - 9743 >= 0,
-5445*d0 - 284*d1 + 23*d2 + 34*d3 - 5943 >= 0,
-5445*d0 + 284*d1 + 238*d2 - 34*d3 >= 0,
@@ -63,20 +64,20 @@
2039*d0 + 793*d2 - 99*d3 - 24*d4 + 234*d5 >= 0,
78*d2 - 788*d5 + 257 >= 0,
d3 - (d5 + 97*d0) floordiv 423 >= 0,
- 234* (d0 + d3 mod 5 floordiv 2342) mod 2309
+ 234* (d0 + d3 mod 5 floordiv 2342) mod 2309
+ (d0 + 2038*d3) floordiv 208 >= 0,
- 239* (d0 + 2300 * d3) floordiv 2342
+ 239* (d0 + 2300 * d3) floordiv 2342
mod 2309 mod 239423 == 0,
- d0 + d3 mod 2642 + (d3 + 2*d0) mod 1247
+ d0 + d3 mod 2642 + (d3 + 2*d0) mod 1247
mod 2038 mod 2390 mod 2039 floordiv 55 >= 0
-)
+)>
// CHECK-LABEL: func @test_gaussian_elimination_empty_set0() {
func @test_gaussian_elimination_empty_set0() {
affine.for %arg0 = 1 to 10 {
affine.for %arg1 = 1 to 100 {
// CHECK: [[SET_EMPTY_2D]](%arg0, %arg1)
- affine.if (d0, d1) : (2 == 0)(%arg0, %arg1) {
+ affine.if affine_set<(d0, d1) : (2 == 0)>(%arg0, %arg1) {
}
}
}
@@ -88,7 +89,7 @@ func @test_gaussian_elimination_empty_set1() {
affine.for %arg0 = 1 to 10 {
affine.for %arg1 = 1 to 100 {
// CHECK: [[SET_EMPTY_2D]](%arg0, %arg1)
- affine.if (d0, d1) : (1 >= 0, -1 >= 0) (%arg0, %arg1) {
+ affine.if affine_set<(d0, d1) : (1 >= 0, -1 >= 0)> (%arg0, %arg1) {
}
}
}
@@ -166,33 +167,33 @@ func @test_empty_set(%N : index) {
affine.for %i = 0 to 10 {
affine.for %j = 0 to 10 {
// CHECK: affine.if [[SET_EMPTY_2D]](%arg1, %arg2)
- affine.if (d0, d1) : (d0 - d1 >= 0, d1 - d0 - 1 >= 0)(%i, %j) {
+ affine.if affine_set<(d0, d1) : (d0 - d1 >= 0, d1 - d0 - 1 >= 0)>(%i, %j) {
"foo"() : () -> ()
}
// CHECK: affine.if [[SET_EMPTY_1D]](%arg1)
- affine.if (d0) : (d0 >= 0, -d0 - 1 >= 0)(%i) {
+ affine.if affine_set<(d0) : (d0 >= 0, -d0 - 1 >= 0)>(%i) {
"bar"() : () -> ()
}
// CHECK: affine.if [[SET_EMPTY_1D]](%arg1)
- affine.if (d0) : (d0 >= 0, -d0 - 1 >= 0)(%i) {
+ affine.if affine_set<(d0) : (d0 >= 0, -d0 - 1 >= 0)>(%i) {
"foo"() : () -> ()
}
// CHECK: affine.if [[SET_EMPTY_1D_2S]](%arg1)[%arg0, %arg0]
- affine.if (d0)[s0, s1] : (d0 >= 0, -d0 + s0 - 1 >= 0, -s0 >= 0)(%i)[%N, %N] {
+ affine.if affine_set<(d0)[s0, s1] : (d0 >= 0, -d0 + s0 - 1 >= 0, -s0 >= 0)>(%i)[%N, %N] {
"bar"() : () -> ()
}
// CHECK: affine.if [[SET_EMPTY_3D]](%arg1, %arg2, %arg0)
// The set below implies d0 = d1; so d1 >= d0, but d0 >= d1 + 1.
- affine.if (d0, d1, d2) : (d0 - d1 == 0, d2 - d0 >= 0, d0 - d1 - 1 >= 0)(%i, %j, %N) {
+ affine.if affine_set<(d0, d1, d2) : (d0 - d1 == 0, d2 - d0 >= 0, d0 - d1 - 1 >= 0)>(%i, %j, %N) {
"foo"() : () -> ()
}
// CHECK: affine.if [[SET_EMPTY_2D]](%arg1, %arg2)
// The set below has rational solutions but no integer solutions; GCD test catches it.
- affine.if (d0, d1) : (d0*2 -d1*2 - 1 == 0, d0 >= 0, -d0 + 100 >= 0, d1 >= 0, -d1 + 100 >= 0)(%i, %j) {
+ affine.if affine_set<(d0, d1) : (d0*2 -d1*2 - 1 == 0, d0 >= 0, -d0 + 100 >= 0, d1 >= 0, -d1 + 100 >= 0)>(%i, %j) {
"foo"() : () -> ()
}
// CHECK: affine.if [[SET_EMPTY_2D]](%arg1, %arg2)
- affine.if (d0, d1) : (d1 == 0, d0 - 1 >= 0, - d0 - 1 >= 0)(%i, %j) {
+ affine.if affine_set<(d0, d1) : (d1 == 0, d0 - 1 >= 0, - d0 - 1 >= 0)>(%i, %j) {
"foo"() : () -> ()
}
}
@@ -202,12 +203,12 @@ func @test_empty_set(%N : index) {
affine.for %l = 0 to 10 {
// Empty because no multiple of 8 lies between 4 and 7.
// CHECK: affine.if [[SET_EMPTY_1D]](%arg1)
- affine.if (d0) : (8*d0 - 4 >= 0, -8*d0 + 7 >= 0)(%k) {
+ affine.if affine_set<(d0) : (8*d0 - 4 >= 0, -8*d0 + 7 >= 0)>(%k) {
"foo"() : () -> ()
}
// Same as above but with equalities and inequalities.
// CHECK: affine.if [[SET_EMPTY_2D]](%arg1, %arg2)
- affine.if (d0, d1) : (d0 - 4*d1 == 0, 4*d1 - 5 >= 0, -4*d1 + 7 >= 0)(%k, %l) {
+ affine.if affine_set<(d0, d1) : (d0 - 4*d1 == 0, 4*d1 - 5 >= 0, -4*d1 + 7 >= 0)>(%k, %l) {
"foo"() : () -> ()
}
// Same as above but with a combination of multiple identifiers. 4*d0 +
@@ -215,12 +216,12 @@ func @test_empty_set(%N : index) {
// tightening will tighten constraints to 4*d0 + 8*d1 >= 12 and 4*d0 +
// 8*d1 <= 8; hence infeasible.
// CHECK: affine.if [[SET_EMPTY_2D]](%arg1, %arg2)
- affine.if (d0, d1) : (4*d0 + 8*d1 - 9 >= 0, -4*d0 - 8*d1 + 11 >= 0)(%k, %l) {
+ affine.if affine_set<(d0, d1) : (4*d0 + 8*d1 - 9 >= 0, -4*d0 - 8*d1 + 11 >= 0)>(%k, %l) {
"foo"() : () -> ()
}
// Same as above but with equalities added into the mix.
// CHECK: affine.if [[SET_EMPTY_3D]](%arg1, %arg1, %arg2)
- affine.if (d0, d1, d2) : (d0 - 4*d2 == 0, d0 + 8*d1 - 9 >= 0, -d0 - 8*d1 + 11 >= 0)(%k, %k, %l) {
+ affine.if affine_set<(d0, d1, d2) : (d0 - 4*d2 == 0, d0 + 8*d1 - 9 >= 0, -d0 - 8*d1 + 11 >= 0)>(%k, %k, %l) {
"foo"() : () -> ()
}
}
@@ -228,7 +229,7 @@ func @test_empty_set(%N : index) {
affine.for %m = 0 to 10 {
// CHECK: affine.if [[SET_EMPTY_1D]](%arg{{[0-9]+}})
- affine.if (d0) : (d0 mod 2 - 3 == 0) (%m) {
+ affine.if affine_set<(d0) : (d0 mod 2 - 3 == 0)> (%m) {
"foo"() : () -> ()
}
}
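Integer sets get the analogous treatment with the affine_set keyword, whether defined as a named alias or written inline on an affine.if. A minimal sketch with illustrative set, op, and function names:

  #nonneg = affine_set<(d0)[s0] : (d0 >= 0, s0 - d0 - 1 >= 0)>

  func @set_syntax_sketch(%N : index) {
    affine.for %i = 0 to 10 {
      // Referencing a named set alias.
      affine.if #nonneg(%i)[%N] {
        "foo"() : () -> ()
      }
      // Writing the set inline, wrapped in the new keyword.
      affine.if affine_set<(d0) : (d0 mod 2 == 0)>(%i) {
        "bar"() : () -> ()
      }
    }
    return
  }
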
diff --git a/mlir/test/Transforms/slicing-utils.mlir b/mlir/test/Transforms/slicing-utils.mlir
index 8c6fb01e1f7..145695db5fb 100644
--- a/mlir/test/Transforms/slicing-utils.mlir
+++ b/mlir/test/Transforms/slicing-utils.mlir
@@ -225,7 +225,7 @@ func @slicing_test_2() {
%c2 = constant 2 : index
%c16 = constant 16 : index
affine.for %i0 = %c0 to %c16 {
- affine.for %i1 = (i)[] -> (i)(%i0) to 10 {
+ affine.for %i1 = affine_map<(i)[] -> (i)>(%i0) to 10 {
// BWD: matched: %[[b:.*]] {{.*}} backward static slice:
// BWD: affine.for {{.*}}
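Loop bounds given by maps, as in the slicing test above, also switch to the wrapped form; a brief sketch (function name illustrative):

  func @bound_map_sketch() {
    affine.for %i0 = 0 to 8 {
      // Both lower and upper bounds may be map applications on the outer IV.
      affine.for %i1 = affine_map<(d0) -> (d0)>(%i0) to affine_map<(d0) -> (d0 + 4)>(%i0) {
        "foo"() : () -> ()
      }
    }
    return
  }
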
diff --git a/mlir/test/Transforms/strip-debuginfo.mlir b/mlir/test/Transforms/strip-debuginfo.mlir
index 9c746fa9c09..49bb22052f9 100644
--- a/mlir/test/Transforms/strip-debuginfo.mlir
+++ b/mlir/test/Transforms/strip-debuginfo.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -mlir-print-debuginfo -strip-debuginfo | FileCheck %s
// This test verifies that debug locations are stripped.
-#set0 = (d0) : (1 == 0)
+#set0 = affine_set<(d0) : (1 == 0)>
// CHECK-LABEL: func @inline_notation
func @inline_notation() -> i32 {
diff --git a/mlir/test/Transforms/unroll-jam.mlir b/mlir/test/Transforms/unroll-jam.mlir
index be46548f652..60de5f17508 100644
--- a/mlir/test/Transforms/unroll-jam.mlir
+++ b/mlir/test/Transforms/unroll-jam.mlir
@@ -1,15 +1,15 @@
// RUN: mlir-opt %s -affine-loop-unroll-jam -unroll-jam-factor=2 | FileCheck %s
// RUN: mlir-opt %s -affine-loop-unroll-jam -unroll-jam-factor=4 | FileCheck --check-prefix=UJAM-FOUR %s
-// CHECK-DAG: [[MAP_PLUS_1:#map[0-9]+]] = (d0) -> (d0 + 1)
-// CHECK-DAG: [[MAP_DIV_OFFSET:#map[0-9]+]] = ()[s0] -> (((s0 - 1) floordiv 2) * 2 + 1)
-// CHECK-DAG: [[MAP_MULTI_RES:#map[0-9]+]] = ()[s0, s1] -> ((s0 floordiv 2) * 2, (s1 floordiv 2) * 2, 1024)
-// CHECK-DAG: [[MAP_SYM_UB:#map[0-9]+]] = ()[s0, s1] -> (s0, s1, 1024)
+// CHECK-DAG: [[MAP_PLUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// CHECK-DAG: [[MAP_DIV_OFFSET:#map[0-9]+]] = affine_map<()[s0] -> (((s0 - 1) floordiv 2) * 2 + 1)>
+// CHECK-DAG: [[MAP_MULTI_RES:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s0 floordiv 2) * 2, (s1 floordiv 2) * 2, 1024)>
+// CHECK-DAG: [[MAP_SYM_UB:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0, s1, 1024)>
-// UJAM-FOUR-DAG: [[UBMAP:#map[0-9]+]] = ()[s0] -> (s0 + 8)
-// UJAM-FOUR-DAG: [[MAP_PLUS_1:#map[0-9]+]] = (d0) -> (d0 + 1)
-// UJAM-FOUR-DAG: [[MAP_PLUS_2:#map[0-9]+]] = (d0) -> (d0 + 2)
-// UJAM-FOUR-DAG: [[MAP_PLUS_3:#map[0-9]+]] = (d0) -> (d0 + 3)
+// UJAM-FOUR-DAG: [[UBMAP:#map[0-9]+]] = affine_map<()[s0] -> (s0 + 8)>
+// UJAM-FOUR-DAG: [[MAP_PLUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// UJAM-FOUR-DAG: [[MAP_PLUS_2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
+// UJAM-FOUR-DAG: [[MAP_PLUS_3:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
// CHECK-LABEL: func @unroll_jam_imperfect_nest() {
func @unroll_jam_imperfect_nest() {
@@ -89,7 +89,7 @@ func @loop_nest_unknown_count_2(%N : index) {
// UJAM-FOUR-NEXT: affine.for [[IV0]] = 1 to 100 {
// UJAM-FOUR-NEXT: "foo"([[RES]])
// UJAM-FOUR-NEXT: }
- affine.for %i = %N to ()[s0] -> (s0+9) ()[%N] {
+ affine.for %i = %N to affine_map<()[s0] -> (s0+9)> ()[%N] {
affine.for %j = 1 to 100 {
"foo"(%i) : (index) -> ()
}
@@ -102,7 +102,7 @@ func @loop_nest_unknown_count_2(%N : index) {
// CHECK-SAME: [[N:arg[0-9]+]]: index
// CHECK-SAME: [[K:arg[0-9]+]]: index
func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index) {
- affine.for %i = 0 to min ()[s0, s1] -> (s0, s1, 1024)()[%M, %N] {
+ affine.for %i = 0 to min affine_map<()[s0, s1] -> (s0, s1, 1024)>()[%M, %N] {
affine.for %j = 0 to %K {
"foo"(%i, %j) : (index, index) -> ()
}
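Multi-result bound maps under min/max keep their operand lists after the closing angle bracket, as in the unroll-jam test above; a condensed sketch with illustrative names:

  func @min_bound_sketch(%M : index, %N : index) {
    // The upper bound is the minimum over the three map results.
    affine.for %i = 0 to min affine_map<()[s0, s1] -> (s0, s1, 1024)>()[%M, %N] {
      "foo"() : () -> ()
    }
    return
  }
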
diff --git a/mlir/test/Transforms/unroll.mlir b/mlir/test/Transforms/unroll.mlir
index da2a5e59bc9..3414aa1e0d5 100644
--- a/mlir/test/Transforms/unroll.mlir
+++ b/mlir/test/Transforms/unroll.mlir
@@ -3,25 +3,25 @@
// RUN: mlir-opt %s -affine-loop-unroll -unroll-factor=4 | FileCheck %s --check-prefix UNROLL-BY-4
// RUN: mlir-opt %s -affine-loop-unroll -unroll-factor=1 | FileCheck %s --check-prefix UNROLL-BY-1
-// UNROLL-FULL-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 + 1)
-// UNROLL-FULL-DAG: [[MAP1:#map[0-9]+]] = (d0) -> (d0 + 2)
-// UNROLL-FULL-DAG: [[MAP2:#map[0-9]+]] = (d0) -> (d0 + 3)
-// UNROLL-FULL-DAG: [[MAP3:#map[0-9]+]] = (d0) -> (d0 + 4)
-// UNROLL-FULL-DAG: [[MAP4:#map[0-9]+]] = (d0, d1) -> (d0 + 1)
-// UNROLL-FULL-DAG: [[MAP5:#map[0-9]+]] = (d0, d1) -> (d0 + 3)
-// UNROLL-FULL-DAG: [[MAP6:#map[0-9]+]] = (d0)[s0] -> (d0 + s0 + 1)
-
-// SHORT-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 + 1)
-
-// UNROLL-BY-4-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 + 1)
-// UNROLL-BY-4-DAG: [[MAP1:#map[0-9]+]] = (d0) -> (d0 + 2)
-// UNROLL-BY-4-DAG: [[MAP2:#map[0-9]+]] = (d0) -> (d0 + 3)
-// UNROLL-BY-4-DAG: [[MAP3:#map[0-9]+]] = (d0, d1) -> (d0 + 1)
-// UNROLL-BY-4-DAG: [[MAP4:#map[0-9]+]] = (d0, d1) -> (d0 + 3)
-// UNROLL-BY-4-DAG: [[MAP5:#map[0-9]+]] = (d0)[s0] -> (d0 + s0 + 1)
-// UNROLL-BY-4-DAG: [[MAP6:#map[0-9]+]] = (d0, d1) -> (d0 * 16 + d1)
-// UNROLL-BY-4-DAG: [[MAP11:#map[0-9]+]] = (d0) -> (d0)
-// UNROLL-BY-4-DAG: [[MAP_TRIP_COUNT_MULTIPLE_FOUR:#map[0-9]+]] = ()[s0, s1, s2] -> (s0 + ((-s0 + s1) floordiv 4) * 4, s0 + ((-s0 + s2) floordiv 4) * 4, s0 + ((-s0) floordiv 4) * 4 + 1024)
+// UNROLL-FULL-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// UNROLL-FULL-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
+// UNROLL-FULL-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// UNROLL-FULL-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0) -> (d0 + 4)>
+// UNROLL-FULL-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
+// UNROLL-FULL-DAG: [[MAP5:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3)>
+// UNROLL-FULL-DAG: [[MAP6:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0 + 1)>
+
+// SHORT-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+
+// UNROLL-BY-4-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// UNROLL-BY-4-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
+// UNROLL-BY-4-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// UNROLL-BY-4-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
+// UNROLL-BY-4-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3)>
+// UNROLL-BY-4-DAG: [[MAP5:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0 + 1)>
+// UNROLL-BY-4-DAG: [[MAP6:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
+// UNROLL-BY-4-DAG: [[MAP11:#map[0-9]+]] = affine_map<(d0) -> (d0)>
+// UNROLL-BY-4-DAG: [[MAP_TRIP_COUNT_MULTIPLE_FOUR:#map[0-9]+]] = affine_map<()[s0, s1, s2] -> (s0 + ((-s0 + s1) floordiv 4) * 4, s0 + ((-s0 + s2) floordiv 4) * 4, s0 + ((-s0) floordiv 4) * 4 + 1024)>
// UNROLL-FULL-LABEL: func @loop_nest_simplest() {
func @loop_nest_simplest() {
@@ -77,7 +77,7 @@ func @loop_nest_body_def_use() {
// UNROLL-FULL-NEXT: %9 = affine.apply [[MAP0]](%8)
// UNROLL-FULL-NEXT: %10 = "addi32"(%9, %c0_0) : (index, index) -> index
affine.for %j = 0 to 4 {
- %x = "affine.apply" (%j) { map = (d0) -> (d0 + 1) } :
+ %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
%y = "addi32"(%x, %c0) : (index, index) -> index
}
@@ -97,7 +97,7 @@ func @loop_nest_strided() {
// UNROLL-FULL-NEXT: %3 = affine.apply [[MAP0]](%2)
// UNROLL-FULL-NEXT: %4 = "addi32"(%3, %3) : (index, index) -> index
affine.for %j = 2 to 6 step 2 {
- %x = "affine.apply" (%j) { map = (d0) -> (d0 + 1) } :
+ %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
%y = "addi32"(%x, %x) : (index, index) -> index
}
@@ -110,7 +110,7 @@ func @loop_nest_strided() {
// UNROLL-FULL-NEXT: %11 = affine.apply [[MAP0]](%10)
// UNROLL-FULL-NEXT: %12 = "addi32"(%11, %11) : (index, index) -> index
affine.for %k = 2 to 7 step 2 {
- %z = "affine.apply" (%k) { map = (d0) -> (d0 + 1) } :
+ %z = "affine.apply" (%k) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
%w = "addi32"(%z, %z) : (index, index) -> index
}
@@ -133,9 +133,9 @@ func @loop_nest_multiple_results() {
// UNROLL-FULL-NEXT: %7 = affine.apply #map{{.*}}(%arg0, %4)
// UNROLL-FULL-NEXT: %8:2 = "fma"(%7, %5, %5) : (index, index, index) -> (index, index)
affine.for %j = 0 to 2 step 1 {
- %x = affine.apply (d0, d1) -> (d0 + 1) (%i, %j)
+ %x = affine.apply affine_map<(d0, d1) -> (d0 + 1)> (%i, %j)
%y = "addi32"(%x, %x) : (index, index) -> index
- %z = affine.apply (d0, d1) -> (d0 + 3) (%i, %j)
+ %z = affine.apply affine_map<(d0, d1) -> (d0 + 3)> (%i, %j)
%w:2 = "fma"(%z, %x, %x) : (index, index, index) -> (index, index)
}
} // UNROLL-FULL: }
@@ -169,7 +169,7 @@ func @loop_nest_seq_imperfect(%a : memref<128x128xf32>) {
// UNROLL-FULL-NEXT: %14 = "vmulf"(%12, %13) : (index, index) -> index
// UNROLL-FULL-NEXT: %15 = "vaddf"(%14, %14) : (index, index) -> index
affine.for %j = 0 to 4 {
- %x = "affine.apply" (%j) { map = (d0) -> (d0 + 1) } :
+ %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
%y = "vmulf"(%j, %x) : (index, index) -> index
%z = "vaddf"(%y, %y) : (index, index) -> index
@@ -198,7 +198,7 @@ func @loop_nest_seq_multiple() {
// UNROLL-FULL-NEXT: %6 = affine.apply [[MAP0]](%5)
// UNROLL-FULL-NEXT: "mul"(%6, %6) : (index, index) -> ()
affine.for %j = 0 to 4 {
- %x = "affine.apply" (%j) { map = (d0) -> (d0 + 1) } :
+ %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
"mul"(%x, %x) : (index, index) -> ()
}
@@ -219,9 +219,9 @@ func @loop_nest_seq_multiple() {
// UNROLL-FULL-NEXT: %16 = affine.apply [[MAP0]](%15)
// UNROLL-FULL-NEXT: %17 = affine.apply [[MAP6]](%15)[%c99]
affine.for %n = 0 to 4 {
- %y = "affine.apply" (%n) { map = (d0) -> (d0 + 1) } :
+ %y = "affine.apply" (%n) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
- %z = "affine.apply" (%n, %k) { map = (d0) [s0] -> (d0 + s0 + 1) } :
+ %z = "affine.apply" (%n, %k) { map = affine_map<(d0) [s0] -> (d0 + s0 + 1)> } :
(index, index) -> (index)
} // UNROLL-FULL }
} // UNROLL-FULL }
@@ -252,7 +252,7 @@ func @loop_nest_outer_unroll() {
// SHORT-NEXT: }
affine.for %i = 0 to 2 {
affine.for %j = 0 to 4 {
- %x = "affine.apply" (%j) { map = (d0) -> (d0 + 1) } :
+ %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
(index) -> (index)
%y = "addi32"(%x, %x) : (index, index) -> index
}
@@ -265,9 +265,9 @@ func @loop_nest_outer_unroll() {
// count threshold set to 2.
// SHORT-LABEL: func @loop_nest_seq_long() -> i32 {
func @loop_nest_seq_long() -> i32 {
- %A = alloc() : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
- %B = alloc() : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
- %C = alloc() : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
+ %A = alloc() : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
+ %B = alloc() : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
+ %C = alloc() : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
%zero = constant 0 : i32
%one = constant 1 : i32
@@ -279,9 +279,9 @@ func @loop_nest_seq_long() -> i32 {
affine.for %n0 = 0 to 512 {
// CHECK: affine.for %arg1 = 0 to 8
affine.for %n1 = 0 to 8 {
- store %one, %A[%n0, %n1] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
- store %two, %B[%n0, %n1] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
- store %zero, %C[%n0, %n1] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
+ store %one, %A[%n0, %n1] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
+ store %two, %B[%n0, %n1] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
+ store %zero, %C[%n0, %n1] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
}
}
@@ -291,28 +291,28 @@ func @loop_nest_seq_long() -> i32 {
affine.for %arg2 = 0 to 8 {
// CHECK-NOT: affine.for
// CHECK: %{{[0-9]+}} = affine.apply
- %b2 = "affine.apply" (%y, %arg2) {map = (d0, d1) -> (16*d0 + d1)} : (index, index) -> index
- %z = load %B[%x, %b2] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
+ %b2 = "affine.apply" (%y, %arg2) {map = affine_map<(d0, d1) -> (16*d0 + d1)>} : (index, index) -> index
+ %z = load %B[%x, %b2] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
"op1"(%z) : (i32) -> ()
}
affine.for %j1 = 0 to 8 {
affine.for %j2 = 0 to 8 {
- %a2 = "affine.apply" (%y, %j2) {map = (d0, d1) -> (16*d0 + d1)} : (index, index) -> index
- %v203 = load %A[%j1, %a2] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
+ %a2 = "affine.apply" (%y, %j2) {map = affine_map<(d0, d1) -> (16*d0 + d1)>} : (index, index) -> index
+ %v203 = load %A[%j1, %a2] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
"op2"(%v203) : (i32) -> ()
}
affine.for %k2 = 0 to 8 {
%s0 = "op3"() : () -> i32
- %c2 = "affine.apply" (%x, %k2) {map = (d0, d1) -> (16*d0 + d1)} : (index, index) -> index
- %s1 = load %C[%j1, %c2] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
+ %c2 = "affine.apply" (%x, %k2) {map = affine_map<(d0, d1) -> (16*d0 + d1)>} : (index, index) -> index
+ %s1 = load %C[%j1, %c2] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
%s2 = "addi32"(%s0, %s1) : (i32, i32) -> i32
- store %s2, %C[%j1, %c2] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
+ store %s2, %C[%j1, %c2] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
}
}
"op4"() : () -> ()
}
}
- %ret = load %C[%zero_idx, %zero_idx] : memref<512 x 512 x i32, (d0, d1) -> (d0, d1), 2>
+ %ret = load %C[%zero_idx, %zero_idx] : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
return %ret : i32
}
@@ -441,7 +441,7 @@ func @loop_nest_operand1() {
// UNROLL-BY-4-NEXT: }
// UNROLL-BY-4-NEXT: return
affine.for %i = 0 to 100 step 2 {
- affine.for %j = 0 to (d0) -> (d0 - d0 mod 4) (%i) {
+ affine.for %j = 0 to affine_map<(d0) -> (d0 - d0 mod 4)> (%i) {
%x = "foo"() : () -> i32
}
}
@@ -461,7 +461,7 @@ func @loop_nest_operand2() {
// UNROLL-BY-4-NEXT: }
// UNROLL-BY-4-NEXT: return
affine.for %i = 0 to 100 step 2 {
- affine.for %j = (d0) -> (d0) (%i) to (d0) -> (5*d0 + 4) (%i) {
+ affine.for %j = affine_map<(d0) -> (d0)> (%i) to affine_map<(d0) -> (5*d0 + 4)> (%i) {
%x = "foo"() : () -> i32
}
}
@@ -481,7 +481,7 @@ func @loop_nest_operand3() {
// UNROLL-BY-4-NEXT: %4 = "foo"() : () -> i32
// UNROLL-BY-4-NEXT: }
// UNROLL-BY-4-NEXT: %0 = "foo"() : () -> i32
- affine.for %j = (d0) -> (d0) (%i) to (d0) -> (d0 + 9) (%i) {
+ affine.for %j = affine_map<(d0) -> (d0)> (%i) to affine_map<(d0) -> (d0 + 9)> (%i) {
%x = "foo"() : () -> i32
}
} // UNROLL-BY-4: }
@@ -533,7 +533,7 @@ func @loop_nest_symbolic_bound_with_step(%N : index) {
// UNROLL-BY-4-LABEL: func @loop_nest_symbolic_and_min_upper_bound
func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index) {
- affine.for %i = %M to min ()[s0, s1] -> (s0, s1, 1024)()[%N, %K] {
+ affine.for %i = %M to min affine_map<()[s0, s1] -> (s0, s1, 1024)>()[%N, %K] {
"foo"() : () -> ()
}
return
@@ -553,9 +553,9 @@ func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index)
// through composition. Check for no cleanup loop.
// UNROLL-BY-4-LABEL: func @loop_nest_non_trivial_multiple_upper_bound
func @loop_nest_non_trivial_multiple_upper_bound(%M : index, %N : index) {
- %T = affine.apply (d0) -> (4*d0 + 1)(%M)
- %K = affine.apply (d0) -> (d0 - 1) (%T)
- affine.for %i = 0 to min (d0, d1) -> (4 * d0, d1, 1024)(%N, %K) {
+ %T = affine.apply affine_map<(d0) -> (4*d0 + 1)>(%M)
+ %K = affine.apply affine_map<(d0) -> (d0 - 1)> (%T)
+ affine.for %i = 0 to min affine_map<(d0, d1) -> (4 * d0, d1, 1024)>(%N, %K) {
"foo"() : () -> ()
}
return
@@ -566,8 +566,8 @@ func @loop_nest_non_trivial_multiple_upper_bound(%M : index, %N : index) {
// UNROLL-BY-4-LABEL: func @loop_nest_non_trivial_multiple_upper_bound_alt
func @loop_nest_non_trivial_multiple_upper_bound_alt(%M : index, %N : index) {
- %K = affine.apply (d0) -> (4*d0) (%M)
- affine.for %i = 0 to min ()[s0, s1] -> (4 * s0, s1, 1024)()[%N, %K] {
+ %K = affine.apply affine_map<(d0) -> (4*d0)> (%M)
+ affine.for %i = 0 to min affine_map<()[s0, s1] -> (4 * s0, s1, 1024)>()[%N, %K] {
"foo"() : () -> ()
}
// UNROLL-BY-4: affine.for %arg2 = 0 to min
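In the generic operation form used by several unroll tests, the map appears as an attribute value and takes the same keyword; a minimal sketch (function name illustrative):

  func @generic_form_sketch(%j : index) {
    // Generic form: the map is an explicit attribute on the op.
    %x = "affine.apply"(%j) { map = affine_map<(d0) -> (d0 + 1)> } : (index) -> index
    return
  }
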
diff --git a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
index 4fce008ae82..306d86f0373 100644
--- a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
+++ b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
@@ -5,8 +5,8 @@
// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,4 -linalg-promote-subviews -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
// RUN: mlir-opt %s -linalg-tile -linalg-tile-sizes=2,3,4 -linalg-promote-subviews -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
-#strided1D = (d0) -> (d0)
-#strided2D = (d0, d1)[s0] -> (d0 * s0 + d1)
+#strided1D = affine_map<(d0) -> (d0)>
+#strided2D = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
// Creates and returns a 1-D buffer of size %s filled with the value %f
func @alloc_filled_f32(%s : index, %f : f32) -> memref<?xi8> {