summaryrefslogtreecommitdiffstats
path: root/mlir/test/Transforms/Vectorize
diff options
context:
space:
mode:
authorRiver Riddle <riverriddle@google.com>2019-02-06 11:58:03 -0800
committerjpienaar <jpienaar@google.com>2019-03-29 16:17:59 -0700
commit90d10b4e00cc6397a03ddc981b7be8bab43a9f38 (patch)
treed7e05dace26da1d29ba08dbb279701d4c9ae3250 /mlir/test/Transforms/Vectorize
parent905d84851ddfa9463f872a215a6cb0ad3b7c3894 (diff)
downloadbcm5719-llvm-90d10b4e00cc6397a03ddc981b7be8bab43a9f38.tar.gz
bcm5719-llvm-90d10b4e00cc6397a03ddc981b7be8bab43a9f38.zip
NFC: Rename the 'for' operation in the AffineOps dialect to 'affine.for'. This is the second step to adding a namespace to the AffineOps dialect.
PiperOrigin-RevId: 232717775
Diffstat (limited to 'mlir/test/Transforms/Vectorize')
-rw-r--r--mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir58
-rw-r--r--mlir/test/Transforms/Vectorize/materialize.mlir16
-rw-r--r--mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir24
-rw-r--r--mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir24
-rw-r--r--mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir24
-rw-r--r--mlir/test/Transforms/Vectorize/normalize_maps.mlir24
-rw-r--r--mlir/test/Transforms/Vectorize/vectorize_1d.mlir62
-rw-r--r--mlir/test/Transforms/Vectorize/vectorize_2d.mlir30
-rw-r--r--mlir/test/Transforms/Vectorize/vectorize_3d.mlir20
-rw-r--r--mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir18
-rw-r--r--mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir42
-rw-r--r--mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir42
12 files changed, 192 insertions, 192 deletions
diff --git a/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir b/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir
index b82ac08fe59..e896e0588d3 100644
--- a/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir
+++ b/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir
@@ -6,8 +6,8 @@
// CHECK-LABEL: func @materialize_read_1d() {
func @materialize_read_1d() {
%A = alloc () : memref<7x42xf32>
- for %i0 = 0 to 7 step 4 {
- for %i1 = 0 to 42 step 4 {
+ affine.for %i0 = 0 to 7 step 4 {
+ affine.for %i1 = 0 to 42 step 4 {
%f1 = vector_transfer_read %A, %i0, %i1 {permutation_map: (d0, d1) -> (d0)} : (memref<7x42xf32>, index, index) -> vector<4xf32>
%ip1 = affine.apply (d0) -> (d0 + 1) (%i1)
%f2 = vector_transfer_read %A, %i0, %ip1 {permutation_map: (d0, d1) -> (d0)} : (memref<7x42xf32>, index, index) -> vector<4xf32>
@@ -29,11 +29,11 @@ func @materialize_read_1d() {
// CHECK-LABEL: func @materialize_read_1d_partially_specialized
func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %dyn4 : index) {
%A = alloc (%dyn1, %dyn2, %dyn4) : memref<7x?x?x42x?xf32>
- for %i0 = 0 to 7 {
- for %i1 = 0 to %dyn1 {
- for %i2 = 0 to %dyn2 {
- for %i3 = 0 to 42 step 2 {
- for %i4 = 0 to %dyn4 {
+ affine.for %i0 = 0 to 7 {
+ affine.for %i1 = 0 to %dyn1 {
+ affine.for %i2 = 0 to %dyn2 {
+ affine.for %i3 = 0 to 42 step 2 {
+ affine.for %i4 = 0 to %dyn4 {
%f1 = vector_transfer_read %A, %i0, %i1, %i2, %i3, %i4 {permutation_map: (d0, d1, d2, d3, d4) -> (d3)} : ( memref<7x?x?x42x?xf32>, index, index, index, index, index) -> vector<4xf32>
%i3p1 = affine.apply (d0) -> (d0 + 1) (%i3)
%f2 = vector_transfer_read %A, %i0, %i1, %i2, %i3p1, %i4 {permutation_map: (d0, d1, d2, d3, d4) -> (d3)} : ( memref<7x?x?x42x?xf32>, index, index, index, index, index) -> vector<4xf32>
@@ -54,10 +54,10 @@ func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %d
// CHECK-LABEL: func @materialize_read(%arg0: index, %arg1: index, %arg2: index, %arg3: index) {
func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
// CHECK-NEXT: %0 = alloc(%arg0, %arg1, %arg2, %arg3) : memref<?x?x?x?xf32>
- // CHECK-NEXT: for %[[I0:.*]] = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %[[I1:.*]] = 0 to %arg1 {
- // CHECK-NEXT: for %[[I2:.*]] = 0 to %arg2 {
- // CHECK-NEXT: for %[[I3:.*]] = 0 to %arg3 step 5 {
+ // CHECK-NEXT: affine.for %[[I0:.*]] = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %arg1 {
+ // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %arg2 {
+ // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %arg3 step 5 {
// CHECK-NEXT: %[[C0:.*]] = constant 0 : index
// CHECK-NEXT: %[[C1:.*]] = constant 1 : index
// CHECK: {{.*}} = dim %0, 0 : memref<?x?x?x?xf32>
@@ -66,9 +66,9 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
// CHECK-NEXT: {{.*}} = dim %0, 3 : memref<?x?x?x?xf32>
// CHECK: %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
// CHECK-NEXT: %[[VECTOR_VIEW:.*]] = vector_type_cast %[[ALLOC]] : memref<5x4x3xf32>, memref<1xvector<5x4x3xf32>>
- // CHECK-NEXT: for %[[I4:.*]] = 0 to 3 {
- // CHECK-NEXT: for %[[I5:.*]] = 0 to 4 {
- // CHECK-NEXT: for %[[I6:.*]] = 0 to 5 {
+ // CHECK-NEXT: affine.for %[[I4:.*]] = 0 to 3 {
+ // CHECK-NEXT: affine.for %[[I5:.*]] = 0 to 4 {
+ // CHECK-NEXT: affine.for %[[I6:.*]] = 0 to 5 {
// CHECK-NEXT: {{.*}} = affine.apply #[[ADD]]
// CHECK-NEXT: {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
// CHECK-NEXT: {{.*}} = affine.apply #[[ADD]]
@@ -109,10 +109,10 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
// CHECK-NEXT: return
// CHECK-NEXT:}
%A = alloc (%M, %N, %O, %P) : memref<?x?x?x?xf32, 0>
- for %i0 = 0 to %M step 3 {
- for %i1 = 0 to %N {
- for %i2 = 0 to %O {
- for %i3 = 0 to %P step 5 {
+ affine.for %i0 = 0 to %M step 3 {
+ affine.for %i1 = 0 to %N {
+ affine.for %i2 = 0 to %O {
+ affine.for %i3 = 0 to %P step 5 {
%f = vector_transfer_read %A, %i0, %i1, %i2, %i3 {permutation_map: (d0, d1, d2, d3) -> (d3, 0, d0)} : (memref<?x?x?x?xf32, 0>, index, index, index, index) -> vector<5x4x3xf32>
}
}
@@ -125,10 +125,10 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
// CHECK-NEXT: %0 = alloc(%arg0, %arg1, %arg2, %arg3) : memref<?x?x?x?xf32>
// CHECK-NEXT: %cst = constant splat<vector<5x4x3xf32>, 1.000000e+00> : vector<5x4x3xf32>
- // CHECK-NEXT: for %[[I0:.*]] = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %[[I1:.*]] = 0 to %arg1 step 4 {
- // CHECK-NEXT: for %[[I2:.*]] = 0 to %arg2 {
- // CHECK-NEXT: for %[[I3:.*]] = 0 to %arg3 step 5 {
+ // CHECK-NEXT: affine.for %[[I0:.*]] = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %arg1 step 4 {
+ // CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %arg2 {
+ // CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %arg3 step 5 {
// CHECK-NEXT: %[[C0:.*]] = constant 0 : index
// CHECK-NEXT: %[[C1:.*]] = constant 1 : index
// CHECK: {{.*}} = dim %0, 0 : memref<?x?x?x?xf32>
@@ -138,9 +138,9 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
// CHECK: %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
// CHECK-NEXT: %[[VECTOR_VIEW:.*]] = vector_type_cast {{.*}} : memref<5x4x3xf32>, memref<1xvector<5x4x3xf32>>
// CHECK-NEXT: store %cst, {{.*}}[%[[C0]]] : memref<1xvector<5x4x3xf32>>
- // CHECK-NEXT: for %[[I4:.*]] = 0 to 3 {
- // CHECK-NEXT: for %[[I5:.*]] = 0 to 4 {
- // CHECK-NEXT: for %[[I6:.*]] = 0 to 5 {
+ // CHECK-NEXT: affine.for %[[I4:.*]] = 0 to 3 {
+ // CHECK-NEXT: affine.for %[[I5:.*]] = 0 to 4 {
+ // CHECK-NEXT: affine.for %[[I6:.*]] = 0 to 5 {
// CHECK-NEXT: {{.*}} = load {{.*}}[%[[I6]], %[[I5]], %[[I4]]] : memref<5x4x3xf32>
// CHECK-NEXT: {{.*}} = affine.apply #[[ADD]](%[[I0]], %[[I4]])
// CHECK-NEXT: {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
@@ -184,10 +184,10 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
// CHECK-NEXT:}
%A = alloc (%M, %N, %O, %P) : memref<?x?x?x?xf32, 0>
%f1 = constant splat<vector<5x4x3xf32>, 1.000000e+00> : vector<5x4x3xf32>
- for %i0 = 0 to %M step 3 {
- for %i1 = 0 to %N step 4 {
- for %i2 = 0 to %O {
- for %i3 = 0 to %P step 5 {
+ affine.for %i0 = 0 to %M step 3 {
+ affine.for %i1 = 0 to %N step 4 {
+ affine.for %i2 = 0 to %O {
+ affine.for %i3 = 0 to %P step 5 {
vector_transfer_write %f1, %A, %i0, %i1, %i2, %i3 {permutation_map: (d0, d1, d2, d3) -> (d3, d1, d0)} : vector<5x4x3xf32>, memref<?x?x?x?xf32, 0>, index, index, index, index
}
}
diff --git a/mlir/test/Transforms/Vectorize/materialize.mlir b/mlir/test/Transforms/Vectorize/materialize.mlir
index 80458c75333..ce445ec75bb 100644
--- a/mlir/test/Transforms/Vectorize/materialize.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize.mlir
@@ -10,10 +10,10 @@
func @materialize(%M : index, %N : index, %O : index, %P : index) {
%A = alloc (%M, %N, %O, %P) : memref<?x?x?x?xf32, 0>
%f1 = constant splat<vector<4x4x4xf32>, 1.000000e+00> : vector<4x4x4xf32>
- // CHECK: for %i0 = 0 to %arg0 step 4 {
- // CHECK-NEXT: for %i1 = 0 to %arg1 step 4 {
- // CHECK-NEXT: for %i2 = 0 to %arg2 {
- // CHECK-NEXT: for %i3 = 0 to %arg3 step 4 {
+ // CHECK: affine.for %i0 = 0 to %arg0 step 4 {
+ // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 4 {
+ // CHECK-NEXT: affine.for %i2 = 0 to %arg2 {
+ // CHECK-NEXT: affine.for %i3 = 0 to %arg3 step 4 {
// CHECK-NEXT: %[[a:[0-9]+]] = {{.*}}[[ID1]](%i0)
// CHECK-NEXT: %[[b:[0-9]+]] = {{.*}}[[ID1]](%i1)
// CHECK-NEXT: %[[c:[0-9]+]] = {{.*}}[[ID1]](%i2)
@@ -25,10 +25,10 @@ func @materialize(%M : index, %N : index, %O : index, %P : index) {
// CHECK: vector_transfer_write {{.*}}, %0, {{.*}}, %[[b2]], {{.*}} {permutation_map: #[[D0D1D2D3TOD1D0]]} : vector<4x4xf32>, memref<?x?x?x?xf32>, index, index, index, index
// CHECK: %[[b3:[0-9]+]] = {{.*}}[[D0P3]](%i1)
// CHECK: vector_transfer_write {{.*}}, %0, {{.*}}, %[[b3]], {{.*}} {permutation_map: #[[D0D1D2D3TOD1D0]]} : vector<4x4xf32>, memref<?x?x?x?xf32>, index, index, index, index
- for %i0 = 0 to %M step 4 {
- for %i1 = 0 to %N step 4 {
- for %i2 = 0 to %O {
- for %i3 = 0 to %P step 4 {
+ affine.for %i0 = 0 to %M step 4 {
+ affine.for %i1 = 0 to %N step 4 {
+ affine.for %i2 = 0 to %O {
+ affine.for %i3 = 0 to %P step 4 {
"vector_transfer_write"(%f1, %A, %i0, %i1, %i2, %i3) {permutation_map: (d0, d1, d2, d3) -> (d3, d1, d0)} : (vector<4x4x4xf32>, memref<?x?x?x?xf32, 0>, index, index, index, index) -> ()
}
}
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
index b5f771d7e62..71c442b965e 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
@@ -15,8 +15,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
%f1 = constant 1.0 : f32
%f2 = constant 2.0 : f32
// 4x unroll (jammed by construction).
- // CHECK: for %i0 = 0 to %arg0 {
- // CHECK-NEXT: for %i1 = 0 to %arg1 step 32 {
+ // CHECK: affine.for %i0 = 0 to %arg0 {
+ // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 32 {
// CHECK-NEXT: [[CST0:%.*]] = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
// CHECK-NEXT: [[CST1:%.*]] = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
// CHECK-NEXT: [[CST2:%.*]] = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
@@ -34,15 +34,15 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: [[VAL31:%.*]] = affine.apply [[D0P24]]{{.*}}
// CHECK-NEXT: vector_transfer_write [[CST3]], {{.*}}, [[VAL30]], [[VAL31]] {permutation_map: [[D0D1TOD1]]} : vector<8xf32>
//
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
// non-scoped %f1
store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
}
}
// 4x unroll (jammed by construction).
- // CHECK: for %i2 = 0 to %arg0 {
- // CHECK-NEXT: for %i3 = 0 to %arg1 step 32 {
+ // CHECK: affine.for %i2 = 0 to %arg0 {
+ // CHECK-NEXT: affine.for %i3 = 0 to %arg1 step 32 {
// CHECK-NEXT: [[CST0:%.*]] = constant splat<vector<8xf32>, 2.000000e+00> : vector<8xf32>
// CHECK-NEXT: [[CST1:%.*]] = constant splat<vector<8xf32>, 2.000000e+00> : vector<8xf32>
// CHECK-NEXT: [[CST2:%.*]] = constant splat<vector<8xf32>, 2.000000e+00> : vector<8xf32>
@@ -60,15 +60,15 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: [[VAL31:%.*]] = affine.apply [[D0P24]]{{.*}}
// CHECK-NEXT: vector_transfer_write [[CST3]], {{.*}}, [[VAL30]], [[VAL31]] {permutation_map: [[D0D1TOD1]]} : vector<8xf32>
//
- for %i2 = 0 to %M {
- for %i3 = 0 to %N {
+ affine.for %i2 = 0 to %M {
+ affine.for %i3 = 0 to %N {
// non-scoped %f2
store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
}
}
// 4x unroll (jammed by construction).
- // CHECK: for %i4 = 0 to %arg0 {
- // CHECK-NEXT: for %i5 = 0 to %arg1 step 32 {
+ // CHECK: affine.for %i4 = 0 to %arg0 {
+ // CHECK-NEXT: affine.for %i5 = 0 to %arg1 step 32 {
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: {{.*}} = vector_transfer_read
@@ -110,8 +110,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: vector_transfer_write
//
- for %i4 = 0 to %M {
- for %i5 = 0 to %N {
+ affine.for %i4 = 0 to %M {
+ affine.for %i5 = 0 to %N {
%a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
%b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
%s5 = addf %a5, %b5 : f32
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
index 92df49fa8fa..62149c323b6 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
@@ -15,8 +15,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
%f1 = constant 1.0 : f32
%f2 = constant 2.0 : f32
// (3x2)x unroll (jammed by construction).
- // CHECK: for %i0 = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %i1 = 0 to %arg1 step 16 {
+ // CHECK: affine.for %i0 = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 16 {
// CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
// CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
// CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
@@ -41,26 +41,26 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: [[VAL50:%.*]] = affine.apply [[D0P2]](%i0)
// CHECK-NEXT: [[VAL51:%.*]] = affine.apply [[D0P8]](%i1)
// CHECK-NEXT: vector_transfer_write {{.*}}, {{.*}}, [[VAL50]], [[VAL51]] {permutation_map: [[D0D1TOD1]]} : vector<8xf32>
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
// non-scoped %f1
store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
}
}
// (3x2)x unroll (jammed by construction).
- // CHECK: for %i2 = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %i3 = 0 to %arg1 step 16 {
+ // CHECK: affine.for %i2 = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %i3 = 0 to %arg1 step 16 {
// .....
- for %i2 = 0 to %M {
- for %i3 = 0 to %N {
+ affine.for %i2 = 0 to %M {
+ affine.for %i3 = 0 to %N {
// non-scoped %f2
// CHECK does (3x4)x unrolling.
store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
}
}
// (3x2)x unroll (jammed by construction).
- // CHECK: for %i4 = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %i5 = 0 to %arg1 step 16 {
+ // CHECK: affine.for %i4 = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %i5 = 0 to %arg1 step 16 {
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: {{.*}} = vector_transfer_read
@@ -122,8 +122,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: vector_transfer_write
//
- for %i4 = 0 to %M {
- for %i5 = 0 to %N {
+ affine.for %i4 = 0 to %M {
+ affine.for %i5 = 0 to %N {
%a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
%b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
%s5 = addf %a5, %b5 : f32
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
index 36ec96e30b4..59705eca69e 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
@@ -13,8 +13,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
%f1 = constant 1.0 : f32
%f2 = constant 2.0 : f32
// 2x unroll (jammed by construction).
- // CHECK: for %i0 = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %i1 = 0 to %arg1 step 32 {
+ // CHECK: affine.for %i0 = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 32 {
// CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 1.000000e+00> : vector<3x16xf32>
// CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 1.000000e+00> : vector<3x16xf32>
// CHECK-NEXT: [[VAL00:%.*]] = affine.apply [[ID1]](%i0)
@@ -24,15 +24,15 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: [[VAL11:%.*]] = affine.apply [[D0P16]](%i1)
// CHECK-NEXT: vector_transfer_write {{.*}}, {{.*}}, [[VAL10]], [[VAL11]] {permutation_map: [[ID2]]} : vector<3x16xf32>
//
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
// non-scoped %f1
store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
}
}
// 2x unroll (jammed by construction).
- // CHECK: for %i2 = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %i3 = 0 to %arg1 step 32 {
+ // CHECK: affine.for %i2 = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %i3 = 0 to %arg1 step 32 {
// CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 2.000000e+00> : vector<3x16xf32>
// CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 2.000000e+00> : vector<3x16xf32>
// CHECK-NEXT: [[VAL00:%.*]] = affine.apply [[ID1]](%i2)
@@ -42,15 +42,15 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: [[VAL11:%.*]] = affine.apply [[D0P16]](%i3)
// CHECK-NEXT: vector_transfer_write {{.*}}, {{.*}}, [[VAL10]], [[VAL11]] {permutation_map: [[ID2]]} : vector<3x16xf32>
//
- for %i2 = 0 to %M {
- for %i3 = 0 to %N {
+ affine.for %i2 = 0 to %M {
+ affine.for %i3 = 0 to %N {
// non-scoped %f2
store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
}
}
// 2x unroll (jammed by construction).
- // CHECK: for %i4 = 0 to %arg0 step 3 {
- // CHECK-NEXT: for %i5 = 0 to %arg1 step 32 {
+ // CHECK: affine.for %i4 = 0 to %arg0 step 3 {
+ // CHECK-NEXT: affine.for %i5 = 0 to %arg1 step 32 {
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: {{.*}} = vector_transfer_read
@@ -72,8 +72,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-NEXT: {{.*}} = affine.apply
// CHECK-NEXT: vector_transfer_write
//
- for %i4 = 0 to %M {
- for %i5 = 0 to %N {
+ affine.for %i4 = 0 to %M {
+ affine.for %i5 = 0 to %N {
%a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
%b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
%s5 = addf %a5, %b5 : f32
diff --git a/mlir/test/Transforms/Vectorize/normalize_maps.mlir b/mlir/test/Transforms/Vectorize/normalize_maps.mlir
index 9569dbe07fe..076d2c75633 100644
--- a/mlir/test/Transforms/Vectorize/normalize_maps.mlir
+++ b/mlir/test/Transforms/Vectorize/normalize_maps.mlir
@@ -9,19 +9,19 @@
// CHECK-LABEL: func @simple()
func @simple() {
- for %i0 = 0 to 7 {
+ affine.for %i0 = 0 to 7 {
%0 = affine.apply (d0) -> (d0) (%i0)
%1 = affine.apply (d0) -> (d0) (%0)
%2 = affine.apply (d0, d1) -> (d0 + d1) (%0, %0)
%3 = affine.apply (d0, d1) -> (d0 - d1) (%0, %0)
}
- // CHECK-NEXT: for %i0 = 0 to 7
+ // CHECK-NEXT: affine.for %i0 = 0 to 7
// CHECK-NEXT: {{.*}} affine.apply #[[ID1]](%i0)
// CHECK-NEXT: {{.*}} affine.apply #[[D0TIMES2]](%i0)
// CHECK-NEXT: {{.*}} affine.apply #[[ZERO]]()
- for %i1 = 0 to 7 {
- for %i2 = 0 to 42 {
+ affine.for %i1 = 0 to 7 {
+ affine.for %i2 = 0 to 42 {
%20 = affine.apply (d0, d1) -> (d1) (%i1, %i2)
%21 = affine.apply (d0, d1) -> (d0) (%i1, %i2)
%22 = affine.apply (d0, d1) -> (d0 + d1) (%20, %21)
@@ -29,15 +29,15 @@ func @simple() {
%24 = affine.apply (d0, d1) -> (-d0 + d1) (%20, %21)
}
}
- // CHECK: for %i1 = 0 to 7
- // CHECK-NEXT: for %i2 = 0 to 42
+ // CHECK: affine.for %i1 = 0 to 7
+ // CHECK-NEXT: affine.for %i2 = 0 to 42
// CHECK-NEXT: {{.*}} affine.apply #[[D0PLUSD1]](%i1, %i2)
// CHECK-NEXT: {{.*}} affine.apply #[[MINSD0PLUSD1]](%i1, %i2)
// CHECK-NEXT: {{.*}} affine.apply #[[D0MINUSD1]](%i1, %i2)
- for %i3 = 0 to 16 {
- for %i4 = 0 to 47 step 2 {
- for %i5 = 0 to 78 step 16 {
+ affine.for %i3 = 0 to 16 {
+ affine.for %i4 = 0 to 47 step 2 {
+ affine.for %i5 = 0 to 78 step 16 {
%50 = affine.apply (d0) -> (d0) (%i3)
%51 = affine.apply (d0) -> (d0) (%i4)
%52 = affine.apply (d0) -> (d0) (%i5)
@@ -47,9 +47,9 @@ func @simple() {
}
}
}
- // CHECK: for %i3 = 0 to 16
- // CHECK-NEXT: for %i4 = 0 to 47 step 2
- // CHECK-NEXT: for %i5 = 0 to 78 step 16
+ // CHECK: affine.for %i3 = 0 to 16
+ // CHECK-NEXT: affine.for %i4 = 0 to 47 step 2
+ // CHECK-NEXT: affine.for %i5 = 0 to 78 step 16
// CHECK-NEXT: {{.*}} affine.apply #[[ID1]](%i3)
// CHECK-NEXT: {{.*}} affine.apply #[[ID1]](%i4)
// CHECK-NEXT: {{.*}} affine.apply #[[ID1]](%i5)
diff --git a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
index da69e8dd26d..6d3f3a54e99 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
@@ -23,17 +23,17 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
//
// CHECK: for {{.*}} step 128
// CHECK-NEXT: {{.*}} = vector_transfer_read %arg0, [[C0]], [[C0]] {permutation_map: #[[map_proj_d0d1_0]]} : (memref<?x?xf32>, index, index) -> vector<128xf32>
- for %i0 = 0 to %M { // vectorized due to scalar -> vector
+ affine.for %i0 = 0 to %M { // vectorized due to scalar -> vector
%a0 = load %A[%cst0, %cst0] : memref<?x?xf32>
}
//
// CHECK:for {{.*}} [[ARG_M]] {
- for %i1 = 0 to %M { // not vectorized
+ affine.for %i1 = 0 to %M { // not vectorized
%a1 = load %A[%i1, %i1] : memref<?x?xf32>
}
//
-// CHECK: for %i{{[0-9]*}} = 0 to [[ARG_M]] {
- for %i2 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
+// CHECK: affine.for %i{{[0-9]*}} = 0 to [[ARG_M]] {
+ affine.for %i2 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
%r2 = affine.apply (d0) -> (d0) (%i2)
%a2 = load %A[%r2#0, %cst0] : memref<?x?xf32>
}
@@ -41,7 +41,7 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK:for [[IV3:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
// CHECK-NEXT: [[APP3:%[a-zA-Z0-9]+]] = affine.apply {{.*}}[[IV3]]
// CHECK-NEXT: {{.*}} = vector_transfer_read %arg0, [[C0]], [[APP3]] {permutation_map: #[[map_proj_d0d1_d1]]} : {{.*}} -> vector<128xf32>
- for %i3 = 0 to %M { // vectorized
+ affine.for %i3 = 0 to %M { // vectorized
%r3 = affine.apply (d0) -> (d0) (%i3)
%a3 = load %A[%cst0, %r3#0] : memref<?x?xf32>
}
@@ -51,8 +51,8 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-NEXT: [[APP50:%[0-9]+]] = affine.apply {{.*}}([[IV4]], [[IV5]])
// CHECK-NEXT: [[APP51:%[0-9]+]] = affine.apply {{.*}}([[IV4]], [[IV5]])
// CHECK-NEXT: {{.*}} = vector_transfer_read %arg0, [[APP50]], [[APP51]] {permutation_map: #[[map_proj_d0d1_d1]]} : {{.*}} -> vector<128xf32>
- for %i4 = 0 to %M { // vectorized
- for %i5 = 0 to %N { // not vectorized, would vectorize with --test-fastest-varying=1
+ affine.for %i4 = 0 to %M { // vectorized
+ affine.for %i5 = 0 to %N { // not vectorized, would vectorize with --test-fastest-varying=1
%r50 = affine.apply (d0, d1) -> (d1) (%i4, %i5)
%r51 = affine.apply (d0, d1) -> (d0) (%i4, %i5)
%a5 = load %A[%r50, %r51] : memref<?x?xf32>
@@ -61,8 +61,8 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
//
// CHECK: for [[IV6:%[i0-9]*]] = 0 to [[ARG_M]] {
// CHECK-NEXT: for [[IV7:%[i0-9]*]] = 0 to [[ARG_N]] {
- for %i6 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
- for %i7 = 0 to %N { // not vectorized, can never vectorize
+ affine.for %i6 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
+ affine.for %i7 = 0 to %N { // not vectorized, can never vectorize
%r70 = affine.apply (d0, d1) -> (d1 + d0) (%i6, %i7)
%r71 = affine.apply (d0, d1) -> (d0) (%i6, %i7)
%a7 = load %A[%r70, %r71] : memref<?x?xf32>
@@ -74,8 +74,8 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK-NEXT: [[APP9_0:%[0-9]+]] = affine.apply {{.*}}([[IV8]], [[IV9]])
// CHECK-NEXT: [[APP9_1:%[0-9]+]] = affine.apply {{.*}}([[IV8]], [[IV9]])
// CHECK-NEXT: {{.*}} = vector_transfer_read %arg0, [[APP9_0]], [[APP9_1]] {permutation_map: #[[map_proj_d0d1_d1]]} : {{.*}} -> vector<128xf32>
- for %i8 = 0 to %M { // vectorized
- for %i9 = 0 to %N {
+ affine.for %i8 = 0 to %M { // vectorized
+ affine.for %i9 = 0 to %N {
%r90 = affine.apply (d0, d1) -> (d1) (%i8, %i9)
%r91 = affine.apply (d0, d1) -> (d0 + d1) (%i8, %i9)
%a9 = load %A[%r90, %r91] : memref<?x?xf32>
@@ -84,8 +84,8 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
//
// CHECK: for [[IV10:%[i0-9]*]] = 0 to %{{[0-9]*}} {
// CHECK: for [[IV11:%[i0-9]*]] = 0 to %{{[0-9]*}} {
- for %i10 = 0 to %M { // not vectorized, need per load transposes
- for %i11 = 0 to %N { // not vectorized, need per load transposes
+ affine.for %i10 = 0 to %M { // not vectorized, need per load transposes
+ affine.for %i11 = 0 to %N { // not vectorized, need per load transposes
%r11_0 = affine.apply (d0, d1) -> (d0) (%i10, %i11)
%r11_1 = affine.apply (d0, d1) -> (d1) (%i10, %i11)
%a11 = load %A[%r11_0, %r11_1] : memref<?x?xf32>
@@ -98,9 +98,9 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
// CHECK: for [[IV12:%[i0-9]*]] = 0 to %{{[0-9]*}} {
// CHECK: for [[IV13:%[i0-9]*]] = 0 to %{{[0-9]*}} {
// CHECK: for [[IV14:%[i0-9]+]] = 0 to [[ARG_P]] step 128
- for %i12 = 0 to %M { // not vectorized, can never vectorize
- for %i13 = 0 to %N { // not vectorized, can never vectorize
- for %i14 = 0 to %P { // vectorized
+ affine.for %i12 = 0 to %M { // not vectorized, can never vectorize
+ affine.for %i13 = 0 to %N { // not vectorized, can never vectorize
+ affine.for %i14 = 0 to %P { // vectorized
%r14_0 = affine.apply (d0, d1, d2) -> (d1) (%i12, %i13, %i14)
%r14_1 = affine.apply (d0, d1, d2) -> (d0 + d1) (%i12, %i13, %i14)
%r14_2 = affine.apply (d0, d1, d2) -> (d0 + d2) (%i12, %i13, %i14)
@@ -109,24 +109,24 @@ func @vec1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
}
}
//
-// CHECK: for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
- for %i15 = 0 to %M { // not vectorized due to condition below
+// CHECK: affine.for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
+ affine.for %i15 = 0 to %M { // not vectorized due to condition below
if #set0(%i15) {
%a15 = load %A[%cst0, %cst0] : memref<?x?xf32>
}
}
//
-// CHECK: for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
- for %i16 = 0 to %M { // not vectorized, can't vectorize a vector load
+// CHECK: affine.for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
+ affine.for %i16 = 0 to %M { // not vectorized, can't vectorize a vector load
%a16 = alloc(%M) : memref<?xvector<2xf32>>
%l16 = load %a16[%i16] : memref<?xvector<2xf32>>
}
//
-// CHECK: for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
+// CHECK: affine.for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
// CHECK: for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
// CHECK: {{.*}} = vector_transfer_read %arg0, [[C0]], [[C0]] {permutation_map: #[[map_proj_d0d1_0]]} : {{.*}} -> vector<128xf32>
- for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %i18 in DFS post-order prevents vectorizing %i17
- for %i18 = 0 to %M { // vectorized due to scalar -> vector
+ affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %i18 in DFS post-order prevents vectorizing %i17
+ affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector
%a18 = load %A[%cst0, %cst0] : memref<?x?xf32>
}
}
@@ -139,24 +139,24 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
%C = alloc (%M, %N) : memref<?x?xf32, 0>
%f1 = constant 1.0 : f32
%f2 = constant 2.0 : f32
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
// CHECK: [[C1:%.*]] = constant splat<vector<128xf32>, 1.000000e+00> : vector<128xf32>
// CHECK: vector_transfer_write [[C1]], {{.*}} {permutation_map: #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref<?x?xf32>, index, index
// non-scoped %f1
store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
}
}
- for %i2 = 0 to %M {
- for %i3 = 0 to %N {
+ affine.for %i2 = 0 to %M {
+ affine.for %i3 = 0 to %N {
// CHECK: [[C3:%.*]] = constant splat<vector<128xf32>, 2.000000e+00> : vector<128xf32>
// CHECK: vector_transfer_write [[C3]], {{.*}} {permutation_map: #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref<?x?xf32>, index, index
// non-scoped %f2
store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
}
}
- for %i4 = 0 to %M {
- for %i5 = 0 to %N {
+ affine.for %i4 = 0 to %M {
+ affine.for %i5 = 0 to %N {
// CHECK: [[A5:%.*]] = vector_transfer_read %0, {{.*}} {permutation_map: #[[map_proj_d0d1_d1]]} : (memref<?x?xf32>, index, index) -> vector<128xf32>
// CHECK: [[B5:%.*]] = vector_transfer_read %1, {{.*}} {permutation_map: #[[map_proj_d0d1_d1]]} : (memref<?x?xf32>, index, index) -> vector<128xf32>
// CHECK: [[S5:%.*]] = addf [[A5]], [[B5]] : vector<128xf32>
@@ -188,10 +188,10 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
// CHECK-LABEL: @vec_rejected
func @vec_rejected(%A : memref<?x?xf32>, %C : memref<?x?xf32>) {
%N = dim %A, 0 : memref<?x?xf32>
- for %i = 0 to %N {
+ affine.for %i = 0 to %N {
// CHECK-NOT: vector
%a = load %A[%i, %i] : memref<?x?xf32> // not vectorized
- for %j = 0 to %N {
+ affine.for %j = 0 to %N {
%b = load %A[%i, %j] : memref<?x?xf32> // may be vectorized
// CHECK-NOT: vector
%c = addf %a, %b : f32 // not vectorized because %a wasn't
diff --git a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
index d847f6bb5ce..59c7483749b 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
@@ -11,13 +11,13 @@ func @vec2d(%A : memref<?x?x?xf32>) {
// CHECK: for {{.*}} = 0 to %1 step 32
// CHECK: for {{.*}} = 0 to %2 step 256
// Example:
- // for %i0 = 0 to %0 {
- // for %i1 = 0 to %1 step 32 {
- // for %i2 = 0 to %2 step 256 {
+ // affine.for %i0 = 0 to %0 {
+ // affine.for %i1 = 0 to %1 step 32 {
+ // affine.for %i2 = 0 to %2 step 256 {
// %3 = "vector_transfer_read"(%arg0, %i0, %i1, %i2) : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
- for %i2 = 0 to %P {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
+ affine.for %i2 = 0 to %P {
%a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
}
}
@@ -27,9 +27,9 @@ func @vec2d(%A : memref<?x?x?xf32>) {
// CHECK: for {{.*}} = 0 to %2 {
// For the case: --test-fastest-varying=1 --test-fastest-varying=0 no
// vectorization happens because of loop nesting order .
- for %i3 = 0 to %M {
- for %i4 = 0 to %N {
- for %i5 = 0 to %P {
+ affine.for %i3 = 0 to %M {
+ affine.for %i4 = 0 to %N {
+ affine.for %i5 = 0 to %P {
%a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
}
}
@@ -43,24 +43,24 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
%C = alloc (%M, %N) : memref<?x?xf32, 0>
%f1 = constant 1.0 : f32
%f2 = constant 2.0 : f32
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
// CHECK: [[C1:%.*]] = constant splat<vector<32x256xf32>, 1.000000e+00> : vector<32x256xf32>
// CHECK: vector_transfer_write [[C1]], {{.*}} {permutation_map: #[[map_proj_d0d1_d0d1]]} : vector<32x256xf32>, memref<?x?xf32>, index, index
// non-scoped %f1
store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
}
}
- for %i2 = 0 to %M {
- for %i3 = 0 to %N {
+ affine.for %i2 = 0 to %M {
+ affine.for %i3 = 0 to %N {
// CHECK: [[C3:%.*]] = constant splat<vector<32x256xf32>, 2.000000e+00> : vector<32x256xf32>
// CHECK: vector_transfer_write [[C3]], {{.*}} {permutation_map: #[[map_proj_d0d1_d0d1]]} : vector<32x256xf32>, memref<?x?xf32>, index, index
// non-scoped %f2
store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
}
}
- for %i4 = 0 to %M {
- for %i5 = 0 to %N {
+ affine.for %i4 = 0 to %M {
+ affine.for %i5 = 0 to %N {
// CHECK: [[A5:%.*]] = vector_transfer_read %0, {{.*}} {permutation_map: #[[map_proj_d0d1_d0d1]]} : (memref<?x?xf32>, index, index) -> vector<32x256xf32>
// CHECK: [[B5:%.*]] = vector_transfer_read %1, {{.*}} {permutation_map: #[[map_proj_d0d1_d0d1]]} : (memref<?x?xf32>, index, index) -> vector<32x256xf32>
// CHECK: [[S5:%.*]] = addf [[A5]], [[B5]] : vector<32x256xf32>
diff --git a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
index 1a6bee585ee..08ca27dbeee 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
@@ -7,17 +7,17 @@ func @vec3d(%A : memref<?x?x?xf32>) {
%0 = dim %A, 0 : memref<?x?x?xf32>
%1 = dim %A, 1 : memref<?x?x?xf32>
%2 = dim %A, 2 : memref<?x?x?xf32>
- // CHECK: for %i0 = 0 to %0 {
- // CHECK: for %i1 = 0 to %0 {
- // CHECK: for %i2 = 0 to %0 step 32 {
- // CHECK: for %i3 = 0 to %1 step 64 {
- // CHECK: for %i4 = 0 to %2 step 256 {
+ // CHECK: affine.for %i0 = 0 to %0 {
+ // CHECK: affine.for %i1 = 0 to %0 {
+ // CHECK: affine.for %i2 = 0 to %0 step 32 {
+ // CHECK: affine.for %i3 = 0 to %1 step 64 {
+ // CHECK: affine.for %i4 = 0 to %2 step 256 {
// CHECK: %3 = vector_transfer_read %arg0, %i2, %i3, %i4 {permutation_map: #[[map_proj_d0d1d2_d0d1d2]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x64x256xf32>
- for %t0 = 0 to %0 {
- for %t1 = 0 to %0 {
- for %i0 = 0 to %0 {
- for %i1 = 0 to %1 {
- for %i2 = 0 to %2 {
+ affine.for %t0 = 0 to %0 {
+ affine.for %t1 = 0 to %0 {
+ affine.for %i0 = 0 to %0 {
+ affine.for %i1 = 0 to %1 {
+ affine.for %i2 = 0 to %2 {
%a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
}
}
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
index 4654ab810df..d00b99f1716 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
@@ -7,13 +7,13 @@ func @vec2d(%A : memref<?x?x?xf32>) {
%M = dim %A, 0 : memref<?x?x?xf32>
%N = dim %A, 1 : memref<?x?x?xf32>
%P = dim %A, 2 : memref<?x?x?xf32>
- // CHECK: for %i0 = 0 to %0 step 32
- // CHECK: for %i1 = 0 to %1 {
- // CHECK: for %i2 = 0 to %2 step 256
+ // CHECK: affine.for %i0 = 0 to %0 step 32
+ // CHECK: affine.for %i1 = 0 to %1 {
+ // CHECK: affine.for %i2 = 0 to %2 step 256
// CHECK: {{.*}} = vector_transfer_read %arg0, %i0, %i1, %i2 {permutation_map: #[[map_proj_d0d1d2_d0d2]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
- for %i2 = 0 to %P {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
+ affine.for %i2 = 0 to %P {
%a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
}
}
@@ -23,9 +23,9 @@ func @vec2d(%A : memref<?x?x?xf32>) {
// CHECK: for {{.*}} = 0 to %2 {
// For the case: --test-fastest-varying=2 --test-fastest-varying=0 no
// vectorization happens because of loop nesting order
- for %i3 = 0 to %M {
- for %i4 = 0 to %N {
- for %i5 = 0 to %P {
+ affine.for %i3 = 0 to %M {
+ affine.for %i4 = 0 to %N {
+ affine.for %i5 = 0 to %P {
%a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
}
}
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
index 0eebf816535..a8a8d5d7790 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
@@ -12,20 +12,20 @@ func @vec2d(%A : memref<?x?x?xf32>) {
// CHECK: for {{.*}} = 0 to %2 {
// For the case: --test-fastest-varying=0 --test-fastest-varying=2 no
// vectorization happens because of loop nesting order.
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
- for %i2 = 0 to %P {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
+ affine.for %i2 = 0 to %P {
%a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
}
}
}
- // CHECK: for %i3 = 0 to %0 step 32
- // CHECK: for %i4 = 0 to %1 step 256
- // CHECK: for %i5 = 0 to %2 {
+ // CHECK: affine.for %i3 = 0 to %0 step 32
+ // CHECK: affine.for %i4 = 0 to %1 step 256
+ // CHECK: affine.for %i5 = 0 to %2 {
// CHECK: {{.*}} = vector_transfer_read %arg0, %i4, %i5, %i3 {permutation_map: #[[map_proj_d0d1d2_d2d0]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- for %i3 = 0 to %M {
- for %i4 = 0 to %N {
- for %i5 = 0 to %P {
+ affine.for %i3 = 0 to %M {
+ affine.for %i4 = 0 to %N {
+ affine.for %i5 = 0 to %P {
%a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
}
}
@@ -37,26 +37,26 @@ func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
%0 = dim %A, 0 : memref<?x?x?xf32>
%1 = dim %A, 1 : memref<?x?x?xf32>
%2 = dim %A, 2 : memref<?x?x?xf32>
- // CHECK: for %i0 = 0 to %0 step 32 {
- // CHECK: for %i1 = 0 to %1 {
- // CHECK: for %i2 = 0 to %2 step 256 {
+ // CHECK: affine.for %i0 = 0 to %0 step 32 {
+ // CHECK: affine.for %i1 = 0 to %1 {
+ // CHECK: affine.for %i2 = 0 to %2 step 256 {
// CHECK: %3 = vector_transfer_read %arg0, %i2, %i1, %i0 {permutation_map: #[[map_proj_d0d1d2_d2d0]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- // CHECK: for %i3 = 0 to %1 step 256 {
- // CHECK: for %i4 = 0 to %2 {
+ // CHECK: affine.for %i3 = 0 to %1 step 256 {
+ // CHECK: affine.for %i4 = 0 to %2 {
// CHECK: %4 = vector_transfer_read %arg0, %i3, %i4, %i0 {permutation_map: #[[map_proj_d0d1d2_d2d0]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- // CHECK: for %i5 = 0 to %2 {
+ // CHECK: affine.for %i5 = 0 to %2 {
// CHECK: %5 = vector_transfer_read %arg0, %i3, %i5, %i0 {permutation_map: #[[map_proj_d0d1d2_d2d0]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- for %i0 = 0 to %0 {
- for %i1 = 0 to %1 {
- for %i2 = 0 to %2 {
+ affine.for %i0 = 0 to %0 {
+ affine.for %i1 = 0 to %1 {
+ affine.for %i2 = 0 to %2 {
%a2 = load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
}
}
- for %i3 = 0 to %1 {
- for %i4 = 0 to %2 {
+ affine.for %i3 = 0 to %1 {
+ affine.for %i4 = 0 to %2 {
%a4 = load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
}
- for %i5 = 0 to %2 {
+ affine.for %i5 = 0 to %2 {
%a5 = load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
}
}
diff --git a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
index 1ba563b3442..b8e4e075890 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
@@ -12,20 +12,20 @@ func @vec2d(%A : memref<?x?x?xf32>) {
// CHECK: for {{.*}} = 0 to %2 {
// For the case: --test-fastest-varying=0 --test-fastest-varying=1 no
// vectorization happens because of loop nesting order.
- for %i0 = 0 to %M {
- for %i1 = 0 to %N {
- for %i2 = 0 to %P {
+ affine.for %i0 = 0 to %M {
+ affine.for %i1 = 0 to %N {
+ affine.for %i2 = 0 to %P {
%a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
}
}
}
- // CHECK: for %i3 = 0 to %0 step 32
- // CHECK: for %i4 = 0 to %1 {
- // CHECK: for %i5 = 0 to %2 step 256
+ // CHECK: affine.for %i3 = 0 to %0 step 32
+ // CHECK: affine.for %i4 = 0 to %1 {
+ // CHECK: affine.for %i5 = 0 to %2 step 256
// CHECK: {{.*}} = vector_transfer_read %arg0, %i4, %i5, %i3 {permutation_map: #[[map_proj_d0d1d2_d2d1]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- for %i3 = 0 to %M {
- for %i4 = 0 to %N {
- for %i5 = 0 to %P {
+ affine.for %i3 = 0 to %M {
+ affine.for %i4 = 0 to %N {
+ affine.for %i5 = 0 to %P {
%a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
}
}
@@ -37,26 +37,26 @@ func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
%0 = dim %A, 0 : memref<?x?x?xf32>
%1 = dim %A, 1 : memref<?x?x?xf32>
%2 = dim %A, 2 : memref<?x?x?xf32>
- // CHECK: for %i0 = 0 to %0 step 32 {
- // CHECK: for %i1 = 0 to %1 step 256 {
- // CHECK: for %i2 = 0 to %2 {
+ // CHECK: affine.for %i0 = 0 to %0 step 32 {
+ // CHECK: affine.for %i1 = 0 to %1 step 256 {
+ // CHECK: affine.for %i2 = 0 to %2 {
// CHECK: %3 = vector_transfer_read %arg0, %i2, %i1, %i0 {permutation_map: #[[map_proj_d0d1d2_d2d1]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- // CHECK: for %i3 = 0 to %1 {
- // CHECK: for %i4 = 0 to %2 step 256 {
+ // CHECK: affine.for %i3 = 0 to %1 {
+ // CHECK: affine.for %i4 = 0 to %2 step 256 {
// CHECK: %4 = vector_transfer_read %arg0, %i3, %i4, %i0 {permutation_map: #[[map_proj_d0d1d2_d2d1]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- // CHECK: for %i5 = 0 to %2 step 256 {
+ // CHECK: affine.for %i5 = 0 to %2 step 256 {
// CHECK: %5 = vector_transfer_read %arg0, %i3, %i5, %i0 {permutation_map: #[[map_proj_d0d1d2_d2d1]]} : (memref<?x?x?xf32>, index, index, index) -> vector<32x256xf32>
- for %i0 = 0 to %0 {
- for %i1 = 0 to %1 {
- for %i2 = 0 to %2 {
+ affine.for %i0 = 0 to %0 {
+ affine.for %i1 = 0 to %1 {
+ affine.for %i2 = 0 to %2 {
%a2 = load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
}
}
- for %i3 = 0 to %1 {
- for %i4 = 0 to %2 {
+ affine.for %i3 = 0 to %1 {
+ affine.for %i4 = 0 to %2 {
%a4 = load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
}
- for %i5 = 0 to %2 {
+ affine.for %i5 = 0 to %2 {
%a5 = load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
}
}
OpenPOWER on IntegriCloud