diff options
| author | Nicolas Vasilache <ntv@google.com> | 2019-09-24 11:21:04 -0700 |
|---|---|---|
| committer | A. Unique TensorFlower <gardener@tensorflow.org> | 2019-09-24 11:21:49 -0700 |
| commit | 42d8fa667bcdb81ad305e64a4d6fb0ff1cc4cc3d (patch) | |
| tree | 0ee6fa9213a13fb449229dc95f43905d6d1b0f78 /mlir/test/Examples | |
| parent | 74cdbf5909e57b42b6ed5b3b6eea4f76448a7d48 (diff) | |
| download | bcm5719-llvm-42d8fa667bcdb81ad305e64a4d6fb0ff1cc4cc3d.tar.gz bcm5719-llvm-42d8fa667bcdb81ad305e64a4d6fb0ff1cc4cc3d.zip | |
Normalize lowering of MemRef types
The RFC for unifying Linalg and Affine compilation passes into an end-to-end flow with a predictable ABI and linkage to external function calls raised the question of why we have variable sized descriptors for memrefs depending on whether they have static or dynamic dimensions (https://groups.google.com/a/tensorflow.org/forum/#!topic/mlir/MaL8m2nXuio).
This CL standardizes the ABI on the rank of the memrefs.
The LLVM struct for a memref becomes equivalent to:
```
template <typename Elem, size_t Rank>
struct {
Elem *ptr;
int64_t sizes[Rank];
};
```
PiperOrigin-RevId: 270947276
Diffstat (limited to 'mlir/test/Examples')
| -rw-r--r-- | mlir/test/Examples/Linalg/Linalg1.mlir | 8 |
1 file changed, 4 insertions, 4 deletions
```diff
diff --git a/mlir/test/Examples/Linalg/Linalg1.mlir b/mlir/test/Examples/Linalg/Linalg1.mlir
index a5a3bacc2cd..03e16db1d44 100644
--- a/mlir/test/Examples/Linalg/Linalg1.mlir
+++ b/mlir/test/Examples/Linalg/Linalg1.mlir
@@ -64,9 +64,9 @@ func @viewRangeConversion(%arg0: memref<?x?xf32>, %arg1: !linalg.range, %arg2: !
 }
 // LLVM-LABEL: @viewRangeConversion
 // LLVM-NEXT: %0 = llvm.mlir.undef : !llvm<"{ float*, i64, [2 x i64], [2 x i64] }">
-// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, i64, i64 }">
+// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, [2 x i64] }">
 // LLVM-NEXT: %2 = llvm.insertvalue %1, %0[0] : !llvm<"{ float*, i64, [2 x i64], [2 x i64] }">
-// LLVM-NEXT: %3 = llvm.extractvalue %arg0[2] : !llvm<"{ float*, i64, i64 }">
+// LLVM-NEXT: %3 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
 // LLVM-NEXT: %4 = llvm.mlir.constant(1 : index) : !llvm.i64
 // LLVM-NEXT: %5 = llvm.mul %4, %3 : !llvm.i64
 // LLVM-NEXT: %6 = llvm.mlir.constant(0 : index) : !llvm.i64
@@ -98,9 +98,9 @@ func @viewNonRangeConversion(%arg0: memref<?x?xf32>, %arg1: !linalg.range, %arg2
 }
 // LLVM-LABEL: @viewNonRangeConversion
 // LLVM-NEXT: %0 = llvm.mlir.undef : !llvm<"{ float*, i64, [1 x i64], [1 x i64] }">
-// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, i64, i64 }">
+// LLVM-NEXT: %1 = llvm.extractvalue %arg0[0] : !llvm<"{ float*, [2 x i64] }">
 // LLVM-NEXT: %2 = llvm.insertvalue %1, %0[0] : !llvm<"{ float*, i64, [1 x i64], [1 x i64] }">
-// LLVM-NEXT: %3 = llvm.extractvalue %arg0[2] : !llvm<"{ float*, i64, i64 }">
+// LLVM-NEXT: %3 = llvm.extractvalue %arg0[1, 1] : !llvm<"{ float*, [2 x i64] }">
 // LLVM-NEXT: %4 = llvm.mlir.constant(1 : index) : !llvm.i64
 // LLVM-NEXT: %5 = llvm.mul %4, %3 : !llvm.i64
 // LLVM-NEXT: %6 = llvm.mlir.constant(0 : index) : !llvm.i64
```

