| field | value | date |
|---|---|---|
| author | River Riddle <riverriddle@google.com> | 2019-10-16 17:32:30 -0700 |
| committer | A. Unique TensorFlower <gardener@tensorflow.org> | 2019-10-16 17:32:57 -0700 |
| commit | 7045471913e7211fc75b8f073cc0ed4a90e902e2 | |
| tree | 73573baa6a520bf55303842d35fbe39974a1a0d4 /mlir/test/Examples | |
| parent | 7053a30f4b304f7f779c11eb3aa9a50bdc231ce9 | |
Add support for inlining toy call operations.
GenericCallOp needed to implement CallOpInterface so that it could be picked up by the inliner. This change also adds a CastOp to perform the shape casts that are generated during inlining; the casts introduced by the inliner are folded away after shape inference.
PiperOrigin-RevId: 275150438
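
To make the mechanics concrete: once GenericCallOp implements CallOpInterface, the inliner can substitute the body of @multiply_transpose for each toy.generic_call. Because the callee is written against unranked tensor<*xf64> while the call sites pass ranked tensor<2x3xf64> values, the inliner inserts the new toy.cast to reconcile the types. The snippet below is an illustrative sketch, not output from this commit: value names are hypothetical and only the first call is shown inlined, before shape inference has run.

```mlir
// Hypothetical, simplified view of @main after the inliner has substituted the
// body of @multiply_transpose for one toy.generic_call, but before shape
// inference. The toy.cast ops bridge the ranked caller types and the callee's
// unranked tensor<*xf64> types, and are folded away once shapes are inferred.
func @main() {
  %lhs = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
  %rhs = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
  // Casts inserted by the inliner (via the new CastOp):
  %lhs_cast = "toy.cast"(%lhs) : (tensor<2x3xf64>) -> tensor<*xf64>
  %rhs_cast = "toy.cast"(%rhs) : (tensor<2x3xf64>) -> tensor<*xf64>
  // Inlined body of @multiply_transpose:
  %t = "toy.transpose"(%rhs_cast) : (tensor<*xf64>) -> tensor<*xf64>
  %prod = "toy.mul"(%lhs_cast, %t) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
  "toy.print"(%prod) : (tensor<*xf64>) -> ()
  "toy.return"() : () -> ()
}
```

After shape inference resolves the unranked types, the casts become trivial and are folded away, which is exactly what the new test below verifies.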
Diffstat (limited to 'mlir/test/Examples')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mlir/test/Examples/Toy/Ch4/shape_inference.mlir | 30 |
1 file changed, 30 insertions, 0 deletions
```diff
diff --git a/mlir/test/Examples/Toy/Ch4/shape_inference.mlir b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
new file mode 100644
index 00000000000..e783300c5cf
--- /dev/null
+++ b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
@@ -0,0 +1,30 @@
+// RUN: toyc-ch4 %s -emit=mlir -opt 2>&1 | FileCheck %s
+
+// Check the result of inlining+shape inference on an input module.
+
+func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
+  %0 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
+  %1 = "toy.mul"(%arg0, %0) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
+  "toy.return"(%1) : (tensor<*xf64>) -> ()
+}
+func @main() {
+  %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
+  %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
+  %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
+  %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
+  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  "toy.print"(%5) : (tensor<*xf64>) -> ()
+  "toy.return"() : () -> ()
+}
+
+// CHECK-NOT: func @multiply_transpose
+// CHECK-NOT: tensor<*xf64>
+
+// CHECK-LABEL: func @main() {
+// CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
+// CHECK: [[VAL_1:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
+// CHECK: [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
+// CHECK: [[VAL_3:%.*]] = "toy.mul"([[VAL_1]], [[VAL_2]]) : (tensor<2x3xf64>, tensor<3x2xf64>) -> tensor<2x2xf64>
+// CHECK: "toy.print"([[VAL_3]]) : (tensor<2x2xf64>) -> ()
+// CHECK: "toy.return"() : () -> ()
```
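
For readers not used to FileCheck regex escapes such as `{{\[\[}}` and `[[VAL_0:%.*]]`, the CHECK lines above expect a module equivalent to the plain IR below after `-opt` has run (inlining followed by shape inference): @multiply_transpose, the toy.cast ops, and every unranked tensor<*xf64> type are gone. The SSA value numbering here is simply a literal transcription of the CHECK pattern, not verbatim compiler output.

```mlir
// Expected shape of the output module implied by the CHECK lines above.
func @main() {
  %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
  %1 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
  %2 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64>
  %3 = "toy.mul"(%1, %2) : (tensor<2x3xf64>, tensor<3x2xf64>) -> tensor<2x2xf64>
  "toy.print"(%3) : (tensor<2x2xf64>) -> ()
  "toy.return"() : () -> ()
}
```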

