diff options
| author | River Riddle <riverriddle@google.com> | 2019-06-25 16:06:13 -0700 |
|---|---|---|
| committer | A. Unique TensorFlower <gardener@tensorflow.org> | 2019-06-25 16:06:58 -0700 |
| commit | 4842b2d42e95ff4b5a8425ec7108c9e4e90e8b6c (patch) | |
| tree | 04fefb7f46510e3d39bdb61d5bdcab903810f908 /mlir/test/Quantizer | |
| parent | 1df5f3159e538fdc12c451f09f3f879ca0200d66 (diff) | |
| download | bcm5719-llvm-4842b2d42e95ff4b5a8425ec7108c9e4e90e8b6c.tar.gz bcm5719-llvm-4842b2d42e95ff4b5a8425ec7108c9e4e90e8b6c.zip | |
Modify the syntax of the ElementsAttrs to print the type as a colon type.
This is the standard syntax for types on operations, and is also already used by IntegerAttr and FloatAttr.
Example:
dense<5> : tensor<i32>
dense<[3]> : tensor<1xi32>
PiperOrigin-RevId: 255069157
Diffstat (limited to 'mlir/test/Quantizer')
| -rw-r--r-- | mlir/test/Quantizer/matmul.mlir | 24 | ||||
| -rw-r--r-- | mlir/test/Quantizer/remove-instrumentation.mlir | 2 |
2 files changed, 13 insertions, 13 deletions
diff --git a/mlir/test/Quantizer/matmul.mlir b/mlir/test/Quantizer/matmul.mlir index c2071624475..645f368caba 100644 --- a/mlir/test/Quantizer/matmul.mlir +++ b/mlir/test/Quantizer/matmul.mlir @@ -3,30 +3,30 @@ // ---- // A matmul without fused clamp or bias. // CHECK-LABEL: @matmul -// CHECK: %cst = constant dense<tensor<3x5xi8> +// CHECK: %cst = constant dense{{.*}}tensor<3x5xi8> // CHECK-NEXT: %0 = "quant.qcast"(%arg0) : (tensor<300x3xf32>) -> tensor<300x3x!quant.uniform<i8:f32, 0.037564418067230126:35>> // CHECK-NEXT: %1 = "quant.scast"(%cst) : (tensor<3x5xi8>) -> tensor<3x5x!quant.uniform<i8:f32, 0.0062823070315864236:-1>> // CHECK-NEXT: %2 = "fxpmath.real_matmul"(%0, %1) : (tensor<300x3x!quant.uniform<i8:f32, 0.037564418067230126:35>>, tensor<3x5x!quant.uniform<i8:f32, 0.0062823070315864236:-1>>) -> tensor<300x5x!quant.uniform<i8:f32, 0.0629921259842528:-1>> // CHECK-NEXT: %3 = "quant.dcast"(%2) : (tensor<300x5x!quant.uniform<i8:f32, 0.0629921259842528:-1>>) -> tensor<300x5xf32> func @matmul(%arg0: tensor<300x3xf32>) -> tensor<300x5xf32> { - %0 = "quant.stats"(%arg0) {layerStats: dense<tensor<2xf32>, [-6.123e+00, 3.45e+00]>} : (tensor<300x3xf32>) -> tensor<300x3xf32> - %cst = constant {name: "constant.35"} dense<tensor<3x5xf32>, [[-1.060230e-01, 1.215050e-01, 8.002390e-01, -7.688850e-01, 0.0966112986], [6.890140e-01, -4.070560e-01, -0.797852993, 3.789250e-03, -2.088810e-01], [-6.085290e-01, 2.766170e-02, 2.685570e-01, 5.774010e-01, -4.284370e-01]]> + %0 = "quant.stats"(%arg0) {layerStats: dense<[-6.123e+00, 3.45e+00]> : tensor<2xf32>} : (tensor<300x3xf32>) -> tensor<300x3xf32> + %cst = constant {name: "constant.35"} dense<[[-1.060230e-01, 1.215050e-01, 8.002390e-01, -7.688850e-01, 0.0966112986], [6.890140e-01, -4.070560e-01, -0.797852993, 3.789250e-03, -2.088810e-01], [-6.085290e-01, 2.766170e-02, 2.685570e-01, 5.774010e-01, -4.284370e-01]]> : tensor<3x5xf32> %1 = "fxpmath.real_matmul"(%0, %cst) : (tensor<300x3xf32>, tensor<3x5xf32>) -> tensor<300x5xf32> - %2 
= "quant.stats"(%1) {layerStats: dense<tensor<2xf32>, [-8.000000e+00, 8.000000e+00]>} : (tensor<300x5xf32>) -> tensor<300x5xf32> + %2 = "quant.stats"(%1) {layerStats: dense<[-8.000000e+00, 8.000000e+00]> : tensor<2xf32>} : (tensor<300x5xf32>) -> tensor<300x5xf32> return %2 : tensor<300x5xf32> } // ---- // A matmul with fused clamp which serves as statistics for the result. // CHECK-LABEL: @matmul_clamp -// CHECK: %cst = constant dense<tensor<3x5xi8> +// CHECK: %cst = constant dense{{.*}}tensor<3x5xi8> // CHECK-NEXT: %0 = "quant.qcast"(%arg0) : (tensor<300x3xf32>) -> tensor<300x3x!quant.uniform<i8:f32, 0.037564418067230126:35>> // CHECK-NEXT: %1 = "quant.scast"(%cst) : (tensor<3x5xi8>) -> tensor<3x5x!quant.uniform<i8:f32, 0.0062823070315864236:-1>> // CHECK-NEXT: %2 = "fxpmath.real_matmul"(%0, %1) {clamp_max: 6.100000e+00 : f64, clamp_min: -1.225000e+01 : f64} : (tensor<300x3x!quant.uniform<i8:f32, 0.037564418067230126:35>>, tensor<3x5x!quant.uniform<i8:f32, 0.0062823070315864236:-1>>) -> tensor<300x5x!quant.uniform<i8:f32, 0.072058823529412216:42>> // CHECK-NEXT: %3 = "quant.dcast"(%2) : (tensor<300x5x!quant.uniform<i8:f32, 0.072058823529412216:42>>) -> tensor<300x5xf32> func @matmul_clamp(%arg0: tensor<300x3xf32>) -> tensor<300x5xf32> { - %0 = "quant.stats"(%arg0) {layerStats: dense<tensor<2xf32>, [-6.123e+00, 3.45e+00]>} : (tensor<300x3xf32>) -> tensor<300x3xf32> - %cst = constant {name: "constant.35"} dense<tensor<3x5xf32>, [[-1.060230e-01, 1.215050e-01, 8.002390e-01, -7.688850e-01, 0.0966112986], [6.890140e-01, -4.070560e-01, -0.797852993, 3.789250e-03, -2.088810e-01], [-6.085290e-01, 2.766170e-02, 2.685570e-01, 5.774010e-01, -4.284370e-01]]> + %0 = "quant.stats"(%arg0) {layerStats: dense<[-6.123e+00, 3.45e+00]> : tensor<2xf32>} : (tensor<300x3xf32>) -> tensor<300x3xf32> + %cst = constant {name: "constant.35"} dense<[[-1.060230e-01, 1.215050e-01, 8.002390e-01, -7.688850e-01, 0.0966112986], [6.890140e-01, -4.070560e-01, -0.797852993, 3.789250e-03, 
-2.088810e-01], [-6.085290e-01, 2.766170e-02, 2.685570e-01, 5.774010e-01, -4.284370e-01]]> : tensor<3x5xf32> %1 = "fxpmath.real_matmul"(%0, %cst) {clamp_max: 6.10, clamp_min: -12.25} : (tensor<300x3xf32>, tensor<3x5xf32>) -> tensor<300x5xf32> return %1 : tensor<300x5xf32> } @@ -34,17 +34,17 @@ func @matmul_clamp(%arg0: tensor<300x3xf32>) -> tensor<300x5xf32> { // ---- // A matmul with bias and clamp. // CHECK-LABEL: @matmul_add_clamp -// CHECK: %cst = constant dense<tensor<3x5xi8> -// CHECK-NEXT: %cst_0 = constant dense<tensor<5xi32>, [14, 28, 42, 56, 69]> +// CHECK: %cst = constant dense{{.*}}tensor<3x5xi8> +// CHECK-NEXT: %cst_0 = constant dense<[14, 28, 42, 56, 69]> : tensor<5xi32> // CHECK-NEXT: %0 = "quant.qcast"(%arg0) : (tensor<300x3xf32>) -> tensor<300x3x!quant.uniform<i8:f32, 0.037564418067230126:35>> // CHECK-NEXT: %1 = "quant.scast"(%cst) : (tensor<3x5xi8>) -> tensor<3x5x!quant.uniform<i8:f32, 0.0062823070315864236:-1>> // CHECK-NEXT: %2 = "quant.scast"(%cst_0) : (tensor<5xi32>) -> tensor<5x!quant.uniform<i32:f32, 0.072058823529412216>> // CHECK-NEXT: %3 = "fxpmath.real_matmul_bias"(%0, %1, %2) {clamp_max: 6.100000e+00 : f64, clamp_min: -1.225000e+01 : f64} : (tensor<300x3x!quant.uniform<i8:f32, 0.037564418067230126:35>>, tensor<3x5x!quant.uniform<i8:f32, 0.0062823070315864236:-1>>, tensor<5x!quant.uniform<i32:f32, 0.072058823529412216>>) -> tensor<300x5x!quant.uniform<i8:f32, 0.072058823529412216:42>> // CHECK-NEXT: %4 = "quant.dcast"(%3) : (tensor<300x5x!quant.uniform<i8:f32, 0.072058823529412216:42>>) -> tensor<300x5xf32> func @matmul_add_clamp(%arg0: tensor<300x3xf32>) -> tensor<300x5xf32> { - %0 = "quant.stats"(%arg0) {layerStats: dense<tensor<2xf32>, [-6.123e+00, 3.45e+00]>} : (tensor<300x3xf32>) -> tensor<300x3xf32> - %cst = constant {name: "constant.35"} dense<tensor<3x5xf32>, [[-1.060230e-01, 1.215050e-01, 8.002390e-01, -7.688850e-01, 0.0966112986], [6.890140e-01, -4.070560e-01, -0.797852993, 3.789250e-03, -2.088810e-01], [-6.085290e-01, 
2.766170e-02, 2.685570e-01, 5.774010e-01, -4.284370e-01]]> - %cst_0 = constant {name: "constant.37"} dense<tensor<5xf32>, [1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00]> + %0 = "quant.stats"(%arg0) {layerStats: dense<[-6.123e+00, 3.45e+00]> : tensor<2xf32>} : (tensor<300x3xf32>) -> tensor<300x3xf32> + %cst = constant {name: "constant.35"} dense<[[-1.060230e-01, 1.215050e-01, 8.002390e-01, -7.688850e-01, 0.0966112986], [6.890140e-01, -4.070560e-01, -0.797852993, 3.789250e-03, -2.088810e-01], [-6.085290e-01, 2.766170e-02, 2.685570e-01, 5.774010e-01, -4.284370e-01]]> : tensor<3x5xf32> + %cst_0 = constant {name: "constant.37"} dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00]> : tensor<5xf32> %1 = "fxpmath.real_matmul_bias"(%0, %cst, %cst_0) {clamp_max: 6.10, clamp_min: -12.25} : (tensor<300x3xf32>, tensor<3x5xf32>, tensor<5xf32>) -> tensor<300x5xf32> return %1 : tensor<300x5xf32> } diff --git a/mlir/test/Quantizer/remove-instrumentation.mlir b/mlir/test/Quantizer/remove-instrumentation.mlir index 222ddf751bd..2adae893b67 100644 --- a/mlir/test/Quantizer/remove-instrumentation.mlir +++ b/mlir/test/Quantizer/remove-instrumentation.mlir @@ -4,7 +4,7 @@ // CHECK-LABEL: remove_ops func @remove_ops(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> { %0 = "quant.stats"(%arg0) { - layerStats: dense<tensor<2xf32>, [-1.0, 1.0]> + layerStats: dense<[-1.0, 1.0]> : tensor<2xf32> } : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32> %1 = "quant.coupled_ref"(%0) { coupledKey: "foobar" } : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32> |

