diff options
| author | Tres Popp <tpopp@google.com> | 2020-01-14 11:09:59 +0100 |
|---|---|---|
| committer | Stephan Herhut <herhut@google.com> | 2020-01-14 12:05:47 +0100 |
| commit | 4624a1e8ac8a3f69cc887403b976f538f587744a (patch) | |
| tree | 13cb3b1371abedefbdbd7e09933633acc4aca44c /mlir/test/Conversion/GPUToNVVM | |
| parent | 9492e9d8cfd356109276da5aa926b297db0e16db (diff) | |
| download | bcm5719-llvm-4624a1e8ac8a3f69cc887403b976f538f587744a.tar.gz bcm5719-llvm-4624a1e8ac8a3f69cc887403b976f538f587744a.zip | |
[mlir] Create a gpu.module operation for the GPU Dialect.
Summary:
This is based on the use of code constantly checking for an attribute on
a module and instead represents the distinct operation with a different
op. Instead, this op can be used to provide better filtering.
Reviewers: herhut, mravishankar, antiagainst, rriddle
Reviewed By: herhut, antiagainst, rriddle
Subscribers: liufengdb, aartbik, jholewinski, mgorny, mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72336
Diffstat (limited to 'mlir/test/Conversion/GPUToNVVM')
| -rw-r--r-- | mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir | 20 | ||||
| -rw-r--r-- | mlir/test/Conversion/GPUToNVVM/memory-attrbution.mlir | 8 |
2 files changed, 14 insertions, 14 deletions
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir index 24bf56557c3..7f69cb7482c 100644 --- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -convert-gpu-to-nvvm -split-input-file | FileCheck %s -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK-LABEL: func @gpu_index_ops() func @gpu_index_ops() attributes { gpu.kernel } { @@ -38,7 +38,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK-LABEL: func @gpu_all_reduce_op() func @gpu_all_reduce_op() attributes { gpu.kernel } { @@ -55,7 +55,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK-LABEL: func @gpu_all_reduce_region() func @gpu_all_reduce_region() attributes { gpu.kernel } { @@ -74,7 +74,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK-LABEL: func @gpu_shuffle() func @gpu_shuffle() attributes { gpu.kernel } { @@ -99,7 +99,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK-LABEL: func @gpu_sync() func @gpu_sync() attributes { gpu.kernel } { @@ -111,7 +111,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK: llvm.func @__nv_fabsf(!llvm.float) -> !llvm.float // CHECK: llvm.func @__nv_fabs(!llvm.double) -> !llvm.double // CHECK-LABEL: func @gpu_fabs @@ -126,7 +126,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK: llvm.func @__nv_ceilf(!llvm.float) -> !llvm.float // CHECK: llvm.func @__nv_ceil(!llvm.double) -> !llvm.double // CHECK-LABEL: func @gpu_ceil @@ -141,7 +141,7 
@@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK: llvm.func @__nv_cosf(!llvm.float) -> !llvm.float // CHECK: llvm.func @__nv_cos(!llvm.double) -> !llvm.double // CHECK-LABEL: func @gpu_cos @@ -156,7 +156,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @test_module { // CHECK: llvm.func @__nv_expf(!llvm.float) -> !llvm.float // CHECK: llvm.func @__nv_exp(!llvm.double) -> !llvm.double // CHECK-LABEL: func @gpu_exp @@ -174,7 +174,7 @@ module attributes {gpu.kernel_module} { // ----- // Test that we handled properly operation with SymbolTable other than module op -module attributes {gpu.kernel_module} { +gpu.module @test_module { "test.symbol_scope"() ({ // CHECK: test.symbol_scope // CHECK: llvm.func @__nv_expf(!llvm.float) -> !llvm.float diff --git a/mlir/test/Conversion/GPUToNVVM/memory-attrbution.mlir b/mlir/test/Conversion/GPUToNVVM/memory-attrbution.mlir index 69a16b25139..115c71d1280 100644 --- a/mlir/test/Conversion/GPUToNVVM/memory-attrbution.mlir +++ b/mlir/test/Conversion/GPUToNVVM/memory-attrbution.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt --convert-gpu-to-nvvm --split-input-file %s | FileCheck %s -module attributes {gpu.kernel_module} { +gpu.module @kernel { // CHECK-LABEL: llvm.func @private gpu.func @private(%arg0: f32) private(%arg1: memref<4xf32, 5>) { // Allocate private memory inside the function. @@ -32,7 +32,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @kernel { // Workgroup buffers are allocated as globals. // CHECK: llvm.mlir.global internal @[[buffer:.*]]() // CHECK-SAME: addr_space = 3 @@ -72,7 +72,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @kernel { // Check that the total size was computed correctly. 
// CHECK: llvm.mlir.global internal @[[buffer:.*]]() // CHECK-SAME: addr_space = 3 @@ -113,7 +113,7 @@ module attributes {gpu.kernel_module} { // ----- -module attributes {gpu.kernel_module} { +gpu.module @kernel { // Check that several buffers are defined. // CHECK: llvm.mlir.global internal @[[buffer1:.*]]() // CHECK-SAME: !llvm<"[1 x float]"> |

