author     Alex Zinenko <zinenko@google.com>                  2019-10-10 01:33:33 -0700
committer  A. Unique TensorFlower <gardener@tensorflow.org>   2019-10-10 01:34:06 -0700
commit     5e7959a3531c8019052bae3a84a42a67c5857bc9 (patch)
tree       1eb248dba17a3c4bfd2f9c815865254681ca78c8 /mlir/test/Target
parent     309b4556d00f531988f34930eedb546512ee619f (diff)
Use llvm.func to define functions with wrapped LLVM IR function type
This function-like operation allows one to define functions with a wrapped LLVM IR function type, in particular variadic functions. The operation was added in parallel to the existing lowering flow; this commit only switches the flow to use it. Using a custom function type makes the LLVM IR dialect type system more consistent, and it avoids the complex conversion rules for functions that previously had to use the built-in function type instead of a wrapped LLVM IR dialect type and perform conversions during the analysis.

PiperOrigin-RevId: 273910855
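For readers skimming the diff below, a minimal before/after sketch of the change (the @printf signature is a hypothetical illustration, not taken from this commit; the @llvm_varargs hunk in llvmir.mlir shows the same rewrite in context):

// Before: built-in func op, with variadic-ness encoded as a std.varargs attribute.
func @printf(!llvm<"i8*">) -> !llvm.i32 attributes {std.varargs = true}

// After: llvm.func carries the wrapped LLVM IR function type directly,
// so the variadic signature lives in the type itself and needs no attribute.
llvm.func @printf(!llvm<"i8*">, ...) -> !llvm.i32

Because the wrapped type is now the single source of truth for a function's signature, the translation to LLVM IR no longer has to reconcile the built-in function type with LLVM dialect types during analysis.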
Diffstat (limited to 'mlir/test/Target')
-rw-r--r--  mlir/test/Target/llvmir-intrinsics.mlir |  4
-rw-r--r--  mlir/test/Target/llvmir.mlir            | 93
-rw-r--r--  mlir/test/Target/nvvmir.mlir            | 12
-rw-r--r--  mlir/test/Target/rocdl.mlir             |  4
4 files changed, 57 insertions, 56 deletions
diff --git a/mlir/test/Target/llvmir-intrinsics.mlir b/mlir/test/Target/llvmir-intrinsics.mlir
index cf6b8eca554..bc08a04e8a8 100644
--- a/mlir/test/Target/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/llvmir-intrinsics.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
// CHECK-LABEL: @fmuladd_test
-func @fmuladd_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm<"<8 x float>">) {
+llvm.func @fmuladd_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm<"<8 x float>">) {
// CHECK: call float @llvm.fmuladd.f32.f32.f32
"llvm.intr.fmuladd"(%arg0, %arg1, %arg0) : (!llvm.float, !llvm.float, !llvm.float) -> !llvm.float
// CHECK: call <8 x float> @llvm.fmuladd.v8f32.v8f32.v8f32
@@ -10,7 +10,7 @@ func @fmuladd_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm<"<8 x fl
}
// CHECK-LABEL: @exp_test
-func @exp_test(%arg0: !llvm.float, %arg1: !llvm<"<8 x float>">) {
+llvm.func @exp_test(%arg0: !llvm.float, %arg1: !llvm<"<8 x float>">) {
// CHECK: call float @llvm.exp.f32
"llvm.intr.exp"(%arg0) : (!llvm.float) -> !llvm.float
// CHECK: call <8 x float> @llvm.exp.v8f32
diff --git a/mlir/test/Target/llvmir.mlir b/mlir/test/Target/llvmir.mlir
index cca5661b273..6c493b100a4 100644
--- a/mlir/test/Target/llvmir.mlir
+++ b/mlir/test/Target/llvmir.mlir
@@ -29,7 +29,7 @@ llvm.mlir.global @int_global_undef() : !llvm.i64
//
// CHECK: declare i8* @malloc(i64)
-func @malloc(!llvm.i64) -> !llvm<"i8*">
+llvm.func @malloc(!llvm.i64) -> !llvm<"i8*">
// CHECK: declare void @free(i8*)
@@ -41,12 +41,12 @@ func @malloc(!llvm.i64) -> !llvm<"i8*">
// CHECK-LABEL: define void @empty() {
// CHECK-NEXT: ret void
// CHECK-NEXT: }
-func @empty() {
+llvm.func @empty() {
llvm.return
}
// CHECK-LABEL: @global_refs
-func @global_refs() {
+llvm.func @global_refs() {
// Check load from globals.
// CHECK: load i32, i32* @i32_global
%0 = llvm.mlir.addressof @i32_global : !llvm<"i32*">
@@ -63,11 +63,11 @@ func @global_refs() {
}
// CHECK-LABEL: declare void @body(i64)
-func @body(!llvm.i64)
+llvm.func @body(!llvm.i64)
// CHECK-LABEL: define void @simple_loop() {
-func @simple_loop() {
+llvm.func @simple_loop() {
// CHECK: br label %[[SIMPLE_bb1:[0-9]+]]
llvm.br ^bb1
@@ -107,7 +107,7 @@ func @simple_loop() {
// CHECK-NEXT: call void @simple_loop()
// CHECK-NEXT: ret void
// CHECK-NEXT: }
-func @simple_caller() {
+llvm.func @simple_caller() {
llvm.call @simple_loop() : () -> ()
llvm.return
}
@@ -124,20 +124,20 @@ func @simple_caller() {
// CHECK-NEXT: call void @more_imperfectly_nested_loops()
// CHECK-NEXT: ret void
// CHECK-NEXT: }
-func @ml_caller() {
+llvm.func @ml_caller() {
llvm.call @simple_loop() : () -> ()
llvm.call @more_imperfectly_nested_loops() : () -> ()
llvm.return
}
// CHECK-LABEL: declare i64 @body_args(i64)
-func @body_args(!llvm.i64) -> !llvm.i64
+llvm.func @body_args(!llvm.i64) -> !llvm.i64
// CHECK-LABEL: declare i32 @other(i64, i32)
-func @other(!llvm.i64, !llvm.i32) -> !llvm.i32
+llvm.func @other(!llvm.i64, !llvm.i32) -> !llvm.i32
// CHECK-LABEL: define i32 @func_args(i32 {{%.*}}, i32 {{%.*}}) {
// CHECK-NEXT: br label %[[ARGS_bb1:[0-9]+]]
-func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
+llvm.func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
%0 = llvm.mlir.constant(0 : i32) : !llvm.i32
llvm.br ^bb1
@@ -182,17 +182,17 @@ func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
}
// CHECK: declare void @pre(i64)
-func @pre(!llvm.i64)
+llvm.func @pre(!llvm.i64)
// CHECK: declare void @body2(i64, i64)
-func @body2(!llvm.i64, !llvm.i64)
+llvm.func @body2(!llvm.i64, !llvm.i64)
// CHECK: declare void @post(i64)
-func @post(!llvm.i64)
+llvm.func @post(!llvm.i64)
// CHECK-LABEL: define void @imperfectly_nested_loops() {
// CHECK-NEXT: br label %[[IMPER_bb1:[0-9]+]]
-func @imperfectly_nested_loops() {
+llvm.func @imperfectly_nested_loops() {
llvm.br ^bb1
// CHECK: [[IMPER_bb1]]:
@@ -259,10 +259,10 @@ func @imperfectly_nested_loops() {
}
// CHECK: declare void @mid(i64)
-func @mid(!llvm.i64)
+llvm.func @mid(!llvm.i64)
// CHECK: declare void @body3(i64, i64)
-func @body3(!llvm.i64, !llvm.i64)
+llvm.func @body3(!llvm.i64, !llvm.i64)
// A complete function transformation check.
// CHECK-LABEL: define void @more_imperfectly_nested_loops() {
@@ -306,7 +306,7 @@ func @body3(!llvm.i64, !llvm.i64)
// CHECK: 21: ; preds = %2
// CHECK-NEXT: ret void
// CHECK-NEXT: }
-func @more_imperfectly_nested_loops() {
+llvm.func @more_imperfectly_nested_loops() {
llvm.br ^bb1
^bb1: // pred: ^bb0
%0 = llvm.mlir.constant(0 : index) : !llvm.i64
@@ -359,7 +359,7 @@ func @more_imperfectly_nested_loops() {
//
// CHECK-LABEL: define void @memref_alloc()
-func @memref_alloc() {
+llvm.func @memref_alloc() {
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 400)
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
@@ -377,10 +377,10 @@ func @memref_alloc() {
}
// CHECK-LABEL: declare i64 @get_index()
-func @get_index() -> !llvm.i64
+llvm.func @get_index() -> !llvm.i64
// CHECK-LABEL: define void @store_load_static()
-func @store_load_static() {
+llvm.func @store_load_static() {
^bb0:
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 40)
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
@@ -448,7 +448,7 @@ func @store_load_static() {
}
// CHECK-LABEL: define void @store_load_dynamic(i64 {{%.*}})
-func @store_load_dynamic(%arg0: !llvm.i64) {
+llvm.func @store_load_dynamic(%arg0: !llvm.i64) {
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
@@ -518,7 +518,7 @@ func @store_load_dynamic(%arg0: !llvm.i64) {
}
// CHECK-LABEL: define void @store_load_mixed(i64 {{%.*}})
-func @store_load_mixed(%arg0: !llvm.i64) {
+llvm.func @store_load_mixed(%arg0: !llvm.i64) {
%0 = llvm.mlir.constant(10 : index) : !llvm.i64
// CHECK-NEXT: %{{[0-9]+}} = mul i64 2, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
@@ -603,7 +603,7 @@ func @store_load_mixed(%arg0: !llvm.i64) {
}
// CHECK-LABEL: define { float*, i64 } @memref_args_rets({ float* } {{%.*}}, { float*, i64 } {{%.*}}, { float*, i64 } {{%.*}}) {
-func @memref_args_rets(%arg0: !llvm<"{ float* }">, %arg1: !llvm<"{ float*, i64 }">, %arg2: !llvm<"{ float*, i64 }">) -> !llvm<"{ float*, i64 }"> {
+llvm.func @memref_args_rets(%arg0: !llvm<"{ float* }">, %arg1: !llvm<"{ float*, i64 }">, %arg2: !llvm<"{ float*, i64 }">) -> !llvm<"{ float*, i64 }"> {
%0 = llvm.mlir.constant(7 : index) : !llvm.i64
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
%1 = llvm.call @get_index() : () -> !llvm.i64
@@ -657,7 +657,7 @@ func @memref_args_rets(%arg0: !llvm<"{ float* }">, %arg1: !llvm<"{ float*, i64 }
// CHECK-LABEL: define i64 @memref_dim({ float*, i64, i64 } {{%.*}})
-func @memref_dim(%arg0: !llvm<"{ float*, i64, i64 }">) -> !llvm.i64 {
+llvm.func @memref_dim(%arg0: !llvm<"{ float*, i64, i64 }">) -> !llvm.i64 {
// Expecting this to create an LLVM constant.
%0 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK-NEXT: %2 = extractvalue { float*, i64, i64 } %0, 1
@@ -678,12 +678,12 @@ func @memref_dim(%arg0: !llvm<"{ float*, i64, i64 }">) -> !llvm.i64 {
llvm.return %6 : !llvm.i64
}
-func @get_i64() -> !llvm.i64
-func @get_f32() -> !llvm.float
-func @get_memref() -> !llvm<"{ float*, i64, i64 }">
+llvm.func @get_i64() -> !llvm.i64
+llvm.func @get_f32() -> !llvm.float
+llvm.func @get_memref() -> !llvm<"{ float*, i64, i64 }">
// CHECK-LABEL: define { i64, float, { float*, i64, i64 } } @multireturn() {
-func @multireturn() -> !llvm<"{ i64, float, { float*, i64, i64 } }"> {
+llvm.func @multireturn() -> !llvm<"{ i64, float, { float*, i64, i64 } }"> {
%0 = llvm.call @get_i64() : () -> !llvm.i64
%1 = llvm.call @get_f32() : () -> !llvm.float
%2 = llvm.call @get_memref() : () -> !llvm<"{ float*, i64, i64 }">
@@ -700,7 +700,7 @@ func @multireturn() -> !llvm<"{ i64, float, { float*, i64, i64 } }"> {
// CHECK-LABEL: define void @multireturn_caller() {
-func @multireturn_caller() {
+llvm.func @multireturn_caller() {
// CHECK-NEXT: %1 = call { i64, float, { float*, i64, i64 } } @multireturn()
// CHECK-NEXT: [[ret0:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 0
// CHECK-NEXT: [[ret1:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 1
@@ -734,7 +734,7 @@ func @multireturn_caller() {
}
// CHECK-LABEL: define <4 x float> @vector_ops(<4 x float> {{%.*}}, <4 x i1> {{%.*}}, <4 x i64> {{%.*}}) {
-func @vector_ops(%arg0: !llvm<"<4 x float>">, %arg1: !llvm<"<4 x i1>">, %arg2: !llvm<"<4 x i64>">) -> !llvm<"<4 x float>"> {
+llvm.func @vector_ops(%arg0: !llvm<"<4 x float>">, %arg1: !llvm<"<4 x i1>">, %arg2: !llvm<"<4 x i64>">) -> !llvm<"<4 x float>"> {
%0 = llvm.mlir.constant(dense<4.200000e+01> : vector<4xf32>) : !llvm<"<4 x float>">
// CHECK-NEXT: %4 = fadd <4 x float> %0, <float 4.200000e+01, float 4.200000e+01, float 4.200000e+01, float 4.200000e+01>
%1 = llvm.fadd %arg0, %0 : !llvm<"<4 x float>">
@@ -763,7 +763,7 @@ func @vector_ops(%arg0: !llvm<"<4 x float>">, %arg1: !llvm<"<4 x i1>">, %arg2: !
}
// CHECK-LABEL: @ops
-func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3: !llvm.i32) -> !llvm<"{ float, i32 }"> {
+llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3: !llvm.i32) -> !llvm<"{ float, i32 }"> {
// CHECK-NEXT: fsub float %0, %1
%0 = llvm.fsub %arg0, %arg1 : !llvm.float
// CHECK-NEXT: %6 = sub i32 %2, %3
@@ -811,7 +811,7 @@ func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3: !llvm
//
// CHECK-LABEL: define void @indirect_const_call(i64 {{%.*}}) {
-func @indirect_const_call(%arg0: !llvm.i64) {
+llvm.func @indirect_const_call(%arg0: !llvm.i64) {
// CHECK-NEXT: call void @body(i64 %0)
%0 = llvm.mlir.constant(@body) : !llvm<"void (i64)*">
llvm.call %0(%arg0) : (!llvm.i64) -> ()
@@ -820,7 +820,7 @@ func @indirect_const_call(%arg0: !llvm.i64) {
}
// CHECK-LABEL: define i32 @indirect_call(i32 (float)* {{%.*}}, float {{%.*}}) {
-func @indirect_call(%arg0: !llvm<"i32 (float)*">, %arg1: !llvm.float) -> !llvm.i32 {
+llvm.func @indirect_call(%arg0: !llvm<"i32 (float)*">, %arg1: !llvm.float) -> !llvm.i32 {
// CHECK-NEXT: %3 = call i32 %0(float %1)
%0 = llvm.call %arg0(%arg1) : (!llvm.float) -> !llvm.i32
// CHECK-NEXT: ret i32 %3
@@ -833,7 +833,7 @@ func @indirect_call(%arg0: !llvm<"i32 (float)*">, %arg1: !llvm.float) -> !llvm.i
//
// CHECK-LABEL: define void @cond_br_arguments(i1 {{%.*}}, i1 {{%.*}}) {
-func @cond_br_arguments(%arg0: !llvm.i1, %arg1: !llvm.i1) {
+llvm.func @cond_br_arguments(%arg0: !llvm.i1, %arg1: !llvm.i1) {
// CHECK-NEXT: br i1 %0, label %3, label %5
llvm.cond_br %arg0, ^bb1(%arg0 : !llvm.i1), ^bb2
@@ -850,15 +850,14 @@ func @cond_br_arguments(%arg0: !llvm.i1, %arg1: !llvm.i1) {
}
// CHECK-LABEL: define void @llvm_noalias(float* noalias {{%*.}}) {
-func @llvm_noalias(%arg0: !llvm<"float*"> {llvm.noalias = true}) {
+llvm.func @llvm_noalias(%arg0: !llvm<"float*"> {llvm.noalias = true}) {
llvm.return
}
// CHECK-LABEL: @llvm_varargs(...)
-func @llvm_varargs()
- attributes {std.varargs = true}
+llvm.func @llvm_varargs(...)
-func @intpointerconversion(%arg0 : !llvm.i32) -> !llvm.i32 {
+llvm.func @intpointerconversion(%arg0 : !llvm.i32) -> !llvm.i32 {
// CHECK: %2 = inttoptr i32 %0 to i32*
// CHECK-NEXT: %3 = ptrtoint i32* %2 to i32
%1 = llvm.inttoptr %arg0 : !llvm.i32 to !llvm<"i32*">
@@ -866,19 +865,19 @@ func @intpointerconversion(%arg0 : !llvm.i32) -> !llvm.i32 {
llvm.return %2 : !llvm.i32
}
-func @stringconstant() -> !llvm<"i8*"> {
+llvm.func @stringconstant() -> !llvm<"i8*"> {
%1 = llvm.mlir.constant("Hello world!") : !llvm<"i8*">
// CHECK: ret [12 x i8] c"Hello world!"
llvm.return %1 : !llvm<"i8*">
}
-func @noreach() {
+llvm.func @noreach() {
// CHECK: unreachable
llvm.unreachable
}
// CHECK-LABEL: define void @fcmp
-func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
+llvm.func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
// CHECK: fcmp oeq float %0, %1
// CHECK-NEXT: fcmp ogt float %0, %1
// CHECK-NEXT: fcmp oge float %0, %1
@@ -911,7 +910,7 @@ func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
}
// CHECK-LABEL: @vect
-func @vect(%arg0: !llvm<"<4 x float>">, %arg1: !llvm.i32, %arg2: !llvm.float) {
+llvm.func @vect(%arg0: !llvm<"<4 x float>">, %arg1: !llvm.i32, %arg2: !llvm.float) {
// CHECK-NEXT: extractelement <4 x float> {{.*}}, i32 {{.*}}
// CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i32 {{.*}}
// CHECK-NEXT: shufflevector <4 x float> {{.*}}, <4 x float> {{.*}}, <5 x i32> <i32 0, i32 0, i32 0, i32 0, i32 7>
@@ -922,7 +921,7 @@ func @vect(%arg0: !llvm<"<4 x float>">, %arg1: !llvm.i32, %arg2: !llvm.float) {
}
// CHECK-LABEL: @alloca
-func @alloca(%size : !llvm.i64) {
+llvm.func @alloca(%size : !llvm.i64) {
// CHECK: alloca
// CHECK-NOT: align
llvm.alloca %size x !llvm.i32 {alignment = 0} : (!llvm.i64) -> (!llvm<"i32*">)
@@ -932,13 +931,14 @@ func @alloca(%size : !llvm.i64) {
}
// CHECK-LABEL: @constants
-func @constants() -> !llvm<"<4 x float>"> {
+llvm.func @constants() -> !llvm<"<4 x float>"> {
// CHECK: ret <4 x float> <float 4.2{{0*}}e+01, float 0.{{0*}}e+00, float 0.{{0*}}e+00, float 0.{{0*}}e+00>
%0 = llvm.mlir.constant(sparse<[[0]], [4.2e+01]> : vector<4xf32>) : !llvm<"<4 x float>">
llvm.return %0 : !llvm<"<4 x float>">
}
-func @fp_casts(%fp1 : !llvm<"float">, %fp2 : !llvm<"double">) -> !llvm.i16 {
+// CHECK-LABEL: @fp_casts
+llvm.func @fp_casts(%fp1 : !llvm<"float">, %fp2 : !llvm<"double">) -> !llvm.i16 {
// CHECK: fptrunc double {{.*}} to float
%a = llvm.fptrunc %fp2 : !llvm<"double"> to !llvm<"float">
// CHECK: fpext float {{.*}} to double
@@ -948,7 +948,8 @@ func @fp_casts(%fp1 : !llvm<"float">, %fp2 : !llvm<"double">) -> !llvm.i16 {
llvm.return %c : !llvm.i16
}
-func @integer_extension_and_truncation(%a : !llvm.i32) {
+// CHECK-LABEL: @integer_extension_and_truncation
+llvm.func @integer_extension_and_truncation(%a : !llvm.i32) {
// CHECK: sext i32 {{.*}} to i64
// CHECK: zext i32 {{.*}} to i64
// CHECK: trunc i32 {{.*}} to i16
diff --git a/mlir/test/Target/nvvmir.mlir b/mlir/test/Target/nvvmir.mlir
index 69e366255ed..c09b414bce2 100644
--- a/mlir/test/Target/nvvmir.mlir
+++ b/mlir/test/Target/nvvmir.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-translate -mlir-to-nvvmir %s | FileCheck %s
-func @nvvm_special_regs() -> !llvm.i32 {
- // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+llvm.func @nvvm_special_regs() -> !llvm.i32 {
+ // CHECK: %1 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
%1 = nvvm.read.ptx.sreg.tid.x : !llvm.i32
// CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.y()
%2 = nvvm.read.ptx.sreg.tid.y : !llvm.i32
@@ -32,13 +32,13 @@ func @nvvm_special_regs() -> !llvm.i32 {
llvm.return %1 : !llvm.i32
}
-func @llvm.nvvm.barrier0() {
+llvm.func @llvm.nvvm.barrier0() {
// CHECK: call void @llvm.nvvm.barrier0()
nvvm.barrier0
llvm.return
}
-func @nvvm_shfl(
+llvm.func @nvvm_shfl(
%0 : !llvm.i32, %1 : !llvm.i32, %2 : !llvm.i32,
%3 : !llvm.i32, %4 : !llvm.float) -> !llvm.i32 {
// CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
@@ -48,7 +48,7 @@ func @nvvm_shfl(
llvm.return %6 : !llvm.i32
}
-func @nvvm_vote(%0 : !llvm.i32, %1 : !llvm.i1) -> !llvm.i32 {
+llvm.func @nvvm_vote(%0 : !llvm.i32, %1 : !llvm.i1) -> !llvm.i32 {
// CHECK: call i32 @llvm.nvvm.vote.ballot.sync(i32 %{{.*}}, i1 %{{.*}})
%3 = nvvm.vote.ballot.sync %0, %1 : !llvm.i32
llvm.return %3 : !llvm.i32
@@ -56,7 +56,7 @@ func @nvvm_vote(%0 : !llvm.i32, %1 : !llvm.i1) -> !llvm.i32 {
// This function has the "kernel" attribute attached and should appear in the
// NVVM annotations after conversion.
-func @kernel_func() attributes {gpu.kernel} {
+llvm.func @kernel_func() attributes {gpu.kernel} {
llvm.return
}
diff --git a/mlir/test/Target/rocdl.mlir b/mlir/test/Target/rocdl.mlir
index 2a0393bcb03..276203d4167 100644
--- a/mlir/test/Target/rocdl.mlir
+++ b/mlir/test/Target/rocdl.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-translate -mlir-to-rocdlir %s | FileCheck %s
-func @rocdl_special_regs() -> !llvm.i32 {
+llvm.func @rocdl_special_regs() -> !llvm.i32 {
// CHECK-LABEL: rocdl_special_regs
// CHECK: call i32 @llvm.amdgcn.workitem.id.x()
%1 = rocdl.workitem.id.x : !llvm.i32
@@ -29,7 +29,7 @@ func @rocdl_special_regs() -> !llvm.i32 {
llvm.return %1 : !llvm.i32
}
-func @kernel_func() attributes {gpu.kernel} {
+llvm.func @kernel_func() attributes {gpu.kernel} {
// CHECK-LABEL: amdgpu_kernel void @kernel_func
llvm.return
}