summaryrefslogtreecommitdiffstats
path: root/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll')
-rw-r--r--llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll18
1 file changed, 9 insertions, 9 deletions
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
index d9b80e99bf0..79bf92610a8 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
@@ -5,7 +5,7 @@
; CHECK-LABEL: @volatile_load_flat_from_global(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32 addrspace(1)*
-define void @volatile_load_flat_from_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+define amdgpu_kernel void @volatile_load_flat_from_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
@@ -16,7 +16,7 @@ define void @volatile_load_flat_from_global(i32 addrspace(1)* nocapture %input,
; CHECK-LABEL: @volatile_load_flat_from_constant(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32 addrspace(1)*
-define void @volatile_load_flat_from_constant(i32 addrspace(2)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+define amdgpu_kernel void @volatile_load_flat_from_constant(i32 addrspace(2)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(2)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
@@ -27,7 +27,7 @@ define void @volatile_load_flat_from_constant(i32 addrspace(2)* nocapture %input
; CHECK-LABEL: @volatile_load_flat_from_group(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32 addrspace(3)*
-define void @volatile_load_flat_from_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
+define amdgpu_kernel void @volatile_load_flat_from_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
@@ -38,7 +38,7 @@ define void @volatile_load_flat_from_group(i32 addrspace(3)* nocapture %input, i
; CHECK-LABEL: @volatile_load_flat_from_private(
; CHECK: load volatile i32, i32 addrspace(4)*
; CHECK: store i32 %val, i32*
-define void @volatile_load_flat_from_private(i32* nocapture %input, i32* nocapture %output) #0 {
+define amdgpu_kernel void @volatile_load_flat_from_private(i32* nocapture %input, i32* nocapture %output) #0 {
%tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
%val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
@@ -49,7 +49,7 @@ define void @volatile_load_flat_from_private(i32* nocapture %input, i32* nocaptu
; CHECK-LABEL: @volatile_store_flat_to_global(
; CHECK: load i32, i32 addrspace(1)*
; CHECK: store volatile i32 %val, i32 addrspace(4)*
-define void @volatile_store_flat_to_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+define amdgpu_kernel void @volatile_store_flat_to_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
%val = load i32, i32 addrspace(4)* %tmp0, align 4
@@ -60,7 +60,7 @@ define void @volatile_store_flat_to_global(i32 addrspace(1)* nocapture %input, i
; CHECK-LABEL: @volatile_store_flat_to_group(
; CHECK: load i32, i32 addrspace(3)*
; CHECK: store volatile i32 %val, i32 addrspace(4)*
-define void @volatile_store_flat_to_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
+define amdgpu_kernel void @volatile_store_flat_to_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
%tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
%val = load i32, i32 addrspace(4)* %tmp0, align 4
@@ -71,7 +71,7 @@ define void @volatile_store_flat_to_group(i32 addrspace(3)* nocapture %input, i3
; CHECK-LABEL: @volatile_store_flat_to_private(
; CHECK: load i32, i32*
; CHECK: store volatile i32 %val, i32 addrspace(4)*
-define void @volatile_store_flat_to_private(i32* nocapture %input, i32* nocapture %output) #0 {
+define amdgpu_kernel void @volatile_store_flat_to_private(i32* nocapture %input, i32* nocapture %output) #0 {
%tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
%tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
%val = load i32, i32 addrspace(4)* %tmp0, align 4
@@ -119,7 +119,7 @@ define { i32, i1 } @volatile_cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr,
; CHECK-LABEL: @volatile_memset_group_to_flat(
; CHECK: addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
; CHECK: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %1, i8 4, i64 32, i32 4, i1 true)
-define void @volatile_memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
+define amdgpu_kernel void @volatile_memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
%cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 true)
ret void
@@ -128,7 +128,7 @@ define void @volatile_memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y)
; CHECK-LABEL: @volatile_memset_global_to_flat(
; CHECK: addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
; CHECK: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %1, i8 4, i64 32, i32 4, i1 true)
-define void @volatile_memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
+define amdgpu_kernel void @volatile_memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
%cast = addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 true)
ret void
OpenPOWER on IntegriCloud