Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/flat_atomics.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/flat_atomics.ll | 192
1 file changed, 96 insertions(+), 96 deletions(-)
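Every hunk below makes the same mechanical change: each test function switches from the default calling convention to amdgpu_kernel, so it is still compiled as a kernel entry point rather than an ordinary function. In this test, addrspace(4) is the flat address space, which is why the checks expect flat_atomic_*, flat_load_dword and flat_store_dword instructions. A minimal before/after sketch, assembled from the first hunk (the closing ret void and brace are implied by the surrounding context lines):

; Old form, as removed by the patch:
define void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}

; New form, as added by the patch: only the calling convention changes.
define amdgpu_kernel void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
  ret void
}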
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll index 7c91471e30a..cc95d80570e 100644 --- a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll @@ -3,7 +3,7 @@ ; GCN-LABEL: {{^}}atomic_add_i32_offset: ; GCN: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst @@ -13,7 +13,7 @@ entry: ; GCN-LABEL: {{^}}atomic_add_i32_ret_offset: ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_add_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_add_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst @@ -23,7 +23,7 @@ entry: ; GCN-LABEL: {{^}}atomic_add_i32_addr64_offset: ; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_add_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_add_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -34,7 +34,7 @@ entry: ; GCN-LABEL: {{^}}atomic_add_i32_ret_addr64_offset: ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -45,7 +45,7 @@ entry: ; GCN-LABEL: {{^}}atomic_add_i32: ; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_add_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_add_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -54,7 +54,7 @@ entry: ; GCN-LABEL: {{^}}atomic_add_i32_ret: ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_add_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_add_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -63,7 +63,7 @@ entry: ; GCN-LABEL: {{^}}atomic_add_i32_addr64: ; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_add_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_add_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst @@ 
-73,7 +73,7 @@ entry: ; GCN-LABEL: {{^}}atomic_add_i32_ret_addr64: ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_add_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_add_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -83,7 +83,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32_offset: ; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_and_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_and_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst @@ -93,7 +93,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32_ret_offset: ; GCN: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_and_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_and_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst @@ -103,7 +103,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32_addr64_offset: ; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_and_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_and_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -114,7 +114,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32_ret_addr64_offset: ; GCN: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -125,7 +125,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32: ; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_and_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_and_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -134,7 +134,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32_ret: ; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_and_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_and_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -143,7 +143,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32_addr64: ; GCN: flat_atomic_and 
v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_and_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_and_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -153,7 +153,7 @@ entry: ; GCN-LABEL: {{^}}atomic_and_i32_ret_addr64: ; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_and_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_and_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -163,7 +163,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32_offset: ; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_sub_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_sub_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst @@ -173,7 +173,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32_ret_offset: ; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_sub_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_sub_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst @@ -183,7 +183,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32_addr64_offset: ; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_sub_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_sub_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -194,7 +194,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32_ret_addr64_offset: ; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -205,7 +205,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32: ; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_sub_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_sub_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -214,7 +214,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32_ret: ; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_sub_i32_ret(i32 
addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_sub_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -223,7 +223,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32_addr64: ; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_sub_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_sub_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -233,7 +233,7 @@ entry: ; GCN-LABEL: {{^}}atomic_sub_i32_ret_addr64: ; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_sub_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_sub_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -243,7 +243,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32_offset: ; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_max_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst @@ -253,7 +253,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32_ret_offset: ; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_max_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_max_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst @@ -263,7 +263,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32_addr64_offset: ; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_max_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -274,7 +274,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32_ret_addr64_offset: ; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -285,7 +285,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32: ; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_max_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void 
@atomic_max_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -294,7 +294,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32_ret: ; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_max_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_max_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -303,7 +303,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32_addr64: ; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_max_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_max_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -313,7 +313,7 @@ entry: ; GCN-LABEL: {{^}}atomic_max_i32_ret_addr64: ; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_max_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -323,7 +323,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32_offset: ; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umax_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_umax_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst @@ -333,7 +333,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32_ret_offset: ; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_umax_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst @@ -343,7 +343,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32_addr64_offset: ; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umax_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -354,7 +354,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32_ret_addr64_offset: ; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(4)* 
%out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -365,7 +365,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32: ; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umax_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_umax_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -374,7 +374,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32_ret: ; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_umax_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_umax_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -383,7 +383,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32_addr64: ; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umax_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_umax_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -393,7 +393,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umax_i32_ret_addr64: ; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_umax_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -403,7 +403,7 @@ entry: ; GCN-LABEL: {{^}}atomic_min_i32_offset: ; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_min_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_min_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst @@ -413,7 +413,7 @@ entry: ; GCN-LABEL: {{^}}atomic_min_i32_ret_offset: ; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_min_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_min_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst @@ -423,7 +423,7 @@ entry: ; GCN-LABEL: {{^}}atomic_min_i32_addr64_offset: ; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_min_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -434,7 +434,7 @@ entry: ; 
GCN-LABEL: {{^}}atomic_min_i32_ret_addr64_offset: ; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -445,7 +445,7 @@ entry: ; GCN-LABEL: {{^}}atomic_min_i32: ; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_min_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_min_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -454,7 +454,7 @@ entry: ; GCN-LABEL: {{^}}atomic_min_i32_ret: ; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_min_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_min_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -463,7 +463,7 @@ entry: ; GCN-LABEL: {{^}}atomic_min_i32_addr64: ; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_min_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_min_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -473,7 +473,7 @@ entry: ; GCN-LABEL: {{^}}atomic_min_i32_ret_addr64: ; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_min_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -483,7 +483,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32_offset: ; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umin_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_umin_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst @@ -493,7 +493,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32_ret_offset: ; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_umin_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst @@ -503,7 +503,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32_addr64_offset: ; GCN: flat_atomic_umin 
v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umin_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -514,7 +514,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32_ret_addr64_offset: ; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -525,7 +525,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32: ; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umin_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_umin_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -534,7 +534,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32_ret: ; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_umin_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_umin_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -543,7 +543,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32_addr64: ; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_umin_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_umin_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -553,7 +553,7 @@ entry: ; GCN-LABEL: {{^}}atomic_umin_i32_ret_addr64: ; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]{{$}} - define void @atomic_umin_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { + define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -563,7 +563,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32_offset: ; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_or_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_or_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst @@ -573,7 +573,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32_ret_offset: ; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_or_i32_ret_offset(i32 
addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_or_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst @@ -583,7 +583,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32_addr64_offset: ; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_or_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_or_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -594,7 +594,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32_ret_addr64_offset: ; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -605,7 +605,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32: ; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_or_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_or_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -614,7 +614,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32_ret: ; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_or_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_or_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -623,7 +623,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32_addr64: ; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_or_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_or_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -633,7 +633,7 @@ entry: ; GCN-LABEL: {{^}}atomic_or_i32_ret_addr64: ; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_or_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_or_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -643,7 +643,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32_offset: ; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_xchg_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_xchg_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 
addrspace(4)* %out, i32 4 %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst @@ -653,7 +653,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32_ret_offset: ; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_xchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_xchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst @@ -663,7 +663,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32_addr64_offset: ; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_xchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -674,7 +674,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset: ; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -685,7 +685,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32: ; GCN: flat_atomic_swap v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_xchg_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_xchg_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -694,7 +694,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32_ret: ; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_xchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_xchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -703,7 +703,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32_addr64: ; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_xchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -713,7 +713,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xchg_i32_ret_addr64: ; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_xchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 
addrspace(4)* %out, i64 %index %val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -725,7 +725,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_offset: ; GCN: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}} -define void @atomic_cmpxchg_i32_offset(i32 addrspace(4)* %out, i32 %in, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32_offset(i32 addrspace(4)* %out, i32 %in, i32 %old) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst @@ -735,7 +735,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_offset: ; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]] -define void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst @@ -746,7 +746,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset: ; GCN: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}} -define void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -757,7 +757,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64_offset: ; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]] -define void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -769,7 +769,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32: ; GCN: flat_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}} -define void @atomic_cmpxchg_i32(i32 addrspace(4)* %out, i32 %in, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32(i32 addrspace(4)* %out, i32 %in, i32 %old) { entry: %val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst ret void @@ -778,7 +778,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret: ; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]] -define void @atomic_cmpxchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) { entry: %val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst %flag = extractvalue { i32, i1 } %val, 0 @@ -788,7 +788,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64: ; GCN: flat_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}} -define void @atomic_cmpxchg_i32_addr64(i32 
addrspace(4)* %out, i32 %in, i64 %index, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst @@ -798,7 +798,7 @@ entry: ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64: ; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]] -define void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) { +define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst @@ -809,7 +809,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32_offset: ; GCN: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_xor_i32_offset(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_xor_i32_offset(i32 addrspace(4)* %out, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst @@ -819,7 +819,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32_ret_offset: ; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_xor_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_xor_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst @@ -829,7 +829,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32_addr64_offset: ; GCN: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_xor_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xor_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -840,7 +840,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset: ; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -851,7 +851,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32: ; GCN: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}} -define void @atomic_xor_i32(i32 addrspace(4)* %out, i32 %in) { +define amdgpu_kernel void @atomic_xor_i32(i32 addrspace(4)* %out, i32 %in) { entry: %val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst ret void @@ -860,7 +860,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32_ret: ; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] 
-define void @atomic_xor_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { +define amdgpu_kernel void @atomic_xor_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) { entry: %val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst store i32 %val, i32 addrspace(4)* %out2 @@ -869,7 +869,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32_addr64: ; GCN: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}} -define void @atomic_xor_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xor_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -879,7 +879,7 @@ entry: ; GCN-LABEL: {{^}}atomic_xor_i32_ret_addr64: ; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_xor_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { +define amdgpu_kernel void @atomic_xor_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst @@ -890,7 +890,7 @@ entry: ; GCN-LABEL: {{^}}atomic_load_i32_offset: ; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_load_i32_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out) { +define amdgpu_kernel void @atomic_load_i32_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out) { entry: %gep = getelementptr i32, i32 addrspace(4)* %in, i32 4 %val = load atomic i32, i32 addrspace(4)* %gep seq_cst, align 4 @@ -901,7 +901,7 @@ entry: ; GCN-LABEL: {{^}}atomic_load_i32: ; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_load_i32(i32 addrspace(4)* %in, i32 addrspace(4)* %out) { +define amdgpu_kernel void @atomic_load_i32(i32 addrspace(4)* %in, i32 addrspace(4)* %out) { entry: %val = load atomic i32, i32 addrspace(4)* %in seq_cst, align 4 store i32 %val, i32 addrspace(4)* %out @@ -911,7 +911,7 @@ entry: ; GCN-LABEL: {{^}}atomic_load_i32_addr64_offset: ; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_load_i32_addr64_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) { +define amdgpu_kernel void @atomic_load_i32_addr64_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -923,7 +923,7 @@ entry: ; GCN-LABEL: {{^}}atomic_load_i32_addr64: ; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}} ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]] -define void @atomic_load_i32_addr64(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) { +define amdgpu_kernel void @atomic_load_i32_addr64(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index %val = load atomic i32, i32 addrspace(4)* %ptr seq_cst, align 4 @@ -933,7 +933,7 @@ entry: ; GCN-LABEL: {{^}}atomic_store_i32_offset: ; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}} 
-define void @atomic_store_i32_offset(i32 %in, i32 addrspace(4)* %out) { +define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, i32 addrspace(4)* %out) { entry: %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4 store atomic i32 %in, i32 addrspace(4)* %gep seq_cst, align 4 @@ -942,7 +942,7 @@ entry: ; GCN-LABEL: {{^}}atomic_store_i32: ; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}} -define void @atomic_store_i32(i32 %in, i32 addrspace(4)* %out) { +define amdgpu_kernel void @atomic_store_i32(i32 %in, i32 addrspace(4)* %out) { entry: store atomic i32 %in, i32 addrspace(4)* %out seq_cst, align 4 ret void @@ -950,7 +950,7 @@ entry: ; GCN-LABEL: {{^}}atomic_store_i32_addr64_offset: ; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}} -define void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(4)* %out, i64 %index) { +define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(4)* %out, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4 @@ -960,7 +960,7 @@ entry: ; GCN-LABEL: {{^}}atomic_store_i32_addr64: ; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}} -define void @atomic_store_i32_addr64(i32 %in, i32 addrspace(4)* %out, i64 %index) { +define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, i32 addrspace(4)* %out, i64 %index) { entry: %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index store atomic i32 %in, i32 addrspace(4)* %ptr seq_cst, align 4 |
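The RUN lines at the top of flat_atomics.ll sit above the first hunk, so they are not visible in this diff. As a sketch of how the GCN-LABEL/GCN comments are consumed, the lines below show a typical llc plus FileCheck harness for such a test; the invocations are illustrative assumptions, not the file's actual header.

; Hypothetical RUN lines, assumed for illustration only; the real ones are not shown in the diff above.
; FileCheck matches each GCN-LABEL/GCN comment against the llc assembly output.
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s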