| author    | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 |
|-----------|--------------------------------------------|---------------------------|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 |
| commit    | 3dbeefa978fb7e7b231b249f9cd90c67b9e83277   |                           |
| tree      | d74bf7fe30e44588d573919f3625edacb2586112   |                           |
| parent    | f6021ecddc73d14c94ad70938250d58f330795be   |                           |
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently, functions with the default C calling convention are treated
the same as compute kernels. Make this explicit so that the default
calling convention can later be changed to a non-kernel convention.
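
To illustrate the distinction, here is a minimal LLVM IR sketch; the function names are hypothetical and not part of this patch:

```llvm
; Minimal sketch (hypothetical names, not from this patch).

; An entry point the runtime can dispatch is marked with the explicit
; amdgpu_kernel calling convention:
define amdgpu_kernel void @example_kernel(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}

; An ordinary function keeps the default C calling convention, which can
; then be treated as a non-kernel (e.g. a callable device function):
define void @example_helper(i32 addrspace(1)* %out) {
  store i32 1, i32 addrspace(1)* %out
  ret void
}
```

With the tests marked explicitly, a later change to the meaning of the default convention no longer silently re-classifies every test function as a kernel.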
Converted with `perl -pi -e 's/define void/define amdgpu_kernel void/'`
on the relevant test directories (and undone in the one place that
actually wanted a non-kernel).
llvm-svn: 298444
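
For reference, the one-liner from the commit message can be replayed on the file in this diff. The second command is a hypothetical inverse showing how a definition that should stay a non-kernel would be reverted; the file that received that manual fix-up is not named in the commit message, so the path below is illustrative only:

```sh
# Replay the substitution from the commit message on the file in this diff:
perl -pi -e 's/define void/define amdgpu_kernel void/' \
  llvm/test/CodeGen/AMDGPU/load-global-i64.ll

# Hypothetical inverse for the one test that actually wanted a non-kernel
# (illustrative path; the real file is not identified in this commit):
perl -pi -e 's/define amdgpu_kernel void/define void/' \
  path/to/non-kernel-test.ll
```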
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/load-global-i64.ll')
 llvm/test/CodeGen/AMDGPU/load-global-i64.ll | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i64.ll b/llvm/test/CodeGen/AMDGPU/load-global-i64.ll
index dd4ce2c10eb..de16b6c8997 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i64.ll
@@ -13,7 +13,7 @@
 ; GCN-HSA: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, [[VAL]]
 
 ; EG: VTX_READ_64
-define void @global_load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
   %ld = load i64, i64 addrspace(1)* %in
   store i64 %ld, i64 addrspace(1)* %out
   ret void
@@ -24,7 +24,7 @@ define void @global_load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
 ; GCN-HSA: flat_load_dwordx4
 
 ; EG: VTX_READ_128
-define void @global_load_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) #0 {
 entry:
   %ld = load <2 x i64>, <2 x i64> addrspace(1)* %in
   store <2 x i64> %ld, <2 x i64> addrspace(1)* %out
@@ -40,7 +40,7 @@ entry:
 
 ; EG: VTX_READ_128
 ; EG: VTX_READ_128
-define void @global_load_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %in) #0 {
 entry:
   %ld = load <3 x i64>, <3 x i64> addrspace(1)* %in
   store <3 x i64> %ld, <3 x i64> addrspace(1)* %out
@@ -56,7 +56,7 @@ entry:
 
 ; EG: VTX_READ_128
 ; EG: VTX_READ_128
-define void @global_load_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
 entry:
   %ld = load <4 x i64>, <4 x i64> addrspace(1)* %in
   store <4 x i64> %ld, <4 x i64> addrspace(1)* %out
@@ -78,7 +78,7 @@ entry:
 ; EG: VTX_READ_128
 ; EG: VTX_READ_128
 ; EG: VTX_READ_128
-define void @global_load_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %in) #0 {
 entry:
   %ld = load <8 x i64>, <8 x i64> addrspace(1)* %in
   store <8 x i64> %ld, <8 x i64> addrspace(1)* %out
@@ -112,7 +112,7 @@ entry:
 ; EG: VTX_READ_128
 ; EG: VTX_READ_128
 ; EG: VTX_READ_128
-define void @global_load_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %in) #0 {
 entry:
   %ld = load <16 x i64>, <16 x i64> addrspace(1)* %in
   store <16 x i64> %ld, <16 x i64> addrspace(1)* %out