| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 |
| commit | 3dbeefa978fb7e7b231b249f9cd90c67b9e83277 | |
| tree | d74bf7fe30e44588d573919f3625edacb2586112 | |
| parent | f6021ecddc73d14c94ad70938250d58f330795be | |
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently, functions with the default C calling convention are treated
the same as compute kernels. Make this explicit so that the default
calling convention can later be changed to mean a non-kernel function.
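To make the distinction concrete, here is a minimal LLVM IR sketch. The function names `@entry` and `@helper` are hypothetical, not from the patch, and at the time the AMDGPU backend had only limited support for real function calls, so read this as illustrating the syntax and intent rather than code the backend would fully lower:

```llvm
; Explicitly annotated: lowered as a compute kernel entry point.
define amdgpu_kernel void @entry(i32 addrspace(1)* %out) {
  %v = call i32 @helper(i32 41)
  store i32 %v, i32 addrspace(1)* %out
  ret void
}

; Default (unspecified) calling convention: this change frees it to
; eventually mean an ordinary non-kernel function.
define i32 @helper(i32 %x) {
  %r = add i32 %x, 1
  ret i32 %r
}
```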
Converted with `perl -pi -e 's/define void/define amdgpu_kernel void/'`
on the relevant test directories, undoing it in the one place that
actually wanted a non-kernel.
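A minimal sketch of the mechanics, assuming a generic test file (neither function below comes from this commit, and the one hand-reverted file is not named in the message): the one-liner rewrites every `define void` into a kernel definition, while a definition that deliberately exercises a non-kernel keeps the unspecified convention.

```llvm
; Rewritten by the perl one-liner: the test entry point becomes a kernel.
define amdgpu_kernel void @test_entry(i32 addrspace(1)* %out) nounwind {
  store i32 0, i32 addrspace(1)* %out
  ret void
}

; Hand-reverted case: this definition deliberately keeps the default,
; non-kernel calling convention, so the blanket rewrite was undone here.
define void @wants_non_kernel(i32 addrspace(1)* %out) nounwind {
  store i32 1, i32 addrspace(1)* %out
  ret void
}
```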
llvm-svn: 298444
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/reorder-stores.ll')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/reorder-stores.ll | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
```diff
diff --git a/llvm/test/CodeGen/AMDGPU/reorder-stores.ll b/llvm/test/CodeGen/AMDGPU/reorder-stores.ll
index 412202fa5d5..ff4069226a6 100644
--- a/llvm/test/CodeGen/AMDGPU/reorder-stores.ll
+++ b/llvm/test/CodeGen/AMDGPU/reorder-stores.ll
@@ -7,7 +7,7 @@
 ; SI: buffer_store_dwordx4
 ; SI: buffer_store_dwordx4
 ; SI: s_endpgm
-define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
   %tmp1 = load <2 x double>, <2 x double> addrspace(1)* %x, align 16
   %tmp4 = load <2 x double>, <2 x double> addrspace(1)* %y, align 16
   store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
@@ -19,7 +19,7 @@ define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocap
 ; SI: ds_read2_b64
 ; SI: ds_write2_b64
 ; SI: s_endpgm
-define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
   %tmp1 = load <2 x double>, <2 x double> addrspace(3)* %x, align 16
   %tmp4 = load <2 x double>, <2 x double> addrspace(3)* %y, align 16
   store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
@@ -39,7 +39,7 @@ define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace
 ; SI: buffer_store_dwordx4
 ; SI: buffer_store_dwordx4
 ; SI: s_endpgm
-define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
   %tmp1 = load <8 x i32>, <8 x i32> addrspace(1)* %x, align 32
   %tmp4 = load <8 x i32>, <8 x i32> addrspace(1)* %y, align 32
   store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
@@ -54,7 +54,7 @@ define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* no
 ; SI-NOT: ds_read
 ; SI: ds_write_b64
 ; SI: s_endpgm
-define void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
   %tmp1 = load <2 x i32>, <2 x i32> addrspace(3)* %x, align 8
   %tmp4 = load <2 x i32>, <2 x i32> addrspace(3)* %y, align 8
   %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
```

