| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 |
| commit | 3dbeefa978fb7e7b231b249f9cd90c67b9e83277 (patch) | |
| tree | d74bf7fe30e44588d573919f3625edacb2586112 /llvm/test/CodeGen/AMDGPU/loop_break.ll | |
| parent | f6021ecddc73d14c94ad70938250d58f330795be (diff) | |
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently, functions with the default C calling convention are treated
the same as compute kernels. Make this explicit so that the default
calling convention can be changed to a non-kernel.
Converted with `perl -pi -e 's/define void/define amdgpu_kernel void/'`
on the relevant test directories (and undoing the change in the one place
that actually wanted a non-kernel).
llvm-svn: 298444
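For reference, a minimal sketch of how the one-liner above might be applied across a test tree. Only the perl substitution comes from the commit message; the `find` wrapper and the single `llvm/test/CodeGen/AMDGPU` directory are assumptions standing in for "the relevant test directories", which the commit does not enumerate.

```sh
# Hedged sketch: run the in-place substitution over every .ll test file.
# perl -p loops over input lines and prints them, -i edits files in place,
# and -e supplies the substitution expression inline.
find llvm/test/CodeGen/AMDGPU -name '*.ll' \
  -exec perl -pi -e 's/define void/define amdgpu_kernel void/' {} +
```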
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/loop_break.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/loop_break.ll | 12 |
1 file changed, 6 insertions, 6 deletions
```diff
diff --git a/llvm/test/CodeGen/AMDGPU/loop_break.ll b/llvm/test/CodeGen/AMDGPU/loop_break.ll
index 97212f5dc12..492472155ee 100644
--- a/llvm/test/CodeGen/AMDGPU/loop_break.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop_break.ll
@@ -43,7 +43,7 @@
 ; GCN: ; BB#4: ; %bb9
 ; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
 ; GCN-NEXT: s_endpgm
-define void @break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp = sub i32 %id, %arg
@@ -87,7 +87,7 @@ bb9:
 ; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
 ; OPT-NEXT: store volatile i32 7
 ; OPT-NEXT: ret void
-define void @undef_phi_cond_break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @undef_phi_cond_break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp = sub i32 %id, %arg
@@ -140,7 +140,7 @@ bb9: ; preds = %Flow
 ; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
 ; OPT-NEXT: store volatile i32 7
 ; OPT-NEXT: ret void
-define void @constexpr_phi_cond_break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @constexpr_phi_cond_break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp = sub i32 %id, %arg
@@ -190,7 +190,7 @@ bb9: ; preds = %Flow
 ; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
 ; OPT-NEXT: store volatile i32 7
 ; OPT-NEXT: ret void
-define void @true_phi_cond_break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @true_phi_cond_break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp = sub i32 %id, %arg
@@ -240,7 +240,7 @@ bb9: ; preds = %Flow
 ; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
 ; OPT-NEXT: store volatile i32 7
 ; OPT-NEXT: ret void
-define void @false_phi_cond_break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @false_phi_cond_break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp = sub i32 %id, %arg
@@ -295,7 +295,7 @@ bb9: ; preds = %Flow
 ; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %1)
 ; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
 ; OPT-NEXT: ret void
-define void @invert_true_phi_cond_break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @invert_true_phi_cond_break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp = sub i32 %id, %arg
```