summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
diff options
context:
space:
mode:
authorMatt Arsenault <Matthew.Arsenault@amd.com>2017-03-21 21:39:51 +0000
committerMatt Arsenault <Matthew.Arsenault@amd.com>2017-03-21 21:39:51 +0000
commit3dbeefa978fb7e7b231b249f9cd90c67b9e83277 (patch)
treed74bf7fe30e44588d573919f3625edacb2586112 /llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
parentf6021ecddc73d14c94ad70938250d58f330795be (diff)
downloadbcm5719-llvm-3dbeefa978fb7e7b231b249f9cd90c67b9e83277.tar.gz
bcm5719-llvm-3dbeefa978fb7e7b231b249f9cd90c67b9e83277.zip
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently the default C calling convention functions are treated the same as compute kernels. Make this explicit so the default calling convention can be changed to a non-kernel. Converted with perl -pi -e 's/define void/define amdgpu_kernel void/' on the relevant test directories (and undoing in one place that actually wanted a non-kernel). llvm-svn: 298444
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll')
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
index 6014e2ed85f..d26fab4cebe 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
@@ -9,7 +9,7 @@
; CHECK-LABEL: {{^}}groupstaticsize_test0:
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0x800{{$}}
-define void @groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %lds_size) #0 {
+define amdgpu_kernel void @groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %lds_size) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 64
%static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
@@ -23,7 +23,7 @@ define void @groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %
; CHECK-LABEL: {{^}}groupstaticsize_test1:
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0xc00{{$}}
-define void @groupstaticsize_test1(float addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %lds_size) {
+define amdgpu_kernel void @groupstaticsize_test1(float addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %lds_size) {
entry:
%static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
store i32 %static_lds_size, i32 addrspace(1)* %lds_size, align 4
@@ -51,7 +51,7 @@ endif: ; preds = %else, %if
; Exceeds 16-bit simm limit of s_movk_i32
; CHECK-LABEL: {{^}}large_groupstaticsize:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4000{{$}}
-define void @large_groupstaticsize(i32 addrspace(1)* %size, i32 %idx) #0 {
+define amdgpu_kernel void @large_groupstaticsize(i32 addrspace(1)* %size, i32 %idx) #0 {
%gep = getelementptr inbounds [4096 x i32], [4096 x i32] addrspace(3)* @large, i32 0, i32 %idx
store volatile i32 0, i32 addrspace(3)* %gep
%static_lds_size = call i32 @llvm.amdgcn.groupstaticsize()
OpenPOWER on IntegriCloud