diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 | 
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-21 21:39:51 +0000 | 
| commit | 3dbeefa978fb7e7b231b249f9cd90c67b9e83277 (patch) | |
| tree | d74bf7fe30e44588d573919f3625edacb2586112 /llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll | |
| parent | f6021ecddc73d14c94ad70938250d58f330795be (diff) | |
| download | bcm5719-llvm-3dbeefa978fb7e7b231b249f9cd90c67b9e83277.tar.gz bcm5719-llvm-3dbeefa978fb7e7b231b249f9cd90c67b9e83277.zip | |
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently the default C calling convention functions are treated
the same as compute kernels. Make this explicit so the default
calling convention can be changed to a non-kernel.
Converted with perl -pi -e 's/define void/define amdgpu_kernel void/'
on the relevant test directories (and undoing it in the one place that
actually wanted a non-kernel calling convention).
llvm-svn: 298444
Diffstat (limited to 'llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll')
| -rw-r--r-- | llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll | 16 | 
1 file changed, 8 insertions(+), 8 deletions(-)
| diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll index 67b4ccda1a1..b566c147e9b 100644 --- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll +++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll @@ -45,7 +45,7 @@ define float @load_private_from_flat(float addrspace(4)* %generic_scalar) #0 {  ; CHECK-LABEL: @store_global_from_flat(  ; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*  ; CHECK-NEXT: store float 0.000000e+00, float addrspace(1)* %tmp0 -define void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 { +define amdgpu_kernel void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 {    %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*    store float 0.0, float addrspace(1)* %tmp0    ret void @@ -54,7 +54,7 @@ define void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 {  ; CHECK-LABEL: @store_group_from_flat(  ; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*  ; CHECK-NEXT: store float 0.000000e+00, float addrspace(3)* %tmp0 -define void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 { +define amdgpu_kernel void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 {    %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*    store float 0.0, float addrspace(3)* %tmp0    ret void @@ -63,7 +63,7 @@ define void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 {  ; CHECK-LABEL: @store_private_from_flat(  ; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*  ; CHECK-NEXT: store float 0.000000e+00, float* %tmp0 -define void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 { +define amdgpu_kernel void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 {    %tmp0 = addrspacecast float 
addrspace(4)* %generic_scalar to float*    store float 0.0, float* %tmp0    ret void @@ -74,7 +74,7 @@ define void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 {  ; CHECK-NEXT: %val = load i32, i32 addrspace(1)* %input, align 4  ; CHECK-NEXT: store i32 %val, i32 addrspace(1)* %output, align 4  ; CHECK-NEXT: ret void -define void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 { +define amdgpu_kernel void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {    %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*    %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*    %val = load i32, i32 addrspace(4)* %tmp0, align 4 @@ -87,7 +87,7 @@ define void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace  ; CHECK-NEXT: %val = load i32, i32 addrspace(3)* %input, align 4  ; CHECK-NEXT: store i32 %val, i32 addrspace(3)* %output, align 4  ; CHECK-NEXT: ret void -define void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 { +define amdgpu_kernel void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {    %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*    %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*    %val = load i32, i32 addrspace(4)* %tmp0, align 4 @@ -100,7 +100,7 @@ define void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(  ; CHECK-NEXT: %val = load i32, i32* %input, align 4  ; CHECK-NEXT: store i32 %val, i32* %output, align 4  ; CHECK-NEXT: ret void -define void @load_store_private(i32* nocapture %input, i32* nocapture %output) #0 { +define amdgpu_kernel void @load_store_private(i32* nocapture %input, i32* nocapture %output) #0 {    %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*    %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*    %val = load 
i32, i32 addrspace(4)* %tmp0, align 4 @@ -113,7 +113,7 @@ define void @load_store_private(i32* nocapture %input, i32* nocapture %output) #  ; CHECK-NEXT: %val = load i32, i32 addrspace(4)* %input, align 4  ; CHECK-NEXT: store i32 %val, i32 addrspace(4)* %output, align 4  ; CHECK-NEXT: ret void -define void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4)* nocapture %output) #0 { +define amdgpu_kernel void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4)* nocapture %output) #0 {    %val = load i32, i32 addrspace(4)* %input, align 4    store i32 %val, i32 addrspace(4)* %output, align 4    ret void @@ -122,7 +122,7 @@ define void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4  ; CHECK-LABEL: @store_addrspacecast_ptr_value(  ; CHECK: %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*  ; CHECK-NEXT: store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4 -define void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32 addrspace(4)* addrspace(1)* nocapture %output) #0 { +define amdgpu_kernel void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32 addrspace(4)* addrspace(1)* nocapture %output) #0 {    %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*    store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4    ret void | 

