Diffstat (limited to 'llvm/test/Analysis')
16 files changed, 107 insertions, 107 deletions
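Every hunk below makes the same one-line change: the test entry point is switched from the default calling convention to amdgpu_kernel, while argument lists, function bodies, and FileCheck lines are left untouched. A representative before/after pair, reproduced from the first hunk of add-sub.ll (the trailing ret void and closing brace fall outside the hunk shown and are restored here only for readability):

-define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
   %vec = load i32, i32 addrspace(1)* %vaddr
   %add = add i32 %vec, %b
   store i32 %add, i32 addrspace(1)* %out
   ret void
 }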
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll b/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll index 76b21d26faa..6419eb11b2b 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll @@ -3,7 +3,7 @@ ; CHECK: 'add_i32' ; CHECK: estimated cost of 1 for {{.*}} add i32 -define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %add = add i32 %vec, %b store i32 %add, i32 addrspace(1)* %out @@ -12,7 +12,7 @@ define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) # ; CHECK: 'add_v2i32' ; CHECK: estimated cost of 2 for {{.*}} add <2 x i32> -define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 { +define amdgpu_kernel void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 { %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr %add = add <2 x i32> %vec, %b store <2 x i32> %add, <2 x i32> addrspace(1)* %out @@ -21,7 +21,7 @@ define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %va ; CHECK: 'add_v3i32' ; CHECK: estimated cost of 3 for {{.*}} add <3 x i32> -define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 { +define amdgpu_kernel void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 { %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr %add = add <3 x i32> %vec, %b store <3 x i32> %add, <3 x i32> addrspace(1)* %out @@ -30,7 +30,7 @@ define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %va ; CHECK: 'add_v4i32' ; CHECK: estimated cost of 4 for {{.*}} add <4 x i32> -define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 { +define amdgpu_kernel void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 { %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr %add = add <4 x i32> %vec, %b store <4 x i32> %add, <4 x i32> addrspace(1)* %out @@ -39,7 +39,7 @@ define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %va ; CHECK: 'add_i64' ; CHECK: estimated cost of 2 for {{.*}} add i64 -define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %add = add i64 %vec, %b store i64 %add, i64 addrspace(1)* %out @@ -48,7 +48,7 @@ define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) # ; CHECK: 'add_v2i64' ; CHECK: estimated cost of 4 for {{.*}} add <2 x i64> -define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 { +define amdgpu_kernel void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 { %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr %add = add <2 x i64> %vec, %b store <2 x i64> %add, <2 x i64> addrspace(1)* %out @@ -57,7 +57,7 @@ define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %va ; CHECK: 'add_v3i64' ; CHECK: estimated cost of 6 for {{.*}} add <3 x i64> -define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 { +define amdgpu_kernel void @add_v3i64(<3 x i64> addrspace(1)* 
%out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 { %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr %add = add <3 x i64> %vec, %b store <3 x i64> %add, <3 x i64> addrspace(1)* %out @@ -66,7 +66,7 @@ define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %va ; CHECK: 'add_v4i64' ; CHECK: estimated cost of 8 for {{.*}} add <4 x i64> -define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 { +define amdgpu_kernel void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 { %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr %add = add <4 x i64> %vec, %b store <4 x i64> %add, <4 x i64> addrspace(1)* %out @@ -75,7 +75,7 @@ define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %va ; CHECK: 'add_v16i64' ; CHECK: estimated cost of 32 for {{.*}} add <16 x i64> -define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 { +define amdgpu_kernel void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 { %vec = load <16 x i64>, <16 x i64> addrspace(1)* %vaddr %add = add <16 x i64> %vec, %b store <16 x i64> %add, <16 x i64> addrspace(1)* %out @@ -84,7 +84,7 @@ define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* ; CHECK: 'add_i16' ; CHECK: estimated cost of 1 for {{.*}} add i16 -define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 { +define amdgpu_kernel void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 { %vec = load i16, i16 addrspace(1)* %vaddr %add = add i16 %vec, %b store i16 %add, i16 addrspace(1)* %out @@ -93,7 +93,7 @@ define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) # ; CHECK: 'add_v2i16' ; CHECK: estimated cost of 2 for {{.*}} add <2 x i16> -define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 { +define amdgpu_kernel void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 { %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr %add = add <2 x i16> %vec, %b store <2 x i16> %add, <2 x i16> addrspace(1)* %out @@ -102,7 +102,7 @@ define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %va ; CHECK: 'sub_i32' ; CHECK: estimated cost of 1 for {{.*}} sub i32 -define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %sub = sub i32 %vec, %b store i32 %sub, i32 addrspace(1)* %out @@ -111,7 +111,7 @@ define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) # ; CHECK: 'sub_i64' ; CHECK: estimated cost of 2 for {{.*}} sub i64 -define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %sub = sub i64 %vec, %b store i64 %sub, i64 addrspace(1)* %out @@ -119,7 +119,7 @@ define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) # } ; CHECK: 'sub_i16' ; CHECK: estimated cost of 1 for {{.*}} sub i16 -define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 { +define amdgpu_kernel void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 { %vec = load i16, i16 
addrspace(1)* %vaddr %sub = sub i16 %vec, %b store i16 %sub, i16 addrspace(1)* %out @@ -128,7 +128,7 @@ define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) # ; CHECK: 'sub_v2i16' ; CHECK: estimated cost of 2 for {{.*}} sub <2 x i16> -define void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 { +define amdgpu_kernel void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 { %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr %sub = sub <2 x i16> %vec, %b store <2 x i16> %sub, <2 x i16> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/bit-ops.ll b/llvm/test/Analysis/CostModel/AMDGPU/bit-ops.ll index a809dbd77bb..aa70f5032cb 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/bit-ops.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/bit-ops.ll @@ -2,7 +2,7 @@ ; CHECK: 'or_i32' ; CHECK: estimated cost of 1 for {{.*}} or i32 -define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %or = or i32 %vec, %b store i32 %or, i32 addrspace(1)* %out @@ -11,7 +11,7 @@ define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 ; CHECK: 'or_i64' ; CHECK: estimated cost of 2 for {{.*}} or i64 -define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %or = or i64 %vec, %b store i64 %or, i64 addrspace(1)* %out @@ -20,7 +20,7 @@ define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 ; CHECK: 'xor_i32' ; CHECK: estimated cost of 1 for {{.*}} xor i32 -define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %or = xor i32 %vec, %b store i32 %or, i32 addrspace(1)* %out @@ -29,7 +29,7 @@ define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) # ; CHECK: 'xor_i64' ; CHECK: estimated cost of 2 for {{.*}} xor i64 -define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %or = xor i64 %vec, %b store i64 %or, i64 addrspace(1)* %out @@ -39,7 +39,7 @@ define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) # ; CHECK: 'and_i32' ; CHECK: estimated cost of 1 for {{.*}} and i32 -define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %or = and i32 %vec, %b store i32 %or, i32 addrspace(1)* %out @@ -48,7 +48,7 @@ define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) # ; CHECK: 'and_i64' ; CHECK: estimated cost of 2 for {{.*}} and i64 -define void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %or = and i64 %vec, %b store i64 %or, i64 addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/br.ll 
b/llvm/test/Analysis/CostModel/AMDGPU/br.ll index 0b964939756..494f8d2c8b2 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/br.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/br.ll @@ -4,7 +4,7 @@ ; CHECK: estimated cost of 10 for instruction: br i1 ; CHECK: estimated cost of 10 for instruction: br label ; CHECK: estimated cost of 10 for instruction: ret void -define void @test_br_cost(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @test_br_cost(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { bb0: br i1 undef, label %bb1, label %bb2 @@ -21,7 +21,7 @@ bb2: ; CHECK: 'test_switch_cost' ; CHECK: Unknown cost for instruction: switch -define void @test_switch_cost(i32 %a) #0 { +define amdgpu_kernel void @test_switch_cost(i32 %a) #0 { entry: switch i32 %a, label %default [ i32 0, label %case0 diff --git a/llvm/test/Analysis/CostModel/AMDGPU/extractelement.ll b/llvm/test/Analysis/CostModel/AMDGPU/extractelement.ll index c328d768646..1efbb5873ac 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/extractelement.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/extractelement.ll @@ -2,7 +2,7 @@ ; CHECK: 'extractelement_v2i32' ; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i32> -define void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) { %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr %elt = extractelement <2 x i32> %vec, i32 1 store i32 %elt, i32 addrspace(1)* %out @@ -11,7 +11,7 @@ define void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1) ; CHECK: 'extractelement_v2f32' ; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x float> -define void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) { %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr %elt = extractelement <2 x float> %vec, i32 1 store float %elt, float addrspace(1)* %out @@ -20,7 +20,7 @@ define void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspac ; CHECK: 'extractelement_v3i32' ; CHECK: estimated cost of 0 for {{.*}} extractelement <3 x i32> -define void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) { %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr %elt = extractelement <3 x i32> %vec, i32 1 store i32 %elt, i32 addrspace(1)* %out @@ -29,7 +29,7 @@ define void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1) ; CHECK: 'extractelement_v4i32' ; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i32> -define void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) { %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr %elt = extractelement <4 x i32> %vec, i32 1 store i32 %elt, i32 addrspace(1)* %out @@ -38,7 +38,7 @@ define void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1) ; CHECK: 'extractelement_v8i32' ; CHECK: estimated cost of 0 for {{.*}} extractelement <8 x i32> -define void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v8i32(i32 
addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) { %vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr %elt = extractelement <8 x i32> %vec, i32 1 store i32 %elt, i32 addrspace(1)* %out @@ -48,7 +48,7 @@ define void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1) ; FIXME: Should be non-0 ; CHECK: 'extractelement_v8i32_dynindex' ; CHECK: estimated cost of 2 for {{.*}} extractelement <8 x i32> -define void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr, i32 %idx) { +define amdgpu_kernel void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr, i32 %idx) { %vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr %elt = extractelement <8 x i32> %vec, i32 %idx store i32 %elt, i32 addrspace(1)* %out @@ -57,7 +57,7 @@ define void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> add ; CHECK: 'extractelement_v2i64' ; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i64> -define void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) { %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr %elt = extractelement <2 x i64> %vec, i64 1 store i64 %elt, i64 addrspace(1)* %out @@ -66,7 +66,7 @@ define void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1) ; CHECK: 'extractelement_v3i64' ; CHECK: estimated cost of 0 for {{.*}} extractelement <3 x i64> -define void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr) { %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr %elt = extractelement <3 x i64> %vec, i64 1 store i64 %elt, i64 addrspace(1)* %out @@ -75,7 +75,7 @@ define void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1) ; CHECK: 'extractelement_v4i64' ; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i64> -define void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr) { %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr %elt = extractelement <4 x i64> %vec, i64 1 store i64 %elt, i64 addrspace(1)* %out @@ -84,7 +84,7 @@ define void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1) ; CHECK: 'extractelement_v8i64' ; CHECK: estimated cost of 0 for {{.*}} extractelement <8 x i64> -define void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr) { %vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr %elt = extractelement <8 x i64> %vec, i64 1 store i64 %elt, i64 addrspace(1)* %out @@ -93,7 +93,7 @@ define void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1) ; CHECK: 'extractelement_v4i8' ; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i8> -define void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %vaddr) { %vec = load <4 x i8>, <4 x i8> addrspace(1)* %vaddr %elt = extractelement <4 x i8> %vec, i8 1 store i8 %elt, i8 addrspace(1)* %out @@ -102,7 +102,7 @@ define void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* % ; 
CHECK: 'extractelement_v2i16' ; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i16> -define void @extractelement_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) { +define amdgpu_kernel void @extractelement_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) { %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr %elt = extractelement <2 x i16> %vec, i16 1 store i16 %elt, i16 addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll b/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll index 9c551ec8afe..0d49e2967d2 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll @@ -2,7 +2,7 @@ ; CHECK: 'fabs_f32' ; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32 -define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 { %vec = load float, float addrspace(1)* %vaddr %fabs = call float @llvm.fabs.f32(float %vec) #1 store float %fabs, float addrspace(1)* %out @@ -11,7 +11,7 @@ define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 { ; CHECK: 'fabs_v2f32' ; CHECK: estimated cost of 0 for {{.*}} call <2 x float> @llvm.fabs.v2f32 -define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 { %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %vec) #1 store <2 x float> %fabs, <2 x float> addrspace(1)* %out @@ -20,7 +20,7 @@ define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1) ; CHECK: 'fabs_v3f32' ; CHECK: estimated cost of 0 for {{.*}} call <3 x float> @llvm.fabs.v3f32 -define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 { %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr %fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %vec) #1 store <3 x float> %fabs, <3 x float> addrspace(1)* %out @@ -29,7 +29,7 @@ define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1) ; CHECK: 'fabs_f64' ; CHECK: estimated cost of 0 for {{.*}} call double @llvm.fabs.f64 -define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 { %vec = load double, double addrspace(1)* %vaddr %fabs = call double @llvm.fabs.f64(double %vec) #1 store double %fabs, double addrspace(1)* %out @@ -38,7 +38,7 @@ define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 ; CHECK: 'fabs_v2f64' ; CHECK: estimated cost of 0 for {{.*}} call <2 x double> @llvm.fabs.v2f64 -define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 { %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %vec) #1 store <2 x double> %fabs, <2 x double> addrspace(1)* %out @@ -47,7 +47,7 @@ define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace( ; CHECK: 'fabs_v3f64' ; CHECK: estimated cost of 0 for {{.*}} call <3 x double> @llvm.fabs.v3f64 -define 
void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 { %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr %fabs = call <3 x double> @llvm.fabs.v3f64(<3 x double> %vec) #1 store <3 x double> %fabs, <3 x double> addrspace(1)* %out @@ -56,7 +56,7 @@ define void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace( ; CHECK: 'fabs_f16' ; CHECK: estimated cost of 0 for {{.*}} call half @llvm.fabs.f16 -define void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 { %vec = load half, half addrspace(1)* %vaddr %fabs = call half @llvm.fabs.f16(half %vec) #1 store half %fabs, half addrspace(1)* %out @@ -65,7 +65,7 @@ define void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 { ; CHECK: 'fabs_v2f16' ; CHECK: estimated cost of 0 for {{.*}} call <2 x half> @llvm.fabs.v2f16 -define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 { %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %vec) #1 store <2 x half> %fabs, <2 x half> addrspace(1)* %out @@ -74,7 +74,7 @@ define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* ; CHECK: 'fabs_v3f16' ; CHECK: estimated cost of 0 for {{.*}} call <3 x half> @llvm.fabs.v3f16 -define void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 { +define amdgpu_kernel void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 { %vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr %fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %vec) #1 store <3 x half> %fabs, <3 x half> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll b/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll index 00e91bd6223..d7ac7359299 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll @@ -3,7 +3,7 @@ ; ALL: 'fadd_f32' ; ALL: estimated cost of 1 for {{.*}} fadd float -define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { +define amdgpu_kernel void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { %vec = load float, float addrspace(1)* %vaddr %add = fadd float %vec, %b store float %add, float addrspace(1)* %out @@ -12,7 +12,7 @@ define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa ; ALL: 'fadd_v2f32' ; ALL: estimated cost of 2 for {{.*}} fadd <2 x float> -define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { +define amdgpu_kernel void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr %add = fadd <2 x float> %vec, %b store <2 x float> %add, <2 x float> addrspace(1)* %out @@ -21,7 +21,7 @@ define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1) ; ALL: 'fadd_v3f32' ; ALL: estimated cost of 3 for {{.*}} fadd <3 x float> -define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { +define amdgpu_kernel void @fadd_v3f32(<3 x float> addrspace(1)* 
%out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr %add = fadd <3 x float> %vec, %b store <3 x float> %add, <3 x float> addrspace(1)* %out @@ -31,7 +31,7 @@ define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1) ; ALL: 'fadd_f64' ; FASTF64: estimated cost of 2 for {{.*}} fadd double ; SLOWF64: estimated cost of 3 for {{.*}} fadd double -define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { +define amdgpu_kernel void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { %vec = load double, double addrspace(1)* %vaddr %add = fadd double %vec, %b store double %add, double addrspace(1)* %out @@ -41,7 +41,7 @@ define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do ; ALL: 'fadd_v2f64' ; FASTF64: estimated cost of 4 for {{.*}} fadd <2 x double> ; SLOWF64: estimated cost of 6 for {{.*}} fadd <2 x double> -define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { +define amdgpu_kernel void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr %add = fadd <2 x double> %vec, %b store <2 x double> %add, <2 x double> addrspace(1)* %out @@ -51,7 +51,7 @@ define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace( ; ALL: 'fadd_v3f64' ; FASTF64: estimated cost of 6 for {{.*}} fadd <3 x double> ; SLOWF64: estimated cost of 9 for {{.*}} fadd <3 x double> -define void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { +define amdgpu_kernel void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr %add = fadd <3 x double> %vec, %b store <3 x double> %add, <3 x double> addrspace(1)* %out @@ -60,7 +60,7 @@ define void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace( ; ALL 'fadd_f16' ; ALL estimated cost of 1 for {{.*}} fadd half -define void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { +define amdgpu_kernel void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { %vec = load half, half addrspace(1)* %vaddr %add = fadd half %vec, %b store half %add, half addrspace(1)* %out @@ -69,7 +69,7 @@ define void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half % ; ALL 'fadd_v2f16' ; ALL estimated cost of 2 for {{.*}} fadd <2 x half> -define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { +define amdgpu_kernel void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr %add = fadd <2 x half> %vec, %b store <2 x half> %add, <2 x half> addrspace(1)* %out @@ -78,7 +78,7 @@ define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* ; ALL 'fadd_v4f16' ; ALL estimated cost of 4 for {{.*}} fadd <4 x half> -define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { +define amdgpu_kernel void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr %add = fadd <4 x half> %vec, %b store <4 x half> %add, <4 x 
half> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll b/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll index 3f374422ad9..caa9bff7b2a 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll @@ -5,7 +5,7 @@ ; CHECK: 'fdiv_f32' ; ALL: estimated cost of 10 for {{.*}} fdiv float -define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { +define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { %vec = load float, float addrspace(1)* %vaddr %add = fdiv float %vec, %b store float %add, float addrspace(1)* %out @@ -14,7 +14,7 @@ define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa ; ALL: 'fdiv_v2f32' ; ALL: estimated cost of 20 for {{.*}} fdiv <2 x float> -define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { +define amdgpu_kernel void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr %add = fdiv <2 x float> %vec, %b store <2 x float> %add, <2 x float> addrspace(1)* %out @@ -23,7 +23,7 @@ define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1) ; ALL: 'fdiv_v3f32' ; ALL: estimated cost of 30 for {{.*}} fdiv <3 x float> -define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { +define amdgpu_kernel void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr %add = fdiv <3 x float> %vec, %b store <3 x float> %add, <3 x float> addrspace(1)* %out @@ -35,7 +35,7 @@ define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1) ; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double ; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double ; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double -define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { +define amdgpu_kernel void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { %vec = load double, double addrspace(1)* %vaddr %add = fdiv double %vec, %b store double %add, double addrspace(1)* %out @@ -47,7 +47,7 @@ define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do ; CISLOWF64: estimated cost of 66 for {{.*}} fdiv <2 x double> ; SIFASTF64: estimated cost of 64 for {{.*}} fdiv <2 x double> ; SISLOWF64: estimated cost of 72 for {{.*}} fdiv <2 x double> -define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { +define amdgpu_kernel void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr %add = fdiv <2 x double> %vec, %b store <2 x double> %add, <2 x double> addrspace(1)* %out @@ -59,7 +59,7 @@ define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace( ; CISLOWF64: estimated cost of 99 for {{.*}} fdiv <3 x double> ; SIFASTF64: estimated cost of 96 for {{.*}} fdiv <3 x double> ; SISLOWF64: estimated cost of 108 for {{.*}} fdiv <3 x double> -define void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { +define amdgpu_kernel void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> 
addrspace(1)* %vaddr, <3 x double> %b) #0 { %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr %add = fdiv <3 x double> %vec, %b store <3 x double> %add, <3 x double> addrspace(1)* %out @@ -68,7 +68,7 @@ define void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace( ; ALL: 'fdiv_f16' ; ALL: estimated cost of 10 for {{.*}} fdiv half -define void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { +define amdgpu_kernel void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { %vec = load half, half addrspace(1)* %vaddr %add = fdiv half %vec, %b store half %add, half addrspace(1)* %out @@ -77,7 +77,7 @@ define void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half % ; ALL: 'fdiv_v2f16' ; ALL: estimated cost of 20 for {{.*}} fdiv <2 x half> -define void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { +define amdgpu_kernel void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr %add = fdiv <2 x half> %vec, %b store <2 x half> %add, <2 x half> addrspace(1)* %out @@ -86,7 +86,7 @@ define void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* ; ALL: 'fdiv_v4f16' ; ALL: estimated cost of 40 for {{.*}} fdiv <4 x half> -define void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { +define amdgpu_kernel void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr %add = fdiv <4 x half> %vec, %b store <4 x half> %add, <4 x half> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll index 6303bb7988c..915c35a23b3 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll @@ -3,7 +3,7 @@ ; ALL: 'fmul_f32' ; ALL: estimated cost of 1 for {{.*}} fmul float -define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { +define amdgpu_kernel void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { %vec = load float, float addrspace(1)* %vaddr %add = fmul float %vec, %b store float %add, float addrspace(1)* %out @@ -12,7 +12,7 @@ define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa ; ALL: 'fmul_v2f32' ; ALL: estimated cost of 2 for {{.*}} fmul <2 x float> -define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { +define amdgpu_kernel void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr %add = fmul <2 x float> %vec, %b store <2 x float> %add, <2 x float> addrspace(1)* %out @@ -21,7 +21,7 @@ define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1) ; ALL: 'fmul_v3f32' ; ALL: estimated cost of 3 for {{.*}} fmul <3 x float> -define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { +define amdgpu_kernel void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr %add = fmul <3 x float> %vec, %b store <3 x float> %add, <3 x float> addrspace(1)* %out @@ -31,7 +31,7 @@ define void 
@fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1) ; ALL: 'fmul_f64' ; FASTF64: estimated cost of 2 for {{.*}} fmul double ; SLOWF64: estimated cost of 3 for {{.*}} fmul double -define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { +define amdgpu_kernel void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { %vec = load double, double addrspace(1)* %vaddr %add = fmul double %vec, %b store double %add, double addrspace(1)* %out @@ -41,7 +41,7 @@ define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do ; ALL: 'fmul_v2f64' ; FASTF64: estimated cost of 4 for {{.*}} fmul <2 x double> ; SLOWF64: estimated cost of 6 for {{.*}} fmul <2 x double> -define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { +define amdgpu_kernel void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr %add = fmul <2 x double> %vec, %b store <2 x double> %add, <2 x double> addrspace(1)* %out @@ -51,7 +51,7 @@ define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace( ; ALL: 'fmul_v3f64' ; FASTF64: estimated cost of 6 for {{.*}} fmul <3 x double> ; SLOWF64: estimated cost of 9 for {{.*}} fmul <3 x double> -define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { +define amdgpu_kernel void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr %add = fmul <3 x double> %vec, %b store <3 x double> %add, <3 x double> addrspace(1)* %out @@ -60,7 +60,7 @@ define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace( ; ALL 'fmul_f16' ; ALL estimated cost of 1 for {{.*}} fmul half -define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { +define amdgpu_kernel void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { %vec = load half, half addrspace(1)* %vaddr %add = fmul half %vec, %b store half %add, half addrspace(1)* %out @@ -69,7 +69,7 @@ define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half % ; ALL 'fmul_v2f16' ; ALL estimated cost of 2 for {{.*}} fmul <2 x half> -define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { +define amdgpu_kernel void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr %add = fmul <2 x half> %vec, %b store <2 x half> %add, <2 x half> addrspace(1)* %out @@ -78,7 +78,7 @@ define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* ; ALL 'fmul_v4f16' ; ALL estimated cost of 4 for {{.*}} fmul <4 x half> -define void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { +define amdgpu_kernel void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr %add = fmul <4 x half> %vec, %b store <4 x half> %add, <4 x half> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll index e0850be9867..cb89d292f71 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll +++ 
b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll @@ -3,7 +3,7 @@ ; ALL: 'fsub_f32' ; ALL: estimated cost of 1 for {{.*}} fsub float -define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { +define amdgpu_kernel void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { %vec = load float, float addrspace(1)* %vaddr %add = fsub float %vec, %b store float %add, float addrspace(1)* %out @@ -12,7 +12,7 @@ define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa ; ALL: 'fsub_v2f32' ; ALL: estimated cost of 2 for {{.*}} fsub <2 x float> -define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { +define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr %add = fsub <2 x float> %vec, %b store <2 x float> %add, <2 x float> addrspace(1)* %out @@ -21,7 +21,7 @@ define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1) ; ALL: 'fsub_v3f32' ; ALL: estimated cost of 3 for {{.*}} fsub <3 x float> -define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { +define amdgpu_kernel void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr %add = fsub <3 x float> %vec, %b store <3 x float> %add, <3 x float> addrspace(1)* %out @@ -31,7 +31,7 @@ define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1) ; ALL: 'fsub_f64' ; FASTF64: estimated cost of 2 for {{.*}} fsub double ; SLOWF64: estimated cost of 3 for {{.*}} fsub double -define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { +define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { %vec = load double, double addrspace(1)* %vaddr %add = fsub double %vec, %b store double %add, double addrspace(1)* %out @@ -41,7 +41,7 @@ define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do ; ALL: 'fsub_v2f64' ; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double> ; SLOWF64: estimated cost of 6 for {{.*}} fsub <2 x double> -define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { +define amdgpu_kernel void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr %add = fsub <2 x double> %vec, %b store <2 x double> %add, <2 x double> addrspace(1)* %out @@ -51,7 +51,7 @@ define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace( ; ALL: 'fsub_v3f64' ; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double> ; SLOWF64: estimated cost of 9 for {{.*}} fsub <3 x double> -define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { +define amdgpu_kernel void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr %add = fsub <3 x double> %vec, %b store <3 x double> %add, <3 x double> addrspace(1)* %out @@ -60,7 +60,7 @@ define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace( ; ALL: 'fsub_f16' ; ALL: estimated cost of 1 for {{.*}} fsub half -define void 
@fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { +define amdgpu_kernel void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { %vec = load half, half addrspace(1)* %vaddr %add = fsub half %vec, %b store half %add, half addrspace(1)* %out @@ -69,7 +69,7 @@ define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half % ; ALL: 'fsub_v2f16' ; ALL: estimated cost of 2 for {{.*}} fsub <2 x half> -define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { +define amdgpu_kernel void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr %add = fsub <2 x half> %vec, %b store <2 x half> %add, <2 x half> addrspace(1)* %out @@ -78,7 +78,7 @@ define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* ; ALL: 'fsub_v4f16' ; ALL: estimated cost of 4 for {{.*}} fsub <4 x half> -define void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { +define amdgpu_kernel void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr %add = fsub <4 x half> %vec, %b store <4 x half> %add, <4 x half> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/insertelement.ll b/llvm/test/Analysis/CostModel/AMDGPU/insertelement.ll index 1765afe3169..6f296a3e7a3 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/insertelement.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/insertelement.ll @@ -2,7 +2,7 @@ ; CHECK: 'insertelement_v2i32' ; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i32> -define void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) { +define amdgpu_kernel void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) { %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr %insert = insertelement <2 x i32> %vec, i32 1, i32 123 store <2 x i32> %insert, <2 x i32> addrspace(1)* %out @@ -11,7 +11,7 @@ define void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspa ; CHECK: 'insertelement_v2i64' ; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i64> -define void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) { +define amdgpu_kernel void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) { %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr %insert = insertelement <2 x i64> %vec, i64 1, i64 123 store <2 x i64> %insert, <2 x i64> addrspace(1)* %out @@ -20,7 +20,7 @@ define void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspa ; CHECK: 'insertelement_v2i16' ; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i16> -define void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) { +define amdgpu_kernel void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) { %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr %insert = insertelement <2 x i16> %vec, i16 1, i16 123 store <2 x i16> %insert, <2 x i16> addrspace(1)* %out @@ -29,7 +29,7 @@ define void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspa ; CHECK: 'insertelement_v2i8' ; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i8> -define void @insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* 
%vaddr) { +define amdgpu_kernel void @insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %vaddr) { %vec = load <2 x i8>, <2 x i8> addrspace(1)* %vaddr %insert = insertelement <2 x i8> %vec, i8 1, i8 123 store <2 x i8> %insert, <2 x i8> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/mul.ll b/llvm/test/Analysis/CostModel/AMDGPU/mul.ll index cbc755a6e6a..aac7b68f50c 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/mul.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/mul.ll @@ -2,7 +2,7 @@ ; CHECK: 'mul_i32' ; CHECK: estimated cost of 3 for {{.*}} mul i32 -define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %mul = mul i32 %vec, %b store i32 %mul, i32 addrspace(1)* %out @@ -11,7 +11,7 @@ define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) # ; CHECK: 'mul_v2i32' ; CHECK: estimated cost of 6 for {{.*}} mul <2 x i32> -define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 { +define amdgpu_kernel void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 { %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr %mul = mul <2 x i32> %vec, %b store <2 x i32> %mul, <2 x i32> addrspace(1)* %out @@ -20,7 +20,7 @@ define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %va ; CHECK: 'mul_v3i32' ; CHECK: estimated cost of 9 for {{.*}} mul <3 x i32> -define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 { +define amdgpu_kernel void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 { %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr %mul = mul <3 x i32> %vec, %b store <3 x i32> %mul, <3 x i32> addrspace(1)* %out @@ -29,7 +29,7 @@ define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %va ; CHECK: 'mul_v4i32' ; CHECK: estimated cost of 12 for {{.*}} mul <4 x i32> -define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 { +define amdgpu_kernel void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 { %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr %mul = mul <4 x i32> %vec, %b store <4 x i32> %mul, <4 x i32> addrspace(1)* %out @@ -38,7 +38,7 @@ define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %va ; CHECK: 'mul_i64' ; CHECK: estimated cost of 16 for {{.*}} mul i64 -define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %mul = mul i64 %vec, %b store i64 %mul, i64 addrspace(1)* %out @@ -47,7 +47,7 @@ define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) # ; CHECK: 'mul_v2i64' ; CHECK: estimated cost of 32 for {{.*}} mul <2 x i64> -define void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 { +define amdgpu_kernel void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 { %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr %mul = mul <2 x i64> %vec, %b store <2 x i64> %mul, <2 x i64> addrspace(1)* %out @@ -56,7 +56,7 @@ define void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> 
addrspace(1)* %va ; CHECK: 'mul_v3i64' ; CHECK: estimated cost of 48 for {{.*}} mul <3 x i64> -define void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 { +define amdgpu_kernel void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 { %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr %mul = mul <3 x i64> %vec, %b store <3 x i64> %mul, <3 x i64> addrspace(1)* %out @@ -65,7 +65,7 @@ define void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %va ; CHECK: 'mul_v4i64' ; CHECK: estimated cost of 64 for {{.*}} mul <4 x i64> -define void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 { +define amdgpu_kernel void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 { %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr %mul = mul <4 x i64> %vec, %b store <4 x i64> %mul, <4 x i64> addrspace(1)* %out @@ -75,7 +75,7 @@ define void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %va ; CHECK: 'mul_v8i64' ; CHECK: estimated cost of 128 for {{.*}} mul <8 x i64> -define void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 { +define amdgpu_kernel void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 { %vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr %mul = mul <8 x i64> %vec, %b store <8 x i64> %mul, <8 x i64> addrspace(1)* %out diff --git a/llvm/test/Analysis/CostModel/AMDGPU/shifts.ll b/llvm/test/Analysis/CostModel/AMDGPU/shifts.ll index 003aed7b2fc..85fb0ebe14e 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/shifts.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/shifts.ll @@ -3,7 +3,7 @@ ; ALL: 'shl_i32' ; ALL: estimated cost of 1 for {{.*}} shl i32 -define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %or = shl i32 %vec, %b store i32 %or, i32 addrspace(1)* %out @@ -13,7 +13,7 @@ define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) # ; ALL: 'shl_i64' ; FAST64: estimated cost of 2 for {{.*}} shl i64 ; SLOW64: estimated cost of 3 for {{.*}} shl i64 -define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %or = shl i64 %vec, %b store i64 %or, i64 addrspace(1)* %out @@ -22,7 +22,7 @@ define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) # ; ALL: 'lshr_i32' ; ALL: estimated cost of 1 for {{.*}} lshr i32 -define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %or = lshr i32 %vec, %b store i32 %or, i32 addrspace(1)* %out @@ -32,7 +32,7 @@ define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) ; ALL: 'lshr_i64' ; FAST64: estimated cost of 2 for {{.*}} lshr i64 ; SLOW64: estimated cost of 3 for {{.*}} lshr i64 -define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %or = lshr i64 
%vec, %b store i64 %or, i64 addrspace(1)* %out @@ -41,7 +41,7 @@ define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) ; ALL: 'ashr_i32' ; ALL: estimated cost of 1 for {{.*}} ashr i32 -define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { +define amdgpu_kernel void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 { %vec = load i32, i32 addrspace(1)* %vaddr %or = ashr i32 %vec, %b store i32 %or, i32 addrspace(1)* %out @@ -51,7 +51,7 @@ define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) ; ALL: 'ashr_i64' ; FAST64: estimated cost of 2 for {{.*}} ashr i64 ; SLOW64: estimated cost of 3 for {{.*}} ashr i64 -define void @ashr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { +define amdgpu_kernel void @ashr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 { %vec = load i64, i64 addrspace(1)* %vaddr %or = ashr i64 %vec, %b store i64 %or, i64 addrspace(1)* %out diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll index 319a697dfd3..d2266952259 100644 --- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll +++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll @@ -1,7 +1,7 @@ ; RUN: opt -mtriple=amdgcn-- -analyze -divergence %s | FileCheck %s ; CHECK: DIVERGENT: %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0 -define void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) #0 { +define amdgpu_kernel void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) #0 { %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0 store i32 %swizzle, i32 addrspace(1)* %out, align 4 ret void diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll index b4fa79a6ba9..6144ffea5b6 100644 --- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll +++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll @@ -5,7 +5,7 @@ ; CHECK: DIVERGENT: %tmp11 = load volatile float, float addrspace(1)* %tmp5, align 4 ; The post dominator tree does not have a root node in this case -define void @no_return_blocks(float addrspace(1)* noalias nocapture readonly %arg, float addrspace(1)* noalias nocapture readonly %arg1) #0 { +define amdgpu_kernel void @no_return_blocks(float addrspace(1)* noalias nocapture readonly %arg, float addrspace(1)* noalias nocapture readonly %arg1) #0 { bb0: %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tmp2 = sext i32 %tmp to i64 diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll index ca93dda2c57..7ade8eabd45 100644 --- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll +++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll @@ -1,7 +1,7 @@ ; RUN: opt %s -mtriple amdgcn-- -analyze -divergence | FileCheck %s ; CHECK: DIVERGENT: %tmp = cmpxchg volatile -define void @unreachable_loop(i32 %tidx) #0 { +define amdgpu_kernel void @unreachable_loop(i32 %tidx) #0 { entry: unreachable diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll index 669ee802c51..98fbc88a2cf 100644 --- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll +++ 
b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll @@ -7,35 +7,35 @@ declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0 declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0 ; CHECK: DIVERGENT: %id.x = call i32 @llvm.amdgcn.workitem.id.x() -define void @workitem_id_x() #1 { +define amdgpu_kernel void @workitem_id_x() #1 { %id.x = call i32 @llvm.amdgcn.workitem.id.x() store volatile i32 %id.x, i32 addrspace(1)* undef ret void } ; CHECK: DIVERGENT: %id.y = call i32 @llvm.amdgcn.workitem.id.y() -define void @workitem_id_y() #1 { +define amdgpu_kernel void @workitem_id_y() #1 { %id.y = call i32 @llvm.amdgcn.workitem.id.y() store volatile i32 %id.y, i32 addrspace(1)* undef ret void } ; CHECK: DIVERGENT: %id.z = call i32 @llvm.amdgcn.workitem.id.z() -define void @workitem_id_z() #1 { +define amdgpu_kernel void @workitem_id_z() #1 { %id.z = call i32 @llvm.amdgcn.workitem.id.z() store volatile i32 %id.z, i32 addrspace(1)* undef ret void } ; CHECK: DIVERGENT: %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0) -define void @mbcnt_lo() #1 { +define amdgpu_kernel void @mbcnt_lo() #1 { %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0) store volatile i32 %mbcnt.lo, i32 addrspace(1)* undef ret void } ; CHECK: DIVERGENT: %mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0) -define void @mbcnt_hi() #1 { +define amdgpu_kernel void @mbcnt_hi() #1 { %mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0) store volatile i32 %mbcnt.hi, i32 addrspace(1)* undef ret void |
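A reading note for the cost-model hunks above, derived only from the CHECK lines they preserve (which this patch does not modify): vector operations are costed as the element count times the scalar cost (add <3 x i32> is 3, add <16 x i64> is 32), 64-bit integer add/sub cost 2 versus 1 for their 32-bit forms, 64-bit shifts cost 2 or 3 depending on the FAST64/SLOW64 prefix, and fdiv carries a flat estimate of 10 per f32/f16 element (so fdiv <4 x half> is 40), with the f64 estimates varying by subtarget as the CI/SI and FASTF64/SLOWF64 prefixes show. In short, for the operations covered by these tests:

  cost(op, <N x T>) = N * cost(op, T)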