author     Matt Arsenault <Matthew.Arsenault@amd.com>  2017-03-21 21:39:51 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>  2017-03-21 21:39:51 +0000
commit     3dbeefa978fb7e7b231b249f9cd90c67b9e83277 (patch)
tree       d74bf7fe30e44588d573919f3625edacb2586112 /llvm/test/CodeGen/MIR
parent     f6021ecddc73d14c94ad70938250d58f330795be (diff)
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently the default C calling convention functions are treated the same as compute kernels. Make this explicit so the default calling convention can be changed to a non-kernel.

Converted with perl -pi -e 's/define void/define amdgpu_kernel void/' on the relevant test directories (and undoing in one place that actually wanted a non-kernel).

llvm-svn: 298444
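For illustration, a minimal sketch of what the substitution does to an IR function signature; the function name and signature below are hypothetical and not taken from this commit:

    ; before: no calling convention given, so the function defaults to kernel handling
    define void @example(float addrspace(1)* %out) #0 {
      ret void
    }

    ; after: the kernel calling convention is spelled out explicitly
    define amdgpu_kernel void @example(float addrspace(1)* %out) #0 {
      ret void
    }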
Diffstat (limited to 'llvm/test/CodeGen/MIR')
-rw-r--r--  llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir    |  2
-rw-r--r--  llvm/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir              | 18
-rw-r--r--  llvm/test/CodeGen/MIR/AMDGPU/intrinsics.mir                    |  2
-rw-r--r--  llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir  |  2
-rw-r--r--  llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir         |  4
5 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir b/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
index 84d3baa4c9a..5da98fb9c2d 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
@@ -6,7 +6,7 @@
@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
- define void @float(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir b/llvm/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
index 3277d37d7e4..7cef01c9d12 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
@@ -1,6 +1,6 @@
# RUN: llc --mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -run-pass si-fold-operands,si-shrink-instructions %s -o - | FileCheck %s
--- |
- define void @add_f32_1.0_one_f16_use() #0 {
+ define amdgpu_kernel void @add_f32_1.0_one_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -11,7 +11,7 @@
ret void
}
- define void @add_f32_1.0_multi_f16_use() #0 {
+ define amdgpu_kernel void @add_f32_1.0_multi_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -22,7 +22,7 @@
ret void
}
- define void @add_f32_1.0_one_f32_use_one_f16_use () #0 {
+ define amdgpu_kernel void @add_f32_1.0_one_f32_use_one_f16_use () #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -33,7 +33,7 @@
ret void
}
- define void @add_f32_1.0_one_f32_use_multi_f16_use () #0 {
+ define amdgpu_kernel void @add_f32_1.0_one_f32_use_multi_f16_use () #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -46,7 +46,7 @@
ret void
}
- define void @add_i32_1_multi_f16_use() #0 {
+ define amdgpu_kernel void @add_i32_1_multi_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f16.add0 = fadd half %f16.val0, 0xH0001
@@ -56,7 +56,7 @@
ret void
}
- define void @add_i32_m2_one_f32_use_multi_f16_use () #0 {
+ define amdgpu_kernel void @add_i32_m2_one_f32_use_multi_f16_use () #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -69,7 +69,7 @@
ret void
}
- define void @add_f16_1.0_multi_f32_use() #0 {
+ define amdgpu_kernel void @add_f16_1.0_multi_f32_use() #0 {
%f32.val0 = load volatile float, float addrspace(1)* undef
%f32.val1 = load volatile float, float addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -80,7 +80,7 @@
ret void
}
- define void @add_f16_1.0_other_high_bits_multi_f16_use() #0 {
+ define amdgpu_kernel void @add_f16_1.0_other_high_bits_multi_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile half, half addrspace(1)* undef
@@ -91,7 +91,7 @@
ret void
}
- define void @add_f16_1.0_other_high_bits_use_f16_f32() #0 {
+ define amdgpu_kernel void @add_f16_1.0_other_high_bits_use_f16_f32() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile half, half addrspace(1)* undef
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/intrinsics.mir b/llvm/test/CodeGen/MIR/AMDGPU/intrinsics.mir
index f43266eacbf..68950a4b251 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/intrinsics.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/intrinsics.mir
@@ -2,7 +2,7 @@
--- |
- define void @use_intrin() {
+ define amdgpu_kernel void @use_intrin() {
ret void
}
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir b/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
index fec7a5d7a38..8cffc86373a 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
@@ -6,7 +6,7 @@
@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
- define void @float(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir b/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
index 10c8128b3ce..32669de15ea 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
@@ -7,7 +7,7 @@
@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
- define void @float(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
@@ -15,7 +15,7 @@
ret void
}
- define void @float2(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float2(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0