author     Tom Stellard <thomas.stellard@amd.com>    2016-04-14 16:27:07 +0000
committer  Tom Stellard <thomas.stellard@amd.com>    2016-04-14 16:27:07 +0000
commit     79a1fd718c71b4480dc9f00e8e77f4408ec9e6fa (patch)
tree       d7ad0eed911b428e15f871225d742595de9420b8 /llvm/test
parent     f110f8f9f7f46c668e03f4808e03aa54c2157269 (diff)
AMDGPU: allow specifying a workgroup size that needs to fit in a compute unit
Summary:
For GL_ARB_compute_shader we need to support workgroup sizes of at least 1024. However, if we want to allow large workgroup sizes, we may need to use fewer registers, as we have to run more waves per SIMD.

This patch adds an attribute to specify the maximum work group size the compiled program needs to support. It defaults to 256, as that has no wave restrictions.

Reducing the number of available registers is done similarly to how registers were reserved for chips with the SGPR init bug.

Reviewers: mareko, arsenm, tstellarAMD, nhaehnle

Subscribers: FireBurn, kerberizer, llvm-commits, arsenm

Differential Revision: http://reviews.llvm.org/D18340

Patch By: Bas Nieuwenhuizen

llvm-svn: 266337
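Since this is a plain string function attribute, a frontend can attach it when emitting a kernel. A minimal sketch using the LLVM C++ API (the helper name is hypothetical; only the "amdgpu-max-work-group-size" attribute string comes from this patch):

  #include "llvm/IR/Function.h"
  #include <string>

  // Tag a kernel with the largest workgroup the compiled code must
  // support; the backend then budgets registers so that enough waves
  // per SIMD can stay resident.
  static void setMaxWorkGroupSize(llvm::Function &F, unsigned MaxSize) {
    F.addFnAttr("amdgpu-max-work-group-size", std::to_string(MaxSize));
  }

The tests below exercise values on both sides of the 256 default: 63, 256, and 1600 for the promote-alloca pass, and 1024 for register budgeting.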
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll | 72
-rw-r--r--  llvm/test/CodeGen/AMDGPU/large-work-group-registers.ll      | 41
2 files changed, 113 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll b/llvm/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll
new file mode 100644
index 00000000000..fefca0f0d4e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll
@@ -0,0 +1,72 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca < %s | FileCheck %s
+
+; CHECK: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
+
+define void @promote_alloca_size_63(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+entry:
+ %stack = alloca [5 x i32], align 4
+ %0 = load i32, i32 addrspace(1)* %in, align 4
+ %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
+ store i32 4, i32* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
+ %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
+ store i32 5, i32* %arrayidx3, align 4
+ %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
+ %2 = load i32, i32* %arrayidx10, align 4
+ store i32 %2, i32 addrspace(1)* %out, align 4
+ %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
+ %3 = load i32, i32* %arrayidx12
+ %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
+ store i32 %3, i32 addrspace(1)* %arrayidx13
+ ret void
+}
+
+; CHECK: @promote_alloca_size_256.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] undef, align 4
+
+define void @promote_alloca_size_256(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #1 {
+entry:
+ %stack = alloca [5 x i32], align 4
+ %0 = load i32, i32 addrspace(1)* %in, align 4
+ %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
+ store i32 4, i32* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
+ %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
+ store i32 5, i32* %arrayidx3, align 4
+ %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
+ %2 = load i32, i32* %arrayidx10, align 4
+ store i32 %2, i32 addrspace(1)* %out, align 4
+ %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
+ %3 = load i32, i32* %arrayidx12
+ %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
+ store i32 %3, i32 addrspace(1)* %arrayidx13
+ ret void
+}
+
+; CHECK: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4
+
+define void @promote_alloca_size_1600(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #2 {
+entry:
+ %stack = alloca [5 x i32], align 4
+ %0 = load i32, i32 addrspace(1)* %in, align 4
+ %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
+ store i32 4, i32* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
+ %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
+ store i32 5, i32* %arrayidx3, align 4
+ %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
+ %2 = load i32, i32* %arrayidx10, align 4
+ store i32 %2, i32 addrspace(1)* %out, align 4
+ %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
+ %3 = load i32, i32* %arrayidx12
+ %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
+ store i32 %3, i32 addrspace(1)* %arrayidx13
+ ret void
+}
+
+attributes #0 = { nounwind "amdgpu-max-work-group-size"="63" }
+attributes #1 = { nounwind "amdgpu-max-work-group-size"="256" }
+attributes #2 = { nounwind "amdgpu-max-work-group-size"="1600" }
+
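Note how each CHECK line above pins the outer dimension of the promoted LDS array to the attribute value: the pass gives every work-item in the largest possible workgroup its own [5 x i32] slot. A quick sanity check on the biggest case (the 32 KiB local-memory budget here is an assumption about what the 1600-work-item case is sized against, not something this patch states):

  // 1600 work-items x 5 i32 elements x 4 bytes = 32000 bytes of LDS,
  // which still squeezes under a 32 KiB (32768-byte) budget.
  static_assert(1600 * 5 * 4 <= 32768, "promoted stack must fit in LDS");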
diff --git a/llvm/test/CodeGen/AMDGPU/large-work-group-registers.ll b/llvm/test/CodeGen/AMDGPU/large-work-group-registers.ll
new file mode 100644
index 00000000000..8a2fcb70cb6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/large-work-group-registers.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck %s
+
+; CHECK: NumVgprs: 63
+define void @main([9 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <8 x i32>] addrspace(2)* byval, [16 x <8 x i32>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, <3 x i32> inreg, <3 x i32> inreg, <3 x i32>) #0 {
+main_body:
+ %8 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %4, i64 0, i64 8
+ %9 = load <4 x i32>, <4 x i32> addrspace(2)* %8, align 16, !tbaa !0
+ %10 = extractelement <3 x i32> %7, i32 0
+ %11 = extractelement <3 x i32> %7, i32 1
+ %12 = mul i32 %10, %11
+ %bc = bitcast <3 x i32> %7 to <3 x float>
+ %13 = extractelement <3 x float> %bc, i32 1
+ %14 = insertelement <512 x float> undef, float %13, i32 %12
+ call void @llvm.amdgcn.s.barrier()
+ %15 = extractelement <3 x i32> %6, i32 0
+ %16 = extractelement <3 x i32> %7, i32 0
+ %17 = shl i32 %15, 5
+ %18 = add i32 %17, %16
+ %19 = shl i32 %18, 4
+ %20 = extractelement <3 x i32> %7, i32 1
+ %21 = shl i32 %20, 2
+ %22 = sext i32 %21 to i64
+ %23 = getelementptr i8, i8 addrspace(3)* null, i64 %22
+ %24 = bitcast i8 addrspace(3)* %23 to i32 addrspace(3)*
+ %25 = load i32, i32 addrspace(3)* %24, align 4
+ %26 = extractelement <512 x float> %14, i32 %25
+ %27 = insertelement <4 x float> undef, float %26, i32 0
+ call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %27, <4 x i32> %9, i32 0, i32 %19, i1 false, i1 false)
+ ret void
+}
+
+declare void @llvm.amdgcn.s.barrier() #1
+
+declare void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float>, <4 x i32>, i32, i32, i1, i1) #2
+
+attributes #0 = { "amdgpu-max-work-group-size"="1024" }
+attributes #1 = { convergent nounwind }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !1, i64 0, i32 1}
+!1 = !{!"const", null}
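The NumVgprs bound checked in this test follows from occupancy arithmetic: a GCN wave is 64 lanes and each SIMD has a 256-register VGPR file, so a 1024-thread workgroup (16 waves) that must fit on a 4-SIMD compute unit keeps at least 4 waves resident per SIMD, leaving roughly 256/4 = 64 VGPRs per wave; the test pins the allocator's actual result just under that, at 63. A sketch of the calculation (the 64-lane, 4-SIMD, 256-VGPR figures are GCN hardware constants, not values from this patch):

  // Approximate per-wave VGPR budget needed to keep one full workgroup
  // resident on a single compute unit.
  unsigned maxVgprsPerWave(unsigned MaxWorkGroupSize) {
    unsigned Waves = (MaxWorkGroupSize + 63) / 64; // 64-lane waves in the group
    unsigned WavesPerSimd = (Waves + 3) / 4;       // spread across 4 SIMDs
    return 256 / WavesPerSimd;                     // VGPR file split per wave
  }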