|  |  |  |
|---|---|---|
| author | Nicolai Haehnle <nhaehnle@gmail.com> | 2018-04-04 10:57:58 +0000 |
| committer | Nicolai Haehnle <nhaehnle@gmail.com> | 2018-04-04 10:57:58 +0000 |
| commit | 3ffd383a15349392247302866777425096aedcf2 (patch) | |
| tree | c68b934ab2f57bc13a3f3a59540191ddfc129d91 /llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll | |
| parent | 21d9b33d62772c58267cc0aa725e35ac9a4661db (diff) | |
AMDGPU: Fix copying i1 value out of loop with non-uniform exit
Summary:
When an i1 value is defined inside a loop and used outside of it, we cannot
simply reuse the SGPR bitmask from the loop's last iteration: with a
non-uniform exit, each lane needs the value from the iteration in which that
lane last executed the definition.

There are also useful and correct cases of an i1 value being copied between
basic blocks, e.g. when a condition is computed outside of a loop and used
inside it. The concept of dominators is not sufficient to capture what is
going on, so I propose the notion of "lane-dominators".
Fixes a bug encountered in Nier: Automata.
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103743
Change-Id: If37b969ddc71d823ab3004aeafb9ea050e45bd9a
Reviewers: arsenm, rampitec
Subscribers: kzhuravl, wdng, mgorny, yaxunl, dstuttard, tpr, llvm-commits, t-tye
Differential Revision: https://reviews.llvm.org/D40547
llvm-svn: 329164
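
To make the Summary concrete, here is a minimal IR sketch of the pattern being
fixed. The function and value names are hypothetical and not part of this
commit; the actual regression test appears in the diff below.

```llvm
define amdgpu_ps float @i1_live_out_sketch(i32 %n, i32 %tid) {
entry:
  br label %loop

loop:
  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  ; i1 defined inside the loop; its value differs per lane and per iteration.
  %cond = icmp ult i32 %i, %tid
  %i.next = add i32 %i, 1
  ; Non-uniform exit: lanes may leave the loop on different iterations.
  %exit = icmp uge i32 %i.next, %n
  br i1 %exit, label %after, label %loop

after:
  ; %cond is used outside the loop. Each lane needs the value from the last
  ; iteration *that lane* executed, not from the wave's final iteration, so a
  ; single SGPR bitmask written on the last iteration is not enough.
  %r = select i1 %cond, float 1.0, float 0.0
  ret float %r
}
```

With the fix, the per-iteration i1 is carried in a per-lane VGPR mask
(materialized with v_cndmask_b32 inside the loop and read back with
v_cmp_ne_u32 after it), which is what the FileCheck lines in the new test
check for.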
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll')
|  |  |  |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll | 48 |

1 file changed, 48 insertions, 0 deletions
```diff
diff --git a/llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll b/llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll
new file mode 100644
index 00000000000..0a4a99b5bdc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll
@@ -0,0 +1,48 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: {{^}}i1_copy_from_loop:
+;
+; Cannot use an SGPR mask to copy %cc out of the loop, since the mask would
+; only contain the lanes that were active during the last loop iteration.
+;
+; SI: ; %for.body
+; SI: v_cmp_gt_u32_e64 [[SREG:s\[[0-9]+:[0-9]+\]]], 4,
+; SI: v_cndmask_b32_e64 [[VREG:v[0-9]+]], 0, -1, [[SREG]]
+; SI-NOT: [[VREG]]
+; SI: ; %for.end
+; SI: v_cmp_ne_u32_e32 vcc, 0, [[VREG]]
+define amdgpu_ps void @i1_copy_from_loop(<4 x i32> inreg %rsrc, i32 %tid) {
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i32 [0, %entry], [%i.inc, %end.loop]
+  %cc = icmp ult i32 %i, 4
+  br i1 %cc, label %mid.loop, label %for.end
+
+mid.loop:
+  %v = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %tid, i32 %i, i1 false, i1 false)
+  %cc2 = fcmp oge float %v, 0.0
+  br i1 %cc2, label %end.loop, label %for.end
+
+end.loop:
+  %i.inc = add i32 %i, 1
+  br label %for.body
+
+for.end:
+  br i1 %cc, label %if, label %end
+
+if:
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float undef, float undef, float undef, float undef, i1 true, i1 true)
+  br label %end
+
+end:
+  ret void
+}
+
+declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #0
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind inaccessiblememonly }
```

