diff options
author | Tim Renouf <tpr.llvm@botech.co.uk> | 2018-05-25 07:55:04 +0000 |
---|---|---|
committer | Tim Renouf <tpr.llvm@botech.co.uk> | 2018-05-25 07:55:04 +0000 |
commit | ad8b7c1190b963e9df1757fbb28f14f2f34ec9a2 (patch) | |
tree | a89b97b0a729b8a699e572dbd66048afd72a5513 /llvm/test/CodeGen/AMDGPU/loop_exit_with_xor.ll | |
parent | b161db099db7d611c14f64d36a57767f62d0a430 (diff) | |
download | bcm5719-llvm-ad8b7c1190b963e9df1757fbb28f14f2f34ec9a2.tar.gz bcm5719-llvm-ad8b7c1190b963e9df1757fbb28f14f2f34ec9a2.zip |
[AMDGPU] Fixed incorrect break from loop
Summary:
Lower control flow did not correctly handle the case that a loop break
in if/else was on a condition that was not guaranteed to be masked by
exec. The first test kernel shows an example of this going wrong; after
exiting the loop, exec is all ones, even if it was not before the loop.
The fix is to make the lowering of if-break and else-break insert an
S_AND_B64 that masks the break condition with exec. This commit also
includes an optimization that omits the S_AND_B64 when it is
obviously not needed because the break condition is the result of a
V_CMP in the same basic block.
V2: Addressed some review comments.
V3: Test fixes.
Subscribers: arsenm, kzhuravl, wdng, nhaehnle, yaxunl, dstuttard, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D44046
Change-Id: I0fc56a01209a9e99d1d5c9b0ffd16f111caf200c
llvm-svn: 333258
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/loop_exit_with_xor.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/loop_exit_with_xor.ll | 93 |
1 file changed, 93 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/loop_exit_with_xor.ll b/llvm/test/CodeGen/AMDGPU/loop_exit_with_xor.ll new file mode 100644 index 00000000000..486364acdb7 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/loop_exit_with_xor.ll @@ -0,0 +1,93 @@ +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s + +; Where the mask of lanes wanting to exit the loop on this iteration is not +; obviously already masked by exec (in this case, the xor with -1 inserted by +; control flow annotation), then lower control flow must insert an S_AND_B64 +; with exec. + +; GCN-LABEL: {{^}}needs_and: +; GCN: s_xor_b64 [[REG1:[^ ,]*]], {{[^ ,]*, -1$}} +; GCN: s_and_b64 [[REG2:[^ ,]*]], exec, [[REG1]] +; GCN: s_or_b64 [[REG3:[^ ,]*]], [[REG2]], +; GCN: s_andn2_b64 exec, exec, [[REG3]] + +define void @needs_and(i32 %arg) { +entry: + br label %loop + +loop: + %tmp23phi = phi i32 [ %tmp23, %endif ], [ 0, %entry ] + %tmp23 = add nuw i32 %tmp23phi, 1 + %tmp27 = icmp ult i32 %arg, %tmp23 + br i1 %tmp27, label %then, label %endif + +then: ; preds = %bb + call void @llvm.amdgcn.buffer.store.f32(float undef, <4 x i32> undef, i32 0, i32 undef, i1 false, i1 false) #1 + br label %endif + +endif: ; preds = %bb28, %bb + br i1 %tmp27, label %loop, label %loopexit + +loopexit: + ret void +} + +; Where the mask of lanes wanting to exit the loop on this iteration is +; obviously already masked by exec (a V_CMP), then lower control flow can omit +; the S_AND_B64 to avoid an unnecessary instruction. 
+ +; GCN-LABEL: {{^}}doesnt_need_and: +; GCN: v_cmp{{[^ ]*}} [[REG1:[^ ,]*]] +; GCN: s_or_b64 [[REG2:[^ ,]*]], [[REG1]], +; GCN: s_andn2_b64 exec, exec, [[REG2]] + +define void @doesnt_need_and(i32 %arg) { +entry: + br label %loop + +loop: + %tmp23phi = phi i32 [ %tmp23, %loop ], [ 0, %entry ] + %tmp23 = add nuw i32 %tmp23phi, 1 + %tmp27 = icmp ult i32 %arg, %tmp23 + call void @llvm.amdgcn.buffer.store.f32(float undef, <4 x i32> undef, i32 0, i32 undef, i1 false, i1 false) #1 + br i1 %tmp27, label %loop, label %loopexit + +loopexit: + ret void +} + +; Another case where the mask of lanes wanting to exit the loop is not masked +; by exec, because it is a function parameter. + +; GCN-LABEL: {{^}}break_cond_is_arg: +; GCN: s_xor_b64 [[REG1:[^ ,]*]], {{[^ ,]*, -1$}} +; GCN: s_and_b64 [[REG2:[^ ,]*]], exec, [[REG1]] +; GCN: s_or_b64 [[REG3:[^ ,]*]], [[REG2]], +; GCN: s_andn2_b64 exec, exec, [[REG3]] + +define void @break_cond_is_arg(i32 %arg, i1 %breakcond) { +entry: + br label %loop + +loop: + %tmp23phi = phi i32 [ %tmp23, %endif ], [ 0, %entry ] + %tmp23 = add nuw i32 %tmp23phi, 1 + %tmp27 = icmp ult i32 %arg, %tmp23 + br i1 %tmp27, label %then, label %endif + +then: ; preds = %bb + call void @llvm.amdgcn.buffer.store.f32(float undef, <4 x i32> undef, i32 0, i32 undef, i1 false, i1 false) #1 + br label %endif + +endif: ; preds = %bb28, %bb + br i1 %breakcond, label %loop, label %loopexit + +loopexit: + ret void +} + + +declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #3 + +attributes #3 = { nounwind writeonly } + |