| author | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2018-12-13 03:17:40 +0000 |
|---|---|---|
| committer | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2018-12-13 03:17:40 +0000 |
| commit | 6071e1aa581eff5447ce99e5e599b59ea2ce4a62 (patch) | |
| tree | bcb53d5f88080f2cbb1204c398b2654627132535 /llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll | |
| parent | 54c01ad6a9e75f01765a45db5fe9775570707079 (diff) | |
[AMDGPU] Simplify negated condition
Optimize sequence:
%sel = V_CNDMASK_B32_e64 0, 1, %cc
%cmp = V_CMP_NE_U32 1, %sel
$vcc = S_AND_B64 $exec, %cmp
S_CBRANCH_VCC[N]Z
=>
$vcc = S_ANDN2_B64 $exec, %cc
S_CBRANCH_VCC[N]Z
This is the negation pattern inserted by DAGCombiner::visitBRCOND() via
rebuildSetCC(). Since %sel is 1 exactly when %cc is set, comparing it against 1
with V_CMP_NE simply recomputes the inverse of %cc, so the whole chain folds to
$vcc = $exec & ~%cc.
Differential Revision: https://reviews.llvm.org/D55402
llvm-svn: 349003
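For readers unfamiliar with the pass, the sketch below shows, at the MIR level, how a fold like the one described above could be matched and rewritten. It is a much-simplified illustration, not the code from this commit: the helper name `simplifyNegatedCond` and its structure are assumptions, and the real implementation performs additional legality checks (other users of %sel/%cmp, intervening writes to %cc or exec, kill flags) that are omitted here.

```cpp
// Illustrative sketch only -- not the actual SIOptimizeExecMaskingPreRA code.
// Match:
//   %sel = V_CNDMASK_B32_e64 0, 1, %cc
//   %cmp = V_CMP_NE_U32_e64  1, %sel
//   $vcc = S_AND_B64 $exec, %cmp
// and rewrite the S_AND_B64 to:
//   $vcc = S_ANDN2_B64 $exec, %cc
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// True if MO is the literal immediate Val.
static bool isImm(const MachineOperand *MO, int64_t Val) {
  return MO && MO->isImm() && MO->getImm() == Val;
}

static bool simplifyNegatedCond(MachineInstr &And, MachineRegisterInfo &MRI,
                                const SIInstrInfo *TII) {
  // Match $vcc = S_AND_B64 $exec, %cmp.
  if (And.getOpcode() != AMDGPU::S_AND_B64 ||
      And.getOperand(0).getReg() != AMDGPU::VCC ||
      !And.getOperand(1).isReg() ||
      And.getOperand(1).getReg() != AMDGPU::EXEC ||
      !And.getOperand(2).isReg())
    return false;

  // %cmp must be V_CMP_NE_U32 with one source the constant 1 and the other %sel.
  MachineInstr *Cmp = MRI.getUniqueVRegDef(And.getOperand(2).getReg());
  if (!Cmp || Cmp->getOpcode() != AMDGPU::V_CMP_NE_U32_e64)
    return false;
  const MachineOperand *Src0 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  const MachineOperand *Src1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  const MachineOperand *SelOp = isImm(Src0, 1) ? Src1
                              : isImm(Src1, 1) ? Src0 : nullptr;
  if (!SelOp || !SelOp->isReg())
    return false;

  // %sel must be V_CNDMASK_B32_e64 0, 1, %cc, a 0/1 materialization of %cc.
  MachineInstr *Sel = MRI.getUniqueVRegDef(SelOp->getReg());
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64 ||
      !isImm(TII->getNamedOperand(*Sel, AMDGPU::OpName::src0), 0) ||
      !isImm(TII->getNamedOperand(*Sel, AMDGPU::OpName::src1), 1))
    return false;
  const MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!CC || !CC->isReg())
    return false;

  // (%sel != 1) is just !%cc, so exec & (%sel != 1) == exec & ~%cc.
  BuildMI(*And.getParent(), And, And.getDebugLoc(),
          TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::VCC)
      .addReg(AMDGPU::EXEC)
      .addReg(CC->getReg());
  And.eraseFromParent();
  return true;
}
```

The FileCheck patterns in the new test below verify exactly this end state: no v_cndmask_b32/v_cmp pair left in the loop, and a single s_andn2_b64 of exec with the original condition feeding the branch.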
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll | 75 |
1 file changed, 75 insertions, 0 deletions
```diff
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
new file mode 100644
index 00000000000..be5d8d47205
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
@@ -0,0 +1,75 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}negated_cond:
+; GCN: BB0_1:
+; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
+; GCN: BB0_2:
+; GCN-NOT: v_cndmask_b32
+; GCN-NOT: v_cmp
+; GCN: s_andn2_b64 vcc, exec, [[CC]]
+; GCN: s_cbranch_vccnz BB0_4
+define amdgpu_kernel void @negated_cond(i32 addrspace(1)* %arg1) {
+bb:
+  br label %bb1
+
+bb1:
+  %tmp1 = load i32, i32 addrspace(1)* %arg1
+  %tmp2 = icmp eq i32 %tmp1, 0
+  br label %bb2
+
+bb2:
+  %tmp3 = phi i32 [ 0, %bb1 ], [ %tmp6, %bb4 ]
+  %tmp4 = shl i32 %tmp3, 5
+  br i1 %tmp2, label %bb3, label %bb4
+
+bb3:
+  %tmp5 = add i32 %tmp4, 1
+  br label %bb4
+
+bb4:
+  %tmp6 = phi i32 [ %tmp5, %bb3 ], [ %tmp4, %bb2 ]
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp6
+  store i32 0, i32 addrspace(1)* %gep
+  %tmp7 = icmp eq i32 %tmp6, 32
+  br i1 %tmp7, label %bb1, label %bb2
+}
+
+; GCN-LABEL: {{^}}negated_cond_dominated_blocks:
+; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
+; GCN: BB1_1:
+; GCN-NOT: v_cndmask_b32
+; GCN-NOT: v_cmp
+; GCN: s_andn2_b64 vcc, exec, [[CC]]
+; GCN: s_cbranch_vccz BB1_3
+define amdgpu_kernel void @negated_cond_dominated_blocks(i32 addrspace(1)* %arg1) {
+bb:
+  br label %bb2
+
+bb2:
+  %tmp1 = load i32, i32 addrspace(1)* %arg1
+  %tmp2 = icmp eq i32 %tmp1, 0
+  br label %bb4
+
+bb3:
+  ret void
+
+bb4:
+  %tmp3 = phi i32 [ 0, %bb2 ], [ %tmp7, %bb7 ]
+  %tmp4 = shl i32 %tmp3, 5
+  br i1 %tmp2, label %bb5, label %bb6
+
+bb5:
+  %tmp5 = add i32 %tmp4, 1
+  br label %bb7
+
+bb6:
+  %tmp6 = add i32 %tmp3, 1
+  br label %bb7
+
+bb7:
+  %tmp7 = phi i32 [ %tmp5, %bb5 ], [ %tmp6, %bb6 ]
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp7
+  store i32 0, i32 addrspace(1)* %gep
+  %tmp8 = icmp eq i32 %tmp7, 32
+  br i1 %tmp8, label %bb3, label %bb4
+}
```

