diff options
| author | cdevadas <cdevadas@amd.com> | 2020-01-10 22:23:27 +0530 |
|---|---|---|
| committer | cdevadas <cdevadas@amd.com> | 2020-01-15 15:18:16 +0530 |
| commit | 0dc6c249bffac9f23a605ce4e42a84341da3ddbd (patch) | |
| tree | 113cc776987199087010ef82f8fd4728b06d0c8b /llvm/test/CodeGen/AMDGPU/GlobalISel | |
| parent | 064859bde79ccd221fd5196fd2d889014c5435c4 (diff) | |
| download | bcm5719-llvm-0dc6c249bffac9f23a605ce4e42a84341da3ddbd.tar.gz bcm5719-llvm-0dc6c249bffac9f23a605ce4e42a84341da3ddbd.zip | |
[AMDGPU] Invert the handling of skip insertion.
The current implementation of skip insertion (SIInsertSkip) makes it a
mandatory pass required for correctness. Initially, the idea was to
have an optional pass. This patch inserts the s_cbranch_execz upfront
during SILowerControlFlow to skip over the sections of code when no
lanes are active. Later, SIRemoveShortExecBranches removes the skips
for short branches, unless there is a side effect and the skip branch is
really necessary.
This new pass will replace the handling of skip insertion in the
existing SIInsertSkip Pass.
Differential revision: https://reviews.llvm.org/D68092
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/GlobalISel')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll | 11 |
1 files changed, 4 insertions, 7 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll index 40e18206702..d787e40707b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll @@ -10,9 +10,8 @@ define i32 @divergent_if_swap_brtarget_order0(i32 %value) { ; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; CHECK-NEXT: ; implicit-def: $vgpr0 ; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc -; CHECK-NEXT: ; mask branch BB0_2 ; CHECK-NEXT: s_cbranch_execz BB0_2 -; CHECK-NEXT: BB0_1: ; %if.true +; CHECK-NEXT: ; %bb.1: ; %if.true ; CHECK-NEXT: global_load_dword v0, v[0:1], off ; CHECK-NEXT: BB0_2: ; %endif ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] @@ -38,12 +37,10 @@ define i32 @divergent_if_swap_brtarget_order1(i32 %value) { ; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; CHECK-NEXT: ; implicit-def: $vgpr0 ; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc -; CHECK-NEXT: ; mask branch BB1_2 -; CHECK-NEXT: BB1_1: ; %endif -; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] -; CHECK-NEXT: s_setpc_b64 s[30:31] -; CHECK-NEXT: BB1_2: ; %if.true +; CHECK-NEXT: s_cbranch_execnz BB1_2 +; CHECK-NEXT: ; %bb.1: ; %if.true ; CHECK-NEXT: global_load_dword v0, v[0:1], off +; CHECK-NEXT: BB1_2: ; %endif ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: s_setpc_b64 s[30:31] |

