| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-03-27 16:58:27 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-03-27 16:58:27 +0000 |
| commit | 4ab28b64b4cea1fc7d12a352e706a46eb5e5fe4c (patch) | |
| tree | 3b013e8636ba558597e46c508960075d5737a5dc | |
| parent | a42b7247d36a43d5c5b58c99aafce9ad6d1fe796 (diff) | |
AMDGPU: Skip debug_instr when collapsing end_cf
Based on how these debug instructions are inserted, I doubt this was causing a
problem in practice.
llvm-svn: 357090
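The hazard the patch addresses: DBG_VALUE instructions exist only in debug (-g) builds, so any pattern match anchored at `MBB.begin()` can flip its answer once debug info is enabled, letting -g change codegen. Below is a self-contained toy illustration of that failure mode; it is not LLVM code, and `Op`, `startsWithEndCF`, and the block contents are invented for this sketch.

```cpp
#include <cassert>
#include <vector>

// Invented stand-ins: Op::EndCF models the exec-restore (end_cf) pattern,
// Op::DbgValue models a DBG_VALUE emitted only when compiling with -g.
enum class Op { EndCF, DbgValue };

// A naive match anchored at the first instruction, analogous to the
// pre-patch use of MBB.begin() without skipping debug instructions.
static bool startsWithEndCF(const std::vector<Op> &Block) {
  return !Block.empty() && Block.front() == Op::EndCF;
}

int main() {
  std::vector<Op> Release = {Op::EndCF};
  std::vector<Op> Debug = {Op::DbgValue, Op::EndCF}; // same code under -g

  assert(startsWithEndCF(Release));  // optimization fires
  assert(!startsWithEndCF(Debug));   // silently stops firing under -g
  return 0;
}
```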
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp | 11 |
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/collapse-endcf.mir | 110 |
2 files changed, 118 insertions, 3 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
index 21eecb1007f..f5724a71e1d 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
@@ -308,7 +308,8 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
     }
 
     // Try to collapse adjacent endifs.
-    auto Lead = MBB.begin(), E = MBB.end();
+    auto E = MBB.end();
+    auto Lead = skipDebugInstructionsForward(MBB.begin(), E);
     if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI))
       continue;
 
@@ -318,14 +319,18 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
 
     auto I = std::next(Lead);
 
-    for ( ; I != E; ++I)
+    for ( ; I != E; ++I) {
+      if (I->isDebugInstr())
+        continue;
+
       if (!TII->isSALU(*I) || I->readsRegister(AMDGPU::EXEC, TRI))
         break;
+    }
 
     if (I != E)
       continue;
 
-    const auto NextLead = Succ->begin();
+    auto NextLead = skipDebugInstructionsForward(Succ->begin(), Succ->end());
     if (NextLead == Succ->end() || !isEndCF(*NextLead, TRI) ||
         !getOrExecSource(*NextLead, *TII, MRI))
       continue;
diff --git a/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir
new file mode 100644
index 00000000000..8d0486f6d1a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir
@@ -0,0 +1,110 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=si-optimize-exec-masking-pre-ra %s -o - | FileCheck -check-prefix=GCN %s
+
+# Make sure dbg_value doesn't change codegen when collapsing end_cf
+---
+name: simple_nested_if_dbg_value
+tracksRegLiveness: true
+liveins:
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%1' }
+machineFunctionInfo:
+  isEntryFunction: true
+body: |
+  ; GCN-LABEL: name: simple_nested_if_dbg_value
+  ; GCN: bb.0:
+  ; GCN:   successors: %bb.1(0x40000000), %bb.4(0x40000000)
+  ; GCN:   liveins: $vgpr0, $sgpr0_sgpr1
+  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr0_sgpr1
+  ; GCN:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; GCN:   [[V_CMP_LT_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_U32_e64 1, [[COPY1]], implicit $exec
+  ; GCN:   [[COPY2:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
+  ; GCN:   [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY2]], [[V_CMP_LT_U32_e64_]], implicit-def dead $scc
+  ; GCN:   $exec = S_MOV_B64_term [[S_AND_B64_]]
+  ; GCN:   SI_MASK_BRANCH %bb.4, implicit $exec
+  ; GCN:   S_BRANCH %bb.1
+  ; GCN: bb.1:
+  ; GCN:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
+  ; GCN:   undef %5.sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM [[COPY]], 9, 0 :: (dereferenceable invariant load 8, align 4, addrspace 4)
+  ; GCN:   undef %6.sub0:vreg_64 = V_LSHLREV_B32_e32 2, [[COPY1]], implicit $exec
+  ; GCN:   %6.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
+  ; GCN:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY %5.sub1
+  ; GCN:   undef %8.sub0:vreg_64, %9:sreg_64_xexec = V_ADD_I32_e64 %5.sub0, %6.sub0, 0, implicit $exec
+  ; GCN:   %8.sub1:vreg_64, dead %10:sreg_64_xexec = V_ADDC_U32_e64 0, [[COPY3]], %9, 0, implicit $exec
+  ; GCN:   %5.sub3:sgpr_128 = S_MOV_B32 61440
+  ; GCN:   %5.sub2:sgpr_128 = S_MOV_B32 0
+  ; GCN:   BUFFER_STORE_DWORD_ADDR64 %6.sub1, %6, %5, 0, 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
+  ; GCN:   [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 2, [[COPY1]], implicit $exec
+  ; GCN:   [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 $exec, [[V_CMP_NE_U32_e64_]], implicit-def dead $scc
+  ; GCN:   $exec = S_MOV_B64_term [[S_AND_B64_1]]
+  ; GCN:   SI_MASK_BRANCH %bb.3, implicit $exec
+  ; GCN:   S_BRANCH %bb.2
+  ; GCN: bb.2:
+  ; GCN:   successors: %bb.3(0x80000000)
+  ; GCN:   %5.sub0:sgpr_128 = COPY %5.sub2
+  ; GCN:   %5.sub1:sgpr_128 = COPY %5.sub2
+  ; GCN:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+  ; GCN:   BUFFER_STORE_DWORD_ADDR64 [[V_MOV_B32_e32_]], %8, %5, 0, 4, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
+  ; GCN: bb.3:
+  ; GCN:   successors: %bb.4(0x80000000)
+  ; GCN:   DBG_VALUE
+  ; GCN: bb.4:
+  ; GCN:   DBG_VALUE
+  ; GCN:   $exec = S_OR_B64 $exec, [[COPY2]], implicit-def $scc
+  ; GCN:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
+  ; GCN:   [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+  ; GCN:   $m0 = S_MOV_B32 -1
+  ; GCN:   DS_WRITE_B32 [[V_MOV_B32_e32_2]], [[V_MOV_B32_e32_1]], 0, 0, implicit $m0, implicit $exec :: (store 4, addrspace 3)
+  ; GCN:   S_ENDPGM 0
+  bb.0:
+    successors: %bb.1, %bb.4
+    liveins: $vgpr0, $sgpr0_sgpr1
+
+    %1:sgpr_64 = COPY $sgpr0_sgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %2:sreg_64 = V_CMP_LT_U32_e64 1, %0, implicit $exec
+    %3:sreg_64 = COPY $exec, implicit-def $exec
+    %4:sreg_64 = S_AND_B64 %3, %2, implicit-def dead $scc
+    $exec = S_MOV_B64_term %4
+    SI_MASK_BRANCH %bb.4, implicit $exec
+    S_BRANCH %bb.1
+
+  bb.1:
+    successors: %bb.2, %bb.3
+
+    undef %5.sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM %1, 9, 0 :: (dereferenceable invariant load 8, align 4, addrspace 4)
+    undef %6.sub0:vreg_64 = V_LSHLREV_B32_e32 2, %0, implicit $exec
+    %6.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
+    %7:vgpr_32 = COPY %5.sub1
+    undef %8.sub0:vreg_64, %9:sreg_64_xexec = V_ADD_I32_e64 %5.sub0, %6.sub0, 0, implicit $exec
+    %8.sub1:vreg_64, dead %10:sreg_64_xexec = V_ADDC_U32_e64 0, %7, %9, 0, implicit $exec
+    %5.sub3:sgpr_128 = S_MOV_B32 61440
+    %5.sub2:sgpr_128 = S_MOV_B32 0
+    BUFFER_STORE_DWORD_ADDR64 %6.sub1, %6, %5, 0, 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
+    %11:sreg_64 = V_CMP_NE_U32_e64 2, %0, implicit $exec
+    %12:sreg_64 = COPY $exec, implicit-def $exec
+    %13:sreg_64 = S_AND_B64 %12, %11, implicit-def dead $scc
+    $exec = S_MOV_B64_term %13
+    SI_MASK_BRANCH %bb.3, implicit $exec
+    S_BRANCH %bb.2
+
+  bb.2:
+    %5.sub0:sgpr_128 = COPY %5.sub2
+    %5.sub1:sgpr_128 = COPY %5.sub2
+    %14:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %14, %8, %5, 0, 4, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
+
+  bb.3:
+    $exec = S_OR_B64 $exec, %12, implicit-def $scc
+    DBG_VALUE
+
+  bb.4:
+    DBG_VALUE
+    $exec = S_OR_B64 $exec, %3, implicit-def $scc
+    %15:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
+    %16:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    $m0 = S_MOV_B32 -1
+    DS_WRITE_B32 %16, %15, 0, 0, implicit $m0, implicit $exec :: (store 4, addrspace 3)
+    S_ENDPGM 0
+
+...
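To make the patched control flow easier to follow, here is a self-contained sketch of the collapse check after this change. `Instruction`, `Block`, `skipDebug`, and `canCollapseEndCF` are simplified stand-ins invented for this sketch, not the MachineInstr/MachineBasicBlock API that SIOptimizeExecMaskingPreRA.cpp actually uses, and the single-successor and `getOrExecSource` checks of the real pass are elided.

```cpp
#include <cassert>
#include <iterator>
#include <vector>

// Simplified stand-in for MachineInstr: just the predicates the scan needs.
struct Instruction {
  bool IsDebug = false;   // models MachineInstr::isDebugInstr()
  bool IsEndCF = false;   // models the S_OR_B64 exec-restore (end_cf) pattern
  bool IsSALU = false;    // models SIInstrInfo::isSALU()
  bool ReadsExec = false; // models readsRegister(AMDGPU::EXEC)
};

using Block = std::vector<Instruction>;

// Models llvm::skipDebugInstructionsForward: advance past debug instructions.
static Block::iterator skipDebug(Block::iterator It, Block::iterator End) {
  while (It != End && It->IsDebug)
    ++It;
  return It;
}

// Returns true when the leading end_cf of MBB can be merged with the
// leading end_cf of its successor Succ, mirroring the patched logic.
static bool canCollapseEndCF(Block &MBB, Block &Succ) {
  // Leading end_cf of MBB, ignoring any DBG_VALUEs in front of it.
  auto E = MBB.end();
  auto Lead = skipDebug(MBB.begin(), E);
  if (Lead == E || !Lead->IsEndCF)
    return false;

  // Everything after it must be scalar code that does not read exec;
  // debug instructions are skipped rather than rejected.
  for (auto I = std::next(Lead); I != E; ++I) {
    if (I->IsDebug)
      continue;
    if (!I->IsSALU || I->ReadsExec)
      return false;
  }

  // The successor's first *non-debug* instruction must also be an end_cf.
  auto NextLead = skipDebug(Succ.begin(), Succ.end());
  return NextLead != Succ.end() && NextLead->IsEndCF;
}

int main() {
  Instruction EndCF, SALU, Dbg;
  EndCF.IsEndCF = true;
  SALU.IsSALU = true;
  Dbg.IsDebug = true;

  Block MBB = {EndCF, SALU, Dbg}; // end_cf, scalar op, trailing DBG_VALUE
  Block Succ = {Dbg, EndCF};      // DBG_VALUE ahead of the successor's end_cf
  assert(canCollapseEndCF(MBB, Succ)); // debug instructions no longer block it
  return 0;
}
```

The MIR test above exercises both skip sites the same way: bb.3 carries a DBG_VALUE after its S_OR_B64 (covered by the loop's `continue`), and bb.4 carries one ahead of its S_OR_B64 (covered by the `NextLead` skip), so the two end_cf restores still collapse into one.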

