author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-05-03 13:55:40 +0000 |
---|---|---|
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-05-03 13:55:40 +0000 |
commit | 6d0c59605c8e867fffd4c4d13054269d9e4a6d10 (patch) | |
tree | 20e23a62bb450515a1ab9361308759ef1eb8f1c8 /llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir | |
parent | aa49be49263f26f3127f4be7fdfda04397ca7114 (diff) | |
AMDGPU: Forgot to commit test file for r358890
llvm-svn: 359885
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir | 97 |
1 file changed, 97 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir b/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir
new file mode 100644
index 00000000000..a2fec4d298b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir
@@ -0,0 +1,97 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -run-pass=si-optimize-exec-masking-pre-ra %s -o - | FileCheck -check-prefix=GCN %s
+
+# Check for regression from assuming an instruction was a copy after
+# dropping the opcode check.
+---
+name: exec_src1_is_not_copy
+tracksRegLiveness: true
+machineFunctionInfo:
+  isEntryFunction: true
+  scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
+  scratchWaveOffsetReg: '$sgpr101'
+  frameOffsetReg: '$sgpr101'
+body: |
+  ; GCN-LABEL: name: exec_src1_is_not_copy
+  ; GCN: bb.0:
+  ; GCN:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; GCN:   liveins: $vgpr0
+  ; GCN:   [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
+  ; GCN:   [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+  ; GCN:   [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[DEF]], implicit $exec
+  ; GCN:   [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
+  ; GCN:   [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY1]], [[V_CMP_NE_U32_e64_]], implicit-def dead $scc
+  ; GCN:   [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[S_AND_B64_]], [[COPY1]], implicit-def dead $scc
+  ; GCN:   $exec = S_MOV_B64_term [[S_AND_B64_]]
+  ; GCN:   SI_MASK_BRANCH %bb.2, implicit $exec
+  ; GCN:   S_BRANCH %bb.1
+  ; GCN: bb.1:
+  ; GCN:   successors: %bb.2(0x80000000)
+  ; GCN: bb.2:
+  ; GCN:   successors: %bb.3(0x40000000), %bb.6(0x40000000)
+  ; GCN:   [[S_OR_SAVEEXEC_B64_:%[0-9]+]]:sreg_64 = S_OR_SAVEEXEC_B64 [[S_XOR_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GCN:   $exec = S_AND_B64 $exec, [[COPY]], implicit-def dead $scc
+  ; GCN:   [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 $exec, [[S_OR_SAVEEXEC_B64_]], implicit-def $scc
+  ; GCN:   $exec = S_XOR_B64_term $exec, [[S_AND_B64_1]], implicit-def $scc
+  ; GCN:   SI_MASK_BRANCH %bb.6, implicit $exec
+  ; GCN:   S_BRANCH %bb.3
+  ; GCN: bb.3:
+  ; GCN:   successors: %bb.4(0x40000000), %bb.5(0x40000000)
+  ; GCN:   [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[DEF]], implicit $exec
+  ; GCN:   [[COPY2:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
+  ; GCN:   [[S_AND_B64_2:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY2]], [[V_CMP_NE_U32_e64_1]], implicit-def dead $scc
+  ; GCN:   $exec = S_MOV_B64_term [[S_AND_B64_2]]
+  ; GCN:   SI_MASK_BRANCH %bb.5, implicit $exec
+  ; GCN:   S_BRANCH %bb.4
+  ; GCN: bb.4:
+  ; GCN:   successors: %bb.5(0x80000000)
+  ; GCN: bb.5:
+  ; GCN:   successors: %bb.6(0x80000000)
+  ; GCN:   $exec = S_OR_B64 $exec, [[COPY2]], implicit-def $scc
+  ; GCN: bb.6:
+  ; GCN:   $exec = S_OR_B64 $exec, [[S_AND_B64_1]], implicit-def $scc
+  bb.0:
+    successors: %bb.1, %bb.2
+    liveins: $vgpr0
+
+    %0:sreg_64 = COPY $exec
+    %1:vgpr_32 = IMPLICIT_DEF
+    %2:sreg_64 = V_CMP_NE_U32_e64 0, %1, implicit $exec
+    %3:sreg_64 = COPY $exec, implicit-def $exec
+    %4:sreg_64 = S_AND_B64 %3, %2, implicit-def dead $scc
+    %5:sreg_64 = S_XOR_B64 %4, %3, implicit-def dead $scc
+    $exec = S_MOV_B64_term %4
+    SI_MASK_BRANCH %bb.2, implicit $exec
+    S_BRANCH %bb.1
+
+  bb.1:
+
+  bb.2:
+    successors: %bb.3, %bb.6
+
+    %6:sreg_64 = S_OR_SAVEEXEC_B64 %5, implicit-def $exec, implicit-def $scc, implicit $exec
+    $exec = S_AND_B64 $exec, %0, implicit-def dead $scc
+    %7:sreg_64 = S_AND_B64 $exec, %6, implicit-def $scc
+    $exec = S_XOR_B64_term $exec, %7, implicit-def $scc
+    SI_MASK_BRANCH %bb.6, implicit $exec
+    S_BRANCH %bb.3
+
+  bb.3:
+    successors: %bb.4, %bb.5
+
+    %8:sreg_64 = V_CMP_NE_U32_e64 0, %1, implicit $exec
+    %9:sreg_64 = COPY $exec, implicit-def $exec
+    %10:sreg_64 = S_AND_B64 %9, %8, implicit-def dead $scc
+    $exec = S_MOV_B64_term %10
+    SI_MASK_BRANCH %bb.5, implicit $exec
+    S_BRANCH %bb.4
+
+  bb.4:
+
+  bb.5:
+    $exec = S_OR_B64 $exec, %9, implicit-def $scc
+
+  bb.6:
+    $exec = S_OR_B64 $exec, %7, implicit-def $scc
+
+...
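For reference, the RUN line in the new test can be expanded by hand when running it outside of llvm-lit. A minimal sketch, assuming llc and FileCheck from an LLVM build are on PATH and the commands are run from the monorepo source root; lit would normally substitute %s with the test's path:

    # Run only the pass under test on the MIR file and verify the GCN check lines.
    llc -mtriple=amdgcn-mesa-mesa3d -run-pass=si-optimize-exec-masking-pre-ra \
        llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir -o - \
      | FileCheck -check-prefix=GCN llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir

    # The CHECK lines were autogenerated (see the NOTE line); if the pass or the
    # input MIR changes, they can be regenerated with the same script, which also
    # needs a built llc it can locate.
    llvm/utils/update_mir_test_checks.py llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir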