diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-11-07 19:09:27 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-11-07 19:09:27 +0000 |
| commit | 52f14ec59666c828aa5f1ab0bd09b56b338a81d3 (patch) | |
| tree | 995052b2fdf0c00ab1e3d088394a4eeafa490702 /llvm/test | |
| parent | 6b3a7ccc7cb6f389b25576af9844e08fb32e41d4 (diff) | |
| download | bcm5719-llvm-52f14ec59666c828aa5f1ab0bd09b56b338a81d3.tar.gz bcm5719-llvm-52f14ec59666c828aa5f1ab0bd09b56b338a81d3.zip | |
AMDGPU: Preserve vcc undef flags when inverting branch
If the branch was on a read-undef of vcc, passes that used
analyzeBranch to invert the branch condition wouldn't preserve
the undef flag, resulting in a verifier error.
Fixes verifier failures in a future commit.
Also fix a verifier error when inserting the copy for the vccz
corruption bug.
llvm-svn: 286133
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/MIR/AMDGPU/invert-br-undef-vcc.mir | 89 | ||||
| -rw-r--r-- | llvm/test/CodeGen/MIR/AMDGPU/vccz-corrupt-bug-workaround.mir | 177 |
2 files changed, 266 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/invert-br-undef-vcc.mir b/llvm/test/CodeGen/MIR/AMDGPU/invert-br-undef-vcc.mir new file mode 100644 index 00000000000..66182d09289 --- /dev/null +++ b/llvm/test/CodeGen/MIR/AMDGPU/invert-br-undef-vcc.mir @@ -0,0 +1,89 @@ +# RUN: llc -run-pass block-placement -march=amdgcn -verify-machineinstrs -o - %s | FileCheck %s +--- | + + define void @invert_br_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 { + entry: + br i1 undef, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0 + + else: ; preds = %entry + store volatile i32 100, i32 addrspace(1)* undef + br label %done, !structurizecfg.uniform !0 + + if: ; preds = %entry + store volatile i32 9, i32 addrspace(1)* undef + br label %done, !structurizecfg.uniform !0 + + done: ; preds = %if, %else + %value = phi i32 [ 0, %if ], [ 1, %else ] + store i32 %value, i32 addrspace(1)* %out + ret void + } + + attributes #0 = { nounwind } + + !0 = !{} + +... +--- +# CHECK-LABEL: name: invert_br_undef_vcc +# CHECK: S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc + +name: invert_br_undef_vcc +alignment: 0 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%sgpr0_sgpr1' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0.entry: + successors: %bb.2.if, %bb.1.else + liveins: %sgpr0_sgpr1 + + %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + %sgpr7 = S_MOV_B32 61440 + %sgpr6 = S_MOV_B32 -1 + S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc + + bb.1.else: + successors: %bb.3.done + liveins: %sgpr6, %sgpr7, 
%sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %vgpr0 = V_MOV_B32_e32 100, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + %vgpr0 = V_MOV_B32_e32 1, implicit %exec + S_BRANCH %bb.3.done + + bb.2.if: + successors: %bb.3.done + liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %vgpr0 = V_MOV_B32_e32 9, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + %vgpr0 = V_MOV_B32_e32 0, implicit %exec + + bb.3.done: + liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %sgpr3 = S_MOV_B32 61440 + %sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out) + S_ENDPGM + +... diff --git a/llvm/test/CodeGen/MIR/AMDGPU/vccz-corrupt-bug-workaround.mir b/llvm/test/CodeGen/MIR/AMDGPU/vccz-corrupt-bug-workaround.mir new file mode 100644 index 00000000000..03e473e3a0c --- /dev/null +++ b/llvm/test/CodeGen/MIR/AMDGPU/vccz-corrupt-bug-workaround.mir @@ -0,0 +1,177 @@ +# RUN: llc -run-pass si-insert-waits -march=amdgcn -mcpu=tahiti -o - %s | FileCheck %s +--- | + + define void @vccz_corrupt_workaround(float %cond, i32 addrspace(1)* %out) #0 { + entry: + %cmp0 = fcmp oeq float %cond, 0.000000e+00 + br i1 %cmp0, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0 + + else: ; preds = %entry + store volatile i32 100, i32 addrspace(1)* undef + br label %done, !structurizecfg.uniform !0 + + if: ; preds = %entry + store volatile i32 9, i32 addrspace(1)* undef + br label %done, !structurizecfg.uniform !0 + + done: ; preds = %if, %else + %value = phi i32 [ 0, %if ], [ 1, %else ] + store i32 %value, i32 addrspace(1)* %out + ret void + } + + define void @vccz_corrupt_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 { + entry: + 
br i1 undef, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0 + + else: ; preds = %entry + store volatile i32 100, i32 addrspace(1)* undef + br label %done, !structurizecfg.uniform !0 + + if: ; preds = %entry + store volatile i32 9, i32 addrspace(1)* undef + br label %done, !structurizecfg.uniform !0 + + done: ; preds = %if, %else + %value = phi i32 [ 0, %if ], [ 1, %else ] + store i32 %value, i32 addrspace(1)* %out + ret void + } + + attributes #0 = { nounwind } + attributes #1 = { readnone } + + !0 = !{} + +... +--- +# CHECK-LABEL: name: vccz_corrupt_workaround +# CHECK: %vcc = V_CMP_EQ_F32 +# CHECK-NEXT: %vcc = S_MOV_B64 %vcc +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit killed %vcc + +name: vccz_corrupt_workaround +alignment: 0 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%sgpr0_sgpr1' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0.entry: + successors: %bb.2.if, %bb.1.else + liveins: %sgpr0_sgpr1 + + %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`) + %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + %sgpr7 = S_MOV_B32 61440 + %sgpr6 = S_MOV_B32 -1 + %vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, 0, implicit %exec + S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc + + bb.2.if: + successors: %bb.3.done + liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %vgpr0 = V_MOV_B32_e32 9, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, 
implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + %vgpr0 = V_MOV_B32_e32 0, implicit %exec + S_BRANCH %bb.3.done + + bb.1.else: + successors: %bb.3.done + liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %vgpr0 = V_MOV_B32_e32 100, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + %vgpr0 = V_MOV_B32_e32 1, implicit %exec + + bb.3.done: + liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %sgpr3 = S_MOV_B32 61440 + %sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out) + S_ENDPGM + +... +--- +# CHECK-LABEL: name: vccz_corrupt_undef_vcc +# CHECK: S_WAITCNT +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit undef %vcc + +name: vccz_corrupt_undef_vcc +alignment: 0 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%sgpr0_sgpr1' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0.entry: + successors: %bb.2.if, %bb.1.else + liveins: %sgpr0_sgpr1 + + %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + %sgpr7 = S_MOV_B32 61440 + %sgpr6 = S_MOV_B32 -1 + S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc + + bb.2.if: + successors: %bb.3.done + liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %vgpr0 = V_MOV_B32_e32 9, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile 
store 4 into `i32 addrspace(1)* undef`) + %vgpr0 = V_MOV_B32_e32 0, implicit %exec + S_BRANCH %bb.3.done + + bb.1.else: + successors: %bb.3.done + liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %vgpr0 = V_MOV_B32_e32 100, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) + %vgpr0 = V_MOV_B32_e32 1, implicit %exec + + bb.3.done: + liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 + + %sgpr3 = S_MOV_B32 61440 + %sgpr2 = S_MOV_B32 -1 + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out) + S_ENDPGM + +... |

