diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-07-12 19:01:23 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-07-12 19:01:23 +0000 |
| commit | 657f871a4e7e58781fa36fe9371483283c11b100 (patch) | |
| tree | d4d1a45f60ce674e6614efdbbb630aaf25c393c7 /llvm/test/CodeGen/AMDGPU/skip-if-dead.ll | |
| parent | b9f8e292902b3fda8643f0cb06b575abccf83b46 (diff) | |
| download | bcm5719-llvm-657f871a4e7e58781fa36fe9371483283c11b100.tar.gz bcm5719-llvm-657f871a4e7e58781fa36fe9371483283c11b100.zip | |
AMDGPU: Fix verifier error with kill intrinsic
Don't create a terminator in the middle of the block.
We should probably get rid of this intrinsic.
llvm-svn: 275203
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/skip-if-dead.ll')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/skip-if-dead.ll | 145 |
1 file changed, 145 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll new file mode 100644 index 00000000000..1999c0cd683 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll @@ -0,0 +1,145 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s + +; CHECK-LABEL: {{^}}test_kill_depth_0_imm_pos: +; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: s_endpgm +define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 { + call void @llvm.AMDGPU.kill(float 0.0) + ret void +} + +; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg: +; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: s_mov_b64 exec, 0 +; CHECK-NEXT: s_endpgm +define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 { + call void @llvm.AMDGPU.kill(float -0.0) + ret void +} + +; CHECK-LABEL: {{^}}test_kill_depth_var: +; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 +; CHECK-NEXT: s_endpgm +define amdgpu_ps void @test_kill_depth_var(float %x) #0 { + call void @llvm.AMDGPU.kill(float %x) + ret void +} + +; FIXME: why does the skip depend on the asm length in the same block? 
+ +; CHECK-LABEL: {{^}}test_kill_control_flow: +; CHECK: s_cmp_lg_i32 s{{[0-9]+}}, 0 +; CHECK: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]] + +; CHECK: ; BB#1: +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 + +; CHECK: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]] +; CHECK-NEXT: ; BB#3: +; CHECK-NEXT: exp 0, 9, 0, 1, 1, v0, v0, v0, v0 +; CHECK-NEXT: s_endpgm + +; CHECK-NEXT: {{^}}[[SPLIT_BB]]: +; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v7 +; CHECK-NEXT: {{^}}BB{{[0-9]+_[0-9]+}}: +; CHECK-NEXT: s_endpgm +define amdgpu_ps void @test_kill_control_flow(i32 inreg %arg) #0 { +entry: + %cmp = icmp eq i32 %arg, 0 + br i1 %cmp, label %bb, label %exit + +bb: + %var = call float asm sideeffect " + v_mov_b32_e64 v7, -1 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64", "={VGPR7}"() + call void @llvm.AMDGPU.kill(float %var) + br label %exit + +exit: + ret void +} + +; CHECK-LABEL: {{^}}test_kill_control_flow_remainder: +; CHECK: s_cmp_lg_i32 s{{[0-9]+}}, 0 +; CHECK-NEXT: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]] + +; CHECK-NEXT: ; BB#1: ; %bb +; CHECK: v_mov_b32_e64 v7, -1 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: v_nop_e64 +; CHECK: ;;#ASMEND +; CHECK: v_mov_b32_e64 v8, -1 +; CHECK: ;;#ASMEND +; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]] + +; CHECK-NEXT: ; BB#3: +; CHECK-NEXT: exp 0, 9, 0, 1, 1, v0, v0, v0, v0 +; CHECK-NEXT: s_endpgm + +; CHECK-NEXT: {{^}}[[SPLIT_BB]]: +; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v7 +; CHECK: buffer_store_dword v8 +; CHECK: v_mov_b32_e64 v9, -2 + +; CHECK: {{^}}BB{{[0-9]+_[0-9]+}}: +; CHECK: buffer_store_dword v9 +; CHECK-NEXT: s_endpgm +define amdgpu_ps void @test_kill_control_flow_remainder(i32 inreg 
%arg) #0 { +entry: + %cmp = icmp eq i32 %arg, 0 + br i1 %cmp, label %bb, label %exit + +bb: + %var = call float asm sideeffect " + v_mov_b32_e64 v7, -1 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64 + v_nop_e64", "={VGPR7}"() + %live.across = call float asm sideeffect "v_mov_b32_e64 v8, -1", "={VGPR8}"() + call void @llvm.AMDGPU.kill(float %var) + store volatile float %live.across, float addrspace(1)* undef + %live.out = call float asm sideeffect "v_mov_b32_e64 v9, -2", "={VGPR9}"() + br label %exit + +exit: + %phi = phi float [ 0.0, %entry ], [ %live.out, %bb ] + store float %phi, float addrspace(1)* undef + ret void +} + +declare void @llvm.AMDGPU.kill(float) #0 + +attributes #0 = { nounwind } |

