Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
 llvm/test/CodeGen/AMDGPU/branch-relaxation.ll                       |  8
 llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll                      |  8
 llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll                     |  2
 llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll               |  6
 llvm/test/CodeGen/AMDGPU/convergent-inlineasm.ll                    |  2
 llvm/test/CodeGen/AMDGPU/early-if-convert.ll                        |  2
 llvm/test/CodeGen/AMDGPU/else.ll                                    |  2
 llvm/test/CodeGen/AMDGPU/fence-amdgiz.ll                            |  2
 llvm/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll                    |  2
 llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir                    |  6
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll                 | 10
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll              |  2
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll           |  2
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll          |  2
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll                   |  2
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll                        |  4
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll                |  4
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll            |  4
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll                 |  4
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll             |  4
 llvm/test/CodeGen/AMDGPU/loop_break.ll                              |  4
 llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll           | 40
 llvm/test/CodeGen/AMDGPU/multilevel-break.ll                        |  2
 llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir               | 48
 llvm/test/CodeGen/AMDGPU/ret_jump.ll                                |  2
 llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll                       |  2
 llvm/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll |  2
 llvm/test/CodeGen/AMDGPU/skip-if-dead.ll                            | 48
 llvm/test/CodeGen/AMDGPU/smrd.ll                                    |  8
 llvm/test/CodeGen/AMDGPU/uniform-cfg.ll                             |  2
 llvm/test/CodeGen/AMDGPU/valu-i1.ll                                 |  2
 llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir            | 12
 32 files changed, 125 insertions(+), 125 deletions(-)
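
Every hunk below makes the same mechanical update: FileCheck expectations for the old "; BB#N:" basic-block comments are rewritten to the new "; %bb.N:" form now emitted by the MachineBasicBlock printer, and MIR branch operands drop the trailing IR-block name ("%bb.2.end" becomes "%bb.2") while block definitions such as "bb.2.end:" keep it. A minimal Python sketch of that rewrite follows; the script, its helper names, and the hard-coded path are illustrative assumptions, not the tool used to produce this commit.

import re
from pathlib import Path

# "%bb.2.end" -> "%bb.2": MIR operands lose the IR block name; block
# definitions like "bb.2.end:" have no leading '%' and are left untouched.
MIR_OPERAND = re.compile(r'(%bb\.[0-9]+)\.[A-Za-z_]\w*')

def update_checks(path: Path) -> None:
    text = path.read_text()
    # "; BB#0:" -> "; %bb.0:", ";CHECK-NEXT: BB#" -> ";CHECK-NEXT: %bb."
    text = text.replace('BB#', '%bb.')
    text = MIR_OPERAND.sub(r'\1', text)
    path.write_text(text)

# Hypothetical invocation over the directory this diff is limited to.
for pattern in ('*.ll', '*.mir'):
    for test in Path('llvm/test/CodeGen/AMDGPU').rglob(pattern):
        update_checks(test)

Plain string replacement is enough here because, in these tests, "BB#" occurs only inside check lines; anything subtler (for example, a literal "BB#" in stored data) would need a more targeted pattern.
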
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
index 9edf439b586..023baf1407e 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -24,7 +24,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN-NEXT: s_cbranch_scc1 [[BB3:BB[0-9]+_[0-9]+]]
-; GCN-NEXT: ; BB#1: ; %bb2
+; GCN-NEXT: ; %bb.1: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
@@ -275,7 +275,7 @@ bb4:
}
; GCN-LABEL: {{^}}uniform_unconditional_min_long_backward_branch:
-; GCN-NEXT: ; BB#0: ; %entry
+; GCN-NEXT: ; %bb.0: ; %entry
; GCN-NEXT: [[LOOP:BB[0-9]_[0-9]+]]: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -311,7 +311,7 @@ loop:
; branch from %bb0 to %bb2
; GCN-LABEL: {{^}}expand_requires_expand:
-; GCN-NEXT: ; BB#0: ; %bb0
+; GCN-NEXT: ; %bb.0: ; %bb0
; GCN: s_load_dword
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]]
@@ -398,7 +398,7 @@ bb3:
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc1 [[ENDIF]]
-; GCN-NEXT: ; BB#2: ; %if_uniform
+; GCN-NEXT: ; %bb.2: ; %if_uniform
; GCN: buffer_store_dword
; GCN-NEXT: [[ENDIF]]: ; %endif
diff --git a/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll b/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
index 9e01267150e..88d165144f9 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX9 %s
; GCN-LABEL: {{^}}callee_no_stack:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN-NEXT: s_setpc_b64
define void @callee_no_stack() #0 {
@@ -10,7 +10,7 @@ define void @callee_no_stack() #0 {
}
; GCN-LABEL: {{^}}callee_no_stack_no_fp_elim:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN-NEXT: s_setpc_b64
define void @callee_no_stack_no_fp_elim() #1 {
@@ -20,7 +20,7 @@ define void @callee_no_stack_no_fp_elim() #1 {
; Requires frame pointer for access to local regular object.
; GCN-LABEL: {{^}}callee_with_stack:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN-NEXT: s_mov_b32 s5, s32
; GCN-NEXT: v_mov_b32_e32 v0, 0{{$}}
@@ -34,7 +34,7 @@ define void @callee_with_stack() #0 {
}
; GCN-LABEL: {{^}}callee_with_stack_and_call:
-; GCN: ; BB#0:
+; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt
; GCN: s_mov_b32 s5, s32
; GCN: buffer_store_dword v32, off, s[0:3], s5 offset:8
diff --git a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
index 697f26b83a4..1e0af2611b0 100644
--- a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -102,7 +102,7 @@ for.body:
; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4
; GCN: s_cbranch_vccnz [[LOOPBB]]
-; GCN-NEXT: ; BB#2
+; GCN-NEXT: ; %bb.2
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n, i1 %cond) nounwind {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll b/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
index 6f9c043f914..071bcbcf81b 100644
--- a/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
@@ -13,7 +13,7 @@
; VGPR: workitem_private_segment_byte_size = 12{{$}}
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
@@ -91,7 +91,7 @@ endif:
; GCN-LABEL: {{^}}divergent_loop:
; VGPR: workitem_private_segment_byte_size = 12{{$}}
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
@@ -167,7 +167,7 @@ end:
}
; GCN-LABEL: {{^}}divergent_if_else_endif:
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
diff --git a/llvm/test/CodeGen/AMDGPU/convergent-inlineasm.ll b/llvm/test/CodeGen/AMDGPU/convergent-inlineasm.ll
index 0074a41e44c..80907bf1c1b 100644
--- a/llvm/test/CodeGen/AMDGPU/convergent-inlineasm.ll
+++ b/llvm/test/CodeGen/AMDGPU/convergent-inlineasm.ll
@@ -2,7 +2,7 @@
declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN-LABEL: {{^}}convergent_inlineasm:
-; GCN: BB#0:
+; GCN: %bb.0:
; GCN: v_cmp_ne_u32_e64
; GCN: ; mask branch
; GCN: BB{{[0-9]+_[0-9]+}}:
diff --git a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
index 792f0b1eaef..d129ca5c140 100644
--- a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
+++ b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
@@ -382,7 +382,7 @@ done:
}
; GCN-LABEL: {{^}}ifcvt_undef_scc:
-; GCN: {{^}}; BB#0:
+; GCN: {{^}}; %bb.0:
; GCN-NEXT: s_load_dwordx2
; GCN-NEXT: s_cselect_b32 s{{[0-9]+}}, 1, 0
define amdgpu_kernel void @ifcvt_undef_scc(i32 %cond, i32 addrspace(1)* %out) {
diff --git a/llvm/test/CodeGen/AMDGPU/else.ll b/llvm/test/CodeGen/AMDGPU/else.ll
index 22338e4f50e..c73ea936e8b 100644
--- a/llvm/test/CodeGen/AMDGPU/else.ll
+++ b/llvm/test/CodeGen/AMDGPU/else.ll
@@ -25,7 +25,7 @@ end:
}
; CHECK-LABEL: {{^}}else_execfix_leave_wqm:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: s_mov_b64 [[INIT_EXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: ; %Flow
; CHECK-NEXT: s_or_saveexec_b64 [[DST:s\[[0-9]+:[0-9]+\]]],
diff --git a/llvm/test/CodeGen/AMDGPU/fence-amdgiz.ll b/llvm/test/CodeGen/AMDGPU/fence-amdgiz.ll
index 3055f325f3f..0dd2a9241b2 100644
--- a/llvm/test/CodeGen/AMDGPU/fence-amdgiz.ll
+++ b/llvm/test/CodeGen/AMDGPU/fence-amdgiz.ll
@@ -3,7 +3,7 @@
target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5"
; CHECK-LABEL: atomic_fence
-; CHECK: BB#0:
+; CHECK: %bb.0:
; CHECK-NOT: ATOMIC_FENCE
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: buffer_wbinvl1_vol
diff --git a/llvm/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll b/llvm/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
index f6bf0b09486..37d05c7ac41 100644
--- a/llvm/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
+++ b/llvm/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
@@ -3,7 +3,7 @@
; SILowerI1Copies was not handling IMPLICIT_DEF
; SI-LABEL: {{^}}br_implicit_def:
-; SI: BB#0:
+; SI: %bb.0:
; SI-NEXT: s_cbranch_scc1
define amdgpu_kernel void @br_implicit_def(i32 addrspace(1)* %out, i32 %arg) #0 {
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
index 67642282f75..61aa39fcc25 100644
--- a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
+++ b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
@@ -26,7 +26,7 @@
...
---
# CHECK-LABEL: name: invert_br_undef_vcc
-# CHECK: S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc
+# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
name: invert_br_undef_vcc
alignment: 0
@@ -58,7 +58,7 @@ body: |
%sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%sgpr7 = S_MOV_B32 61440
%sgpr6 = S_MOV_B32 -1
- S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc
+ S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
bb.1.else:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -66,7 +66,7 @@ body: |
%vgpr0 = V_MOV_B32_e32 100, implicit %exec
BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
%vgpr0 = V_MOV_B32_e32 1, implicit %exec
- S_BRANCH %bb.3.done
+ S_BRANCH %bb.3
bb.2.if:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
index 4f8c6191224..49ca7d40572 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll
@@ -127,7 +127,7 @@ entry:
}
;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
;CHECK: s_waitcnt
@@ -151,7 +151,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged_glc_slc:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4{{$}}
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:12 glc{{$}}
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 glc slc{{$}}
@@ -176,7 +176,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x2_offen_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
;CHECK: s_waitcnt
define amdgpu_ps void @buffer_load_x2_offen_merged(<4 x i32> inreg %rsrc, i32 %a) {
@@ -194,7 +194,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x1_offset_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4
;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:28
;CHECK: s_waitcnt
@@ -212,7 +212,7 @@ main_body:
}
;CHECK-LABEL: {{^}}buffer_load_x2_offset_merged:
-;CHECK-NEXT: BB#
+;CHECK-NEXT: %bb.
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4
;CHECK: s_waitcnt
define amdgpu_ps void @buffer_load_x2_offset_merged(<4 x i32> inreg %rsrc) {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
index 10bea8ea63b..69de9555035 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
@@ -4,7 +4,7 @@
declare void @llvm.amdgcn.buffer.wbinvl1() #0
; GCN-LABEL: {{^}}test_buffer_wbinvl1:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; SI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xc4,0xe1,0x00,0x00,0x00,0x00]
; VI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
index fe60d16d90f..d1c8f37b3d8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
@@ -3,7 +3,7 @@
declare void @llvm.amdgcn.buffer.wbinvl1.sc() #0
; SI-LABEL: {{^}}test_buffer_wbinvl1_sc:
-; SI-NEXT: ; BB#0:
+; SI-NEXT: ; %bb.0:
; SI-NEXT: buffer_wbinvl1_sc ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00]
; SI-NEXT: s_endpgm
define amdgpu_kernel void @test_buffer_wbinvl1_sc() #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
index 061c1469ed4..4dc938c9b0a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
@@ -4,7 +4,7 @@
declare void @llvm.amdgcn.buffer.wbinvl1.vol() #0
; GCN-LABEL: {{^}}test_buffer_wbinvl1_vol:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; CI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00]
; VI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00]
; GCN: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
index 7b1cfa18721..16d0c237007 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
@@ -25,7 +25,7 @@ define amdgpu_kernel void @s_cvt_pkrtz_samereg_v2f16_f32(<2 x half> addrspace(1)
; FIXME: Folds to 0 on gfx9
; GCN-LABEL: {{^}}s_cvt_pkrtz_undef_undef:
-; GCN-NEXT: ; BB#0
+; GCN-NEXT: ; %bb.0
; SI-NEXT: s_endpgm
; VI-NEXT: s_endpgm
; GFX9: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
index a1ecb7f750c..d6b0628956a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
@@ -31,8 +31,8 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) {
}
; SI-LABEL: {{^}}true:
-; SI-NEXT: BB#
-; SI-NEXT: BB#
+; SI-NEXT: %bb.
+; SI-NEXT: %bb.
; SI-NEXT: s_endpgm
define amdgpu_gs void @true() {
call void @llvm.amdgcn.kill(i1 true)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
index 224b2ed72e3..b7fb96a2d1a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
@@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.dcache.inv() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; GCN-LABEL: {{^}}test_s_dcache_inv:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; SI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0xc0,0xc7]
; VI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
@@ -15,7 +15,7 @@ define amdgpu_kernel void @test_s_dcache_inv() #0 {
}
; GCN-LABEL: {{^}}test_s_dcache_inv_insert_wait:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; GCN: s_dcache_inv
; GCN: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_inv_insert_wait() #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
index f96d5db5794..e8a363adde7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
@@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.dcache.inv.vol() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; GCN-LABEL: {{^}}test_s_dcache_inv_vol:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; CI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x40,0xc7]
; VI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
@@ -15,7 +15,7 @@ define amdgpu_kernel void @test_s_dcache_inv_vol() #0 {
}
; GCN-LABEL: {{^}}test_s_dcache_inv_vol_insert_wait:
-; GCN-NEXT: ; BB#0:
+; GCN-NEXT: ; %bb.0:
; GCN-NEXT: s_dcache_inv_vol
; GCN: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_inv_vol_insert_wait() #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
index 99b65135043..254a0fae3c3 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
@@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.dcache.wb() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; VI-LABEL: {{^}}test_s_dcache_wb:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb ; encoding: [0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00]
; VI-NEXT: s_endpgm
define amdgpu_kernel void @test_s_dcache_wb() #0 {
@@ -13,7 +13,7 @@ define amdgpu_kernel void @test_s_dcache_wb() #0 {
}
; VI-LABEL: {{^}}test_s_dcache_wb_insert_wait:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb
; VI: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_wb_insert_wait() #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
index 844fcecdb48..929cd1c5f0b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
@@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.dcache.wb.vol() #0
declare void @llvm.amdgcn.s.waitcnt(i32) #0
; VI-LABEL: {{^}}test_s_dcache_wb_vol:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb_vol ; encoding: [0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00]
; VI-NEXT: s_endpgm
define amdgpu_kernel void @test_s_dcache_wb_vol() #0 {
@@ -13,7 +13,7 @@ define amdgpu_kernel void @test_s_dcache_wb_vol() #0 {
}
; VI-LABEL: {{^}}test_s_dcache_wb_vol_insert_wait:
-; VI-NEXT: ; BB#0:
+; VI-NEXT: ; %bb.0:
; VI-NEXT: s_dcache_wb_vol
; VI: s_waitcnt lgkmcnt(0) ; encoding
define amdgpu_kernel void @test_s_dcache_wb_vol_insert_wait() #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/loop_break.ll b/llvm/test/CodeGen/AMDGPU/loop_break.ll
index 4acd1b24795..b2641cd4d2e 100644
--- a/llvm/test/CodeGen/AMDGPU/loop_break.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop_break.ll
@@ -31,7 +31,7 @@
; GCN: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[FLOW:BB[0-9]+_[0-9]+]]
-; GCN: ; BB#2: ; %bb4
+; GCN: ; %bb.2: ; %bb4
; GCN: buffer_load_dword
; GCN: v_cmp_ge_i32_e32 vcc,
; GCN: s_or_b64 [[MASK]], vcc, [[INITMASK]]
@@ -41,7 +41,7 @@
; GCN: s_andn2_b64 exec, exec, [[MASK]]
; GCN-NEXT: s_cbranch_execnz [[LOOP_ENTRY]]
-; GCN: ; BB#4: ; %bb9
+; GCN: ; %bb.4: ; %bb9
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @break_loop(i32 %arg) #0 {
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll
index a563cfd0283..c8e920a1854 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll
@@ -3,7 +3,7 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=GCN -check-prefix=GFX8 %s
; FUNC-LABEL: {{^}}system_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GFX6: s_waitcnt vmcnt(0){{$}}
; GFX6-NEXT: buffer_wbinvl1{{$}}
@@ -17,7 +17,7 @@ entry:
}
; FUNC-LABEL: {{^}}system_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GCN: s_endpgm
@@ -28,7 +28,7 @@ entry:
}
; FUNC-LABEL: {{^}}system_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -41,7 +41,7 @@ entry:
}
; FUNC-LABEL: {{^}}system_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -54,7 +54,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_acquire() {
@@ -64,7 +64,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_release() {
@@ -74,7 +74,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_acq_rel() {
@@ -84,7 +84,7 @@ entry:
}
; FUNC-LABEL: {{^}}singlethread_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @singlethread_seq_cst() {
@@ -94,7 +94,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GFX6: s_waitcnt vmcnt(0){{$}}
; GFX6-NEXT: buffer_wbinvl1{{$}}
@@ -108,7 +108,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GCN: s_endpgm
@@ -119,7 +119,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -132,7 +132,7 @@ entry:
}
; FUNC-LABEL: {{^}}agent_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_waitcnt vmcnt(0){{$}}
; GFX6: buffer_wbinvl1{{$}}
@@ -145,7 +145,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_acquire() {
@@ -155,7 +155,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_release() {
@@ -165,7 +165,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_acq_rel() {
@@ -175,7 +175,7 @@ entry:
}
; FUNC-LABEL: {{^}}workgroup_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @workgroup_seq_cst() {
@@ -185,7 +185,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_acquire
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_acquire() {
@@ -195,7 +195,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_release
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_release() {
@@ -205,7 +205,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_acq_rel
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_acq_rel() {
@@ -215,7 +215,7 @@ entry:
}
; FUNC-LABEL: {{^}}wavefront_seq_cst
-; GCN: BB#0
+; GCN: %bb.0
; GCN-NOT: ATOMIC_FENCE
; GCN: s_endpgm
define amdgpu_kernel void @wavefront_seq_cst() {
diff --git a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
index 15de689b953..8cc02d49709 100644
--- a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
+++ b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
@@ -34,7 +34,7 @@
; GCN-NEXT: s_andn2_b64 exec, exec, [[OR_BREAK]]
; GCN-NEXT: s_cbranch_execnz [[INNER_LOOP]]
-; GCN: ; BB#{{[0-9]+}}: ; %Flow1{{$}}
+; GCN: ; %bb.{{[0-9]+}}: ; %Flow1{{$}}
; GCN-NEXT: ; in Loop: Header=[[OUTER_LOOP]] Depth=1
; Ensure copy is eliminated
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index b5dc9d9dac8..24e8ed8e29c 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -184,8 +184,8 @@ body: |
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -241,8 +241,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -298,8 +298,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -359,8 +359,8 @@ body: |
BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -384,7 +384,7 @@ body: |
# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_and_saveexec_xor_wrong_reg
alignment: 0
exposesReturnsTwice: false
@@ -420,8 +420,8 @@ body: |
%sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term %sgpr0_sgpr1
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
@@ -443,7 +443,7 @@ body: |
# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_and_saveexec_xor_modify_copy_to_exec
alignment: 0
@@ -479,8 +479,8 @@ body: |
%sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -540,8 +540,8 @@ body: |
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
@@ -565,7 +565,7 @@ body: |
# CHECK: %sgpr0_sgpr1 = COPY %exec
# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_unknown_saveexec
alignment: 0
@@ -599,8 +599,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -656,8 +656,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -680,7 +680,7 @@ body: |
# CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_andn2_saveexec_no_commute
alignment: 0
exposesReturnsTwice: false
@@ -713,8 +713,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/ret_jump.ll b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
index 7c2e28108df..f87e8cbea4f 100644
--- a/llvm/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/llvm/test/CodeGen/AMDGPU/ret_jump.ll
@@ -57,7 +57,7 @@ ret.bb: ; preds = %else, %main_body
; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable:
; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]]
-; GCN: ; BB#{{[0-9]+}}: ; %else
+; GCN: ; %bb.{{[0-9]+}}: ; %else
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9]+_[0-9]+]]
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index 8e18ab5554e..575938b5a5c 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -37,7 +37,7 @@ endif:
; SI: s_cmp_lg_u32
; SI: s_cbranch_scc0 [[IF:BB[0-9]+_[0-9]+]]
-; SI: ; BB#1: ; %else
+; SI: ; %bb.1: ; %else
; SI: s_load_dword [[LOAD0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xe
; SI: s_load_dword [[LOAD1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xf
; SI-NOT: add
diff --git a/llvm/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll b/llvm/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
index 7423a4a2753..ce85a666340 100644
--- a/llvm/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
@@ -55,7 +55,7 @@ unreachable:
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[UNREACHABLE:BB[0-9]+_[0-9]+]]
-; GCN-NEXT: BB#{{[0-9]+}}: ; %ret
+; GCN-NEXT: %bb.{{[0-9]+}}: ; %ret
; GCN-NEXT: s_endpgm
; GCN: [[UNREACHABLE]]:
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index ed7e06ee4e2..9ae36b0a06c 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}test_kill_depth_0_imm_pos:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 {
call void @llvm.AMDGPU.kill(float 0.0)
@@ -9,9 +9,9 @@ define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 {
call void @llvm.AMDGPU.kill(float -0.0)
@@ -20,11 +20,11 @@ define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 {
; FIXME: Ideally only one would be emitted
; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg_x2:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 {
call void @llvm.AMDGPU.kill(float -0.0)
@@ -33,9 +33,9 @@ define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_var:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var(float %x) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -44,11 +44,11 @@ define amdgpu_ps void @test_kill_depth_var(float %x) #0 {
; FIXME: Ideally only one would be emitted
; CHECK-LABEL: {{^}}test_kill_depth_var_x2_same:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -57,11 +57,11 @@ define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_var_x2:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v1
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -70,12 +70,12 @@ define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 {
}
; CHECK-LABEL: {{^}}test_kill_depth_var_x2_instructions:
-; CHECK-NEXT: ; BB#0:
+; CHECK-NEXT: ; %bb.0:
; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK: v_mov_b32_e64 v7, -1
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: s_endpgm
define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
call void @llvm.AMDGPU.kill(float %x)
@@ -90,7 +90,7 @@ define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
; CHECK: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT: ; %bb.1:
; CHECK: v_mov_b32_e64 v7, -1
; CHECK: v_nop_e64
; CHECK: v_nop_e64
@@ -105,7 +105,7 @@ define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: exp null off, off, off, off done vm
; CHECK-NEXT: s_endpgm
@@ -141,7 +141,7 @@ exit:
; CHECK-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0
; CHECK-NEXT: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#1: ; %bb
+; CHECK-NEXT: ; %bb.1: ; %bb
; CHECK: v_mov_b32_e64 v7, -1
; CHECK: v_nop_e64
; CHECK: v_nop_e64
@@ -157,7 +157,7 @@ exit:
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; BB#2:
+; CHECK-NEXT: ; %bb.2:
; CHECK-NEXT: exp null off, off, off, off done vm
; CHECK-NEXT: s_endpgm
@@ -215,7 +215,7 @@ exit:
; CHECK: v_nop_e64
; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7
-; CHECK-NEXT: ; BB#3:
+; CHECK-NEXT: ; %bb.3:
; CHECK: buffer_load_dword [[LOAD:v[0-9]+]]
; CHECK: v_cmp_eq_u32_e32 vcc, 0, [[LOAD]]
; CHECK-NEXT: s_and_b64 vcc, exec, vcc
@@ -309,7 +309,7 @@ end:
; CHECK: [[SKIPKILL]]:
; CHECK: v_cmp_nge_f32_e32 vcc
-; CHECK-NEXT: BB#3: ; %bb5
+; CHECK-NEXT: %bb.3: ; %bb5
; CHECK-NEXT: .Lfunc_end{{[0-9]+}}
define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
bb:
@@ -335,7 +335,7 @@ bb7: ; preds = %bb4
}
; CHECK-LABEL: {{^}}if_after_kill_block:
-; CHECK: ; BB#0:
+; CHECK: ; %bb.0:
; CHECK: s_and_saveexec_b64
; CHECK: s_xor_b64
; CHECK-NEXT: mask branch [[BB4:BB[0-9]+_[0-9]+]]
diff --git a/llvm/test/CodeGen/AMDGPU/smrd.ll b/llvm/test/CodeGen/AMDGPU/smrd.ll
index 6f4592cabee..9fd20fd67b8 100644
--- a/llvm/test/CodeGen/AMDGPU/smrd.ll
+++ b/llvm/test/CodeGen/AMDGPU/smrd.ll
@@ -193,7 +193,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_vgpr_offset_imm:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; SICIVI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4095 ;
@@ -207,7 +207,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; GCN-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0
; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 {
@@ -218,7 +218,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_imm_merged:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; SICI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1
; SICI-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x7
; VI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x4
@@ -243,7 +243,7 @@ main_body:
}
; GCN-LABEL: {{^}}smrd_vgpr_merged:
-; GCN-NEXT: BB#
+; GCN-NEXT: %bb.
; SICIVI-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
; SICIVI-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
index 247b9691aff..a247d7a343f 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -401,7 +401,7 @@ exit:
; GCN: s_cmp_lt_i32 [[COND]], 1
; GCN: s_cbranch_scc1 BB[[FNNUM:[0-9]+]]_3
-; GCN: BB#1:
+; GCN: %bb.1:
; GCN-NOT: cmp
; GCN: buffer_load_dword
; GCN: buffer_store_dword
diff --git a/llvm/test/CodeGen/AMDGPU/valu-i1.ll b/llvm/test/CodeGen/AMDGPU/valu-i1.ll
index 3b0f003f52b..4a3937e44f3 100644
--- a/llvm/test/CodeGen/AMDGPU/valu-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/valu-i1.ll
@@ -192,7 +192,7 @@ exit:
; Load loop limit from buffer
; Branch to exit if uniformly not taken
-; SI: ; BB#0:
+; SI: ; %bb.0:
; SI: buffer_load_dword [[VBOUND:v[0-9]+]]
; SI: v_cmp_lt_i32_e32 vcc
; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc
diff --git a/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
index 54991d3d953..ff9826baf48 100644
--- a/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
+++ b/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
@@ -48,7 +48,7 @@
# CHECK-LABEL: name: vccz_corrupt_workaround
# CHECK: %vcc = V_CMP_EQ_F32
# CHECK-NEXT: %vcc = S_MOV_B64 %vcc
-# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit killed %vcc
+# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
name: vccz_corrupt_workaround
alignment: 0
@@ -82,7 +82,7 @@ body: |
%sgpr7 = S_MOV_B32 61440
%sgpr6 = S_MOV_B32 -1
%vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, implicit %exec
- S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc
+ S_CBRANCH_VCCZ %bb.1, implicit killed %vcc
bb.2.if:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -90,7 +90,7 @@ body: |
%vgpr0 = V_MOV_B32_e32 9, implicit %exec
BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
%vgpr0 = V_MOV_B32_e32 0, implicit %exec
- S_BRANCH %bb.3.done
+ S_BRANCH %bb.3
bb.1.else:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -111,7 +111,7 @@ body: |
---
# CHECK-LABEL: name: vccz_corrupt_undef_vcc
# CHECK: S_WAITCNT
-# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit undef %vcc
+# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef %vcc
name: vccz_corrupt_undef_vcc
alignment: 0
@@ -143,7 +143,7 @@ body: |
%sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%sgpr7 = S_MOV_B32 61440
%sgpr6 = S_MOV_B32 -1
- S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc
+ S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
bb.2.if:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
@@ -151,7 +151,7 @@ body: |
%vgpr0 = V_MOV_B32_e32 9, implicit %exec
BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
%vgpr0 = V_MOV_B32_e32 0, implicit %exec
- S_BRANCH %bb.3.done
+ S_BRANCH %bb.3
bb.1.else:
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003