Diffstat (limited to 'llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll')
-rw-r--r--  llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll | 104
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 7406dc514d3..9502ec95d09 100644
--- a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -13,7 +13,7 @@ declare void @llvm.x86.avx512.scatter.qpd.512 (i8*, i8, <8 x i64>, <8 x double>,
define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -29,7 +29,7 @@ define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8*
define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -45,7 +45,7 @@ define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %b
define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -61,7 +61,7 @@ define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %ba
define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -89,7 +89,7 @@ declare void @llvm.x86.avx512.scatter.qpq.512 (i8*, i8, <8 x i64>, <8 x i64>, i3
define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -105,7 +105,7 @@ define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %ba
define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqd (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -121,7 +121,7 @@ define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base,
define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqq (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -137,7 +137,7 @@ define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base,
define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdq (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -153,7 +153,7 @@ define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base,
define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_dpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
@@ -166,7 +166,7 @@ define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %m
define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_qpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
@@ -179,7 +179,7 @@ define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %m
define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_dps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
@@ -190,7 +190,7 @@ define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %s
define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_qps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
@@ -201,7 +201,7 @@ define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src,
define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterdpd %zmm1, (%rcx,%ymm0,4) {%k1}
@@ -214,7 +214,7 @@ define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8
define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterqpd %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -227,7 +227,7 @@ define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8
define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vscatterdps %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -240,7 +240,7 @@ define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i1
define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vscatterqps %ymm1, (%rcx,%zmm0,4) {%k1}
@@ -253,7 +253,7 @@ define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %
define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_qps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
@@ -272,7 +272,7 @@ declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
declare void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
define void @prefetch(<8 x i64> %ind, i8* %base) {
; CHECK-LABEL: prefetch:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vgatherpf0qps (%rdi,%zmm0,4) {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k1
@@ -296,7 +296,7 @@ declare <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double>, i8*, <2 x i64
define <2 x double>@test_int_x86_avx512_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -314,7 +314,7 @@ declare <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64>, i8*, <2 x i64>, i8,
define <2 x i64>@test_int_x86_avx512_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%xmm1,8), %xmm0 {%k1}
; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
@@ -329,7 +329,7 @@ declare <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double>, i8*, <4 x i64
define <4 x double>@test_int_x86_avx512_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -347,7 +347,7 @@ declare <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64>, i8*, <4 x i64>, i8,
define <4 x i64>@test_int_x86_avx512_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -365,7 +365,7 @@ declare <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float>, i8*, <2 x i64>,
define <4 x float>@test_int_x86_avx512_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -383,7 +383,7 @@ declare <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32>, i8*, <2 x i64>, i8,
define <4 x i32>@test_int_x86_avx512_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -401,7 +401,7 @@ declare <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float>, i8*, <4 x i64>,
define <4 x float>@test_int_x86_avx512_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqps (%rdi,%ymm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -420,7 +420,7 @@ declare <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32>, i8*, <4 x i64>, i8,
define <4 x i32>@test_int_x86_avx512_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm2
; CHECK-NEXT: kmovq %k1, %k2
@@ -439,7 +439,7 @@ declare <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double>, i8*, <4 x i32
define <2 x double>@test_int_x86_avx512_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -457,7 +457,7 @@ declare <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64>, i8*, <4 x i32>, i8,
define <2 x i64>@test_int_x86_avx512_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
@@ -472,7 +472,7 @@ declare <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double>, i8*, <4 x i32
define <4 x double>@test_int_x86_avx512_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -490,7 +490,7 @@ declare <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64>, i8*, <4 x i32>, i8,
define <4 x i64>@test_int_x86_avx512_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %ymm0 {%k1}
; CHECK-NEXT: vpaddq %ymm0, %ymm0, %ymm0
@@ -505,7 +505,7 @@ declare <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float>, i8*, <4 x i32>,
define <4 x float>@test_int_x86_avx512_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -523,7 +523,7 @@ declare <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32>, i8*, <4 x i32>, i8,
define <4 x i32>@test_int_x86_avx512_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -541,7 +541,7 @@ declare <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float>, i8*, <8 x i32>,
define <8 x float>@test_int_x86_avx512_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -559,7 +559,7 @@ declare <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32>, i8*, <8 x i32>, i8,
define <8 x i32>@test_int_x86_avx512_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm2
; CHECK-NEXT: kmovq %k1, %k2
@@ -577,7 +577,7 @@ declare void @llvm.x86.avx512.scatterdiv2.df(i8*, i8, <2 x i64>, <2 x double>, i
define void@test_int_x86_avx512_scatterdiv2_df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterqpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -592,7 +592,7 @@ declare void @llvm.x86.avx512.scatterdiv2.di(i8*, i8, <2 x i64>, <2 x i64>, i32)
define void@test_int_x86_avx512_scatterdiv2_di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -607,7 +607,7 @@ declare void @llvm.x86.avx512.scatterdiv4.df(i8*, i8, <4 x i64>, <4 x double>, i
define void@test_int_x86_avx512_scatterdiv4_df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqpd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -623,7 +623,7 @@ declare void @llvm.x86.avx512.scatterdiv4.di(i8*, i8, <4 x i64>, <4 x i64>, i32)
define void@test_int_x86_avx512_scatterdiv4_di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -639,7 +639,7 @@ declare void @llvm.x86.avx512.scatterdiv4.sf(i8*, i8, <2 x i64>, <4 x float>, i3
define void@test_int_x86_avx512_scatterdiv4_sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -654,7 +654,7 @@ declare void @llvm.x86.avx512.scatterdiv4.si(i8*, i8, <2 x i64>, <4 x i32>, i32)
define void@test_int_x86_avx512_scatterdiv4_si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -669,7 +669,7 @@ declare void @llvm.x86.avx512.scatterdiv8.sf(i8*, i8, <4 x i64>, <4 x float>, i3
define void@test_int_x86_avx512_scatterdiv8_sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -685,7 +685,7 @@ declare void @llvm.x86.avx512.scatterdiv8.si(i8*, i8, <4 x i64>, <4 x i32>, i32)
define void@test_int_x86_avx512_scatterdiv8_si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -701,7 +701,7 @@ declare void @llvm.x86.avx512.scattersiv2.df(i8*, i8, <4 x i32>, <2 x double>, i
define void@test_int_x86_avx512_scattersiv2_df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterdpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -716,7 +716,7 @@ declare void @llvm.x86.avx512.scattersiv2.di(i8*, i8, <4 x i32>, <2 x i64>, i32)
define void@test_int_x86_avx512_scattersiv2_di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -731,7 +731,7 @@ declare void @llvm.x86.avx512.scattersiv4.df(i8*, i8, <4 x i32>, <4 x double>, i
define void@test_int_x86_avx512_scattersiv4_df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdpd %ymm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -747,7 +747,7 @@ declare void @llvm.x86.avx512.scattersiv4.di(i8*, i8, <4 x i32>, <4 x i64>, i32)
define void@test_int_x86_avx512_scattersiv4_di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %ymm1, (%rdi,%xmm0,2) {%k2}
@@ -763,7 +763,7 @@ declare void @llvm.x86.avx512.scattersiv4.sf(i8*, i8, <4 x i32>, <4 x float>, i3
define void@test_int_x86_avx512_scattersiv4_sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -778,7 +778,7 @@ declare void @llvm.x86.avx512.scattersiv4.si(i8*, i8, <4 x i32>, <4 x i32>, i32)
define void@test_int_x86_avx512_scattersiv4_si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -793,7 +793,7 @@ declare void @llvm.x86.avx512.scattersiv8.sf(i8*, i8, <8 x i32>, <8 x float>, i3
define void@test_int_x86_avx512_scattersiv8_sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -809,7 +809,7 @@ declare void @llvm.x86.avx512.scattersiv8.si(i8*, i8, <8 x i32>, <8 x i32>, i32)
define void@test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -823,7 +823,7 @@ define void@test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <
define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-LABEL: scatter_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k1
@@ -845,7 +845,7 @@ define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %base) {
; CHECK-LABEL: gather_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm2 {%k1}