Diffstat (limited to 'llvm/test/CodeGen/X86/vector-zext.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-zext.ll  494
1 file changed, 247 insertions, 247 deletions
diff --git a/llvm/test/CodeGen/X86/vector-zext.ll b/llvm/test/CodeGen/X86/vector-zext.ll
index b0544169dad..94eadd8c1aa 100644
--- a/llvm/test/CodeGen/X86/vector-zext.ll
+++ b/llvm/test/CodeGen/X86/vector-zext.ll
@@ -9,24 +9,24 @@
define <8 x i16> @zext_16i8_to_8i16(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_16i8_to_8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
entry:
@@ -38,7 +38,7 @@ entry:
; PR17654
define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSE2-LABEL: zext_16i8_to_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
@@ -46,7 +46,7 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
@@ -54,7 +54,7 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -62,7 +62,7 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -70,12 +70,12 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_16i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512-NEXT: retq
entry:
@@ -85,7 +85,7 @@ entry:
define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSE2-LABEL: zext_32i8_to_32i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -97,7 +97,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_32i8_to_32i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm1, %xmm3
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm4, %xmm4
@@ -109,7 +109,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_32i8_to_32i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -121,7 +121,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_32i8_to_32i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
@@ -135,7 +135,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_32i8_to_32i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -143,7 +143,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: zext_32i8_to_32i16:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -151,7 +151,7 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: zext_32i8_to_32i16:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: retq
entry:
@@ -161,26 +161,26 @@ entry:
define <4 x i32> @zext_16i8_to_4i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_16i8_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -191,7 +191,7 @@ entry:
define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -201,7 +201,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -211,7 +211,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -219,7 +219,7 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -227,12 +227,12 @@ define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -243,7 +243,7 @@ entry:
define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_16i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm1
@@ -258,7 +258,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_16i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm3, %xmm1
@@ -273,7 +273,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_16i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -285,7 +285,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -299,7 +299,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -307,7 +307,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_16i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -317,7 +317,7 @@ entry:
define <2 x i64> @zext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -325,17 +325,17 @@ define <2 x i64> @zext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_16i8_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -346,7 +346,7 @@ entry:
define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -357,14 +357,14 @@ define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -372,7 +372,7 @@ define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -380,12 +380,12 @@ define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -396,7 +396,7 @@ entry:
define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
@@ -413,7 +413,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
@@ -426,7 +426,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $16, %xmm1
@@ -439,7 +439,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
@@ -453,7 +453,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
@@ -461,7 +461,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i8_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -472,24 +472,24 @@ entry:
define <4 x i32> @zext_8i16_to_4i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_8i16_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
entry:
@@ -500,7 +500,7 @@ entry:
define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -508,7 +508,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -516,7 +516,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -524,7 +524,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -532,12 +532,12 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
entry:
@@ -547,7 +547,7 @@ entry:
define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i16_to_16i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -559,7 +559,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i16_to_16i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm1, %xmm3
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm4, %xmm4
@@ -571,7 +571,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i16_to_16i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -583,7 +583,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i16_to_16i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
@@ -597,7 +597,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i16_to_16i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -605,7 +605,7 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_16i16_to_16i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: retq
entry:
@@ -615,26 +615,26 @@ entry:
define <2 x i64> @zext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_8i16_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -645,7 +645,7 @@ entry:
define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -655,7 +655,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -665,7 +665,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -673,7 +673,7 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -681,12 +681,12 @@ define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i16_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -697,7 +697,7 @@ entry:
define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm1
@@ -712,7 +712,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm3, %xmm1
@@ -727,7 +727,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
@@ -739,7 +739,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
@@ -753,7 +753,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -761,7 +761,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i16_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -771,24 +771,24 @@ entry:
define <2 x i64> @zext_4i32_to_2i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_4i32_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_4i32_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_4i32_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_4i32_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
entry:
@@ -799,7 +799,7 @@ entry:
define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -807,7 +807,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps %xmm0, %xmm1
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -815,7 +815,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
@@ -823,7 +823,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -831,12 +831,12 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: retq
entry:
@@ -846,7 +846,7 @@ entry:
define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i32_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: xorps %xmm4, %xmm4
@@ -858,7 +858,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i32_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps %xmm1, %xmm3
; SSSE3-NEXT: movaps %xmm0, %xmm1
; SSSE3-NEXT: xorps %xmm4, %xmm4
@@ -870,7 +870,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i32_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -882,7 +882,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i32_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
@@ -896,7 +896,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i32_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -904,7 +904,7 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i32_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512-NEXT: retq
entry:
@@ -914,7 +914,7 @@ entry:
define <2 x i64> @load_zext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSE2-LABEL: load_zext_2i8_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -924,19 +924,19 @@ define <2 x i64> @load_zext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_2i8_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movzwl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_2i8_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_2i8_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -947,7 +947,7 @@ entry:
define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSE2-LABEL: load_zext_4i8_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -955,7 +955,7 @@ define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i8_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -963,12 +963,12 @@ define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i8_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_4i8_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -979,7 +979,7 @@ entry:
define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-LABEL: load_zext_4i8_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -990,7 +990,7 @@ define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i8_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
@@ -998,25 +998,25 @@ define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i8_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_4i8_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_4i8_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_4i8_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1027,26 +1027,26 @@ entry:
define <8 x i16> @load_zext_8i8_to_8i16(<8 x i8> *%ptr) {
; SSE2-LABEL: load_zext_8i8_to_8i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i8_to_8i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i8_to_8i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_8i8_to_8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX-NEXT: retq
entry:
@@ -1057,7 +1057,7 @@ entry:
define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSE2-LABEL: load_zext_8i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1067,7 +1067,7 @@ define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1077,25 +1077,25 @@ define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_8i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_8i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_8i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1106,7 +1106,7 @@ entry:
define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSE2-LABEL: load_zext_16i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1116,7 +1116,7 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_16i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1126,7 +1126,7 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_16i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
@@ -1134,7 +1134,7 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_16i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1143,12 +1143,12 @@ define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_16i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_16i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1160,7 +1160,7 @@ entry:
define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE2-LABEL: load_zext_8i8_to_8i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
@@ -1177,7 +1177,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i8_to_8i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
@@ -1191,7 +1191,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i8_to_8i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
@@ -1199,7 +1199,7 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_8i8_to_8i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1209,13 +1209,13 @@ define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_8i8_to_8i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_8i8_to_8i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1226,7 +1226,7 @@ entry:
define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-LABEL: load_zext_16i8_to_16i16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -1235,7 +1235,7 @@ define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_16i8_to_16i16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm1, %xmm0
@@ -1244,25 +1244,25 @@ define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_16i8_to_16i16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_16i8_to_16i16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_16i8_to_16i16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_16i8_to_16i16:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512-NEXT: retq
entry:
@@ -1273,7 +1273,7 @@ entry:
define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-LABEL: load_zext_2i16_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1281,7 +1281,7 @@ define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_2i16_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1289,12 +1289,12 @@ define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_2i16_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_2i16_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX-NEXT: retq
entry:
@@ -1305,26 +1305,26 @@ entry:
define <4 x i32> @load_zext_4i16_to_4i32(<4 x i16> *%ptr) {
; SSE2-LABEL: load_zext_4i16_to_4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i16_to_4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i16_to_4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_4i16_to_4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT: retq
entry:
@@ -1335,7 +1335,7 @@ entry:
define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-LABEL: load_zext_4i16_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -1345,7 +1345,7 @@ define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i16_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
@@ -1355,25 +1355,25 @@ define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i16_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_4i16_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_4i16_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_4i16_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX512-NEXT: retq
entry:
@@ -1384,7 +1384,7 @@ entry:
define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-LABEL: load_zext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -1393,7 +1393,7 @@ define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm1, %xmm0
@@ -1402,25 +1402,25 @@ define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512-NEXT: retq
entry:
@@ -1431,26 +1431,26 @@ entry:
define <2 x i64> @load_zext_2i32_to_2i64(<2 x i32> *%ptr) {
; SSE2-LABEL: load_zext_2i32_to_2i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_2i32_to_2i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_2i32_to_2i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: load_zext_2i32_to_2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX-NEXT: retq
entry:
@@ -1461,7 +1461,7 @@ entry:
define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-LABEL: load_zext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps (%rdi), %xmm1
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movaps %xmm1, %xmm0
@@ -1470,7 +1470,7 @@ define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_zext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps (%rdi), %xmm1
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: movaps %xmm1, %xmm0
@@ -1479,25 +1479,25 @@ define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_zext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_zext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_zext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_zext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX512-NEXT: retq
entry:
@@ -1508,7 +1508,7 @@ entry:
define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSE2-LABEL: zext_8i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1518,7 +1518,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand {{.*}}(%rip), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
@@ -1528,7 +1528,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1537,7 +1537,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1546,13 +1546,13 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_8i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
@@ -1563,7 +1563,7 @@ entry:
define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1571,7 +1571,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1579,7 +1579,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
@@ -1587,7 +1587,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1595,12 +1595,12 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i16_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
entry:
@@ -1611,7 +1611,7 @@ entry:
define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -1619,7 +1619,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps %xmm0, %xmm1
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -1627,7 +1627,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
@@ -1635,7 +1635,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1643,12 +1643,12 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_4i32_to_4i64:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: retq
entry:
@@ -1659,7 +1659,7 @@ entry:
define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSE2-LABEL: shuf_zext_8i8_to_8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: packuswb %xmm1, %xmm1
@@ -1671,7 +1671,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i8_to_8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand {{.*}}(%rip), %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
@@ -1681,7 +1681,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i8_to_8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1690,7 +1690,7 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i8_to_8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -1699,13 +1699,13 @@ define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i8_to_8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i8_to_8i32:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: retq
@@ -1717,7 +1717,7 @@ entry:
define <2 x i64> @shuf_zext_16i8_to_2i64_offset6(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1725,18 +1725,18 @@ define <2 x i64> @shuf_zext_16i8_to_2i64_offset6(<16 x i8> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrlq $48, %xmm0
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuf_zext_16i8_to_2i64_offset6:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
@@ -1748,7 +1748,7 @@ entry:
define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1761,14 +1761,14 @@ define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[11],zero,zero,zero,zero,zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[13],zero,zero,zero,zero,zero,zero,zero,xmm1[14],zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrldq {{.*#+}} xmm1 = xmm1[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
@@ -1778,7 +1778,7 @@ define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -1787,13 +1787,13 @@ define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_16i8_to_4i64_offset11:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: retq
@@ -1805,7 +1805,7 @@ entry:
define <2 x i64> @shuf_zext_8i16_to_2i64_offset6(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -1813,18 +1813,18 @@ define <2 x i64> @shuf_zext_8i16_to_2i64_offset6(<8 x i16> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,7],zero,zero,zero,zero,zero,zero,xmm0[8,9],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuf_zext_8i16_to_2i64_offset6:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
@@ -1836,7 +1836,7 @@ entry:
define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1846,7 +1846,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1856,7 +1856,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1865,7 +1865,7 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1874,13 +1874,13 @@ define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i16_to_4i64_offset2:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512-NEXT: retq
@@ -1892,27 +1892,27 @@ entry:
define <4 x i32> @shuf_zext_8i16_to_4i32_offset1(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuf_zext_8i16_to_4i32_offset1:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
@@ -1924,7 +1924,7 @@ entry:
define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1933,7 +1933,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: pxor %xmm2, %xmm2
@@ -1942,7 +1942,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; SSE41-NEXT: pxor %xmm2, %xmm2
@@ -1951,7 +1951,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -1960,13 +1960,13 @@ define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_8i16_to_8i32_offset3:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
@@ -1978,7 +1978,7 @@ entry:
define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1986,7 +1986,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
@@ -1994,7 +1994,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
@@ -2003,7 +2003,7 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -2013,13 +2013,13 @@ define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtabl
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_16i16_to_8i32_offset8:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: retq
@@ -2031,13 +2031,13 @@ entry:
define <2 x i64> @shuf_zext_4i32_to_2i64_offset2(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE-LABEL: shuf_zext_4i32_to_2i64_offset2:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: retq
;
; AVX-LABEL: shuf_zext_4i32_to_2i64_offset2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: retq
@@ -2049,7 +2049,7 @@ entry:
define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
; SSE2-NEXT: pand %xmm1, %xmm0
@@ -2057,7 +2057,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
; SSSE3-NEXT: pand %xmm1, %xmm0
@@ -2065,7 +2065,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
@@ -2073,7 +2073,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -2081,13 +2081,13 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
;
; AVX512-LABEL: shuf_zext_4i32_to_4i64_offset1:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: retq
@@ -2099,7 +2099,7 @@ entry:
define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSE2-LABEL: zext_32i8_to_32i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -2131,7 +2131,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_32i8_to_32i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -2163,7 +2163,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_32i8_to_32i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
@@ -2190,7 +2190,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_32i8_to_32i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -2214,7 +2214,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_32i8_to_32i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
@@ -2230,7 +2230,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: zext_32i8_to_32i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -2242,7 +2242,7 @@ define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
; SSE2-LABEL: zext_2i8_to_2i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -2253,7 +2253,7 @@ define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_2i8_to_2i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movzwl (%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2261,13 +2261,13 @@ define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_2i8_to_2i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: paddq %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: zext_2i8_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq