Diffstat (limited to 'llvm/test/CodeGen/X86/pmul.ll')
-rw-r--r--  llvm/test/CodeGen/X86/pmul.ll | 160
1 file changed, 80 insertions(+), 80 deletions(-)
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 249e20ce502..76b5b508711 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -7,7 +7,7 @@
define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul_v16i8c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm1
@@ -23,7 +23,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v16i8c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -38,7 +38,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v16i8c:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -50,7 +50,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v16i8c:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
@@ -59,7 +59,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8c:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
@@ -73,12 +73,12 @@ entry:
define <8 x i16> @mul_v8i16c(<8 x i16> %i) nounwind {
; SSE-LABEL: mul_v8i16c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v8i16c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -88,7 +88,7 @@ entry:
define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
; SSE2-LABEL: mul_v4i32c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -99,12 +99,12 @@ define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [117,117,117,117]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -115,7 +115,7 @@ entry:
define <2 x i64> @mul_v2i64c(<2 x i64> %i) nounwind {
; SSE-LABEL: mul_v2i64c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [117,117]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
@@ -126,7 +126,7 @@ define <2 x i64> @mul_v2i64c(<2 x i64> %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [117,117]
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -141,7 +141,7 @@ entry:
define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-LABEL: mul_v16i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -161,7 +161,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v16i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm1, %xmm3
; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; SSE41-NEXT: pmullw %xmm3, %xmm2
@@ -178,7 +178,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v16i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -191,7 +191,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v16i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -201,7 +201,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -216,12 +216,12 @@ entry:
define <8 x i16> @mul_v8i16(<8 x i16> %i, <8 x i16> %j) nounwind {
; SSE-LABEL: mul_v8i16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v8i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -231,7 +231,7 @@ entry:
define <4 x i32> @mul_v4i32(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -242,12 +242,12 @@ define <4 x i32> @mul_v4i32(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -257,7 +257,7 @@ entry:
define <2 x i64> @mul_v2i64(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
@@ -271,7 +271,7 @@ define <2 x i64> @mul_v2i64(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $32, %xmm1, %xmm3
@@ -290,7 +290,7 @@ declare void @foo()
define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32spill:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: subq $40, %rsp
; SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
@@ -308,7 +308,7 @@ define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32spill:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: subq $40, %rsp
; SSE41-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE41-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
@@ -319,7 +319,7 @@ define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32spill:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -337,7 +337,7 @@ entry:
define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64spill:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $40, %rsp
; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
@@ -358,7 +358,7 @@ define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64spill:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -384,7 +384,7 @@ entry:
define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-LABEL: mul_v32i8c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -410,7 +410,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v32i8c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm4, %xmm2
@@ -434,7 +434,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v32i8c:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
@@ -454,7 +454,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v32i8c:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
@@ -469,7 +469,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v32i8c:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
@@ -481,14 +481,14 @@ entry:
define <16 x i16> @mul_v16i16c(<16 x i16> %i) nounwind {
; SSE-LABEL: mul_v16i16c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v16i16c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -498,7 +498,7 @@ entry:
define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
; SSE2-LABEL: mul_v8i32c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
@@ -515,14 +515,14 @@ define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i32c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v8i32c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastd {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117]
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
@@ -533,7 +533,7 @@ entry:
define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
; SSE-LABEL: mul_v4i64c:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
@@ -550,7 +550,7 @@ define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v4i64c:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [117,117,117,117]
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
@@ -565,7 +565,7 @@ entry:
define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-LABEL: mul_v32i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
@@ -600,7 +600,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v32i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm2, %xmm5
; SSE41-NEXT: pmovsxbw %xmm0, %xmm4
; SSE41-NEXT: pmullw %xmm5, %xmm4
@@ -629,7 +629,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v32i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -651,7 +651,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v32i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm2
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm3
; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm2
@@ -668,7 +668,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v32i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
@@ -681,13 +681,13 @@ entry:
define <16 x i16> @mul_v16i16(<16 x i16> %i, <16 x i16> %j) nounwind {
; SSE-LABEL: mul_v16i16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v16i16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -697,7 +697,7 @@ entry:
define <8 x i32> @mul_v8i32(<8 x i32> %i, <8 x i32> %j) nounwind {
; SSE2-LABEL: mul_v8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -715,13 +715,13 @@ define <8 x i32> @mul_v8i32(<8 x i32> %i, <8 x i32> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v8i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
@@ -731,7 +731,7 @@ entry:
define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind {
; SSE-LABEL: mul_v4i64:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm2, %xmm4
@@ -755,7 +755,7 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind {
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v4i64:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -772,7 +772,7 @@ entry:
define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-LABEL: mul_v64i8c:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
@@ -818,7 +818,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v64i8c:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
@@ -860,7 +860,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v64i8c:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
@@ -894,7 +894,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v64i8c:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
@@ -919,7 +919,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v64i8c:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
@@ -937,7 +937,7 @@ entry:
define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-LABEL: mul_v64i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm4, %xmm8
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm8
@@ -1002,7 +1002,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v64i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
@@ -1055,7 +1055,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v64i8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT: vpmovsxbw %xmm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
@@ -1094,7 +1094,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v64i8:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm5
; AVX512F-NEXT: vpmullw %ymm4, %ymm5, %ymm4
@@ -1124,7 +1124,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v64i8:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm2
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm3
; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2
@@ -1145,7 +1145,7 @@ entry:
; PR30845
define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
@@ -1160,7 +1160,7 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
@@ -1173,7 +1173,7 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_upper:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
@@ -1192,7 +1192,7 @@ entry:
define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper_left:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
@@ -1213,7 +1213,7 @@ define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper_left:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
@@ -1233,7 +1233,7 @@ define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_upper_left:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
@@ -1254,7 +1254,7 @@ entry:
define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_lower:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
@@ -1270,7 +1270,7 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_lower:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1284,7 +1284,7 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_lower:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
@@ -1304,7 +1304,7 @@ entry:
define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_zero_upper:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm6, %xmm6
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
@@ -1329,7 +1329,7 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_zero_upper:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
@@ -1351,7 +1351,7 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_zero_upper:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1365,7 +1365,7 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_zero_upper:
-; AVX512: # BB#0: # %entry
+; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
@@ -1384,7 +1384,7 @@ entry:
define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_sext:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
@@ -1465,7 +1465,7 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_sext:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -1487,7 +1487,7 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxwq %xmm2, %ymm2
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0
@@ -1500,7 +1500,7 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
; AVX512-NEXT: vpmuldq %zmm1, %zmm0, %zmm0