Diffstat (limited to 'llvm/test/CodeGen/X86/vector-mul.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-mul.ll | 90
1 file changed, 45 insertions, 45 deletions
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 0038db02940..781b1907330 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -174,55 +174,55 @@ define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
   ret <8 x i16> %1
 }
 
-define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
-; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X86: # %bb.0:
-; X86-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
-; X86-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-NEXT: pmullw %xmm2, %xmm0
-; X86-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; X86-NEXT: pand %xmm2, %xmm0
-; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm1
-; X86-NEXT: pand %xmm2, %xmm1
-; X86-NEXT: packuswb %xmm0, %xmm1
-; X86-NEXT: movdqa %xmm1, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64: # %bb.0:
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
-; X64-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-NEXT: pmullw %xmm2, %xmm0
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; X64-NEXT: pand %xmm2, %xmm0
-; X64-NEXT: pmullw {{.*}}(%rip), %xmm1
-; X64-NEXT: pand %xmm2, %xmm1
-; X64-NEXT: packuswb %xmm0, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
-;
+define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
+; X86-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-NEXT: pmullw %xmm2, %xmm0
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-NEXT: pand %xmm2, %xmm1
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64: # %bb.0:
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
+; X64-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-NEXT: pmullw %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm2, %xmm0
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm1
+; X64-NEXT: pand %xmm2, %xmm1
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
 ; X64-XOP-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
 ; X64-XOP: # %bb.0:
 ; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-XOP-NEXT: retq
-;
-; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
- %1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
- ret <16 x i8> %1
+;
+; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
+ ret <16 x i8> %1
 }
 
 ;