-rw-r--r--  llvm/test/CodeGen/X86/vector-mul.ll | 292
1 file changed, 292 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 2558eb798ef..53cd2e577ba 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -335,6 +335,152 @@ define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
 }
 
 ;
+; -(PowOf2 + 1) (uniform)
+;
+
+define <2 x i64> @mul_v2i64_neg1025(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_neg1025:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294966271,4294967295,4294966271,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
+; X86-NEXT: psrlq $32, %xmm3
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_neg1025:
+; X64: # %bb.0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709550591,18446744073709550591]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: movdqa %xmm0, %xmm3
+; X64-NEXT: psrlq $32, %xmm3
+; X64-NEXT: pmuludq %xmm1, %xmm3
+; X64-NEXT: pmuludq {{.*}}(%rip), %xmm0
+; X64-NEXT: paddq %xmm3, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_neg1025:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709550591,18446744073709550591]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 -1025, i64 -1025>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_neg33(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_neg33:
+; X86: # %bb.0:
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_neg33:
+; X64: # %bb.0:
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v4i32_neg33:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v4i32_neg33:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967263,4294967263,4294967263,4294967263]
+; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 -33, i32 -33, i32 -33, i32 -33>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_neg9(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_neg9:
+; X86: # %bb.0:
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_neg9:
+; X64: # %bb.0:
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v8i16_neg9:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 -9, i16 -9, i16 -9, i16 -9, i16 -9, i16 -9, i16 -9, i16 -9>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_neg5(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_neg5:
+; X86: # %bb.0:
+; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [251,251,251,251,251,251,251,251,251,251,251,251,251,251,251,251]
+; X86-NEXT: pmullw %xmm2, %xmm0
+; X86-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm3, %xmm0
+; X86-NEXT: pmullw %xmm2, %xmm1
+; X86-NEXT: pand %xmm3, %xmm1
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_neg5:
+; X64: # %bb.0:
+; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [251,251,251,251,251,251,251,251,251,251,251,251,251,251,251,251]
+; X64-NEXT: pmullw %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: pmullw %xmm2, %xmm1
+; X64-NEXT: pand %xmm3, %xmm1
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_neg5:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [251,251,251,251,251,251,251,251,251,251,251,251,251,251,251,251]
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm1[0,2,4,6,8,10,12,14]
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_neg5:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5>
+  ret <16 x i8> %1
+}
+
+;
 ; PowOf2 + 1 (non-uniform)
 ;
 
@@ -579,6 +725,152 @@ define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
 }
 
 ;
+; -(PowOf2 - 1) (uniform)
+;
+
+define <2 x i64> @mul_v2i64_neg7(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_neg7:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967289,4294967295,4294967289,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
+; X86-NEXT: psrlq $32, %xmm3
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_neg7:
+; X64: # %bb.0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551609,18446744073709551609]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: movdqa %xmm0, %xmm3
+; X64-NEXT: psrlq $32, %xmm3
+; X64-NEXT: pmuludq %xmm1, %xmm3
+; X64-NEXT: pmuludq {{.*}}(%rip), %xmm0
+; X64-NEXT: paddq %xmm3, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_neg7:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551609,18446744073709551609]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 -7, i64 -7>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_neg63(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_neg63:
+; X86: # %bb.0:
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_neg63:
+; X64: # %bb.0:
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v4i32_neg63:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v4i32_neg63:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967233,4294967233,4294967233,4294967233]
+; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 -63, i32 -63, i32 -63, i32 -63>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_neg31(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_neg31:
+; X86: # %bb.0:
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_neg31:
+; X64: # %bb.0:
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v8i16_neg31:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_neg15(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_neg15:
+; X86: # %bb.0:
+; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [241,241,241,241,241,241,241,241,241,241,241,241,241,241,241,241]
+; X86-NEXT: pmullw %xmm2, %xmm0
+; X86-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm3, %xmm0
+; X86-NEXT: pmullw %xmm2, %xmm1
+; X86-NEXT: pand %xmm3, %xmm1
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_neg15:
+; X64: # %bb.0:
+; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [241,241,241,241,241,241,241,241,241,241,241,241,241,241,241,241]
+; X64-NEXT: pmullw %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: pmullw %xmm2, %xmm1
+; X64-NEXT: pand %xmm3, %xmm1
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_neg15:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [241,241,241,241,241,241,241,241,241,241,241,241,241,241,241,241]
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm1[0,2,4,6,8,10,12,14]
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_neg15:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15>
+  ret <16 x i8> %1
+}
+
+;
 ; PowOf2 - 1 (non-uniform)
 ;
 
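For reference, every multiplier added here has the form -(2^N + 1) or -(2^N - 1), so a mul-by-constant combine could in principle lower these as shift + add/sub + negate instead of a real vector multiply; the FileCheck lines above pin down that, at this revision, the backend still emits a constant-pool multiply (pmuludq/pmulld/pmullw). A minimal LLVM IR sketch of the two identities involved, with hypothetical function names that are not part of the patch:

; x * -33 == -((x << 5) + x), the -(PowOf2 + 1) pattern
define <4 x i32> @mul_v4i32_neg33_expanded(<4 x i32> %a0) nounwind {
  %shl = shl <4 x i32> %a0, <i32 5, i32 5, i32 5, i32 5>
  %add = add <4 x i32> %shl, %a0              ; 33 * x
  %neg = sub <4 x i32> zeroinitializer, %add  ; -33 * x
  ret <4 x i32> %neg
}

; x * -7 == x - (x << 3), the -(PowOf2 - 1) pattern
define <2 x i64> @mul_v2i64_neg7_expanded(<2 x i64> %a0) nounwind {
  %shl = shl <2 x i64> %a0, <i64 3, i64 3>    ; 8 * x
  %sub = sub <2 x i64> %a0, %shl              ; x - 8*x == -7*x
  ret <2 x i64> %sub
}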

