author | Craig Topper <craig.topper@intel.com> | 2019-07-31 19:05:03 +0000
committer | Craig Topper <craig.topper@intel.com> | 2019-07-31 19:05:03 +0000
commit | e3f0e67f2e926243f9dacb719357a9701f1c1628 (patch)
tree | de735da97daf504e695da4bd59215090ff32319d
parent | 1a7f03e716b87eb3ba4d87996416885842034aa1 (diff)
[X86] Add AVX512DQ command lines to vector-mul.ll to show that we use vpmullq instead of shift+add/sub for some cases. NFC
llvm-svn: 367465
-rw-r--r-- | llvm/test/CodeGen/X86/vector-mul.ll | 516 |
1 file changed, 405 insertions, 111 deletions
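The behavior called out in the commit message can be reproduced standalone. Below is a minimal sketch (the file layout and the function name @mul17 are hypothetical, not part of the commit; the RUN lines and expected instructions mirror the mul_v2i64_17 case in the diff that follows). Without AVX512DQ there is no legal 64-bit-element vector multiply, so a uniform multiply by 17 decomposes into a shift and an add; with -mattr=+avx512dq,+avx512vl the same multiply selects a single vpmullq with a memory operand:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=AVX512DQ

define <2 x i64> @mul17(<2 x i64> %a0) nounwind {
; 17 = 16 + 1, so the non-DQ lowering is shift-by-4 plus add.
; AVX2: vpsllq $4, %xmm0, %xmm1
; AVX2: vpaddq %xmm0, %xmm1, %xmm0
; AVX512DQ: vpmullq {{.*}}(%rip), %xmm0, %xmm0
  %r = mul <2 x i64> %a0, <i64 17, i64 17>
  ret <2 x i64> %r
}

Note the patch itself is NFC: it adds the new RUN line and the corresponding expected-output blocks to the existing test, without changing codegen.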
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index d16e1879151..d372b63f222 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-XOP
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX512DQ
 ;
 ; PowOf2 (uniform)
@@ -88,6 +89,12 @@ define <16 x i8> @mul_v16i8_32(<16 x i8> %a0) nounwind {
 ; X64-AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_32:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpsllw $5, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32>
 ret <16 x i8> %1
 }
@@ -122,6 +129,11 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
 ; X64-AVX2: # %bb.0:
 ; X64-AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_32_8:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 32, i64 8>
 ret <2 x i64> %1
 }
@@ -146,6 +158,11 @@ define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
 ; X64-AVX2: # %bb.0:
 ; X64-AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v4i32_1_2_4_8:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 4, i32 8>
 ret <4 x i32> %1
 }
@@ -170,6 +187,11 @@ define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
 ; X64-AVX2: # %bb.0:
 ; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <8 x i16> %a0, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
 ret <8 x i16> %1
 }
@@ -217,6 +239,14 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
 ; X64-AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; X64-AVX512DQ-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; X64-AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; X64-AVX512DQ-NEXT: vzeroupper
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
 ret <16 x i8> %1
 }
@@ -242,11 +272,22 @@ define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
 ; X64-NEXT: movdqa %xmm1, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_17:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpsllq $4, %xmm0, %xmm1
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_17:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpsllq $4, %xmm0, %xmm1
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_17:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsllq $4, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_17:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 17, i64 17>
 ret <2 x i64> %1
 }
@@ -272,6 +313,11 @@ define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
 ; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [17,17,17,17]
 ; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v4i32_17:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
 ret <4 x i32> %1
 }
@@ -326,6 +372,13 @@ define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
 ; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; X64-AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_17:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpsllw $4, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX512DQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17>
 ret <16 x i8> %1
 }
@@ -353,13 +406,26 @@ define <2 x i64> @mul_v2i64_neg1025(<2 x i64> %a0) nounwind {
 ; X64-NEXT: psubq %xmm1, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_neg1025:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpsllq $10, %xmm0, %xmm1
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
-; X64-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X64-AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_neg1025:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpsllq $10, %xmm0, %xmm1
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_neg1025:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsllq $10, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; X64-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_neg1025:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 -1025, i64 -1025>
 ret <2 x i64> %1
 }
@@ -385,6 +451,11 @@ define <4 x i32> @mul_v4i32_neg33(<4 x i32> %a0) nounwind {
 ; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967263,4294967263,4294967263,4294967263]
 ; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v4i32_neg33:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <4 x i32> %a0, <i32 -33, i32 -33, i32 -33, i32 -33>
 ret <4 x i32> %1
 }
@@ -445,6 +516,15 @@ define <16 x i8> @mul_v16i8_neg5(<16 x i8> %a0) nounwind {
 ; X64-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; X64-AVX2-NEXT: vpsubb %xmm0, %xmm1, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_neg5:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpsllw $2, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX512DQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; X64-AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX512DQ-NEXT: vpsubb %xmm0, %xmm1, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5, i8 -5>
 ret <16 x i8> %1
 }
@@ -476,15 +556,30 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_17_65:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,65]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_17_65:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [17,65]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_17_65:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [17,65]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_17_65:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 17, i64 65>
 ret <2 x i64> %1
 }
@@ -572,6 +667,15 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8>
 ; X64-AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; X64-AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; X64-AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; X64-AVX512DQ-NEXT: vzeroupper
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 2, i8 3, i8 9, i8 17, i8 33, i8 65, i8 129, i8 2, i8 3, i8 9, i8 17, i8 33, i8 65, i8 129, i8 2, i8 3>
 ret <16 x i8> %1
 }
@@ -597,11 +701,22 @@ define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
 ; X64-NEXT: movdqa %xmm1, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_7:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpsllq $3, %xmm0, %xmm1
-; X64-AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_7:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpsllq $3, %xmm0, %xmm1
+; X64-XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_7:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsllq $3, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_7:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 7, i64 7>
 ret <2 x i64> %1
 }
@@ -627,6 +742,11 @@ define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
 ; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
 ; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v4i32_7:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <4 x i32> %a0, <i32 7, i32 7, i32 7, i32 7>
 ret <4 x i32> %1
 }
@@ -681,6 +801,13 @@ define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
 ; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; X64-AVX2-NEXT: vpsubb %xmm0, %xmm1, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_31:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpsllw $5, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX512DQ-NEXT: vpsubb %xmm0, %xmm1, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
 ret <16 x i8> %1
 }
@@ -704,11 +831,22 @@ define <2 x i64> @mul_v2i64_neg7(<2 x i64> %a0) nounwind {
 ; X64-NEXT: psubq %xmm1, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_neg7:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpsllq $3, %xmm0, %xmm1
-; X64-AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_neg7:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vpsllq $3, %xmm0, %xmm1
+; X64-XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_neg7:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsllq $3, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_neg7:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 -7, i64 -7>
 ret <2 x i64> %1
 }
@@ -734,6 +872,11 @@ define <4 x i32> @mul_v4i32_neg63(<4 x i32> %a0) nounwind {
 ; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967233,4294967233,4294967233,4294967233]
 ; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v4i32_neg63:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <4 x i32> %a0, <i32 -63, i32 -63, i32 -63, i32 -63>
 ret <4 x i32> %1
 }
@@ -786,6 +929,13 @@ define <16 x i8> @mul_v16i8_neg15(<16 x i8> %a0) nounwind {
 ; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; X64-AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_neg15:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpsllw $4, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX512DQ-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15, i8 -15>
 ret <16 x i8> %1
 }
@@ -817,15 +967,30 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_15_63:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,63]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_15_63:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [15,63]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_15_63:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,63]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_15_63:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 15, i64 63>
 ret <2 x i64> %1
 }
@@ -859,17 +1024,34 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_neg_15_63:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_neg_15_63:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-XOP-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_neg_15_63:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX2-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_neg_15_63:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 -15, i64 -63>
 ret <2 x i64> %1
 }
@@ -903,17 +1085,34 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_neg_17_65:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_neg_17_65:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-XOP-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_neg_17_65:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX2-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_neg_17_65:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 -17, i64 -65>
 ret <2 x i64> %1
 }
@@ -941,15 +1140,30 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_0_1:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_0_1:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_0_1:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_0_1:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 0, i64 1>
 ret <2 x i64> %1
 }
@@ -984,17 +1198,34 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_neg_0_1:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_neg_0_1:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-XOP-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_neg_0_1:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX2-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_neg_0_1:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 0, i64 -1>
 ret <2 x i64> %1
 }
@@ -1029,17 +1260,34 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_15_neg_63:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,18446744073709551553]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_15_neg_63:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [15,18446744073709551553]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-XOP-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_15_neg_63:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,18446744073709551553]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX2-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_15_neg_63:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 15, i64 -63>
 ret <2 x i64> %1
 }
@@ -1130,6 +1378,15 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8>
 ; X64-AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; X64-AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; X64-AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; X64-AVX512DQ-NEXT: vzeroupper
+; X64-AVX512DQ-NEXT: retq
 %1 = mul <16 x i8> %a0, <i8 0, i8 1, i8 3, i8 7, i8 15, i8 31, i8 63, i8 127, i8 0, i8 1, i8 3, i8 7, i8 15, i8 31, i8 63, i8 127>
 ret <16 x i8> %1
 }
@@ -1157,15 +1414,30 @@ define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_68_132:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [68,132]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_68_132:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [68,132]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_68_132:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [68,132]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_68_132:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %mul = mul <2 x i64> %x, <i64 68, i64 132>
 ret <2 x i64> %mul
 }
@@ -1193,15 +1465,30 @@ define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
 ; X64-NEXT: paddq %xmm2, %xmm0
 ; X64-NEXT: retq
 ;
-; X64-AVX-LABEL: mul_v2i64_60_120:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [60,124]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; X64-AVX-NEXT: retq
+; X64-XOP-LABEL: mul_v2i64_60_120:
+; X64-XOP: # %bb.0:
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [60,124]
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_60_120:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [60,124]
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v2i64_60_120:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmullq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: retq
 %mul = mul <2 x i64> %x, <i64 60, i64 124>
 ret <2 x i64> %mul
 }
@@ -1287,6 +1574,13 @@ define <4 x i64> @mul_v4i64_zext_cross_bb(<4 x i32>* %in, <4 x i32>* %y) {
 ; X64-AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; X64-AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: mul_v4i64_zext_cross_bb:
+; X64-AVX512DQ: # %bb.0:
+; X64-AVX512DQ-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-AVX512DQ-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-AVX512DQ-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT: retq
 %a = load <4 x i32>, <4 x i32>* %in
 %b = zext <4 x i32> %a to <4 x i64>
 br label %foo