| author | Craig Topper <craig.topper@intel.com> | 2017-09-27 06:04:55 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2017-09-27 06:04:55 +0000 |
| commit | 177a3923ce5a44531c861b1dd3b1ab23edd3c473 (patch) | |
| tree | a56ad2dca7fe003f1b6869ec787d271e8f3c989b /llvm | |
| parent | 97f34e887fe295834754f9b0a29e3b28b67c8da9 (diff) | |
| download | bcm5719-llvm-177a3923ce5a44531c861b1dd3b1ab23edd3c473.tar.gz bcm5719-llvm-177a3923ce5a44531c861b1dd3b1ab23edd3c473.zip | |
[X86] Use extract128BitVector in LowerMULH so we can extract from constant build vectors.
llvm-svn: 314274
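Note on the test deltas below: once LowerMULH extracts the 128-bit halves of the magic-constant build vector directly, the sign/zero-extend of that constant half folds away, so the multiplier is emitted as a ready-made 16-bit word splat instead of a byte splat that had to be extended at run time. A minimal standalone sketch (my own code, not LLVM's) of the constant arithmetic:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Signed case (sdiv by 7): the byte 147 reinterpreted as i8 is -109, and
  // sign-extending -109 to 16 bits gives 0xFF93 = 65427 -- the word splat the
  // updated sdiv tests check for.
  unsigned SExt = static_cast<uint16_t>(
      static_cast<int16_t>(static_cast<int8_t>(147)));
  std::printf("sign-extend(147) = %u\n", SExt);  // 65427

  // Unsigned case (udiv by 7): 37 is non-negative, so extending it changes
  // nothing; the udiv tests simply switch from a 32 x i8 splat of 37 to a
  // 16 x i16 splat of 37.
  unsigned ZExt = static_cast<uint16_t>(static_cast<uint8_t>(37));
  std::printf("zero-extend(37)  = %u\n", ZExt);  // 37
  return 0;
}
```

This is why the sdiv checks switch from a `[147 x 32]` byte splat plus `vextracti128`/`vpmovsxbw` to a `[65427 x 16]` word splat, and the udiv checks from `[37 x 32]` to `[37 x 16]`.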
Diffstat (limited to 'llvm')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 11 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll | 57 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll | 176 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll | 57 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll | 132 |
5 files changed, 202 insertions, 231 deletions
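The four test files exercise signed and unsigned division (and remainder) by 7 on `<32 x i8>` and `<64 x i8>`. For reference when reading the hunks below, here is a hedged scalar model of the per-byte sequences the AVX2/AVX-512 code implements (identifiers are mine, not from the tests); the rem7 variants additionally multiply the quotient by 7 and subtract it from the input:

```cpp
// Illustrative scalar model only -- not code from LLVM or the tests.
#include <cstdint>

// Signed sdiv by 7: multiply by the magic byte 147 (-109), keep the high
// byte of the 16-bit product, add the dividend back, then arithmetic-shift
// by 2 and add the sign bit. The vector code emulates the byte arithmetic
// shift with the psrlw/pand/pxor/psubb pattern visible in the hunks.
int8_t SDiv7(int8_t X) {
  int Hi = (X * -109) >> 8;   // high byte of the product (vpmullw + vpsrlw $8)
  int T = Hi + X;             // vpaddb
  return static_cast<int8_t>((T >> 2) + ((T >> 7) & 1));
}

// Unsigned udiv by 7: multiply by 37, keep the high byte, then the classic
// "subtract, halve, add back, shift by 2" fixup (vpsubb, vpsrlw $1, vpaddb,
// vpsrlw $2 with byte masks).
uint8_t UDiv7(uint8_t X) {
  unsigned Hi = (X * 37u) >> 8;   // high byte of the product
  unsigned T = (X - Hi) >> 1;
  return static_cast<uint8_t>((T + Hi) >> 2);
}
```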
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 23c0ce32432..5c9534c6fd2 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -21634,14 +21634,15 @@ static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget, // AVX2 implementations - extend xmm subvectors to ymm. if (Subtarget.hasInt256()) { + unsigned NumElems = VT.getVectorNumElements(); SDValue Lo = DAG.getIntPtrConstant(0, dl); - SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl); + SDValue Hi = DAG.getIntPtrConstant(NumElems / 2, dl); if (VT == MVT::v32i8) { - SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Lo); - SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Lo); - SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Hi); - SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Hi); + SDValue ALo = extract128BitVector(A, 0, DAG, dl); + SDValue BLo = extract128BitVector(B, 0, DAG, dl); + SDValue AHi = extract128BitVector(A, NumElems / 2, DAG, dl); + SDValue BHi = extract128BitVector(B, NumElems / 2, DAG, dl); ALo = DAG.getNode(ExSSE41, dl, MVT::v16i16, ALo); BLo = DAG.getNode(ExSSE41, dl, MVT::v16i16, BLo); AHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, AHi); diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll index 5e38c575525..50af0d42f89 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll @@ -204,19 +204,16 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind { ; ; AVX2-LABEL: test_div7_32i8: ; AVX2: # BB#0: -; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3 -; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2 -; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3 -; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427] +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 -; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3 +; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm1 @@ -500,19 +497,16 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; ; AVX2NOBW-LABEL: test_rem7_32i8: ; AVX2NOBW: # BB#0: -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2NOBW-NEXT: vpmovsxbw %xmm3, %ymm3 -; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 -; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2NOBW-NEXT: 
vextracti128 $1, %ymm0, %xmm1 ; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm3 -; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm3, %ymm1 +; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427] +; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1 -; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3] -; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm3 +; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3] +; AVX2NOBW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpaddb %ymm0, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm2 @@ -544,19 +538,16 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; ; AVX512BW-LABEL: test_rem7_32i8: ; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] -; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512BW-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX512BW-NEXT: vpmovsxbw %xmm3, %ymm3 -; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 -; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm3 -; AVX512BW-NEXT: vpmullw %ymm1, %ymm3, %ymm1 +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427] +; AVX512BW-NEXT: vpmullw %ymm2, %ymm1, %ymm1 ; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1 -; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3] -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm3 +; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3] +; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX512BW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 ; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm1 ; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm2 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll index c582ebf2e61..b3808bac79a 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll @@ -128,48 +128,45 @@ define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind { define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind { ; AVX512F-LABEL: test_div7_64i8: ; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] -; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX512F-NEXT: vpmovsxbw %xmm3, %ymm3 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm4 -; AVX512F-NEXT: vpmovsxbw %xmm4, %ymm4 +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427] +; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm4 ; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4 ; 
AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 -; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm5 -; AVX512F-NEXT: vpmullw %ymm2, %ymm5, %ymm5 -; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm5[2,3],ymm4[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4 -; AVX512F-NEXT: vpackuswb %ymm6, %ymm4, %ymm4 -; AVX512F-NEXT: vpaddb %ymm0, %ymm4, %ymm0 -; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm2[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX512F-NEXT: vpackuswb %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0 +; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 ; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; AVX512F-NEXT: vpand %ymm6, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] -; AVX512F-NEXT: vpxor %ymm7, %ymm0, %ymm0 -; AVX512F-NEXT: vpsubb %ymm7, %ymm0, %ymm0 -; AVX512F-NEXT: vpaddb %ymm4, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm4 -; AVX512F-NEXT: vpmovsxbw %xmm4, %ymm4 -; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm3 -; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 -; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm4 -; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX512F-NEXT: vpxor %ymm6, %ymm0, %ymm0 +; AVX512F-NEXT: vpsubb %ymm6, %ymm0, %ymm0 +; AVX512F-NEXT: vpaddb %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2 +; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpackuswb %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm7 +; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm3 +; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm3[2,3],ymm2[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpackuswb %ymm7, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 ; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm1 -; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1 -; AVX512F-NEXT: vpxor %ymm7, %ymm1, %ymm1 -; AVX512F-NEXT: vpsubb %ymm7, %ymm1, %ymm1 +; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1 +; AVX512F-NEXT: vpxor %ymm6, %ymm1, %ymm1 +; AVX512F-NEXT: vpsubb %ymm6, %ymm1, %ymm1 ; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; @@ -1057,71 +1054,68 @@ define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind { define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-LABEL: test_rem7_64i8: ; AVX512F: # 
BB#0: -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] -; AVX512F-NEXT: vextracti128 $1, %ymm3, %xmm2 -; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm4 -; AVX512F-NEXT: vpmovsxbw %xmm4, %ymm4 +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427] +; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm3 +; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm4 ; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 -; AVX512F-NEXT: vpmovsxbw %xmm3, %ymm3 -; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm5 -; AVX512F-NEXT: vpmullw %ymm3, %ymm5, %ymm5 -; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm5[2,3],ymm4[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4 -; AVX512F-NEXT: vpackuswb %ymm6, %ymm4, %ymm4 -; AVX512F-NEXT: vpaddb %ymm0, %ymm4, %ymm4 -; AVX512F-NEXT: vpsrlw $7, %ymm4, %ymm6 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX512F-NEXT: vpand %ymm10, %ymm6, %ymm8 -; AVX512F-NEXT: vpsrlw $2, %ymm4, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] -; AVX512F-NEXT: vpxor %ymm7, %ymm4, %ymm4 -; AVX512F-NEXT: vpsubb %ymm7, %ymm4, %ymm4 -; AVX512F-NEXT: vpaddb %ymm8, %ymm4, %ymm8 -; AVX512F-NEXT: vpmovsxbw %xmm8, %ymm9 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512F-NEXT: vpmullw %ymm4, %ymm9, %ymm9 -; AVX512F-NEXT: vpmovsxwd %ymm9, %zmm9 -; AVX512F-NEXT: vpmovdb %zmm9, %xmm9 -; AVX512F-NEXT: vextracti128 $1, %ymm8, %xmm5 -; AVX512F-NEXT: vpmovsxbw %xmm5, %ymm5 -; AVX512F-NEXT: vpmullw %ymm4, %ymm5, %ymm5 -; AVX512F-NEXT: vpmovsxwd %ymm5, %zmm5 -; AVX512F-NEXT: vpmovdb %zmm5, %xmm5 -; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm9, %ymm5 -; AVX512F-NEXT: vpsubb %ymm5, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm5 -; AVX512F-NEXT: vpmovsxbw %xmm5, %ymm5 -; AVX512F-NEXT: vpmullw %ymm2, %ymm5, %ymm2 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm3[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 +; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3 +; AVX512F-NEXT: vpaddb %ymm0, %ymm3, %ymm3 +; AVX512F-NEXT: vpsrlw $7, %ymm3, %ymm5 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX512F-NEXT: vpand %ymm4, %ymm5, %ymm7 +; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX512F-NEXT: vpxor %ymm6, %ymm3, %ymm3 +; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3 +; AVX512F-NEXT: vpaddb %ymm7, %ymm3, %ymm7 +; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm8 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; AVX512F-NEXT: 
vpmullw %ymm3, %ymm8, %ymm8 +; AVX512F-NEXT: vpmovsxwd %ymm8, %zmm8 +; AVX512F-NEXT: vpmovdb %zmm8, %xmm8 +; AVX512F-NEXT: vextracti128 $1, %ymm7, %xmm7 +; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm7 +; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm7 +; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm7 +; AVX512F-NEXT: vpmovdb %zmm7, %xmm7 +; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7 +; AVX512F-NEXT: vpsubb %ymm7, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm7 +; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm7 +; AVX512F-NEXT: vpmullw %ymm2, %ymm7, %ymm7 +; AVX512F-NEXT: vpsrlw $8, %ymm7, %ymm7 +; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm8 +; AVX512F-NEXT: vpmullw %ymm2, %ymm8, %ymm2 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 -; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm5 -; AVX512F-NEXT: vpmullw %ymm3, %ymm5, %ymm3 -; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm3[2,3],ymm2[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 -; AVX512F-NEXT: vpackuswb %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm2[2,3],ymm7[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm2, %ymm2 +; AVX512F-NEXT: vpackuswb %ymm8, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm2 -; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm3 -; AVX512F-NEXT: vpand %ymm10, %ymm3, %ymm3 +; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm7 +; AVX512F-NEXT: vpand %ymm4, %ymm7, %ymm4 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 -; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2 -; AVX512F-NEXT: vpxor %ymm7, %ymm2, %ymm2 -; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2 -; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm3 -; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3 -; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3 -; AVX512F-NEXT: vpmovdb %zmm3, %xmm3 +; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vpxor %ymm6, %ymm2, %ymm2 +; AVX512F-NEXT: vpsubb %ymm6, %ymm2, %ymm2 +; AVX512F-NEXT: vpaddb %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4 +; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4 +; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4 +; AVX512F-NEXT: vpmovdb %zmm4, %xmm4 ; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2 ; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 ; AVX512F-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 +; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll index 0a2079f0266..d65c5b2c24c 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -210,19 +210,16 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind { ; ; AVX2-LABEL: test_div7_32i8: ; AVX2: # BB#0: -; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = 
xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero -; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2 -; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 -; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3] -; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0 @@ -509,19 +506,16 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; ; AVX2NOBW-LABEL: test_rem7_32i8: ; AVX2NOBW: # BB#0: -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero -; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero -; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 -; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm3, %ymm1 +; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1 -; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = 
ymm1[2,3],ymm2[2,3] -; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3] +; AVX2NOBW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm2 ; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2 @@ -550,19 +544,16 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; ; AVX512BW-LABEL: test_rem7_32i8: ; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] -; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero -; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 -; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512BW-NEXT: vpmullw %ymm1, %ymm3, %ymm1 +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; AVX512BW-NEXT: vpmullw %ymm2, %ymm1, %ymm1 ; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1 -; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3] -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3] +; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX512BW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 ; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm2 ; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll index a8ddf75fd8b..262c1dd16ca 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -139,44 +139,41 @@ define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind { define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind { ; 
AVX512F-LABEL: test_div7_64i8: ; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] -; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm4 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512F-NEXT: vpmullw %ymm2, %ymm5, %ymm5 -; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm5[2,3],ymm4[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4 -; AVX512F-NEXT: vpackuswb %ymm6, %ymm4, %ymm4 -; AVX512F-NEXT: vpsubb %ymm4, %ymm0, %ymm0 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm2[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX512F-NEXT: vpackuswb %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vpsubb %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0 -; AVX512F-NEXT: vpaddb %ymm4, %ymm0, %ymm0 -; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm6 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = 
xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero +; AVX512F-NEXT: vpaddb %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm0 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm5 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero +; AVX512F-NEXT: vpmullw %ymm3, %ymm5, %ymm5 +; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512F-NEXT: vpmullw %ymm3, %ymm6, %ymm3 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512F-NEXT: vpmullw %ymm2, %ymm6, %ymm2 -; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],ymm3[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2 -; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm1 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm3[2,3],ymm5[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm3, %ymm3 +; AVX512F-NEXT: vpackuswb %ymm6, %ymm3, %ymm3 +; AVX512F-NEXT: vpsubb %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1 -; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1 -; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1 -; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1 +; AVX512F-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm1 +; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: test_div7_64i8: @@ -946,56 +943,53 @@ define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind { define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-LABEL: test_rem7_64i8: ; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] -; AVX512F-NEXT: vextracti128 $1, %ymm3, %xmm2 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm4 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero -; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4 -; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm5 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = 
xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm3[2,3],ymm5[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm3, %ymm3 -; AVX512F-NEXT: vpackuswb %ymm6, %ymm3, %ymm3 -; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm5 -; AVX512F-NEXT: vpsrlw $1, %ymm5, %ymm6 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6 -; AVX512F-NEXT: vpaddb %ymm3, %ymm6, %ymm3 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4 +; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm3[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 +; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3 +; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm4 +; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm5 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; AVX512F-NEXT: vpand %ymm4, %ymm5, %ymm5 +; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm7 -; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm8 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm6 +; AVX512F-NEXT: vpmovsxbw %xmm6, %ymm7 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512F-NEXT: vpmullw %ymm3, %ymm8, %ymm8 -; AVX512F-NEXT: vpmovsxwd %ymm8, %zmm8 -; AVX512F-NEXT: vpmovdb %zmm8, %xmm8 -; AVX512F-NEXT: vextracti128 $1, %ymm7, %xmm7 -; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm7 ; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm7 ; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm7 ; AVX512F-NEXT: vpmovdb %zmm7, %xmm7 -; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7 -; AVX512F-NEXT: vpsubb %ymm7, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm7 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm7 = 
xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero,xmm7[8],zero,xmm7[9],zero,xmm7[10],zero,xmm7[11],zero,xmm7[12],zero,xmm7[13],zero,xmm7[14],zero,xmm7[15],zero +; AVX512F-NEXT: vextracti128 $1, %ymm6, %xmm6 +; AVX512F-NEXT: vpmovsxbw %xmm6, %ymm6 +; AVX512F-NEXT: vpmullw %ymm3, %ymm6, %ymm6 +; AVX512F-NEXT: vpmovsxwd %ymm6, %zmm6 +; AVX512F-NEXT: vpmovdb %zmm6, %xmm6 +; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6 +; AVX512F-NEXT: vpsubb %ymm6, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm6 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero +; AVX512F-NEXT: vpmullw %ymm2, %ymm6, %ymm6 +; AVX512F-NEXT: vpsrlw $8, %ymm6, %ymm6 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512F-NEXT: vpmullw %ymm2, %ymm7, %ymm2 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512F-NEXT: vpmullw %ymm4, %ymm7, %ymm4 -; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm4[2,3],ymm2[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm2[2,3],ymm6[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm2, %ymm2 ; AVX512F-NEXT: vpackuswb %ymm7, %ymm2, %ymm2 -; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm4 -; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm4 -; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm6 +; AVX512F-NEXT: vpsrlw $1, %ymm6, %ymm6 +; AVX512F-NEXT: vpand %ymm4, %ymm6, %ymm4 ; AVX512F-NEXT: vpaddb %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 -; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2 ; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4 ; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4 ; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4 |