| author | Craig Topper <craig.topper@intel.com> | 2017-09-28 20:10:34 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2017-09-28 20:10:34 +0000 |
| commit | ed19350293f14bce74f53d9ffbe72ba4fc5d7bd2 (patch) | |
| tree | d1aeb99a2f3163d5e55c6e84dd3433ad77c1503f /llvm | |
| parent | de22fe5b5b7dbc825494a5d7c50fb09adcc6524f (diff) | |
| download | bcm5719-llvm-ed19350293f14bce74f53d9ffbe72ba4fc5d7bd2.tar.gz bcm5719-llvm-ed19350293f14bce74f53d9ffbe72ba4fc5d7bd2.zip | |
[X86] Make use of vpmovwb when possible in LowerMULH
If we have BWI, we can truncate in a much simpler way by using vpmovwb. This even works without VLX by using the wider zmm->ymm truncate with a subvector extract.
Differential Revision: https://reviews.llvm.org/D38375
llvm-svn: 314457
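
As a rough sketch of the trick in C-level intrinsics (the function names below are illustrative, not part of the patch): AVX-512BW provides the zmm-to-ymm form of vpmovwb via `_mm512_cvtepi16_epi8`, and only the narrower ymm-to-xmm form requires VLX, so a 256-bit vector of words can still be truncated in one instruction by viewing it as the low half of a zmm and keeping the low 128 bits of the result.

```cpp
#include <immintrin.h>

// Illustrative sketch of the lowering idea, assuming AVX512BW but not
// AVX512VL (compile with e.g. -mavx512bw). Function names are hypothetical.

// v32i16 -> v32i8: a single zmm -> ymm vpmovwb.
static inline __m256i trunc_words_to_bytes_512(__m512i w) {
  return _mm512_cvtepi16_epi8(w); // vpmovwb %zmm, %ymm
}

// v16i16 -> v16i8 without VLX: treat the ymm input as the low half of a zmm
// (the upper half is undefined but unused), do the wide truncate, then keep
// only the low 128 bits -- the "subvector extract" from the commit message.
static inline __m128i trunc_words_to_bytes_256(__m256i w) {
  __m512i wide = _mm512_castsi256_si512(w);    // reinterpret, no instruction
  __m256i narrow = _mm512_cvtepi16_epi8(wide); // vpmovwb %zmm, %ymm
  return _mm256_castsi256_si128(narrow);       // low xmm holds all 16 bytes
}
```

Either form replaces the lane-fixup shuffle plus vextracti/vpackuswb sequence that the removed code below had to emit.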
Diffstat (limited to 'llvm')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 23 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll | 57 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll | 8 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll | 48 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll | 8 |
5 files changed, 80 insertions, 64 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d762c985e90..980db1012be 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -21631,17 +21631,7 @@ static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
         SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v32i16, ExA, ExB);
         Mul = DAG.getNode(ISD::SRL, dl, MVT::v32i16, Mul,
                           DAG.getConstant(8, dl, MVT::v32i16));
-        // The ymm variant of PACKUS treats the 128-bit lanes separately, so
-        // before using PACKUS we need to permute the inputs to the correct
-        // lo/hi xmm lane.
-        const int Mask[] = { 0,  1,  2,  3,  4,  5,  6,  7,
-                            16, 17, 18, 19, 20, 21, 22, 23,
-                             8,  9, 10, 11, 12, 13, 14, 15,
-                            24, 25, 26, 27, 28, 29, 30, 31};
-        Mul = DAG.getVectorShuffle(MVT::v32i16, dl, Mul, Mul, Mask);
-        Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i16, Mul, Lo);
-        Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i16, Mul, Hi);
-        return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
+        return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
       }
       SDValue ALo = extract128BitVector(A, 0, DAG, dl);
       SDValue BLo = extract128BitVector(B, 0, DAG, dl);
@@ -21671,10 +21661,13 @@ static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
     SDValue ExA = DAG.getNode(ExAVX, dl, MVT::v16i16, A);
     SDValue ExB = DAG.getNode(ExAVX, dl, MVT::v16i16, B);
     SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB);
-    SDValue MulH = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
-                               DAG.getConstant(8, dl, MVT::v16i16));
-    Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Lo);
-    Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Hi);
+    Mul = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
+                      DAG.getConstant(8, dl, MVT::v16i16));
+    // If we have BWI we can use truncate instruction.
+    if (Subtarget.hasBWI())
+      return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
+    Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, Mul, Lo);
+    Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, Mul, Hi);
     return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
   }
 
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll
index 51c47ac8e3c..4fa7f747ed4 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll
@@ -247,24 +247,42 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
 ; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
 ;
-; AVX2-LABEL: test_div7_16i8:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm1
-; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpsrlw $2, %xmm0, %xmm1
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
-; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrlw $7, %xmm0, %xmm0
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; AVX2NOBW-LABEL: test_div7_16i8:
+; AVX2NOBW:       # BB#0:
+; AVX2NOBW-NEXT:    vpmovsxbw %xmm0, %ymm1
+; AVX2NOBW-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2NOBW-NEXT:    vpsrlw $2, %xmm0, %xmm1
+; AVX2NOBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2NOBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX2NOBW-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT:    vpsrlw $7, %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2NOBW-NEXT:    vzeroupper
+; AVX2NOBW-NEXT:    retq
+;
+; AVX512BW-LABEL: test_div7_16i8:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm1
+; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    vpsrlw $2, %xmm0, %xmm1
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpsrlw $7, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %res = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
   ret <16 x i8> %res
 }
@@ -618,8 +636,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm1
 ; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpsrlw $8, %ymm1, %ymm1
-; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpaddb %xmm0, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpsrlw $2, %xmm1, %xmm2
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index 665b7fffadc..28e7194bdc4 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -231,9 +231,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
 ; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm1
 ; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT:    vpsrlw $2, %ymm0, %ymm1
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
@@ -560,9 +558,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
 ; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm1
 ; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpaddb %ymm0, %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpsrlw $2, %ymm1, %ymm2
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
index dbf7144d0af..ede9c9fe9bd 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -230,21 +230,36 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
 ; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
-; AVX2-LABEL: test_div7_16i8:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $2, %xmm0, %xmm0
-; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; AVX2NOBW-LABEL: test_div7_16i8:
+; AVX2NOBW:       # BB#0:
+; AVX2NOBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2NOBW-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vpsrlw $2, %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT:    vzeroupper
+; AVX2NOBW-NEXT:    retq
+;
+; AVX512BW-LABEL: test_div7_16i8:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpsrlw $2, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %res = udiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
   ret <16 x i8> %res
 }
@@ -586,8 +601,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpsrlw $8, %ymm1, %ymm1
-; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpsubb %xmm1, %xmm0, %xmm2
 ; AVX512BW-NEXT:    vpsrlw $1, %xmm2, %xmm2
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
index 1ed18c878b8..e2a7f7cf16d 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -234,9 +234,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
@@ -563,9 +561,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
 ; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
 ; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %zmm1, %zmm1
 ; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
 ; AVX512BW-NEXT:    vpsubb %ymm1, %ymm0, %ymm2
 ; AVX512BW-NEXT:    vpsrlw $1, %ymm2, %ymm2
 ; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
```

