| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-10-04 13:12:08 +0000 | 
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-10-04 13:12:08 +0000 | 
| commit | bd5d2f028411ca98ebac844cf0114836b8fc1186 (patch) | |
| tree | 600b6601897dc0fadb819e6ef45f40f92d6705e2 /llvm | |
| parent | 482d3f41e5dc909c2e386075042a03256b0d88c8 (diff) | |
[X86][SSE] Add support for lowering unary shuffles to PACKSS/PACKUS
Extension to D38472
llvm-svn: 314901
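For context, the binary and unary pack patterns differ only in where the second half of each 128-bit lane of the result comes from. Below is a rough sketch of the two masks for a 128-bit v16i8 result built from v8i16 inputs; it is illustrative only, not the exact `createPackShuffleMask` output, and the wider AVX types repeat the same pattern per 128-bit lane:

```cpp
// Illustrative sketch: with the inputs viewed as v16i8, PACKSSWB/PACKUSWB keep
// the (saturated) low byte of every i16 element, first from V1, then from V2.
// The unary form simply reuses V1 for both halves of the result.
#include <array>

constexpr std::array<int, 16> BinaryPackMask = {
    0, 2, 4, 6, 8, 10, 12, 14,        // low 8 bytes: even bytes of V1
    16, 18, 20, 22, 24, 26, 28, 30};  // high 8 bytes: even bytes of V2
constexpr std::array<int, 16> UnaryPackMask = {
    0, 2, 4, 6, 8, 10, 12, 14,        // low 8 bytes: even bytes of V1
    0, 2, 4, 6, 8, 10, 12, 14};       // high 8 bytes: even bytes of V1 again
```

Because the unary case feeds the same operand to both pack inputs, the PACKSS/PACKUS legality checks (ComputeNumSignBits / MaskedValueIsZero on the bitcast input) only have to hold for that single source, which is why the patch funnels both cases through the shared LowerWithPACK lambda. In the updated tests this lets a compare-result shuffle use packssdw on a register it already holds instead of loading a pshufb constant from memory.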
Diffstat (limited to 'llvm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 36 |
| -rw-r--r-- | llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll | 37 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-compare-results.ll | 116 |
3 files changed, 100 insertions, 89 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a6cd747999c..08c944ca696 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8683,24 +8683,36 @@ static SDValue lowerVectorShuffleWithPACK(const SDLoc &DL, MVT VT,
   MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
   MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
 
-  // TODO - Add support for unary packs.
-  SmallVector<int, 32> BinaryMask;
-  createPackShuffleMask(VT, BinaryMask, false);
-
-  if (isShuffleEquivalent(V1, V2, Mask, BinaryMask)) {
-    SDValue VV1 = DAG.getBitcast(PackVT, V1);
-    SDValue VV2 = DAG.getBitcast(PackVT, V2);
-    if ((V1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
-        (V2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize))
+  auto LowerWithPACK = [&](SDValue N1, SDValue N2) {
+    SDValue VV1 = DAG.getBitcast(PackVT, N1);
+    SDValue VV2 = DAG.getBitcast(PackVT, N2);
+    if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
+        (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize))
       return DAG.getNode(X86ISD::PACKSS, DL, VT, VV1, VV2);
 
     if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
       APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
-      if ((V1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
-          (V2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask)))
+      if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
+          (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask)))
         return DAG.getNode(X86ISD::PACKUS, DL, VT, VV1, VV2);
     }
-  }
+
+    return SDValue();
+  };
+
+  // Try binary shuffle.
+  SmallVector<int, 32> BinaryMask;
+  createPackShuffleMask(VT, BinaryMask, false);
+  if (isShuffleEquivalent(V1, V2, Mask, BinaryMask))
+    if (SDValue Pack = LowerWithPACK(V1, V2))
+      return Pack;
+
+  // Try unary shuffle.
+  SmallVector<int, 32> UnaryMask;
+  createPackShuffleMask(VT, UnaryMask, true);
+  if (isShuffleEquivalent(V1, V2, Mask, UnaryMask))
+    if (SDValue Pack = LowerWithPACK(V1, V1))
+      return Pack;
 
   return SDValue();
 }
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
index dfb03e05ca9..9d2766bcac6 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -8,15 +8,10 @@
 define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
 ; SSE-LABEL: v8i64:
 ; SSE:       # BB#0:
+; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm8
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm9
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm8
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    pcmpgtq %xmm7, %xmm3
-; SSE-NEXT:    pcmpgtq %xmm6, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
-; SSE-NEXT:    pshufb %xmm3, %xmm2
 ; SSE-NEXT:    pcmpgtq %xmm5, %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -24,24 +19,28 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    pcmpgtq %xmm7, %xmm3
+; SSE-NEXT:    pcmpgtq %xmm6, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT:    packssdw %xmm2, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
 ; SSE-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[0,2],xmm11[0,2]
-; SSE-NEXT:    pshufb %xmm3, %xmm8
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm11[0,2,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; SSE-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm10
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[0,2,2,3]
 ; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm9[0,2,2,3]
-; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5,6,7]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    psllw $15, %xmm2
-; SSE-NEXT:    psraw $15, %xmm2
-; SSE-NEXT:    packsswb %xmm0, %xmm2
-; SSE-NEXT:    pmovmskb %xmm2, %eax
+; SSE-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[0,2],xmm9[0,2]
+; SSE-NEXT:    packssdw %xmm8, %xmm8
+; SSE-NEXT:    pblendw {{.*#+}} xmm8 = xmm1[0,1,2,3],xmm8[4,5,6,7]
+; SSE-NEXT:    pand %xmm2, %xmm8
+; SSE-NEXT:    psllw $15, %xmm8
+; SSE-NEXT:    psraw $15, %xmm8
+; SSE-NEXT:    packsswb %xmm0, %xmm8
+; SSE-NEXT:    pmovmskb %xmm8, %eax
 ; SSE-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-compare-results.ll b/llvm/test/CodeGen/X86/vector-compare-results.ll
index 6f03411a44d..473d54d28a6 100644
--- a/llvm/test/CodeGen/X86/vector-compare-results.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-results.ll
@@ -9345,11 +9345,6 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
 ; SSE42-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm13
 ; SSE42-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm15
 ; SSE42-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm12
-; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
-; SSE42-NEXT:    movdqa {{.*#+}} xmm7 = [0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
-; SSE42-NEXT:    pshufb %xmm7, %xmm6
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm5
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
 ; SSE42-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
@@ -9357,11 +9352,11 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
 ; SSE42-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
 ; SSE42-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm3
-; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
-; SSE42-NEXT:    pshufb %xmm7, %xmm2
+; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm7
+; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
+; SSE42-NEXT:    packssdw %xmm6, %xmm6
+; SSE42-NEXT:    pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm1
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE42-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
@@ -9369,126 +9364,131 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
 ; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE42-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; SSE42-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE42-NEXT:    packsswb %xmm4, %xmm0
+; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm2
+; SSE42-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE42-NEXT:    packssdw %xmm2, %xmm2
+; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE42-NEXT:    packsswb %xmm6, %xmm2
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm12
-; SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,8,9,2,3,10,11,8,9,12,13,10,11,14,15]
-; SSE42-NEXT:    pshufb %xmm1, %xmm12
+; SSE42-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,8,9,2,3,10,11,8,9,12,13,10,11,14,15]
+; SSE42-NEXT:    pshufb %xmm0, %xmm12
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm15
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm13
 ; SSE42-NEXT:    shufps {{.*#+}} xmm13 = xmm13[0,2],xmm15[0,2]
-; SSE42-NEXT:    pshufb %xmm7, %xmm13
+; SSE42-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
+; SSE42-NEXT:    pshufb %xmm1, %xmm13
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm14
-; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm14[0,2,2,3]
-; SSE42-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm13[4,5,6,7]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm12[0,1],xmm2[2,3,4,5,6,7]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm14[0,2,2,3]
+; SSE42-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm13[4,5,6,7]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm12[0,1],xmm3[2,3,4,5,6,7]
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm8
-; SSE42-NEXT:    pshufb %xmm1, %xmm8
+; SSE42-NEXT:    pshufb %xmm0, %xmm8
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm11
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm9
 ; SSE42-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2]
-; SSE42-NEXT:    pshufb %xmm7, %xmm9
+; SSE42-NEXT:    pshufb %xmm1, %xmm9
 ; SSE42-NEXT:    pcmpgtq {{[0-9]+}}(%rsp), %xmm10
-; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm10[0,2,2,3]
-; SSE42-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm9[4,5,6,7]
-; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm8[0,1],xmm1[2,3,4,5,6,7]
-; SSE42-NEXT:    packsswb %xmm2, %xmm1
-; SSE42-NEXT:    pextrb $15, %xmm1, %eax
+; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm10[0,2,2,3]
+; SSE42-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm9[4,5,6,7]
+; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm8[0,1],xmm0[2,3,4,5,6,7]
+; SSE42-NEXT:    packsswb %xmm3, %xmm0
+; SSE42-NEXT:    pextrb $15, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $14, %xmm1, %eax
+; SSE42-NEXT:    pextrb $14, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $13, %xmm1, %eax
+; SSE42-NEXT:    pextrb $13, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $12, %xmm1, %eax
+; SSE42-NEXT:    pextrb $12, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $11, %xmm1, %eax
+; SSE42-NEXT:    pextrb $11, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $10, %xmm1, %eax
+; SSE42-NEXT:    pextrb $10, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $9, %xmm1, %eax
+; SSE42-NEXT:    pextrb $9, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $8, %xmm1, %eax
+; SSE42-NEXT:    pextrb $8, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $7, %xmm1, %eax
+; SSE42-NEXT:    pextrb $7, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $6, %xmm1, %eax
+; SSE42-NEXT:    pextrb $6, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $5, %xmm1, %eax
+; SSE42-NEXT:    pextrb $5, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $4, %xmm1, %eax
+; SSE42-NEXT:    pextrb $4, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $3, %xmm1, %eax
+; SSE42-NEXT:    pextrb $3, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $2, %xmm1, %eax
+; SSE42-NEXT:    pextrb $2, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $1, %xmm1, %eax
+; SSE42-NEXT:    pextrb $1, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $0, %xmm1, %eax
+; SSE42-NEXT:    pextrb $0, %xmm0, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, 2(%rdi)
-; SSE42-NEXT:    pextrb $15, %xmm0, %eax
+; SSE42-NEXT:    pextrb $15, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $14, %xmm0, %eax
+; SSE42-NEXT:    pextrb $14, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $13, %xmm0, %eax
+; SSE42-NEXT:    pextrb $13, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $12, %xmm0, %eax
+; SSE42-NEXT:    pextrb $12, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $11, %xmm0, %eax
+; SSE42-NEXT:    pextrb $11, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $10, %xmm0, %eax
+; SSE42-NEXT:    pextrb $10, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $9, %xmm0, %eax
+; SSE42-NEXT:    pextrb $9, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $8, %xmm0, %eax
+; SSE42-NEXT:    pextrb $8, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $7, %xmm0, %eax
+; SSE42-NEXT:    pextrb $7, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $6, %xmm0, %eax
+; SSE42-NEXT:    pextrb $6, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $5, %xmm0, %eax
+; SSE42-NEXT:    pextrb $5, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $4, %xmm0, %eax
+; SSE42-NEXT:    pextrb $4, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $3, %xmm0, %eax
+; SSE42-NEXT:    pextrb $3, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $2, %xmm0, %eax
+; SSE42-NEXT:    pextrb $2, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $1, %xmm0, %eax
+; SSE42-NEXT:    pextrb $1, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
-; SSE42-NEXT:    pextrb $0, %xmm0, %eax
+; SSE42-NEXT:    pextrb $0, %xmm2, %eax
 ; SSE42-NEXT:    andb $1, %al
 ; SSE42-NEXT:    movb %al, (%rdi)
 ; SSE42-NEXT:    movq %rdi, %rax
```

