| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-03-28 21:33:52 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-03-28 21:33:52 +0000 |
| commit | d3df400fa9d20ea11c1c0b1296ce4299d5b82b86 | |
| tree | ccd598b5a80d0a7a89d1a2299719f3025c50ad47 /llvm | |
| parent | c2feac75fd3f3ef3f64465e498848699ff43286f | |
[X86][SSE] Vectorize a bit (AND/XOR/OR) op if a BUILD_VECTOR has the same op for all its scalar elements.
If all of a BUILD_VECTOR's source elements are the same bit (AND/XOR/OR) operation type and each has one constant operand, lower to a pair of BUILD_VECTORs and apply the bit operation to the two vectors.
The constant operands will form a constant vector, meaning that we still only have a single BUILD_VECTOR to lower and we will have replaced all the scalarized operations with a single SSE equivalent.
It's not in our interest to grow this into a general-purpose vectorizer, but I'm seeing enough of these scalar bit operations coming out of the later legalization/scalarization stages to warrant at least this basic support.
Differential Revision: http://reviews.llvm.org/D18492
llvm-svn: 264666
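
To make the motivating pattern concrete, here is a minimal, hypothetical IR example (the function name @mask_elements and the constants are invented for illustration; it is not one of this patch's tests). Every element of the built vector applies the same bit operation (AND) with a constant right-hand operand, which is the shape the new lowering looks for.

```llvm
; Hypothetical example: four scalar ANDs, each against a constant, feeding a
; single built vector. With this change the BUILD_VECTOR lowering should form
;   (and (build_vector %a, %b, %c, %d), (build_vector 1, 2, 4, 8))
; so an SSE target can emit one pand against a constant-pool vector instead of
; four scalar andl instructions followed by the vector build.
define <4 x i32> @mask_elements(i32 %a, i32 %b, i32 %c, i32 %d) {
  %a1 = and i32 %a, 1
  %b1 = and i32 %b, 2
  %c1 = and i32 %c, 4
  %d1 = and i32 %d, 8
  %v0 = insertelement <4 x i32> undef, i32 %a1, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %b1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %c1, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %d1, i32 3
  ret <4 x i32> %v3
}
```

Whether this exact function reaches LowerBUILD_VECTOR in this form depends on the earlier combines, but the updated tests below (pr15267.ll, vector-sext.ll, vector-lzcnt-256.ll, vector-pcmp.ll) show the scalarized AND/XOR sequences that motivated the change now folding into a single vpand/vpxor.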
Diffstat (limited to 'llvm')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 50 |
| -rw-r--r-- | llvm/test/CodeGen/X86/pr15267.ll | 16 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-lzcnt-256.ll | 260 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-pcmp.ll | 19 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-sext.ll | 38 |
5 files changed, 169 insertions, 214 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 98c020c655f..4f8719d1213 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6625,6 +6625,54 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
   return SDValue();
 }
 
+/// If a BUILD_VECTOR's source elements all apply the same bit operation and
+/// one of their operands is constant, lower to a pair of BUILD_VECTOR and
+/// just apply the bit to the vectors.
+/// NOTE: Its not in our interest to start make a general purpose vectorizer
+/// from this, but enough scalar bit operations are created from the later
+/// legalization + scalarization stages to need basic support.
+static SDValue lowerBuildVectorToBitOp(SDValue Op, SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  unsigned NumElems = VT.getVectorNumElements();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+  // Check that all elements have the same opcode.
+  // TODO: Should we allow UNDEFS and if so how many?
+  unsigned Opcode = Op.getOperand(0).getOpcode();
+  for (unsigned i = 1; i < NumElems; ++i)
+    if (Opcode != Op.getOperand(i).getOpcode())
+      return SDValue();
+
+  // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
+  switch (Opcode) {
+  default:
+    return SDValue();
+  case ISD::AND:
+  case ISD::XOR:
+  case ISD::OR:
+    if (!TLI.isOperationLegalOrPromote(Opcode, VT))
+      return SDValue();
+    break;
+  }
+
+  SmallVector<SDValue, 4> LHSElts, RHSElts;
+  for (SDValue Elt : Op->ops()) {
+    SDValue LHS = Elt.getOperand(0);
+    SDValue RHS = Elt.getOperand(1);
+
+    // We expect the canonicalized RHS operand to be the constant.
+    if (!isa<ConstantSDNode>(RHS))
+      return SDValue();
+    LHSElts.push_back(LHS);
+    RHSElts.push_back(RHS);
+  }
+
+  SDValue LHS = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, LHSElts);
+  SDValue RHS = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, RHSElts);
+  return DAG.getNode(Opcode, DL, VT, LHS, RHS);
+}
+
 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
 /// functionality to do this, so it's all zeros, all ones, or some derivation
 /// that is cheap to calculate.
@@ -6679,6 +6727,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
     return HorizontalOp;
   if (SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG))
     return Broadcast;
+  if (SDValue BitOp = lowerBuildVectorToBitOp(Op, DAG))
+    return BitOp;
 
   unsigned EVTBits = ExtVT.getSizeInBits();
 
diff --git a/llvm/test/CodeGen/X86/pr15267.ll b/llvm/test/CodeGen/X86/pr15267.ll
index 9fc754aa112..d62aaf90587 100644
--- a/llvm/test/CodeGen/X86/pr15267.ll
+++ b/llvm/test/CodeGen/X86/pr15267.ll
@@ -7,18 +7,14 @@ define <4 x i3> @test1(<4 x i3>* %in) nounwind {
 ; CHECK-NEXT:    movzwl (%rdi), %eax
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    shrl $3, %ecx
-; CHECK-NEXT:    andl $7, %ecx
-; CHECK-NEXT:    movl %eax, %edx
-; CHECK-NEXT:    andl $7, %edx
-; CHECK-NEXT:    vmovd %edx, %xmm0
+; CHECK-NEXT:    vmovd %eax, %xmm0
 ; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    shrl $6, %ecx
-; CHECK-NEXT:    andl $7, %ecx
 ; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
 ; CHECK-NEXT:    shrl $9, %eax
-; CHECK-NEXT:    andl $7, %eax
 ; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; CHECK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %ret = load <4 x i3>, <4 x i3>* %in, align 1
   ret <4 x i3> %ret
@@ -30,18 +26,14 @@ define <4 x i1> @test2(<4 x i1>* %in) nounwind {
 ; CHECK-NEXT:    movzbl (%rdi), %eax
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    shrl %ecx
-; CHECK-NEXT:    andl $1, %ecx
-; CHECK-NEXT:    movl %eax, %edx
-; CHECK-NEXT:    andl $1, %edx
-; CHECK-NEXT:    vmovd %edx, %xmm0
+; CHECK-NEXT:    vmovd %eax, %xmm0
 ; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    shrl $2, %ecx
-; CHECK-NEXT:    andl $1, %ecx
 ; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
 ; CHECK-NEXT:    shrl $3, %eax
-; CHECK-NEXT:    andl $1, %eax
 ; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; CHECK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %ret = load <4 x i1>, <4 x i1>* %in, align 1
   ret <4 x i1> %ret
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
index e81b701c9d5..7b8ae963f93 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -12,25 +12,24 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
 ; AVX1-NEXT:    bsrq %rax, %rax
 ; AVX1-NEXT:    movl $127, %ecx
 ; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    xorq $63, %rax
 ; AVX1-NEXT:    vmovq %rax, %xmm2
 ; AVX1-NEXT:    vmovq %xmm1, %rax
 ; AVX1-NEXT:    bsrq %rax, %rax
 ; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    xorq $63, %rax
 ; AVX1-NEXT:    vmovq %rax, %xmm1
 ; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX1-NEXT:    bsrq %rax, %rax
 ; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    xorq $63, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm2
+; AVX1-NEXT:    vmovq %rax, %xmm3
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    bsrq %rax, %rax
 ; AVX1-NEXT:    cmoveq %rcx, %rax
-; AVX1-NEXT:    xorq $63, %rax
 ; AVX1-NEXT:    vmovq %rax, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -41,25 
+40,24 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {  ; AVX2-NEXT:    bsrq %rax, %rax  ; AVX2-NEXT:    movl $127, %ecx  ; AVX2-NEXT:    cmoveq %rcx, %rax -; AVX2-NEXT:    xorq $63, %rax  ; AVX2-NEXT:    vmovq %rax, %xmm2  ; AVX2-NEXT:    vmovq %xmm1, %rax  ; AVX2-NEXT:    bsrq %rax, %rax  ; AVX2-NEXT:    cmoveq %rcx, %rax -; AVX2-NEXT:    xorq $63, %rax  ; AVX2-NEXT:    vmovq %rax, %xmm1  ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63] +; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax  ; AVX2-NEXT:    bsrq %rax, %rax  ; AVX2-NEXT:    cmoveq %rcx, %rax -; AVX2-NEXT:    xorq $63, %rax -; AVX2-NEXT:    vmovq %rax, %xmm2 +; AVX2-NEXT:    vmovq %rax, %xmm3  ; AVX2-NEXT:    vmovq %xmm0, %rax  ; AVX2-NEXT:    bsrq %rax, %rax  ; AVX2-NEXT:    cmoveq %rcx, %rax -; AVX2-NEXT:    xorq $63, %rax  ; AVX2-NEXT:    vmovq %rax, %xmm0 -; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0  ; AVX2-NEXT:    retq  ; @@ -83,22 +81,21 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {  ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1  ; AVX1-NEXT:    vpextrq $1, %xmm1, %rax  ; AVX1-NEXT:    bsrq %rax, %rax -; AVX1-NEXT:    xorq $63, %rax  ; AVX1-NEXT:    vmovq %rax, %xmm2  ; AVX1-NEXT:    vmovq %xmm1, %rax  ; AVX1-NEXT:    bsrq %rax, %rax -; AVX1-NEXT:    xorq $63, %rax  ; AVX1-NEXT:    vmovq %rax, %xmm1  ; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63] +; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax  ; AVX1-NEXT:    bsrq %rax, %rax -; AVX1-NEXT:    xorq $63, %rax -; AVX1-NEXT:    vmovq %rax, %xmm2 +; AVX1-NEXT:    vmovq %rax, %xmm3  ; AVX1-NEXT:    vmovq %xmm0, %rax  ; AVX1-NEXT:    bsrq %rax, %rax -; AVX1-NEXT:    xorq $63, %rax  ; AVX1-NEXT:    vmovq %rax, %xmm0 -; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0  ; AVX1-NEXT:    retq  ; @@ -107,22 +104,21 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {  ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1  ; AVX2-NEXT:    vpextrq $1, %xmm1, %rax  ; AVX2-NEXT:    bsrq %rax, %rax -; AVX2-NEXT:    xorq $63, %rax  ; AVX2-NEXT:    vmovq %rax, %xmm2  ; AVX2-NEXT:    vmovq %xmm1, %rax  ; AVX2-NEXT:    bsrq %rax, %rax -; AVX2-NEXT:    xorq $63, %rax  ; AVX2-NEXT:    vmovq %rax, %xmm1  ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63] +; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax  ; AVX2-NEXT:    bsrq %rax, %rax -; AVX2-NEXT:    xorq $63, %rax -; AVX2-NEXT:    vmovq %rax, %xmm2 +; AVX2-NEXT:    vmovq %rax, %xmm3  ; AVX2-NEXT:    vmovq %xmm0, %rax  ; AVX2-NEXT:    bsrq %rax, %rax -; AVX2-NEXT:    xorq $63, %rax  ; AVX2-NEXT:    vmovq %rax, %xmm0 -; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0  ; AVX2-NEXT:    retq  ; @@ -148,43 +144,38 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {  ; AVX1-NEXT:    bsrl %eax, %ecx  ; AVX1-NEXT:    movl $63, %eax  ; AVX1-NEXT:    
cmovel %eax, %ecx -; AVX1-NEXT:    xorl $31, %ecx  ; AVX1-NEXT:    vmovd %xmm1, %edx  ; AVX1-NEXT:    bsrl %edx, %edx  ; AVX1-NEXT:    cmovel %eax, %edx -; AVX1-NEXT:    xorl $31, %edx  ; AVX1-NEXT:    vmovd %edx, %xmm2  ; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx  ; AVX1-NEXT:    bsrl %ecx, %ecx  ; AVX1-NEXT:    cmovel %eax, %ecx -; AVX1-NEXT:    xorl $31, %ecx  ; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrd $3, %xmm1, %ecx  ; AVX1-NEXT:    bsrl %ecx, %ecx  ; AVX1-NEXT:    cmovel %eax, %ecx -; AVX1-NEXT:    xorl $31, %ecx  ; AVX1-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm1 +; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [31,31,31,31] +; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX1-NEXT:    vpextrd $1, %xmm0, %ecx  ; AVX1-NEXT:    bsrl %ecx, %ecx  ; AVX1-NEXT:    cmovel %eax, %ecx -; AVX1-NEXT:    xorl $31, %ecx  ; AVX1-NEXT:    vmovd %xmm0, %edx  ; AVX1-NEXT:    bsrl %edx, %edx  ; AVX1-NEXT:    cmovel %eax, %edx -; AVX1-NEXT:    xorl $31, %edx -; AVX1-NEXT:    vmovd %edx, %xmm2 -; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vmovd %edx, %xmm3 +; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrd $2, %xmm0, %ecx  ; AVX1-NEXT:    bsrl %ecx, %ecx  ; AVX1-NEXT:    cmovel %eax, %ecx -; AVX1-NEXT:    xorl $31, %ecx -; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrd $3, %xmm0, %ecx  ; AVX1-NEXT:    bsrl %ecx, %ecx  ; AVX1-NEXT:    cmovel %eax, %ecx -; AVX1-NEXT:    xorl $31, %ecx -; AVX1-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm0 +; AVX1-NEXT:    vpinsrd $3, %ecx, %xmm3, %xmm0 +; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0  ; AVX1-NEXT:    retq  ; @@ -195,43 +186,38 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {  ; AVX2-NEXT:    bsrl %eax, %ecx  ; AVX2-NEXT:    movl $63, %eax  ; AVX2-NEXT:    cmovel %eax, %ecx -; AVX2-NEXT:    xorl $31, %ecx  ; AVX2-NEXT:    vmovd %xmm1, %edx  ; AVX2-NEXT:    bsrl %edx, %edx  ; AVX2-NEXT:    cmovel %eax, %edx -; AVX2-NEXT:    xorl $31, %edx  ; AVX2-NEXT:    vmovd %edx, %xmm2  ; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx  ; AVX2-NEXT:    bsrl %ecx, %ecx  ; AVX2-NEXT:    cmovel %eax, %ecx -; AVX2-NEXT:    xorl $31, %ecx  ; AVX2-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrd $3, %xmm1, %ecx  ; AVX2-NEXT:    bsrl %ecx, %ecx  ; AVX2-NEXT:    cmovel %eax, %ecx -; AVX2-NEXT:    xorl $31, %ecx  ; AVX2-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm1 +; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2 +; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX2-NEXT:    vpextrd $1, %xmm0, %ecx  ; AVX2-NEXT:    bsrl %ecx, %ecx  ; AVX2-NEXT:    cmovel %eax, %ecx -; AVX2-NEXT:    xorl $31, %ecx  ; AVX2-NEXT:    vmovd %xmm0, %edx  ; AVX2-NEXT:    bsrl %edx, %edx  ; AVX2-NEXT:    cmovel %eax, %edx -; AVX2-NEXT:    xorl $31, %edx -; AVX2-NEXT:    vmovd %edx, %xmm2 -; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vmovd %edx, %xmm3 +; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrd $2, %xmm0, %ecx  ; AVX2-NEXT:    bsrl %ecx, %ecx  ; AVX2-NEXT:    cmovel %eax, %ecx -; AVX2-NEXT:    xorl $31, %ecx -; AVX2-NEXT:    vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrd $2, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrd $3, %xmm0, %ecx  ; AVX2-NEXT:    bsrl %ecx, %ecx  ; AVX2-NEXT:    cmovel %eax, %ecx -; AVX2-NEXT:    xorl $31, %ecx -; AVX2-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm0 +; 
AVX2-NEXT:    vpinsrd $3, %ecx, %xmm3, %xmm0 +; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0  ; AVX2-NEXT:    retq  ; @@ -255,36 +241,31 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {  ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1  ; AVX1-NEXT:    vpextrd $1, %xmm1, %eax  ; AVX1-NEXT:    bsrl %eax, %eax -; AVX1-NEXT:    xorl $31, %eax  ; AVX1-NEXT:    vmovd %xmm1, %ecx  ; AVX1-NEXT:    bsrl %ecx, %ecx -; AVX1-NEXT:    xorl $31, %ecx  ; AVX1-NEXT:    vmovd %ecx, %xmm2  ; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrd $2, %xmm1, %eax  ; AVX1-NEXT:    bsrl %eax, %eax -; AVX1-NEXT:    xorl $31, %eax  ; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrd $3, %xmm1, %eax  ; AVX1-NEXT:    bsrl %eax, %eax -; AVX1-NEXT:    xorl $31, %eax  ; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1 +; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [31,31,31,31] +; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX1-NEXT:    vpextrd $1, %xmm0, %eax  ; AVX1-NEXT:    bsrl %eax, %eax -; AVX1-NEXT:    xorl $31, %eax  ; AVX1-NEXT:    vmovd %xmm0, %ecx  ; AVX1-NEXT:    bsrl %ecx, %ecx -; AVX1-NEXT:    xorl $31, %ecx -; AVX1-NEXT:    vmovd %ecx, %xmm2 -; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vmovd %ecx, %xmm3 +; AVX1-NEXT:    vpinsrd $1, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrd $2, %xmm0, %eax  ; AVX1-NEXT:    bsrl %eax, %eax -; AVX1-NEXT:    xorl $31, %eax -; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrd $3, %xmm0, %eax  ; AVX1-NEXT:    bsrl %eax, %eax -; AVX1-NEXT:    xorl $31, %eax -; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0 +; AVX1-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm0 +; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0  ; AVX1-NEXT:    retq  ; @@ -293,36 +274,31 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {  ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1  ; AVX2-NEXT:    vpextrd $1, %xmm1, %eax  ; AVX2-NEXT:    bsrl %eax, %eax -; AVX2-NEXT:    xorl $31, %eax  ; AVX2-NEXT:    vmovd %xmm1, %ecx  ; AVX2-NEXT:    bsrl %ecx, %ecx -; AVX2-NEXT:    xorl $31, %ecx  ; AVX2-NEXT:    vmovd %ecx, %xmm2  ; AVX2-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrd $2, %xmm1, %eax  ; AVX2-NEXT:    bsrl %eax, %eax -; AVX2-NEXT:    xorl $31, %eax  ; AVX2-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrd $3, %xmm1, %eax  ; AVX2-NEXT:    bsrl %eax, %eax -; AVX2-NEXT:    xorl $31, %eax  ; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1 +; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2 +; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX2-NEXT:    vpextrd $1, %xmm0, %eax  ; AVX2-NEXT:    bsrl %eax, %eax -; AVX2-NEXT:    xorl $31, %eax  ; AVX2-NEXT:    vmovd %xmm0, %ecx  ; AVX2-NEXT:    bsrl %ecx, %ecx -; AVX2-NEXT:    xorl $31, %ecx -; AVX2-NEXT:    vmovd %ecx, %xmm2 -; AVX2-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vmovd %ecx, %xmm3 +; AVX2-NEXT:    vpinsrd $1, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrd $2, %xmm0, %eax  ; AVX2-NEXT:    bsrl %eax, %eax -; AVX2-NEXT:    xorl $31, %eax -; AVX2-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrd $3, %xmm0, %eax  ; AVX2-NEXT:    bsrl %eax, %eax -; AVX2-NEXT:    xorl $31, %eax -; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0 +; AVX2-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm0 +; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX2-NEXT:    vinserti128 $1, %xmm1, 
%ymm0, %ymm0  ; AVX2-NEXT:    retq  ; @@ -348,83 +324,70 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {  ; AVX1-NEXT:    bsrw %ax, %cx  ; AVX1-NEXT:    movw $31, %ax  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vmovd %xmm1, %edx  ; AVX1-NEXT:    bsrw %dx, %dx  ; AVX1-NEXT:    cmovew %ax, %dx -; AVX1-NEXT:    xorl $15, %edx  ; AVX1-NEXT:    vmovd %edx, %xmm2  ; AVX1-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $2, %xmm1, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $3, %xmm1, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $4, %xmm1, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $5, %xmm1, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $6, %xmm1, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $7, %xmm1, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm1 +; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15] +; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX1-NEXT:    vpextrw $1, %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vmovd %xmm0, %edx  ; AVX1-NEXT:    bsrw %dx, %dx  ; AVX1-NEXT:    cmovew %ax, %dx -; AVX1-NEXT:    xorl $15, %edx -; AVX1-NEXT:    vmovd %edx, %xmm2 -; AVX1-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vmovd %edx, %xmm3 +; AVX1-NEXT:    vpinsrw $1, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $2, %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx -; AVX1-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $2, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $3, %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx -; AVX1-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $3, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $4, %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx -; AVX1-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $4, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $5, %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx -; AVX1-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $5, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $6, %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx -; AVX1-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $6, %ecx, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $7, %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx  ; AVX1-NEXT:    cmovew %ax, %cx -; AVX1-NEXT:    xorl $15, %ecx -; AVX1-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX1-NEXT:    vpinsrw $7, %ecx, %xmm3, %xmm0 +; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0  
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0  ; AVX1-NEXT:    retq  ; @@ -435,83 +398,70 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {  ; AVX2-NEXT:    bsrw %ax, %cx  ; AVX2-NEXT:    movw $31, %ax  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vmovd %xmm1, %edx  ; AVX2-NEXT:    bsrw %dx, %dx  ; AVX2-NEXT:    cmovew %ax, %dx -; AVX2-NEXT:    xorl $15, %edx  ; AVX2-NEXT:    vmovd %edx, %xmm2  ; AVX2-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $2, %xmm1, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $3, %xmm1, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $4, %xmm1, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $5, %xmm1, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $6, %xmm1, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $7, %xmm1, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm1 +; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15] +; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX2-NEXT:    vpextrw $1, %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx  ; AVX2-NEXT:    vmovd %xmm0, %edx  ; AVX2-NEXT:    bsrw %dx, %dx  ; AVX2-NEXT:    cmovew %ax, %dx -; AVX2-NEXT:    xorl $15, %edx -; AVX2-NEXT:    vmovd %edx, %xmm2 -; AVX2-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vmovd %edx, %xmm3 +; AVX2-NEXT:    vpinsrw $1, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $2, %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx -; AVX2-NEXT:    vpinsrw $2, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $2, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $3, %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx -; AVX2-NEXT:    vpinsrw $3, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $3, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $4, %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx -; AVX2-NEXT:    vpinsrw $4, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $4, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $5, %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx -; AVX2-NEXT:    vpinsrw $5, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $5, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $6, %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx -; AVX2-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $6, %ecx, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $7, %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx  ; AVX2-NEXT:    cmovew %ax, %cx -; AVX2-NEXT:    xorl $15, %ecx -; AVX2-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm0 +; AVX2-NEXT:    vpinsrw $7, %ecx, %xmm3, %xmm0 +; 
AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0  ; AVX2-NEXT:    retq  ; @@ -532,68 +482,55 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {  ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1  ; AVX1-NEXT:    vpextrw $1, %xmm1, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vmovd %xmm1, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx -; AVX1-NEXT:    xorl $15, %ecx  ; AVX1-NEXT:    vmovd %ecx, %xmm2  ; AVX1-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $2, %xmm1, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $3, %xmm1, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $4, %xmm1, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $5, %xmm1, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $6, %xmm1, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2  ; AVX1-NEXT:    vpextrw $7, %xmm1, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm1 +; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15] +; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX1-NEXT:    vpextrw $1, %xmm0, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax  ; AVX1-NEXT:    vmovd %xmm0, %ecx  ; AVX1-NEXT:    bsrw %cx, %cx -; AVX1-NEXT:    xorl $15, %ecx -; AVX1-NEXT:    vmovd %ecx, %xmm2 -; AVX1-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vmovd %ecx, %xmm3 +; AVX1-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $2, %xmm0, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax -; AVX1-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $3, %xmm0, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax -; AVX1-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $4, %xmm0, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax -; AVX1-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $5, %xmm0, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax -; AVX1-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $6, %xmm0, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax -; AVX1-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2 +; AVX1-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3  ; AVX1-NEXT:    vpextrw $7, %xmm0, %eax  ; AVX1-NEXT:    bsrw %ax, %ax -; AVX1-NEXT:    xorl $15, %eax -; AVX1-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX1-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm0 +; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0  ; AVX1-NEXT:    retq  ; @@ -602,68 +539,55 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {  ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1  ; AVX2-NEXT:    vpextrw $1, %xmm1, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vmovd %xmm1, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx -; AVX2-NEXT:    
xorl $15, %ecx  ; AVX2-NEXT:    vmovd %ecx, %xmm2  ; AVX2-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $2, %xmm1, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $3, %xmm1, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $4, %xmm1, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $5, %xmm1, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $6, %xmm1, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2  ; AVX2-NEXT:    vpextrw $7, %xmm1, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm1 +; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15] +; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1  ; AVX2-NEXT:    vpextrw $1, %xmm0, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax  ; AVX2-NEXT:    vmovd %xmm0, %ecx  ; AVX2-NEXT:    bsrw %cx, %cx -; AVX2-NEXT:    xorl $15, %ecx -; AVX2-NEXT:    vmovd %ecx, %xmm2 -; AVX2-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vmovd %ecx, %xmm3 +; AVX2-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $2, %xmm0, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax -; AVX2-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $3, %xmm0, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax -; AVX2-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $4, %xmm0, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax -; AVX2-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $5, %xmm0, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax -; AVX2-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $6, %xmm0, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax -; AVX2-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2 +; AVX2-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3  ; AVX2-NEXT:    vpextrw $7, %xmm0, %eax  ; AVX2-NEXT:    bsrw %ax, %ax -; AVX2-NEXT:    xorl $15, %eax -; AVX2-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX2-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm0 +; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0  ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0  ; AVX2-NEXT:    retq  ; diff --git a/llvm/test/CodeGen/X86/vector-pcmp.ll b/llvm/test/CodeGen/X86/vector-pcmp.ll index f1398c3e429..9af54b8bbb1 100644 --- a/llvm/test/CodeGen/X86/vector-pcmp.ll +++ b/llvm/test/CodeGen/X86/vector-pcmp.ll @@ -87,29 +87,33 @@ define <1 x i128> @test_strange_type(<1 x i128> %x) {  ; SSE2-LABEL: test_strange_type:  ; SSE2:       # BB#0:  ; SSE2-NEXT:    sarq $63, %rsi -; SSE2-NEXT:    notq %rsi  ; SSE2-NEXT:    movd %rsi, %xmm0 +; SSE2-NEXT:    notq %rsi  ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] -; SSE2-NEXT:    movd %xmm0, %rax +; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1 +; SSE2-NEXT:    pxor %xmm0, %xmm1 +; SSE2-NEXT:    movd %xmm1, %rax  ; SSE2-NEXT:    movq %rsi, %rdx  ; SSE2-NEXT:    retq  ;  ; SSE42-LABEL: 
test_strange_type:  ; SSE42:       # BB#0:  ; SSE42-NEXT:    sarq $63, %rsi -; SSE42-NEXT:    notq %rsi  ; SSE42-NEXT:    movd %rsi, %xmm0  ; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] -; SSE42-NEXT:    movd %xmm0, %rax -; SSE42-NEXT:    pextrq $1, %xmm0, %rdx +; SSE42-NEXT:    pcmpeqd %xmm1, %xmm1 +; SSE42-NEXT:    pxor %xmm0, %xmm1 +; SSE42-NEXT:    movd %xmm1, %rax +; SSE42-NEXT:    pextrq $1, %xmm1, %rdx  ; SSE42-NEXT:    retq  ;  ; AVX1-LABEL: test_strange_type:  ; AVX1:       # BB#0:  ; AVX1-NEXT:    sarq $63, %rsi -; AVX1-NEXT:    notq %rsi  ; AVX1-NEXT:    vmovq %rsi, %xmm0  ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] +; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0  ; AVX1-NEXT:    vmovq %xmm0, %rax  ; AVX1-NEXT:    vpextrq $1, %xmm0, %rdx  ; AVX1-NEXT:    retq @@ -117,9 +121,10 @@ define <1 x i128> @test_strange_type(<1 x i128> %x) {  ; AVX2-LABEL: test_strange_type:  ; AVX2:       # BB#0:  ; AVX2-NEXT:    sarq $63, %rsi -; AVX2-NEXT:    notq %rsi  ; AVX2-NEXT:    vmovq %rsi, %xmm0  ; AVX2-NEXT:    vpbroadcastq %xmm0, %xmm0 +; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0  ; AVX2-NEXT:    vmovq %xmm0, %rax  ; AVX2-NEXT:    vpextrq $1, %xmm0, %rdx  ; AVX2-NEXT:    retq diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll index f4192d2a8a5..bb94340ba6c 100644 --- a/llvm/test/CodeGen/X86/vector-sext.ll +++ b/llvm/test/CodeGen/X86/vector-sext.ll @@ -826,24 +826,20 @@ entry:  define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {  ; SSE2-LABEL: load_sext_4i1_to_4i64:  ; SSE2:       # BB#0: # %entry -; SSE2-NEXT:    movzbl (%rdi), %eax +; SSE2-NEXT:    movl (%rdi), %eax  ; SSE2-NEXT:    movl %eax, %ecx  ; SSE2-NEXT:    shrl $3, %ecx -; SSE2-NEXT:    andl $1, %ecx  ; SSE2-NEXT:    movd %ecx, %xmm0  ; SSE2-NEXT:    movl %eax, %ecx  ; SSE2-NEXT:    shrl %ecx -; SSE2-NEXT:    andl $1, %ecx  ; SSE2-NEXT:    movd %ecx, %xmm1  ; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSE2-NEXT:    movl %eax, %ecx -; SSE2-NEXT:    andl $1, %ecx -; SSE2-NEXT:    movd %ecx, %xmm2 +; SSE2-NEXT:    movd %eax, %xmm2  ; SSE2-NEXT:    shrl $2, %eax -; SSE2-NEXT:    andl $1, %eax  ; SSE2-NEXT:    movd %eax, %xmm0  ; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]  ; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2  ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]  ; SSE2-NEXT:    psllq $63, %xmm0  ; SSE2-NEXT:    psrad $31, %xmm0 @@ -856,24 +852,20 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {  ;  ; SSSE3-LABEL: load_sext_4i1_to_4i64:  ; SSSE3:       # BB#0: # %entry -; SSSE3-NEXT:    movzbl (%rdi), %eax +; SSSE3-NEXT:    movl (%rdi), %eax  ; SSSE3-NEXT:    movl %eax, %ecx  ; SSSE3-NEXT:    shrl $3, %ecx -; SSSE3-NEXT:    andl $1, %ecx  ; SSSE3-NEXT:    movd %ecx, %xmm0  ; SSSE3-NEXT:    movl %eax, %ecx  ; SSSE3-NEXT:    shrl %ecx -; SSSE3-NEXT:    andl $1, %ecx  ; SSSE3-NEXT:    movd %ecx, %xmm1  ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; SSSE3-NEXT:    movl %eax, %ecx -; SSSE3-NEXT:    andl $1, %ecx -; SSSE3-NEXT:    movd %ecx, %xmm2 +; SSSE3-NEXT:    movd %eax, %xmm2  ; SSSE3-NEXT:    shrl $2, %eax -; SSSE3-NEXT:    andl $1, %eax  ; SSSE3-NEXT:    movd %eax, %xmm0  ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]  ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSSE3-NEXT:    pand {{.*}}(%rip), %xmm2  ; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]  ; SSSE3-NEXT:    psllq $63, %xmm0  ; SSSE3-NEXT:    psrad $31, %xmm0 @@ -886,21 +878,17 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {  ;  ; SSE41-LABEL: load_sext_4i1_to_4i64:  ; SSE41:       # BB#0: # %entry -; SSE41-NEXT:    movzbl (%rdi), %eax +; SSE41-NEXT:    movl (%rdi), %eax  ; SSE41-NEXT:    movl %eax, %ecx  ; SSE41-NEXT:    shrl %ecx -; SSE41-NEXT:    andl $1, %ecx -; SSE41-NEXT:    movl %eax, %edx -; SSE41-NEXT:    andl $1, %edx -; SSE41-NEXT:    movd %edx, %xmm1 +; SSE41-NEXT:    movd %eax, %xmm1  ; SSE41-NEXT:    pinsrd $1, %ecx, %xmm1  ; SSE41-NEXT:    movl %eax, %ecx  ; SSE41-NEXT:    shrl $2, %ecx -; SSE41-NEXT:    andl $1, %ecx  ; SSE41-NEXT:    pinsrd $2, %ecx, %xmm1  ; SSE41-NEXT:    shrl $3, %eax -; SSE41-NEXT:    andl $1, %eax  ; SSE41-NEXT:    pinsrd $3, %eax, %xmm1 +; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1  ; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero  ; SSE41-NEXT:    psllq $63, %xmm0  ; SSE41-NEXT:    psrad $31, %xmm0 @@ -964,18 +952,14 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {  ; X32-SSE41-NEXT:    movzbl (%eax), %eax  ; X32-SSE41-NEXT:    movl %eax, %ecx  ; X32-SSE41-NEXT:    shrl %ecx -; X32-SSE41-NEXT:    andl $1, %ecx -; X32-SSE41-NEXT:    movl %eax, %edx -; X32-SSE41-NEXT:    andl $1, %edx -; X32-SSE41-NEXT:    movd %edx, %xmm1 +; X32-SSE41-NEXT:    movd %eax, %xmm1  ; X32-SSE41-NEXT:    pinsrd $1, %ecx, %xmm1  ; X32-SSE41-NEXT:    movl %eax, %ecx  ; X32-SSE41-NEXT:    shrl $2, %ecx -; X32-SSE41-NEXT:    andl $1, %ecx  ; X32-SSE41-NEXT:    pinsrd $2, %ecx, %xmm1  ; X32-SSE41-NEXT:    shrl $3, %eax -; X32-SSE41-NEXT:    andl $1, %eax  ; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm1 +; X32-SSE41-NEXT:    pand .LCPI16_0, %xmm1  ; X32-SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero  ; X32-SSE41-NEXT:    psllq $63, %xmm0  ; X32-SSE41-NEXT:    psrad $31, %xmm0  | 
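
The new lowering is deliberately conservative: it bails out unless every element uses the same AND/XOR/OR opcode and every right-hand operand is a constant. A hypothetical case that is intentionally left untouched (again, the function name and constants below are invented for illustration and are not part of this patch):

```llvm
; Hypothetical negative example: the element operations differ (and/or/xor), so
; the opcode check at the top of lowerBuildVectorToBitOp returns SDValue() and
; the scalar operations are lowered individually, exactly as before this patch.
define <4 x i32> @mixed_ops(i32 %a, i32 %b, i32 %c, i32 %d) {
  %a1 = and i32 %a, 1
  %b1 = or i32 %b, 2
  %c1 = xor i32 %c, 4
  %d1 = and i32 %d, 8
  %v0 = insertelement <4 x i32> undef, i32 %a1, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %b1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %c1, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %d1, i32 3
  ret <4 x i32> %v3
}
```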

