| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-05-31 11:25:16 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-05-31 11:25:16 +0000 |
| commit | 346886bc0de81ff4fe83573445de48cd6a5a81c8 | |
| tree | bbad4605654e2da9ed0f94f17f273a955ee2dd7a | |
| parent | 0f11db359d36976e2b2471e49a39f20f6d0a9f08 | |
[X86][SSE] Add support for detecting SUB(SPLAT_BV, SPLAT) cases for shift-rotate patterns.
This improves splat rotations (rotation by a uniform value), avoiding the need to fall back to the generic non-uniform shift code (an extension of PR37426).
llvm-svn: 333641
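For reference, this is the IR shape being targeted: the rotate amount is a splat, and the right-shift amount is built as a constant splat build_vector minus that splat, i.e. SUB(SPLAT_BV, SPLAT). A minimal sketch of the <8 x i16> case follows; the %splat and %splat16 lines are taken verbatim from the splatvar_rotate_v8i16 test in the diff below, while the shl/lshr/or tail is the usual rotate-by-splat expansion, assumed here since those lines are not shown in this diff:

```llvm
; Rotate %a left by the uniform amount in lane 0 of %b.
define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
  ; Splat the rotate amount across all lanes.
  %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  ; Right-shift amount: SUB(SPLAT_BV, SPLAT) -- the case this patch now recognizes as a splat.
  %splat16 = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat
  %shl = shl <8 x i16> %a, %splat
  %lshr = lshr <8 x i16> %a, %splat16
  %rot = or <8 x i16> %shl, %lshr
  ret <8 x i16> %rot
}
```

Because both shift amounts are now recognized as splats, each side can lower to a single uniform-shift instruction (psllw/psrlw) instead of the per-element variable-shift expansion, as the test diffs below show.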
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 46 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-rotate-128.ll | 205 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-rotate-256.ll | 46 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-rotate-512.ll | 20 |
4 files changed, 93 insertions, 224 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7f1432c8128..14a0a51ec1d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23123,8 +23123,9 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
 }
 
 // Determine if V is a splat value, and return the scalar.
-// TODO: Add support for SUB(SPLAT_CST, SPLAT) cases to support rotate patterns.
-static SDValue IsSplatValue(SDValue V, const SDLoc &dl, SelectionDAG &DAG) {
+static SDValue IsSplatValue(MVT VT, SDValue V, const SDLoc &dl,
+                            SelectionDAG &DAG, const X86Subtarget &Subtarget,
+                            unsigned Opcode) {
   // Check if this is a splat build_vector node.
   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V)) {
     SDValue SplatAmt = BV->getSplatValue();
@@ -23133,6 +23134,30 @@ static SDValue IsSplatValue(SDValue V, const SDLoc &dl, SelectionDAG &DAG) {
     return SplatAmt;
   }
 
+  // Check for SUB(SPLAT_BV, SPLAT) cases from rotate patterns.
+  if (V.getOpcode() == ISD::SUB &&
+      !SupportedVectorVarShift(VT, Subtarget, Opcode)) {
+    // Peek through any EXTRACT_SUBVECTORs.
+    SDValue LHS = V.getOperand(0);
+    SDValue RHS = V.getOperand(1);
+    while (LHS.getOpcode() == ISD::EXTRACT_SUBVECTOR)
+      LHS = LHS.getOperand(0);
+    while (RHS.getOpcode() == ISD::EXTRACT_SUBVECTOR)
+      RHS = RHS.getOperand(0);
+
+    // Ensure that the corresponding splat BV element is not UNDEF.
+    BitVector UndefElts;
+    BuildVectorSDNode *BV0 = dyn_cast<BuildVectorSDNode>(LHS);
+    ShuffleVectorSDNode *SVN1 = dyn_cast<ShuffleVectorSDNode>(RHS);
+    if (BV0 && SVN1 && BV0->getSplatValue(&UndefElts) && SVN1->isSplat()) {
+      unsigned SplatIdx = (unsigned)SVN1->getSplatIndex();
+      if (!UndefElts[SplatIdx])
+        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+                           VT.getVectorElementType(), V,
+                           DAG.getIntPtrConstant(SplatIdx, dl));
+    }
+  }
+
   // Check if this is a shuffle node doing a splat.
   ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(V);
   if (!SVN || !SVN->isSplat())
@@ -23141,7 +23166,7 @@ static SDValue IsSplatValue(SDValue V, const SDLoc &dl, SelectionDAG &DAG) {
   unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
   SDValue InVec = V.getOperand(0);
   if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
-    assert((SplatIdx < InVec.getSimpleValueType().getVectorNumElements()) &&
+    assert((SplatIdx < VT.getVectorNumElements()) &&
            "Unexpected shuffle index found!");
     return InVec.getOperand(SplatIdx);
   } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
@@ -23152,7 +23177,7 @@
 
   // Avoid introducing an extract element from a shuffle.
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
-                     V.getValueType().getVectorElementType(), InVec,
+                     VT.getVectorElementType(), InVec,
                      DAG.getIntPtrConstant(SplatIdx, dl));
 }
 
@@ -23162,19 +23187,20 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
   SDLoc dl(Op);
   SDValue R = Op.getOperand(0);
   SDValue Amt = Op.getOperand(1);
+  unsigned Opcode = Op.getOpcode();
 
-  unsigned X86OpcI = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
-    (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
+  unsigned X86OpcI = (Opcode == ISD::SHL) ? X86ISD::VSHLI :
+    (Opcode == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
 
-  unsigned X86OpcV = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHL :
-    (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRL : X86ISD::VSRA;
+  unsigned X86OpcV = (Opcode == ISD::SHL) ? X86ISD::VSHL :
+    (Opcode == ISD::SRL) ? X86ISD::VSRL : X86ISD::VSRA;
 
   // Peek through any EXTRACT_SUBVECTORs.
   while (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
     Amt = Amt.getOperand(0);
 
-  if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode())) {
-    if (SDValue BaseShAmt = IsSplatValue(Amt, dl, DAG)) {
+  if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
+    if (SDValue BaseShAmt = IsSplatValue(VT, Amt, dl, DAG, Subtarget, Opcode)) {
       MVT EltVT = VT.getVectorElementType();
       assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
       if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))

diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index 35a9a1eaf34..a877533128a 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -748,35 +748,16 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ;
 
 define <2 x i64> @splatvar_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
-; SSE2-LABEL: splatvar_rotate_v2i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [64,64]
-; SSE2-NEXT: psubq %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psllq %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlq %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE2-NEXT: psrlq %xmm3, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE2-NEXT: orpd %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_rotate_v2i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [64,64]
-; SSE41-NEXT: psubq %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllq %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlq %xmm3, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE41-NEXT: psrlq %xmm3, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_rotate_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,64]
+; SSE-NEXT: psubq %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psllq %xmm1, %xmm2
+; SSE-NEXT: psrlq %xmm3, %xmm0
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: splatvar_rotate_v2i64:
 ; AVX1: # %bb.0:
@@ -784,10 +765,7 @@ define <2 x i64> @splatvar_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [64,64]
 ; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: retq
 ;
@@ -972,82 +950,31 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE2-LABEL: splatvar_rotate_v8i16:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
-; SSE2-NEXT: psubw %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT: psubw %xmm2, %xmm3
 ; SSE2-NEXT: pextrw $0, %xmm1, %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psllw %xmm3, %xmm1
-; SSE2-NEXT: psllw $12, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psraw $15, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddw %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psraw $15, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddw %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psraw $15, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddw %xmm2, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psllw %xmm1, %xmm2
+; SSE2-NEXT: pextrw $0, %xmm3, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: psrlw %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: splatvar_rotate_v8i16:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm4 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16]
-; SSE41-NEXT: psubw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psllw %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psllw $12, %xmm1
-; SSE41-NEXT: psllw $4, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psrlw $4, %xmm4
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psrlw $2, %xmm4
-; SSE41-NEXT: paddw %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psrlw $1, %xmm4
-; SSE41-NEXT: paddw %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: por %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT: psubw %xmm1, %xmm3
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psllw %xmm2, %xmm3
+; SSE41-NEXT: psrlw %xmm1, %xmm0
+; SSE41-NEXT: por %xmm3, %xmm0
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: splatvar_rotate_v8i16:
@@ -1057,21 +984,9 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
 ; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT: retq
 ;
@@ -1081,14 +996,10 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
 ; AVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX2-NEXT: vpsllw %xmm2, %xmm0, %xmm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
 ; AVX512BW-LABEL: splatvar_rotate_v8i16:
@@ -1131,45 +1042,17 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; X32-SSE-LABEL: splatvar_rotate_v8i16:
 ; X32-SSE: # %bb.0:
 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
-; X32-SSE-NEXT: psubw %xmm3, %xmm2
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT: psubw %xmm2, %xmm3
 ; X32-SSE-NEXT: pextrw $0, %xmm1, %eax
-; X32-SSE-NEXT: movd %eax, %xmm3
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psllw %xmm3, %xmm1
-; X32-SSE-NEXT: psllw $12, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: psraw $15, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddw %xmm2, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: psraw $15, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddw %xmm2, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: psraw $15, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddw %xmm2, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: por %xmm1, %xmm3
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
+; X32-SSE-NEXT: movd %eax, %xmm1
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
+; X32-SSE-NEXT: psllw %xmm1, %xmm2
+; X32-SSE-NEXT: pextrw $0, %xmm3, %eax
+; X32-SSE-NEXT: movd %eax, %xmm1
+; X32-SSE-NEXT: psrlw %xmm1, %xmm0
+; X32-SSE-NEXT: por %xmm2, %xmm0
 ; X32-SSE-NEXT: retl
   %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
   %splat16 = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat

diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 6287fac2586..a048c0c5689 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -447,13 +447,8 @@ define <4 x i64> @splatvar_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm4
 ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; AVX1-NEXT: vpsrlq %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
@@ -594,33 +589,14 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
 ; AVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX1-NEXT: vpsllw %xmm1, %xmm3, %xmm4
 ; AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT: vpsllw $12, %xmm2, %xmm4
-; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
-; AVX1-NEXT: vpor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm4
-; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm5
-; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
@@ -632,16 +608,8 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
 ; AVX2-NEXT: vpsubw %ymm2, %ymm3, %ymm2
 ; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm1
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
-; AVX2-NEXT: vpsrlvd %ymm5, %ymm4, %ymm4
-; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
-; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX2-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: retq
 ;

diff --git a/llvm/test/CodeGen/X86/vector-rotate-512.ll b/llvm/test/CodeGen/X86/vector-rotate-512.ll
index 5c2c6c68afc..99c1a831612 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-512.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-512.ll
@@ -341,14 +341,10 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
 ; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512F-NEXT: vpsllw %xmm2, %ymm1, %ymm4
 ; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm2
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512F-NEXT: vpsrlvd %zmm3, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
+; AVX512F-NEXT: vpsrlw %xmm3, %ymm1, %ymm1
 ; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT: vpsrlvd %zmm3, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: vpsrlw %xmm3, %ymm0, %ymm0
 ; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
 ; AVX512F-NEXT: retq
 ;
@@ -360,13 +356,9 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
 ; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512VL-NEXT: vpsllw %xmm2, %ymm1, %ymm4
 ; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm2
-; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512VL-NEXT: vpsrlvd %zmm3, %zmm1, %zmm1
-; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512VL-NEXT: vpsrlvd %zmm3, %zmm0, %zmm0
-; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
+; AVX512VL-NEXT: vpsrlw %xmm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw %xmm3, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
 ; AVX512VL-NEXT: vpor %ymm1, %ymm4, %ymm1
 ; AVX512VL-NEXT: retq

