|   |   |   |
|---|---|---|
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-03-24 19:06:35 +0000 |
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-03-24 19:06:35 +0000 |
| commit | 87d4ab8b92e17db517499403eaa2e0b19992fae2 (patch) | |
| tree | 0c973e1dd13a30f2acd11e595f2201c6560f7fd0 /llvm/lib | |
| parent | 6af0363857f5815fb69268198dd55f29c7a3539b (diff) | |
[X86][SSE41] Start shuffle combining from ZERO_EXTEND_VECTOR_INREG (PR40685)
Enable SSE41 ZERO_EXTEND_VECTOR_INREG shuffle combines. For the PMOVZX(PSHUFD(V)) -> UNPCKH(V,0) pattern we reduce the shuffle count (shuffles are a port5 bottleneck on Intel) at the expense of creating a zero vector (pxor v,v) and an extra register move. This is a good trade-off, as both are pretty cheap and in most cases it doesn't increase register pressure.
This also exposed a missed opportunity: we can combine to ZERO_EXTEND_VECTOR_INREG when the load can be folded, even if we're in the float domain.
llvm-svn: 356864
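To make the PMOVZX(PSHUFD(V)) -> UNPCKH(V,0) trade-off concrete, here is a minimal intrinsics sketch (not part of the patch; the function names are made up) of the pattern at the source level: zero-extending the high four u16 lanes of an XMM register. The first form needs two shuffle-port instructions (PSHUFD + PMOVZXWD); the second uses a cheap zero idiom plus a single unpack (PXOR + PUNPCKHWD), which is the shape the combine now prefers.

```cpp
// Compile with SSE4.1 enabled (e.g. -msse4.1).
#include <immintrin.h>

// Shuffle-heavy form: PSHUFD moves the high half down, PMOVZXWD extends it.
__m128i zext_high_u16_pmovzx(__m128i v) {
  __m128i hi = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 2, 3, 2)); // PSHUFD
  return _mm_cvtepu16_epi32(hi);                              // PMOVZXWD
}

// Unpack form: interleave the high words with zero; same result, one shuffle.
__m128i zext_high_u16_unpck(__m128i v) {
  return _mm_unpackhi_epi16(v, _mm_setzero_si128());          // PUNPCKHWD v, 0
}
```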
Diffstat (limited to 'llvm/lib')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 61 |
1 file changed, 33 insertions, 28 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 67631f72be4..1bf029ac887 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30885,33 +30885,39 @@ static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
 
   // Match against a ZERO_EXTEND_VECTOR_INREG/VZEXT instruction.
   // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
-  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
-                         (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
-    unsigned MaxScale = 64 / MaskEltSize;
-    for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
-      bool Match = true;
-      unsigned NumDstElts = NumMaskElts / Scale;
-      for (unsigned i = 0; i != NumDstElts && Match; ++i) {
-        Match &= isUndefOrEqual(Mask[i * Scale], (int)i);
-        Match &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
-      }
-      if (Match) {
-        unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
-        MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
-                                            MVT::getIntegerVT(MaskEltSize);
-        SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
-
-        if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
-          V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
-
-        if (SrcVT.getVectorNumElements() == NumDstElts)
-          Shuffle = unsigned(ISD::ZERO_EXTEND);
-        else
-          Shuffle = unsigned(ISD::ZERO_EXTEND_VECTOR_INREG);
+  if ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
+      (MaskVT.is256BitVector() && Subtarget.hasInt256())) {
+    // Allow this with FloatDomain if we'll be able to fold the load.
+    SDValue BC1 = peekThroughOneUseBitcasts(V1);
+    if (AllowIntDomain ||
+        (BC1.hasOneUse() && BC1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
+         MayFoldLoad(BC1.getOperand(0)))) {
+      unsigned MaxScale = 64 / MaskEltSize;
+      for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
+        bool Match = true;
+        unsigned NumDstElts = NumMaskElts / Scale;
+        for (unsigned i = 0; i != NumDstElts && Match; ++i) {
+          Match &= isUndefOrEqual(Mask[i * Scale], (int)i);
+          Match &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
+        }
+        if (Match) {
+          unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
+          MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType()
+                                            : MVT::getIntegerVT(MaskEltSize);
+          SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
 
-        DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
-        DstVT = MVT::getVectorVT(DstVT, NumDstElts);
-        return true;
+          if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
+            V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
+
+          if (SrcVT.getVectorNumElements() == NumDstElts)
+            Shuffle = unsigned(ISD::ZERO_EXTEND);
+          else
+            Shuffle = unsigned(ISD::ZERO_EXTEND_VECTOR_INREG);
+
+          DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
+          DstVT = MVT::getVectorVT(DstVT, NumDstElts);
+          return true;
+        }
       }
     }
   }
@@ -42616,8 +42622,7 @@ static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
     return DAG.getNode(N->getOpcode(), SDLoc(N), VT, In.getOperand(0));
 
   // Attempt to combine as a shuffle.
-  // TODO: SSE41 support
-  if (Subtarget.hasAVX() && N->getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
+  if (Subtarget.hasSSE41() && N->getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
     SDValue Op(N, 0);
     if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
       if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
```
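For readers tracing the Match loop in matchUnaryShuffle above, the following standalone sketch (a hypothetical helper, not LLVM code) spells out the test it performs, using LLVM's shuffle-mask sentinel convention of -1 for undef and -2 for zero: source element i must sit at position i*Scale, and the Scale-1 padding positions after it must all be zero or undef.

```cpp
#include <vector>

// Returns true if Mask describes a zero-extend-in-register shuffle for the
// given power-of-two Scale, e.g. {0, -2, 1, -2, 2, -2, 3, -2} with Scale = 2.
static bool looksLikeZExtInReg(const std::vector<int> &Mask, unsigned Scale) {
  unsigned NumDstElts = Mask.size() / Scale;
  for (unsigned i = 0; i != NumDstElts; ++i) {
    int M = Mask[i * Scale];
    if (M != -1 && M != (int)i)        // source lane must be element i or undef
      return false;
    for (unsigned j = 1; j != Scale; ++j)
      if (Mask[i * Scale + j] >= 0)    // padding lanes must be zero (-2) or undef (-1)
        return false;
  }
  return true;
}
```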

