| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-02-15 13:33:15 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-02-15 13:33:15 +0000 |
| commit | 908f833e57bfacb310248cd9ef1c0257360a0c4e | |
| tree | 076c59aea087e44288d007900b2c9dc0372a60c3 | |
| parent | 6acc0e555b8a91a07fbeec1ea46cf33bee9c71cb | |
[X86][SSE] combineTruncateWithSat - use truncateVectorWithPACK to chain PACKSS vXi32-vXi8 saturated truncation
We can use PACKSS to saturate each stage of the chain: PACKSSDW down to [-32768,32767] and then PACKSSWB to [-128,127].
PACKUS is a little trickier and will be handled in a separate patch.
llvm-svn: 325235
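
For illustration only (not part of the patch), here is a minimal C++ intrinsics sketch of the two-stage PACKSS chain this combine now emits for a v16i32 -> v16i8 signed-saturated truncation. The helper name `trunc_ssat_v16i32_v16i8` is hypothetical, chosen to mirror the test function below; the intrinsics correspond directly to the PACKSSDW/PACKSSWB instructions:

```cpp
// Hypothetical standalone sketch, not taken from the commit: chain PACKSSDW and
// PACKSSWB so each stage performs part of the signed saturation itself.
#include <emmintrin.h>

// Truncate 16 x i32 (held in four __m128i) to 16 x i8 with signed saturation.
static inline __m128i trunc_ssat_v16i32_v16i8(__m128i a, __m128i b,
                                              __m128i c, __m128i d) {
  // PACKSSDW: i32 -> i16, clamping each lane to [-32768,32767].
  __m128i lo = _mm_packs_epi32(a, b);
  __m128i hi = _mm_packs_epi32(c, d);
  // PACKSSWB: i16 -> i8, clamping each lane to [-128,127].
  return _mm_packs_epi16(lo, hi);
}
```

This is the same packssdw/packssdw/packsswb sequence the updated SSE check lines expect: because each PACKSS stage saturates on its own, no explicit pre-clamp (pminsd/pmaxsd or compare/select) is needed before the truncation.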
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 19 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-trunc-ssat.ll | 177 |
2 files changed, 26 insertions, 170 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6a82de1d7fe..d9b7f192b51 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34212,7 +34212,9 @@ static SDValue detectAVX512USatPattern(SDValue In, EVT VT,
 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
+  EVT SVT = VT.getScalarType();
   EVT InVT = In.getValueType();
+  EVT InSVT = InVT.getScalarType();
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   if (TLI.isTypeLegal(InVT) && TLI.isTypeLegal(VT) &&
       isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) {
@@ -34221,16 +34223,19 @@ static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
     if (auto USatVal = detectUSatPattern(In, VT))
       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
   }
-  if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) &&
-      ((VT.getScalarType() == MVT::i8 && InVT.getScalarType() == MVT::i16) ||
-       (VT.getScalarType() == MVT::i16 && InVT.getScalarType() == MVT::i32))) {
-    if (auto SSatVal = detectSSatPattern(In, VT))
-      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
-                                    Subtarget);
-    if (Subtarget.hasSSE41() || VT.getScalarType() == MVT::i8)
+  if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements())) {
+    if ((SVT == MVT::i8 || SVT == MVT::i16) &&
+        (InSVT == MVT::i16 || InSVT == MVT::i32)) {
+      if (auto SSatVal = detectSSatPattern(In, VT))
+        return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
+                                      Subtarget);
+    }
+    if ((SVT == MVT::i8 && InSVT == MVT::i16) ||
+        (SVT == MVT::i16 && InSVT == MVT::i32 && Subtarget.hasSSE41())) {
       if (auto USatVal = detectSSatPattern(In, VT, true))
         return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
                                       Subtarget);
+    }
   }
   return SDValue();
 }
diff --git a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
index 08c51c89ddc..3fe7c333328 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
@@ -2723,178 +2723,29 @@ define <8 x i8> @trunc_ssat_v8i32_v8i8(<8 x i32> %a0) {
 }
 
 define <16 x i8> @trunc_ssat_v16i32_v16i8(<16 x i32> %a0) {
-; SSE2-LABEL: trunc_ssat_v16i32_v16i8:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [127,127,127,127]
-; SSE2-NEXT:    movdqa %xmm5, %xmm4
-; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT:    pand %xmm4, %xmm3
-; SSE2-NEXT:    pandn %xmm5, %xmm4
-; SSE2-NEXT:    por %xmm3, %xmm4
-; SSE2-NEXT:    movdqa %xmm5, %xmm3
-; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT:    pand %xmm3, %xmm2
-; SSE2-NEXT:    pandn %xmm5, %xmm3
-; SSE2-NEXT:    por %xmm2, %xmm3
-; SSE2-NEXT:    movdqa %xmm5, %xmm2
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT:    pand %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm5, %xmm2
-; SSE2-NEXT:    por %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm5, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT:    pand %xmm1, %xmm0
-; SSE2-NEXT:    pandn %xmm5, %xmm1
-; SSE2-NEXT:    por %xmm0, %xmm1
-; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [4294967168,4294967168,4294967168,4294967168]
-; SSE2-NEXT:    movdqa %xmm1, %xmm0
-; SSE2-NEXT:    pcmpgtd %xmm5, %xmm0
-; SSE2-NEXT:    pand %xmm0, %xmm1
-; SSE2-NEXT:    pandn %xmm5, %xmm0
-; SSE2-NEXT:    por %xmm1, %xmm0
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pcmpgtd %xmm5, %xmm1
-; SSE2-NEXT:    pand %xmm1, %xmm2
-; SSE2-NEXT:    pandn %xmm5, %xmm1
-; SSE2-NEXT:    por %xmm2, %xmm1
-; SSE2-NEXT:    movdqa %xmm3, %xmm2
-; SSE2-NEXT:    pcmpgtd %xmm5, %xmm2
-; SSE2-NEXT:    pand %xmm2, %xmm3
-; SSE2-NEXT:    pandn %xmm5, %xmm2
-; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm4, %xmm3
-; SSE2-NEXT:    pcmpgtd %xmm5, %xmm3
-; SSE2-NEXT:    pand %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm5, %xmm3
-; SSE2-NEXT:    por %xmm4, %xmm3
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT:    pand %xmm4, %xmm3
-; SSE2-NEXT:    pand %xmm4, %xmm2
-; SSE2-NEXT:    packuswb %xmm3, %xmm2
-; SSE2-NEXT:    pand %xmm4, %xmm1
-; SSE2-NEXT:    pand %xmm4, %xmm0
-; SSE2-NEXT:    packuswb %xmm1, %xmm0
-; SSE2-NEXT:    packuswb %xmm2, %xmm0
-; SSE2-NEXT:    retq
-;
-; SSSE3-LABEL: trunc_ssat_v16i32_v16i8:
-; SSSE3:       # %bb.0:
-; SSSE3-NEXT:    movdqa {{.*#+}} xmm5 = [127,127,127,127]
-; SSSE3-NEXT:    movdqa %xmm5, %xmm4
-; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm4
-; SSSE3-NEXT:    pand %xmm4, %xmm3
-; SSSE3-NEXT:    pandn %xmm5, %xmm4
-; SSSE3-NEXT:    por %xmm3, %xmm4
-; SSSE3-NEXT:    movdqa %xmm5, %xmm3
-; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm3
-; SSSE3-NEXT:    pand %xmm3, %xmm2
-; SSSE3-NEXT:    pandn %xmm5, %xmm3
-; SSSE3-NEXT:    por %xmm2, %xmm3
-; SSSE3-NEXT:    movdqa %xmm5, %xmm2
-; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT:    pand %xmm2, %xmm1
-; SSSE3-NEXT:    pandn %xmm5, %xmm2
-; SSSE3-NEXT:    por %xmm1, %xmm2
-; SSSE3-NEXT:    movdqa %xmm5, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT:    pand %xmm1, %xmm0
-; SSSE3-NEXT:    pandn %xmm5, %xmm1
-; SSSE3-NEXT:    por %xmm0, %xmm1
-; SSSE3-NEXT:    movdqa {{.*#+}} xmm5 = [4294967168,4294967168,4294967168,4294967168]
-; SSSE3-NEXT:    movdqa %xmm1, %xmm0
-; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm0
-; SSSE3-NEXT:    pand %xmm0, %xmm1
-; SSSE3-NEXT:    pandn %xmm5, %xmm0
-; SSSE3-NEXT:    por %xmm1, %xmm0
-; SSSE3-NEXT:    movdqa %xmm2, %xmm1
-; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm1
-; SSSE3-NEXT:    pand %xmm1, %xmm2
-; SSSE3-NEXT:    pandn %xmm5, %xmm1
-; SSSE3-NEXT:    por %xmm2, %xmm1
-; SSSE3-NEXT:    movdqa %xmm3, %xmm2
-; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm2
-; SSSE3-NEXT:    pand %xmm2, %xmm3
-; SSSE3-NEXT:    pandn %xmm5, %xmm2
-; SSSE3-NEXT:    por %xmm3, %xmm2
-; SSSE3-NEXT:    movdqa %xmm4, %xmm3
-; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm3
-; SSSE3-NEXT:    pand %xmm3, %xmm4
-; SSSE3-NEXT:    pandn %xmm5, %xmm3
-; SSSE3-NEXT:    por %xmm4, %xmm3
-; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSSE3-NEXT:    pand %xmm4, %xmm3
-; SSSE3-NEXT:    pand %xmm4, %xmm2
-; SSSE3-NEXT:    packuswb %xmm3, %xmm2
-; SSSE3-NEXT:    pand %xmm4, %xmm1
-; SSSE3-NEXT:    pand %xmm4, %xmm0
-; SSSE3-NEXT:    packuswb %xmm1, %xmm0
-; SSSE3-NEXT:    packuswb %xmm2, %xmm0
-; SSSE3-NEXT:    retq
-;
-; SSE41-LABEL: trunc_ssat_v16i32_v16i8:
-; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [127,127,127,127]
-; SSE41-NEXT:    pminsd %xmm4, %xmm3
-; SSE41-NEXT:    pminsd %xmm4, %xmm2
-; SSE41-NEXT:    pminsd %xmm4, %xmm1
-; SSE41-NEXT:    pminsd %xmm4, %xmm0
-; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [4294967168,4294967168,4294967168,4294967168]
-; SSE41-NEXT:    pmaxsd %xmm4, %xmm0
-; SSE41-NEXT:    pmaxsd %xmm4, %xmm1
-; SSE41-NEXT:    pmaxsd %xmm4, %xmm2
-; SSE41-NEXT:    pmaxsd %xmm4, %xmm3
-; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE41-NEXT:    pand %xmm4, %xmm3
-; SSE41-NEXT:    pand %xmm4, %xmm2
-; SSE41-NEXT:    packuswb %xmm3, %xmm2
-; SSE41-NEXT:    pand %xmm4, %xmm1
-; SSE41-NEXT:    pand %xmm4, %xmm0
-; SSE41-NEXT:    packuswb %xmm1, %xmm0
-; SSE41-NEXT:    packuswb %xmm2, %xmm0
-; SSE41-NEXT:    retq
+; SSE-LABEL: trunc_ssat_v16i32_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    packssdw %xmm3, %xmm2
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    packsswb %xmm2, %xmm0
+; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_ssat_v16i32_v16i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [127,127,127,127]
-; AVX1-NEXT:    vpminsd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpminsd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpminsd %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpminsd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [4294967168,4294967168,4294967168,4294967168]
-; AVX1-NEXT:    vpmaxsd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpmaxsd %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpmaxsd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpmaxsd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm3, %xmm4, %xmm2
-; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_ssat_v16i32_v16i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpminsd %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpminsd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168]
-; AVX2-NEXT:    vpmaxsd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpmaxsd %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;

