Diffstat (limited to 'llvm')
-rw-r--r--   llvm/lib/Target/X86/X86ISelLowering.cpp    | 10
-rw-r--r--   llvm/test/CodeGen/X86/vector-trunc-ssat.ll | 54
2 files changed, 15 insertions, 49 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9e5cb113ac6..3e1806bc4ff 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34032,15 +34032,21 @@ static SDValue detectAVX512USatPattern(SDValue In, EVT VT,
 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
+  EVT InVT = In.getValueType();
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  if (!TLI.isTypeLegal(In.getValueType()) || !TLI.isTypeLegal(VT))
+  if (!TLI.isTypeLegal(InVT) || !TLI.isTypeLegal(VT))
     return SDValue();
-  if (isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget)) {
+  if (isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) {
     if (auto SSatVal = detectSSatPattern(In, VT))
       return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
     if (auto USatVal = detectUSatPattern(In, VT))
       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
   }
+  if (VT.getScalarType() == MVT::i8 && InVT.getScalarType() == MVT::i16) {
+    if (auto SSatVal = detectSSatPattern(In, VT))
+      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
+                                    Subtarget);
+  }
   return SDValue();
 }
 
diff --git a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
index d4c22be94d4..2dbfe76a6e8 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
@@ -3274,28 +3274,14 @@ define <16 x i8> @trunc_ssat_v16i16_v16i8(<16 x i16> %a0) {
 ; AVX1-LABEL: trunc_ssat_v16i16_v16i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpminsw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [65408,65408,65408,65408,65408,65408,65408,65408]
-; AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpmaxsw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_ssat_v16i16_v16i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpminsw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT:    vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -3409,44 +3395,18 @@ define <32 x i8> @trunc_ssat_v32i16_v32i8(<32 x i16> %a0) {
 ; AVX1-LABEL: trunc_ssat_v32i16_v32i8:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127]
-; AVX1-NEXT:    vpminsw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpminsw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpminsw %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpminsw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [65408,65408,65408,65408,65408,65408,65408,65408]
-; AVX1-NEXT:    vpmaxsw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpmaxsw %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpmaxsw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpmaxsw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
-; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_ssat_v32i16_v32i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT:    vpminsw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpminsw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408]
-; AVX2-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpmaxsw %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;

