| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-26 10:57:05 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-26 10:57:05 +0000 |
| commit | 5beaac433dac39034fa85ca6fa785e86d3260ffa (patch) | |
| tree | ffdc60b0dd5062cb40faf73b29b600eab373f4fa /llvm/lib/Target | |
| parent | 75aca9409356366a7db618a1febfd26a62fcba93 (diff) | |
| download | bcm5719-llvm-5beaac433dac39034fa85ca6fa785e86d3260ffa.tar.gz bcm5719-llvm-5beaac433dac39034fa85ca6fa785e86d3260ffa.zip | |
[X86][SSE] Use ISD::MULHS for constant vXi16 ISD::SRA lowering (PR38151)
Similar to the existing ISD::SRL constant vector shifts from D49562, this patch adds ISD::SRA support with ISD::MULHS.
As we're dealing with signed values, we have to handle the shift-by-zero and shift-by-one special cases, so XOP+AVX2/AVX512 splitting/extension is still a better solution - really we should still use ISD::MULHS if only one of the special cases is needed, but for now I've just left a TODO and filtered by isKnownNeverZero.
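A minimal scalar sketch of the underlying identity (illustrative only, not part of the patch): for an i16 element x and a constant shift amount s with 2 <= s <= 15, the signed high-half multiply MULHS(x, 2^(16-s)) produces exactly x >> s. The s == 0 and s == 1 cases fail because the required scale (2^16 or 2^15) is not representable as a positive i16, which is why the lowering has to select the unshifted and shift-by-one results separately.

```cpp
#include <cassert>
#include <cstdint>

// x >> s computed as a signed high-half multiply: MULHS(x, 2^(16-s)).
// Requires 2 <= s <= 15 so the scale fits in a positive i16; this mirrors
// the (NumEltBits - Amt) scale factor built by the lowering.
int16_t sra_via_mulhs(int16_t x, unsigned s) {
  assert(s >= 2 && s <= 15 && "scale 2^(16-s) must fit in a positive i16");
  int16_t scale = int16_t(1 << (16 - s));
  return int16_t((int32_t(x) * int32_t(scale)) >> 16);
}

int main() {
  // Exhaustive check over all i16 values and all valid shift amounts.
  for (int x = -32768; x <= 32767; ++x)
    for (unsigned s = 2; s <= 15; ++s)
      assert(sra_via_mulhs(int16_t(x), s) == (int16_t(x) >> s));
  // s == 0 would need a scale of 2^16, which is unrepresentable in i16;
  // s == 1 would need 2^15, which wraps to -32768 and negates the product.
  // These are exactly the two special cases the patch handles with selects.
  return 0;
}
```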
Differential Revision: https://reviews.llvm.org/D52171
llvm-svn: 343093
Diffstat (limited to 'llvm/lib/Target')
| mode | path | changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 24 |
1 file changed, 24 insertions, 0 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 72a2260a44a..02164c85fee 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23931,6 +23931,30 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
     }
   }
 
+  // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
+  // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
+  // TODO: Special case handling for shift by 0/1, really we can afford either
+  // of these cases in pre-SSE41/XOP/AVX512 but not both.
+  if (Opc == ISD::SRA && ConstantAmt &&
+      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
+      ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
+        !Subtarget.hasAVX512()) ||
+       DAG.isKnownNeverZero(Amt))) {
+    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
+    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
+    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
+      SDValue Amt0 =
+          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
+      SDValue Amt1 =
+          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
+      SDValue Sra1 =
+          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
+      SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
+      Res = DAG.getSelect(dl, VT, Amt0, R, Res);
+      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
+    }
+  }
+
   // v4i32 Non Uniform Shifts.
   // If the shift amount is constant we can shift each lane using the SSE2
   // immediate shifts, else we need to zero-extend each lane to the lower i64
```
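To see what this buys at the instruction level, here is a hand-written SSE2 sketch (an assumed illustration, not code from the patch): a single pmulhw performs eight independent constant arithmetic shifts at once, provided every lane's shift amount lies in [2, 15] so neither special case triggers.

```cpp
#include <emmintrin.h> // SSE2: _mm_mulhi_epi16 (pmulhw)
#include <cstdint>
#include <cstdio>

int main() {
  // Eight i16 lanes to be shifted by per-lane constant amounts
  // s = {2, 3, 4, 5, 6, 7, 8, 9}.
  __m128i x = _mm_setr_epi16(-100, 100, -32768, 32767, -7, 7, -1, 256);
  // Per-lane scales 2^(16 - s), each representable as a positive i16.
  __m128i scale = _mm_setr_epi16(1 << 14, 1 << 13, 1 << 12, 1 << 11,
                                 1 << 10, 1 << 9, 1 << 8, 1 << 7);
  // Signed high-half multiply: lane i now holds x[i] >> s[i].
  __m128i res = _mm_mulhi_epi16(x, scale);

  int16_t out[8];
  _mm_storeu_si128(reinterpret_cast<__m128i *>(out), res);
  for (int i = 0; i < 8; ++i)
    std::printf("%d ", out[i]); // -25 12 -2048 1023 -1 0 -1 0
  std::printf("\n");
  return 0;
}
```

On AVX2 the same trick widens to v16i16 via vpmulhw, which is what the `VT == MVT::v16i16 && Subtarget.hasInt256()` guard in the patch allows for.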

