Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 71
-rw-r--r-- | llvm/lib/Target/X86/X86InstrAVX512.td   | 96
2 files changed, 80 insertions, 87 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 16001e1a406..7baea19ffa5 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4702,7 +4702,8 @@ static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
 }
 
 /// Insert i1-subvector to i1-vector.
-static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
+static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG,
+                                const X86Subtarget &Subtarget) {
   SDLoc dl(Op);
 
   SDValue Vec = Op.getOperand(0);
@@ -4732,43 +4733,71 @@ static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
   // 3. Subvector should be inserted in the middle (for example v2i1
   //    to v16i1, index 2)
 
+  // extend to natively supported kshift
+  MVT MinVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
+  MVT WideOpVT = OpVT;
+  if (OpVT.getSizeInBits() < MinVT.getStoreSizeInBits())
+    WideOpVT = MinVT;
+
   SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
-  SDValue Undef = DAG.getUNDEF(OpVT);
-  SDValue WideSubVec =
-    DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef, SubVec, ZeroIdx);
-  if (Vec.isUndef())
-    return DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
-      DAG.getConstant(IdxVal, dl, MVT::i8));
+  SDValue Undef = DAG.getUNDEF(WideOpVT);
+  SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+                                   Undef, SubVec, ZeroIdx);
+
+  // Extract sub-vector if require.
+  auto ExtractSubVec = [&](SDValue V) {
+    return (WideOpVT == OpVT) ? V : DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+                                                OpVT, V, ZeroIdx);
+  };
+
+  if (Vec.isUndef()) {
+    if (IdxVal != 0) {
+      SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
+      WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec, ShiftBits);
+    }
+    return ExtractSubVec(WideSubVec);
+  }
 
   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
+    NumElems = WideOpVT.getVectorNumElements();
     unsigned ShiftLeft = NumElems - SubVecNumElems;
     unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
-    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
-      DAG.getConstant(ShiftLeft, dl, MVT::i8));
-    return ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, OpVT, WideSubVec,
-      DAG.getConstant(ShiftRight, dl, MVT::i8)) : WideSubVec;
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+                      DAG.getConstant(ShiftLeft, dl, MVT::i8));
+    Vec = ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec,
+                                   DAG.getConstant(ShiftRight, dl, MVT::i8)) : Vec;
+    return ExtractSubVec(Vec);
   }
 
   if (IdxVal == 0) {
     // Zero lower bits of the Vec
     SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
-    Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
-    Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
-    // Merge them together
-    return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+    // Merge them together, SubVec should be zero extended.
+    WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+                             getZeroVector(WideOpVT, Subtarget, DAG, dl),
+                             SubVec, ZeroIdx);
+    Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+    return ExtractSubVec(Vec);
  }
 
   // Simple case when we put subvector in the upper part
   if (IdxVal + SubVecNumElems == NumElems) {
     // Zero upper bits of the Vec
-    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec,
-      DAG.getConstant(IdxVal, dl, MVT::i8));
+    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+                             DAG.getConstant(IdxVal, dl, MVT::i8));
     SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
-    Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
-    Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
-    return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+    return ExtractSubVec(Vec);
   }
+
   // Subvector should be inserted in the middle - use shuffle
+  WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef,
+                           SubVec, ZeroIdx);
   SmallVector<int, 64> Mask;
   for (unsigned i = 0; i < NumElems; ++i)
     Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
@@ -12661,7 +12690,7 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
     return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
 
   if (OpVT.getVectorElementType() == MVT::i1)
-    return Insert1BitVector(Op, DAG);
+    return Insert1BitVector(Op, DAG, Subtarget);
 
   return SDValue();
 }
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index e4791dec1ed..3412ae39344 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -2445,7 +2445,6 @@ multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
 let Predicates = [HasBWI] in {
   defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
              VEX, TAPD, VEX_W;
-  let Predicates = [HasDQI] in
   defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
              VEX, TAPD;
 }
@@ -2482,88 +2481,53 @@ let Predicates = [HasAVX512] in {
   def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
   def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
 }
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
-def : Pat<(v8i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK32:$src, VK8))>;
-def : Pat<(v8i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK64:$src, VK8))>;
-
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
-          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
-
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK32:$src, VK16))>;
-def : Pat<(v16i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK64:$src, VK16))>;
-
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
-          (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
-
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK64:$src, VK32))>;
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
-          (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
-
-def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
-          (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
-
-def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
-          (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
+// Patterns for kmask insert_subvector/extract_subvector to/from index=0
+multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
+                                             RegisterClass RC, ValueType VT> {
+  def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
+            (subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
+
+  def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
+            (VT (COPY_TO_REGCLASS subRC:$src, RC))>;
+}
 
-def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
-          (v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
 
-def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
-def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
 
-def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
 
-def : Pat<(v32i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK2:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK4:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK8:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK16:$src, VK32))>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
 
-def : Pat<(v64i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK2:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK4:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK8:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK16:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK32:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK32:$src, VK64))>;
+defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
 
+def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
+          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
+          (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
+def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
+          (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
           (v8i1 (COPY_TO_REGCLASS
                  (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
                   (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
-          (v8i1 (COPY_TO_REGCLASS
-                 (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
-                  (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-
 def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
           (v4i1 (COPY_TO_REGCLASS
                  (KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
                   (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
-def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
-          (v4i1 (COPY_TO_REGCLASS
-                 (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16),
-                  (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
 //===----------------------------------------------------------------------===//
 // AVX-512 - Aligned and unaligned load and store
 //