Diffstat (limited to 'llvm/lib/Target/X86')
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 33 |
1 file changed, 9 insertions, 24 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c3c6cf331ac..29bf86475ea 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28435,15 +28435,13 @@ static SDValue getDivRem8(SDNode *N, SelectionDAG &DAG) {
   return R.getValue(1);
 }
 
-/// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
-/// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating
-/// with UNDEFs) of the input to vectors of the same size as the target type
-/// which then extends the lowest elements.
+/// Convert a SEXT of a vector to a SIGN_EXTEND_VECTOR_INREG, this requires
+/// the splitting (or concatenating with UNDEFs) of the input to vectors of the
+/// same size as the target type which then extends the lowest elements.
 static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget &Subtarget) {
-  unsigned Opcode = N->getOpcode();
-  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
+  if (N->getOpcode() != ISD::SIGN_EXTEND)
     return SDValue();
   if (!DCI.isBeforeLegalizeOps())
     return SDValue();
@@ -28464,12 +28462,6 @@ static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
   if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
     return SDValue();
 
-  // On AVX2+ targets, if the input/output types are both legal then we will be
-  // able to use SIGN_EXTEND/ZERO_EXTEND directly.
-  if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
-      DAG.getTargetLoweringInfo().isTypeLegal(InVT))
-    return SDValue();
-
   SDLoc DL(N);
 
   auto ExtendVecSize = [&DAG](SDLoc DL, SDValue N, unsigned Size) {
@@ -28489,22 +28481,20 @@ static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
     EVT ExVT = EVT::getVectorVT(*DAG.getContext(), SVT,
                                 128 / SVT.getSizeInBits());
     SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
-    SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
+    SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, ExVT, Ex);
     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
                        DAG.getIntPtrConstant(0, DL));
   }
 
   // If target-size is 128-bits (or 256-bits on AVX2 target), then convert to
-  // ISD::*_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::V*EXT.
+  // ISD::SIGN_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::VSEXT.
   if (VT.is128BitVector() || (VT.is256BitVector() && Subtarget.hasInt256())) {
     SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
-    return Opcode == ISD::SIGN_EXTEND
-               ? DAG.getSignExtendVectorInReg(ExOp, DL, VT)
-               : DAG.getZeroExtendVectorInReg(ExOp, DL, VT);
+    return DAG.getSignExtendVectorInReg(ExOp, DL, VT);
   }
 
   // On pre-AVX2 targets, split into 128-bit nodes of
-  // ISD::*_EXTEND_VECTOR_INREG.
+  // ISD::SIGN_EXTEND_VECTOR_INREG.
   if (!Subtarget.hasInt256() && !(VT.getSizeInBits() % 128)) {
     unsigned NumVecs = VT.getSizeInBits() / 128;
     unsigned NumSubElts = 128 / SVT.getSizeInBits();
@@ -28516,9 +28506,7 @@ static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
       SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
                                    DAG.getIntPtrConstant(Offset, DL));
       SrcVec = ExtendVecSize(DL, SrcVec, 128);
-      SrcVec = Opcode == ISD::SIGN_EXTEND
-                   ? DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT)
-                   : DAG.getZeroExtendVectorInReg(SrcVec, DL, SubVT);
+      SrcVec = DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT);
       Opnds.push_back(SrcVec);
     }
     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
@@ -28637,9 +28625,6 @@ static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
     }
   }
 
-  if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
-    return V;
-
   if (VT.is256BitVector())
     if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
       return R;
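
For reference, the doc comment above describes the combine as widening (or splitting) the input so that a SIGN_EXTEND_VECTOR_INREG can sign-extend only the lowest lanes into the wider result elements. Below is a minimal standalone sketch of that in-reg semantics on plain arrays; it is a conceptual model only, not LLVM code, and the helper name signExtendVectorInReg, the v16i8/v4i32 lane choices, and the zero-filled "undef" upper lanes are illustrative assumptions.

// Conceptual model (not LLVM code) of ISD::SIGN_EXTEND_VECTOR_INREG:
// the input has already been widened to the result's register size, and
// only its lowest lanes are sign-extended into the wider result lanes.
#include <array>
#include <cstdint>
#include <cstdio>

// Sign-extend the lowest NOut lanes of a 16 x i8 "register" into 32-bit
// lanes; the upper input lanes (undef in the DAG, zero here) are ignored.
template <size_t NOut>
std::array<int32_t, NOut>
signExtendVectorInReg(const std::array<int8_t, 16> &In) {
  std::array<int32_t, NOut> Out{};
  for (size_t I = 0; I != NOut; ++I)
    Out[I] = static_cast<int32_t>(In[I]); // integral conversion sign-extends
  return Out;
}

int main() {
  // A v4i8 value that the combine would first widen to v16i8 (upper lanes
  // undef), then sign-extend in-reg to v4i32.
  std::array<int8_t, 16> Widened{-1, 2, -128, 127};
  auto Res = signExtendVectorInReg<4>(Widened);
  for (int32_t V : Res)
    std::printf("%d ", V); // prints: -1 2 -128 127
  std::printf("\n");
}

This mirrors why the combine widens small inputs to 128 bits (or splits larger ones into 128-bit chunks and concatenates the results on pre-AVX2 targets): the in-reg node always consumes a full-width source vector and extends only its low elements.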

