Diffstat (limited to 'llvm/lib')
 llvm/lib/Target/X86/X86ISelLowering.cpp | 50
 1 file changed, 39 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3ffebf84521..4423b01f7d2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1309,6 +1309,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::FNEARBYINT, VT, Legal);
     }
 
+    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i64, Custom);
+    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i32, Custom);
+
+    // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
+    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v64i8, Custom);
+    setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v64i8, Custom);
+
     setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
     setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
     setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
@@ -1509,6 +1516,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::UMIN, MVT::v64i8, Legal);
     setOperationAction(ISD::UMIN, MVT::v32i16, Legal);
 
+    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
+
     setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
     if (Subtarget.hasVLX()) {
       setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
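
For readers following the mechanics: marking a (node, type) pair Custom tells the generic DAG legalizer to hand matching nodes to X86TargetLowering::LowerOperation, which is exactly where the new switch cases near the bottom of this patch pick them up. A minimal standalone sketch of that contract, with illustrative names only (this is not LLVM code):

// action_table.cpp -- illustrative model only, not LLVM code.
// Models the idea behind setOperationAction: an (opcode, type) -> action
// table that legalization consults; Custom defers to the target's
// LowerOperation hook.
#include <iostream>
#include <map>
#include <string>
#include <utility>

enum class Action { Legal, Expand, Custom };

int main() {
  std::map<std::pair<std::string, std::string>, Action> table;
  // The entries this patch registers for AVX-512 targets:
  table[{"SIGN_EXTEND_VECTOR_INREG", "v8i64"}] = Action::Custom;
  table[{"SIGN_EXTEND_VECTOR_INREG", "v16i32"}] = Action::Custom;
  // Without BWI, MVT::v64i8 inputs also take the custom path:
  table[{"SIGN_EXTEND_VECTOR_INREG", "v64i8"}] = Action::Custom;
  table[{"ZERO_EXTEND_VECTOR_INREG", "v64i8"}] = Action::Custom;

  for (const auto &[key, act] : table)
    std::cout << key.first << " on " << key.second << " -> "
              << (act == Action::Custom ? "LowerOperation (custom hook)"
                                        : "generic legalization")
              << '\n';
}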
@@ -16368,9 +16377,13 @@ static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
   return DAG.getNode(X86ISD::VTRUNC, dl, VT, V);
 }
 
-static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
-                                             const X86Subtarget &Subtarget,
-                                             SelectionDAG &DAG) {
+// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
+// For sign extend this needs to handle all vector sizes on both SSE4.1 and
+// non-SSE4.1 targets. For zero extend this should only handle inputs of
+// MVT::v64i8 when AVX512 is supported but BWI is not.
+static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
+                                        const X86Subtarget &Subtarget,
+                                        SelectionDAG &DAG) {
   SDValue In = Op->getOperand(0);
   MVT VT = Op->getSimpleValueType(0);
   MVT InVT = In.getSimpleValueType();
@@ -16385,20 +16398,33 @@ static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
   if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
     return SDValue();
   if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
-      !(VT.is256BitVector() && Subtarget.hasInt256()))
+      !(VT.is256BitVector() && Subtarget.hasInt256()) &&
+      !(VT.is512BitVector() && Subtarget.hasAVX512()))
     return SDValue();
 
   SDLoc dl(Op);
 
   // For 256-bit vectors, we only need the lower (128-bit) half of the input.
-  if (VT.is256BitVector())
-    In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
-                     MVT::getVectorVT(InSVT, InVT.getVectorNumElements() / 2),
-                     In, DAG.getIntPtrConstant(0, dl));
+  // For 512-bit vectors, we need 128 bits or 256 bits of the input.
+  if (VT.getSizeInBits() > 128) {
+    // Input needs to have at least the same number of elements as the output,
+    // and be at least 128 bits wide.
+    int InSize = InSVT.getSizeInBits() * VT.getVectorNumElements();
+    In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
+  }
+
+  assert((Op.getOpcode() != ISD::ZERO_EXTEND_VECTOR_INREG ||
+          InVT == MVT::v64i8) && "Zero extend only for v64i8 input!");
 
   // SSE41 targets can use the pmovsx* instructions directly.
+  unsigned ExtOpc = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ?
+                    X86ISD::VSEXT : X86ISD::VZEXT;
   if (Subtarget.hasSSE41())
-    return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
+    return DAG.getNode(ExtOpc, dl, VT, In);
+
+  // We should only get here for sign extend.
+  assert(Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
+         "Unexpected opcode!");
 
   // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
   SDValue Curr = In;
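
The subvector extraction is the subtle step in the hunk above: the input only has to supply one source element per result element, but the extract is clamped to at least 128 bits, the width of one xmm register. A standalone sketch of just that arithmetic (illustrative only; extractSubVector and the MVT names are the real identifiers from the patch):

// extract_width.cpp -- models the std::max(InSize, 128) clamp used by
// LowerEXTEND_VECTOR_INREG above. Illustrative only, not LLVM code.
#include <algorithm>
#include <cstdio>

// Bits of input kept: one source element per result element, but never a
// subvector narrower than 128 bits (one xmm register).
static int extractedBits(int inEltBits, int numResultElts) {
  int inSize = inEltBits * numResultElts;
  return std::max(inSize, 128);
}

int main() {
  // 512-bit results extended from i8 source elements, the new AVX-512 cases:
  std::printf("v8i64  from i8: keep %d bits\n", extractedBits(8, 8));  // 128
  std::printf("v16i32 from i8: keep %d bits\n", extractedBits(8, 16)); // 128
  std::printf("v32i16 from i8: keep %d bits\n", extractedBits(8, 32)); // 256
  // Matches the comment: 512-bit vectors need 128 or 256 bits of input.
  return 0;
}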
@@ -22075,8 +22101,9 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
   case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
   case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
+  case ISD::ZERO_EXTEND_VECTOR_INREG:
   case ISD::SIGN_EXTEND_VECTOR_INREG:
-    return LowerSIGN_EXTEND_VECTOR_INREG(Op, Subtarget, DAG);
+    return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
   case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
   case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
   case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
@@ -31120,7 +31147,8 @@ static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
   // ISD::*_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::V*EXT.
   // Also use this if we don't have SSE41 to allow the legalizer to do its job.
   if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
-      (VT.is256BitVector() && Subtarget.hasInt256())) {
+      (VT.is256BitVector() && Subtarget.hasInt256()) ||
+      (VT.is512BitVector() && Subtarget.hasAVX512())) {
     SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
     return Opcode == ISD::SIGN_EXTEND
                ? DAG.getSignExtendVectorInReg(ExOp, DL, VT)
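
The combine above now also fires for 512-bit result types, letting (sext/zext) nodes become *_EXTEND_VECTOR_INREG before lowering. A standalone sketch of the widened guard condition (illustrative only; the authoritative check is the if statement in the hunk above):

// combine_guard.cpp -- models the widened condition in
// combineToExtendVectorInReg. Illustrative only, not LLVM code.
#include <cstdio>

struct Features {
  bool sse41;
  bool int256;  // 256-bit integer ops (AVX2)
  bool avx512;
};

// True when a (sext/zext) is rewritten to *_EXTEND_VECTOR_INREG.
static bool useExtendVectorInReg(const Features &f, int vtBits) {
  return !f.sse41 ||                      // let the legalizer do its job
         vtBits == 128 ||
         (vtBits == 256 && f.int256) ||
         (vtBits == 512 && f.avx512);     // the case this patch adds
}

int main() {
  Features avx512f{true, true, true};
  Features avx2{true, true, false};
  std::printf("512-bit with AVX-512: %d\n", useExtendVectorInReg(avx512f, 512)); // 1
  std::printf("512-bit with AVX2:    %d\n", useExtendVectorInReg(avx2, 512));    // 0
}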