| author | David Green <david.green@arm.com> | 2019-07-15 10:44:50 +0000 |
|---|---|---|
| committer | David Green <david.green@arm.com> | 2019-07-15 10:44:50 +0000 |
| commit | da750b1688fb82ca28d89d2dbe08784ed16f978c (patch) | |
| tree | 01964c8866845aeaa097ea241f25c994b2e2f4db /llvm/lib/Target/ARM/ARMISelLowering.cpp | |
| parent | d021ad9fbeb6d29c8551879f703f45e263e7a700 (diff) | |
[ARM] Adjust how NEON shifts are lowered
This adjusts the way that we lower NEON shifts to use a DAG target node
rather than a neon intrinsic. This is useful for handling MVE shift
operations in the same way. It also renames some of the immediate shift
nodes for consistency, and moves some of the processing of immediate shifts
into LowerShift, allowing it to capture more cases.
Differential Revision: https://reviews.llvm.org/D64426
llvm-svn: 366051
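
As background for the message above (this sketch is not part of the patch): NEON has no right-shift-by-register instruction, only VSHL, whose per-lane count is signed, so a non-constant right shift is lowered as a VSHL by a negated count. A minimal standalone C++ model of that single-lane semantics, assuming in-range counts and illustrative names:

```cpp
// Standalone model of one lane of NEON VSHL (signed variant), illustrating
// why LowerShift can turn ISD::SRA into a VSHLs with a negated count.
// Sketch only: handles in-range counts, not the instruction's edge cases.
#include <cstdint>
#include <cstdio>

// NEON VSHL reads the low byte of each count lane as a signed value;
// a negative count shifts right (arithmetically for the signed form).
static int32_t vshlSignedLane(int32_t Val, int32_t Count) {
  int8_t C = static_cast<int8_t>(Count); // Only the bottom byte matters.
  if (C >= 0)
    return Val << C;
  return Val >> -C; // Arithmetic right shift on common implementations.
}

int main() {
  const int32_t Lanes[] = {-64, 100, -7, 1 << 20};
  const int32_t Shift = 3; // Imagine a non-constant, per-lane count.
  for (int32_t V : Lanes)
    std::printf("%11d >> %d = %11d; vshl by %d = %11d\n", V, Shift,
                V >> Shift, -Shift, vshlSignedLane(V, -Shift));
  return 0;
}
```

Splat-constant shift counts never reach this register-shift path; the patch matches them first and emits the immediate-form nodes (VSHLIMM/VSHRsIMM/VSHRuIMM) instead.
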
Diffstat (limited to 'llvm/lib/Target/ARM/ARMISelLowering.cpp')
| -rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 251 |
1 file changed, 136 insertions(+), 115 deletions(-)
```diff
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 5e2de61e288..5773c3ba04e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1501,23 +1501,25 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
   case ARMISD::VTST:          return "ARMISD::VTST";
-  case ARMISD::VSHL:          return "ARMISD::VSHL";
-  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
-  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
-  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
-  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
-  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
-  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
-  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
-  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
-  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
-  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
-  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
-  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
-  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
-  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
-  case ARMISD::VSLI:          return "ARMISD::VSLI";
-  case ARMISD::VSRI:          return "ARMISD::VSRI";
+  case ARMISD::VSHLs:         return "ARMISD::VSHLs";
+  case ARMISD::VSHLu:         return "ARMISD::VSHLu";
+  case ARMISD::VSHLIMM:       return "ARMISD::VSHLIMM";
+  case ARMISD::VSHRsIMM:      return "ARMISD::VSHRsIMM";
+  case ARMISD::VSHRuIMM:      return "ARMISD::VSHRuIMM";
+  case ARMISD::VRSHRsIMM:     return "ARMISD::VRSHRsIMM";
+  case ARMISD::VRSHRuIMM:     return "ARMISD::VRSHRuIMM";
+  case ARMISD::VRSHRNIMM:     return "ARMISD::VRSHRNIMM";
+  case ARMISD::VQSHLsIMM:     return "ARMISD::VQSHLsIMM";
+  case ARMISD::VQSHLuIMM:     return "ARMISD::VQSHLuIMM";
+  case ARMISD::VQSHLsuIMM:    return "ARMISD::VQSHLsuIMM";
+  case ARMISD::VQSHRNsIMM:    return "ARMISD::VQSHRNsIMM";
+  case ARMISD::VQSHRNuIMM:    return "ARMISD::VQSHRNuIMM";
+  case ARMISD::VQSHRNsuIMM:   return "ARMISD::VQSHRNsuIMM";
+  case ARMISD::VQRSHRNsIMM:   return "ARMISD::VQRSHRNsIMM";
+  case ARMISD::VQRSHRNuIMM:   return "ARMISD::VQRSHRNuIMM";
+  case ARMISD::VQRSHRNsuIMM:  return "ARMISD::VQRSHRNsuIMM";
+  case ARMISD::VSLIIMM:       return "ARMISD::VSLIIMM";
+  case ARMISD::VSRIIMM:       return "ARMISD::VSRIIMM";
   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
@@ -5136,7 +5138,7 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
                        DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
     if (VT == MVT::f64)
-      Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
+      Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
                          DAG.getConstant(32, dl, MVT::i32));
     else /*if (VT == MVT::f32)*/
@@ -5144,11 +5146,11 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
     if (SrcVT == MVT::f32) {
       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
       if (VT == MVT::f64)
-        Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
+        Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
                            DAG.getConstant(32, dl, MVT::i32));
     } else if (VT == MVT::f32)
-      Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
+      Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64,
                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
                          DAG.getConstant(32, dl, MVT::i32));
     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
@@ -5653,40 +5655,99 @@ static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
   return Res;
 }
 
+/// Getvshiftimm - Check if this is a valid build_vector for the immediate
+/// operand of a vector shift operation, where all the elements of the
+/// build_vector must have the same constant integer value.
+static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
+  // Ignore bit_converts.
+  while (Op.getOpcode() == ISD::BITCAST)
+    Op = Op.getOperand(0);
+  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
+  APInt SplatBits, SplatUndef;
+  unsigned SplatBitSize;
+  bool HasAnyUndefs;
+  if (!BVN ||
+      !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
+                            ElementBits) ||
+      SplatBitSize > ElementBits)
+    return false;
+  Cnt = SplatBits.getSExtValue();
+  return true;
+}
+
+/// isVShiftLImm - Check if this is a valid build_vector for the immediate
+/// operand of a vector shift left operation. That value must be in the range:
+///   0 <= Value < ElementBits for a left shift; or
+///   0 <= Value <= ElementBits for a long left shift.
+static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
+  assert(VT.isVector() && "vector shift count is not a vector type");
+  int64_t ElementBits = VT.getScalarSizeInBits();
+  if (!getVShiftImm(Op, ElementBits, Cnt))
+    return false;
+  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
+}
+
+/// isVShiftRImm - Check if this is a valid build_vector for the immediate
+/// operand of a vector shift right operation. For a shift opcode, the value
+/// is positive, but for an intrinsic the value count must be negative. The
+/// absolute value must be in the range:
+///   1 <= |Value| <= ElementBits for a right shift; or
+///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
+static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
+                         int64_t &Cnt) {
+  assert(VT.isVector() && "vector shift count is not a vector type");
+  int64_t ElementBits = VT.getScalarSizeInBits();
+  if (!getVShiftImm(Op, ElementBits, Cnt))
+    return false;
+  if (!isIntrinsic)
+    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
+  if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
+    Cnt = -Cnt;
+    return true;
+  }
+  return false;
+}
+
 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                           const ARMSubtarget *ST) {
   EVT VT = N->getValueType(0);
   SDLoc dl(N);
+  int64_t Cnt;
 
   if (!VT.isVector())
     return SDValue();
 
-  // Lower vector shifts on NEON to use VSHL.
-  assert(ST->hasNEON() && "unexpected vector shift");
+  // We essentially have two forms here. Shift by an immediate and shift by a
+  // vector register. We cannot easily match shift by an immediate in tablegen
+  // so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM. For shifting
+  // by a vector, we don't have VSHR, only VSHL (which can be signed or
+  // unsigned, and a negative shift indicates a shift right).
+  if (N->getOpcode() == ISD::SHL) {
+    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
+      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
+                         DAG.getConstant(Cnt, dl, MVT::i32));
+    return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
+                       N->getOperand(1));
+  }
 
-  // Left shifts translate directly to the vshiftu intrinsic.
-  if (N->getOpcode() == ISD::SHL)
-    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
-                                       MVT::i32),
-                       N->getOperand(0), N->getOperand(1));
+  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
+         "unexpected vector shift opcode");
 
-  assert((N->getOpcode() == ISD::SRA ||
-          N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
+  if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
+    unsigned VShiftOpc =
+        (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
+    return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
+                       DAG.getConstant(Cnt, dl, MVT::i32));
+  }
 
-  // NEON uses the same intrinsics for both left and right shifts.  For
-  // right shifts, the shift amounts are negative, so negate the vector of
-  // shift amounts.
+  // Other right shifts we don't have operations for (we use a shift left by a
+  // negative number).
   EVT ShiftVT = N->getOperand(1).getValueType();
-  SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
-                                     getZeroVector(ShiftVT, DAG, dl),
-                                     N->getOperand(1));
-  Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
-                             Intrinsic::arm_neon_vshifts :
-                             Intrinsic::arm_neon_vshiftu);
-  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                     DAG.getConstant(vshiftInt, dl, MVT::i32),
-                     N->getOperand(0), NegatedCount);
+  SDValue NegatedCount = DAG.getNode(
+      ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
+  unsigned VShiftOpc =
+      (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
+  return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
 }
 
 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
@@ -12574,58 +12635,6 @@ static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
                      ConvInput, DAG.getConstant(C, dl, MVT::i32));
 }
 
-/// Getvshiftimm - Check if this is a valid build_vector for the immediate
-/// operand of a vector shift operation, where all the elements of the
-/// build_vector must have the same constant integer value.
-static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
-  // Ignore bit_converts.
-  while (Op.getOpcode() == ISD::BITCAST)
-    Op = Op.getOperand(0);
-  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
-  APInt SplatBits, SplatUndef;
-  unsigned SplatBitSize;
-  bool HasAnyUndefs;
-  if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
-                                      HasAnyUndefs, ElementBits) ||
-      SplatBitSize > ElementBits)
-    return false;
-  Cnt = SplatBits.getSExtValue();
-  return true;
-}
-
-/// isVShiftLImm - Check if this is a valid build_vector for the immediate
-/// operand of a vector shift left operation. That value must be in the range:
-///   0 <= Value < ElementBits for a left shift; or
-///   0 <= Value <= ElementBits for a long left shift.
-static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
-  assert(VT.isVector() && "vector shift count is not a vector type");
-  int64_t ElementBits = VT.getScalarSizeInBits();
-  if (! getVShiftImm(Op, ElementBits, Cnt))
-    return false;
-  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
-}
-
-/// isVShiftRImm - Check if this is a valid build_vector for the immediate
-/// operand of a vector shift right operation. For a shift opcode, the value
-/// is positive, but for an intrinsic the value count must be negative. The
-/// absolute value must be in the range:
-///   1 <= |Value| <= ElementBits for a right shift; or
-///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
-static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
-                         int64_t &Cnt) {
-  assert(VT.isVector() && "vector shift count is not a vector type");
-  int64_t ElementBits = VT.getScalarSizeInBits();
-  if (! getVShiftImm(Op, ElementBits, Cnt))
-    return false;
-  if (!isIntrinsic)
-    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
-  if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
-    Cnt = -Cnt;
-    return true;
-  }
-  return false;
-}
-
 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
@@ -12661,12 +12670,12 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
   case Intrinsic::arm_neon_vshifts:
   case Intrinsic::arm_neon_vshiftu:
     if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
-      VShiftOpc = ARMISD::VSHL;
+      VShiftOpc = ARMISD::VSHLIMM;
       break;
     }
     if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
-      VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
-                   ARMISD::VSHRs : ARMISD::VSHRu);
+      VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM
+                                                        : ARMISD::VSHRuIMM);
       break;
     }
     return SDValue();
@@ -12711,29 +12720,41 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
       // Opcode already set above.
       break;
     case Intrinsic::arm_neon_vrshifts:
-      VShiftOpc = ARMISD::VRSHRs; break;
+      VShiftOpc = ARMISD::VRSHRsIMM;
+      break;
     case Intrinsic::arm_neon_vrshiftu:
-      VShiftOpc = ARMISD::VRSHRu; break;
+      VShiftOpc = ARMISD::VRSHRuIMM;
+      break;
     case Intrinsic::arm_neon_vrshiftn:
-      VShiftOpc = ARMISD::VRSHRN; break;
+      VShiftOpc = ARMISD::VRSHRNIMM;
+      break;
     case Intrinsic::arm_neon_vqshifts:
-      VShiftOpc = ARMISD::VQSHLs; break;
+      VShiftOpc = ARMISD::VQSHLsIMM;
+      break;
    case Intrinsic::arm_neon_vqshiftu:
-      VShiftOpc = ARMISD::VQSHLu; break;
+      VShiftOpc = ARMISD::VQSHLuIMM;
+      break;
    case Intrinsic::arm_neon_vqshiftsu:
-      VShiftOpc = ARMISD::VQSHLsu; break;
+      VShiftOpc = ARMISD::VQSHLsuIMM;
+      break;
    case Intrinsic::arm_neon_vqshiftns:
-      VShiftOpc = ARMISD::VQSHRNs; break;
+      VShiftOpc = ARMISD::VQSHRNsIMM;
+      break;
    case Intrinsic::arm_neon_vqshiftnu:
-      VShiftOpc = ARMISD::VQSHRNu; break;
+      VShiftOpc = ARMISD::VQSHRNuIMM;
+      break;
    case Intrinsic::arm_neon_vqshiftnsu:
-      VShiftOpc = ARMISD::VQSHRNsu; break;
+      VShiftOpc = ARMISD::VQSHRNsuIMM;
+      break;
    case Intrinsic::arm_neon_vqrshiftns:
-      VShiftOpc = ARMISD::VQRSHRNs; break;
+      VShiftOpc = ARMISD::VQRSHRNsIMM;
+      break;
    case Intrinsic::arm_neon_vqrshiftnu:
-      VShiftOpc = ARMISD::VQRSHRNu; break;
+      VShiftOpc = ARMISD::VQRSHRNuIMM;
+      break;
    case Intrinsic::arm_neon_vqrshiftnsu:
-      VShiftOpc = ARMISD::VQRSHRNsu; break;
+      VShiftOpc = ARMISD::VQRSHRNsuIMM;
+      break;
    }
 
    SDLoc dl(N);
@@ -12747,9 +12768,9 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
     unsigned VShiftOpc = 0;
 
     if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
-      VShiftOpc = ARMISD::VSLI;
+      VShiftOpc = ARMISD::VSLIIMM;
     else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
-      VShiftOpc = ARMISD::VSRI;
+      VShiftOpc = ARMISD::VSRIIMM;
     else {
       llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
     }
@@ -12840,7 +12861,7 @@ static SDValue PerformShiftCombine(SDNode *N,
   case ISD::SHL:
     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
       SDLoc dl(N);
-      return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
+      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                          DAG.getConstant(Cnt, dl, MVT::i32));
     }
     break;
@@ -12848,8 +12869,8 @@ static SDValue PerformShiftCombine(SDNode *N,
   case ISD::SRA:
   case ISD::SRL:
     if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
-      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
-                            ARMISD::VSHRs : ARMISD::VSHRu);
+      unsigned VShiftOpc =
+          (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
       SDLoc dl(N);
       return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                          DAG.getConstant(Cnt, dl, MVT::i32));
@@ -13619,7 +13640,7 @@ bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
   SDNode *U = *ExtVal->use_begin();
   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
-       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
+       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
     return false;
 
   return true;
```
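
For readers skimming the isVShiftLImm/isVShiftRImm helpers the patch moves above LowerShift: the encodable immediate ranges differ between left and right shifts. A hypothetical restatement in plain C++ (illustrative names, not LLVM API) with a few edge cases:

```cpp
// Restates the immediate ranges documented on isVShiftLImm/isVShiftRImm:
// left shifts accept 0..ElementBits-1 (0..ElementBits for "long" shifts),
// right shifts accept 1..ElementBits (1..ElementBits/2 for narrowing ones).
// Hypothetical helper names; not part of the patch.
#include <cassert>
#include <cstdint>

static bool validLeftShiftImm(int64_t Cnt, int64_t ElementBits, bool IsLong) {
  return Cnt >= 0 && (IsLong ? Cnt - 1 : Cnt) < ElementBits;
}

static bool validRightShiftImm(int64_t Cnt, int64_t ElementBits,
                               bool IsNarrow) {
  return Cnt >= 1 && Cnt <= (IsNarrow ? ElementBits / 2 : ElementBits);
}

int main() {
  // 32-bit lanes: vshl #31 is the widest encodable left shift, while
  // vshr #32 is still encodable on the right-shift side.
  assert(validLeftShiftImm(31, 32, /*IsLong=*/false));
  assert(!validLeftShiftImm(32, 32, /*IsLong=*/false));
  assert(validLeftShiftImm(32, 32, /*IsLong=*/true)); // e.g. vshll
  assert(validRightShiftImm(32, 32, /*IsNarrow=*/false));
  assert(!validRightShiftImm(0, 32, /*IsNarrow=*/false));
  assert(!validRightShiftImm(17, 32, /*IsNarrow=*/true)); // e.g. vshrn
  return 0;
}
```

Splat counts outside these ranges simply fail the check, and the shift falls through to the register-shift lowering shown in LowerShift above.
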

