Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/Hexagon/Hexagon.td | 56
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 66
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonPseudo.td | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonRegisterInfo.td | 46
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.cpp | 16
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.h | 9
-rw-r--r--  llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp | 2
7 files changed, 111 insertions, 86 deletions
diff --git a/llvm/lib/Target/Hexagon/Hexagon.td b/llvm/lib/Target/Hexagon/Hexagon.td
index 0c1ff9693e4..23221a9e175 100644
--- a/llvm/lib/Target/Hexagon/Hexagon.td
+++ b/llvm/lib/Target/Hexagon/Hexagon.td
@@ -25,10 +25,31 @@ include "llvm/Target/Target.td"
 include "HexagonDepArch.td"
 
 // Hexagon ISA Extensions
-def ExtensionHVX: SubtargetFeature<"hvx", "UseHVXOps", "true",
-      "Hexagon HVX instructions">;
-def ExtensionHVXDbl: SubtargetFeature<"hvx-double", "UseHVXDblOps", "true",
-      "Hexagon HVX Double instructions">;
+def ExtensionHVXV60: SubtargetFeature<"hvxv60", "HexagonHVXVersion",
+      "Hexagon::ArchEnum::V60", "Hexagon HVX instructions">;
+def ExtensionHVXV62: SubtargetFeature<"hvxv62", "HexagonHVXVersion",
+      "Hexagon::ArchEnum::V62", "Hexagon HVX instructions",
+      [ExtensionHVXV60]>;
+def ExtensionHVX: SubtargetFeature<"hvx", "HexagonHVXVersion",
+      "Hexagon::ArchEnum::V62", "Hexagon HVX instructions",
+      [ExtensionHVXV60,
+       ExtensionHVXV62]>;
+def ExtensionHVX64B
+    : SubtargetFeature<"hvx-length64b", "UseHVX64BOps", "true",
+                       "Hexagon HVX 64B instructions",
+                       [ExtensionHVXV60, ExtensionHVXV62]>;
+def ExtensionHVX128B
+    : SubtargetFeature<"hvx-length128b", "UseHVX128BOps", "true",
+                       "Hexagon HVX 128B instructions",
+                       [ExtensionHVXV60, ExtensionHVXV62]>;
+
+// This is an alias to ExtensionHVX128B to accept the hvx-double as
+// an acceptable subtarget feature.
+def ExtensionHVXDbl
+    : SubtargetFeature<"hvx-double", "UseHVX128BOps", "true",
+                       "Hexagon HVX 128B instructions",
+                       [ExtensionHVXV60, ExtensionHVXV62]>;
+
 def FeatureLongCalls: SubtargetFeature<"long-calls", "UseLongCalls", "true",
                                        "Use constant-extended calls">;
 
@@ -38,14 +59,21 @@ def FeatureLongCalls: SubtargetFeature<"long-calls", "UseLongCalls", "true",
 
 def UseMEMOP : Predicate<"HST->useMemOps()">;
 def IEEERndNearV5T : Predicate<"HST->modeIEEERndNear()">;
-def UseHVXDbl : Predicate<"HST->useHVXDblOps()">,
-                AssemblerPredicate<"ExtensionHVXDbl">;
-def UseHVXSgl : Predicate<"HST->useHVXSglOps()">;
-def UseHVX : Predicate<"HST->useHVXSglOps() ||HST->useHVXDblOps()">,
-             AssemblerPredicate<"ExtensionHVX">;
-
-def Hvx64 : HwMode<"+hvx,-hvx-double">;
-def Hvx128 : HwMode<"+hvx,+hvx-double">;
+def UseHVX64B : Predicate<"HST->useHVX64BOps()">,
+                AssemblerPredicate<"ExtensionHVX64B">;
+def UseHVX128B : Predicate<"HST->useHVX128BOps()">,
+                 AssemblerPredicate<"ExtensionHVX128B">;
+def UseHVX : Predicate<"HST->useHVXOps()">,
+             AssemblerPredicate<"ExtensionHVXV60">;
+def UseHVXV60 : Predicate<"HST->useHVXOps()">,
+                AssemblerPredicate<"ExtensionHVXV60">;
+def UseHVXV62 : Predicate<"HST->useHVXOps()">,
+                AssemblerPredicate<"ExtensionHVXV62">;
+
+def Hvx64 : HwMode<"+hvx-length64b">;
+def Hvx64old : HwMode<"-hvx-double">;
+def Hvx128 : HwMode<"+hvx-length128b">;
+def Hvx128old : HwMode<"+hvx-double">;
 
 //===----------------------------------------------------------------------===//
 // Classes used for relation maps.
@@ -274,9 +302,9 @@ def : Proc<"hexagonv5", HexagonModelV4,
 def : Proc<"hexagonv55", HexagonModelV55,
            [ArchV4, ArchV5, ArchV55]>;
 def : Proc<"hexagonv60", HexagonModelV60,
-           [ArchV4, ArchV5, ArchV55, ArchV60, ExtensionHVX]>;
+           [ArchV4, ArchV5, ArchV55, ArchV60]>;
 def : Proc<"hexagonv62", HexagonModelV62,
-           [ArchV4, ArchV5, ArchV55, ArchV60, ArchV62, ExtensionHVX]>;
+           [ArchV4, ArchV5, ArchV55, ArchV60, ArchV62]>;
 
 //===----------------------------------------------------------------------===//
 // Declare the target which we are implementing
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index d9d8dbec320..957fc8caccc 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -357,7 +357,7 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
   auto &MF = State.getMachineFunction();
   auto &HST = MF.getSubtarget<HexagonSubtarget>();
 
-  if (HST.useHVXSglOps() &&
+  if (HST.useHVX64BOps() &&
       (LocVT == MVT::v8i64 || LocVT == MVT::v16i32 || LocVT == MVT::v32i16 ||
        LocVT == MVT::v64i8 || LocVT == MVT::v512i1)) {
     if (unsigned Reg = State.AllocateReg(VecLstS)) {
@@ -368,7 +368,7 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
     return false;
   }
-  if (HST.useHVXSglOps() && (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 ||
+  if (HST.useHVX64BOps() && (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 ||
                              LocVT == MVT::v64i16 || LocVT == MVT::v128i8)) {
     if (unsigned Reg = State.AllocateReg(VecLstD)) {
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -379,8 +379,8 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
     return false;
   }
   // 128B Mode
-  if (HST.useHVXDblOps() && (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 ||
-                             LocVT == MVT::v128i16 || LocVT == MVT::v256i8)) {
+  if (HST.useHVX128BOps() && (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 ||
+                              LocVT == MVT::v128i16 || LocVT == MVT::v256i8)) {
     if (unsigned Reg = State.AllocateReg(VecLstD)) {
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
       return false;
@@ -389,7 +389,7 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
     return false;
   }
-  if (HST.useHVXDblOps() &&
+  if (HST.useHVX128BOps() &&
       (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||
        LocVT == MVT::v128i8 || LocVT == MVT::v1024i1)) {
     if (unsigned Reg = State.AllocateReg(VecLstS)) {
@@ -437,7 +437,7 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
     LocInfo = CCValAssign::Full;
   } else if (LocVT == MVT::v128i8 || LocVT == MVT::v64i16 ||
              LocVT == MVT::v32i32 || LocVT == MVT::v16i64 ||
-             (LocVT == MVT::v1024i1 && HST.useHVXDblOps())) {
+             (LocVT == MVT::v1024i1 && HST.useHVX128BOps())) {
     LocVT = MVT::v32i32;
     ValVT = MVT::v32i32;
     LocInfo = CCValAssign::Full;
@@ -507,7 +507,7 @@ static bool RetCC_HexagonVector(unsigned ValNo, MVT ValVT,
       return false;
     }
   } else if (LocVT == MVT::v32i32) {
-    unsigned Req = HST.useHVXDblOps() ? Hexagon::V0 : Hexagon::W0;
+    unsigned Req = HST.useHVX128BOps() ? Hexagon::V0 : Hexagon::W0;
     if (unsigned Reg = State.AllocateReg(Req)) {
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
       return false;
@@ -827,9 +827,9 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
       // V6 vectors passed by value have 64 or 128 byte alignment depending
       // on whether we are 64 byte vector mode or 128 byte.
-      bool UseHVXDbl = Subtarget.useHVXDblOps();
+      bool UseHVX128B = Subtarget.useHVX128BOps();
       assert(Subtarget.useHVXOps());
-      const unsigned ObjAlign = UseHVXDbl ? 128 : 64;
+      const unsigned ObjAlign = UseHVX128B ? 128 : 64;
       LargestAlignSeen = std::max(LargestAlignSeen, ObjAlign);
       MFI.ensureMaxAlignment(LargestAlignSeen);
     }
@@ -940,15 +940,15 @@ static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
 
   auto &HST = static_cast<const HexagonSubtarget&>(DAG.getSubtarget());
 
-  bool ValidHVXDblType =
-    HST.useHVXDblOps() && (VT == MVT::v32i32 || VT == MVT::v16i64 ||
-                           VT == MVT::v64i16 || VT == MVT::v128i8);
+  bool ValidHVX128BType =
+      HST.useHVX128BOps() && (VT == MVT::v32i32 || VT == MVT::v16i64 ||
+                              VT == MVT::v64i16 || VT == MVT::v128i8);
   bool ValidHVXType =
-    HST.useHVXSglOps() && (VT == MVT::v16i32 || VT == MVT::v8i64 ||
+      HST.useHVX64BOps() && (VT == MVT::v16i32 || VT == MVT::v8i64 ||
                            VT == MVT::v32i16 || VT == MVT::v64i8);
 
-  if (ValidHVXDblType || ValidHVXType ||
-      VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
+  if (ValidHVX128BType || ValidHVXType || VT == MVT::i64 || VT == MVT::i32 ||
+      VT == MVT::i16 || VT == MVT::i8) {
     IsInc = (Ptr->getOpcode() == ISD::ADD);
     Base = Ptr->getOperand(0);
     Offset = Ptr->getOperand(1);
@@ -1182,7 +1182,7 @@ SDValue HexagonTargetLowering::LowerFormalArguments(
           RegInfo.createVirtualRegister(&Hexagon::HvxVRRegClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
         InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
-      } else if (Subtarget.useHVXDblOps() &&
+      } else if (Subtarget.useHVX128BOps() &&
                  ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||
                    RegVT == MVT::v64i16 || RegVT == MVT::v128i8))) {
         unsigned VReg =
@@ -1197,7 +1197,7 @@ SDValue HexagonTargetLowering::LowerFormalArguments(
           RegInfo.createVirtualRegister(&Hexagon::HvxWRRegClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
         InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
-      } else if (Subtarget.useHVXDblOps() &&
+      } else if (Subtarget.useHVX128BOps() &&
                  ((RegVT == MVT::v32i64 || RegVT == MVT::v64i32 ||
                    RegVT == MVT::v128i16 || RegVT == MVT::v256i8))) {
         unsigned VReg =
@@ -1743,7 +1743,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
   }
 
   if (Subtarget.hasV60TOps()) {
-    if (Subtarget.useHVXSglOps()) {
+    if (Subtarget.useHVX64BOps()) {
       addRegisterClass(MVT::v64i8,  &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v32i16, &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v16i32, &Hexagon::HvxVRRegClass);
@@ -1753,7 +1753,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
       addRegisterClass(MVT::v32i32, &Hexagon::HvxWRRegClass);
       addRegisterClass(MVT::v16i64, &Hexagon::HvxWRRegClass);
       addRegisterClass(MVT::v512i1, &Hexagon::HvxQRRegClass);
-    } else if (Subtarget.useHVXDblOps()) {
+    } else if (Subtarget.useHVX128BOps()) {
       addRegisterClass(MVT::v128i8,  &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v64i16,  &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v32i32,  &Hexagon::HvxVRRegClass);
@@ -1992,7 +1992,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
 
   if (Subtarget.useHVXOps()) {
-    if (Subtarget.useHVXSglOps()) {
+    if (Subtarget.useHVX64BOps()) {
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i8, Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i16, Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i32, Custom);
@@ -2004,7 +2004,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64i8,  Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i16, Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
-    } else if (Subtarget.useHVXDblOps()) {
+    } else if (Subtarget.useHVX128BOps()) {
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v256i8,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i16, Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i32,  Custom);
@@ -2082,13 +2082,13 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
     setIndexedStoreAction(ISD::POST_INC, VT, Legal);
   }
 
-  if (Subtarget.useHVXSglOps()) {
+  if (Subtarget.useHVX64BOps()) {
     for (MVT VT : {MVT::v64i8,  MVT::v32i16, MVT::v16i32, MVT::v8i64,
                    MVT::v128i8, MVT::v64i16, MVT::v32i32, MVT::v16i64}) {
       setIndexedLoadAction(ISD::POST_INC, VT, Legal);
       setIndexedStoreAction(ISD::POST_INC, VT, Legal);
     }
-  } else if (Subtarget.useHVXDblOps()) {
+  } else if (Subtarget.useHVX128BOps()) {
     for (MVT VT : {MVT::v128i8, MVT::v64i16,  MVT::v32i32, MVT::v16i64,
                    MVT::v256i8, MVT::v128i16, MVT::v64i32, MVT::v32i64}) {
       setIndexedLoadAction(ISD::POST_INC, VT, Legal);
@@ -2353,8 +2353,8 @@ HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
   size_t MaskLen = Mask.size();
   unsigned SizeInBits = VT.getScalarSizeInBits() * MaskLen;
 
-  if ((Subtarget.useHVXSglOps() && SizeInBits == 64 * 8) ||
-      (Subtarget.useHVXDblOps() && SizeInBits == 128 * 8)) {
+  if ((Subtarget.useHVX64BOps() && SizeInBits == 64 * 8) ||
+      (Subtarget.useHVX128BOps() && SizeInBits == 128 * 8)) {
     StridedLoadKind Pattern = isStridedLoad(Mask);
     if (Pattern == StridedLoadKind::NoPattern)
       return SDValue();
@@ -2617,11 +2617,11 @@ HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
     return DAG.getNode(HexagonISD::COMBINE, dl, VT, Op.getOperand(1), Vec0);
 
   if (UseHVX) {
-    assert((Width == 64*8 && Subtarget.useHVXSglOps()) ||
-           (Width == 128*8 && Subtarget.useHVXDblOps()));
+    assert((Width == 64 * 8 && Subtarget.useHVX64BOps()) ||
+           (Width == 128 * 8 && Subtarget.useHVX128BOps()));
     SDValue Vec1 = Op.getOperand(1);
-    MVT OpTy = Subtarget.useHVXSglOps() ? MVT::v16i32 : MVT::v32i32;
-    MVT ReTy = Subtarget.useHVXSglOps() ? MVT::v32i32 : MVT::v64i32;
+    MVT OpTy = Subtarget.useHVX64BOps() ? MVT::v16i32 : MVT::v32i32;
+    MVT ReTy = Subtarget.useHVX64BOps() ? MVT::v32i32 : MVT::v64i32;
     SDValue B0 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec0);
     SDValue B1 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec1);
     SDValue VC = DAG.getNode(HexagonISD::VCOMBINE, dl, ReTy, B1, B0);
@@ -2667,7 +2667,7 @@ HexagonTargetLowering::LowerEXTRACT_SUBVECTOR_HVX(SDValue Op,
   EVT VT = Op.getOperand(0).getValueType();
   SDLoc dl(Op);
   bool UseHVX = Subtarget.useHVXOps();
-  bool UseHVXSgl = Subtarget.useHVXSglOps();
+  bool UseHVX64B = Subtarget.useHVX64BOps();
 
   // Just in case...
   if (!VT.isVector() || !UseHVX)
@@ -2675,7 +2675,7 @@ HexagonTargetLowering::LowerEXTRACT_SUBVECTOR_HVX(SDValue Op,
 
   EVT ResVT = Op.getValueType();
   unsigned ResSize = ResVT.getSizeInBits();
-  unsigned VectorSizeInBits = UseHVXSgl ? (64 * 8) : (128 * 8);
+  unsigned VectorSizeInBits = UseHVX64B ? (64 * 8) : (128 * 8);
   unsigned OpSize = VT.getSizeInBits();
 
   // We deal only with cases where the result is the vector size
@@ -3001,7 +3001,7 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(
       case 512:
         return std::make_pair(0U, &Hexagon::HvxVRRegClass);
       case 1024:
-        if (Subtarget.hasV60TOps() && Subtarget.useHVXDblOps())
+        if (Subtarget.hasV60TOps() && Subtarget.useHVX128BOps())
          return std::make_pair(0U, &Hexagon::HvxVRRegClass);
         return std::make_pair(0U, &Hexagon::HvxWRRegClass);
       case 2048:
@@ -3204,7 +3204,7 @@ HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
   case MVT::v32i32:
   case MVT::v16i64:
     if (Subtarget.hasV60TOps() && Subtarget.useHVXOps() &&
-        Subtarget.useHVXDblOps())
+        Subtarget.useHVX128BOps())
       RRC = &Hexagon::HvxVRRegClass;
     else
       RRC = &Hexagon::HvxWRRegClass;
diff --git a/llvm/lib/Target/Hexagon/HexagonPseudo.td b/llvm/lib/Target/Hexagon/HexagonPseudo.td
index 199148fde05..b2d66317b66 100644
--- a/llvm/lib/Target/Hexagon/HexagonPseudo.td
+++ b/llvm/lib/Target/Hexagon/HexagonPseudo.td
@@ -427,7 +427,7 @@ class LDrivv_template<RegisterClass RC, InstHexagon rootInst>
 def PS_vloadrw_ai: LDrivv_template<HvxWR, V6_vL32b_ai>,
       Requires<[HasV60T,UseHVX]>;
 def PS_vloadrw_nt_ai: LDrivv_template<HvxWR, V6_vL32b_nt_ai>,
-      Requires<[HasV60T,UseHVXSgl]>;
+      Requires<[HasV60T,UseHVX]>;
 def PS_vloadrwu_ai: LDrivv_template<HvxWR, V6_vL32Ub_ai>,
       Requires<[HasV60T,UseHVX]>;
 
diff --git a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
index b2e952a7612..51ef37f39a7 100644
--- a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -216,25 +216,33 @@ let Namespace = "Hexagon" in {
 
 // HVX types
 
-def VecI1   : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v512i1, v1024i1, v512i1]>;
-def VecI8   : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v64i8, v128i8, v64i8]>;
-def VecI16  : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v32i16, v64i16, v32i16]>;
-def VecI32  : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v16i32, v32i32, v16i32]>;
-def VecI64  : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v8i64, v16i64, v8i64]>;
-def VecPI8  : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v128i8, v256i8, v128i8]>;
-def VecPI16 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v64i16, v128i16, v64i16]>;
-def VecPI32 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v32i32, v64i32, v32i32]>;
-def VecPI64 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
-                                [v16i64, v32i64, v16i64]>;
-
+def VecI1
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v512i1, v512i1, v1024i1, v1024i1, v512i1]>;
+def VecI8
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v64i8, v64i8, v128i8, v128i8, v64i8]>;
+def VecI16
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v32i16, v32i16, v64i16, v64i16, v32i16]>;
+def VecI32
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v16i32, v16i32, v32i32, v32i32, v16i32]>;
+def VecI64
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v8i64, v8i64, v16i64, v16i64, v8i64]>;
+def VecPI8
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v128i8, v128i8, v256i8, v256i8, v128i8]>;
+def VecPI16
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v64i16, v64i16, v128i16, v128i16, v64i16]>;
+def VecPI32
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v32i32, v32i32, v64i32, v64i32, v32i32]>;
+def VecPI64
+    : ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
+                        [v16i64, v16i64, v32i64, v32i64, v16i64]>;
 
 // Register classes.
 //
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 2c6e34072b4..7ec4c34504b 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -53,14 +53,6 @@ static cl::opt<bool> EnableIEEERndNear("enable-hexagon-ieee-rnd-near",
 static cl::opt<bool> EnableBSBSched("enable-bsb-sched", cl::Hidden,
   cl::ZeroOrMore, cl::init(true));
 
-static cl::opt<bool> EnableHexagonHVXDouble("enable-hexagon-hvx-double",
-  cl::Hidden, cl::ZeroOrMore, cl::init(false),
-  cl::desc("Enable Hexagon Double Vector eXtensions"));
-
-static cl::opt<bool> EnableHexagonHVX("enable-hexagon-hvx",
-  cl::Hidden, cl::ZeroOrMore, cl::init(false),
-  cl::desc("Enable Hexagon Vector eXtensions"));
-
 static cl::opt<bool> EnableTCLatencySched("enable-tc-latency-sched",
   cl::Hidden, cl::ZeroOrMore, cl::init(false));
 
@@ -126,8 +118,8 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
   else
     llvm_unreachable("Unrecognized Hexagon processor version");
 
-  UseHVXOps = false;
-  UseHVXDblOps = false;
+  UseHVX128BOps = false;
+  UseHVX64BOps = false;
   UseLongCalls = false;
 
   UseMemOps = DisableMemOps ? false : EnableMemOps;
@@ -136,10 +128,6 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
 
   ParseSubtargetFeatures(CPUString, FS);
 
-  if (EnableHexagonHVX.getPosition())
-    UseHVXOps = EnableHexagonHVX;
-  if (EnableHexagonHVXDouble.getPosition())
-    UseHVXDblOps = EnableHexagonHVXDouble;
   if (OverrideLongCalls.getPosition())
     UseLongCalls = OverrideLongCalls;
 
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 9722852aa1d..54cf8e11d06 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -46,12 +46,13 @@ class Triple;
 class HexagonSubtarget : public HexagonGenSubtargetInfo {
   virtual void anchor();
 
-  bool UseMemOps, UseHVXOps, UseHVXDblOps;
+  bool UseMemOps, UseHVX64BOps, UseHVX128BOps;
   bool UseLongCalls;
   bool ModeIEEERndNear;
 
 public:
   Hexagon::ArchEnum HexagonArchVersion;
+  Hexagon::ArchEnum HexagonHVXVersion = Hexagon::ArchEnum::V4;
   /// True if the target should use Back-Skip-Back scheduling. This is the
   /// default for V60.
   bool UseBSBScheduling;
@@ -138,9 +139,9 @@ public:
   }
 
   bool modeIEEERndNear() const { return ModeIEEERndNear; }
-  bool useHVXOps() const { return UseHVXOps; }
-  bool useHVXDblOps() const { return UseHVXOps && UseHVXDblOps; }
-  bool useHVXSglOps() const { return UseHVXOps && !UseHVXDblOps; }
+  bool useHVXOps() const { return HexagonHVXVersion > Hexagon::ArchEnum::V4; }
+  bool useHVX128BOps() const { return useHVXOps() && UseHVX128BOps; }
+  bool useHVX64BOps() const { return useHVXOps() && UseHVX64BOps; }
   bool useLongCalls() const { return UseLongCalls; }
   bool usePredicatedCalls() const;
 
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 05bbf396944..6f48169be8c 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -288,7 +288,7 @@ MCSubtargetInfo *Hexagon_MC::createHexagonMCSubtargetInfo(const Triple &TT,
   }
 
   MCSubtargetInfo *X = createHexagonMCSubtargetInfoImpl(TT, CPUName, ArchFS);
-  if (X->getFeatureBits()[Hexagon::ExtensionHVXDbl]) {
+  if (X->getFeatureBits()[Hexagon::ExtensionHVX128B]) {
     llvm::FeatureBitset Features = X->getFeatureBits();
     X->setFeatureBits(Features.set(Hexagon::ExtensionHVX));
   }
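
Usage note for readers of this diff: the feature strings defined above in Hexagon.td ("hvxv60", "hvxv62", "hvx-length64b", "hvx-length128b", plus the legacy "hvx-double" alias) are ordinary subtarget features, so after this change HVX is selected with -mattr in the usual way (for example, llc -march=hexagon -mcpu=hexagonv60 -mattr=+hvx,+hvx-length128b) instead of the removed -enable-hexagon-hvx / -enable-hexagon-hvx-double cl::opt flags. The sketch below is illustrative only and not code from the patch; the helper pickHvxVectorBytes() is hypothetical, while the HexagonSubtarget accessors it calls are the ones declared in HexagonSubtarget.h above.

// Illustrative sketch only -- pickHvxVectorBytes() is a hypothetical helper,
// not part of this patch.  It relies solely on the accessors declared in
// HexagonSubtarget.h above.
#include "HexagonSubtarget.h"

static unsigned pickHvxVectorBytes(const HexagonSubtarget &HST) {
  if (!HST.useHVXOps())     // HexagonHVXVersion still at ArchEnum::V4
    return 0;               // HVX not enabled at all
  if (HST.useHVX128BOps())  // +hvx-length128b (or the +hvx-double alias)
    return 128;
  if (HST.useHVX64BOps())   // +hvx-length64b
    return 64;
  return 64;                // +hvx given without an explicit vector length;
                            // this sketch treats that as 64-byte mode
}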