Diffstat (limited to 'llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp')
-rw-r--r-- | llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 489
1 file changed, 424 insertions, 65 deletions
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 1e9341182f3..123e8b01309 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -69,6 +69,10 @@ using namespace llvm; +namespace llvm { +extern const MCInstrDesc ARMInsts[]; +} // end namespace llvm + namespace { enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly }; @@ -357,6 +361,18 @@ class ARMAsmParser : public MCTargetAsmParser { ITState.IsExplicit = true; } + struct { + unsigned Mask : 4; + unsigned CurPosition; + } VPTState; + bool inVPTBlock() { return VPTState.CurPosition != ~0U; } + void forwardVPTPosition() { + if (!inVPTBlock()) return; + unsigned TZ = countTrailingZeros(VPTState.Mask); + if (++VPTState.CurPosition == 5 - TZ) + VPTState.CurPosition = ~0U; + } + void Note(SMLoc L, const Twine &Msg, SMRange Range = None) { return getParser().Note(L, Msg, Range); } @@ -415,12 +431,15 @@ class ARMAsmParser : public MCTargetAsmParser { bool parseDirectiveAlign(SMLoc L); bool parseDirectiveThumbSet(SMLoc L); - StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, - bool &CarrySetting, unsigned &ProcessorIMod, - StringRef &ITMask); - void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, - bool &CanAcceptCarrySet, - bool &CanAcceptPredicationCode); + bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken); + StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken, + unsigned &PredicationCode, + unsigned &VPTPredicationCode, bool &CarrySetting, + unsigned &ProcessorIMod, StringRef &ITMask); + void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken, + StringRef FullInst, bool &CanAcceptCarrySet, + bool &CanAcceptPredicationCode, + bool &CanAcceptVPTPredicationCode); void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting, OperandVector &Operands); @@ -475,6 +494,12 @@ class ARMAsmParser : public MCTargetAsmParser { bool hasV8_1MMainline() const { return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps]; } + bool hasMVE() const { + return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps]; + } + bool hasMVEFloat() const { + return getSTI().getFeatureBits()[ARM::HasMVEFloatOps]; + } bool has8MSecExt() const { return getSTI().getFeatureBits()[ARM::Feature8MSecExt]; } @@ -557,6 +582,7 @@ class ARMAsmParser : public MCTargetAsmParser { bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out); bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands); bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands); + bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands); bool isITBlockTerminator(MCInst &Inst) const; void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands); bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, @@ -593,6 +619,8 @@ public: // Not in an ITBlock to start with. 
ITState.CurPosition = ~0U; + VPTState.CurPosition = ~0U; + NextSymbolIsThumb = false; } @@ -638,6 +666,7 @@ public: class ARMOperand : public MCParsedAsmOperand { enum KindTy { k_CondCode, + k_VPTPred, k_CCOut, k_ITCondMask, k_CoprocNum, @@ -680,6 +709,10 @@ class ARMOperand : public MCParsedAsmOperand { ARMCC::CondCodes Val; }; + struct VCCOp { + ARMVCC::VPTCodes Val; + }; + struct CopOp { unsigned Val; }; @@ -796,6 +829,7 @@ class ARMOperand : public MCParsedAsmOperand { union { struct CCOp CC; + struct VCCOp VCC; struct CopOp Cop; struct CoprocOptionOp CoprocOption; struct MBOptOp MBOpt; @@ -844,6 +878,11 @@ public: return CC.Val; } + ARMVCC::VPTCodes getVPTPred() const { + assert(isVPTPred() && "Invalid access!"); + return VCC.Val; + } + unsigned getCoproc() const { assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); return Cop.Val; @@ -917,6 +956,7 @@ public: bool isCoprocReg() const { return Kind == k_CoprocReg; } bool isCoprocOption() const { return Kind == k_CoprocOption; } bool isCondCode() const { return Kind == k_CondCode; } + bool isVPTPred() const { return Kind == k_VPTPred; } bool isCCOut() const { return Kind == k_CCOut; } bool isITMask() const { return Kind == k_ITCondMask; } bool isITCondCode() const { return Kind == k_CondCode; } @@ -1855,6 +1895,8 @@ public: return VectorList.Count == 4 && VectorList.LaneIndex <= 1; } + bool isVectorIndex() const { return Kind == k_VectorIndex; } + bool isVectorIndex8() const { if (Kind != k_VectorIndex) return false; return VectorIndex.Val < 8; @@ -2052,10 +2094,40 @@ public: bool isITCondCodeNoAL() const { if (!isITCondCode()) return false; - auto CC = (ARMCC::CondCodes) getCondCode(); + ARMCC::CondCodes CC = getCondCode(); return CC != ARMCC::AL; } + bool isITCondCodeRestrictedI() const { + if (!isITCondCode()) + return false; + ARMCC::CondCodes CC = getCondCode(); + return CC == ARMCC::EQ || CC == ARMCC::NE; + } + + bool isITCondCodeRestrictedS() const { + if (!isITCondCode()) + return false; + ARMCC::CondCodes CC = getCondCode(); + return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE || + CC == ARMCC::GE; + } + + bool isITCondCodeRestrictedU() const { + if (!isITCondCode()) + return false; + ARMCC::CondCodes CC = getCondCode(); + return CC == ARMCC::HS || CC == ARMCC::HI; + } + + bool isITCondCodeRestrictedFP() const { + if (!isITCondCode()) + return false; + ARMCC::CondCodes CC = getCondCode(); + return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT || + CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE; + } + void addExpr(MCInst &Inst, const MCExpr *Expr) const { // Add as immediates when possible. Null MCExpr = 0. if (!Expr) @@ -2083,6 +2155,30 @@ public: Inst.addOperand(MCOperand::createReg(RegNum)); } + void addVPTPredNOperands(MCInst &Inst, unsigned N) const { + assert(N == 2 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred()))); + unsigned RegNum = getVPTPred() == ARMVCC::None ? 
0: ARM::P0; + Inst.addOperand(MCOperand::createReg(RegNum)); + } + + void addVPTPredROperands(MCInst &Inst, unsigned N) const { + assert(N == 3 && "Invalid number of operands!"); + addVPTPredNOperands(Inst, N-1); + unsigned RegNum; + if (getVPTPred() == ARMVCC::None) { + RegNum = 0; + } else { + unsigned NextOpIndex = Inst.getNumOperands(); + const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()]; + int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO); + assert(TiedOp >= 0 && + "Inactive register in vpred_r is not tied to an output!"); + RegNum = Inst.getOperand(TiedOp).getReg(); + } + Inst.addOperand(MCOperand::createReg(RegNum)); + } + void addCoprocNumOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); Inst.addOperand(MCOperand::createImm(getCoproc())); @@ -2813,6 +2909,12 @@ public: Inst.addOperand(MCOperand::createImm(Imm)); } + void addPowerTwoOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); + Inst.addOperand(MCOperand::createImm(CE->getValue())); + } + void addMSRMaskOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask()))); @@ -3027,6 +3129,15 @@ public: return Op; } + static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC, + SMLoc S) { + auto Op = make_unique<ARMOperand>(k_VPTPred); + Op->VCC.Val = CC; + Op->StartLoc = S; + Op->EndLoc = S; + return Op; + } + static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) { auto Op = make_unique<ARMOperand>(k_CoprocNum); Op->Cop.Val = CopVal; @@ -3343,6 +3454,9 @@ void ARMOperand::print(raw_ostream &OS) const { case k_CondCode: OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; break; + case k_VPTPred: + OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">"; + break; case k_CCOut: OS << "<ccout " << RegName(getReg()) << ">"; break; @@ -5775,11 +5889,14 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { // FIXME: Would be nice to autogen this. // FIXME: This is a bit of a maze of special cases. StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, + StringRef ExtraToken, unsigned &PredicationCode, + unsigned &VPTPredicationCode, bool &CarrySetting, unsigned &ProcessorIMod, StringRef &ITMask) { PredicationCode = ARMCC::AL; + VPTPredicationCode = ARMVCC::None; CarrySetting = false; ProcessorIMod = 0; @@ -5857,12 +5974,30 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, } } + if (isMnemonicVPTPredicable(Mnemonic, ExtraToken)) { + unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1)); + if (CC != ~0U) { + Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1); + VPTPredicationCode = CC; + } + return Mnemonic; + } + // The "it" instruction has the condition mask on the end of the mnemonic. if (Mnemonic.startswith("it")) { ITMask = Mnemonic.slice(2, Mnemonic.size()); Mnemonic = Mnemonic.slice(0, 2); } + if (Mnemonic.startswith("vpst")) { + ITMask = Mnemonic.slice(4, Mnemonic.size()); + Mnemonic = Mnemonic.slice(0, 4); + } + else if (Mnemonic.startswith("vpt")) { + ITMask = Mnemonic.slice(3, Mnemonic.size()); + Mnemonic = Mnemonic.slice(0, 3); + } + return Mnemonic; } @@ -5870,9 +6005,14 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, /// inclusion of carry set or predication code operands. // // FIXME: It would be nice to autogen this. 
-void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, +void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, + StringRef ExtraToken, + StringRef FullInst, bool &CanAcceptCarrySet, - bool &CanAcceptPredicationCode) { + bool &CanAcceptPredicationCode, + bool &CanAcceptVPTPredicationCode) { + CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken); + CanAcceptCarrySet = Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || @@ -5906,7 +6046,8 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" || - Mnemonic == "cset" || Mnemonic == "csetm") { + Mnemonic == "cset" || Mnemonic == "csetm" || + Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst")) { // These mnemonics are never predicable CanAcceptPredicationCode = false; } else if (!isThumb()) { @@ -6158,6 +6299,24 @@ bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic, return false; } +bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic, + OperandVector &Operands) { + if (!hasMVE() || Operands.size() < 3) + return true; + + for (auto &Operand : Operands) { + // We check the larger class QPR instead of just the legal class + // MQPR, to more accurately report errors when using Q registers + // outside of the allowed range. + if (static_cast<ARMOperand &>(*Operand).isVectorIndex() || + (Operand->isReg() && + (ARMMCRegisterClasses[ARM::QPRRegClassID].contains( + Operand->getReg())))) + return false; + } + return true; +} + static bool isDataTypeToken(StringRef Tok) { return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || @@ -6249,14 +6408,16 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // Create the leading tokens for the mnemonic, split by '.' characters. size_t Start = 0, Next = Name.find('.'); StringRef Mnemonic = Name.slice(Start, Next); + StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1)); // Split out the predication code and carry setting flag from the mnemonic. unsigned PredicationCode; + unsigned VPTPredicationCode; unsigned ProcessorIMod; bool CarrySetting; StringRef ITMask; - Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, - ProcessorIMod, ITMask); + Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode, + CarrySetting, ProcessorIMod, ITMask); // In Thumb1, only the branch (B) instruction can be predicated. if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { @@ -6265,18 +6426,24 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); - // Handle the mask for IT instructions. In ARMOperand and MCOperand, - // this is stored in a format independent of the condition code: the - // lowest set bit indicates the end of the encoding, and above that, - // a 1 bit indicates 'else', and an 0 indicates 'then'. E.g. + // Handle the mask for IT and VPT instructions. 
In ARMOperand and + // MCOperand, this is stored in a format independent of the + // condition code: the lowest set bit indicates the end of the + // encoding, and above that, a 1 bit indicates 'else', and a 0 + // indicates 'then'. E.g. // IT -> 1000 // ITx -> x100 (ITT -> 0100, ITE -> 1100) // ITxy -> xy10 (e.g. ITET -> 1010) // ITxyz -> xyz1 (e.g. ITEET -> 1101) - if (Mnemonic == "it") { - SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); + if (Mnemonic == "it" || Mnemonic.startswith("vpt") || + Mnemonic.startswith("vpst")) { + SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) : + Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) : + SMLoc::getFromPointer(NameLoc.getPointer() + 4); if (ITMask.size() > 3) { - return Error(Loc, "too many conditions on IT instruction"); + if (Mnemonic == "it") + return Error(Loc, "too many conditions on IT instruction"); + return Error(Loc, "too many conditions on VPT instruction"); } unsigned Mask = 8; for (unsigned i = ITMask.size(); i != 0; --i) { @@ -6301,8 +6468,9 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // ConditionCode operands to match the mnemonic "as written" and then we let // the matcher deal with finding the right instruction or generating an // appropriate error. - bool CanAcceptCarrySet, CanAcceptPredicationCode; - getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode); + bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode; + getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet, + CanAcceptPredicationCode, CanAcceptVPTPredicationCode); // If we had a carry-set on an instruction that can't do that, issue an // error. @@ -6317,6 +6485,13 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, "' is not predicable, but condition code specified"); } + // If we had a VPT predication code on an instruction that can't do that, issue an + // error. + if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) { + return Error(NameLoc, "instruction '" + Mnemonic + + "' is not VPT predicable, but VPT code T/E is specified"); + } + // Add the carry setting operand, if necessary. if (CanAcceptCarrySet) { SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); @@ -6329,7 +6504,15 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + CarrySetting); Operands.push_back(ARMOperand::CreateCondCode( - ARMCC::CondCodes(PredicationCode), Loc)); + ARMCC::CondCodes(PredicationCode), Loc)); + } + + // Add the VPT predication code operand, if necessary. + if (CanAcceptVPTPredicationCode) { + SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + + CarrySetting); + Operands.push_back(ARMOperand::CreateVPTPred( + ARMVCC::VPTCodes(VPTPredicationCode), Loc)); } // Add the processor imod operand, if necessary. @@ -6345,7 +6528,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, while (Next != StringRef::npos) { Start = Next; Next = Name.find('.', Start + 1); - StringRef ExtraToken = Name.slice(Start, Next); + ExtraToken = Name.slice(Start, Next); // Some NEON instructions have an optional datatype suffix that is // completely ignored. Check for that.
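The hunk above turns the trailing 't'/'e' characters of an "it..."/"vpt..." mnemonic into the 4-bit mask format its comment describes. Below is a minimal, self-contained sketch of that encoding, assuming only the documented format (the lowest set bit terminates the mask; above it, a 1 means 'else' and a 0 means 'then'); the helper name encodeCondMask is illustrative and is not part of the parser.

#include <cassert>
#include <cstddef>
#include <string>

// Illustrative stand-alone helper (not LLVM code): build the 4-bit IT/VPT
// condition mask from the "t"/"e" suffix of a mnemonic such as "itet" or
// "vptee", walking the suffix from last character to first as the parser does.
static unsigned encodeCondMask(const std::string &Suffix) {
  unsigned Mask = 8; // bare "it"/"vpt" (no suffix) -> 1000
  for (std::size_t i = Suffix.size(); i != 0; --i) {
    Mask >>= 1;               // push the terminating bit down one position
    if (Suffix[i - 1] == 'e') // 'e' contributes a 1 above it, 't' a 0
      Mask |= 8;
  }
  return Mask;
}

int main() {
  assert(encodeCondMask("") == 0x8);    // IT    -> 1000
  assert(encodeCondMask("t") == 0x4);   // ITT   -> 0100
  assert(encodeCondMask("e") == 0xC);   // ITE   -> 1100
  assert(encodeCondMask("et") == 0xA);  // ITET  -> 1010
  assert(encodeCondMask("eet") == 0xD); // ITEET -> 1101
  return 0;
}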
@@ -6401,57 +6584,96 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // Some instructions have the same mnemonic, but don't always // have a predicate. Distinguish them here and delete the - // predicate if needed. + // appropriate predicate if needed. This could be either the scalar + // predication code or the vector predication code. if (PredicationCode == ARMCC::AL && shouldOmitPredicateOperand(Mnemonic, Operands)) Operands.erase(Operands.begin() + 1); - // ARM mode 'blx' need special handling, as the register operand version - // is predicable, but the label operand version is not. So, we can't rely - // on the Mnemonic based checking to correctly figure out when to put - // a k_CondCode operand in the list. If we're trying to match the label - // version, remove the k_CondCode operand here. - if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && - static_cast<ARMOperand &>(*Operands[2]).isImm()) - Operands.erase(Operands.begin() + 1); - // Adjust operands of ldrexd/strexd to MCK_GPRPair. - // ldrexd/strexd require even/odd GPR pair. To enforce this constraint, - // a single GPRPair reg operand is used in the .td file to replace the two - // GPRs. However, when parsing from asm, the two GRPs cannot be automatically - // expressed as a GPRPair, so we have to manually merge them. - // FIXME: We would really like to be able to tablegen'erate this. - if (!isThumb() && Operands.size() > 4 && - (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" || - Mnemonic == "stlexd")) { - bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd"); - unsigned Idx = isLoad ? 2 : 3; - ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]); - ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]); - - const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID); - // Adjust only if Op1 and Op2 are GPRs. - if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) && - MRC.contains(Op2.getReg())) { - unsigned Reg1 = Op1.getReg(); - unsigned Reg2 = Op2.getReg(); - unsigned Rt = MRI->getEncodingValue(Reg1); - unsigned Rt2 = MRI->getEncodingValue(Reg2); - - // Rt2 must be Rt + 1 and Rt must be even. - if (Rt + 1 != Rt2 || (Rt & 1)) { - return Error(Op2.getStartLoc(), - isLoad ? "destination operands must be sequential" - : "source operands must be sequential"); + if (hasMVE()) { + if (CanAcceptVPTPredicationCode) { + // For all other instructions, make sure only one of the two + // predication operands is left behind, depending on whether we should + // use the vector predication. 
+ if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) { + if (CanAcceptPredicationCode) + Operands.erase(Operands.begin() + 2); + else + Operands.erase(Operands.begin() + 1); + } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) { + Operands.erase(Operands.begin() + 1); } - unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0, - &(MRI->getRegClass(ARM::GPRPairRegClassID))); - Operands[Idx] = - ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc()); - Operands.erase(Operands.begin() + Idx + 1); } } + if (VPTPredicationCode != ARMVCC::None) { + bool usedVPTPredicationCode = false; + for (unsigned I = 1; I < Operands.size(); ++I) + if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) + usedVPTPredicationCode = true; + if (!usedVPTPredicationCode) { + // If we have a VPT predication code and we haven't just turned it + // into an operand, then it was a mistake for splitMnemonic to + // separate it from the rest of the mnemonic in the first place, + // and this may lead to wrong disassembly (e.g. scalar floating + // point VCMPE is actually a different instruction from VCMP, so + // we mustn't treat them the same). In that situation, glue it + // back on. + Mnemonic = Name.slice(0, Mnemonic.size() + 1); + Operands.erase(Operands.begin()); + Operands.insert(Operands.begin(), + ARMOperand::CreateToken(Mnemonic, NameLoc)); + } + } + + // ARM mode 'blx' needs special handling, as the register operand version + // is predicable, but the label operand version is not. So, we can't rely + // on the Mnemonic based checking to correctly figure out when to put + // a k_CondCode operand in the list. If we're trying to match the label + // version, remove the k_CondCode operand here. + if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && + static_cast<ARMOperand &>(*Operands[2]).isImm()) + Operands.erase(Operands.begin() + 1); + + // Adjust operands of ldrexd/strexd to MCK_GPRPair. + // ldrexd/strexd require even/odd GPR pair. To enforce this constraint, + // a single GPRPair reg operand is used in the .td file to replace the two + // GPRs. However, when parsing from asm, the two GPRs cannot be + // automatically + // expressed as a GPRPair, so we have to manually merge them. + // FIXME: We would really like to be able to tablegen'erate this. + if (!isThumb() && Operands.size() > 4 && + (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" || + Mnemonic == "stlexd")) { + bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd"); + unsigned Idx = isLoad ? 2 : 3; + ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]); + ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]); + + const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID); + // Adjust only if Op1 and Op2 are GPRs. + if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) && + MRC.contains(Op2.getReg())) { + unsigned Reg1 = Op1.getReg(); + unsigned Reg2 = Op2.getReg(); + unsigned Rt = MRI->getEncodingValue(Reg1); + unsigned Rt2 = MRI->getEncodingValue(Reg2); + + // Rt2 must be Rt + 1 and Rt must be even. + if (Rt + 1 != Rt2 || (Rt & 1)) { + return Error(Op2.getStartLoc(), + isLoad ?
"destination operands must be sequential" + : "source operands must be sequential"); + } + unsigned NewReg = MRI->getMatchingSuperReg( + Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID))); + Operands[Idx] = + ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc()); + Operands.erase(Operands.begin() + Idx + 1); + } + } + // GNU Assembler extension (compatibility). fixupGNULDRDAlias(Mnemonic, Operands); @@ -6610,6 +6832,17 @@ bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst, return false; } +static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) { + for (unsigned i = 0; i < MCID.NumOperands; ++i) { + if (ARM::isVpred(MCID.OpInfo[i].OperandType)) + return i; + } + return -1; +} + +static bool isVectorPredicable(const MCInstrDesc &MCID) { + return findFirstVectorPredOperandIdx(MCID) != -1; +} // FIXME: We would really like to be able to tablegen'erate this. bool ARMAsmParser::validateInstruction(MCInst &Inst, @@ -6668,6 +6901,28 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst, return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block"); } + if (inVPTBlock() && !instIsBreakpoint(Inst)) { + unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition); + if (!isVectorPredicable(MCID)) + return Error(Loc, "instruction in VPT block must be predicable"); + unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm(); + unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then; + if (Pred != VPTPred) { + SMLoc PredLoc; + for (unsigned I = 1; I < Operands.size(); ++I) + if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) + PredLoc = Operands[I]->getStartLoc(); + return Error(PredLoc, "incorrect predication in VPT block; got '" + + StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) + + "', but expected '" + + ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'"); + } + } + else if (isVectorPredicable(MCID) && + Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() != + ARMVCC::None) + return Error(Loc, "VPT predicated instructions must be in VPT block"); + const unsigned Opcode = Inst.getOpcode(); switch (Opcode) { case ARM::t2IT: { @@ -9335,6 +9590,35 @@ bool ARMAsmParser::processInstruction(MCInst &Inst, return true; } return false; + case ARM::t2VPST: + case ARM::t2VPTv16i8: + case ARM::t2VPTv8i16: + case ARM::t2VPTv4i32: + case ARM::t2VPTv16u8: + case ARM::t2VPTv8u16: + case ARM::t2VPTv4u32: + case ARM::t2VPTv16s8: + case ARM::t2VPTv8s16: + case ARM::t2VPTv4s32: + case ARM::t2VPTv4f32: + case ARM::t2VPTv8f16: + case ARM::t2VPTv16i8r: + case ARM::t2VPTv8i16r: + case ARM::t2VPTv4i32r: + case ARM::t2VPTv16u8r: + case ARM::t2VPTv8u16r: + case ARM::t2VPTv4u32r: + case ARM::t2VPTv16s8r: + case ARM::t2VPTv8s16r: + case ARM::t2VPTv4s32r: + case ARM::t2VPTv4f32r: + case ARM::t2VPTv8f16r: { + assert(!inVPTBlock() && "Nested VPT blocks are not allowed"); + MCOperand &MO = Inst.getOperand(0); + VPTState.Mask = MO.getImm(); + VPTState.CurPosition = 0; + break; + } } return false; } @@ -9575,6 +9859,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // Still progress the IT block, otherwise one wrong condition causes // nasty cascading errors. forwardITPosition(); + forwardVPTPosition(); return true; } @@ -9601,6 +9886,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // and process gets a consistent answer about whether we're in an IT // block. 
forwardITPosition(); + forwardVPTPosition(); // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and // doesn't actually encode. @@ -10980,3 +11266,76 @@ unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, } return Match_InvalidOperand; } + +bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic, + StringRef ExtraToken) { + if (!hasMVE()) + return false; + + return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") || + Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") || + Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") || + Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") || + Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") || + Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") || + Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") || + Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") || + Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") || + Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") || + Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") || + Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") || + Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") || + Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") || + Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") || + Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") || + Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") || + Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") || + Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") || + Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") || + Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") || + Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") || + Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") || + Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") || + Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") || + Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") || + Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") || + Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") || + Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") || + Mnemonic.startswith("vqabs") || + (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") || + Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") || + Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") || + Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") || + Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") || + Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") || + Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") || + Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") || + Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") || + Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") || + Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") || + Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") || + Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") || + Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") || + Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") || + Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") || + Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") || + 
Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") || + Mnemonic.startswith("vldrb") || + (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") || + (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") || + Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") || + Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") || + Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") || + Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") || + Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") || + Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") || + Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") || + Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") || + Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") || + Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") || + Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") || + Mnemonic.startswith("vcvt") || + (Mnemonic.startswith("vmov") && + !(ExtraToken == ".f16" || ExtraToken == ".32" || + ExtraToken == ".16" || ExtraToken == ".8")); +} |
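isMnemonicVPTPredicable() above is what gates the suffix handling added to splitMnemonic(): only for these MVE mnemonics does the parser strip a trailing 't'/'e' and record it as a vector predication code, so that, for example, the final 't' of an unrelated mnemonic is never misread as a predication suffix. Below is a minimal sketch of that split under stand-in names (VPTCode, vectorCondFromChar and splitVPTSuffix are illustrative, not the ARMVCC/ARMVectorCondCodeFromString definitions used in the patch).

#include <string>

enum VPTCode { VPT_None, VPT_Then, VPT_Else }; // stand-in for ARMVCC::VPTCodes

// Map a trailing mnemonic character to a vector predication code, if any.
static VPTCode vectorCondFromChar(char C) {
  if (C == 't') return VPT_Then;
  if (C == 'e') return VPT_Else;
  return VPT_None;
}

// Split e.g. "vaddt" into ("vadd", VPT_Then). The caller is assumed to have
// already checked that the mnemonic is VPT predicable before calling this.
static std::string splitVPTSuffix(std::string Mnemonic, VPTCode &Code) {
  Code = VPT_None;
  if (!Mnemonic.empty()) {
    VPTCode C = vectorCondFromChar(Mnemonic.back());
    if (C != VPT_None) { // trailing 't'/'e': strip it and remember the code
      Mnemonic.pop_back();
      Code = C;
    }
  }
  return Mnemonic;
}

int main() {
  VPTCode Code;
  std::string Base = splitVPTSuffix("vaddt", Code);
  return (Base == "vadd" && Code == VPT_Then) ? 0 : 1;
}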