author     Sander de Smalen <sander.desmalen@arm.com>   2018-07-02 07:34:52 +0000
committer  Sander de Smalen <sander.desmalen@arm.com>   2018-07-02 07:34:52 +0000
commit     0325e304b94fbe69e794f827570617d352b3a71b (patch)
tree       040e68206e22ab034e9965a73e4f11b7c1797826 /llvm/lib
parent     66da390506d48e06a50fea7172d23f881793dd38 (diff)
Reapply r334980 and r334983.
These patches were previously reverted as they led to
buildbot time-outs caused by a large switch statement in
printAliasInstr when using UBSan and O3. The issue has
been addressed with a workaround (r335525).
llvm-svn: 336079
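
Note: the re-applied patches add assembler, disassembler and printer support for the 32-bit
forms of the SVE saturating increment/decrement instructions (sqinc/sqdec and uqinc/uqdec for
b/h/w/d element counts). As an illustrative sketch only — the concrete registers and pattern
operands below are arbitrary examples inferred from the AsmString and InstAlias definitions in
the diff, not taken from the patch's tests — the new forms accept assembly such as:

    sqincw  x0, w0                  // signed 32-bit form: 64-bit destination tied to its 32-bit source
    sqincw  x0, w0, all, mul #4     // with an explicit predicate pattern and multiplier
    uqincw  w0                      // unsigned 32-bit form
    uqincw  w0, pow2                // with an explicit predicate pattern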
Diffstat (limited to 'llvm/lib')
-rw-r--r--   llvm/lib/Target/AArch64/AArch64InstrFormats.td                12
-rw-r--r--   llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td                16
-rw-r--r--   llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp      112
-rw-r--r--   llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp    7
-rw-r--r--   llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h      2
-rw-r--r--   llvm/lib/Target/AArch64/SVEInstrFormats.td                    23
6 files changed, 154 insertions, 18 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index d71dc90b7a8..42328a38acc 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -179,11 +179,23 @@ def CondCode : AsmOperandClass {
 // A 32-bit register pasrsed as 64-bit
 def GPR32as64Operand : AsmOperandClass {
   let Name = "GPR32as64";
+  let ParserMethod =
+      "tryParseGPROperand<false, RegConstraintEqualityTy::EqualsSubReg>";
 }
 def GPR32as64 : RegisterOperand<GPR32> {
   let ParserMatchClass = GPR32as64Operand;
 }
 
+// A 64-bit register pasrsed as 32-bit
+def GPR64as32Operand : AsmOperandClass {
+  let Name = "GPR64as32";
+  let ParserMethod =
+      "tryParseGPROperand<false, RegConstraintEqualityTy::EqualsSuperReg>";
+}
+def GPR64as32 : RegisterOperand<GPR64, "printGPR64as32"> {
+  let ParserMatchClass = GPR64as32Operand;
+}
+
 // 8-bit immediate for AdvSIMD where 64-bit values of the form:
 // aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
 // are encoded as the eight bit value 'abcdefgh'.
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 8032f50eef9..536c55bf764 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -527,21 +527,37 @@ let Predicates = [HasSVE] in {
   defm CMPLO_WIDE_PPzZZ : sve_int_cmp_1_wide<0b110, "cmplo">;
   defm CMPLS_WIDE_PPzZZ : sve_int_cmp_1_wide<0b111, "cmpls">;
 
+  defm SQINCB_XPiWdI : sve_int_pred_pattern_b_s32<0b00000, "sqincb">;
+  defm UQINCB_WPiI   : sve_int_pred_pattern_b_u32<0b00001, "uqincb">;
+  defm SQDECB_XPiWdI : sve_int_pred_pattern_b_s32<0b00010, "sqdecb">;
+  defm UQDECB_WPiI   : sve_int_pred_pattern_b_u32<0b00011, "uqdecb">;
   defm SQINCB_XPiI   : sve_int_pred_pattern_b_x64<0b00100, "sqincb">;
   defm UQINCB_XPiI   : sve_int_pred_pattern_b_x64<0b00101, "uqincb">;
   defm SQDECB_XPiI   : sve_int_pred_pattern_b_x64<0b00110, "sqdecb">;
   defm UQDECB_XPiI   : sve_int_pred_pattern_b_x64<0b00111, "uqdecb">;
 
+  defm SQINCH_XPiWdI : sve_int_pred_pattern_b_s32<0b01000, "sqinch">;
+  defm UQINCH_WPiI   : sve_int_pred_pattern_b_u32<0b01001, "uqinch">;
+  defm SQDECH_XPiWdI : sve_int_pred_pattern_b_s32<0b01010, "sqdech">;
+  defm UQDECH_WPiI   : sve_int_pred_pattern_b_u32<0b01011, "uqdech">;
   defm SQINCH_XPiI   : sve_int_pred_pattern_b_x64<0b01100, "sqinch">;
   defm UQINCH_XPiI   : sve_int_pred_pattern_b_x64<0b01101, "uqinch">;
   defm SQDECH_XPiI   : sve_int_pred_pattern_b_x64<0b01110, "sqdech">;
   defm UQDECH_XPiI   : sve_int_pred_pattern_b_x64<0b01111, "uqdech">;
 
+  defm SQINCW_XPiWdI : sve_int_pred_pattern_b_s32<0b10000, "sqincw">;
+  defm UQINCW_WPiI   : sve_int_pred_pattern_b_u32<0b10001, "uqincw">;
+  defm SQDECW_XPiWdI : sve_int_pred_pattern_b_s32<0b10010, "sqdecw">;
+  defm UQDECW_WPiI   : sve_int_pred_pattern_b_u32<0b10011, "uqdecw">;
   defm SQINCW_XPiI   : sve_int_pred_pattern_b_x64<0b10100, "sqincw">;
   defm UQINCW_XPiI   : sve_int_pred_pattern_b_x64<0b10101, "uqincw">;
   defm SQDECW_XPiI   : sve_int_pred_pattern_b_x64<0b10110, "sqdecw">;
   defm UQDECW_XPiI   : sve_int_pred_pattern_b_x64<0b10111, "uqdecw">;
 
+  defm SQINCD_XPiWdI : sve_int_pred_pattern_b_s32<0b11000, "sqincd">;
+  defm UQINCD_WPiI   : sve_int_pred_pattern_b_u32<0b11001, "uqincd">;
+  defm SQDECD_XPiWdI : sve_int_pred_pattern_b_s32<0b11010, "sqdecd">;
+  defm UQDECD_WPiI   : sve_int_pred_pattern_b_u32<0b11011, "uqdecd">;
   defm SQINCD_XPiI   : sve_int_pred_pattern_b_x64<0b11100, "sqincd">;
   defm UQINCD_XPiI   : sve_int_pred_pattern_b_x64<0b11101, "uqincd">;
   defm SQDECD_XPiI   : sve_int_pred_pattern_b_x64<0b11110, "sqdecd">;
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 5736802affe..d042370acf4 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -66,6 +66,12 @@ enum class RegKind {
   SVEPredicateVector
 };
 
+enum RegConstraintEqualityTy {
+  EqualsReg,
+  EqualsSuperReg,
+  EqualsSubReg
+};
+
 class AArch64AsmParser : public MCTargetAsmParser {
 private:
   StringRef Mnemonic; ///< Instruction mnemonic.
@@ -92,7 +98,8 @@ private:
   bool parseOperand(OperandVector &Operands, bool isCondCode,
                     bool invertCondCode);
 
-  bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
+  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
+                      OperandVector &Operands);
 
   bool parseDirectiveArch(SMLoc L);
   bool parseDirectiveCPU(SMLoc L);
@@ -139,7 +146,8 @@ private:
   bool tryParseNeonVectorRegister(OperandVector &Operands);
   OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
-  template <bool ParseShiftExtend>
+  template <bool ParseShiftExtend,
+            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
   OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
   template <bool ParseShiftExtend, bool ParseSuffix>
   OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
@@ -177,6 +185,8 @@ public:
     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
   }
 
+  bool regsEqual(const MCParsedAsmOperand &Op1,
+                 const MCParsedAsmOperand &Op2) const override;
   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                         SMLoc NameLoc, OperandVector &Operands) override;
   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
@@ -231,6 +241,10 @@ private:
     RegKind Kind;
     int ElementWidth;
 
+    // The register may be allowed as a different register class,
+    // e.g. for GPR64as32 or GPR32as64.
+    RegConstraintEqualityTy EqualityTy;
+
    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
@@ -446,6 +460,11 @@ public:
     return Reg.RegNum;
   }
 
+  RegConstraintEqualityTy getRegEqualityTy() const {
+    assert(Kind == k_Register && "Invalid access!");
+    return Reg.EqualityTy;
+  }
+
   unsigned getVectorListStart() const {
     assert(Kind == k_VectorList && "Invalid access!");
     return VectorList.RegNum;
   }
@@ -554,14 +573,16 @@ public:
     return DiagnosticPredicateTy::NearMatch;
   }
 
-  bool isSVEPattern() const {
+  DiagnosticPredicate isSVEPattern() const {
     if (!isImm())
-      return false;
+      return DiagnosticPredicateTy::NoMatch;
     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
-      return false;
+      return DiagnosticPredicateTy::NoMatch;
     int64_t Val = MCE->getValue();
-    return Val >= 0 && Val < 32;
+    if (Val >= 0 && Val < 32)
+      return DiagnosticPredicateTy::Match;
+    return DiagnosticPredicateTy::NearMatch;
   }
 
   bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
@@ -1002,6 +1023,11 @@ public:
       AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
   }
 
+  bool isGPR64as32() const {
+    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
+      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
+  }
+
   bool isWSeqPair() const {
     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
            AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
@@ -1318,6 +1344,18 @@ public:
     Inst.addOperand(MCOperand::createReg(Reg));
   }
 
+  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    assert(
+        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
+
+    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
+        RI->getEncodingValue(getReg()));
+
+    Inst.addOperand(MCOperand::createReg(Reg));
+  }
+
   template <int Width>
   void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
     unsigned Base;
@@ -1668,6 +1706,7 @@ public:
   static std::unique_ptr<AArch64Operand>
   CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
+            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
             AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
             unsigned ShiftAmount = 0,
             unsigned HasExplicitAmount = false) {
@@ -1675,6 +1714,7 @@ public:
     Op->Reg.RegNum = RegNum;
     Op->Reg.Kind = Kind;
     Op->Reg.ElementWidth = 0;
+    Op->Reg.EqualityTy = EqTy;
     Op->Reg.ShiftExtend.Type = ExtTy;
     Op->Reg.ShiftExtend.Amount = ShiftAmount;
     Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
@@ -1692,7 +1732,7 @@ public:
     assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
             Kind == RegKind::SVEPredicateVector) &&
            "Invalid vector kind");
-    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, ExtTy, ShiftAmount,
+    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                         HasExplicitAmount);
     Op->Reg.ElementWidth = ElementWidth;
     return Op;
@@ -3164,7 +3204,7 @@ AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
   return MatchOperand_Success;
 }
 
-template <bool ParseShiftExtend>
+template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
 OperandMatchResultTy
 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
   SMLoc StartLoc = getLoc();
@@ -3177,7 +3217,7 @@ AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
   // No shift/extend is the default.
   if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
     Operands.push_back(AArch64Operand::CreateReg(
-        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
+        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
     return MatchOperand_Success;
   }
@@ -3191,10 +3231,10 @@ AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
     return Res;
 
   auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
-  Operands.push_back(AArch64Operand::CreateReg(RegNum, RegKind::Scalar,
-      StartLoc, Ext->getEndLoc(), getContext(),
-      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
-      Ext->hasShiftExtendAmount()));
+  Operands.push_back(AArch64Operand::CreateReg(
+      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
+      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
+      Ext->hasShiftExtendAmount()));
 
   return MatchOperand_Success;
 }
@@ -3412,6 +3452,30 @@ bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
   }
 }
 
+bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
+                                 const MCParsedAsmOperand &Op2) const {
+  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
+  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
+  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
+      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
+    return MCTargetAsmParser::regsEqual(Op1, Op2);
+
+  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
+         "Testing equality of non-scalar registers not supported");
+
+  // Check if a registers match their sub/super register classes.
+  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
+    return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
+  if (AOp1.getRegEqualityTy() == EqualsSubReg)
+    return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
+  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
+    return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
+  if (AOp2.getRegEqualityTy() == EqualsSubReg)
+    return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
+
+  return false;
+}
+
 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
 /// operands.
 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
@@ -3765,10 +3829,22 @@ static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
                                              unsigned VariantID = 0);
 
 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
+                                      uint64_t ErrorInfo,
                                       OperandVector &Operands) {
   switch (ErrCode) {
-  case Match_InvalidTiedOperand:
-    return Error(Loc, "operand must match destination register");
+  case Match_InvalidTiedOperand: {
+    RegConstraintEqualityTy EqTy =
+        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
+            .getRegEqualityTy();
+    switch (EqTy) {
+    case RegConstraintEqualityTy::EqualsSubReg:
+      return Error(Loc, "operand must be 64-bit form of destination register");
+    case RegConstraintEqualityTy::EqualsSuperReg:
+      return Error(Loc, "operand must be 32-bit form of destination register");
+    case RegConstraintEqualityTy::EqualsReg:
+      return Error(Loc, "operand must match destination register");
+    }
+  }
   case Match_MissingFeature:
     return Error(Loc,
                  "instruction requires a CPU feature not currently enabled");
@@ -4389,7 +4465,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     return Error(IDLoc, Msg);
   }
   case Match_MnemonicFail:
-    return showMatchError(IDLoc, MatchResult, Operands);
+    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
   case Match_InvalidOperand: {
     SMLoc ErrorLoc = IDLoc;
@@ -4408,7 +4484,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
       MatchResult = Match_InvalidSuffix;
 
-    return showMatchError(ErrorLoc, MatchResult, Operands);
+    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
   }
   case Match_InvalidTiedOperand:
   case Match_InvalidMemoryIndexed1:
@@ -4546,7 +4622,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
     if (ErrorLoc == SMLoc())
       ErrorLoc = IDLoc;
-    return showMatchError(ErrorLoc, MatchResult, Operands);
+    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
   }
 }
diff --git a/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp b/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
index 8f4fca71800..626c580663f 100644
--- a/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
+++ b/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
@@ -1527,3 +1527,10 @@ void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
   unsigned Val = MI->getOperand(OpNum).getImm();
   O << "#" << (Val ? Imm1Desc->Repr : Imm0Desc->Repr);
 }
+
+void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
+                                        const MCSubtargetInfo &STI,
+                                        raw_ostream &O) {
+  unsigned Reg = MI->getOperand(OpNum).getReg();
+  O << getRegisterName(getWRegFromXReg(Reg));
+}
diff --git a/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h b/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
index 15cb363dd3f..8dc9264f94a 100644
--- a/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
+++ b/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
@@ -180,6 +180,8 @@ protected:
   template <char = 0>
   void printSVERegOp(const MCInst *MI, unsigned OpNum,
                      const MCSubtargetInfo &STI, raw_ostream &O);
+  void printGPR64as32(const MCInst *MI, unsigned OpNum,
+                      const MCSubtargetInfo &STI, raw_ostream &O);
   template <int Width>
   void printZPRasFPR(const MCInst *MI, unsigned OpNum,
                      const MCSubtargetInfo &STI, raw_ostream &O);
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index d4e41310f9e..ff56d5c593a 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -333,9 +333,32 @@ class sve_int_pred_pattern_b<bits<5> opc, string asm, RegisterOperand dt,
   let Inst{9-5} = pattern;
   let Inst{4-0} = Rdn;
 
+  // Signed 32bit forms require their GPR operand printed.
+  let AsmString = !if(!eq(opc{2,0}, 0b00),
+                      !strconcat(asm, "\t$Rdn, $_Rdn, $pattern, mul $imm4"),
+                      !strconcat(asm, "\t$Rdn, $pattern, mul $imm4"));
+  let Constraints = "$Rdn = $_Rdn";
 }
 
+multiclass sve_int_pred_pattern_b_s32<bits<5> opc, string asm> {
+  def NAME : sve_int_pred_pattern_b<opc, asm, GPR64z, GPR64as32>;
+
+  def : InstAlias<asm # "\t$Rd, $Rn, $pattern",
+                  (!cast<Instruction>(NAME) GPR64z:$Rd, GPR64as32:$Rn, sve_pred_enum:$pattern, 1), 1>;
+  def : InstAlias<asm # "\t$Rd, $Rn",
+                  (!cast<Instruction>(NAME) GPR64z:$Rd, GPR64as32:$Rn, 0b11111, 1), 2>;
+}
+
+multiclass sve_int_pred_pattern_b_u32<bits<5> opc, string asm> {
+  def NAME : sve_int_pred_pattern_b<opc, asm, GPR32z, GPR32z>;
+
+  def : InstAlias<asm # "\t$Rdn, $pattern",
+                  (!cast<Instruction>(NAME) GPR32z:$Rdn, sve_pred_enum:$pattern, 1), 1>;
+  def : InstAlias<asm # "\t$Rdn",
+                  (!cast<Instruction>(NAME) GPR32z:$Rdn, 0b11111, 1), 2>;
+}
+
 multiclass sve_int_pred_pattern_b_x64<bits<5> opc, string asm> {
   def NAME : sve_int_pred_pattern_b<opc, asm, GPR64z, GPR64z>;