diff options
| author | Sander de Smalen <sander.desmalen@arm.com> | 2018-01-02 13:39:44 +0000 |
|---|---|---|
| committer | Sander de Smalen <sander.desmalen@arm.com> | 2018-01-02 13:39:44 +0000 |
| commit | c9b3e1cf039e541dac031bebac0fd075197d5860 (patch) | |
| tree | de7f6b9af6cd78dc347a74c590dad8c19f0d4642 /llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp | |
| parent | 016860cf2fa92698088a13d60259102546c7be29 (diff) | |
| download | bcm5719-llvm-c9b3e1cf039e541dac031bebac0fd075197d5860.tar.gz bcm5719-llvm-c9b3e1cf039e541dac031bebac0fd075197d5860.zip | |
[AArch64][AsmParser] Add isScalarReg() and repurpose isReg()
Summary:
isReg() in AArch64AsmParser.cpp is a bit of a misnomer, and would be better named 'isScalarReg()' instead.
Patch [1/3] in a series to add operand constraint checks for SVE's predicated ADD/SUB.
Reviewers: rengolin, mcrosier, evandro, fhahn, echristo
Reviewed By: fhahn
Subscribers: aemerson, javed.absar, llvm-commits, kristof.beyls
Differential Revision: https://reviews.llvm.org/D41445
llvm-svn: 321646
Diffstat (limited to 'llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp')
| -rw-r--r-- | llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp | 24 |
1 file changed, 14 insertions, 10 deletions
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp index 6e63783e564..a480fa3b604 100644 --- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -819,6 +819,10 @@ public: } bool isReg() const override { + return Kind == k_Register; + } + + bool isScalarReg() const { return Kind == k_Register && Reg.Kind == RegKind::Scalar; } @@ -3148,7 +3152,7 @@ bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode, return true; if (Operands.size() < 2 || - !static_cast<AArch64Operand &>(*Operands[1]).isReg()) + !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg()) return Error(Loc, "Only valid when first operand is register"); bool IsXReg = @@ -3670,7 +3674,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, if (NumOperands == 4 && Tok == "lsl") { AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); - if (Op2.isReg() && Op3.isImm()) { + if (Op2.isScalarReg() && Op3.isImm()) { const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); if (Op3CE) { uint64_t Op3Val = Op3CE->getValue(); @@ -3702,7 +3706,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]); AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]); - if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) { + if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) { const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm()); const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm()); @@ -3758,7 +3762,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); AArch64Operand &Op4 = static_cast<AArch64Operand 
&>(*Operands[4]); - if (Op1.isReg() && Op3.isImm() && Op4.isImm()) { + if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); @@ -3822,7 +3826,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); - if (Op1.isReg() && Op3.isImm() && Op4.isImm()) { + if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); @@ -3901,7 +3905,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // The source register can be Wn here, but the matcher expects a // GPR64. Twiddle it here if necessary. AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); - if (Op.isReg()) { + if (Op.isScalarReg()) { unsigned Reg = getXRegFromWReg(Op.getReg()); Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, Op.getStartLoc(), Op.getEndLoc(), @@ -3911,13 +3915,13 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // FIXME: Likewise for sxt[bh] with a Xd dst operand else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) { AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); - if (Op.isReg() && + if (Op.isScalarReg() && AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( Op.getReg())) { // The source register can be Wn here, but the matcher expects a // GPR64. Twiddle it here if necessary. 
AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); - if (Op.isReg()) { + if (Op.isScalarReg()) { unsigned Reg = getXRegFromWReg(Op.getReg()); Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, Op.getStartLoc(), @@ -3928,13 +3932,13 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // FIXME: Likewise for uxt[bh] with a Xd dst operand else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) { AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); - if (Op.isReg() && + if (Op.isScalarReg() && AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( Op.getReg())) { // The source register can be Wn here, but the matcher expects a // GPR32. Twiddle it here if necessary. AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); - if (Op.isReg()) { + if (Op.isScalarReg()) { unsigned Reg = getWRegFromXReg(Op.getReg()); Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, Op.getStartLoc(), |

