diff options
| author | Simon Tatham <simon.tatham@arm.com> | 2019-06-21 13:17:23 +0000 |
|---|---|---|
| committer | Simon Tatham <simon.tatham@arm.com> | 2019-06-21 13:17:23 +0000 |
| commit | 0c7af66450bae7eedde53c7cdbd6457a205307e3 (patch) | |
| tree | 14a7b8071311a58800a4ebcb434fb5ccb4f712b0 /llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp | |
| parent | bafb105e969755511a8281493b9f6f135a069c16 (diff) | |
| download | bcm5719-llvm-0c7af66450bae7eedde53c7cdbd6457a205307e3.tar.gz bcm5719-llvm-0c7af66450bae7eedde53c7cdbd6457a205307e3.zip | |
[ARM] Add MVE 64-bit GPR <-> vector move instructions.
These instructions let you load half a vector register at once from
two general-purpose registers, or vice versa.
The assembly syntax for these instructions mentions the vector
register name twice. For the move _into_ a vector register, the MC
operand list also has to mention the register name twice (once as the
output, and once as an input to represent where the unchanged half of
the output register comes from). So we can conveniently assign one of
the two asm operands to be the output $Qd, and the other $QdSrc, which
avoids confusing the auto-generated AsmMatcher too much. For the move
_from_ a vector register, there's no way to get round the fact that
both instances of that register name have to be inputs, so we need a
custom AsmMatchConverter to avoid generating two separate output MC
operands. (And even that wouldn't have worked if it hadn't been for
D60695.)
Reviewers: dmgreen, samparker, SjoerdMeijer, t.p.northover
Subscribers: javed.absar, kristof.beyls, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62679
llvm-svn: 364041
Diffstat (limited to 'llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp')
| -rw-r--r-- | llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 44 |
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index eb16bece258..8ecdb139a04 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -577,6 +577,7 @@ class ARMAsmParser : public MCTargetAsmParser { // Asm Match Converter Methods void cvtThumbMultiply(MCInst &Inst, const OperandVector &); void cvtThumbBranches(MCInst &Inst, const OperandVector &); + void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &); bool validateInstruction(MCInst &Inst, const OperandVector &Ops); bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out); @@ -1946,6 +1947,13 @@ public: bool isVectorIndex32() const { return isVectorIndexInRange<2>(); } bool isVectorIndex64() const { return isVectorIndexInRange<1>(); } + template<int PermittedValue, int OtherPermittedValue> + bool isMVEPairVectorIndex() const { + if (Kind != k_VectorIndex) return false; + return VectorIndex.Val == PermittedValue || + VectorIndex.Val == OtherPermittedValue; + } + bool isNEONi8splat() const { if (!isImm()) return false; const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); @@ -2996,6 +3004,11 @@ public: Inst.addOperand(MCOperand::createImm(getVectorIndex())); } + void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createImm(getVectorIndex())); + } + void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. 
@@ -5361,6 +5374,21 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst, ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2); } +void ARMAsmParser::cvtMVEVMOVQtoDReg( + MCInst &Inst, const OperandVector &Operands) { + + // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2 + assert(Operands.size() == 8); + + ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt + ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2 + ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd + ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx + // skip second copy of Qd in Operands[6] + ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2 + ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code +} + /// Parse an ARM memory expression, return false if successful else return true /// or an error. The first token must be a '[' when called. bool ARMAsmParser::parseMemory(OperandVector &Operands) { @@ -7595,6 +7623,22 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst, } break; } + case ARM::MVE_VMOV_rr_q: { + if (Operands[4]->getReg() != Operands[6]->getReg()) + return Error (Operands[4]->getStartLoc(), "Q-registers must be the same"); + if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() != + static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2) + return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1"); + break; + } + case ARM::MVE_VMOV_q_rr: { + if (Operands[2]->getReg() != Operands[4]->getReg()) + return Error (Operands[2]->getStartLoc(), "Q-registers must be the same"); + if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() != + static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2) + return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1"); + break; + } } return false; |

