author     Sander de Smalen <sander.desmalen@arm.com>    2018-04-13 12:56:14 +0000
committer  Sander de Smalen <sander.desmalen@arm.com>    2018-04-13 12:56:14 +0000
commit     5c62598b0da96ae7df1b4be69ae7f2b0ce63febc (patch)
tree       db98ba1bf2496ea2afc7403f876ea871a5a487d8 /llvm/lib/Target
parent     ae0c2711b608ef053ec01f5298648c3a8d12e6d0 (diff)
[AArch64][SVE] Asm: Support for contiguous ST1 (scalar+imm) store instructions.
Summary:
Added instructions for contiguous stores, ST1, with scalar+imm addressing
modes and corresponding tests. The patch also adds parsing of 'mul vl' as
needed for the VL-scaled immediate.

This is patch [6/6] in a series to add assembler/disassembler support for
SVE's contiguous ST1 (scalar+imm) instructions.

Reviewers: fhahn, rengolin, javed.absar, huntergr, SjoerdMeijer, t.p.northover, echristo, evandro

Reviewed By: rengolin

Subscribers: tschuett, llvm-commits, kristof.beyls

Differential Revision: https://reviews.llvm.org/D45432

llvm-svn: 330014
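For orientation, here are a few hand-written examples of the syntax this patch makes the assembler accept (illustrative only, not copied from the added test files); the VL-scaled immediate lies in [-8, 7] and, when present, is followed by the "mul vl" decoration:

    st1b    { z0.b }, p0, [x0, #7, mul vl]
    st1h    { z1.s }, p3, [x2, #-8, mul vl]
    st1w    { z5.s }, p5, [sp, #3, mul vl]
    st1d    { z31.d }, p7, [x17]              // no immediate; treated as #0 via the alias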
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td          | 13
-rw-r--r--  llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp  | 40
-rw-r--r--  llvm/lib/Target/AArch64/SVEInstrFormats.td              | 55
3 files changed, 107 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 1d60c7bff2f..6a13003ab02 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -20,6 +20,19 @@ let Predicates = [HasSVE] in {
defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add">;
defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub">;
+
+ // contiguous store with immediates
+ defm ST1B_IMM : sve_mem_cst_si<0b00, 0b00, "st1b", Z_b, ZPR8>;
+ defm ST1B_H_IMM : sve_mem_cst_si<0b00, 0b01, "st1b", Z_h, ZPR16>;
+ defm ST1B_S_IMM : sve_mem_cst_si<0b00, 0b10, "st1b", Z_s, ZPR32>;
+ defm ST1B_D_IMM : sve_mem_cst_si<0b00, 0b11, "st1b", Z_d, ZPR64>;
+ defm ST1H_IMM : sve_mem_cst_si<0b01, 0b01, "st1h", Z_h, ZPR16>;
+ defm ST1H_S_IMM : sve_mem_cst_si<0b01, 0b10, "st1h", Z_s, ZPR32>;
+ defm ST1H_D_IMM : sve_mem_cst_si<0b01, 0b11, "st1h", Z_d, ZPR64>;
+ defm ST1W_IMM : sve_mem_cst_si<0b10, 0b10, "st1w", Z_s, ZPR32>;
+ defm ST1W_D_IMM : sve_mem_cst_si<0b10, 0b11, "st1w", Z_d, ZPR64>;
+ defm ST1D_IMM : sve_mem_cst_si<0b11, 0b11, "st1d", Z_d, ZPR64>;
+
defm ZIP1_ZZZ : sve_int_perm_bin_perm_zz<0b000, "zip1">;
defm ZIP2_ZZZ : sve_int_perm_bin_perm_zz<0b001, "zip2">;
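As a reading aid for the ST1*_IMM definitions above: each defm pairs a memory access size (the first bits<2> argument) with a vector element size chosen by the Z_*/ZPR* operand classes, which is what produces the truncating forms. A sketch of the resulting assembly, inferred from the mnemonics and register classes rather than spelled out in the patch:

    st1b    { z0.b }, p0, [x0]    // ST1B_IMM:   store bytes from .b elements
    st1b    { z0.h }, p0, [x0]    // ST1B_H_IMM: store bytes from .h elements (truncating)
    st1h    { z0.s }, p0, [x0]    // ST1H_S_IMM: store halfwords from .s elements (truncating)
    st1w    { z0.d }, p0, [x0]    // ST1W_D_IMM: store words from .d elements (truncating)
    st1d    { z0.d }, p0, [x0]    // ST1D_IMM:   store doublewords from .d elements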
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index d21881fd99c..787c035df10 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -89,6 +89,7 @@ private:
bool parseRegister(OperandVector &Operands);
bool parseSymbolicImmVal(const MCExpr *&ImmVal);
bool parseNeonVectorList(OperandVector &Operands);
+ bool parseOptionalMulVl(OperandVector &Operands);
bool parseOperand(OperandVector &Operands, bool isCondCode,
bool invertCondCode);
@@ -1371,6 +1372,13 @@ public:
Inst.addOperand(MCOperand::createImm(MCE->getValue()));
}
+ template <int Scale>
+ void addImmScaledOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
+ }
+
template <typename T>
void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
@@ -3049,6 +3057,29 @@ AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
return MatchOperand_Success;
}
+bool AArch64AsmParser::parseOptionalMulVl(OperandVector &Operands) {
+ MCAsmParser &Parser = getParser();
+
+ // Some SVE instructions have a decoration after the immediate, i.e.
+ // "mul vl". We parse them here and add tokens, which must be present in the
+ // asm string in the tablegen instruction.
+ if (!Parser.getTok().getString().equals_lower("mul") ||
+ !Parser.getLexer().peekTok().getString().equals_lower("vl"))
+ return true;
+
+ SMLoc S = getLoc();
+ Operands.push_back(
+ AArch64Operand::CreateToken("mul", false, S, getContext()));
+ Parser.Lex(); // Eat the "mul"
+
+ S = getLoc();
+ Operands.push_back(
+ AArch64Operand::CreateToken("vl", false, S, getContext()));
+ Parser.Lex(); // Eat the "vl"
+
+ return false;
+}
+
/// parseOperand - Parse an arm instruction operand. For now this parses the
/// operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
@@ -3102,6 +3133,10 @@ bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
if (!parseRegister(Operands))
return false;
+ // See if this is a "mul vl" decoration used by SVE instructions.
+ if (!parseOptionalMulVl(Operands))
+ return false;
+
// This could be an optional "shift" or "extend" operand.
OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
// We can only continue if no tokens were eaten.
@@ -3610,6 +3645,8 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
return Error(Loc, "index must be an integer in range [-32, 31].");
case Match_InvalidMemoryIndexedSImm5:
return Error(Loc, "index must be an integer in range [-16, 15].");
+ case Match_InvalidMemoryIndexed1SImm4:
+ return Error(Loc, "index must be an integer in range [-8, 7].");
case Match_InvalidMemoryIndexedSImm9:
return Error(Loc, "index must be an integer in range [-256, 255].");
case Match_InvalidMemoryIndexedSImm10:
@@ -4124,10 +4161,11 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_InvalidMemoryXExtend32:
case Match_InvalidMemoryXExtend64:
case Match_InvalidMemoryXExtend128:
- case Match_InvalidMemoryIndexedSImm6:
+ case Match_InvalidMemoryIndexed1SImm4:
case Match_InvalidMemoryIndexed4SImm7:
case Match_InvalidMemoryIndexed8SImm7:
case Match_InvalidMemoryIndexed16SImm7:
+ case Match_InvalidMemoryIndexedSImm6:
case Match_InvalidMemoryIndexedSImm5:
case Match_InvalidMemoryIndexedSImm9:
case Match_InvalidMemoryIndexedSImm10:
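To exercise the new parsing path and diagnostic, the SVE assembler tests drive llvm-mc in the usual way (roughly llvm-mc -triple=aarch64 -mattr=+sve -show-encoding; the exact flags here are assumed rather than quoted from the test files):

    st1b    { z0.b }, p0, [x0, #7, mul vl]    // accepted: "mul" and "vl" are parsed as two tokens
    st1b    { z0.b }, p0, [x0, #8, mul vl]    // rejected with the new diagnostic: index must be an integer in range [-8, 7].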
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 3b69d3a143c..73e79559e7c 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -27,6 +27,21 @@ def sve_pred_enum : Operand<i32>, ImmLeaf<i32, [{
let ParserMatchClass = SVEPatternOperand;
}
+
+class SImmMulVlOperand<int Bits, int Scale> : AsmOperandClass {
+ let Name = "SImm" # Bits # "Scale" # Scale # "MulVl";
+ let DiagnosticType = "InvalidMemoryIndexed" # Scale # "SImm" # Bits;
+ let PredicateMethod = "isSImmScaled<" # Bits # ", " # Scale # ">";
+ let RenderMethod = "addImmScaledOperands<" # Scale # ">";
+}
+
+def SImm4MulVlOperand : SImmMulVlOperand<4,1>;
+
+def simm4MulVl : Operand<i64>, ImmLeaf<i64, [{ return Imm >= -8 && Imm < 8; }]> {
+ let DecoderMethod = "DecodeSImm<4>";
+ let ParserMatchClass = SImm4MulVlOperand;
+}
+
class SVELogicalImmOperand<int Width> : AsmOperandClass {
let Name = "SVELogicalImm" # Width;
let DiagnosticType = "LogicalSecondSource";
@@ -490,6 +505,46 @@ multiclass sve_int_bin_cons_shift_b_right<bits<2> opc, string asm> {
}
}
//===----------------------------------------------------------------------===//
+// SVE Memory - Store Group
+//===----------------------------------------------------------------------===//
+
+class sve_mem_cst_si<bits<2> msz, bits<2> esz, string asm,
+ RegisterOperand VecList>
+: I<(outs), (ins VecList:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4MulVl:$imm4),
+ asm, "\t$Zt, $Pg, [$Rn, $imm4, mul vl]",
+ "",
+ []>, Sched<[]> {
+ bits<3> Pg;
+ bits<5> Rn;
+ bits<5> Zt;
+ bits<4> imm4;
+ let Inst{31-25} = 0b1110010;
+ let Inst{24-23} = msz;
+ let Inst{22-21} = esz;
+ let Inst{20} = 0;
+ let Inst{19-16} = imm4;
+ let Inst{15-13} = 0b111;
+ let Inst{12-10} = Pg;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Zt;
+
+ let mayStore = 1;
+}
+
+multiclass sve_mem_cst_si<bits<2> msz, bits<2> esz, string asm,
+ RegisterOperand listty, ZPRRegOp zprty>
+{
+ def NAME : sve_mem_cst_si<msz, esz, asm, listty>;
+
+ def : InstAlias<asm # "\t$Zt, $Pg, [$Rn, $imm4, mul vl]",
+ (!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4MulVl:$imm4), 0>;
+ def : InstAlias<asm # "\t$Zt, $Pg, [$Rn]",
+ (!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>;
+ def : InstAlias<asm # "\t$Zt, $Pg, [$Rn]",
+ (!cast<Instruction>(NAME) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>;
+}
+
+//===----------------------------------------------------------------------===//
// SVE Permute - Predicates Group
//===----------------------------------------------------------------------===//
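As a sanity check on the bit layout of sve_mem_cst_si, here is one encoding worked out by hand from the fields above (msz=00, esz=00, imm4=0b0111, Pg=p0, Rn=x0, Zt=z0), with the bytes shown little-endian as llvm-mc prints them:

    st1b    { z0.b }, p0, [x0, #7, mul vl]    // 0b1110010_00_00_0_0111_111_000_00000_00000 = 0xe407e000 -> [0x00,0xe0,0x07,0xe4]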