Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrFormats.td           77
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td            6
-rw-r--r--  llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp   63
-rw-r--r--  llvm/lib/Target/AArch64/SVEInstrFormats.td               52
4 files changed, 127 insertions, 71 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index e4abc479eb7..861cdc8109c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -932,53 +932,48 @@ def fpimm0 : FPImmLeaf<fAny, [{
 }]>;
 
 // Vector lane operands
-class AsmVectorIndex<string Suffix> : AsmOperandClass {
-  let Name = "VectorIndex" # Suffix;
-  let DiagnosticType = "InvalidIndex" # Suffix;
+class AsmVectorIndex<int Min, int Max, string NamePrefix=""> : AsmOperandClass {
+  let Name = NamePrefix # "IndexRange" # Min # "_" # Max;
+  let DiagnosticType = "Invalid" # Name;
+  let PredicateMethod = "isVectorIndex<" # Min # ", " # Max # ">";
   let RenderMethod = "addVectorIndexOperands";
 }
-def VectorIndex1Operand : AsmVectorIndex<"1">;
-def VectorIndexBOperand : AsmVectorIndex<"B">;
-def VectorIndexHOperand : AsmVectorIndex<"H">;
-def VectorIndexSOperand : AsmVectorIndex<"S">;
-def VectorIndexDOperand : AsmVectorIndex<"D">;
-def VectorIndex1 : Operand<i64>, ImmLeaf<i64, [{
-  return ((uint64_t)Imm) == 1;
-}]> {
-  let ParserMatchClass = VectorIndex1Operand;
-  let PrintMethod = "printVectorIndex";
-  let MIOperandInfo = (ops i64imm);
-}
-def VectorIndexB : Operand<i64>, ImmLeaf<i64, [{
-  return ((uint64_t)Imm) < 16;
-}]> {
-  let ParserMatchClass = VectorIndexBOperand;
-  let PrintMethod = "printVectorIndex";
-  let MIOperandInfo = (ops i64imm);
-}
-def VectorIndexH : Operand<i64>, ImmLeaf<i64, [{
-  return ((uint64_t)Imm) < 8;
-}]> {
-  let ParserMatchClass = VectorIndexHOperand;
+class AsmVectorIndexOpnd<AsmOperandClass mc, code pred>
+    : Operand<i64>, ImmLeaf<i64, pred> {
+  let ParserMatchClass = mc;
   let PrintMethod = "printVectorIndex";
-  let MIOperandInfo = (ops i64imm);
-}
-def VectorIndexS : Operand<i64>, ImmLeaf<i64, [{
-  return ((uint64_t)Imm) < 4;
-}]> {
-  let ParserMatchClass = VectorIndexSOperand;
-  let PrintMethod = "printVectorIndex";
-  let MIOperandInfo = (ops i64imm);
-}
-def VectorIndexD : Operand<i64>, ImmLeaf<i64, [{
-  return ((uint64_t)Imm) < 2;
-}]> {
-  let ParserMatchClass = VectorIndexDOperand;
-  let PrintMethod = "printVectorIndex";
-  let MIOperandInfo = (ops i64imm);
 }
+def VectorIndex1Operand : AsmVectorIndex<1, 1>;
+def VectorIndexBOperand : AsmVectorIndex<0, 15>;
+def VectorIndexHOperand : AsmVectorIndex<0, 7>;
+def VectorIndexSOperand : AsmVectorIndex<0, 3>;
+def VectorIndexDOperand : AsmVectorIndex<0, 1>;
+
+def VectorIndex1 : AsmVectorIndexOpnd<VectorIndex1Operand, [{ return ((uint64_t)Imm) == 1; }]>;
+def VectorIndexB : AsmVectorIndexOpnd<VectorIndexBOperand, [{ return ((uint64_t)Imm) < 16; }]>;
+def VectorIndexH : AsmVectorIndexOpnd<VectorIndexHOperand, [{ return ((uint64_t)Imm) < 8; }]>;
+def VectorIndexS : AsmVectorIndexOpnd<VectorIndexSOperand, [{ return ((uint64_t)Imm) < 4; }]>;
+def VectorIndexD : AsmVectorIndexOpnd<VectorIndexDOperand, [{ return ((uint64_t)Imm) < 2; }]>;
+
+def SVEVectorIndexExtDupBOperand : AsmVectorIndex<0, 63, "SVE">;
+def SVEVectorIndexExtDupHOperand : AsmVectorIndex<0, 31, "SVE">;
+def SVEVectorIndexExtDupSOperand : AsmVectorIndex<0, 15, "SVE">;
+def SVEVectorIndexExtDupDOperand : AsmVectorIndex<0, 7, "SVE">;
+def SVEVectorIndexExtDupQOperand : AsmVectorIndex<0, 3, "SVE">;
+
+def sve_elm_idx_extdup_b
+  : AsmVectorIndexOpnd<SVEVectorIndexExtDupBOperand, [{ return ((uint64_t)Imm) < 64; }]>;
+def sve_elm_idx_extdup_h
+  : AsmVectorIndexOpnd<SVEVectorIndexExtDupHOperand, [{ return ((uint64_t)Imm) < 32; }]>;
+def sve_elm_idx_extdup_s
+  : AsmVectorIndexOpnd<SVEVectorIndexExtDupSOperand, [{ return ((uint64_t)Imm) < 16; }]>;
+def sve_elm_idx_extdup_d
+  : AsmVectorIndexOpnd<SVEVectorIndexExtDupDOperand, [{ return ((uint64_t)Imm) < 8; }]>;
+def sve_elm_idx_extdup_q
+  : AsmVectorIndexOpnd<SVEVectorIndexExtDupQOperand, [{ return ((uint64_t)Imm) < 4; }]>;
+
 // 8-bit immediate for AdvSIMD where 64-bit values of the form:
 // aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
 // are encoded as the eight bit value 'abcdefgh'.
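As an aside on the pattern above: the reworked AsmVectorIndex class derives the operand-class name, the diagnostic type, and the C++ predicate instantiation all from one (Min, Max) pair, so a single templated bounds check replaces five hand-written predicates. A minimal standalone C++ sketch of that check, exercised with a few of the concrete ranges defined above (illustrative only, not code from this patch; isVectorIndexImm is a made-up name):

    #include <cassert>
    #include <cstdint>

    // Mirrors PredicateMethod = "isVectorIndex<" # Min # ", " # Max # ">".
    template <int Min, int Max> bool isVectorIndexImm(int64_t Imm) {
      return Imm >= Min && Imm <= Max;
    }

    int main() {
      assert(isVectorIndexImm<0, 15>(15));  // VectorIndexBOperand
      assert(!isVectorIndexImm<0, 1>(2));   // VectorIndexDOperand
      assert(isVectorIndexImm<0, 63>(63));  // SVEVectorIndexExtDupBOperand
      assert(!isVectorIndexImm<0, 3>(4));   // SVEVectorIndexExtDupQOperand
      return 0;
    }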
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 5ddf9203093..899807f3d57 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -45,6 +45,10 @@ let Predicates = [HasSVE] in {
   defm CPY_ZPzI  : sve_int_dup_imm_pred_zero<"cpy">;
   defm FCPY_ZPmI : sve_int_dup_fpimm_pred<"fcpy">;
 
+  // Splat scalar register (unpredicated, GPR or vector + element index)
+  defm DUP_ZR  : sve_int_perm_dup_r<"dup">;
+  defm DUP_ZZI : sve_int_perm_dup_i<"dup">;
+
   // continuous load with reg+immediate
   defm LD1B_IMM    : sve_mem_cld_si<0b0000, "ld1b",  Z_b, ZPR8>;
   defm LD1B_H_IMM  : sve_mem_cld_si<0b0001, "ld1b",  Z_h, ZPR16>;
@@ -465,8 +469,6 @@ let Predicates = [HasSVE] in {
   defm ZIP1_PPP : sve_int_perm_bin_perm_pp<0b000, "zip1">;
   defm ZIP2_PPP : sve_int_perm_bin_perm_pp<0b001, "zip2">;
 
-  defm DUP_ZR : sve_int_perm_dup_r<"dup">;
-
   def RDVLI_XI  : sve_int_read_vl_a<0b0, 0b11111, "rdvl">;
   def ADDVL_XXI : sve_int_arith_vl<0b0, "addvl">;
   def ADDPL_XXI : sve_int_arith_vl<0b1, "addpl">;
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 031de490885..cf22a1cbadc 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -1048,24 +1048,13 @@ public:
     return VectorList.NumElements == NumElements;
   }
 
-  bool isVectorIndex1() const {
-    return Kind == k_VectorIndex && VectorIndex.Val == 1;
-  }
-
-  bool isVectorIndexB() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 16;
-  }
-
-  bool isVectorIndexH() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 8;
-  }
-
-  bool isVectorIndexS() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 4;
-  }
-
-  bool isVectorIndexD() const {
-    return Kind == k_VectorIndex && VectorIndex.Val < 2;
+  template <int Min, int Max>
+  DiagnosticPredicate isVectorIndex() const {
+    if (Kind != k_VectorIndex)
+      return DiagnosticPredicateTy::NoMatch;
+    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
+      return DiagnosticPredicateTy::Match;
+    return DiagnosticPredicateTy::NearMatch;
   }
 
   bool isToken() const override { return Kind == k_Token; }
@@ -3839,16 +3828,26 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
   case Match_InvalidSVECpyImm64:
     return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                       "multiple of 256 in range [-32768, 32512]");
-  case Match_InvalidIndex1:
+  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
-  case Match_InvalidIndexB:
+  case Match_InvalidIndexRange0_15:
     return Error(Loc, "vector lane must be an integer in range [0, 15].");
-  case Match_InvalidIndexH:
+  case Match_InvalidIndexRange0_7:
     return Error(Loc, "vector lane must be an integer in range [0, 7].");
-  case Match_InvalidIndexS:
+  case Match_InvalidIndexRange0_3:
     return Error(Loc, "vector lane must be an integer in range [0, 3].");
-  case Match_InvalidIndexD:
+  case Match_InvalidIndexRange0_1:
     return Error(Loc, "vector lane must be an integer in range [0, 1].");
+  case Match_InvalidSVEIndexRange0_63:
+    return Error(Loc, "vector lane must be an integer in range [0, 63].");
+  case Match_InvalidSVEIndexRange0_31:
+    return Error(Loc, "vector lane must be an integer in range [0, 31].");
+  case Match_InvalidSVEIndexRange0_15:
+    return Error(Loc, "vector lane must be an integer in range [0, 15].");
+  case Match_InvalidSVEIndexRange0_7:
+    return Error(Loc, "vector lane must be an integer in range [0, 7].");
+  case Match_InvalidSVEIndexRange0_3:
+    return Error(Loc, "vector lane must be an integer in range [0, 3].");
   case Match_InvalidLabel:
     return Error(Loc, "expected label or encodable integer pc offset");
   case Match_MRS:
@@ -4375,11 +4374,16 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
   case Match_InvalidSVECpyImm16:
   case Match_InvalidSVECpyImm32:
   case Match_InvalidSVECpyImm64:
-  case Match_InvalidIndex1:
-  case Match_InvalidIndexB:
-  case Match_InvalidIndexH:
-  case Match_InvalidIndexS:
-  case Match_InvalidIndexD:
+  case Match_InvalidIndexRange1_1:
+  case Match_InvalidIndexRange0_15:
+  case Match_InvalidIndexRange0_7:
+  case Match_InvalidIndexRange0_3:
+  case Match_InvalidIndexRange0_1:
+  case Match_InvalidSVEIndexRange0_63:
+  case Match_InvalidSVEIndexRange0_31:
+  case Match_InvalidSVEIndexRange0_15:
+  case Match_InvalidSVEIndexRange0_7:
+  case Match_InvalidSVEIndexRange0_3:
   case Match_InvalidLabel:
   case Match_InvalidComplexRotationEven:
   case Match_InvalidComplexRotationOdd:
@@ -5028,6 +5032,9 @@ AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
   Operands.push_back(AArch64Operand::CreateVectorReg(
       RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
 
+  OperandMatchResultTy Res = tryParseVectorIndex(Operands);
+  if (Res == MatchOperand_ParseFail)
+    return MatchOperand_ParseFail;
   return MatchOperand_Success;
 }
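The old isVectorIndex1/B/H/S/D booleans could only answer match or no-match; the templated replacement returns a three-state DiagnosticPredicate, so the matcher can tell "wrong operand kind" (stay silent and try other operand classes) apart from "lane index out of range" (emit the range-specific message above). A self-contained C++ sketch of that flow, using simplified stand-ins for LLVM's DiagnosticPredicate machinery:

    #include <cstdint>
    #include <cstdio>

    enum class DiagKind { Match, NearMatch, NoMatch }; // simplified stand-in

    struct ParsedOperand {
      bool IsVectorIndex;
      int64_t Val;

      template <int Min, int Max> DiagKind isVectorIndex() const {
        if (!IsVectorIndex)
          return DiagKind::NoMatch;   // not a lane index; try other classes
        if (Val >= Min && Val <= Max)
          return DiagKind::Match;     // in range for this operand class
        return DiagKind::NearMatch;   // a lane index, but out of range
      }
    };

    int main() {
      ParsedOperand Op{true, 70};     // e.g. from "dup z0.b, z1.b[70]"
      if (Op.isVectorIndex<0, 63>() == DiagKind::NearMatch)
        puts("vector lane must be an integer in range [0, 63].");
      return 0;
    }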
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index a54d6f315c5..757bb6f91dd 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -203,6 +203,7 @@ def addsub_imm8_opt_lsl_i64 : imm8_opt_lsl<64, "uint64_t", SVEAddSubImmOperand64
   return AArch64_AM::isSVEAddSubImm<int64_t>(Imm);
 }]>;
 
+
 //===----------------------------------------------------------------------===//
 // SVE PTrue - These are used extensively throughout the pattern matching so
 // it's important we define them first.
@@ -284,6 +285,57 @@ multiclass sve_int_perm_dup_r<string asm> {
                   (!cast<Instruction>(NAME # _D) ZPR64:$Zd, GPR64sp:$Rn), 1>;
 }
 
+class sve_int_perm_dup_i<bits<5> tsz, Operand immtype, string asm,
+                         ZPRRegOp zprty>
+: I<(outs zprty:$Zd), (ins zprty:$Zn, immtype:$idx),
+  asm, "\t$Zd, $Zn$idx",
+  "",
+  []>, Sched<[]> {
+  bits<5> Zd;
+  bits<5> Zn;
+  bits<7> idx;
+  let Inst{31-24} = 0b00000101;
+  let Inst{23-22} = {?,?}; // imm3h
+  let Inst{21} = 0b1;
+  let Inst{20-16} = tsz;
+  let Inst{15-10} = 0b001000;
+  let Inst{9-5} = Zn;
+  let Inst{4-0} = Zd;
+}
+
+multiclass sve_int_perm_dup_i<string asm> {
+  def _B : sve_int_perm_dup_i<{?,?,?,?,1}, sve_elm_idx_extdup_b, asm, ZPR8> {
+    let Inst{23-22} = idx{5-4};
+    let Inst{20-17} = idx{3-0};
+  }
+  def _H : sve_int_perm_dup_i<{?,?,?,1,0}, sve_elm_idx_extdup_h, asm, ZPR16> {
+    let Inst{23-22} = idx{4-3};
+    let Inst{20-18} = idx{2-0};
+  }
+  def _S : sve_int_perm_dup_i<{?,?,1,0,0}, sve_elm_idx_extdup_s, asm, ZPR32> {
+    let Inst{23-22} = idx{3-2};
+    let Inst{20-19} = idx{1-0};
+  }
+  def _D : sve_int_perm_dup_i<{?,1,0,0,0}, sve_elm_idx_extdup_d, asm, ZPR64> {
+    let Inst{23-22} = idx{2-1};
+    let Inst{20} = idx{0};
+  }
+  def _Q : sve_int_perm_dup_i<{1,0,0,0,0}, sve_elm_idx_extdup_q, asm, ZPR128> {
+    let Inst{23-22} = idx{1-0};
+  }
+
+  def : InstAlias<"mov $Zd, $Zn$idx",
+                  (!cast<Instruction>(NAME # _B) ZPR8:$Zd, ZPR8:$Zn, sve_elm_idx_extdup_b:$idx), 1>;
+  def : InstAlias<"mov $Zd, $Zn$idx",
+                  (!cast<Instruction>(NAME # _H) ZPR16:$Zd, ZPR16:$Zn, sve_elm_idx_extdup_h:$idx), 1>;
+  def : InstAlias<"mov $Zd, $Zn$idx",
+                  (!cast<Instruction>(NAME # _S) ZPR32:$Zd, ZPR32:$Zn, sve_elm_idx_extdup_s:$idx), 1>;
+  def : InstAlias<"mov $Zd, $Zn$idx",
+                  (!cast<Instruction>(NAME # _D) ZPR64:$Zd, ZPR64:$Zn, sve_elm_idx_extdup_d:$idx), 1>;
+  def : InstAlias<"mov $Zd, $Zn$idx",
+                  (!cast<Instruction>(NAME # _Q) ZPR128:$Zd, ZPR128:$Zn, sve_elm_idx_extdup_q:$idx), 1>;
+}
+
 //===----------------------------------------------------------------------===//
 // SVE Logical Mask Immediate Group
 //===----------------------------------------------------------------------===//
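The tsz/idx assignments in sve_int_perm_dup_i follow the DUP (indexed) encoding, where imm2:tsz (Inst{23-22}:Inst{20-16}) holds the lane index shifted above a single set bit that marks the element size. A standalone C++ sketch of that packing, derived from the bit slices above (my reading of the format, not code from the patch):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // log2Size: 0=B, 1=H, 2=S, 3=D, 4=Q. imm2:tsz is idx:1 followed by
    // log2Size zeros, so the lowest set bit of tsz encodes the element
    // size and the bits above it carry the index.
    uint32_t encodeDupIndexed(unsigned log2Size, unsigned idx, unsigned Zd,
                              unsigned Zn) {
      assert(log2Size <= 4 && idx < (64u >> log2Size) && Zd < 32 && Zn < 32);
      uint32_t imm = (idx << (log2Size + 1)) | (1u << log2Size);
      uint32_t tsz = imm & 0x1f;         // Inst{20-16}
      uint32_t imm2 = (imm >> 5) & 0x3;  // Inst{23-22}
      return (0x05u << 24) | (imm2 << 22) | (1u << 21) | (tsz << 16) |
             (0x08u << 10) | (Zn << 5) | Zd;
    }

    int main() {
      // dup z0.b, z1.b[63]: idx{5-4} land in imm2, idx{3-0} in tsz{4-1}.
      printf("%08x\n", encodeDupIndexed(0, 63, 0, 1)); // prints 05ff2020
      return 0;
    }

The mov aliases emitted by the multiclass reuse the same encodings, so "mov z0.b, z1.b[63]" assembles to the identical word.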

