author     Sander de Smalen <sander.desmalen@arm.com>   2018-04-30 07:24:38 +0000
committer  Sander de Smalen <sander.desmalen@arm.com>   2018-04-30 07:24:38 +0000
commit     5861c263e0c7264d891eb48c9272d906d978a417 (patch)
tree       c4a6a98825c193c773e05b65fdfae0e97cdf8bdb /llvm/lib
parent     5a4c88005c1cafe95bbf0117f0a3900a78742080 (diff)
[AArch64][SVE] Asm: Improve diagnostics for gather loads.
This patch extends the 'isSVEVectorRegWithShiftExtend' function to
improve diagnostics for SVE's gather load (scalar + vector) addressing
modes. Instead of always suggesting the 'unscaled' addressing mode,
the use of DiagnosticPredicate enables a more specific error message
when the scaling is incorrect. For example:

  ld1h z0.d, p0/z, [x0, z0.d, lsl #2]
                                  ^
          shift amount should be '1'

Instead of suggesting the packed, unscaled addressing mode:

  expected 'z[0..31].d, (uxtw|sxtw)'

the assembler now suggests using the proper scaling:

  expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'
Reviewers: fhahn, rengolin, samparker, SjoerdMeijer, javed.absar
Reviewed By: fhahn
Differential Revision: https://reviews.llvm.org/D46124
llvm-svn: 331162
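
For illustration, here is a minimal, self-contained C++ sketch of the
three-way decision this patch adds to 'isSVEVectorRegWithShiftExtend'. This
is not LLVM's actual API: Outcome, Extend, expectedAmount and matchShiftExtend
are hypothetical stand-ins for DiagnosticPredicate and the real operand
accessors; only the decision logic mirrors the patch.

    // Hypothetical stand-ins for LLVM's DiagnosticPredicate and the parsed
    // shift/extend kinds.
    enum class Outcome { NoMatch, NearMatch, Match };
    enum class Extend { LSL, UXTW, SXTW };

    // Integer log2 of (ShiftWidthBits / 8): a 16-bit scale expects a shift
    // amount of 1, a 32-bit scale expects 2, and so on.
    static unsigned expectedAmount(unsigned ShiftWidthBits) {
      unsigned Amount = 0;
      for (unsigned W = ShiftWidthBits / 8; W > 1; W >>= 1)
        ++Amount;
      return Amount;
    }

    // Decide how a parsed "zN.<t>, <extend> #<amount>" operand matches an
    // operand class that expects `Expected` scaled by `ShiftWidthBits`.
    Outcome matchShiftExtend(Extend Parsed, unsigned ParsedAmount,
                             bool HasExplicitAmount, Extend Expected,
                             unsigned ShiftWidthBits,
                             bool ShiftWidthAlwaysSame) {
      bool MatchShift = ParsedAmount == expectedAmount(ShiftWidthBits);

      // If the user typed an explicit but wrong shift amount and this
      // candidate is the unscaled uxtw/sxtw form (ShiftWidthBits == 8),
      // return NoMatch so the scaled form's more specific NearMatch
      // diagnostic is reported instead.
      if (!MatchShift &&
          (Expected == Extend::UXTW || Expected == Extend::SXTW) &&
          !ShiftWidthAlwaysSame && HasExplicitAmount && ShiftWidthBits == 8)
        return Outcome::NoMatch;

      if (MatchShift && Parsed == Expected)
        return Outcome::Match;
      return Outcome::NearMatch; // right register class, wrong shift/extend
    }

The 'Only' operand variants defined below pass ShiftWidthAlwaysSame = true, so
byte-sized gathers (which have no scaled alternative) keep reporting the
unscaled form as a near match.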
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64RegisterInfo.td          | 22
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td          | 16
-rw-r--r--  llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp  | 19
3 files changed, 38 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
index 0a69a2142fa..557e75926a4 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
@@ -934,22 +934,27 @@ def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
   let ParserMatchClass = ZPRVectorList<64, 4>;
 }
 
-class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale>
-    : AsmOperandClass {
-  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale;
+class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
+                          bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
+  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
+                         # !if(ScaleAlwaysSame, "Only", "");
+
   let PredicateMethod = "isSVEVectorRegWithShiftExtend<"
                               # RegWidth # ", AArch64::ZPRRegClassID, "
                               # "AArch64_AM::" # ShiftExtend # ", "
-                              # Scale # ">";
+                              # Scale # ", "
+                              # !if(ScaleAlwaysSame, "true", "false")
+                              # ">";
   let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
   let RenderMethod = "addRegOperands";
   let ParserMethod = "tryParseSVEDataVector<true, true>";
 }
 
 class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
-                               int RegWidth, int Scale> : RegisterOperand<ZPR> {
+                               int RegWidth, int Scale, string Suffix = "">
+    : RegisterOperand<ZPR> {
   let ParserMatchClass =
-    !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale);
+    !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
   let PrintMethod = "printRegWithShiftExtend<"
                        # !if(SignExtend, "true", "false") # ", "
                        # Scale # ", "
@@ -959,22 +964,26 @@ class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
 
 foreach RegWidth = [32, 64] in {
   // UXTW(8|16|32|64)
+  def ZPR#RegWidth#AsmOpndExtUXTW8Only  : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
   def ZPR#RegWidth#AsmOpndExtUXTW8  : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
   def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
   def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
   def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;
 
+  def ZPR#RegWidth#ExtUXTW8Only  : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
   def ZPR#RegWidth#ExtUXTW8  : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
   def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
   def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
   def ZPR#RegWidth#ExtUXTW64 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;
 
   // SXTW(8|16|32|64)
+  def ZPR#RegWidth#AsmOpndExtSXTW8Only  : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
   def ZPR#RegWidth#AsmOpndExtSXTW8  : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
   def ZPR#RegWidth#AsmOpndExtSXTW16 : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
   def ZPR#RegWidth#AsmOpndExtSXTW32 : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
   def ZPR#RegWidth#AsmOpndExtSXTW64 : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;
 
+  def ZPR#RegWidth#ExtSXTW8Only  : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
   def ZPR#RegWidth#ExtSXTW8  : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
   def ZPR#RegWidth#ExtSXTW16 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
   def ZPR#RegWidth#ExtSXTW32 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
@@ -986,7 +995,6 @@ def ZPR64AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", 64, 8>;
 def ZPR64AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", 64, 16>;
 def ZPR64AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", 64, 32>;
 def ZPR64AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", 64, 64>;
-
 def ZPR64ExtLSL8  : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", 64, 8>;
 def ZPR64ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", 64, 16>;
 def ZPR64ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", 64, 32>;
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index f0556d38d7c..40e1092afd8 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -108,10 +108,10 @@ let Predicates = [HasSVE] in {
 
   // Gathers using unscaled 32-bit offsets, e.g.
   //    ld1h z0.s, p0/z, [x0, z0.s, uxtw]
-  defm GLD1SB_S   : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb",   ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLDFF1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLD1B_S    : sve_mem_32b_gld_vs_32_unscaled<0b0010, "ld1b",    ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLDFF1B_S  : sve_mem_32b_gld_vs_32_unscaled<0b0011, "ldff1b",  ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
+  defm GLD1SB_S   : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb",   ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
+  defm GLDFF1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
+  defm GLD1B_S    : sve_mem_32b_gld_vs_32_unscaled<0b0010, "ld1b",    ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
+  defm GLDFF1B_S  : sve_mem_32b_gld_vs_32_unscaled<0b0011, "ldff1b",  ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
   defm GLD1SH_S   : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh",   ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
   defm GLDFF1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0101, "ldff1sh", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
   defm GLD1H_S    : sve_mem_32b_gld_vs_32_unscaled<0b0110, "ld1h",    ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
@@ -190,10 +190,10 @@ let Predicates = [HasSVE] in {
 
   // Gathers using unscaled 32-bit offsets unpacked in 64-bits elements, e.g.
   //    ld1h z0.d, p0/z, [x0, z0.d, uxtw]
-  defm GLD1SB_D   : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb",   ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLDFF1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLD1B_D    : sve_mem_64b_gld_vs_32_unscaled<0b0010, "ld1b",    ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLDFF1B_D  : sve_mem_64b_gld_vs_32_unscaled<0b0011, "ldff1b",  ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
+  defm GLD1SB_D   : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb",   ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
+  defm GLDFF1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
+  defm GLD1B_D    : sve_mem_64b_gld_vs_32_unscaled<0b0010, "ld1b",    ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
+  defm GLDFF1B_D  : sve_mem_64b_gld_vs_32_unscaled<0b0011, "ldff1b",  ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
   defm GLD1SH_D   : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh",   ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
   defm GLDFF1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0101, "ldff1sh", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
   defm GLD1H_D    : sve_mem_64b_gld_vs_32_unscaled<0b0110, "ld1h",    ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index c648a8db6ec..7e1bd5a1734 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -877,14 +877,25 @@ public:
   }
 
   template <int ElementWidth, unsigned Class,
-            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth>
+            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
+            bool ShiftWidthAlwaysSame>
   DiagnosticPredicate isSVEVectorRegWithShiftExtend() const {
     if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
       return DiagnosticPredicateTy::NoMatch;
 
-    if (isSVEVectorRegOfWidth<ElementWidth, Class>() &&
-        ShiftExtendTy == getShiftExtendType() &&
-        getShiftExtendAmount() == Log2_32(ShiftWidth / 8))
+    if (!isSVEVectorRegOfWidth<ElementWidth, Class>())
+      return DiagnosticPredicateTy::NearMatch;
+
+    // Give a more specific diagnostic when the user has explicitly typed in
+    // a shift-amount that does not match what is expected, but for which
+    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
+    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
+    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
+                        ShiftExtendTy == AArch64_AM::SXTW) &&
+        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
+      return DiagnosticPredicateTy::NoMatch;
+
+    if (MatchShift && ShiftExtendTy == getShiftExtendType())
       return DiagnosticPredicateTy::Match;
     return DiagnosticPredicateTy::NearMatch;
   }
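
Continuing the hypothetical sketch above, the commit's example resolves as
follows: the unscaled candidate bows out, the scaled candidate near-matches,
and its "shift amount should be '1'" diagnostic is the one printed.

    #include <cassert>

    int main() {
      // ld1h z0.d, p0/z, [x0, z0.d, lsl #2]
      // The unscaled uxtw/sxtw candidate returns NoMatch because the user
      // typed an explicit shift amount that cannot match it.
      assert(matchShiftExtend(Extend::LSL, 2, /*HasExplicitAmount=*/true,
                              Extend::UXTW, 8, false) == Outcome::NoMatch);

      // The scaled (lsl|uxtw|sxtw) #1 candidate is a NearMatch, so its
      // diagnostic is reported.
      assert(matchShiftExtend(Extend::LSL, 2, /*HasExplicitAmount=*/true,
                              Extend::LSL, 16, false) == Outcome::NearMatch);

      // With the correct scaling the operand matches outright.
      assert(matchShiftExtend(Extend::LSL, 1, /*HasExplicitAmount=*/true,
                              Extend::LSL, 16, false) == Outcome::Match);
      return 0;
    }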