author     Alex Bradbury <asb@lowrisc.org>             2017-12-07 10:46:23 +0000
committer  Alex Bradbury <asb@lowrisc.org>             2017-12-07 10:46:23 +0000
commit     7bc2a95bb98a5e589cf2a098f2b6c4eae3e4c072 (patch)
tree       e812ff14e80035228b8ad1a0855c39546072921b /llvm/lib/Target
parent     a8a83d150f489733353b3bff0891248f59a0794c (diff)
[RISCV] MC layer support for the standard RV32D instruction set extension
As the FPR32 and FPR64 registers have the same names, use
validateTargetOperandClass in RISCVAsmParser to coerce a parsed FPR32 to an
FPR64 when necessary. The rest of this patch is very similar to the RV32F
patch.
Differential Revision: https://reviews.llvm.org/D39895
llvm-svn: 320023
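
For illustration, RISC-V uses a single set of f-register names (ABI names such as ft0, fa0) for both single- and double-precision operands, which is why the parser must coerce FPR32 operands to FPR64 as described above. A minimal sketch of assembly that the MC layer accepts once the D extension is enabled (for example via llvm-mc -triple=riscv32 -mattr=+d -show-encoding):

    fld    ft0, 8(a0)        # double-precision load; ft0 matched as FPR64
    fadd.d ft0, ft0, ft1     # same register names as fadd.s; coerced by validateTargetOperandClass
    fsd    ft0, 8(a0)        # double-precision store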
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp          64
-rw-r--r--  llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp    25
-rw-r--r--  llvm/lib/Target/RISCV/RISCV.td                                8
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.td                       1
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoD.td                    131
-rw-r--r--  llvm/lib/Target/RISCV/RISCVRegisterInfo.td                   27
-rw-r--r--  llvm/lib/Target/RISCV/RISCVSubtarget.h                        2
7 files changed, 258 insertions, 0 deletions
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 0037d8d8af4..ab5880a3646 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -32,6 +32,9 @@ struct RISCVOperand;
 class RISCVAsmParser : public MCTargetAsmParser {
   SMLoc getLoc() const { return getParser().getTok().getLoc(); }
 
+  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+                                      unsigned Kind) override;
+
   bool generateImmOutOfRangeError(OperandVector &Operands, uint64_t ErrorInfo,
                                   int Lower, int Upper, Twine Msg);
 
@@ -381,6 +384,67 @@ public:
 #define GET_MATCHER_IMPLEMENTATION
 #include "RISCVGenAsmMatcher.inc"
 
+// Return the matching FPR64 register for the given FPR32.
+// FIXME: Ideally this function could be removed in favour of using
+// information from TableGen.
+unsigned convertFPR32ToFPR64(unsigned Reg) {
+  switch (Reg) {
+  default:
+    llvm_unreachable("Not a recognised FPR32 register");
+  case RISCV::F0_32: return RISCV::F0_64;
+  case RISCV::F1_32: return RISCV::F1_64;
+  case RISCV::F2_32: return RISCV::F2_64;
+  case RISCV::F3_32: return RISCV::F3_64;
+  case RISCV::F4_32: return RISCV::F4_64;
+  case RISCV::F5_32: return RISCV::F5_64;
+  case RISCV::F6_32: return RISCV::F6_64;
+  case RISCV::F7_32: return RISCV::F7_64;
+  case RISCV::F8_32: return RISCV::F8_64;
+  case RISCV::F9_32: return RISCV::F9_64;
+  case RISCV::F10_32: return RISCV::F10_64;
+  case RISCV::F11_32: return RISCV::F11_64;
+  case RISCV::F12_32: return RISCV::F12_64;
+  case RISCV::F13_32: return RISCV::F13_64;
+  case RISCV::F14_32: return RISCV::F14_64;
+  case RISCV::F15_32: return RISCV::F15_64;
+  case RISCV::F16_32: return RISCV::F16_64;
+  case RISCV::F17_32: return RISCV::F17_64;
+  case RISCV::F18_32: return RISCV::F18_64;
+  case RISCV::F19_32: return RISCV::F19_64;
+  case RISCV::F20_32: return RISCV::F20_64;
+  case RISCV::F21_32: return RISCV::F21_64;
+  case RISCV::F22_32: return RISCV::F22_64;
+  case RISCV::F23_32: return RISCV::F23_64;
+  case RISCV::F24_32: return RISCV::F24_64;
+  case RISCV::F25_32: return RISCV::F25_64;
+  case RISCV::F26_32: return RISCV::F26_64;
+  case RISCV::F27_32: return RISCV::F27_64;
+  case RISCV::F28_32: return RISCV::F28_64;
+  case RISCV::F29_32: return RISCV::F29_64;
+  case RISCV::F30_32: return RISCV::F30_64;
+  case RISCV::F31_32: return RISCV::F31_64;
+  }
+}
+
+unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
+                                                    unsigned Kind) {
+  RISCVOperand &Op = static_cast<RISCVOperand &>(AsmOp);
+  if (!Op.isReg())
+    return Match_InvalidOperand;
+
+  unsigned Reg = Op.getReg();
+  bool IsRegFPR32 =
+      RISCVMCRegisterClasses[RISCV::FPR32RegClassID].contains(Reg);
+
+  // As the parser couldn't differentiate an FPR32 from an FPR64, coerce the
+  // register from FPR32 to FPR64 if necessary.
+  if (IsRegFPR32 && Kind == MCK_FPR64) {
+    Op.Reg.RegNum = convertFPR32ToFPR64(Reg);
+    return Match_Success;
+  }
+  return Match_InvalidOperand;
+}
+
 bool RISCVAsmParser::generateImmOutOfRangeError(
     OperandVector &Operands, uint64_t ErrorInfo, int Lower, int Upper,
     Twine Msg = "immediate must be an integer in the range") {
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 0f186004a4f..2649f1b3729 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -105,6 +105,31 @@ static DecodeStatus DecodeFPR32RegisterClass(MCInst &Inst, uint64_t RegNo,
   return MCDisassembler::Success;
 }
 
+static const unsigned FPR64DecoderTable[] = {
+  RISCV::F0_64,  RISCV::F1_64,  RISCV::F2_64,  RISCV::F3_64,
+  RISCV::F4_64,  RISCV::F5_64,  RISCV::F6_64,  RISCV::F7_64,
+  RISCV::F8_64,  RISCV::F9_64,  RISCV::F10_64, RISCV::F11_64,
+  RISCV::F12_64, RISCV::F13_64, RISCV::F14_64, RISCV::F15_64,
+  RISCV::F16_64, RISCV::F17_64, RISCV::F18_64, RISCV::F19_64,
+  RISCV::F20_64, RISCV::F21_64, RISCV::F22_64, RISCV::F23_64,
+  RISCV::F24_64, RISCV::F25_64, RISCV::F26_64, RISCV::F27_64,
+  RISCV::F28_64, RISCV::F29_64, RISCV::F30_64, RISCV::F31_64
+};
+
+static DecodeStatus DecodeFPR64RegisterClass(MCInst &Inst, uint64_t RegNo,
+                                             uint64_t Address,
+                                             const void *Decoder) {
+  if (RegNo > sizeof(FPR64DecoderTable))
+    return MCDisassembler::Fail;
+
+  // We must define our own mapping from RegNo to register identifier.
+  // Accessing index RegNo in the register class will work in the case that
+  // registers were added in ascending order, but not in general.
+  unsigned Reg = FPR64DecoderTable[RegNo];
+  Inst.addOperand(MCOperand::createReg(Reg));
+  return MCDisassembler::Success;
+}
+
 template <unsigned N>
 static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm,
                                       int64_t Address, const void *Decoder) {
diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
index 2a64ed5ace9..3ebf76b8e59 100644
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -31,6 +31,13 @@ def FeatureStdExtF
 def HasStdExtF : Predicate<"Subtarget->hasStdExtF()">,
                            AssemblerPredicate<"FeatureStdExtF">;
 
+def FeatureStdExtD
+    : SubtargetFeature<"d", "HasStdExtD", "true",
+                       "'D' (Double-Precision Floating-Point)",
+                       [FeatureStdExtF]>;
+def HasStdExtD : Predicate<"Subtarget->hasStdExtD()">,
+                           AssemblerPredicate<"FeatureStdExtD">;
+
 def Feature64Bit
     : SubtargetFeature<"64bit", "HasRV64", "true", "Implements RV64">;
 
@@ -63,6 +70,7 @@ def RISCVInstrInfo : InstrInfo {
 
 def RISCVAsmParser : AsmParser {
   let ShouldEmitMatchRegisterAltName = 1;
+  let AllowDuplicateRegisterNames = 1;
 }
 
 def RISCV : Target {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index a141bd9ffc7..cd500876c62 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -442,3 +442,4 @@ def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
 include "RISCVInstrInfoM.td"
 include "RISCVInstrInfoA.td"
 include "RISCVInstrInfoF.td"
+include "RISCVInstrInfoD.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
new file mode 100644
index 00000000000..aa194320d99
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -0,0 +1,131 @@
+//===-- RISCVInstrInfoD.td - RISC-V 'D' instructions -------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the standard 'D',
+// Double-Precision Floating-Point instruction set extension.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPFMAD_rrr_frm<RISCVOpcode opcode, string opcodestr>
+    : RVInstR4<0b01, opcode, (outs FPR64:$rd),
+               (ins FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, frmarg:$funct3),
+               opcodestr, "$rd, $rs1, $rs2, $rs3, $funct3">;
+
+class FPFMADDynFrmAlias<FPFMAD_rrr_frm Inst, string OpcodeStr>
+    : InstAlias<OpcodeStr#" $rd, $rs1, $rs2, $rs3",
+                (Inst FPR64:$rd, FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPALUD_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
+    : RVInstR<funct7, funct3, OPC_OP_FP, (outs FPR64:$rd),
+              (ins FPR64:$rs1, FPR64:$rs2), opcodestr, "$rd, $rs1, $rs2">;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPALUD_rr_frm<bits<7> funct7, string opcodestr>
+    : RVInstRFrm<funct7, OPC_OP_FP, (outs FPR64:$rd),
+                 (ins FPR64:$rs1, FPR64:$rs2, frmarg:$funct3), opcodestr,
+                 "$rd, $rs1, $rs2, $funct3">;
+
+class FPALUDDynFrmAlias<FPALUD_rr_frm Inst, string OpcodeStr>
+    : InstAlias<OpcodeStr#" $rd, $rs1, $rs2",
+                (Inst FPR64:$rd, FPR64:$rs1, FPR64:$rs2, 0b111)>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class FPCmpD_rr<bits<3> funct3, string opcodestr>
+    : RVInstR<0b1010001, funct3, OPC_OP_FP, (outs GPR:$rd),
+              (ins FPR64:$rs1, FPR64:$rs2), opcodestr, "$rd, $rs1, $rs2">;
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtD] in {
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+def FLD : RVInstI<0b011, OPC_LOAD_FP, (outs FPR64:$rd),
+                  (ins GPR:$rs1, simm12:$imm12),
+                  "fld", "$rd, ${imm12}(${rs1})">;
+
+// Operands for stores are in the order srcreg, base, offset rather than
+// reflecting the order these fields are specified in the instruction
+// encoding.
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+def FSD : RVInstS<0b011, OPC_STORE_FP, (outs),
+                  (ins FPR64:$rs2, GPR:$rs1, simm12:$imm12),
+                  "fsd", "$rs2, ${imm12}(${rs1})">;
+
+def FMADD_D : FPFMAD_rrr_frm<OPC_MADD, "fmadd.d">;
+def : FPFMADDynFrmAlias<FMADD_D, "fmadd.d">;
+def FMSUB_D : FPFMAD_rrr_frm<OPC_MSUB, "fmsub.d">;
+def : FPFMADDynFrmAlias<FMSUB_D, "fmsub.d">;
+def FNMSUB_D : FPFMAD_rrr_frm<OPC_NMSUB, "fnmsub.d">;
+def : FPFMADDynFrmAlias<FNMSUB_D, "fnmsub.d">;
+def FNMADD_D : FPFMAD_rrr_frm<OPC_NMADD, "fnmadd.d">;
+def : FPFMADDynFrmAlias<FNMADD_D, "fnmadd.d">;
+
+def FADD_D : FPALUD_rr_frm<0b0000001, "fadd.d">;
+def : FPALUDDynFrmAlias<FADD_D, "fadd.d">;
+def FSUB_D : FPALUD_rr_frm<0b0000101, "fsub.d">;
+def : FPALUDDynFrmAlias<FSUB_D, "fsub.d">;
+def FMUL_D : FPALUD_rr_frm<0b0001001, "fmul.d">;
+def : FPALUDDynFrmAlias<FMUL_D, "fmul.d">;
+def FDIV_D : FPALUD_rr_frm<0b0001101, "fdiv.d">;
+def : FPALUDDynFrmAlias<FDIV_D, "fdiv.d">;
+
+def FSQRT_D : FPUnaryOp_r_frm<0b0101101, FPR64, FPR64, "fsqrt.d"> {
+  let rs2 = 0b00000;
+}
+def : FPUnaryOpDynFrmAlias<FSQRT_D, "fsqrt.d", FPR64, FPR64>;
+
+def FSGNJ_D : FPALUD_rr<0b0010001, 0b000, "fsgnj.d">;
+def FSGNJN_D : FPALUD_rr<0b0010001, 0b001, "fsgnjn.d">;
+def FSGNJX_D : FPALUD_rr<0b0010001, 0b010, "fsgnjx.d">;
+def FMIN_D : FPALUD_rr<0b0010101, 0b000, "fmin.d">;
+def FMAX_D : FPALUD_rr<0b0010101, 0b001, "fmax.d">;
+
+def FCVT_S_D : FPUnaryOp_r_frm<0b0100000, FPR32, FPR64, "fcvt.s.d"> {
+  let rs2 = 0b00001;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_S_D, "fcvt.s.d", FPR32, FPR64>;
+
+def FCVT_D_S : FPUnaryOp_r<0b0100001, 0b000, FPR64, FPR32, "fcvt.d.s"> {
+  let rs2 = 0b00000;
+}
+
+def FEQ_D : FPCmpD_rr<0b010, "feq.d">;
+def FLT_D : FPCmpD_rr<0b001, "flt.d">;
+def FLE_D : FPCmpD_rr<0b000, "fle.d">;
+
+def FCLASS_D : FPUnaryOp_r<0b1110001, 0b001, GPR, FPR64, "fclass.d"> {
+  let rs2 = 0b00000;
+}
+
+def FCVT_W_D : FPUnaryOp_r_frm<0b1100001, GPR, FPR64, "fcvt.w.d"> {
+  let rs2 = 0b00000;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_W_D, "fcvt.w.d", GPR, FPR64>;
+
+def FCVT_WU_D : FPUnaryOp_r_frm<0b1100001, GPR, FPR64, "fcvt.wu.d"> {
+  let rs2 = 0b00001;
+}
+def : FPUnaryOpDynFrmAlias<FCVT_WU_D, "fcvt.wu.d", GPR, FPR64>;
+
+def FCVT_D_W : FPUnaryOp_r<0b1101001, 0b000, FPR64, GPR, "fcvt.d.w"> {
+  let rs2 = 0b00000;
+}
+
+def FCVT_D_WU : FPUnaryOp_r<0b1101001, 0b000, FPR64, GPR, "fcvt.d.wu"> {
+  let rs2 = 0b00001;
+}
+} // Predicates = [HasStdExtD]
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index fd1d477e638..cabce379c4c 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -22,6 +22,18 @@ class RISCVReg32<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
   let AltNames = alt;
 }
 
+// Because RISCVReg64 register have AsmName and AltNames that alias with their
+// 32-bit sub-register, RISCVAsmParser will need to coerce a register number
+// from a RISCVReg32 to the equivalent RISCVReg64 when appropriate.
+def sub_32 : SubRegIndex<32>;
+class RISCVReg64<RISCVReg32 subreg> : Register<""> {
+  let HWEncoding{4-0} = subreg.HWEncoding{4-0};
+  let SubRegs = [subreg];
+  let SubRegIndices = [sub_32];
+  let AsmName = subreg.AsmName;
+  let AltNames = subreg.AltNames;
+}
+
 def ABIRegAltName : RegAltNameIndex;
 } // Namespace = "RISCV"
 
@@ -113,6 +125,11 @@ let RegAltNameIndices = [ABIRegAltName] in {
   def F29_32 : RISCVReg32<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
   def F30_32 : RISCVReg32<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
   def F31_32 : RISCVReg32<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;
+
+  foreach Index = 0-31 in {
+    def F#Index#_64 : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_32")>,
+      DwarfRegNum<[!add(Index, 32)]>;
+  }
 }
 
 // The order of registers represents the preferred allocation sequence,
@@ -124,3 +141,13 @@ def FPR32 : RegisterClass<"RISCV", [f32], 32, (add
     (sequence "F%u_32", 8, 9),
     (sequence "F%u_32", 18, 27)
   )>;
+
+// The order of registers represents the preferred allocation sequence,
+// meaning caller-save regs are listed before callee-save.
+def FPR64 : RegisterClass<"RISCV", [f64], 64, (add
+    (sequence "F%u_64", 0, 7),
+    (sequence "F%u_64", 10, 17),
+    (sequence "F%u_64", 28, 31),
+    (sequence "F%u_64", 8, 9),
+    (sequence "F%u_64", 18, 27)
+  )>;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 82edef65e11..7db49456ebc 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -33,6 +33,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool HasStdExtM = false;
   bool HasStdExtA = false;
   bool HasStdExtF = false;
+  bool HasStdExtD = false;
   bool HasRV64 = false;
   unsigned XLen = 32;
   MVT XLenVT = MVT::i32;
@@ -72,6 +73,7 @@ public:
   bool hasStdExtM() const { return HasStdExtM; }
  bool hasStdExtA() const { return HasStdExtA; }
   bool hasStdExtF() const { return HasStdExtF; }
+  bool hasStdExtD() const { return HasStdExtD; }
   bool is64Bit() const { return HasRV64; }
   MVT getXLenVT() const { return XLenVT; }
   unsigned getXLen() const { return XLen; }