author     Sam Kolton <Sam.Kolton@amd.com>    2016-09-09 09:37:51 +0000
committer  Sam Kolton <Sam.Kolton@amd.com>    2016-09-09 09:37:51 +0000
commit     d63d8a7c05950d7289ec2daaa526d63772bd3598 (patch)
tree       35816432d893f988e80ab7d8bbbad8ce0e26ecdb /llvm
parent     8efa9790290d29931df95056b5b76fc0c3373e73 (diff)
[AMDGPU] Assembler: match e32 VOP instructions before e64.
Summary:
Split the assembler match table into 4 tables, one per assembler variant:
  - Default - all instructions except VOP3, SDWA and DPP
  - VOP3
  - SDWA
  - DPP
The Default table is matched first, then VOP3, SDWA and DPP.

Reviewers: tstellarAMD, artem.tamazov, vpykhtin

Subscribers: arsenm, wdng, nhaehnle, AMDGPU

Differential Revision: https://reviews.llvm.org/D24252

llvm-svn: 281023
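(Illustration only, not part of the commit message: assuming the usual _e32/_e64 suffix convention named in the title, the variant ordering means a bare mnemonic is tried against the Default table first, while an explicit suffix forces one specific table.)

    v_add_f32     v0, v1, v2   ; no suffix: Default table tried first, so the e32 form is matched
    v_add_f32_e64 v0, v1, v2   ; _e64 suffix: isForcedVOP3() holds, only the VOP3 table is tried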
Diffstat (limited to 'llvm')
-rw-r--r--   llvm/lib/Target/AMDGPU/AMDGPU.td                      | 35
-rw-r--r--   llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp  | 87
-rw-r--r--   llvm/lib/Target/AMDGPU/SIDefines.h                    |  9
-rw-r--r--   llvm/lib/Target/AMDGPU/SIInstrFormats.td              |  3
-rw-r--r--   llvm/lib/Target/AMDGPU/SIInstrInfo.td                 | 21
-rw-r--r--   llvm/lib/Target/AMDGPU/VIInstrFormats.td              |  2
-rw-r--r--   llvm/lib/Target/AMDGPU/VIInstructions.td              |  1
7 files changed, 126 insertions(+), 32 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 72c45535441..58494a91d18 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -349,10 +349,45 @@ def AMDGPUAsmParser : AsmParser {
let ShouldEmitMatchRegisterName = 0;
}
+def AMDGPUAsmVariants {
+ string Default = "Default";
+ int Default_ID = 0;
+ string VOP3 = "VOP3";
+ int VOP3_ID = 1;
+ string SDWA = "SDWA";
+ int SDWA_ID = 2;
+ string DPP = "DPP";
+ int DPP_ID = 3;
+}
+
+def DefaultAMDGPUAsmParserVariant : AsmParserVariant {
+ let Variant = AMDGPUAsmVariants.Default_ID;
+ let Name = AMDGPUAsmVariants.Default;
+}
+
+def VOP3AsmParserVariant : AsmParserVariant {
+ let Variant = AMDGPUAsmVariants.VOP3_ID;
+ let Name = AMDGPUAsmVariants.VOP3;
+}
+
+def SDWAAsmParserVariant : AsmParserVariant {
+ let Variant = AMDGPUAsmVariants.SDWA_ID;
+ let Name = AMDGPUAsmVariants.SDWA;
+}
+
+def DPPAsmParserVariant : AsmParserVariant {
+ let Variant = AMDGPUAsmVariants.DPP_ID;
+ let Name = AMDGPUAsmVariants.DPP;
+}
+
def AMDGPU : Target {
// Pull in Instruction Info:
let InstructionSet = AMDGPUInstrInfo;
let AssemblyParsers = [AMDGPUAsmParser];
+ let AssemblyParserVariants = [DefaultAMDGPUAsmParserVariant,
+ VOP3AsmParserVariant,
+ SDWAAsmParserVariant,
+ DPPAsmParserVariant];
}
// Dummy Instruction itineraries for pseudo instructions
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 703c9358d70..2f0e6027c7f 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1147,35 +1147,76 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
+ // What asm variants we should check
+ std::vector<unsigned> MatchedVariants;
+ if (getForcedEncodingSize() == 32) {
+ MatchedVariants = {AMDGPUAsmVariants::DEFAULT};
+ } else if (isForcedVOP3()) {
+ MatchedVariants = {AMDGPUAsmVariants::VOP3};
+ } else if (isForcedSDWA()) {
+ MatchedVariants = {AMDGPUAsmVariants::SDWA};
+ } else if (isForcedDPP()) {
+ MatchedVariants = {AMDGPUAsmVariants::DPP};
+ } else {
+ MatchedVariants = {AMDGPUAsmVariants::DEFAULT,
+ AMDGPUAsmVariants::VOP3,
+ AMDGPUAsmVariants::SDWA,
+ AMDGPUAsmVariants::DPP};
+ }
+
MCInst Inst;
+ unsigned Result = Match_Success;
+ for (auto Variant : MatchedVariants) {
+ uint64_t EI;
+ auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
+ Variant);
+ // We order match statuses from least to most specific. We use most specific
+ // status as resulting
+ // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
+ if ((R == Match_Success) ||
+ (R == Match_PreferE32) ||
+ (R == Match_MissingFeature && Result != Match_PreferE32) ||
+ (R == Match_InvalidOperand && Result != Match_MissingFeature
+ && Result != Match_PreferE32) ||
+ (R == Match_MnemonicFail && Result != Match_InvalidOperand
+ && Result != Match_MissingFeature
+ && Result != Match_PreferE32)) {
+ Result = R;
+ ErrorInfo = EI;
+ }
+ if (R == Match_Success)
+ break;
+ }
- switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
- default: break;
- case Match_Success:
- Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst, getSTI());
- return false;
- case Match_MissingFeature:
- return Error(IDLoc, "instruction not supported on this GPU");
+ switch (Result) {
+ default: break;
+ case Match_Success:
+ Inst.setLoc(IDLoc);
+ Out.EmitInstruction(Inst, getSTI());
+ return false;
- case Match_MnemonicFail:
- return Error(IDLoc, "unrecognized instruction mnemonic");
+ case Match_MissingFeature:
+ return Error(IDLoc, "instruction not supported on this GPU");
- case Match_InvalidOperand: {
- SMLoc ErrorLoc = IDLoc;
- if (ErrorInfo != ~0ULL) {
- if (ErrorInfo >= Operands.size()) {
- return Error(IDLoc, "too few operands for instruction");
- }
- ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
- if (ErrorLoc == SMLoc())
- ErrorLoc = IDLoc;
+ case Match_MnemonicFail:
+ return Error(IDLoc, "unrecognized instruction mnemonic");
+
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0ULL) {
+ if (ErrorInfo >= Operands.size()) {
+ return Error(IDLoc, "too few operands for instruction");
}
- return Error(ErrorLoc, "invalid operand for instruction");
+ ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
}
- case Match_PreferE32:
- return Error(IDLoc, "internal error: instruction without _e64 suffix "
- "should be encoded as e32");
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+
+ case Match_PreferE32:
+ return Error(IDLoc, "internal error: instruction without _e64 suffix "
+ "should be encoded as e32");
}
llvm_unreachable("Implement any new match types added!");
}
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index f4b04e3631a..7eac83c655e 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -105,6 +105,15 @@ namespace SIOutMods {
};
}
+namespace AMDGPUAsmVariants {
+ enum {
+ DEFAULT = 0,
+ VOP3 = 1,
+ SDWA = 2,
+ DPP = 3
+ };
+}
+
namespace llvm {
namespace AMDGPU {
namespace EncValues { // Encoding values of enum9/8/7 operands
diff --git a/llvm/lib/Target/AMDGPU/SIInstrFormats.td b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
index 9e18784e07a..16f5e8abb38 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrFormats.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
@@ -95,6 +95,7 @@ class InstSI <dag outs, dag ins, string asm = "",
field bits<1> DisableDecoder = 0;
let isAsmParserOnly = !if(!eq(DisableDecoder{0}, {0}), 0, 1);
+ let AsmVariantName = AMDGPUAsmVariants.Default;
}
class PseudoInstSI<dag outs, dag ins, list<dag> pattern = []>
@@ -187,6 +188,8 @@ class VOP3Common <dag outs, dag ins, string asm = "",
"cvtVOP3",
!if(!eq(HasMods,1), "cvtVOP3_2_mod", ""));
+ let AsmVariantName = AMDGPUAsmVariants.VOP3;
+
let isCodeGenOnly = 0;
int Size = 8;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 05913ac2089..ad129b80a40 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1110,7 +1110,8 @@ def VOP_I32_F32_I32_I32 : VOPProfile <[i32, f32, i32, i32]>;
def VOP_I64_I64_I32_I64 : VOPProfile <[i64, i64, i32, i64]>;
// This class is used only with VOPC instructions. Use $sdst for out operand
-class SIInstAlias <string asm, Instruction inst, VOPProfile p> :
+class SIInstAlias <string asm, Instruction inst, VOPProfile p,
+ string VariantName = ""> :
InstAlias <asm, (inst)>, PredicateControl {
field bit isCompare;
@@ -1139,23 +1140,25 @@ class SIInstAlias <string asm, Instruction inst, VOPProfile p> :
// else
// 0 dst, 0 src
(inst))));
+
+ let AsmVariantName = VariantName;
}
-class SIInstAliasSI <string asm, string op_name, VOPProfile p> :
- SIInstAlias <asm, !cast<Instruction>(op_name#"_e32_si"), p> {
+class SIInstAliasSI <string asm, string op_name, VOPProfile p, string VariantName = ""> :
+ SIInstAlias <asm, !cast<Instruction>(op_name#"_e32_si"), p, VariantName> {
let AssemblerPredicate = SIAssemblerPredicate;
}
-class SIInstAliasVI <string asm, string op_name, VOPProfile p> :
- SIInstAlias <asm, !cast<Instruction>(op_name#"_e32_vi"), p> {
+class SIInstAliasVI <string asm, string op_name, VOPProfile p, string VariantName = ""> :
+ SIInstAlias <asm, !cast<Instruction>(op_name#"_e32_vi"), p, VariantName> {
let AssemblerPredicates = [isVI];
}
-multiclass SIInstAliasBuilder <string asm, VOPProfile p> {
+multiclass SIInstAliasBuilder <string asm, VOPProfile p, string VariantName = ""> {
- def : SIInstAliasSI <asm, NAME, p>;
+ def : SIInstAliasSI <asm, NAME, p, VariantName>;
- def : SIInstAliasVI <asm, NAME, p>;
+ def : SIInstAliasVI <asm, NAME, p, VariantName>;
}
class VOP <string opName> {
@@ -1818,7 +1821,7 @@ multiclass VOPC_m <vopc op, dag ins, string op_asm, list<dag> pattern,
} // End AssemblerPredicates = [isVI]
- defm : SIInstAliasBuilder<alias_asm, p>;
+ defm : SIInstAliasBuilder<alias_asm, p, AMDGPUAsmVariants.Default>;
}
multiclass VOPC_Helper <vopc op, string opName, list<dag> pat32,
diff --git a/llvm/lib/Target/AMDGPU/VIInstrFormats.td b/llvm/lib/Target/AMDGPU/VIInstrFormats.td
index 28ad37a2397..295548f53f1 100644
--- a/llvm/lib/Target/AMDGPU/VIInstrFormats.td
+++ b/llvm/lib/Target/AMDGPU/VIInstrFormats.td
@@ -140,6 +140,7 @@ class VOP_DPP <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0
let Size = 8;
let AsmMatchConverter = !if(!eq(HasMods,1), "cvtDPP", "");
+ let AsmVariantName = AMDGPUAsmVariants.DPP;
}
class VOP_DPPe : Enc64 {
@@ -186,6 +187,7 @@ class VOP_SDWA <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods =
VOPAnyCommon <outs, ins, asm, pattern> {
let SDWA = 1;
let Size = 8;
+ let AsmVariantName = AMDGPUAsmVariants.SDWA;
}
class VOP_SDWAe : Enc64 {
diff --git a/llvm/lib/Target/AMDGPU/VIInstructions.td b/llvm/lib/Target/AMDGPU/VIInstructions.td
index 818381f3701..fc88f1fb2c3 100644
--- a/llvm/lib/Target/AMDGPU/VIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VIInstructions.td
@@ -93,6 +93,7 @@ class SI2_VI3Alias <string name, Instruction inst> : InstAlias <
(inst VGPR_32:$dst, 0, VCSrc_32:$src0, 0, VCSrc_32:$src1, 0, 0)
>, PredicateControl {
let UseInstAsmMatchConverter = 0;
+ let AsmVariantName = AMDGPUAsmVariants.VOP3;
}
def : SI2_VI3Alias <"v_ldexp_f32", V_LDEXP_F32_e64_vi>;