-rw-r--r--  llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp  131
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.td                  19
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstructions.td                2
3 files changed, 77 insertions, 75 deletions
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 6458955b3ef..67d7e08fc32 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -486,6 +486,7 @@ public:
OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
void cvtFlat(MCInst &Inst, const OperandVector &Operands);
+ void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseOffset(OperandVector &Operands);
@@ -672,31 +673,8 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size()) {
- if (isForcedVOP3()) {
- // If 64-bit encoding has been forced we can end up with no
- // clamp or omod operands if none of the registers have modifiers,
- // so we need to add these to the operand list.
- AMDGPUOperand &LastOp =
- ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
- if (LastOp.isRegKind() ||
- (LastOp.isImm() &&
- LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
- SMLoc S = Parser.getTok().getLoc();
- Operands.push_back(AMDGPUOperand::CreateImm(0, S,
- AMDGPUOperand::ImmTyClamp));
- Operands.push_back(AMDGPUOperand::CreateImm(0, S,
- AMDGPUOperand::ImmTyOMod));
- bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
- Out, ErrorInfo,
- MatchingInlineAsm);
- if (!Res)
- return Res;
- }
-
- }
return Error(IDLoc, "too few operands for instruction");
}
-
ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
@@ -1261,13 +1239,6 @@ bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
}
}
- // Once we reach end of statement, continue parsing so we can add default
- // values for optional arguments.
- AMDGPUAsmParser::OperandMatchResultTy Res;
- while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
- if (Res != MatchOperand_Success)
- return Error(getLexer().getLoc(), "failed parsing operand.");
- }
return false;
}
@@ -1356,6 +1327,18 @@ AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
return MatchOperand_Success;
}
+typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
+
+void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
+ auto i = OptionalIdx.find(ImmT);
+ if (i != OptionalIdx.end()) {
+ unsigned Idx = i->second;
+ ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
+ } else {
+ Inst.addOperand(MCOperand::createImm(0));
+ }
+}
+
static bool operandsHasOptionalOp(const OperandVector &Operands,
const OptionalOperand &OOp) {
for (unsigned i = 0; i < Operands.size(); i++) {
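
The helper added above decouples the order in which optional immediates appear in the assembly source from the order the MCInst encoding expects: each cvt* converter records where every optional immediate was parsed, then renders them in canonical order, substituting the default 0 for anything the user omitted. A minimal standalone model of that pattern (illustrative types only, not the LLVM API):

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

// Stand-ins for the parser's types; the names are illustrative only.
enum class ImmTy { Offset, GLC, SLC, TFE };
struct ParsedOp { ImmTy Ty; int64_t Val; };

// Append immediates in the encoding's canonical order: use the parsed
// value when the operand appeared in the source, else default to 0.
static void renderOptional(std::vector<int64_t> &Inst,
                           const std::vector<ParsedOp> &Ops,
                           const std::map<ImmTy, unsigned> &OptionalIdx,
                           ImmTy Ty) {
  auto It = OptionalIdx.find(Ty);
  Inst.push_back(It != OptionalIdx.end() ? Ops[It->second].Val : 0);
}

int main() {
  // Models "... offset:16 slc": glc and tfe were not written.
  std::vector<ParsedOp> Ops = {{ImmTy::Offset, 16}, {ImmTy::SLC, 1}};
  std::map<ImmTy, unsigned> OptionalIdx;
  for (unsigned I = 0; I != Ops.size(); ++I)
    OptionalIdx[Ops[I].Ty] = I; // record where each optional operand landed

  std::vector<int64_t> Inst;
  for (ImmTy Ty : {ImmTy::Offset, ImmTy::GLC, ImmTy::SLC, ImmTy::TFE})
    renderOptional(Inst, Ops, OptionalIdx, Ty);

  for (int64_t V : Inst)
    std::printf("%lld ", (long long)V); // prints "16 0 1 0 "
  std::printf("\n");
  return 0;
}
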
@@ -1392,11 +1375,15 @@ AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
if (Res != MatchOperand_Success)
return Res;
+ bool DefaultValue = (Value == Op.Default);
+
if (Op.ConvertResult && !Op.ConvertResult(Value)) {
return MatchOperand_ParseFail;
}
- Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
+ if (!DefaultValue) {
+ Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
+ }
return MatchOperand_Success;
}
return MatchOperand_NoMatch;
@@ -1450,7 +1437,7 @@ bool AMDGPUOperand::isDSOffset01() const {
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
const OperandVector &Operands) {
- std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+ OptionalImmIndexMap OptionalIdx;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1465,13 +1452,10 @@ void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
OptionalIdx[Op.getImmTy()] = i;
}
- unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
- unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
- unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
- ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
- ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
- ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
@@ -1498,12 +1482,10 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
OptionalIdx[Op.getImmTy()] = i;
}
- unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
- ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
if (!GDSOnly) {
- unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
- ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
}
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
@@ -1642,7 +1625,7 @@ AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
const OperandVector &Operands) {
- std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+ OptionalImmIndexMap OptionalIdx;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1653,27 +1636,37 @@ void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
continue;
}
- // Handle 'glc' token which is sometimes hard-coded into the
- // asm string. There are no MCInst operands for these.
- if (Op.isToken())
- continue;
-
- // Handle optional arguments
OptionalIdx[Op.getImmTy()] = i;
-
}
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+}
- // flat atomic instructions don't have a glc argument.
- if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
- unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
- ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
- }
- unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
- unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
+void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
+ const OperandVector &Operands) {
+ OptionalImmIndexMap OptionalIdx;
+
+ for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+
+ // Add the register arguments
+ if (Op.isReg()) {
+ Op.addRegOperands(Inst, 1);
+ continue;
+ }
+
+ // Handle 'glc' token for flat atomics.
+ if (Op.isToken()) {
+ continue;
+ }
- ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+ // Handle optional arguments
+ OptionalIdx[Op.getImmTy()] = i;
+ }
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
//===----------------------------------------------------------------------===//
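
cvtFlatAtomic exists because the operand surface of flat atomics differs from plain flat loads and stores: the non-returning forms hard-code glc = 0 in their encoding, and on the _RTN forms the trailing "glc" is a literal token in the asm string rather than a parsed operand, so the converter renders only slc and tfe. A sketch of the two canonical render orders (illustrative code, not the LLVM API):

#include <cstdio>
#include <vector>

// Illustrative sketch: the optional-immediate order each converter appends
// after the register operands. The real converters call
// addOptionalImmOperand() once per entry, in exactly this order.
static std::vector<const char *> renderOrder(bool IsAtomic) {
  if (IsAtomic)
    return {"slc", "tfe"};      // cvtFlatAtomic: glc is never rendered
  return {"glc", "slc", "tfe"}; // cvtFlat
}

int main() {
  for (bool IsAtomic : {false, true}) {
    std::printf("%s", IsAtomic ? "cvtFlatAtomic:" : "cvtFlat:");
    for (const char *Name : renderOrder(IsAtomic))
      std::printf(" %s", Name);
    std::printf("\n");
  }
  return 0;
}
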
@@ -1718,7 +1711,7 @@ bool AMDGPUOperand::isMubufOffset() const {
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
const OperandVector &Operands) {
- std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+ OptionalImmIndexMap OptionalIdx;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1746,17 +1739,10 @@ void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
OptionalIdx[Op.getImmTy()] = i;
}
- assert(OptionalIdx.size() == 4);
-
- unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
- unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
- unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
- unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
-
- ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
//===----------------------------------------------------------------------===//
@@ -1890,7 +1876,8 @@ void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
}
void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
- if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
+ uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
+ if (TSFlags & SIInstrFlags::VOP3) {
cvtVOP3(Inst, Operands);
} else {
cvtId(Inst, Operands);
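
The cvtVOP3_2_mod change replaces a guess (did the source carry modifiers, or did the user force the _e64 suffix?) with ground truth: by the time the converter runs, the matcher has already committed to an opcode, so the instruction description's TSFlags say definitively whether the VOP3 encoding was selected. A small model of that dispatch, with made-up flag values standing in for SIInstrFlags:

#include <cstdint>
#include <cstdio>

// Made-up flag bits standing in for SIInstrFlags; the real values live in
// the AMDGPU target description.
namespace ModelFlags {
enum : uint64_t { VOP1 = 1u << 0, VOP2 = 1u << 1, VOP3 = 1u << 2 };
}

struct InstrDescModel { uint64_t TSFlags; };

// Once an opcode is matched, dispatch on the encoding bit recorded in its
// descriptor instead of re-deriving the encoding from the operand list.
static const char *convertFor(const InstrDescModel &Desc) {
  return (Desc.TSFlags & ModelFlags::VOP3) ? "cvtVOP3" : "cvtId";
}

int main() {
  InstrDescModel E32 = {ModelFlags::VOP2}; // e.g. a v_add_f32_e32 descriptor
  InstrDescModel E64 = {ModelFlags::VOP3}; // e.g. a v_add_f32_e64 descriptor
  std::printf("%s %s\n", convertFor(E32), convertFor(E64)); // cvtId cvtVOP3
  return 0;
}
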
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index c5b64fdc919..85f42a8f254 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -435,6 +435,7 @@ def MubufOffsetMatchClass : AsmOperandClass {
let Name = "MubufOffset";
let ParserMethod = "parseMubufOptionalOps";
let RenderMethod = "addImmOperands";
+ let IsOptional = 1;
}
class DSOffsetBaseMatchClass <string parser> : AsmOperandClass {
@@ -442,6 +443,7 @@ class DSOffsetBaseMatchClass <string parser> : AsmOperandClass {
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
let PredicateMethod = "isDSOffset";
+ let IsOptional = 1;
}
def DSOffsetMatchClass : DSOffsetBaseMatchClass <"parseDSOptionalOps">;
@@ -452,6 +454,7 @@ def DSOffset01MatchClass : AsmOperandClass {
let ParserMethod = "parseDSOff01OptionalOps";
let RenderMethod = "addImmOperands";
let PredicateMethod = "isDSOffset01";
+ let IsOptional = 1;
}
class GDSBaseMatchClass <string parser> : AsmOperandClass {
@@ -459,6 +462,7 @@ class GDSBaseMatchClass <string parser> : AsmOperandClass {
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
+ let IsOptional = 1;
}
def GDSMatchClass : GDSBaseMatchClass <"parseDSOptionalOps">;
@@ -469,6 +473,7 @@ class GLCBaseMatchClass <string parser> : AsmOperandClass {
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
+ let IsOptional = 1;
}
def GLCMubufMatchClass : GLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -479,6 +484,7 @@ class SLCBaseMatchClass <string parser> : AsmOperandClass {
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
+ let IsOptional = 1;
}
def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -490,6 +496,7 @@ class TFEBaseMatchClass <string parser> : AsmOperandClass {
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
+ let IsOptional = 1;
}
def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">;
@@ -523,13 +530,21 @@ def SMRDLiteralOffsetMatchClass : SMRDOffsetBaseMatchClass <
"isSMRDLiteralOffset"
>;
+class OptionalImmAsmOperand <string OpName> : AsmOperandClass {
+ let Name = "Imm"#OpName;
+ let PredicateMethod = "isImm";
+ let IsOptional = 1;
+}
+
let OperandType = "OPERAND_IMMEDIATE" in {
def offen : Operand<i1> {
let PrintMethod = "printOffen";
+ let ParserMatchClass = OptionalImmAsmOperand<"offen">;
}
def idxen : Operand<i1> {
let PrintMethod = "printIdxen";
+ let ParserMatchClass = OptionalImmAsmOperand<"idxen">;
}
def addr64 : Operand<i1> {
let PrintMethod = "printAddr64";
@@ -2871,7 +2886,7 @@ multiclass FLAT_ATOMIC <flat op, string asm_name, RegisterClass vdst_rc,
dag outs_noret = (outs),
string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> {
- let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in {
+ let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0, AsmMatchConverter = "cvtFlatAtomic" in {
def "" : FLAT_Pseudo <NAME, outs_noret,
(ins VReg_64:$addr, data_rc:$data,
slc_flat_atomic:$slc, tfe_flat_atomic:$tfe), []>,
@@ -2888,7 +2903,7 @@ multiclass FLAT_ATOMIC <flat op, string asm_name, RegisterClass vdst_rc,
asm_noret>;
}
- let glc = 1, hasPostISelHook = 1 in {
+ let glc = 1, hasPostISelHook = 1, AsmMatchConverter = "cvtFlatAtomic" in {
defm _RTN : FLAT_AtomicRet_m <op, (outs vdst_rc:$vdst),
(ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
tfe_flat_atomic:$tfe),
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d5db5f9ecbc..c141d5f12b1 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1433,7 +1433,7 @@ multiclass V_CNDMASK <vop2 op, string name> {
defm _e64 : VOP3_m <
op, VOP_CNDMASK.Outs, VOP_CNDMASK.Ins64,
- name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3>;
+ name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3, 0>;
}
defm V_CNDMASK_B32 : V_CNDMASK<vop2<0x0>, "v_cndmask_b32">;