path: root/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
author    NAKAMURA Takumi <geek4civic@gmail.com>  2016-02-25 08:35:27 +0000
committer NAKAMURA Takumi <geek4civic@gmail.com>  2016-02-25 08:35:27 +0000
commit    3d3d0f41516dcef0992bedb37d35ce6db1b31847 (patch)
tree      fd1fec3c36e0309c5635118fba06ccded16ebd16 /llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
parent    58941ee12af86ab7fb18d5a40b66a1ac949c0fef (diff)
download  bcm5719-llvm-3d3d0f41516dcef0992bedb37d35ce6db1b31847.tar.gz
          bcm5719-llvm-3d3d0f41516dcef0992bedb37d35ce6db1b31847.zip
Revert r261742, "[AMDGPU] Assembler: Simplify handling of optional operands"

It brought undefined behavior.

llvm-svn: 261839
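For context, the reverted change had replaced direct OptionalIdx[...] map indexing in the convert routines with an addOptionalImmOperand helper that tolerates optional operands the user never wrote. A minimal sketch of the two lookup strategies, using simplified stand-in types (assumption: int operands and a bare enum here; the real code uses MCInst, OperandVector, and AMDGPUOperand from LLVM's MC layer):

// Sketch only: contrasts r261742's tolerant helper with the restored
// direct map indexing. Types are simplified stand-ins, not LLVM classes.
#include <cstdio>
#include <map>
#include <vector>

enum ImmTy { ImmTyOffset, ImmTyGLC, ImmTySLC, ImmTyTFE };

// r261742's helper (removed by this revert): if the optional operand was
// never parsed, encode a default of 0 instead of indexing the operand list.
void addOptionalImm(std::vector<int> &Inst, const std::vector<int> &Operands,
                    const std::map<ImmTy, unsigned> &OptionalIdx, ImmTy T) {
  auto It = OptionalIdx.find(T);
  if (It != OptionalIdx.end())
    Inst.push_back(Operands[It->second]); // operand was written explicitly
  else
    Inst.push_back(0);                    // synthesize the default value
}

int main() {
  std::map<ImmTy, unsigned> OptionalIdx;
  std::vector<int> Operands = {16}; // only 'offset:16' appeared in the source
  OptionalIdx[ImmTyOffset] = 0;

  std::vector<int> Inst;
  addOptionalImm(Inst, Operands, OptionalIdx, ImmTyOffset); // encodes 16
  addOptionalImm(Inst, Operands, OptionalIdx, ImmTyGLC);    // encodes 0

  // The restored code below indexes the map directly, e.g.
  //   unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  // std::map::operator[] default-inserts 0 for an absent key, so that is
  // only safe because the restored parser loop keeps parsing past the end
  // of statement and appends a default operand for every optional argument.
  std::printf("%d %d\n", Inst[0], Inst[1]); // prints: 16 0
}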
Diffstat (limited to 'llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp')
-rw-r--r-- llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 133
1 file changed, 72 insertions(+), 61 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 89827dd15d1..6458955b3ef 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -486,7 +486,6 @@ public:
   OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
   OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
   void cvtFlat(MCInst &Inst, const OperandVector &Operands);
-  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
 
   void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
   OperandMatchResultTy parseOffset(OperandVector &Operands);
@@ -673,8 +672,31 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     SMLoc ErrorLoc = IDLoc;
     if (ErrorInfo != ~0ULL) {
       if (ErrorInfo >= Operands.size()) {
+        if (isForcedVOP3()) {
+          // If 64-bit encoding has been forced we can end up with no
+          // clamp or omod operands if none of the registers have modifiers,
+          // so we need to add these to the operand list.
+          AMDGPUOperand &LastOp =
+              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
+          if (LastOp.isRegKind() ||
+             (LastOp.isImm() &&
+              LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
+            SMLoc S = Parser.getTok().getLoc();
+            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+                                AMDGPUOperand::ImmTyClamp));
+            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+                                AMDGPUOperand::ImmTyOMod));
+            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
+                                               Out, ErrorInfo,
+                                               MatchingInlineAsm);
+            if (!Res)
+              return Res;
+          }
+
+        }
         return Error(IDLoc, "too few operands for instruction");
       }
+
       ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
       if (ErrorLoc == SMLoc())
         ErrorLoc = IDLoc;
@@ -1239,6 +1261,13 @@ bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
     }
   }
 
+  // Once we reach end of statement, continue parsing so we can add default
+  // values for optional arguments.
+  AMDGPUAsmParser::OperandMatchResultTy Res;
+  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
+    if (Res != MatchOperand_Success)
+      return Error(getLexer().getLoc(), "failed parsing operand.");
+  }
 
   return false;
 }
@@ -1327,18 +1356,6 @@ AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
   return MatchOperand_Success;
 }
 
-typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
-
-void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
-  auto i = OptionalIdx.find(ImmT);
-  if (i != OptionalIdx.end()) {
-    unsigned Idx = i->second;
-    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
-  } else {
-    Inst.addOperand(MCOperand::createImm(0));
-  }
-}
-
 static bool operandsHasOptionalOp(const OperandVector &Operands,
                                   const OptionalOperand &OOp) {
   for (unsigned i = 0; i < Operands.size(); i++) {
@@ -1375,15 +1392,11 @@ AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
     if (Res != MatchOperand_Success)
       return Res;
 
-    bool DefaultValue = (Value == Op.Default);
-
     if (Op.ConvertResult && !Op.ConvertResult(Value)) {
       return MatchOperand_ParseFail;
     }
 
-    if (!DefaultValue) {
-      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
-    }
+    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
     return MatchOperand_Success;
   }
   return MatchOperand_NoMatch;
@@ -1437,7 +1450,7 @@ bool AMDGPUOperand::isDSOffset01() const {
 
 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                     const OperandVector &Operands) {
-  OptionalImmIndexMap OptionalIdx;
+  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1452,10 +1465,13 @@ void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
 
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
+  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
+  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
+  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
 
+  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
+  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
+  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
 }
@@ -1482,11 +1498,12 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
+  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
+  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
 
   if (!GDSOnly) {
-    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
+    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
+    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
   }
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
 }
@@ -1625,7 +1642,7 @@ AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
 
 void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                               const OperandVector &Operands) {
-  OptionalImmIndexMap OptionalIdx;
+  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1636,39 +1653,27 @@ void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
       continue;
     }
 
-    OptionalIdx[Op.getImmTy()] = i;
-  }
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
-}
-
+    // Handle 'glc' token which is sometimes hard-coded into the
+    // asm string. There are no MCInst operands for these.
+    if (Op.isToken())
+      continue;
 
-void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
-                                    const OperandVector &Operands) {
-  OptionalImmIndexMap OptionalIdx;
+    // Handle optional arguments
+    OptionalIdx[Op.getImmTy()] = i;
 
-  bool token = false;
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
-    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+  }
 
-    // Add the register arguments
-    if (Op.isReg()) {
-      Op.addRegOperands(Inst, 1);
-      continue;
-    }
+  // flat atomic instructions don't have a glc argument.
+  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
+    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
+    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
+  }
 
-    // Handle 'glc' token for flat atomics.
-    if (Op.isToken()) {
-      token = true;
-      continue;
-    }
+  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
+  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
 
-    // Handle optional arguments
-    OptionalIdx[Op.getImmTy()] = token ? i - 1 : i;
-  }
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
+  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1713,7 +1718,7 @@ bool AMDGPUOperand::isMubufOffset() const {
 
 void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                                const OperandVector &Operands) {
-  OptionalImmIndexMap OptionalIdx;
+  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1741,10 +1746,17 @@ void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
-  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+  assert(OptionalIdx.size() == 4);
+
+  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
+  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
+  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
+  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
+
+  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
+  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
+  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
+  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1878,8 +1890,7 @@ void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
 }
 
 void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
-  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
-  if (TSFlags & SIInstrFlags::VOP3) {
+  if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
     cvtVOP3(Inst, Operands);
   } else {
     cvtId(Inst, Operands);
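
The MatchAndEmitInstruction hunk above restores a retry: when matching fails with too few operands and the VOP3 (64-bit) encoding was forced, the parser appends default clamp and omod operands and matches again. A condensed sketch of that retry shape, with a hypothetical simplified matcher standing in for LLVM's generated one (Operand, tryMatch, and matchForcedVOP3 are illustrative names, not from the tree):

// Sketch only: the real code drives the tablegen-generated matcher and
// works on OperandVector/MCInst; here a toy matcher stands in for it.
#include <string>
#include <vector>

struct Operand {
  std::string Name;
  bool IsDefault;
};

// Stand-in for the generated matcher: succeeds only when the trailing
// clamp/omod operands are present.
static bool tryMatch(const std::vector<Operand> &Ops) {
  return Ops.size() >= 2 && Ops[Ops.size() - 2].Name == "clamp" &&
         Ops.back().Name == "omod";
}

// If the first match fails because no register carried modifiers (so clamp
// and omod were never parsed), append their defaults and match once more.
bool matchForcedVOP3(std::vector<Operand> Ops) {
  if (tryMatch(Ops))
    return true;
  Ops.push_back({"clamp", /*IsDefault=*/true});
  Ops.push_back({"omod", /*IsDefault=*/true});
  return tryMatch(Ops);
}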