path: root/llvm/lib/Target/AMDGPU/AsmParser
author    Dmitry Preobrazhensky <dmitry.preobrazhensky@amd.com>  2017-08-07 13:14:12 +0000
committer Dmitry Preobrazhensky <dmitry.preobrazhensky@amd.com>  2017-08-07 13:14:12 +0000
commit    50805a0b83a2df7d77cebfbea1c175009a883cb4 (patch)
tree      dbe3352df5c487ca8d347151aecc7be9db62ffc1 /llvm/lib/Target/AMDGPU/AsmParser
parent    3886705689aba8da7a8a7e3b243b40189a61f8b6 (diff)
[AMDGPU][MC] Corrected VOP3 version of v_interp_* instructions for VI
See bug 32621: https://bugs.llvm.org//show_bug.cgi?id=32621

Reviewers: vpykhtin, SamWot, arsenm

Differential Revision: https://reviews.llvm.org/D35902

llvm-svn: 310251
Diffstat (limited to 'llvm/lib/Target/AMDGPU/AsmParser')
-rw-r--r-- llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index abf0b1a2c24..728f3522e3c 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -164,7 +164,8 @@ public:
ImmTyOpSelHi,
ImmTyNegLo,
ImmTyNegHi,
- ImmTySwizzle
+ ImmTySwizzle,
+ ImmTyHigh
};
struct TokOp {
@@ -312,6 +313,7 @@ public:
bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
bool isNegLo() const { return isImmTy(ImmTyNegLo); }
bool isNegHi() const { return isImmTy(ImmTyNegHi); }
+ bool isHigh() const { return isImmTy(ImmTyHigh); }
bool isMod() const {
return isClampSI() || isOModSI();
@@ -673,6 +675,7 @@ public:
case ImmTyNegLo: OS << "NegLo"; break;
case ImmTyNegHi: OS << "NegHi"; break;
case ImmTySwizzle: OS << "Swizzle"; break;
+ case ImmTyHigh: OS << "High"; break;
}
}
@@ -1064,6 +1067,8 @@ public:
void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
+ void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
+
void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
bool IsAtomic = false);
void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
@@ -4020,6 +4025,7 @@ static const OptionalOperand AMDGPUOptionalOperandTable[] = {
{"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
{"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
{"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
+ {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
{"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
{"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
{"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
@@ -4122,6 +4128,45 @@ static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
&& Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
}
+void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands) {
+
+ OptionalImmIndexMap OptionalIdx;
+ unsigned Opc = Inst.getOpcode();
+
+ unsigned I = 1;
+ const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+ for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+ }
+
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+ if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
+ Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
+ } else if (Op.isInterpSlot() ||
+ Op.isInterpAttr() ||
+ Op.isAttrChan()) {
+ Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
+ } else if (Op.isImmModifier()) {
+ OptionalIdx[Op.getImmTy()] = I;
+ } else {
+ llvm_unreachable("unhandled operand type");
+ }
+ }
+
+ if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
+ }
+
+ if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
+ }
+
+ if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
+ }
+}
+
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
OptionalImmIndexMap &OptionalIdx) {
unsigned Opc = Inst.getOpcode();
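
For readers unfamiliar with the parser's optional-operand handling, the new cvtVOP3Interp converter follows a two-pass pattern: the loop over Operands records where each optional immediate (high, clamp, omod) appeared in OptionalIdx, and the trailing addOptionalImmOperand calls append them in the fixed order the encoding expects. Below is a minimal standalone sketch of that pattern, using simplified stand-in types rather than LLVM's MCInst/AMDGPUOperand classes, as an illustration only:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Simplified stand-ins for the parser's types; illustration only.
enum ImmTy { ImmTyHigh, ImmTyClampSI, ImmTyOModSI };

struct ParsedOp {
  ImmTy Ty;
  int64_t Val;
};

using OptionalImmIndexMap = std::map<ImmTy, unsigned>;

// Append the modifier's value if the user wrote it, otherwise a default of 0.
// This is the role addOptionalImmOperand plays in cvtVOP3Interp above.
static void addOptionalImm(std::vector<int64_t> &Encoded,
                           const std::vector<ParsedOp> &Parsed,
                           const OptionalImmIndexMap &OptionalIdx, ImmTy Ty) {
  auto It = OptionalIdx.find(Ty);
  Encoded.push_back(It == OptionalIdx.end() ? 0 : Parsed[It->second].Val);
}

int main() {
  // Suppose the user wrote "... high clamp" but no omod: the first pass over
  // the operands records where each optional modifier appeared.
  std::vector<ParsedOp> Parsed = {{ImmTyHigh, 1}, {ImmTyClampSI, 1}};
  OptionalImmIndexMap OptionalIdx = {{ImmTyHigh, 0}, {ImmTyClampSI, 1}};

  // Second pass: emit the modifiers in the fixed order the encoding expects
  // (high, then clamp, then omod), defaulting any the user omitted.
  std::vector<int64_t> Encoded;
  addOptionalImm(Encoded, Parsed, OptionalIdx, ImmTyHigh);
  addOptionalImm(Encoded, Parsed, OptionalIdx, ImmTyClampSI);
  addOptionalImm(Encoded, Parsed, OptionalIdx, ImmTyOModSI);

  for (int64_t V : Encoded)
    std::cout << V << ' ';   // prints: 1 1 0
  std::cout << '\n';
  return 0;
}

In the actual converter the same ordering is guarded by getNamedOperandIdx checks, so only opcodes whose operand tables define high, clamp, or omod receive those immediates.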