author     Eugene Zelenko <eugene.zelenko@gmail.com>   2017-08-10 00:46:15 +0000
committer  Eugene Zelenko <eugene.zelenko@gmail.com>   2017-08-10 00:46:15 +0000
commit     c8fbf6ffeaeb4fc9a2b5fcd6c993d64bcbb52818 (patch)
tree       d746c79051b37da71bc55017cfadf5e25ae31d64 /llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
parent     3cbb12c2ed8ff2013d6d881800aecf46f8205813 (diff)
[AMDGPU] Fix some Clang-tidy modernize-use-using and Include What You Use warnings; other minor fixes (NFC).
llvm-svn: 310541
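
For reference, a minimal sketch of the two clang-tidy cleanups this commit applies throughout the diff below: a typedef replaced with a 'using' alias (modernize-use-using) and a flag moved from the constructor initializer list to a default member initializer. The IndexMap/Info names are hypothetical stand-ins for OptionalImmIndexMap and OperandInfoTy; this is an illustration, not code from the patch.

#include <cstdint>
#include <map>

// modernize-use-using: prefer a 'using' alias over 'typedef'.
// Old form: typedef std::map<int, unsigned> IndexMap;
using IndexMap = std::map<int, unsigned>;

struct Info {
  std::int64_t Id;
  bool IsSymbolic = false;  // default member initializer instead of ': IsSymbolic(false)'

  explicit Info(std::int64_t Id_) : Id(Id_) {}
};

int main() {
  IndexMap Indices;
  Indices[3] = 7;
  Info I(42);
  return I.IsSymbolic ? 1 : 0;  // exits 0 because IsSymbolic defaults to false
}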
Diffstat (limited to 'llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp')
-rw-r--r--   llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp   40
1 file changed, 20 insertions, 20 deletions
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 17e7288ac3a..9a0e4a39d31 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1,4 +1,4 @@
-//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
+//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "AMDGPU.h"
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
@@ -40,7 +41,9 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/AMDGPUCodeObjectMetadata.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
@@ -83,7 +86,7 @@ public:
AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
: MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
- typedef std::unique_ptr<AMDGPUOperand> Ptr;
+ using Ptr = std::unique_ptr<AMDGPUOperand>;
struct Modifiers {
bool Abs = false;
@@ -846,7 +849,7 @@ public:
Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
};
- typedef std::map<AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
+ using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
const MCInstrInfo &MII,
@@ -994,8 +997,9 @@ public:
private:
struct OperandInfoTy {
int64_t Id;
- bool IsSymbolic;
- OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
+ bool IsSymbolic = false;
+
+ OperandInfoTy(int64_t Id_) : Id(Id_) {}
};
bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
@@ -1285,7 +1289,6 @@ uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
}
void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
-
if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
Inst.getNumOperands())) {
addLiteralImmOperand(Inst, Imm.Val,
@@ -1317,7 +1320,7 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
- case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
+ case AMDGPU::OPERAND_REG_INLINE_C_FP64:
if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
AsmParser->hasInv2PiInlineImm())) {
Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
@@ -1341,7 +1344,7 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
// unclear how we should encode them. This case should be checked earlier
// in predicate methods (isLiteralImm())
llvm_unreachable("fp literal in 64-bit integer instruction.");
- }
+
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
@@ -1383,7 +1386,7 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
- case AMDGPU::OPERAND_REG_INLINE_C_FP32: {
+ case AMDGPU::OPERAND_REG_INLINE_C_FP32:
if (isInt<32>(Val) &&
AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
AsmParser->hasInv2PiInlineImm())) {
@@ -1393,11 +1396,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
return;
- }
+
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
- case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
+ case AMDGPU::OPERAND_REG_INLINE_C_FP64:
if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
Inst.addOperand(MCOperand::createImm(Val));
return;
@@ -1405,11 +1408,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
return;
- }
+
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
- case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
+ case AMDGPU::OPERAND_REG_INLINE_C_FP16:
if (isInt<16>(Val) &&
AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
AsmParser->hasInv2PiInlineImm())) {
@@ -1419,7 +1422,7 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Inst.addOperand(MCOperand::createImm(Val & 0xffff));
return;
- }
+
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
@@ -1717,7 +1720,6 @@ AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
(getLexer().getKind() == AsmToken::Integer ||
getLexer().getKind() == AsmToken::Real)) {
-
// This is a workaround for handling operands like these:
// |1.0|
// |-1|
@@ -2117,7 +2119,6 @@ bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
SIInstrFlags::SDWA)) {
-
// Check special imm operands (used by madmk, etc)
if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
++ConstantBusUseCount;
@@ -2162,7 +2163,6 @@ bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
}
bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
-
const unsigned Opcode = Inst.getOpcode();
const MCInstrDesc &Desc = MII.get(Opcode);
@@ -4128,8 +4128,8 @@ static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
&& Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
}
-void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands) {
-
+void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
+{
OptionalImmIndexMap OptionalIdx;
unsigned Opc = Inst.getOpcode();
@@ -4398,7 +4398,6 @@ AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
if (getLexer().isNot(AsmToken::RBrac))
return MatchOperand_ParseFail;
Parser.Lex();
-
} else {
// sel:%d
Parser.Lex();
@@ -4573,6 +4572,7 @@ void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
uint64_t BasicInstType, bool skipVcc) {
using namespace llvm::AMDGPU::SDWA;
+
OptionalImmIndexMap OptionalIdx;
bool skippedVcc = false;