Diffstat (limited to 'llvm/lib/Target/AMDGPU')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp           |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp    |  8
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp               |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp           |  8
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNRegPressure.cpp               | 21
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNRegPressure.h                 |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600ISelLowering.cpp             |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600InstrInfo.cpp                | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp         |  4
-rw-r--r--  llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp  |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp             |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp              | 26
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp            |  3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp               | 23
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp          | 16
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp               |  6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp                  | 61
-rw-r--r--  llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp         |  7
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp           |  5
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp              |  6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp           | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp   | 11
-rw-r--r--  llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp               | 16
-rw-r--r--  llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp         |  4
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp               |  6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.h                 |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp         | 30
-rw-r--r--  llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp              | 13
29 files changed, 149 insertions(+), 171 deletions(-)
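The whole patch is one mechanical migration: the static register predicates formerly reached through TargetRegisterInfo (isVirtualRegister, isPhysicalRegister, index2VirtReg) are now reached through llvm::Register, which provides the same static helpers. A minimal sketch of the before/after pattern, assuming LLVM's CodeGen headers; the wrapper function is illustrative, not part of the patch:

  #include "llvm/CodeGen/Register.h"

  // Same predicate, new home: llvm::Register converts implicitly to and from
  // unsigned, so call sites only swap the class qualifier.
  static bool isVirt(unsigned Reg) {
    return llvm::Register::isVirtualRegister(Reg); // was TargetRegisterInfo::isVirtualRegister(Reg)
  }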
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 3851ee8968b..b2491ebc6f4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -544,7 +544,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
if (!N->isMachineOpcode()) {
if (N->getOpcode() == ISD::CopyToReg) {
unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
return MRI.getRegClass(Reg);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index b71bfce60b3..3d76b8b7e67 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -62,7 +62,7 @@ AMDGPUInstructionSelector::AMDGPUInstructionSelector(
const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return Reg == AMDGPU::SCC;
auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
@@ -83,7 +83,7 @@ static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
bool AMDGPUInstructionSelector::isVCC(Register Reg,
const MachineRegisterInfo &MRI) const {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return Reg == TRI.getVCC();
auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
@@ -157,7 +157,7 @@ bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
}
for (const MachineOperand &MO : I.operands()) {
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
continue;
const TargetRegisterClass *RC =
@@ -550,7 +550,7 @@ bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
for (const MachineOperand &MO : Ins->operands()) {
if (!MO.isReg())
continue;
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
continue;
const TargetRegisterClass *RC =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 23749095705..113e5a32264 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -694,7 +694,7 @@ void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- if (TRI->isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
// If this is a source register to a PHI we are chaining, it
@@ -734,7 +734,7 @@ void LinearizedRegion::storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- if (TRI->isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
for (auto &UI : MRI->use_operands(Reg)) {
@@ -949,7 +949,7 @@ void LinearizedRegion::replaceRegister(unsigned Register, unsigned NewRegister,
(IncludeLoopPHI && IsLoopPHI);
if (ShouldReplace) {
- if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
+ if (Register::isPhysicalRegister(NewRegister)) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");
@@ -1022,7 +1022,7 @@ void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) {
for (auto &RI : II.uses()) {
if (RI.isReg()) {
unsigned Reg = RI.getReg();
- if (TRI->isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (hasNoDef(Reg, MRI))
continue;
if (!MRI->hasOneDef(Reg)) {
@@ -2230,7 +2230,7 @@ void AMDGPUMachineCFGStructurizer::replaceRegisterWith(unsigned Register,
I != E;) {
MachineOperand &O = *I;
++I;
- if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
+ if (Register::isPhysicalRegister(NewRegister)) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");
diff --git a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
index 51c4c99cfb1..c9446015aeb 100644
--- a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
@@ -174,7 +174,7 @@ GCNNSAReassign::CheckNSA(const MachineInstr &MI, bool Fast) const {
for (unsigned I = 0; I < Info->VAddrDwords; ++I) {
const MachineOperand &Op = MI.getOperand(VAddr0Idx + I);
unsigned Reg = Op.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
+ if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
return NSA_Status::FIXED;
unsigned PhysReg = VRM->getPhys(Reg);
diff --git a/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp b/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp
index f0d47eaa4ed..2e5b130cf9f 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp
@@ -230,7 +230,7 @@ private:
public:
Printable printReg(unsigned Reg, unsigned SubReg = 0) const {
return Printable([Reg, SubReg, this](raw_ostream &OS) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (Register::isPhysicalRegister(Reg)) {
OS << llvm::printReg(Reg, TRI);
return;
}
@@ -275,7 +275,7 @@ char GCNRegBankReassign::ID = 0;
char &llvm::GCNRegBankReassignID = GCNRegBankReassign::ID;
unsigned GCNRegBankReassign::getPhysRegBank(unsigned Reg) const {
- assert (TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
unsigned Size = TRI->getRegSizeInBits(*RC);
@@ -293,7 +293,7 @@ unsigned GCNRegBankReassign::getPhysRegBank(unsigned Reg) const {
unsigned GCNRegBankReassign::getRegBankMask(unsigned Reg, unsigned SubReg,
int Bank) {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (!VRM->isAssignedReg(Reg))
return 0;
@@ -420,7 +420,7 @@ unsigned GCNRegBankReassign::getOperandGatherWeight(const MachineInstr& MI,
}
bool GCNRegBankReassign::isReassignable(unsigned Reg) const {
- if (TargetRegisterInfo::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
+ if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
return false;
const MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index f3eac95b057..4abbb8537fb 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -40,7 +40,7 @@ void llvm::printLivesAt(SlotIndex SI,
<< *LIS.getInstructionFromIndex(SI);
unsigned Num = 0;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ const unsigned Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
const auto &LI = LIS.getInterval(Reg);
@@ -84,7 +84,7 @@ bool llvm::isEqual(const GCNRPTracker::LiveRegSet &S1,
unsigned GCNRegPressure::getRegKind(unsigned Reg,
const MachineRegisterInfo &MRI) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(Register::isVirtualRegister(Reg));
const auto RC = MRI.getRegClass(Reg);
auto STI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
return STI->isSGPRClass(RC) ?
@@ -197,8 +197,7 @@ void GCNRegPressure::print(raw_ostream &OS, const GCNSubtarget *ST) const {
static LaneBitmask getDefRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI) {
- assert(MO.isDef() && MO.isReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()));
+ assert(MO.isDef() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
// We don't rely on read-undef flag because in case of tentative schedule
// tracking it isn't set correctly yet. This works correctly however since
@@ -211,8 +210,7 @@ static LaneBitmask getDefRegMask(const MachineOperand &MO,
static LaneBitmask getUsedRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI,
const LiveIntervals &LIS) {
- assert(MO.isUse() && MO.isReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()));
+ assert(MO.isUse() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
if (auto SubReg = MO.getSubReg())
return MRI.getTargetRegisterInfo()->getSubRegIndexLaneMask(SubReg);
@@ -233,7 +231,7 @@ collectVirtualRegUses(const MachineInstr &MI, const LiveIntervals &LIS,
const MachineRegisterInfo &MRI) {
SmallVector<RegisterMaskPair, 8> Res;
for (const auto &MO : MI.operands()) {
- if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
if (!MO.isUse() || !MO.readsReg())
continue;
@@ -279,7 +277,7 @@ GCNRPTracker::LiveRegSet llvm::getLiveRegs(SlotIndex SI,
const MachineRegisterInfo &MRI) {
GCNRPTracker::LiveRegSet LiveRegs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- auto Reg = TargetRegisterInfo::index2VirtReg(I);
+ auto Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
auto LiveMask = getLiveLaneMask(Reg, SI, LIS, MRI);
@@ -330,8 +328,7 @@ void GCNUpwardRPTracker::recede(const MachineInstr &MI) {
MaxPressure = max(AtMIPressure, MaxPressure);
for (const auto &MO : MI.defs()) {
- if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
- MO.isDead())
+ if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()) || MO.isDead())
continue;
auto Reg = MO.getReg();
@@ -410,7 +407,7 @@ void GCNDownwardRPTracker::advanceToNext() {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
auto &LiveMask = LiveRegs[Reg];
auto PrevMask = LiveMask;
@@ -501,7 +498,7 @@ void GCNRPTracker::printLiveRegs(raw_ostream &OS, const LiveRegSet& LiveRegs,
const MachineRegisterInfo &MRI) {
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ unsigned Reg = Register::index2VirtReg(I);
auto It = LiveRegs.find(Reg);
if (It != LiveRegs.end() && It->second.any())
OS << ' ' << printVRegOrUnit(Reg, TRI) << ':'
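The index-based helper migrates the same way; a sketch of the virtual-register scan used repeatedly in GCNRegPressure, assuming MRI is the function's MachineRegisterInfo as in the hunks above:

  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    unsigned Reg = llvm::Register::index2VirtReg(I); // was TargetRegisterInfo::index2VirtReg(I)
    // ... query LiveIntervals for Reg, as in printLiveRegs above ...
  }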
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index e4894418b94..5862cdb0416 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -214,7 +214,7 @@ getLiveRegMap(Range &&R, bool After, LiveIntervals &LIS) {
DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> LiveRegMap;
SmallVector<SlotIndex, 32> LiveIdxs, SRLiveIdxs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- auto Reg = TargetRegisterInfo::index2VirtReg(I);
+ auto Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
auto &LI = LIS.getInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index f80a53ba1dc..29b4d5559d9 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -335,7 +335,7 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case R600::MASK_WRITE: {
unsigned maskedRegister = MI.getOperand(0).getReg();
- assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
+ assert(Register::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
break;
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index d9e839fe203..79e36b71e0f 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -97,8 +97,8 @@ bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const {
for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
E = MBBI->operands_end(); I != E; ++I) {
- if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
- I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
+ if (I->isReg() && !Register::isVirtualRegister(I->getReg()) && I->isUse() &&
+ RI.isPhysRegLiveAcrossClauses(I->getReg()))
return false;
}
return true;
@@ -242,8 +242,7 @@ bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
E = MI.operands_end();
I != E; ++I) {
- if (!I->isReg() || !I->isUse() ||
- TargetRegisterInfo::isVirtualRegister(I->getReg()))
+ if (!I->isReg() || !I->isUse() || Register::isVirtualRegister(I->getReg()))
continue;
if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
@@ -1193,8 +1192,7 @@ int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
unsigned Reg = LI.first;
- if (TargetRegisterInfo::isVirtualRegister(Reg) ||
- !IndirectRC->contains(Reg))
+ if (Register::isVirtualRegister(Reg) || !IndirectRC->contains(Reg))
continue;
unsigned RegIndex;
diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
index 34267a909b5..e1abdb78452 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -183,7 +183,7 @@ isPhysicalRegCopy(MachineInstr *MI) {
if (MI->getOpcode() != R600::COPY)
return false;
- return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
+ return !Register::isVirtualRegister(MI->getOperand(1).getReg());
}
void R600SchedStrategy::releaseTopNode(SUnit *SU) {
@@ -209,7 +209,7 @@ void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
const TargetRegisterClass *RC) const {
- if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (!Register::isVirtualRegister(Reg)) {
return RC->contains(Reg);
} else {
return MRI->getRegClass(Reg) == RC;
diff --git a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 9f1cb6582b5..d34d30fcdd7 100644
--- a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -58,7 +58,7 @@ using namespace llvm;
static bool isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
assert(MRI.isSSA());
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return false;
const MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
return MI && MI->isImplicitDef();
diff --git a/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp b/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp
index 685df74490f..ef12c1d2459 100644
--- a/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp
@@ -93,7 +93,7 @@ const RegClassWeight &R600RegisterInfo::getRegClassWeight(
}
bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const {
- assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(!Register::isVirtualRegister(Reg));
switch (Reg) {
case R600::OQAP:
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 05aee164cb5..a169133a6ec 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -148,7 +148,7 @@ static bool hasVectorOperands(const MachineInstr &MI,
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
if (!MI.getOperand(i).isReg() ||
- !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
+ !Register::isVirtualRegister(MI.getOperand(i).getReg()))
continue;
if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
@@ -164,18 +164,16 @@ getCopyRegClasses(const MachineInstr &Copy,
unsigned DstReg = Copy.getOperand(0).getReg();
unsigned SrcReg = Copy.getOperand(1).getReg();
- const TargetRegisterClass *SrcRC =
- TargetRegisterInfo::isVirtualRegister(SrcReg) ?
- MRI.getRegClass(SrcReg) :
- TRI.getPhysRegClass(SrcReg);
+ const TargetRegisterClass *SrcRC = Register::isVirtualRegister(SrcReg)
+ ? MRI.getRegClass(SrcReg)
+ : TRI.getPhysRegClass(SrcReg);
// We don't really care about the subregister here.
// SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());
- const TargetRegisterClass *DstRC =
- TargetRegisterInfo::isVirtualRegister(DstReg) ?
- MRI.getRegClass(DstReg) :
- TRI.getPhysRegClass(DstReg);
+ const TargetRegisterClass *DstRC = Register::isVirtualRegister(DstReg)
+ ? MRI.getRegClass(DstReg)
+ : TRI.getPhysRegClass(DstReg);
return std::make_pair(SrcRC, DstRC);
}
@@ -201,8 +199,8 @@ static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
auto &Src = MI.getOperand(1);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = Src.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
- !TargetRegisterInfo::isVirtualRegister(DstReg))
+ if (!Register::isVirtualRegister(SrcReg) ||
+ !Register::isVirtualRegister(DstReg))
return false;
for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
@@ -250,7 +248,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
return false;
// It is illegal to have vreg inputs to a physreg defining reg_sequence.
- if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
+ if (Register::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
return false;
const TargetRegisterClass *SrcRC, *DstRC;
@@ -624,7 +622,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
const TargetRegisterClass *SrcRC, *DstRC;
std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
- if (!TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ if (!Register::isVirtualRegister(DstReg)) {
// If the destination register is a physical register there isn't
// really much we can do to fix this.
// Some special instructions use M0 as an input. Some even only use
@@ -644,7 +642,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
unsigned SrcReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (!Register::isVirtualRegister(SrcReg)) {
TII->moveToVALU(MI, MDT);
break;
}
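A recurring shape in this file and several below: the register class comes from MRI for virtual registers and from the target register info for physical ones. Sketch, assuming MRI and TRI in scope as in the hunk above:

  const TargetRegisterClass *RC = llvm::Register::isVirtualRegister(Reg)
                                      ? MRI.getRegClass(Reg)
                                      : TRI.getPhysRegClass(Reg);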
diff --git a/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp b/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp
index 5b834c8de13..a0119297b11 100644
--- a/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp
@@ -91,8 +91,7 @@ static bool findSRegBaseAndIndex(MachineOperand *Op,
Worklist.push_back(Op);
while (!Worklist.empty()) {
MachineOperand *WOp = Worklist.pop_back_val();
- if (!WOp->isReg() ||
- !TargetRegisterInfo::isVirtualRegister(WOp->getReg()))
+ if (!WOp->isReg() || !Register::isVirtualRegister(WOp->getReg()))
continue;
MachineInstr *DefInst = MRI.getUniqueVRegDef(WOp->getReg());
switch (DefInst->getOpcode()) {
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 74d77d32801..1b6981121f8 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -444,7 +444,7 @@ static bool tryToFoldACImm(const SIInstrInfo *TII,
return false;
unsigned UseReg = OpToFold.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(UseReg))
+ if (!Register::isVirtualRegister(UseReg))
return false;
if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
@@ -570,14 +570,13 @@ void SIFoldOperands::foldOperand(
if (FoldingImmLike && UseMI->isCopy()) {
unsigned DestReg = UseMI->getOperand(0).getReg();
- const TargetRegisterClass *DestRC
- = TargetRegisterInfo::isVirtualRegister(DestReg) ?
- MRI->getRegClass(DestReg) :
- TRI->getPhysRegClass(DestReg);
+ const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg)
+ ? MRI->getRegClass(DestReg)
+ : TRI->getPhysRegClass(DestReg);
unsigned SrcReg = UseMI->getOperand(1).getReg();
- if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
- TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (Register::isVirtualRegister(DestReg) &&
+ Register::isVirtualRegister(SrcReg)) {
const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
MachineRegisterInfo::use_iterator NextUse;
@@ -616,7 +615,7 @@ void SIFoldOperands::foldOperand(
CopiesToReplace.push_back(UseMI);
} else {
if (UseMI->isCopy() && OpToFold.isReg() &&
- TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
+ Register::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
TRI->isVectorRegister(*MRI, UseMI->getOperand(1).getReg()) &&
!UseMI->getOperand(1).getSubReg()) {
@@ -810,7 +809,7 @@ static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
if (Op.isReg()) {
// If this has a subregister, it obviously is a register source.
if (Op.getSubReg() != AMDGPU::NoSubRegister ||
- !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
+ !Register::isVirtualRegister(Op.getReg()))
return &Op;
MachineInstr *Def = MRI.getVRegDef(Op.getReg());
@@ -1339,8 +1338,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
if (!FoldingImm && !OpToFold.isReg())
continue;
- if (OpToFold.isReg() &&
- !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
+ if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
continue;
// Prevent folding operands backwards in the function. For example,
@@ -1350,8 +1348,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
// ...
// %vgpr0 = V_MOV_B32_e32 1, implicit %exec
MachineOperand &Dst = MI.getOperand(0);
- if (Dst.isReg() &&
- !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
+ if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
continue;
foldInstOperand(MI, OpToFold);
diff --git a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
index f3c9ad63a80..1fcf8fbeb35 100644
--- a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -144,7 +144,7 @@ static unsigned getMopState(const MachineOperand &MO) {
S |= RegState::Kill;
if (MO.isEarlyClobber())
S |= RegState::EarlyClobber;
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) && MO.isRenamable())
+ if (Register::isPhysicalRegister(MO.getReg()) && MO.isRenamable())
S |= RegState::Renamable;
return S;
}
@@ -152,7 +152,7 @@ static unsigned getMopState(const MachineOperand &MO) {
template <typename Callable>
void SIFormMemoryClauses::forAllLanes(unsigned Reg, LaneBitmask LaneMask,
Callable Func) const {
- if (LaneMask.all() || TargetRegisterInfo::isPhysicalRegister(Reg) ||
+ if (LaneMask.all() || Register::isPhysicalRegister(Reg) ||
LaneMask == MRI->getMaxLaneMaskForVReg(Reg)) {
Func(0);
return;
@@ -227,7 +227,7 @@ bool SIFormMemoryClauses::canBundle(const MachineInstr &MI,
if (Conflict == Map.end())
continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return false;
LaneBitmask Mask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
@@ -269,9 +269,9 @@ void SIFormMemoryClauses::collectRegUses(const MachineInstr &MI,
if (!Reg)
continue;
- LaneBitmask Mask = TargetRegisterInfo::isVirtualRegister(Reg) ?
- TRI->getSubRegIndexLaneMask(MO.getSubReg()) :
- LaneBitmask::getAll();
+ LaneBitmask Mask = Register::isVirtualRegister(Reg)
+ ? TRI->getSubRegIndexLaneMask(MO.getSubReg())
+ : LaneBitmask::getAll();
RegUse &Map = MO.isDef() ? Defs : Uses;
auto Loc = Map.find(Reg);
@@ -389,7 +389,7 @@ bool SIFormMemoryClauses::runOnMachineFunction(MachineFunction &MF) {
for (auto &&R : Defs) {
unsigned Reg = R.first;
Uses.erase(Reg);
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
continue;
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
@@ -397,7 +397,7 @@ bool SIFormMemoryClauses::runOnMachineFunction(MachineFunction &MF) {
for (auto &&R : Uses) {
unsigned Reg = R.first;
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
continue;
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a14a8929aa4..639d8193749 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10093,7 +10093,7 @@ SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
// Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
// to try understanding copies to physical registers.
if (SrcVal.getValueType() == MVT::i1 &&
- TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
+ Register::isPhysicalRegister(DestReg->getReg())) {
SDLoc SL(Node);
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
SDValue VReg = DAG.getRegister(
@@ -10246,7 +10246,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
MachineOperand &Op = MI.getOperand(I);
if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
- !TargetRegisterInfo::isVirtualRegister(Op.getReg()) ||
+ !Register::isVirtualRegister(Op.getReg()) ||
!TRI->isAGPR(MRI, Op.getReg()))
continue;
auto *Src = MRI.getUniqueVRegDef(Op.getReg());
@@ -10674,7 +10674,7 @@ bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
const MachineRegisterInfo &MRI = MF->getRegInfo();
const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
unsigned Reg = R->getReg();
- if (TRI.isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return !TRI.isSGPRReg(MRI, Reg);
if (MRI.isLiveIn(Reg)) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index b964af49739..85c8abe848c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -460,7 +460,7 @@ bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
const unsigned Reg = FirstDst->getReg();
- const TargetRegisterClass *DstRC = TargetRegisterInfo::isVirtualRegister(Reg)
+ const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg)
? MRI.getRegClass(Reg)
: RI.getPhysRegClass(Reg);
@@ -1052,7 +1052,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
// The SGPR spill/restore instructions only work on number sgprs, so we need
// to make sure we are using the correct register class.
- if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
+ if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
}
@@ -1182,7 +1182,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
// FIXME: Maybe this should not include a memoperand because it will be
// lowered to non-memory instructions.
const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
- if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
+ if (Register::isVirtualRegister(DestReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
}
@@ -2374,12 +2374,12 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
MRI->hasOneUse(Src0->getReg())) {
Src0->ChangeToImmediate(Def->getOperand(1).getImm());
Src0Inlined = true;
- } else if ((RI.isPhysicalRegister(Src0->getReg()) &&
- (ST.getConstantBusLimit(Opc) <= 1 &&
- RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
- (RI.isVirtualRegister(Src0->getReg()) &&
- (ST.getConstantBusLimit(Opc) <= 1 &&
- RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
+ } else if ((Register::isPhysicalRegister(Src0->getReg()) &&
+ (ST.getConstantBusLimit(Opc) <= 1 &&
+ RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
+ (Register::isVirtualRegister(Src0->getReg()) &&
+ (ST.getConstantBusLimit(Opc) <= 1 &&
+ RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
return false;
// VGPR is okay as Src0 - fallthrough
}
@@ -2392,10 +2392,10 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
MRI->hasOneUse(Src1->getReg()) &&
commuteInstruction(UseMI)) {
Src0->ChangeToImmediate(Def->getOperand(1).getImm());
- } else if ((RI.isPhysicalRegister(Src1->getReg()) &&
- RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
- (RI.isVirtualRegister(Src1->getReg()) &&
- RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+ } else if ((Register::isPhysicalRegister(Src1->getReg()) &&
+ RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
+ (Register::isVirtualRegister(Src1->getReg()) &&
+ RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
return false;
// VGPR is okay as Src1 - fallthrough
}
@@ -3043,7 +3043,7 @@ bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
if (!MO.isUse())
return false;
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (Register::isVirtualRegister(MO.getReg()))
return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
// Null is free
@@ -3111,7 +3111,7 @@ static bool shouldReadExec(const MachineInstr &MI) {
static bool isSubRegOf(const SIRegisterInfo &TRI,
const MachineOperand &SuperVec,
const MachineOperand &SubReg) {
- if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
+ if (Register::isPhysicalRegister(SubReg.getReg()))
return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
@@ -3152,7 +3152,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
continue;
unsigned Reg = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
+ if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) {
ErrInfo = "inlineasm operand has incorrect register class.";
return false;
}
@@ -3217,8 +3217,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
if (RegClass != -1) {
unsigned Reg = MI.getOperand(i).getReg();
- if (Reg == AMDGPU::NoRegister ||
- TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
continue;
const TargetRegisterClass *RC = RI.getRegClass(RegClass);
@@ -3311,7 +3310,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
ErrInfo =
"Dst register should be tied to implicit use of preserved register";
return false;
- } else if (TargetRegisterInfo::isPhysicalRegister(TiedMO.getReg()) &&
+ } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
Dst.getReg() != TiedMO.getReg()) {
ErrInfo = "Dst register should use same physical register as preserved";
return false;
@@ -3718,7 +3717,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
Desc.OpInfo[OpNo].RegClass == -1) {
unsigned Reg = MI.getOperand(OpNo).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return MRI.getRegClass(Reg);
return RI.getPhysRegClass(Reg);
}
@@ -3823,10 +3822,9 @@ bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
return false;
unsigned Reg = MO.getReg();
- const TargetRegisterClass *RC =
- TargetRegisterInfo::isVirtualRegister(Reg) ?
- MRI.getRegClass(Reg) :
- RI.getPhysRegClass(Reg);
+ const TargetRegisterClass *RC = Register::isVirtualRegister(Reg)
+ ? MRI.getRegClass(Reg)
+ : RI.getPhysRegClass(Reg);
const SIRegisterInfo *TRI =
static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
@@ -4438,7 +4436,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
if (!MI.getOperand(i).isReg() ||
- !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
+ !Register::isVirtualRegister(MI.getOperand(i).getReg()))
continue;
const TargetRegisterClass *OpRC =
MRI.getRegClass(MI.getOperand(i).getReg());
@@ -4466,7 +4464,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
// Update all the operands so they have the same type.
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
MachineOperand &Op = MI.getOperand(I);
- if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
+ if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
continue;
// MI is a PHI instruction.
@@ -4491,7 +4489,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
// subregister index types e.g. sub0_sub1 + sub2 + sub3
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
MachineOperand &Op = MI.getOperand(I);
- if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
+ if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
continue;
const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
@@ -4942,7 +4940,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
unsigned NewDstReg = AMDGPU::NoRegister;
if (HasDst) {
unsigned DstReg = Inst.getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
continue;
// Update the destination register class.
@@ -4951,7 +4949,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
continue;
if (Inst.isCopy() &&
- TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
+ Register::isVirtualRegister(Inst.getOperand(1).getReg()) &&
NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
// Instead of creating a copy where src and dst are the same register
// class, we just replace all uses of dst with src. These kinds of
@@ -6264,7 +6262,7 @@ static bool followSubRegDef(MachineInstr &MI,
MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
MachineRegisterInfo &MRI) {
assert(MRI.isSSA());
- if (!TargetRegisterInfo::isVirtualRegister(P.Reg))
+ if (!Register::isVirtualRegister(P.Reg))
return nullptr;
auto RSR = P;
@@ -6275,8 +6273,7 @@ MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
case AMDGPU::COPY:
case AMDGPU::V_MOV_B32_e32: {
auto &Op1 = MI->getOperand(1);
- if (Op1.isReg() &&
- TargetRegisterInfo::isVirtualRegister(Op1.getReg())) {
+ if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
if (Op1.isUndef())
return nullptr;
RSR = getRegSubRegPair(Op1);
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index b48b66195ec..d3048fcde5a 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -249,8 +249,7 @@ static void addDefsUsesToList(const MachineInstr &MI,
if (Op.isReg()) {
if (Op.isDef())
RegDefs.insert(Op.getReg());
- else if (Op.readsReg() &&
- TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
+ else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
PhysRegUses.insert(Op.getReg());
}
}
@@ -282,7 +281,7 @@ static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
if (Use.isReg() &&
((Use.readsReg() && RegDefs.count(Use.getReg())) ||
(Use.isDef() && RegDefs.count(Use.getReg())) ||
- (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
+ (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
PhysRegUses.count(Use.getReg())))) {
Insts.push_back(&MI);
addDefsUsesToList(MI, RegDefs, PhysRegUses);
@@ -548,7 +547,7 @@ bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
// We only ever merge operations with the same base address register, so
// don't bother scanning forward if there are no other uses.
if (AddrReg[i]->isReg() &&
- (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
+ (Register::isPhysicalRegister(AddrReg[i]->getReg()) ||
MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
return false;
}
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 516b9bed63c..0070b1229c9 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -503,7 +503,7 @@ void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
SmallVectorImpl<MachineOperand> &Src) const {
MachineOperand &Op = MI.getOperand(OpNo);
- if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) {
+ if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg())) {
Src.push_back(Op);
return;
}
@@ -523,8 +523,7 @@ void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
for (const auto &SrcOp : Def->explicit_operands())
if (SrcOp.isReg() && SrcOp.isUse() &&
- (TargetRegisterInfo::isVirtualRegister(SrcOp.getReg()) ||
- SrcOp.getReg() == Exec))
+ (Register::isVirtualRegister(SrcOp.getReg()) || SrcOp.getReg() == Exec))
Src.push_back(SrcOp);
}
diff --git a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
index 1c0f836f07e..8b46f3d8044 100644
--- a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
@@ -96,7 +96,7 @@ private:
getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;
bool isVreg1(unsigned Reg) const {
- return TargetRegisterInfo::isVirtualRegister(Reg) &&
+ return Register::isVirtualRegister(Reg) &&
MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
}
@@ -689,7 +689,7 @@ void SILowerI1Copies::lowerCopiesToI1() {
unsigned SrcReg = MI.getOperand(1).getReg();
assert(!MI.getOperand(1).getSubReg());
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
+ if (!Register::isVirtualRegister(SrcReg) ||
(!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
unsigned TmpReg = createLaneMaskReg(*MF);
@@ -734,7 +734,7 @@ bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
break;
Reg = MI->getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return false;
if (!isLaneMaskReg(Reg))
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index ebbdf80f956..6372f2df399 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -348,7 +348,7 @@ void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
// Do not Track Physical Registers, because it messes up.
for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
- if (TargetRegisterInfo::isVirtualRegister(RegMaskPair.RegUnit))
+ if (Register::isVirtualRegister(RegMaskPair.RegUnit))
LiveInRegs.insert(RegMaskPair.RegUnit);
}
LiveOutRegs.clear();
@@ -376,7 +376,7 @@ void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
// The use of findDefBetween removes the case 4.
for (const auto &RegMaskPair : RPTracker.getPressure().LiveOutRegs) {
unsigned Reg = RegMaskPair.RegUnit;
- if (TargetRegisterInfo::isVirtualRegister(Reg) &&
+ if (Register::isVirtualRegister(Reg) &&
isDefBetween(Reg, LIS->getInstructionIndex(*BeginBlock).getRegSlot(),
LIS->getInstructionIndex(*EndBlock).getRegSlot(), MRI,
LIS)) {
@@ -1690,7 +1690,7 @@ SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
void SIScheduleBlockScheduler::addLiveRegs(std::set<unsigned> &Regs) {
for (unsigned Reg : Regs) {
// For now only track virtual registers.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
// If not already in the live set, then add it.
(void) LiveRegs.insert(Reg);
@@ -1750,7 +1750,7 @@ SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
for (unsigned Reg : InRegs) {
// For now only track virtual registers.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
if (LiveRegsConsumers[Reg] > 1)
continue;
@@ -1762,7 +1762,7 @@ SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
for (unsigned Reg : OutRegs) {
// For now only track virtual registers.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
for (; PSetI.isValid(); ++PSetI) {
@@ -1913,7 +1913,7 @@ SIScheduleDAGMI::fillVgprSgprCost(_Iterator First, _Iterator End,
for (_Iterator RegI = First; RegI != End; ++RegI) {
unsigned Reg = *RegI;
// For now only track virtual registers
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
PSetIterator PSetI = MRI.getPressureSets(Reg);
for (; PSetI.isValid(); ++PSetI) {
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
index 4b17cf4e632..b04df380103 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
@@ -266,20 +266,19 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
// Try to remove compare. Cmp value should not used in between of cmp
// and s_and_b64 if VCC or just unused if any other register.
- if ((TargetRegisterInfo::isVirtualRegister(CmpReg) &&
- MRI.use_nodbg_empty(CmpReg)) ||
+ if ((Register::isVirtualRegister(CmpReg) && MRI.use_nodbg_empty(CmpReg)) ||
(CmpReg == CondReg &&
std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
[&](const MachineInstr &MI) {
- return MI.readsRegister(CondReg, TRI); }))) {
+ return MI.readsRegister(CondReg, TRI);
+ }))) {
LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');
LIS->RemoveMachineInstrFromMaps(*Cmp);
Cmp->eraseFromParent();
// Try to remove v_cndmask_b32.
- if (TargetRegisterInfo::isVirtualRegister(SelReg) &&
- MRI.use_nodbg_empty(SelReg)) {
+ if (Register::isVirtualRegister(SelReg) && MRI.use_nodbg_empty(SelReg)) {
LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');
LIS->RemoveMachineInstrFromMaps(*Sel);
@@ -434,7 +433,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
if (Changed) {
for (auto Reg : RecalcRegs) {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
LIS->removeInterval(Reg);
if (!MRI.reg_empty(Reg))
LIS->createAndComputeVirtRegInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 2d71abc0612..7888086085f 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -574,8 +574,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(Src1->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(Src1->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
@@ -613,8 +613,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(Src1->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(Src1->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
@@ -677,8 +677,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(Src0->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(Src0->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
return make_unique<SDWASrcOperand>(
@@ -706,8 +706,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(ValSrc->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(ValSrc->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
return make_unique<SDWASrcOperand>(
diff --git a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
index f9bfe96f65c..1cda9932785 100644
--- a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
@@ -95,7 +95,7 @@ bool SIPreAllocateWWMRegs::processDef(MachineOperand &MO) {
if (!TRI->isVGPR(*MRI, Reg))
return false;
- if (TRI->isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return false;
if (VRM->hasPhys(Reg))
@@ -125,7 +125,7 @@ void SIPreAllocateWWMRegs::rewriteRegs(MachineFunction &MF) {
continue;
const unsigned VirtReg = MO.getReg();
- if (TRI->isPhysicalRegister(VirtReg))
+ if (Register::isPhysicalRegister(VirtReg))
continue;
if (!VRM->hasPhys(VirtReg))
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 989d7062995..3c5d8fc576a 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1361,7 +1361,7 @@ StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
- assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(!Register::isVirtualRegister(Reg));
static const TargetRegisterClass *const BaseClasses[] = {
&AMDGPU::VGPR_32RegClass,
@@ -1796,7 +1796,7 @@ ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC
const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
unsigned Reg) const {
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return MRI.getRegClass(Reg);
return getPhysRegClass(Reg);
@@ -1968,7 +1968,7 @@ MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
SlotIndex UseIdx = LIS->getInstructionIndex(Use);
SlotIndex DefIdx;
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (!LIS->hasInterval(Reg))
return nullptr;
LiveInterval &LI = LIS->getInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index 34487c96e72..a847db98479 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -141,7 +141,7 @@ public:
bool isSGPRReg(const MachineRegisterInfo &MRI, unsigned Reg) const {
const TargetRegisterClass *RC;
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
RC = MRI.getRegClass(Reg);
else
RC = getPhysRegClass(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 7ee178149c7..c208cf1ef1e 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -78,7 +78,7 @@ static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
MachineOperand &Src0 = MI.getOperand(Src0Idx);
if (Src0.isReg()) {
unsigned Reg = Src0.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
+ if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
if (Def && Def->isMoveImmediate()) {
MachineOperand &MovSrc = Def->getOperand(1);
@@ -360,8 +360,7 @@ static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
}
if (NewImm != 0) {
- if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
- SrcReg->isReg()) {
+ if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
return true;
@@ -394,12 +393,11 @@ static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
if (!MO.isReg())
continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
- TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (Register::isPhysicalRegister(Reg) &&
+ Register::isPhysicalRegister(MO.getReg())) {
if (TRI.regsOverlap(Reg, MO.getReg()))
return true;
- } else if (MO.getReg() == Reg &&
- TargetRegisterInfo::isVirtualRegister(Reg)) {
+ } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
TRI.getSubRegIndexLaneMask(MO.getSubReg());
if (Overlap.any())
@@ -425,7 +423,7 @@ static TargetInstrInfo::RegSubRegPair
getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (Register::isPhysicalRegister(Reg)) {
Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
} else {
LaneBitmask LM = TRI.getSubRegIndexLaneMask(Sub);
@@ -579,7 +577,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// XXX - not exactly a check for post-regalloc run.
MachineOperand &Src = MI.getOperand(1);
if (Src.isImm() &&
- TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
+ Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
int32_t ReverseImm;
if (isReverseInlineImm(TII, Src, ReverseImm)) {
MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
@@ -643,8 +641,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// FIXME: This could work better if hints worked with subregisters. If
// we have a vector add of a constant, we usually don't get the correct
// allocation due to the subregister usage.
- if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
- Src0->isReg()) {
+ if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
continue;
@@ -672,8 +669,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &Dst = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
- if (Src.isImm() &&
- TargetRegisterInfo::isPhysicalRegister(Dst.getReg())) {
+ if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
int32_t ReverseImm;
if (isKImmOperand(TII, Src))
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
@@ -722,7 +718,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (TII->isVOPC(Op32)) {
unsigned DstReg = MI.getOperand(0).getReg();
- if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ if (Register::isVirtualRegister(DstReg)) {
// VOPC instructions can only write to the VCC register. We can't
// force them to use VCC here, because this is only one register and
// cannot deal with sequences which would require multiple copies of
@@ -746,7 +742,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (!Src2->isReg())
continue;
unsigned SReg = Src2->getReg();
- if (TargetRegisterInfo::isVirtualRegister(SReg)) {
+ if (Register::isVirtualRegister(SReg)) {
MRI.setRegAllocationHint(SReg, 0, VCCReg);
continue;
}
@@ -766,7 +762,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
bool Next = false;
if (SDst->getReg() != VCCReg) {
- if (TargetRegisterInfo::isVirtualRegister(SDst->getReg()))
+ if (Register::isVirtualRegister(SDst->getReg()))
MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
Next = true;
}
@@ -774,7 +770,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// All of the instructions with carry outs also have an SGPR input in
// src2.
if (Src2 && Src2->getReg() != VCCReg) {
- if (TargetRegisterInfo::isVirtualRegister(Src2->getReg()))
+ if (Register::isVirtualRegister(Src2->getReg()))
MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
Next = true;
}
diff --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 332c7176a8c..7980f15104c 100644
--- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -278,7 +278,7 @@ void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
// Handle physical registers that we need to track; this is mostly relevant
// for VCC, which can appear as the (implicit) input of a uniform branch,
// e.g. when a loop counter is stored in a VGPR.
- if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (!Register::isVirtualRegister(Reg)) {
if (Reg == AMDGPU::EXEC || Reg == AMDGPU::EXEC_LO)
continue;
@@ -362,7 +362,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
LowerToCopyInstrs.push_back(&MI);
} else {
unsigned Reg = Inactive.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
for (MachineInstr &DefMI : MRI->def_instructions(Reg))
markInstruction(DefMI, StateWWM, Worklist);
}
@@ -392,7 +392,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
unsigned Reg = MO.getReg();
- if (!TRI->isVirtualRegister(Reg) &&
+ if (!Register::isVirtualRegister(Reg) &&
TRI->hasVectorRegisters(TRI->getPhysRegClass(Reg))) {
Flags = StateWQM;
break;
@@ -858,10 +858,9 @@ void SIWholeQuadMode::lowerCopyInstrs() {
const unsigned Reg = MI->getOperand(0).getReg();
if (TRI->isVGPR(*MRI, Reg)) {
- const TargetRegisterClass *regClass =
- TargetRegisterInfo::isVirtualRegister(Reg)
- ? MRI->getRegClass(Reg)
- : TRI->getPhysRegClass(Reg);
+ const TargetRegisterClass *regClass = Register::isVirtualRegister(Reg)
+ ? MRI->getRegClass(Reg)
+ : TRI->getPhysRegClass(Reg);
const unsigned MovOp = TII->getMovOpcode(regClass);
MI->setDesc(TII->get(MovOp));