Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp | 4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 56
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp | 19
-rw-r--r--  llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp | 10
-rw-r--r--  llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNRegPressure.cpp | 21
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNRegPressure.h | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600InstrInfo.cpp | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 26
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 23
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp | 16
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 61
-rw-r--r--  llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp | 7
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp | 5
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp | 11
-rw-r--r--  llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 16
-rw-r--r--  llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.h | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp | 30
-rw-r--r--  llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp | 13
-rw-r--r--  llvm/lib/Target/ARM/A15SDOptimizer.cpp | 22
-rw-r--r--  llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 25
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp | 6
-rw-r--r--  llvm/lib/Target/ARM/ARMCallLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMInstructionSelector.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMScheduleA9.td | 4
-rw-r--r--  llvm/lib/Target/ARM/MLxExpansionPass.cpp | 20
-rw-r--r--  llvm/lib/Target/ARM/Thumb1InstrInfo.cpp | 17
-rw-r--r--  llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 6
-rw-r--r--  llvm/lib/Target/ARM/ThumbRegisterInfo.cpp | 7
-rw-r--r--  llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp | 11
-rw-r--r--  llvm/lib/Target/BPF/BPFMIPeephole.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/BitTracker.cpp | 18
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp | 33
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBitTracker.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp | 14
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp | 15
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp | 22
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp | 20
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonGenInsert.cpp | 17
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp | 10
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonPeephole.cpp | 14
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp | 20
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.cpp | 5
-rw-r--r--  llvm/lib/Target/Hexagon/RDFCopy.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/RDFGraph.cpp | 8
-rw-r--r--  llvm/lib/Target/Hexagon/RDFLiveness.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/RDFRegisters.cpp | 8
-rw-r--r--  llvm/lib/Target/Hexagon/RDFRegisters.h | 8
-rw-r--r--  llvm/lib/Target/Lanai/LanaiInstrInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/Mips/Mips16InstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/Mips/MipsInstructionSelector.cpp | 4
-rw-r--r--  llvm/lib/Target/Mips/MipsOptimizePICCall.cpp | 3
-rw-r--r--  llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp | 10
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp | 8
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXPeephole.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp | 13
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 22
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMIPeephole.cpp | 36
-rw-r--r--  llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp | 4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCVSXCopy.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp | 6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp | 6
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp | 6
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h | 8
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp | 7
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp | 6
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp | 3
-rw-r--r--  llvm/lib/Target/X86/X86CallFrameOptimization.cpp | 6
-rw-r--r--  llvm/lib/Target/X86/X86CmovConversion.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/X86DomainReassignment.cpp | 12
-rw-r--r--  llvm/lib/Target/X86/X86FlagsCopyLowering.cpp | 5
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp | 21
-rw-r--r--  llvm/lib/Target/X86/X86InstructionSelector.cpp | 10
-rw-r--r--  llvm/lib/Target/X86/X86OptimizeLEAs.cpp | 3
-rw-r--r--  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp | 4
113 files changed, 503 insertions(+), 543 deletions(-)
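
Every hunk below applies the same mechanical migration: the isVirtualRegister/isPhysicalRegister predicates (and index2VirtReg) move from TargetRegisterInfo to static members of the Register class, with unchanged semantics. A minimal sketch of the before/after shape, assuming an unsigned Reg operand value; the variable names are illustrative, not taken from any specific hunk:

    // Before: static helpers spelled on TargetRegisterInfo.
    const TargetRegisterClass *RC =
        TargetRegisterInfo::isVirtualRegister(Reg)
            ? MRI.getRegClass(Reg)              // virtual reg: class tracked by MRI
            : TRI.getMinimalPhysRegClass(Reg);  // physical reg: derived via TRI

    // After: identical logic, spelled on the Register class.
    const TargetRegisterClass *RC =
        Register::isVirtualRegister(Reg)
            ? MRI.getRegClass(Reg)
            : TRI.getMinimalPhysRegClass(Reg);

Because the predicates take the raw register number, each call site rewrites one-for-one; the only non-trivial churn in the hunks is re-wrapping lines that became shorter.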
diff --git a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 89404463e1f..7922ac69f16 100644
--- a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -105,14 +105,14 @@ static bool isGPR64(unsigned Reg, unsigned SubReg,
const MachineRegisterInfo *MRI) {
if (SubReg)
return false;
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
return AArch64::GPR64RegClass.contains(Reg);
}
static bool isFPR64(unsigned Reg, unsigned SubReg,
const MachineRegisterInfo *MRI) {
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
SubReg == 0) ||
(MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 094fbd99952..6de41d25b3c 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -486,7 +486,7 @@ void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
llvm_unreachable("<unknown operand type>");
case MachineOperand::MO_Register: {
unsigned Reg = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
assert(!MO.getSubReg() && "Subregs should be eliminated!");
O << AArch64InstPrinter::getRegisterName(Reg);
break;
diff --git a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp
index 453132e0966..8fc6e22c054 100644
--- a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp
@@ -78,7 +78,7 @@ void AArch64CondBrTuning::getAnalysisUsage(AnalysisUsage &AU) const {
}
MachineInstr *AArch64CondBrTuning::getOperandDef(const MachineOperand &MO) {
- if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (!Register::isVirtualRegister(MO.getReg()))
return nullptr;
return MRI->getUniqueVRegDef(MO.getReg());
}
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 2cfbcc592d6..048e04e4a51 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -259,7 +259,7 @@ bool SSACCmpConv::isDeadDef(unsigned DstReg) {
// Writes to the zero register are dead.
if (DstReg == AArch64::WZR || DstReg == AArch64::XZR)
return true;
- if (!TargetRegisterInfo::isVirtualRegister(DstReg))
+ if (!Register::isVirtualRegister(DstReg))
return false;
// A virtual register def without any uses will be marked dead later, and
// eventually replaced by the zero register.
diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index a43077cb88e..0a03ff7b7e1 100644
--- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -146,7 +146,7 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
// We should not have any relevant physreg defs that are replacable by
// zero before register allocation. So we just check for dead vreg defs.
unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
+ if (!Register::isVirtualRegister(Reg) ||
(!MO.isDead() && !MRI->use_nodbg_empty(Reg)))
continue;
assert(!MO.isImplicit() && "Unexpected implicit def!");
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5d7385c4e70..caa2280aee9 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -416,7 +416,7 @@ unsigned AArch64InstrInfo::insertBranch(
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
- while (TargetRegisterInfo::isVirtualRegister(VReg)) {
+ while (Register::isVirtualRegister(VReg)) {
const MachineInstr *DefMI = MRI.getVRegDef(VReg);
if (!DefMI->isFullCopy())
return VReg;
@@ -431,7 +431,7 @@ static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
unsigned *NewVReg = nullptr) {
VReg = removeCopies(MRI, VReg);
- if (!TargetRegisterInfo::isVirtualRegister(VReg))
+ if (!Register::isVirtualRegister(VReg))
return 0;
bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
@@ -1072,7 +1072,7 @@ static bool UpdateOperandRegClass(MachineInstr &Instr) {
"Operand has register constraints without being a register!");
unsigned Reg = MO.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (Register::isPhysicalRegister(Reg)) {
if (!OpRegCstraints->contains(Reg))
return false;
} else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
@@ -2350,7 +2350,7 @@ static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
if (!SubIdx)
return MIB.addReg(Reg, State);
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
return MIB.addReg(Reg, State, SubIdx);
}
@@ -2722,7 +2722,7 @@ static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
MachineMemOperand *MMO) {
unsigned SrcReg0 = SrcReg;
unsigned SrcReg1 = SrcReg;
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ if (Register::isPhysicalRegister(SrcReg)) {
SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
SubIdx0 = 0;
SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
@@ -2761,7 +2761,7 @@ void AArch64InstrInfo::storeRegToStackSlot(
case 4:
if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::STRWui;
- if (TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (Register::isVirtualRegister(SrcReg))
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
else
assert(SrcReg != AArch64::WSP);
@@ -2771,7 +2771,7 @@ void AArch64InstrInfo::storeRegToStackSlot(
case 8:
if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::STRXui;
- if (TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (Register::isVirtualRegister(SrcReg))
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
else
assert(SrcReg != AArch64::SP);
@@ -2852,7 +2852,7 @@ static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
unsigned DestReg0 = DestReg;
unsigned DestReg1 = DestReg;
bool IsUndef = true;
- if (TargetRegisterInfo::isPhysicalRegister(DestReg)) {
+ if (Register::isPhysicalRegister(DestReg)) {
DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
SubIdx0 = 0;
DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
@@ -2892,7 +2892,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
case 4:
if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDRWui;
- if (TargetRegisterInfo::isVirtualRegister(DestReg))
+ if (Register::isVirtualRegister(DestReg))
MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
else
assert(DestReg != AArch64::WSP);
@@ -2902,7 +2902,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
case 8:
if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDRXui;
- if (TargetRegisterInfo::isVirtualRegister(DestReg))
+ if (Register::isVirtualRegister(DestReg))
MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
else
assert(DestReg != AArch64::SP);
@@ -3081,13 +3081,11 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
if (MI.isFullCopy()) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
- if (SrcReg == AArch64::SP &&
- TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ if (SrcReg == AArch64::SP && Register::isVirtualRegister(DstReg)) {
MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
return nullptr;
}
- if (DstReg == AArch64::SP &&
- TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (DstReg == AArch64::SP && Register::isVirtualRegister(SrcReg)) {
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
return nullptr;
}
@@ -3132,9 +3130,8 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// This is slightly expensive to compute for physical regs since
// getMinimalPhysRegClass is slow.
auto getRegClass = [&](unsigned Reg) {
- return TargetRegisterInfo::isVirtualRegister(Reg)
- ? MRI.getRegClass(Reg)
- : TRI.getMinimalPhysRegClass(Reg);
+ return Register::isVirtualRegister(Reg) ? MRI.getRegClass(Reg)
+ : TRI.getMinimalPhysRegClass(Reg);
};
if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
@@ -3159,8 +3156,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
//
// STRXui %xzr, %stack.0
//
- if (IsSpill && DstMO.isUndef() &&
- TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ if (IsSpill && DstMO.isUndef() && Register::isPhysicalRegister(SrcReg)) {
assert(SrcMO.getSubReg() == 0 &&
"Unexpected subreg on physical register");
const TargetRegisterClass *SpillRC;
@@ -3459,7 +3455,7 @@ static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineInstr *MI = nullptr;
- if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
MI = MRI.getUniqueVRegDef(MO.getReg());
// And it needs to be in the trace (otherwise, it won't have a depth).
if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
@@ -3955,13 +3951,13 @@ genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
}
- if (TargetRegisterInfo::isVirtualRegister(ResultReg))
+ if (Register::isVirtualRegister(ResultReg))
MRI.constrainRegClass(ResultReg, RC);
- if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
+ if (Register::isVirtualRegister(SrcReg0))
MRI.constrainRegClass(SrcReg0, RC);
- if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
+ if (Register::isVirtualRegister(SrcReg1))
MRI.constrainRegClass(SrcReg1, RC);
- if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
+ if (Register::isVirtualRegister(SrcReg2))
MRI.constrainRegClass(SrcReg2, RC);
MachineInstrBuilder MIB;
@@ -4021,13 +4017,13 @@ static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
unsigned SrcReg1 = MUL->getOperand(2).getReg();
bool Src1IsKill = MUL->getOperand(2).isKill();
- if (TargetRegisterInfo::isVirtualRegister(ResultReg))
+ if (Register::isVirtualRegister(ResultReg))
MRI.constrainRegClass(ResultReg, RC);
- if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
+ if (Register::isVirtualRegister(SrcReg0))
MRI.constrainRegClass(SrcReg0, RC);
- if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
+ if (Register::isVirtualRegister(SrcReg1))
MRI.constrainRegClass(SrcReg1, RC);
- if (TargetRegisterInfo::isVirtualRegister(VR))
+ if (Register::isVirtualRegister(VR))
MRI.constrainRegClass(VR, RC);
MachineInstrBuilder MIB =
@@ -4618,7 +4614,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
unsigned VReg = MI.getOperand(0).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(VReg))
+ if (!Register::isVirtualRegister(VReg))
return false;
MachineInstr *DefMI = MRI->getVRegDef(VReg);
@@ -4654,7 +4650,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
MachineOperand &MO = DefMI->getOperand(1);
unsigned NewReg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(NewReg))
+ if (!Register::isVirtualRegister(NewReg))
return false;
assert(!MRI->def_empty(NewReg) && "Register must be defined.");
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 75ae60cbadc..27b12f93f81 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -373,7 +373,7 @@ static bool unsupportedBinOp(const MachineInstr &I,
// so, this will need to be taught about that, and we'll need to get the
// bank out of the minimal class for the register.
// Either way, this needs to be documented (and possibly verified).
- if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+ if (!Register::isVirtualRegister(MO.getReg())) {
LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
return true;
}
@@ -518,7 +518,7 @@ static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
(DstSize == SrcSize ||
// Copies are a mean to setup initial types, the number of
// bits may not exactly match.
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
+ (Register::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
// Copies are a mean to copy bits around, as long as we are
// on the same register class, that's fine. Otherwise, that
// means we need some SUBREG_TO_REG or AND & co.
@@ -555,7 +555,7 @@ static bool selectSubregisterCopy(MachineInstr &I, MachineRegisterInfo &MRI,
// It's possible that the destination register won't be constrained. Make
// sure that happens.
- if (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()))
+ if (!Register::isPhysicalRegister(I.getOperand(0).getReg()))
RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);
return true;
@@ -623,11 +623,10 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
// result.
auto CheckCopy = [&]() {
// If we have a bitcast or something, we can't have physical registers.
- assert(
- (I.isCopy() ||
- (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()) &&
- !TargetRegisterInfo::isPhysicalRegister(I.getOperand(1).getReg()))) &&
- "No phys reg on generic operator!");
+ assert((I.isCopy() ||
+ (!Register::isPhysicalRegister(I.getOperand(0).getReg()) &&
+ !Register::isPhysicalRegister(I.getOperand(1).getReg()))) &&
+ "No phys reg on generic operator!");
assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
(void)KnownValid;
return true;
@@ -690,7 +689,7 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
// If the destination is a physical register, then there's nothing to
// change, so we're done.
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
return CheckCopy();
}
@@ -3355,7 +3354,7 @@ bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const {
// Can't see past copies from physregs.
if (Opc == TargetOpcode::COPY &&
- TargetRegisterInfo::isPhysicalRegister(CondDef->getOperand(1).getReg()))
+ Register::isPhysicalRegister(CondDef->getOperand(1).getReg()))
return false;
CondDef = MRI.getVRegDef(CondDef->getOperand(1).getReg());
diff --git a/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp b/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
index aff861aae6b..f06d8db2546 100644
--- a/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
@@ -162,11 +162,11 @@ bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd,
LiveIntervals &LIs = G.getMetadata().LIS;
- if (TRI->isPhysicalRegister(Rd) || TRI->isPhysicalRegister(Ra)) {
- LLVM_DEBUG(dbgs() << "Rd is a physical reg:" << TRI->isPhysicalRegister(Rd)
- << '\n');
- LLVM_DEBUG(dbgs() << "Ra is a physical reg:" << TRI->isPhysicalRegister(Ra)
- << '\n');
+ if (Register::isPhysicalRegister(Rd) || Register::isPhysicalRegister(Ra)) {
+ LLVM_DEBUG(dbgs() << "Rd is a physical reg:"
+ << Register::isPhysicalRegister(Rd) << '\n');
+ LLVM_DEBUG(dbgs() << "Ra is a physical reg:"
+ << Register::isPhysicalRegister(Ra) << '\n');
return false;
}
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index daa55337773..f4b6b0f2c8b 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -566,9 +566,9 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
// Check if one of the register is not a generic register.
- if ((TargetRegisterInfo::isPhysicalRegister(DstReg) ||
+ if ((Register::isPhysicalRegister(DstReg) ||
!MRI.getType(DstReg).isValid()) ||
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
+ (Register::isPhysicalRegister(SrcReg) ||
!MRI.getType(SrcReg).isValid())) {
const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 3851ee8968b..b2491ebc6f4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -544,7 +544,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
if (!N->isMachineOpcode()) {
if (N->getOpcode() == ISD::CopyToReg) {
unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
return MRI.getRegClass(Reg);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index b71bfce60b3..3d76b8b7e67 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -62,7 +62,7 @@ AMDGPUInstructionSelector::AMDGPUInstructionSelector(
const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return Reg == AMDGPU::SCC;
auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
@@ -83,7 +83,7 @@ static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
bool AMDGPUInstructionSelector::isVCC(Register Reg,
const MachineRegisterInfo &MRI) const {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return Reg == TRI.getVCC();
auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
@@ -157,7 +157,7 @@ bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
}
for (const MachineOperand &MO : I.operands()) {
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
continue;
const TargetRegisterClass *RC =
@@ -550,7 +550,7 @@ bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
for (const MachineOperand &MO : Ins->operands()) {
if (!MO.isReg())
continue;
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
continue;
const TargetRegisterClass *RC =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 23749095705..113e5a32264 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -694,7 +694,7 @@ void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- if (TRI->isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
// If this is a source register to a PHI we are chaining, it
@@ -734,7 +734,7 @@ void LinearizedRegion::storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- if (TRI->isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
for (auto &UI : MRI->use_operands(Reg)) {
@@ -949,7 +949,7 @@ void LinearizedRegion::replaceRegister(unsigned Register, unsigned NewRegister,
(IncludeLoopPHI && IsLoopPHI);
if (ShouldReplace) {
- if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
+ if (Register::isPhysicalRegister(NewRegister)) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");
@@ -1022,7 +1022,7 @@ void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) {
for (auto &RI : II.uses()) {
if (RI.isReg()) {
unsigned Reg = RI.getReg();
- if (TRI->isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (hasNoDef(Reg, MRI))
continue;
if (!MRI->hasOneDef(Reg)) {
@@ -2230,7 +2230,7 @@ void AMDGPUMachineCFGStructurizer::replaceRegisterWith(unsigned Register,
I != E;) {
MachineOperand &O = *I;
++I;
- if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
+ if (Register::isPhysicalRegister(NewRegister)) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");
diff --git a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
index 51c4c99cfb1..c9446015aeb 100644
--- a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
@@ -174,7 +174,7 @@ GCNNSAReassign::CheckNSA(const MachineInstr &MI, bool Fast) const {
for (unsigned I = 0; I < Info->VAddrDwords; ++I) {
const MachineOperand &Op = MI.getOperand(VAddr0Idx + I);
unsigned Reg = Op.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
+ if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
return NSA_Status::FIXED;
unsigned PhysReg = VRM->getPhys(Reg);
diff --git a/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp b/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp
index f0d47eaa4ed..2e5b130cf9f 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegBankReassign.cpp
@@ -230,7 +230,7 @@ private:
public:
Printable printReg(unsigned Reg, unsigned SubReg = 0) const {
return Printable([Reg, SubReg, this](raw_ostream &OS) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (Register::isPhysicalRegister(Reg)) {
OS << llvm::printReg(Reg, TRI);
return;
}
@@ -275,7 +275,7 @@ char GCNRegBankReassign::ID = 0;
char &llvm::GCNRegBankReassignID = GCNRegBankReassign::ID;
unsigned GCNRegBankReassign::getPhysRegBank(unsigned Reg) const {
- assert (TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
unsigned Size = TRI->getRegSizeInBits(*RC);
@@ -293,7 +293,7 @@ unsigned GCNRegBankReassign::getPhysRegBank(unsigned Reg) const {
unsigned GCNRegBankReassign::getRegBankMask(unsigned Reg, unsigned SubReg,
int Bank) {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (!VRM->isAssignedReg(Reg))
return 0;
@@ -420,7 +420,7 @@ unsigned GCNRegBankReassign::getOperandGatherWeight(const MachineInstr& MI,
}
bool GCNRegBankReassign::isReassignable(unsigned Reg) const {
- if (TargetRegisterInfo::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
+ if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
return false;
const MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index f3eac95b057..4abbb8537fb 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -40,7 +40,7 @@ void llvm::printLivesAt(SlotIndex SI,
<< *LIS.getInstructionFromIndex(SI);
unsigned Num = 0;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ const unsigned Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
const auto &LI = LIS.getInterval(Reg);
@@ -84,7 +84,7 @@ bool llvm::isEqual(const GCNRPTracker::LiveRegSet &S1,
unsigned GCNRegPressure::getRegKind(unsigned Reg,
const MachineRegisterInfo &MRI) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(Register::isVirtualRegister(Reg));
const auto RC = MRI.getRegClass(Reg);
auto STI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
return STI->isSGPRClass(RC) ?
@@ -197,8 +197,7 @@ void GCNRegPressure::print(raw_ostream &OS, const GCNSubtarget *ST) const {
static LaneBitmask getDefRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI) {
- assert(MO.isDef() && MO.isReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()));
+ assert(MO.isDef() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
// We don't rely on read-undef flag because in case of tentative schedule
// tracking it isn't set correctly yet. This works correctly however since
@@ -211,8 +210,7 @@ static LaneBitmask getDefRegMask(const MachineOperand &MO,
static LaneBitmask getUsedRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI,
const LiveIntervals &LIS) {
- assert(MO.isUse() && MO.isReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()));
+ assert(MO.isUse() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
if (auto SubReg = MO.getSubReg())
return MRI.getTargetRegisterInfo()->getSubRegIndexLaneMask(SubReg);
@@ -233,7 +231,7 @@ collectVirtualRegUses(const MachineInstr &MI, const LiveIntervals &LIS,
const MachineRegisterInfo &MRI) {
SmallVector<RegisterMaskPair, 8> Res;
for (const auto &MO : MI.operands()) {
- if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
if (!MO.isUse() || !MO.readsReg())
continue;
@@ -279,7 +277,7 @@ GCNRPTracker::LiveRegSet llvm::getLiveRegs(SlotIndex SI,
const MachineRegisterInfo &MRI) {
GCNRPTracker::LiveRegSet LiveRegs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- auto Reg = TargetRegisterInfo::index2VirtReg(I);
+ auto Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
auto LiveMask = getLiveLaneMask(Reg, SI, LIS, MRI);
@@ -330,8 +328,7 @@ void GCNUpwardRPTracker::recede(const MachineInstr &MI) {
MaxPressure = max(AtMIPressure, MaxPressure);
for (const auto &MO : MI.defs()) {
- if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
- MO.isDead())
+ if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()) || MO.isDead())
continue;
auto Reg = MO.getReg();
@@ -410,7 +407,7 @@ void GCNDownwardRPTracker::advanceToNext() {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
auto &LiveMask = LiveRegs[Reg];
auto PrevMask = LiveMask;
@@ -501,7 +498,7 @@ void GCNRPTracker::printLiveRegs(raw_ostream &OS, const LiveRegSet& LiveRegs,
const MachineRegisterInfo &MRI) {
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ unsigned Reg = Register::index2VirtReg(I);
auto It = LiveRegs.find(Reg);
if (It != LiveRegs.end() && It->second.any())
OS << ' ' << printVRegOrUnit(Reg, TRI) << ':'
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index e4894418b94..5862cdb0416 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -214,7 +214,7 @@ getLiveRegMap(Range &&R, bool After, LiveIntervals &LIS) {
DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> LiveRegMap;
SmallVector<SlotIndex, 32> LiveIdxs, SRLiveIdxs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- auto Reg = TargetRegisterInfo::index2VirtReg(I);
+ auto Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
auto &LI = LIS.getInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index f80a53ba1dc..29b4d5559d9 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -335,7 +335,7 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case R600::MASK_WRITE: {
unsigned maskedRegister = MI.getOperand(0).getReg();
- assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
+ assert(Register::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
break;
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index d9e839fe203..79e36b71e0f 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -97,8 +97,8 @@ bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const {
for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
E = MBBI->operands_end(); I != E; ++I) {
- if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
- I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
+ if (I->isReg() && !Register::isVirtualRegister(I->getReg()) && I->isUse() &&
+ RI.isPhysRegLiveAcrossClauses(I->getReg()))
return false;
}
return true;
@@ -242,8 +242,7 @@ bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
E = MI.operands_end();
I != E; ++I) {
- if (!I->isReg() || !I->isUse() ||
- TargetRegisterInfo::isVirtualRegister(I->getReg()))
+ if (!I->isReg() || !I->isUse() || Register::isVirtualRegister(I->getReg()))
continue;
if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
@@ -1193,8 +1192,7 @@ int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
unsigned Reg = LI.first;
- if (TargetRegisterInfo::isVirtualRegister(Reg) ||
- !IndirectRC->contains(Reg))
+ if (Register::isVirtualRegister(Reg) || !IndirectRC->contains(Reg))
continue;
unsigned RegIndex;
diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
index 34267a909b5..e1abdb78452 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -183,7 +183,7 @@ isPhysicalRegCopy(MachineInstr *MI) {
if (MI->getOpcode() != R600::COPY)
return false;
- return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
+ return !Register::isVirtualRegister(MI->getOperand(1).getReg());
}
void R600SchedStrategy::releaseTopNode(SUnit *SU) {
@@ -209,7 +209,7 @@ void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
const TargetRegisterClass *RC) const {
- if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (!Register::isVirtualRegister(Reg)) {
return RC->contains(Reg);
} else {
return MRI->getRegClass(Reg) == RC;
diff --git a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 9f1cb6582b5..d34d30fcdd7 100644
--- a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -58,7 +58,7 @@ using namespace llvm;
static bool isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
assert(MRI.isSSA());
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return false;
const MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
return MI && MI->isImplicitDef();
diff --git a/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp b/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp
index 685df74490f..ef12c1d2459 100644
--- a/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp
@@ -93,7 +93,7 @@ const RegClassWeight &R600RegisterInfo::getRegClassWeight(
}
bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const {
- assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(!Register::isVirtualRegister(Reg));
switch (Reg) {
case R600::OQAP:
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 05aee164cb5..a169133a6ec 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -148,7 +148,7 @@ static bool hasVectorOperands(const MachineInstr &MI,
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
if (!MI.getOperand(i).isReg() ||
- !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
+ !Register::isVirtualRegister(MI.getOperand(i).getReg()))
continue;
if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
@@ -164,18 +164,16 @@ getCopyRegClasses(const MachineInstr &Copy,
unsigned DstReg = Copy.getOperand(0).getReg();
unsigned SrcReg = Copy.getOperand(1).getReg();
- const TargetRegisterClass *SrcRC =
- TargetRegisterInfo::isVirtualRegister(SrcReg) ?
- MRI.getRegClass(SrcReg) :
- TRI.getPhysRegClass(SrcReg);
+ const TargetRegisterClass *SrcRC = Register::isVirtualRegister(SrcReg)
+ ? MRI.getRegClass(SrcReg)
+ : TRI.getPhysRegClass(SrcReg);
// We don't really care about the subregister here.
// SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());
- const TargetRegisterClass *DstRC =
- TargetRegisterInfo::isVirtualRegister(DstReg) ?
- MRI.getRegClass(DstReg) :
- TRI.getPhysRegClass(DstReg);
+ const TargetRegisterClass *DstRC = Register::isVirtualRegister(DstReg)
+ ? MRI.getRegClass(DstReg)
+ : TRI.getPhysRegClass(DstReg);
return std::make_pair(SrcRC, DstRC);
}
@@ -201,8 +199,8 @@ static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
auto &Src = MI.getOperand(1);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = Src.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
- !TargetRegisterInfo::isVirtualRegister(DstReg))
+ if (!Register::isVirtualRegister(SrcReg) ||
+ !Register::isVirtualRegister(DstReg))
return false;
for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
@@ -250,7 +248,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
return false;
// It is illegal to have vreg inputs to a physreg defining reg_sequence.
- if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
+ if (Register::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
return false;
const TargetRegisterClass *SrcRC, *DstRC;
@@ -624,7 +622,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
const TargetRegisterClass *SrcRC, *DstRC;
std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
- if (!TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ if (!Register::isVirtualRegister(DstReg)) {
// If the destination register is a physical register there isn't
// really much we can do to fix this.
// Some special instructions use M0 as an input. Some even only use
@@ -644,7 +642,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
unsigned SrcReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (!Register::isVirtualRegister(SrcReg)) {
TII->moveToVALU(MI, MDT);
break;
}
diff --git a/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp b/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp
index 5b834c8de13..a0119297b11 100644
--- a/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixupVectorISel.cpp
@@ -91,8 +91,7 @@ static bool findSRegBaseAndIndex(MachineOperand *Op,
Worklist.push_back(Op);
while (!Worklist.empty()) {
MachineOperand *WOp = Worklist.pop_back_val();
- if (!WOp->isReg() ||
- !TargetRegisterInfo::isVirtualRegister(WOp->getReg()))
+ if (!WOp->isReg() || !Register::isVirtualRegister(WOp->getReg()))
continue;
MachineInstr *DefInst = MRI.getUniqueVRegDef(WOp->getReg());
switch (DefInst->getOpcode()) {
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 74d77d32801..1b6981121f8 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -444,7 +444,7 @@ static bool tryToFoldACImm(const SIInstrInfo *TII,
return false;
unsigned UseReg = OpToFold.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(UseReg))
+ if (!Register::isVirtualRegister(UseReg))
return false;
if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
@@ -570,14 +570,13 @@ void SIFoldOperands::foldOperand(
if (FoldingImmLike && UseMI->isCopy()) {
unsigned DestReg = UseMI->getOperand(0).getReg();
- const TargetRegisterClass *DestRC
- = TargetRegisterInfo::isVirtualRegister(DestReg) ?
- MRI->getRegClass(DestReg) :
- TRI->getPhysRegClass(DestReg);
+ const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg)
+ ? MRI->getRegClass(DestReg)
+ : TRI->getPhysRegClass(DestReg);
unsigned SrcReg = UseMI->getOperand(1).getReg();
- if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
- TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (Register::isVirtualRegister(DestReg) &&
+ Register::isVirtualRegister(SrcReg)) {
const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
MachineRegisterInfo::use_iterator NextUse;
@@ -616,7 +615,7 @@ void SIFoldOperands::foldOperand(
CopiesToReplace.push_back(UseMI);
} else {
if (UseMI->isCopy() && OpToFold.isReg() &&
- TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
+ Register::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
TRI->isVectorRegister(*MRI, UseMI->getOperand(1).getReg()) &&
!UseMI->getOperand(1).getSubReg()) {
@@ -810,7 +809,7 @@ static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
if (Op.isReg()) {
// If this has a subregister, it obviously is a register source.
if (Op.getSubReg() != AMDGPU::NoSubRegister ||
- !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
+ !Register::isVirtualRegister(Op.getReg()))
return &Op;
MachineInstr *Def = MRI.getVRegDef(Op.getReg());
@@ -1339,8 +1338,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
if (!FoldingImm && !OpToFold.isReg())
continue;
- if (OpToFold.isReg() &&
- !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
+ if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
continue;
// Prevent folding operands backwards in the function. For example,
@@ -1350,8 +1348,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
// ...
// %vgpr0 = V_MOV_B32_e32 1, implicit %exec
MachineOperand &Dst = MI.getOperand(0);
- if (Dst.isReg() &&
- !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
+ if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
continue;
foldInstOperand(MI, OpToFold);
diff --git a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
index f3c9ad63a80..1fcf8fbeb35 100644
--- a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -144,7 +144,7 @@ static unsigned getMopState(const MachineOperand &MO) {
S |= RegState::Kill;
if (MO.isEarlyClobber())
S |= RegState::EarlyClobber;
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) && MO.isRenamable())
+ if (Register::isPhysicalRegister(MO.getReg()) && MO.isRenamable())
S |= RegState::Renamable;
return S;
}
@@ -152,7 +152,7 @@ static unsigned getMopState(const MachineOperand &MO) {
template <typename Callable>
void SIFormMemoryClauses::forAllLanes(unsigned Reg, LaneBitmask LaneMask,
Callable Func) const {
- if (LaneMask.all() || TargetRegisterInfo::isPhysicalRegister(Reg) ||
+ if (LaneMask.all() || Register::isPhysicalRegister(Reg) ||
LaneMask == MRI->getMaxLaneMaskForVReg(Reg)) {
Func(0);
return;
@@ -227,7 +227,7 @@ bool SIFormMemoryClauses::canBundle(const MachineInstr &MI,
if (Conflict == Map.end())
continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return false;
LaneBitmask Mask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
@@ -269,9 +269,9 @@ void SIFormMemoryClauses::collectRegUses(const MachineInstr &MI,
if (!Reg)
continue;
- LaneBitmask Mask = TargetRegisterInfo::isVirtualRegister(Reg) ?
- TRI->getSubRegIndexLaneMask(MO.getSubReg()) :
- LaneBitmask::getAll();
+ LaneBitmask Mask = Register::isVirtualRegister(Reg)
+ ? TRI->getSubRegIndexLaneMask(MO.getSubReg())
+ : LaneBitmask::getAll();
RegUse &Map = MO.isDef() ? Defs : Uses;
auto Loc = Map.find(Reg);
@@ -389,7 +389,7 @@ bool SIFormMemoryClauses::runOnMachineFunction(MachineFunction &MF) {
for (auto &&R : Defs) {
unsigned Reg = R.first;
Uses.erase(Reg);
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
continue;
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
@@ -397,7 +397,7 @@ bool SIFormMemoryClauses::runOnMachineFunction(MachineFunction &MF) {
for (auto &&R : Uses) {
unsigned Reg = R.first;
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
continue;
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a14a8929aa4..639d8193749 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10093,7 +10093,7 @@ SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
// Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
// to try understanding copies to physical registers.
if (SrcVal.getValueType() == MVT::i1 &&
- TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
+ Register::isPhysicalRegister(DestReg->getReg())) {
SDLoc SL(Node);
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
SDValue VReg = DAG.getRegister(
@@ -10246,7 +10246,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
MachineOperand &Op = MI.getOperand(I);
if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
- !TargetRegisterInfo::isVirtualRegister(Op.getReg()) ||
+ !Register::isVirtualRegister(Op.getReg()) ||
!TRI->isAGPR(MRI, Op.getReg()))
continue;
auto *Src = MRI.getUniqueVRegDef(Op.getReg());
@@ -10674,7 +10674,7 @@ bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
const MachineRegisterInfo &MRI = MF->getRegInfo();
const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
unsigned Reg = R->getReg();
- if (TRI.isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return !TRI.isSGPRReg(MRI, Reg);
if (MRI.isLiveIn(Reg)) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index b964af49739..85c8abe848c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -460,7 +460,7 @@ bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
const unsigned Reg = FirstDst->getReg();
- const TargetRegisterClass *DstRC = TargetRegisterInfo::isVirtualRegister(Reg)
+ const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg)
? MRI.getRegClass(Reg)
: RI.getPhysRegClass(Reg);
@@ -1052,7 +1052,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
// The SGPR spill/restore instructions only work on number sgprs, so we need
// to make sure we are using the correct register class.
- if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
+ if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
}
@@ -1182,7 +1182,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
// FIXME: Maybe this should not include a memoperand because it will be
// lowered to non-memory instructions.
const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
- if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
+ if (Register::isVirtualRegister(DestReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
}
@@ -2374,12 +2374,12 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
MRI->hasOneUse(Src0->getReg())) {
Src0->ChangeToImmediate(Def->getOperand(1).getImm());
Src0Inlined = true;
- } else if ((RI.isPhysicalRegister(Src0->getReg()) &&
- (ST.getConstantBusLimit(Opc) <= 1 &&
- RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
- (RI.isVirtualRegister(Src0->getReg()) &&
- (ST.getConstantBusLimit(Opc) <= 1 &&
- RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
+ } else if ((Register::isPhysicalRegister(Src0->getReg()) &&
+ (ST.getConstantBusLimit(Opc) <= 1 &&
+ RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
+ (Register::isVirtualRegister(Src0->getReg()) &&
+ (ST.getConstantBusLimit(Opc) <= 1 &&
+ RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
return false;
// VGPR is okay as Src0 - fallthrough
}
@@ -2392,10 +2392,10 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
MRI->hasOneUse(Src1->getReg()) &&
commuteInstruction(UseMI)) {
Src0->ChangeToImmediate(Def->getOperand(1).getImm());
- } else if ((RI.isPhysicalRegister(Src1->getReg()) &&
- RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
- (RI.isVirtualRegister(Src1->getReg()) &&
- RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+ } else if ((Register::isPhysicalRegister(Src1->getReg()) &&
+ RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
+ (Register::isVirtualRegister(Src1->getReg()) &&
+ RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
return false;
// VGPR is okay as Src1 - fallthrough
}
@@ -3043,7 +3043,7 @@ bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
if (!MO.isUse())
return false;
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (Register::isVirtualRegister(MO.getReg()))
return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
// Null is free
@@ -3111,7 +3111,7 @@ static bool shouldReadExec(const MachineInstr &MI) {
static bool isSubRegOf(const SIRegisterInfo &TRI,
const MachineOperand &SuperVec,
const MachineOperand &SubReg) {
- if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
+ if (Register::isPhysicalRegister(SubReg.getReg()))
return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
@@ -3152,7 +3152,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
continue;
unsigned Reg = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
+ if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) {
ErrInfo = "inlineasm operand has incorrect register class.";
return false;
}
@@ -3217,8 +3217,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
if (RegClass != -1) {
unsigned Reg = MI.getOperand(i).getReg();
- if (Reg == AMDGPU::NoRegister ||
- TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
continue;
const TargetRegisterClass *RC = RI.getRegClass(RegClass);
@@ -3311,7 +3310,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
ErrInfo =
"Dst register should be tied to implicit use of preserved register";
return false;
- } else if (TargetRegisterInfo::isPhysicalRegister(TiedMO.getReg()) &&
+ } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
Dst.getReg() != TiedMO.getReg()) {
ErrInfo = "Dst register should use same physical register as preserved";
return false;
@@ -3718,7 +3717,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
Desc.OpInfo[OpNo].RegClass == -1) {
unsigned Reg = MI.getOperand(OpNo).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return MRI.getRegClass(Reg);
return RI.getPhysRegClass(Reg);
}
@@ -3823,10 +3822,9 @@ bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
return false;
unsigned Reg = MO.getReg();
- const TargetRegisterClass *RC =
- TargetRegisterInfo::isVirtualRegister(Reg) ?
- MRI.getRegClass(Reg) :
- RI.getPhysRegClass(Reg);
+ const TargetRegisterClass *RC = Register::isVirtualRegister(Reg)
+ ? MRI.getRegClass(Reg)
+ : RI.getPhysRegClass(Reg);
const SIRegisterInfo *TRI =
static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
@@ -4438,7 +4436,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
if (!MI.getOperand(i).isReg() ||
- !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
+ !Register::isVirtualRegister(MI.getOperand(i).getReg()))
continue;
const TargetRegisterClass *OpRC =
MRI.getRegClass(MI.getOperand(i).getReg());
@@ -4466,7 +4464,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
// Update all the operands so they have the same type.
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
MachineOperand &Op = MI.getOperand(I);
- if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
+ if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
continue;
// MI is a PHI instruction.
@@ -4491,7 +4489,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
// subregister index types e.g. sub0_sub1 + sub2 + sub3
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
MachineOperand &Op = MI.getOperand(I);
- if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
+ if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
continue;
const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
@@ -4942,7 +4940,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
unsigned NewDstReg = AMDGPU::NoRegister;
if (HasDst) {
unsigned DstReg = Inst.getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
continue;
// Update the destination register class.
@@ -4951,7 +4949,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
continue;
if (Inst.isCopy() &&
- TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
+ Register::isVirtualRegister(Inst.getOperand(1).getReg()) &&
NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
// Instead of creating a copy where src and dst are the same register
// class, we just replace all uses of dst with src. These kinds of
@@ -6264,7 +6262,7 @@ static bool followSubRegDef(MachineInstr &MI,
MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
MachineRegisterInfo &MRI) {
assert(MRI.isSSA());
- if (!TargetRegisterInfo::isVirtualRegister(P.Reg))
+ if (!Register::isVirtualRegister(P.Reg))
return nullptr;
auto RSR = P;
@@ -6275,8 +6273,7 @@ MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
case AMDGPU::COPY:
case AMDGPU::V_MOV_B32_e32: {
auto &Op1 = MI->getOperand(1);
- if (Op1.isReg() &&
- TargetRegisterInfo::isVirtualRegister(Op1.getReg())) {
+ if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
if (Op1.isUndef())
return nullptr;
RSR = getRegSubRegPair(Op1);
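The hunks above are mechanical: every static query that used to live on TargetRegisterInfo has a same-named counterpart on the new Register class, operating on the same unsigned encoding. As a rough, self-contained sketch of that encoding (names mirror llvm/CodeGen/Register.h, but this is not the real header, which additionally carves a stack-slot range out of the same space, shown later in the HexagonConstExtenders hunk):

#include <cassert>

namespace sketch {
// LLVM packs register operands into one unsigned: 0 means "no register",
// small positive values are physical registers, and virtual registers are
// tagged with bit 31, so a signed comparison separates the two spaces.
struct Register {
  static bool isVirtualRegister(unsigned Reg) { return int(Reg) < 0; }
  static bool isPhysicalRegister(unsigned Reg) { return int(Reg) > 0; }
};
} // namespace sketch

int main() {
  unsigned Phys = 42;             // hypothetical physical register number
  unsigned Virt = (1u << 31) | 7; // hypothetical virtual register #7
  assert(sketch::Register::isPhysicalRegister(Phys));
  assert(sketch::Register::isVirtualRegister(Virt));
  return 0;
}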
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index b48b66195ec..d3048fcde5a 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -249,8 +249,7 @@ static void addDefsUsesToList(const MachineInstr &MI,
if (Op.isReg()) {
if (Op.isDef())
RegDefs.insert(Op.getReg());
- else if (Op.readsReg() &&
- TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
+ else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
PhysRegUses.insert(Op.getReg());
}
}
@@ -282,7 +281,7 @@ static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
if (Use.isReg() &&
((Use.readsReg() && RegDefs.count(Use.getReg())) ||
(Use.isDef() && RegDefs.count(Use.getReg())) ||
- (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
+ (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
PhysRegUses.count(Use.getReg())))) {
Insts.push_back(&MI);
addDefsUsesToList(MI, RegDefs, PhysRegUses);
@@ -548,7 +547,7 @@ bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
// We only ever merge operations with the same base address register, so
// don't bother scanning forward if there are no other uses.
if (AddrReg[i]->isReg() &&
- (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
+ (Register::isPhysicalRegister(AddrReg[i]->getReg()) ||
MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
return false;
}
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 516b9bed63c..0070b1229c9 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -503,7 +503,7 @@ void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
SmallVectorImpl<MachineOperand> &Src) const {
MachineOperand &Op = MI.getOperand(OpNo);
- if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) {
+ if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg())) {
Src.push_back(Op);
return;
}
@@ -523,8 +523,7 @@ void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
for (const auto &SrcOp : Def->explicit_operands())
if (SrcOp.isReg() && SrcOp.isUse() &&
- (TargetRegisterInfo::isVirtualRegister(SrcOp.getReg()) ||
- SrcOp.getReg() == Exec))
+ (Register::isVirtualRegister(SrcOp.getReg()) || SrcOp.getReg() == Exec))
Src.push_back(SrcOp);
}
diff --git a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
index 1c0f836f07e..8b46f3d8044 100644
--- a/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp
@@ -96,7 +96,7 @@ private:
getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;
bool isVreg1(unsigned Reg) const {
- return TargetRegisterInfo::isVirtualRegister(Reg) &&
+ return Register::isVirtualRegister(Reg) &&
MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
}
@@ -689,7 +689,7 @@ void SILowerI1Copies::lowerCopiesToI1() {
unsigned SrcReg = MI.getOperand(1).getReg();
assert(!MI.getOperand(1).getSubReg());
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
+ if (!Register::isVirtualRegister(SrcReg) ||
(!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
unsigned TmpReg = createLaneMaskReg(*MF);
@@ -734,7 +734,7 @@ bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
break;
Reg = MI->getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return false;
if (!isLaneMaskReg(Reg))
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index ebbdf80f956..6372f2df399 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -348,7 +348,7 @@ void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
// Do not track physical registers, because it messes up the tracking.
for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
- if (TargetRegisterInfo::isVirtualRegister(RegMaskPair.RegUnit))
+ if (Register::isVirtualRegister(RegMaskPair.RegUnit))
LiveInRegs.insert(RegMaskPair.RegUnit);
}
LiveOutRegs.clear();
@@ -376,7 +376,7 @@ void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
// The use of findDefBetween removes case 4.
for (const auto &RegMaskPair : RPTracker.getPressure().LiveOutRegs) {
unsigned Reg = RegMaskPair.RegUnit;
- if (TargetRegisterInfo::isVirtualRegister(Reg) &&
+ if (Register::isVirtualRegister(Reg) &&
isDefBetween(Reg, LIS->getInstructionIndex(*BeginBlock).getRegSlot(),
LIS->getInstructionIndex(*EndBlock).getRegSlot(), MRI,
LIS)) {
@@ -1690,7 +1690,7 @@ SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
void SIScheduleBlockScheduler::addLiveRegs(std::set<unsigned> &Regs) {
for (unsigned Reg : Regs) {
// For now only track virtual registers.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
// If not already in the live set, then add it.
(void) LiveRegs.insert(Reg);
@@ -1750,7 +1750,7 @@ SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
for (unsigned Reg : InRegs) {
// For now only track virtual registers.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
if (LiveRegsConsumers[Reg] > 1)
continue;
@@ -1762,7 +1762,7 @@ SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
for (unsigned Reg : OutRegs) {
// For now only track virtual registers.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
for (; PSetI.isValid(); ++PSetI) {
@@ -1913,7 +1913,7 @@ SIScheduleDAGMI::fillVgprSgprCost(_Iterator First, _Iterator End,
for (_Iterator RegI = First; RegI != End; ++RegI) {
unsigned Reg = *RegI;
// For now only track virtual registers
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
PSetIterator PSetI = MRI.getPressureSets(Reg);
for (; PSetI.isValid(); ++PSetI) {
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
index 4b17cf4e632..b04df380103 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
@@ -266,20 +266,19 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
// Try to remove the compare. The cmp value should not be used between the cmp
// and s_and_b64 if it is VCC, or must simply be unused for any other register.
- if ((TargetRegisterInfo::isVirtualRegister(CmpReg) &&
- MRI.use_nodbg_empty(CmpReg)) ||
+ if ((Register::isVirtualRegister(CmpReg) && MRI.use_nodbg_empty(CmpReg)) ||
(CmpReg == CondReg &&
std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
[&](const MachineInstr &MI) {
- return MI.readsRegister(CondReg, TRI); }))) {
+ return MI.readsRegister(CondReg, TRI);
+ }))) {
LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');
LIS->RemoveMachineInstrFromMaps(*Cmp);
Cmp->eraseFromParent();
// Try to remove v_cndmask_b32.
- if (TargetRegisterInfo::isVirtualRegister(SelReg) &&
- MRI.use_nodbg_empty(SelReg)) {
+ if (Register::isVirtualRegister(SelReg) && MRI.use_nodbg_empty(SelReg)) {
LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');
LIS->RemoveMachineInstrFromMaps(*Sel);
@@ -434,7 +433,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
if (Changed) {
for (auto Reg : RecalcRegs) {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
LIS->removeInterval(Reg);
if (!MRI.reg_empty(Reg))
LIS->createAndComputeVirtRegInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 2d71abc0612..7888086085f 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -574,8 +574,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(Src1->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(Src1->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
@@ -613,8 +613,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(Src1->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(Src1->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
@@ -677,8 +677,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(Src0->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(Src0->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
return make_unique<SDWASrcOperand>(
@@ -706,8 +706,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
- if (TRI->isPhysicalRegister(ValSrc->getReg()) ||
- TRI->isPhysicalRegister(Dst->getReg()))
+ if (Register::isPhysicalRegister(ValSrc->getReg()) ||
+ Register::isPhysicalRegister(Dst->getReg()))
break;
return make_unique<SDWASrcOperand>(
diff --git a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
index f9bfe96f65c..1cda9932785 100644
--- a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
@@ -95,7 +95,7 @@ bool SIPreAllocateWWMRegs::processDef(MachineOperand &MO) {
if (!TRI->isVGPR(*MRI, Reg))
return false;
- if (TRI->isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return false;
if (VRM->hasPhys(Reg))
@@ -125,7 +125,7 @@ void SIPreAllocateWWMRegs::rewriteRegs(MachineFunction &MF) {
continue;
const unsigned VirtReg = MO.getReg();
- if (TRI->isPhysicalRegister(VirtReg))
+ if (Register::isPhysicalRegister(VirtReg))
continue;
if (!VRM->hasPhys(VirtReg))
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 989d7062995..3c5d8fc576a 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1361,7 +1361,7 @@ StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
- assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(!Register::isVirtualRegister(Reg));
static const TargetRegisterClass *const BaseClasses[] = {
&AMDGPU::VGPR_32RegClass,
@@ -1796,7 +1796,7 @@ ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC
const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
unsigned Reg) const {
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return MRI.getRegClass(Reg);
return getPhysRegClass(Reg);
@@ -1968,7 +1968,7 @@ MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
SlotIndex UseIdx = LIS->getInstructionIndex(Use);
SlotIndex DefIdx;
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (!LIS->hasInterval(Reg))
return nullptr;
LiveInterval &LI = LIS->getInterval(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index 34487c96e72..a847db98479 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -141,7 +141,7 @@ public:
bool isSGPRReg(const MachineRegisterInfo &MRI, unsigned Reg) const {
const TargetRegisterClass *RC;
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
RC = MRI.getRegClass(Reg);
else
RC = getPhysRegClass(Reg);
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 7ee178149c7..c208cf1ef1e 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -78,7 +78,7 @@ static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
MachineOperand &Src0 = MI.getOperand(Src0Idx);
if (Src0.isReg()) {
unsigned Reg = Src0.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
+ if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
if (Def && Def->isMoveImmediate()) {
MachineOperand &MovSrc = Def->getOperand(1);
@@ -360,8 +360,7 @@ static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
}
if (NewImm != 0) {
- if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
- SrcReg->isReg()) {
+ if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
return true;
@@ -394,12 +393,11 @@ static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
if (!MO.isReg())
continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
- TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (Register::isPhysicalRegister(Reg) &&
+ Register::isPhysicalRegister(MO.getReg())) {
if (TRI.regsOverlap(Reg, MO.getReg()))
return true;
- } else if (MO.getReg() == Reg &&
- TargetRegisterInfo::isVirtualRegister(Reg)) {
+ } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
TRI.getSubRegIndexLaneMask(MO.getSubReg());
if (Overlap.any())
@@ -425,7 +423,7 @@ static TargetInstrInfo::RegSubRegPair
getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (Register::isPhysicalRegister(Reg)) {
Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
} else {
LaneBitmask LM = TRI.getSubRegIndexLaneMask(Sub);
@@ -579,7 +577,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// XXX - not exactly a check for post-regalloc run.
MachineOperand &Src = MI.getOperand(1);
if (Src.isImm() &&
- TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
+ Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
int32_t ReverseImm;
if (isReverseInlineImm(TII, Src, ReverseImm)) {
MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
@@ -643,8 +641,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// FIXME: This could work better if hints worked with subregisters. If
// we have a vector add of a constant, we usually don't get the correct
// allocation due to the subregister usage.
- if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
- Src0->isReg()) {
+ if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
continue;
@@ -672,8 +669,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &Dst = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
- if (Src.isImm() &&
- TargetRegisterInfo::isPhysicalRegister(Dst.getReg())) {
+ if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
int32_t ReverseImm;
if (isKImmOperand(TII, Src))
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
@@ -722,7 +718,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (TII->isVOPC(Op32)) {
unsigned DstReg = MI.getOperand(0).getReg();
- if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ if (Register::isVirtualRegister(DstReg)) {
// VOPC instructions can only write to the VCC register. We can't
// force them to use VCC here, because this is only one register and
// cannot deal with sequences which would require multiple copies of
@@ -746,7 +742,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (!Src2->isReg())
continue;
unsigned SReg = Src2->getReg();
- if (TargetRegisterInfo::isVirtualRegister(SReg)) {
+ if (Register::isVirtualRegister(SReg)) {
MRI.setRegAllocationHint(SReg, 0, VCCReg);
continue;
}
@@ -766,7 +762,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
bool Next = false;
if (SDst->getReg() != VCCReg) {
- if (TargetRegisterInfo::isVirtualRegister(SDst->getReg()))
+ if (Register::isVirtualRegister(SDst->getReg()))
MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
Next = true;
}
@@ -774,7 +770,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// All of the instructions with carry outs also have an SGPR input in
// src2.
if (Src2 && Src2->getReg() != VCCReg) {
- if (TargetRegisterInfo::isVirtualRegister(Src2->getReg()))
+ if (Register::isVirtualRegister(Src2->getReg()))
MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
Next = true;
}
diff --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 332c7176a8c..7980f15104c 100644
--- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -278,7 +278,7 @@ void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
// Handle physical registers that we need to track; this is mostly relevant
// for VCC, which can appear as the (implicit) input of a uniform branch,
// e.g. when a loop counter is stored in a VGPR.
- if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (!Register::isVirtualRegister(Reg)) {
if (Reg == AMDGPU::EXEC || Reg == AMDGPU::EXEC_LO)
continue;
@@ -362,7 +362,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
LowerToCopyInstrs.push_back(&MI);
} else {
unsigned Reg = Inactive.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
for (MachineInstr &DefMI : MRI->def_instructions(Reg))
markInstruction(DefMI, StateWWM, Worklist);
}
@@ -392,7 +392,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
unsigned Reg = MO.getReg();
- if (!TRI->isVirtualRegister(Reg) &&
+ if (!Register::isVirtualRegister(Reg) &&
TRI->hasVectorRegisters(TRI->getPhysRegClass(Reg))) {
Flags = StateWQM;
break;
@@ -858,10 +858,9 @@ void SIWholeQuadMode::lowerCopyInstrs() {
const unsigned Reg = MI->getOperand(0).getReg();
if (TRI->isVGPR(*MRI, Reg)) {
- const TargetRegisterClass *regClass =
- TargetRegisterInfo::isVirtualRegister(Reg)
- ? MRI->getRegClass(Reg)
- : TRI->getPhysRegClass(Reg);
+ const TargetRegisterClass *regClass = Register::isVirtualRegister(Reg)
+ ? MRI->getRegClass(Reg)
+ : TRI->getPhysRegClass(Reg);
const unsigned MovOp = TII->getMovOpcode(regClass);
MI->setDesc(TII->get(MovOp));
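A pattern that recurs throughout these AMDGPU hunks is the two-way register-class lookup: a virtual register records its class in MachineRegisterInfo, while a physical register belongs to many classes and the target must pick one (SIRegisterInfo::getPhysRegClass, whose FIXME above admits the search is slow). A minimal sketch with stand-in types, assuming nothing beyond what the hunks show:

#include <map>
#include <string>

struct TargetRegisterClass { std::string Name; };

struct MachineRegisterInfo {
  // Virtual registers record their class explicitly when created.
  std::map<unsigned, const TargetRegisterClass *> VRegClasses;
  const TargetRegisterClass *getRegClass(unsigned Reg) const {
    return VRegClasses.at(Reg);
  }
};

struct PhysRegInfo {
  // Stand-in for SIRegisterInfo::getPhysRegClass: the real function walks
  // a list of base classes until it finds one containing Reg.
  const TargetRegisterClass *getPhysRegClass(unsigned) const {
    static const TargetRegisterClass VGPR32{"VGPR_32"};
    return &VGPR32;
  }
};

static bool isVirtual(unsigned Reg) { return int(Reg) < 0; }

// Mirrors the ternary rewritten in getOpRegClass, isLegalRegOperand,
// getRegClassForReg and lowerCopyInstrs above.
const TargetRegisterClass *classForReg(const MachineRegisterInfo &MRI,
                                       const PhysRegInfo &TRI, unsigned Reg) {
  return isVirtual(Reg) ? MRI.getRegClass(Reg) : TRI.getPhysRegClass(Reg);
}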
diff --git a/llvm/lib/Target/ARM/A15SDOptimizer.cpp b/llvm/lib/Target/ARM/A15SDOptimizer.cpp
index fb238bfc9cb..a4aacfb6dc8 100644
--- a/llvm/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/llvm/lib/Target/ARM/A15SDOptimizer.cpp
@@ -135,7 +135,7 @@ bool A15SDOptimizer::usesRegClass(MachineOperand &MO,
return false;
unsigned Reg = MO.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return MRI->getRegClass(Reg)->hasSuperClassEq(TRC);
else
return TRC->contains(Reg);
@@ -151,7 +151,7 @@ unsigned A15SDOptimizer::getDPRLaneFromSPR(unsigned SReg) {
// Get the subreg type that is most likely to be coalesced
// for an SPR register that will be used in a VDUP32d pseudo.
unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) {
- if (!TRI->isVirtualRegister(SReg))
+ if (!Register::isVirtualRegister(SReg))
return getDPRLaneFromSPR(SReg);
MachineInstr *MI = MRI->getVRegDef(SReg);
@@ -166,7 +166,7 @@ unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) {
SReg = MI->getOperand(1).getReg();
}
- if (TargetRegisterInfo::isVirtualRegister(SReg)) {
+ if (Register::isVirtualRegister(SReg)) {
if (MO->getSubReg() == ARM::ssub_1) return ARM::ssub_1;
return ARM::ssub_0;
}
@@ -192,7 +192,7 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
if ((!MO.isReg()) || (!MO.isUse()))
continue;
unsigned Reg = MO.getReg();
- if (!TRI->isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
MachineOperand *Op = MI->findRegisterDefOperand(Reg);
@@ -214,7 +214,7 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
if ((!MODef.isReg()) || (!MODef.isDef()))
continue;
unsigned DefReg = MODef.getReg();
- if (!TRI->isVirtualRegister(DefReg)) {
+ if (!Register::isVirtualRegister(DefReg)) {
IsDead = false;
break;
}
@@ -248,7 +248,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) {
unsigned DPRReg = MI->getOperand(1).getReg();
unsigned SPRReg = MI->getOperand(2).getReg();
- if (TRI->isVirtualRegister(DPRReg) && TRI->isVirtualRegister(SPRReg)) {
+ if (Register::isVirtualRegister(DPRReg) && Register::isVirtualRegister(SPRReg)) {
MachineInstr *DPRMI = MRI->getVRegDef(MI->getOperand(1).getReg());
MachineInstr *SPRMI = MRI->getVRegDef(MI->getOperand(2).getReg());
@@ -298,7 +298,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) {
++NumTotal;
unsigned OpReg = MI->getOperand(I).getReg();
- if (!TRI->isVirtualRegister(OpReg))
+ if (!Register::isVirtualRegister(OpReg))
break;
MachineInstr *Def = MRI->getVRegDef(OpReg);
@@ -342,7 +342,7 @@ bool A15SDOptimizer::hasPartialWrite(MachineInstr *MI) {
MachineInstr *A15SDOptimizer::elideCopies(MachineInstr *MI) {
if (!MI->isFullCopy())
return MI;
- if (!TRI->isVirtualRegister(MI->getOperand(1).getReg()))
+ if (!Register::isVirtualRegister(MI->getOperand(1).getReg()))
return nullptr;
MachineInstr *Def = MRI->getVRegDef(MI->getOperand(1).getReg());
if (!Def)
@@ -370,7 +370,7 @@ void A15SDOptimizer::elideCopiesAndPHIs(MachineInstr *MI,
if (MI->isPHI()) {
for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
unsigned Reg = MI->getOperand(I).getReg();
- if (!TRI->isVirtualRegister(Reg)) {
+ if (!Register::isVirtualRegister(Reg)) {
continue;
}
MachineInstr *NewMI = MRI->getVRegDef(Reg);
@@ -379,7 +379,7 @@ void A15SDOptimizer::elideCopiesAndPHIs(MachineInstr *MI,
Front.push_back(NewMI);
}
} else if (MI->isFullCopy()) {
- if (!TRI->isVirtualRegister(MI->getOperand(1).getReg()))
+ if (!Register::isVirtualRegister(MI->getOperand(1).getReg()))
continue;
MachineInstr *NewMI = MRI->getVRegDef(MI->getOperand(1).getReg());
if (!NewMI)
@@ -602,7 +602,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
// we can end up with multiple defs of this DPR.
SmallVector<MachineInstr *, 8> DefSrcs;
- if (!TRI->isVirtualRegister(*I))
+ if (!Register::isVirtualRegister(*I))
continue;
MachineInstr *Def = MRI->getVRegDef(*I);
if (!Def)
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index e29077266fc..b8d2c7fbbe5 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -204,7 +204,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
default: llvm_unreachable("<unknown operand type>");
case MachineOperand::MO_Register: {
unsigned Reg = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
assert(!MO.getSubReg() && "Subregs should be eliminated!");
if(ARM::GPRPairRegClass.contains(Reg)) {
const MachineFunction &MF = *MI->getParent()->getParent();
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 7b5e45f0ba2..763722cbf1d 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -276,7 +276,7 @@ MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(
if (LV) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+ if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
unsigned Reg = MO.getReg();
LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
@@ -1019,7 +1019,7 @@ ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
if (!SubIdx)
return MIB.addReg(Reg, State);
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
return MIB.addReg(Reg, State, SubIdx);
}
@@ -1337,7 +1337,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
}
- if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ if (Register::isPhysicalRegister(DestReg))
MIB.addReg(DestReg, RegState::ImplicitDefine);
} else
llvm_unreachable("Unknown reg class!");
@@ -1382,7 +1382,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
- if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ if (Register::isPhysicalRegister(DestReg))
MIB.addReg(DestReg, RegState::ImplicitDefine);
}
} else
@@ -1405,7 +1405,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
- if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ if (Register::isPhysicalRegister(DestReg))
MIB.addReg(DestReg, RegState::ImplicitDefine);
}
} else
@@ -1425,7 +1425,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
- if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ if (Register::isPhysicalRegister(DestReg))
MIB.addReg(DestReg, RegState::ImplicitDefine);
} else
llvm_unreachable("Unknown reg class!");
@@ -1797,9 +1797,8 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
unsigned Addr0 = MI0.getOperand(1).getReg();
unsigned Addr1 = MI1.getOperand(1).getReg();
if (Addr0 != Addr1) {
- if (!MRI ||
- !TargetRegisterInfo::isVirtualRegister(Addr0) ||
- !TargetRegisterInfo::isVirtualRegister(Addr1))
+ if (!MRI || !Register::isVirtualRegister(Addr0) ||
+ !Register::isVirtualRegister(Addr1))
return false;
// This assumes SSA form.
@@ -2141,7 +2140,7 @@ MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
MachineInstr *
ARMBaseInstrInfo::canFoldIntoMOVCC(unsigned Reg, const MachineRegisterInfo &MRI,
const TargetInstrInfo *TII) const {
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return nullptr;
if (!MRI.hasOneNonDBGUse(Reg))
return nullptr;
@@ -2163,7 +2162,7 @@ ARMBaseInstrInfo::canFoldIntoMOVCC(unsigned Reg, const MachineRegisterInfo &MRI,
// MI can't have any tied operands, that would conflict with predication.
if (MO.isTied())
return nullptr;
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
return nullptr;
if (MO.isDef() && !MO.isDead())
return nullptr;
@@ -5135,7 +5134,7 @@ unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
return 0;
// We must be able to clobber the whole D-reg.
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
// Virtual register must be a def undef foo:ssub_0 operand.
if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
return 0;
@@ -5161,7 +5160,7 @@ void ARMBaseInstrInfo::breakPartialRegDependency(
const MachineOperand &MO = MI.getOperand(OpNum);
unsigned Reg = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
+ assert(Register::isPhysicalRegister(Reg) &&
"Can't break virtual register dependencies.");
unsigned DReg = Reg;
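The AddDReg change above is worth pausing on: for a physical register, a subregister index can be resolved immediately to a concrete register (TRI->getSubReg), while for a virtual register the index must stay on the operand until register allocation assigns something concrete. A hedged sketch of just that control flow, with stand-in types rather than the real MachineInstrBuilder API:

struct OperandSketch {
  unsigned Reg;    // register number
  unsigned SubIdx; // subregister index, 0 if none
};

static bool isPhysical(unsigned Reg) { return int(Reg) > 0; }

// Stand-in for TargetRegisterInfo::getSubReg, which is a table lookup in
// the real backend; the arithmetic here is purely illustrative.
static unsigned getSubRegSketch(unsigned PhysReg, unsigned SubIdx) {
  return PhysReg * 100 + SubIdx;
}

OperandSketch addDRegSketch(unsigned Reg, unsigned SubIdx) {
  if (!SubIdx)
    return {Reg, 0};                          // whole register, no index
  if (isPhysical(Reg))
    return {getSubRegSketch(Reg, SubIdx), 0}; // resolve concretely now
  return {Reg, SubIdx};                       // defer: keep index on the vreg
}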
diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index dc99b37742d..e63ea7a4447 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -317,7 +317,7 @@ ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
return false;
unsigned PairedPhys = 0;
- if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
+ if (Register::isPhysicalRegister(Paired)) {
PairedPhys = Paired;
} else if (VRM && VRM->hasPhys(Paired)) {
PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
@@ -347,7 +347,7 @@ ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
Hint.first == (unsigned)ARMRI::RegPairEven) &&
- TargetRegisterInfo::isVirtualRegister(Hint.second)) {
+ Register::isVirtualRegister(Hint.second)) {
// If 'Reg' is one of an even / odd register pair and it has now been changed
// (e.g. coalesced) into a different register, the other register of the
// pair allocation hint must be updated to reflect the relationship
@@ -357,7 +357,7 @@ ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
// Make sure the pair has not already divorced.
if (Hint.second == Reg) {
MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
- if (TargetRegisterInfo::isVirtualRegister(NewReg))
+ if (Register::isVirtualRegister(NewReg))
MRI->setRegAllocationHint(NewReg,
Hint.first == (unsigned)ARMRI::RegPairOdd ? ARMRI::RegPairEven
: ARMRI::RegPairOdd, OtherReg);
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index 790998b8c65..3d22a9191b4 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -532,7 +532,7 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
MIB.add(Callee);
if (!IsDirect) {
auto CalleeReg = Callee.getReg();
- if (CalleeReg && !TRI->isPhysicalRegister(CalleeReg)) {
+ if (CalleeReg && !Register::isPhysicalRegister(CalleeReg)) {
unsigned CalleeIdx = IsThumb ? 2 : 0;
MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e372bdb4f58..c0232f01c3e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2450,7 +2450,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
int FI = std::numeric_limits<int>::max();
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
- if (!TargetRegisterInfo::isVirtualRegister(VR))
+ if (!Register::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index 4485a474a6d..fe5f8dd787e 100644
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -211,7 +211,7 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
const RegisterBankInfo &RBI) {
unsigned DstReg = I.getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
return true;
const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
diff --git a/llvm/lib/Target/ARM/ARMScheduleA9.td b/llvm/lib/Target/ARM/ARMScheduleA9.td
index 21d32bde471..3f0b71afd97 100644
--- a/llvm/lib/Target/ARM/ARMScheduleA9.td
+++ b/llvm/lib/Target/ARM/ARMScheduleA9.td
@@ -2239,9 +2239,9 @@ def A9WriteLMfpPostRA : SchedWriteVariant<[
// Distinguish between our multiple MI-level forms of the same
// VLDM/VSTM instructions.
def A9PreRA : SchedPredicate<
- "TargetRegisterInfo::isVirtualRegister(MI->getOperand(0).getReg())">;
+ "Register::isVirtualRegister(MI->getOperand(0).getReg())">;
def A9PostRA : SchedPredicate<
- "TargetRegisterInfo::isPhysicalRegister(MI->getOperand(0).getReg())">;
+ "Register::isPhysicalRegister(MI->getOperand(0).getReg())">;
// VLDM represents all destination registers as a single register
// tuple, unlike LDM. So the number of write operands is not variadic.
diff --git a/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index 4b25986b90a..4256f8dddaa 100644
--- a/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -87,7 +87,7 @@ MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const {
// Look past COPY and INSERT_SUBREG instructions to find the
// real definition MI. This is important for _sfp instructions.
unsigned Reg = MI->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return nullptr;
MachineBasicBlock *MBB = MI->getParent();
@@ -97,13 +97,13 @@ MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const {
break;
if (DefMI->isCopyLike()) {
Reg = DefMI->getOperand(1).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
DefMI = MRI->getVRegDef(Reg);
continue;
}
} else if (DefMI->isInsertSubreg()) {
Reg = DefMI->getOperand(2).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
DefMI = MRI->getVRegDef(Reg);
continue;
}
@@ -115,8 +115,7 @@ MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const {
unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
unsigned Reg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
- !MRI->hasOneNonDBGUse(Reg))
+ if (Register::isPhysicalRegister(Reg) || !MRI->hasOneNonDBGUse(Reg))
return Reg;
MachineBasicBlock *MBB = MI->getParent();
@@ -126,8 +125,7 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
while (UseMI->isCopy() || UseMI->isInsertSubreg()) {
Reg = UseMI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
- !MRI->hasOneNonDBGUse(Reg))
+ if (Register::isPhysicalRegister(Reg) || !MRI->hasOneNonDBGUse(Reg))
return Reg;
UseMI = &*MRI->use_instr_nodbg_begin(Reg);
if (UseMI->getParent() != MBB)
@@ -141,7 +139,7 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
/// a single-MBB loop.
bool MLxExpansion::hasLoopHazard(MachineInstr *MI) const {
unsigned Reg = MI->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
return false;
MachineBasicBlock *MBB = MI->getParent();
@@ -155,7 +153,7 @@ outer_continue:
for (unsigned i = 1, e = DefMI->getNumOperands(); i < e; i += 2) {
if (DefMI->getOperand(i + 1).getMBB() == MBB) {
unsigned SrcReg = DefMI->getOperand(i).getReg();
- if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (Register::isVirtualRegister(SrcReg)) {
DefMI = MRI->getVRegDef(SrcReg);
goto outer_continue;
}
@@ -163,13 +161,13 @@ outer_continue:
}
} else if (DefMI->isCopyLike()) {
Reg = DefMI->getOperand(1).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
DefMI = MRI->getVRegDef(Reg);
continue;
}
} else if (DefMI->isInsertSubreg()) {
Reg = DefMI->getOperand(2).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
DefMI = MRI->getVRegDef(Reg);
continue;
}
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index f57d93a2e83..fccaa4c9cc8 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -80,12 +80,11 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
assert((RC == &ARM::tGPRRegClass ||
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- isARMLowRegister(SrcReg))) && "Unknown regclass!");
+ (Register::isPhysicalRegister(SrcReg) && isARMLowRegister(SrcReg))) &&
+ "Unknown regclass!");
if (RC == &ARM::tGPRRegClass ||
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- isARMLowRegister(SrcReg))) {
+ (Register::isPhysicalRegister(SrcReg) && isARMLowRegister(SrcReg))) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -108,13 +107,13 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert((RC->hasSuperClassEq(&ARM::tGPRRegClass) ||
- (TargetRegisterInfo::isPhysicalRegister(DestReg) &&
- isARMLowRegister(DestReg))) && "Unknown regclass!");
+ assert(
+ (RC->hasSuperClassEq(&ARM::tGPRRegClass) ||
+ (Register::isPhysicalRegister(DestReg) && isARMLowRegister(DestReg))) &&
+ "Unknown regclass!");
if (RC->hasSuperClassEq(&ARM::tGPRRegClass) ||
- (TargetRegisterInfo::isPhysicalRegister(DestReg) &&
- isARMLowRegister(DestReg))) {
+ (Register::isPhysicalRegister(DestReg) && isARMLowRegister(DestReg))) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index 5a965f7a6b9..fa2be13f753 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -159,7 +159,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
// Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for
// gsub_0, but needs an extra constraint for gsub_1 (which could be sp
// otherwise).
- if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (Register::isVirtualRegister(SrcReg)) {
MachineRegisterInfo *MRI = &MF.getRegInfo();
MRI->constrainRegClass(SrcReg, &ARM::GPRPair_with_gsub_1_in_GPRwithAPSRnospRegClass);
}
@@ -200,7 +200,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
// Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
// gsub_0, but needs an extra constraint for gsub_1 (which could be sp
// otherwise).
- if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
+ if (Register::isVirtualRegister(DestReg)) {
MachineRegisterInfo *MRI = &MF.getRegInfo();
MRI->constrainRegClass(DestReg,
&ARM::GPRPair_with_gsub_1_in_GPRwithAPSRnospRegClass);
@@ -211,7 +211,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
- if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ if (Register::isPhysicalRegister(DestReg))
MIB.addReg(DestReg, RegState::ImplicitDefine);
return;
}
diff --git a/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp b/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
index a96417ffce4..6ddcdefdb14 100644
--- a/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
@@ -107,8 +107,9 @@ void ThumbRegisterInfo::emitLoadConstPool(
MachineFunction &MF = *MBB.getParent();
const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
if (STI.isThumb1Only()) {
- assert((isARMLowRegister(DestReg) || isVirtualRegister(DestReg)) &&
- "Thumb1 does not have ldr to high register");
+ assert(
+ (isARMLowRegister(DestReg) || Register::isVirtualRegister(DestReg)) &&
+ "Thumb1 does not have ldr to high register");
return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
PredReg, MIFlags);
}
@@ -141,7 +142,7 @@ static void emitThumbRegPlusImmInReg(
unsigned LdReg = DestReg;
if (DestReg == ARM::SP)
assert(BaseReg == ARM::SP && "Unexpected!");
- if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
+ if (!isARMLowRegister(DestReg) && !Register::isVirtualRegister(DestReg))
LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 1bd705c5518..98c6528664d 100644
--- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -493,7 +493,7 @@ bool BPFDAGToDAGISel::fillConstantStruct(const DataLayout &DL,
void BPFDAGToDAGISel::PreprocessCopyToReg(SDNode *Node) {
const RegisterSDNode *RegN = dyn_cast<RegisterSDNode>(Node->getOperand(1));
- if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg()))
+ if (!RegN || !Register::isVirtualRegister(RegN->getReg()))
return;
const LoadSDNode *LD = dyn_cast<LoadSDNode>(Node->getOperand(2));
@@ -517,8 +517,7 @@ void BPFDAGToDAGISel::PreprocessCopyToReg(SDNode *Node) {
}
LLVM_DEBUG(dbgs() << "Find Load Value to VReg "
- << TargetRegisterInfo::virtReg2Index(RegN->getReg())
- << '\n');
+ << Register::virtReg2Index(RegN->getReg()) << '\n');
load_to_vreg_[RegN->getReg()] = mem_load_op;
}
@@ -576,7 +575,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
const RegisterSDNode *RegN =
dyn_cast<RegisterSDNode>(BaseV.getNode()->getOperand(1));
- if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg()))
+ if (!RegN || !Register::isVirtualRegister(RegN->getReg()))
return;
unsigned AndOpReg = RegN->getReg();
LLVM_DEBUG(dbgs() << "Examine " << printReg(AndOpReg) << '\n');
@@ -593,7 +592,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
if (!MOP.isReg() || !MOP.isDef())
continue;
unsigned Reg = MOP.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg) && Reg == AndOpReg) {
+ if (Register::isVirtualRegister(Reg) && Reg == AndOpReg) {
MII = &MI;
break;
}
@@ -618,7 +617,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
if (MOP.isDef())
continue;
PrevReg = MOP.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(PrevReg))
+ if (!Register::isVirtualRegister(PrevReg))
return;
if (!checkLoadDef(PrevReg, match_load_op))
return;
diff --git a/llvm/lib/Target/BPF/BPFMIPeephole.cpp b/llvm/lib/Target/BPF/BPFMIPeephole.cpp
index 156ba793e35..ab25da4aa99 100644
--- a/llvm/lib/Target/BPF/BPFMIPeephole.cpp
+++ b/llvm/lib/Target/BPF/BPFMIPeephole.cpp
@@ -105,9 +105,9 @@ bool BPFMIPeephole::isMovFrom32Def(MachineInstr *MovMI)
return false;
unsigned Reg = opnd.getReg();
- if ((TargetRegisterInfo::isVirtualRegister(Reg) &&
+ if ((Register::isVirtualRegister(Reg) &&
MRI->getRegClass(Reg) == &BPF::GPRRegClass))
- return false;
+ return false;
}
LLVM_DEBUG(dbgs() << " One ZExt elim sequence identified.\n");
diff --git a/llvm/lib/Target/Hexagon/BitTracker.cpp b/llvm/lib/Target/Hexagon/BitTracker.cpp
index b7e95caf24f..a2dcad1932c 100644
--- a/llvm/lib/Target/Hexagon/BitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/BitTracker.cpp
@@ -84,7 +84,7 @@ namespace {
raw_ostream &operator<< (raw_ostream &OS, const printv &PV) {
if (PV.R)
- OS << 'v' << TargetRegisterInfo::virtReg2Index(PV.R);
+ OS << 'v' << Register::virtReg2Index(PV.R);
else
OS << 's';
return OS;
@@ -201,7 +201,7 @@ BitTracker::~BitTracker() {
bool BT::RegisterCell::meet(const RegisterCell &RC, unsigned SelfR) {
// An example when "meet" can be invoked with SelfR == 0 is a phi node
// with a physical register as an operand.
- assert(SelfR == 0 || TargetRegisterInfo::isVirtualRegister(SelfR));
+ assert(SelfR == 0 || Register::isVirtualRegister(SelfR));
bool Changed = false;
for (uint16_t i = 0, n = Bits.size(); i < n; ++i) {
const BitValue &RCV = RC[i];
@@ -335,11 +335,11 @@ uint16_t BT::MachineEvaluator::getRegBitWidth(const RegisterRef &RR) const {
// 1. find a physical register PhysR from the same class as RR.Reg,
// 2. find a physical register PhysS that corresponds to PhysR:RR.Sub,
// 3. find a register class that contains PhysS.
- if (TargetRegisterInfo::isVirtualRegister(RR.Reg)) {
+ if (Register::isVirtualRegister(RR.Reg)) {
const auto &VC = composeWithSubRegIndex(*MRI.getRegClass(RR.Reg), RR.Sub);
return TRI.getRegSizeInBits(VC);
}
- assert(TargetRegisterInfo::isPhysicalRegister(RR.Reg));
+ assert(Register::isPhysicalRegister(RR.Reg));
unsigned PhysR = (RR.Sub == 0) ? RR.Reg : TRI.getSubReg(RR.Reg, RR.Sub);
return getPhysRegBitWidth(PhysR);
}
@@ -350,10 +350,10 @@ BT::RegisterCell BT::MachineEvaluator::getCell(const RegisterRef &RR,
// Physical registers are assumed to be present in the map with an unknown
// value. Don't actually insert anything in the map, just return the cell.
- if (TargetRegisterInfo::isPhysicalRegister(RR.Reg))
+ if (Register::isPhysicalRegister(RR.Reg))
return RegisterCell::self(0, BW);
- assert(TargetRegisterInfo::isVirtualRegister(RR.Reg));
+ assert(Register::isVirtualRegister(RR.Reg));
// For virtual registers that belong to a class that is not tracked,
// generate an "unknown" value as well.
const TargetRegisterClass *C = MRI.getRegClass(RR.Reg);
@@ -376,7 +376,7 @@ void BT::MachineEvaluator::putCell(const RegisterRef &RR, RegisterCell RC,
// While updating the cell map can be done in a meaningful way for
// a part of a register, it makes little sense to implement it as the
// SSA representation would never contain such "partial definitions".
- if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
+ if (!Register::isVirtualRegister(RR.Reg))
return;
assert(RR.Sub == 0 && "Unexpected sub-register in definition");
// Eliminate all ref-to-reg-0 bit values: replace them with "self".
@@ -711,7 +711,7 @@ BT::BitMask BT::MachineEvaluator::mask(unsigned Reg, unsigned Sub) const {
}
uint16_t BT::MachineEvaluator::getPhysRegBitWidth(unsigned Reg) const {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
const TargetRegisterClass &PC = *TRI.getMinimalPhysRegClass(Reg);
return TRI.getRegSizeInBits(PC);
}
@@ -874,7 +874,7 @@ void BT::visitNonBranch(const MachineInstr &MI) {
continue;
RegisterRef RD(MO);
assert(RD.Sub == 0 && "Unexpected sub-register in definition");
- if (!TargetRegisterInfo::isVirtualRegister(RD.Reg))
+ if (!Register::isVirtualRegister(RD.Reg))
continue;
bool Changed = false;
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 7b75d251ccd..d33d179616a 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -147,11 +147,11 @@ namespace {
}
static inline unsigned v2x(unsigned v) {
- return TargetRegisterInfo::virtReg2Index(v);
+ return Register::virtReg2Index(v);
}
static inline unsigned x2v(unsigned x) {
- return TargetRegisterInfo::index2VirtReg(x);
+ return Register::index2VirtReg(x);
}
};
@@ -291,7 +291,7 @@ void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
if (!Op.isReg() || !Op.isDef())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
Defs.insert(R);
}
@@ -303,7 +303,7 @@ void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
if (!Op.isReg() || !Op.isUse())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
Uses.insert(R);
}
@@ -353,8 +353,7 @@ bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC,
bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
MachineRegisterInfo &MRI) {
- if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
- !TargetRegisterInfo::isVirtualRegister(NewR))
+ if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
return false;
auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
decltype(End) NextI;
@@ -367,8 +366,7 @@ bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
unsigned NewSR, MachineRegisterInfo &MRI) {
- if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
- !TargetRegisterInfo::isVirtualRegister(NewR))
+ if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
return false;
if (hasTiedUse(OldR, MRI, NewSR))
return false;
@@ -384,8 +382,7 @@ bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR,
unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) {
- if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
- !TargetRegisterInfo::isVirtualRegister(NewR))
+ if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
return false;
if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR))
return false;
@@ -896,7 +893,7 @@ bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
- if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
+ if (!Register::isVirtualRegister(RR.Reg))
return nullptr;
auto *RC = MRI.getRegClass(RR.Reg);
if (RR.Sub == 0)
@@ -927,8 +924,8 @@ const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
- if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
- !TargetRegisterInfo::isVirtualRegister(RS.Reg))
+ if (!Register::isVirtualRegister(RD.Reg) ||
+ !Register::isVirtualRegister(RS.Reg))
return false;
// Return false if one (or both) classes are nullptr.
auto *DRC = getFinalVRegClass(RD, MRI);
@@ -1019,7 +1016,7 @@ bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
if (!Op.isReg() || !Op.isDef())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) {
+ if (!Register::isVirtualRegister(R) || !isDead(R)) {
AllDead = false;
break;
}
@@ -1221,7 +1218,7 @@ bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
MachineInstr &UseI = *I->getParent();
if (UseI.isPHI() || UseI.isCopy()) {
unsigned DefR = UseI.getOperand(0).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(DefR))
+ if (!Register::isVirtualRegister(DefR))
return false;
Pending.push_back(DefR);
} else {
@@ -1470,7 +1467,7 @@ bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
if (Defs.count() != 1)
continue;
unsigned DR = Defs.find_first();
- if (!TargetRegisterInfo::isVirtualRegister(DR))
+ if (!Register::isVirtualRegister(DR))
continue;
uint64_t U;
const BitTracker::RegisterCell &DRC = BT.lookup(DR);
@@ -1819,7 +1816,7 @@ bool BitSimplification::matchHalf(unsigned SelfR,
if (Reg == 0 || Reg == SelfR) // Don't match "self".
return false;
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return false;
if (!BT.has(Reg))
return false;
@@ -3162,7 +3159,7 @@ bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
if (Defs.count() != 1)
continue;
unsigned DefR = Defs.find_first();
- if (!TargetRegisterInfo::isVirtualRegister(DefR))
+ if (!Register::isVirtualRegister(DefR))
continue;
if (!isBitShuffle(&*I, DefR))
continue;
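HexagonBitSimplify's v2x/x2v wrappers above exist because virtual register numbers are tagged rather than dense: stripping the tag yields a compact 0-based index usable as a BitVector position, and adding it back recovers the register number. A sketch of the round trip, mirroring the convention of Register::virtReg2Index / index2VirtReg (assumed tag bit 31, as in the Register sketch earlier):

#include <cassert>

static const unsigned VRegTag = 1u << 31;

static unsigned virtReg2Index(unsigned Reg) {
  assert((Reg & VRegTag) && "not a virtual register");
  return Reg & ~VRegTag; // dense 0-based index, fit for BitVector keys
}

static unsigned index2VirtReg(unsigned Index) { return Index | VRegTag; }

int main() {
  for (unsigned I = 0; I < 16; ++I)
    assert(virtReg2Index(index2VirtReg(I)) == I); // lossless round trip
  return 0;
}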
diff --git a/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp b/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
index ba50faac2cf..80ef3c1e158 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -111,7 +111,7 @@ BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
}
uint16_t HexagonEvaluator::getPhysRegBitWidth(unsigned Reg) const {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
using namespace Hexagon;
const auto &HST = MF.getSubtarget<HexagonSubtarget>();
@@ -1043,7 +1043,7 @@ unsigned HexagonEvaluator::getUniqueDefVReg(const MachineInstr &MI) const {
if (!Op.isReg() || !Op.isDef())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
if (DefReg != 0)
return 0;
@@ -1220,7 +1220,7 @@ bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr &MI,
RegisterRef RD = MI.getOperand(0);
RegisterRef RS = MI.getOperand(1);
assert(RD.Sub == 0);
- if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg))
+ if (!Register::isPhysicalRegister(RS.Reg))
return false;
RegExtMap::const_iterator F = VRX.find(RD.Reg);
if (F == VRX.end())
diff --git a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
index 999150fc8c6..d1d1b8ee7d4 100644
--- a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -268,14 +268,14 @@ HexagonBlockRanges::RegisterSet HexagonBlockRanges::expandToSubRegs(
return SRs;
}
- if (TargetRegisterInfo::isPhysicalRegister(R.Reg)) {
+ if (Register::isPhysicalRegister(R.Reg)) {
MCSubRegIterator I(R.Reg, &TRI);
if (!I.isValid())
SRs.insert({R.Reg, 0});
for (; I.isValid(); ++I)
SRs.insert({*I, 0});
} else {
- assert(TargetRegisterInfo::isVirtualRegister(R.Reg));
+ assert(Register::isVirtualRegister(R.Reg));
auto &RC = *MRI.getRegClass(R.Reg);
unsigned PReg = *RC.begin();
MCSubRegIndexIterator I(PReg, &TRI);
@@ -321,7 +321,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
if (!Op.isReg() || !Op.isUse() || Op.isUndef())
continue;
RegisterRef R = { Op.getReg(), Op.getSubReg() };
- if (TargetRegisterInfo::isPhysicalRegister(R.Reg) && Reserved[R.Reg])
+ if (Register::isPhysicalRegister(R.Reg) && Reserved[R.Reg])
continue;
bool IsKill = Op.isKill();
for (auto S : expandToSubRegs(R, MRI, TRI)) {
@@ -338,7 +338,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
continue;
RegisterRef R = { Op.getReg(), Op.getSubReg() };
for (auto S : expandToSubRegs(R, MRI, TRI)) {
- if (TargetRegisterInfo::isPhysicalRegister(S.Reg) && Reserved[S.Reg])
+ if (Register::isPhysicalRegister(S.Reg) && Reserved[S.Reg])
continue;
if (Op.isDead())
Clobbers.insert(S);
@@ -374,7 +374,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
// Update maps for defs.
for (RegisterRef S : Defs) {
// Defs should already be expanded into subregs.
- assert(!TargetRegisterInfo::isPhysicalRegister(S.Reg) ||
+ assert(!Register::isPhysicalRegister(S.Reg) ||
!MCSubRegIterator(S.Reg, &TRI, false).isValid());
if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
closeRange(S);
@@ -383,7 +383,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
// Update maps for clobbers.
for (RegisterRef S : Clobbers) {
// Clobbers should already be expanded into subregs.
- assert(!TargetRegisterInfo::isPhysicalRegister(S.Reg) ||
+ assert(!Register::isPhysicalRegister(S.Reg) ||
!MCSubRegIterator(S.Reg, &TRI, false).isValid());
if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
closeRange(S);
@@ -482,7 +482,7 @@ HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeDeadMap(
}
}
for (auto &P : LiveMap)
- if (TargetRegisterInfo::isVirtualRegister(P.first.Reg))
+ if (Register::isVirtualRegister(P.first.Reg))
addDeadRanges(P.first);
LLVM_DEBUG(dbgs() << __func__ << ": dead map\n"
diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
index cfed0ecef27..c900851d739 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -14,9 +14,10 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Pass.h"
#include <map>
#include <set>
#include <utility>
@@ -235,24 +236,24 @@ namespace {
Reg = Op.getReg();
Sub = Op.getSubReg();
} else if (Op.isFI()) {
- Reg = TargetRegisterInfo::index2StackSlot(Op.getIndex());
+ Reg = llvm::Register::index2StackSlot(Op.getIndex());
}
return *this;
}
bool isVReg() const {
- return Reg != 0 && !TargetRegisterInfo::isStackSlot(Reg) &&
- TargetRegisterInfo::isVirtualRegister(Reg);
+ return Reg != 0 && !llvm::Register::isStackSlot(Reg) &&
+ llvm::Register::isVirtualRegister(Reg);
}
bool isSlot() const {
- return Reg != 0 && TargetRegisterInfo::isStackSlot(Reg);
+ return Reg != 0 && llvm::Register::isStackSlot(Reg);
}
operator MachineOperand() const {
if (isVReg())
return MachineOperand::CreateReg(Reg, /*Def*/false, /*Imp*/false,
/*Kill*/false, /*Dead*/false, /*Undef*/false,
/*EarlyClobber*/false, Sub);
- if (TargetRegisterInfo::isStackSlot(Reg)) {
- int FI = TargetRegisterInfo::stackSlot2Index(Reg);
+ if (llvm::Register::isStackSlot(Reg)) {
+ int FI = llvm::Register::stackSlot2Index(Reg);
return MachineOperand::CreateFI(FI);
}
llvm_unreachable("Cannot create MachineOperand");
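The ConstExtenders hunk above also exercises the stack-slot band of that encoding: a frame index is folded into the same Reg field that otherwise holds a register number, which is why isVReg() must rule out isStackSlot() before testing isVirtualRegister(). A hedged sketch of the two conversions, under the same assumed constants as before:

#include <cassert>

// Sketch: frame index <-> stack-slot-encoded register number.
static bool isStackSlot(unsigned Reg) { return int(Reg) >= (1 << 30); }

static unsigned index2StackSlot(int FI) {
  assert(FI >= 0 && "Cannot hold a negative frame index");
  return unsigned(FI) + (1u << 30); // shift the index into the slot band
}

static int stackSlot2Index(unsigned Reg) {
  assert(isStackSlot(Reg) && "Not a stack slot");
  return int(Reg - (1u << 30)); // recover the original frame index
}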
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index d1fde5da5fe..6724093bf48 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -208,14 +208,14 @@ namespace {
bool has(unsigned R) const {
// All non-virtual registers are considered "bottom".
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
return true;
MapType::const_iterator F = Map.find(R);
return F != Map.end();
}
const LatticeCell &get(unsigned R) const {
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
return Bottom;
MapType::const_iterator F = Map.find(R);
if (F != Map.end())
@@ -623,7 +623,7 @@ void MachineConstPropagator::visitPHI(const MachineInstr &PN) {
const MachineOperand &MD = PN.getOperand(0);
RegisterSubReg DefR(MD);
- assert(TargetRegisterInfo::isVirtualRegister(DefR.Reg));
+ assert(Register::isVirtualRegister(DefR.Reg));
bool Changed = false;
@@ -652,7 +652,7 @@ Bottomize:
RegisterSubReg UseR(SO);
// If the input is not a virtual register, we don't really know what
// value it holds.
- if (!TargetRegisterInfo::isVirtualRegister(UseR.Reg))
+ if (!Register::isVirtualRegister(UseR.Reg))
goto Bottomize;
// If there is no cell for an input register, it means top.
if (!Cells.has(UseR.Reg))
@@ -694,7 +694,7 @@ void MachineConstPropagator::visitNonBranch(const MachineInstr &MI) {
continue;
RegisterSubReg DefR(MO);
// Only track virtual registers.
- if (!TargetRegisterInfo::isVirtualRegister(DefR.Reg))
+ if (!Register::isVirtualRegister(DefR.Reg))
continue;
bool Changed = false;
// If the evaluation failed, set cells for all output registers to bottom.
@@ -1070,7 +1070,7 @@ bool MachineConstPropagator::run(MachineFunction &MF) {
bool MachineConstEvaluator::getCell(const RegisterSubReg &R, const CellMap &Inputs,
LatticeCell &RC) {
- if (!TargetRegisterInfo::isVirtualRegister(R.Reg))
+ if (!Register::isVirtualRegister(R.Reg))
return false;
const LatticeCell &L = Inputs.get(R.Reg);
if (!R.SubReg) {
@@ -1926,7 +1926,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
unsigned Opc = MI.getOpcode();
RegisterSubReg DefR(MD);
assert(!DefR.SubReg);
- if (!TargetRegisterInfo::isVirtualRegister(DefR.Reg))
+ if (!Register::isVirtualRegister(DefR.Reg))
return false;
if (MI.isCopy()) {
@@ -2793,7 +2793,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI,
if (!MO.isReg() || !MO.isUse() || MO.isImplicit())
continue;
RegisterSubReg R(MO);
- if (!TargetRegisterInfo::isVirtualRegister(R.Reg))
+ if (!Register::isVirtualRegister(R.Reg))
continue;
HasUse = true;
// PHIs can legitimately have "top" cells after propagation.
@@ -2832,7 +2832,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI,
if (!MO.isReg() || !MO.isDef())
continue;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
assert(!MO.getSubReg());
assert(Inputs.has(R));
@@ -3111,8 +3111,8 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
void HexagonConstEvaluator::replaceAllRegUsesWith(unsigned FromReg,
unsigned ToReg) {
- assert(TargetRegisterInfo::isVirtualRegister(FromReg));
- assert(TargetRegisterInfo::isVirtualRegister(ToReg));
+ assert(Register::isVirtualRegister(FromReg));
+ assert(Register::isVirtualRegister(ToReg));
for (auto I = MRI->use_begin(FromReg), E = MRI->use_end(); I != E;) {
MachineOperand &O = *I;
++I;
diff --git a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index a09ccab483c..db0bd3b40ca 100644
--- a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -226,7 +226,7 @@ static bool areCombinableOperations(const TargetRegisterInfo *TRI,
}
static bool isEvenReg(unsigned Reg) {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
if (Hexagon::IntRegsRegClass.contains(Reg))
return (Reg - Hexagon::R0) % 2 == 0;
if (Hexagon::HvxVRRegClass.contains(Reg))
diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index c1f32e54e98..36a5fadc781 100644
--- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -385,7 +385,7 @@ bool HexagonEarlyIfConversion::isValidCandidate(const MachineBasicBlock *B)
if (!MO.isReg() || !MO.isDef())
continue;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
if (!isPredicate(R))
continue;
@@ -402,7 +402,7 @@ bool HexagonEarlyIfConversion::usesUndefVReg(const MachineInstr *MI) const {
if (!MO.isReg() || !MO.isUse())
continue;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
const MachineInstr *DefI = MRI->getVRegDef(R);
// "Undefined" virtual registers are actually defined via IMPLICIT_DEF.
@@ -492,7 +492,7 @@ unsigned HexagonEarlyIfConversion::countPredicateDefs(
if (!MO.isReg() || !MO.isDef())
continue;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
if (isPredicate(R))
PredDefs++;
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index c343e426ac7..52e97edac1a 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -285,7 +285,7 @@ bool HexagonExpandCondsets::isCondset(const MachineInstr &MI) {
}
LaneBitmask HexagonExpandCondsets::getLaneMask(unsigned Reg, unsigned Sub) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(Register::isVirtualRegister(Reg));
return Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
: MRI->getMaxLaneMaskForVReg(Reg);
}
@@ -364,7 +364,7 @@ void HexagonExpandCondsets::updateKillFlags(unsigned Reg) {
void HexagonExpandCondsets::updateDeadsInRange(unsigned Reg, LaneBitmask LM,
LiveRange &Range) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(Register::isVirtualRegister(Reg));
if (Range.empty())
return;
@@ -373,7 +373,7 @@ void HexagonExpandCondsets::updateDeadsInRange(unsigned Reg, LaneBitmask LM,
if (!Op.isReg() || !Op.isDef())
return { false, false };
unsigned DR = Op.getReg(), DSR = Op.getSubReg();
- if (!TargetRegisterInfo::isVirtualRegister(DR) || DR != Reg)
+ if (!Register::isVirtualRegister(DR) || DR != Reg)
return { false, false };
LaneBitmask SLM = getLaneMask(DR, DSR);
LaneBitmask A = SLM & LM;
@@ -551,8 +551,8 @@ void HexagonExpandCondsets::updateLiveness(std::set<unsigned> &RegSet,
bool Recalc, bool UpdateKills, bool UpdateDeads) {
UpdateKills |= UpdateDeads;
for (unsigned R : RegSet) {
- if (!TargetRegisterInfo::isVirtualRegister(R)) {
- assert(TargetRegisterInfo::isPhysicalRegister(R));
+ if (!Register::isVirtualRegister(R)) {
+ assert(Register::isPhysicalRegister(R));
// There shouldn't be any physical registers as operands, except
// possibly reserved registers.
assert(MRI->isReserved(R));
@@ -581,12 +581,12 @@ unsigned HexagonExpandCondsets::getCondTfrOpcode(const MachineOperand &SO,
if (SO.isReg()) {
unsigned PhysR;
RegisterRef RS = SO;
- if (TargetRegisterInfo::isVirtualRegister(RS.Reg)) {
+ if (Register::isVirtualRegister(RS.Reg)) {
const TargetRegisterClass *VC = MRI->getRegClass(RS.Reg);
assert(VC->begin() != VC->end() && "Empty register class");
PhysR = *VC->begin();
} else {
- assert(TargetRegisterInfo::isPhysicalRegister(RS.Reg));
+ assert(Register::isPhysicalRegister(RS.Reg));
PhysR = RS.Reg;
}
unsigned PhysS = (RS.Sub == 0) ? PhysR : TRI->getSubReg(PhysR, RS.Sub);
@@ -802,7 +802,7 @@ bool HexagonExpandCondsets::canMoveOver(MachineInstr &MI, ReferenceMap &Defs,
// For physical registers we would need to check register aliases, etc.
// and we don't want to bother with that. It would be of little value
// before the actual register rewriting (from virtual to physical).
- if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
+ if (!Register::isVirtualRegister(RR.Reg))
return false;
// No redefs for any operand.
if (isRefInMap(RR, Defs, Exec_Then))
@@ -999,7 +999,7 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
// subregisters are other physical registers, and we are not checking
// that.
RegisterRef RR = Op;
- if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
+ if (!Register::isVirtualRegister(RR.Reg))
return false;
ReferenceMap &Map = Op.isDef() ? Defs : Uses;
@@ -1091,7 +1091,7 @@ bool HexagonExpandCondsets::predicateInBlock(MachineBasicBlock &B,
}
bool HexagonExpandCondsets::isIntReg(RegisterRef RR, unsigned &BW) {
- if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
+ if (!Register::isVirtualRegister(RR.Reg))
return false;
const TargetRegisterClass *RC = MRI->getRegClass(RR.Reg);
if (RC == &Hexagon::IntRegsRegClass) {
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 3368ee4fb3b..5d2b5b6e536 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -306,7 +306,7 @@ static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR,
unsigned R = MO.getReg();
// Virtual registers will need scavenging, which then may require
// a stack slot.
- if (TargetRegisterInfo::isVirtualRegister(R))
+ if (Register::isVirtualRegister(R))
return true;
for (MCSubRegIterator S(R, &HRI, true); S.isValid(); ++S)
if (CSR[*S])
diff --git a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
index 81025c1c532..6c996c0c8a8 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -163,11 +163,11 @@ namespace {
}
static inline unsigned v2x(unsigned v) {
- return TargetRegisterInfo::virtReg2Index(v);
+ return Register::virtReg2Index(v);
}
static inline unsigned x2v(unsigned x) {
- return TargetRegisterInfo::index2VirtReg(x);
+ return Register::index2VirtReg(x);
}
};
@@ -267,7 +267,7 @@ namespace {
CellMapShadow(const BitTracker &T) : BT(T) {}
const BitTracker::RegisterCell &lookup(unsigned VR) {
- unsigned RInd = TargetRegisterInfo::virtReg2Index(VR);
+ unsigned RInd = Register::virtReg2Index(VR);
// Grow the vector to at least 32 elements.
if (RInd >= CVect.size())
CVect.resize(std::max(RInd+16, 32U), nullptr);
@@ -608,7 +608,7 @@ void HexagonGenInsert::buildOrderingMF(RegisterOrdering &RO) const {
if (MO.isReg() && MO.isDef()) {
unsigned R = MO.getReg();
assert(MO.getSubReg() == 0 && "Unexpected subregister in definition");
- if (TargetRegisterInfo::isVirtualRegister(R))
+ if (Register::isVirtualRegister(R))
RO.insert(std::make_pair(R, Index++));
}
}
@@ -725,7 +725,7 @@ void HexagonGenInsert::getInstrDefs(const MachineInstr *MI,
if (!MO.isReg() || !MO.isDef())
continue;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
Defs.insert(R);
}
@@ -738,7 +738,7 @@ void HexagonGenInsert::getInstrUses(const MachineInstr *MI,
if (!MO.isReg() || !MO.isUse())
continue;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
Uses.insert(R);
}
@@ -1478,8 +1478,7 @@ bool HexagonGenInsert::removeDeadCode(MachineDomTreeNode *N) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R) ||
- !MRI->use_nodbg_empty(R)) {
+ if (!Register::isVirtualRegister(R) || !MRI->use_nodbg_empty(R)) {
AllDead = false;
break;
}
@@ -1598,7 +1597,7 @@ bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) {
IterListType Out;
for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
- unsigned Idx = TargetRegisterInfo::virtReg2Index(I->first);
+ unsigned Idx = Register::virtReg2Index(I->first);
if (Idx >= Cutoff)
Out.push_back(I);
}
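The v2x/x2v wrappers and the Cutoff filter above rely on virtual-register numbers mapping onto a dense zero-based index, which is what lets a pass size flat arrays by MRI->getNumVirtRegs(). A sketch of that pair, again under the assumed encoding:

#include <cassert>

// Sketch: the dense index is the register number with the top bit cleared.
static bool isVirtualRegister(unsigned Reg) { return int(Reg) < 0; }

static unsigned virtReg2Index(unsigned Reg) {
  assert(isVirtualRegister(Reg) && "Not a virtual register");
  return Reg & ~(1u << 31); // clear the tag bit
}

static unsigned index2VirtReg(unsigned Index) {
  return Index | (1u << 31); // set the tag bit
}

// Typical traversal, as in the PPC and WebAssembly hunks later in this diff:
//   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I)
//     visit(index2VirtReg(I));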
diff --git a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
index e991fa8b61c..cd4b5fd221f 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -133,7 +133,7 @@ INITIALIZE_PASS_END(HexagonGenPredicate, "hexagon-gen-pred",
"Hexagon generate predicate operations", false, false)
bool HexagonGenPredicate::isPredReg(unsigned R) {
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
return false;
const TargetRegisterClass *RC = MRI->getRegClass(R);
return RC == &Hexagon::PredRegsRegClass;
@@ -213,7 +213,7 @@ void HexagonGenPredicate::collectPredicateGPR(MachineFunction &MF) {
case TargetOpcode::COPY:
if (isPredReg(MI->getOperand(1).getReg())) {
RegisterSubReg RD = MI->getOperand(0);
- if (TargetRegisterInfo::isVirtualRegister(RD.R))
+ if (Register::isVirtualRegister(RD.R))
PredGPRs.insert(RD);
}
break;
@@ -245,7 +245,7 @@ RegisterSubReg HexagonGenPredicate::getPredRegFor(const RegisterSubReg &Reg) {
// Create a predicate register for a given Reg. The newly created register
// will have its value copied from Reg, so that it can be later used as
// an operand in other instructions.
- assert(TargetRegisterInfo::isVirtualRegister(Reg.R));
+ assert(Register::isVirtualRegister(Reg.R));
RegToRegMap::iterator F = G2P.find(Reg);
if (F != G2P.end())
return F->second;
@@ -471,9 +471,9 @@ bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) {
continue;
RegisterSubReg DR = MI.getOperand(0);
RegisterSubReg SR = MI.getOperand(1);
- if (!TargetRegisterInfo::isVirtualRegister(DR.R))
+ if (!Register::isVirtualRegister(DR.R))
continue;
- if (!TargetRegisterInfo::isVirtualRegister(SR.R))
+ if (!Register::isVirtualRegister(SR.R))
continue;
if (MRI->getRegClass(DR.R) != PredRC)
continue;
diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index cecbaedb6d7..5c68d223200 100644
--- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1431,7 +1431,7 @@ bool HexagonHardwareLoops::loopCountMayWrapOrUnderFlow(
unsigned Reg = InitVal->getReg();
// We don't know the value of a physical register.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return true;
MachineInstr *Def = MRI->getVRegDef(Reg);
@@ -1509,7 +1509,7 @@ bool HexagonHardwareLoops::checkForImmediate(const MachineOperand &MO,
int64_t TV;
unsigned R = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
return false;
MachineInstr *DI = MRI->getVRegDef(R);
unsigned DOpc = DI->getOpcode();
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index a156de5ba12..135fb8e7a5b 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2094,12 +2094,12 @@ bool HexagonInstrInfo::isDependent(const MachineInstr &ProdMI,
if (RegA == RegB)
return true;
- if (TargetRegisterInfo::isPhysicalRegister(RegA))
+ if (Register::isPhysicalRegister(RegA))
for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs)
if (RegB == *SubRegs)
return true;
- if (TargetRegisterInfo::isPhysicalRegister(RegB))
+ if (Register::isPhysicalRegister(RegB))
for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs)
if (RegA == *SubRegs)
return true;
@@ -4091,7 +4091,7 @@ int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// Get DefIdx and UseIdx for super registers.
const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
- if (DefMO.isReg() && HRI.isPhysicalRegister(DefMO.getReg())) {
+ if (DefMO.isReg() && Register::isPhysicalRegister(DefMO.getReg())) {
if (DefMO.isImplicit()) {
for (MCSuperRegIterator SR(DefMO.getReg(), &HRI); SR.isValid(); ++SR) {
int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &HRI);
diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
index db44901ca70..1c038e31ff1 100644
--- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -290,7 +290,7 @@ static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII,
// at the machine code level, we don't need this, but if we decide
// to move new value jump prior to RA, we would need it.
MachineRegisterInfo &MRI = MF.getRegInfo();
- if (secondReg && !TargetRegisterInfo::isPhysicalRegister(cmpOp2)) {
+ if (secondReg && !Register::isPhysicalRegister(cmpOp2)) {
MachineInstr *def = MRI.getVRegDef(cmpOp2);
if (def->getOpcode() == TargetOpcode::COPY)
return false;
@@ -516,7 +516,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
jmpPos = MII;
jmpInstr = &MI;
predReg = MI.getOperand(0).getReg();
- afterRA = TargetRegisterInfo::isPhysicalRegister(predReg);
+ afterRA = Register::isPhysicalRegister(predReg);
// If ifconverter had not messed up the kill flags of the
// operands, the following check on the kill flag would suffice.
diff --git a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
index 8f761d2d480..2748e4500b3 100644
--- a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -139,8 +139,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
unsigned DstReg = Dst.getReg();
unsigned SrcReg = Src.getReg();
// Just handle virtual registers.
- if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
- TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (Register::isVirtualRegister(DstReg) &&
+ Register::isVirtualRegister(SrcReg)) {
// Map the following:
// %170 = SXTW %166
// PeepholeMap[170] = %166
@@ -188,8 +188,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
unsigned DstReg = Dst.getReg();
unsigned SrcReg = Src.getReg();
// Just handle virtual registers.
- if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
- TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (Register::isVirtualRegister(DstReg) &&
+ Register::isVirtualRegister(SrcReg)) {
// Map the following:
// %170 = NOT_xx %166
// PeepholeMap[170] = %166
@@ -210,8 +210,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
unsigned DstReg = Dst.getReg();
unsigned SrcReg = Src.getReg();
- if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
- TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ if (Register::isVirtualRegister(DstReg) &&
+ Register::isVirtualRegister(SrcReg)) {
// Try to find in the map.
if (unsigned PeepholeSrc = PeepholeMap.lookup(SrcReg)) {
// Change the 1st operand.
@@ -242,7 +242,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (RC0->getID() == Hexagon::PredRegsRegClassID) {
// Handle instructions that have a predicate register in op0
// (most cases of predicable instructions).
- if (TargetRegisterInfo::isVirtualRegister(Reg0)) {
+ if (Register::isVirtualRegister(Reg0)) {
// Try to find in the map.
if (unsigned PeepholeSrc = PeepholeMap.lookup(Reg0)) {
// Change the 1st operand and flip the opcode.
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index 013eede2d41..ec9d0b03275 100644
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -211,7 +211,7 @@ bool HexagonSplitDoubleRegs::isFixedInstr(const MachineInstr *MI) const {
if (!Op.isReg())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
return true;
}
return false;
@@ -224,14 +224,14 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
unsigned NumRegs = MRI->getNumVirtRegs();
BitVector DoubleRegs(NumRegs);
for (unsigned i = 0; i < NumRegs; ++i) {
- unsigned R = TargetRegisterInfo::index2VirtReg(i);
+ unsigned R = Register::index2VirtReg(i);
if (MRI->getRegClass(R) == DoubleRC)
DoubleRegs.set(i);
}
BitVector FixedRegs(NumRegs);
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
- unsigned R = TargetRegisterInfo::index2VirtReg(x);
+ unsigned R = Register::index2VirtReg(x);
MachineInstr *DefI = MRI->getVRegDef(R);
// In some cases a register may exist, but never be defined or used.
// It should never appear anywhere, but mark it as "fixed", just to be
@@ -244,7 +244,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
if (FixedRegs[x])
continue;
- unsigned R = TargetRegisterInfo::index2VirtReg(x);
+ unsigned R = Register::index2VirtReg(x);
LLVM_DEBUG(dbgs() << printReg(R, TRI) << " ~~");
USet &Asc = AssocMap[R];
for (auto U = MRI->use_nodbg_begin(R), Z = MRI->use_nodbg_end();
@@ -259,13 +259,13 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
if (&MO == &Op || !MO.isReg() || MO.getSubReg())
continue;
unsigned T = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(T)) {
+ if (!Register::isVirtualRegister(T)) {
FixedRegs.set(x);
continue;
}
if (MRI->getRegClass(T) != DoubleRC)
continue;
- unsigned u = TargetRegisterInfo::virtReg2Index(T);
+ unsigned u = Register::virtReg2Index(T);
if (FixedRegs[u])
continue;
LLVM_DEBUG(dbgs() << ' ' << printReg(T, TRI));
@@ -281,7 +281,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
unsigned NextP = 1;
USet Visited;
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
- unsigned R = TargetRegisterInfo::index2VirtReg(x);
+ unsigned R = Register::index2VirtReg(x);
if (Visited.count(R))
continue;
// Create a new partition for R.
@@ -400,7 +400,7 @@ int32_t HexagonSplitDoubleRegs::profit(const MachineInstr *MI) const {
}
int32_t HexagonSplitDoubleRegs::profit(unsigned Reg) const {
- assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ assert(Register::isVirtualRegister(Reg));
const MachineInstr *DefI = MRI->getVRegDef(Reg);
switch (DefI->getOpcode()) {
@@ -605,7 +605,7 @@ void HexagonSplitDoubleRegs::createHalfInstr(unsigned Opc, MachineInstr *MI,
// For register operands, set the subregister.
unsigned R = Op.getReg();
unsigned SR = Op.getSubReg();
- bool isVirtReg = TargetRegisterInfo::isVirtualRegister(R);
+ bool isVirtReg = Register::isVirtualRegister(R);
bool isKill = Op.isKill();
if (isVirtReg && MRI->getRegClass(R) == DoubleRC) {
isKill = false;
@@ -1105,7 +1105,7 @@ void HexagonSplitDoubleRegs::collapseRegPairs(MachineInstr *MI,
if (!Op.isReg() || !Op.isUse())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
+ if (!Register::isVirtualRegister(R))
continue;
if (MRI->getRegClass(R) != DoubleRC || Op.getSubReg())
continue;
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 7ec63a642b0..31dac55db2d 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -230,7 +230,7 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
else if (SchedRetvalOptimization) {
const MachineInstr *MI = DAG->SUnits[su].getInstr();
if (MI->isCopy() &&
- TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
+ Register::isPhysicalRegister(MI->getOperand(1).getReg())) {
// %vregX = COPY %r0
VRegHoldingReg[MI->getOperand(0).getReg()] = MI->getOperand(1).getReg();
LastVRegUse.erase(MI->getOperand(1).getReg());
@@ -243,8 +243,7 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
VRegHoldingReg.count(MO.getReg())) {
// <use of %vregX>
LastVRegUse[VRegHoldingReg[MO.getReg()]] = &DAG->SUnits[su];
- } else if (MO.isDef() &&
- TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ } else if (MO.isDef() && Register::isPhysicalRegister(MO.getReg())) {
for (MCRegAliasIterator AI(MO.getReg(), &TRI, true); AI.isValid();
++AI) {
if (LastVRegUse.count(*AI) &&
diff --git a/llvm/lib/Target/Hexagon/RDFCopy.cpp b/llvm/lib/Target/Hexagon/RDFCopy.cpp
index 7702024f87b..a9d39fd4b2d 100644
--- a/llvm/lib/Target/Hexagon/RDFCopy.cpp
+++ b/llvm/lib/Target/Hexagon/RDFCopy.cpp
@@ -45,8 +45,8 @@ bool CopyPropagation::interpretAsCopy(const MachineInstr *MI, EqualityMap &EM) {
const MachineOperand &Src = MI->getOperand(1);
RegisterRef DstR = DFG.makeRegRef(Dst.getReg(), Dst.getSubReg());
RegisterRef SrcR = DFG.makeRegRef(Src.getReg(), Src.getSubReg());
- assert(TargetRegisterInfo::isPhysicalRegister(DstR.Reg));
- assert(TargetRegisterInfo::isPhysicalRegister(SrcR.Reg));
+ assert(Register::isPhysicalRegister(DstR.Reg));
+ assert(Register::isPhysicalRegister(SrcR.Reg));
const TargetRegisterInfo &TRI = DFG.getTRI();
if (TRI.getMinimalPhysRegClass(DstR.Reg) !=
TRI.getMinimalPhysRegClass(SrcR.Reg))
diff --git a/llvm/lib/Target/Hexagon/RDFGraph.cpp b/llvm/lib/Target/Hexagon/RDFGraph.cpp
index 9d8f706b8a0..7b1a4007d12 100644
--- a/llvm/lib/Target/Hexagon/RDFGraph.cpp
+++ b/llvm/lib/Target/Hexagon/RDFGraph.cpp
@@ -963,7 +963,7 @@ void DataFlowGraph::build(unsigned Options) {
RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
assert(PhysicalRegisterInfo::isRegMaskId(Reg) ||
- TargetRegisterInfo::isPhysicalRegister(Reg));
+ Register::isPhysicalRegister(Reg));
assert(Reg != 0);
if (Sub != 0)
Reg = TRI.getSubReg(Reg, Sub);
@@ -1292,7 +1292,7 @@ void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
continue;
unsigned R = Op.getReg();
- if (!R || !TargetRegisterInfo::isPhysicalRegister(R))
+ if (!R || !Register::isPhysicalRegister(R))
continue;
uint16_t Flags = NodeAttrs::None;
if (TOI.isPreserving(In, OpN)) {
@@ -1337,7 +1337,7 @@ void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
continue;
unsigned R = Op.getReg();
- if (!R || !TargetRegisterInfo::isPhysicalRegister(R) || DoneDefs.test(R))
+ if (!R || !Register::isPhysicalRegister(R) || DoneDefs.test(R))
continue;
RegisterRef RR = makeRegRef(Op);
uint16_t Flags = NodeAttrs::None;
@@ -1366,7 +1366,7 @@ void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
if (!Op.isReg() || !Op.isUse())
continue;
unsigned R = Op.getReg();
- if (!R || !TargetRegisterInfo::isPhysicalRegister(R))
+ if (!R || !Register::isPhysicalRegister(R))
continue;
uint16_t Flags = NodeAttrs::None;
if (Op.isUndef())
diff --git a/llvm/lib/Target/Hexagon/RDFLiveness.cpp b/llvm/lib/Target/Hexagon/RDFLiveness.cpp
index 9cd304aa10b..ed8f08f6224 100644
--- a/llvm/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/llvm/lib/Target/Hexagon/RDFLiveness.cpp
@@ -890,7 +890,7 @@ void Liveness::resetKills(MachineBasicBlock *B) {
if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isPhysicalRegister(R))
+ if (!Register::isPhysicalRegister(R))
continue;
for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
Live.reset(*SR);
@@ -899,7 +899,7 @@ void Liveness::resetKills(MachineBasicBlock *B) {
if (!Op.isReg() || !Op.isUse() || Op.isUndef())
continue;
unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isPhysicalRegister(R))
+ if (!Register::isPhysicalRegister(R))
continue;
bool IsLive = false;
for (MCRegAliasIterator AR(R, &TRI, true); AR.isValid(); ++AR) {
diff --git a/llvm/lib/Target/Hexagon/RDFRegisters.cpp b/llvm/lib/Target/Hexagon/RDFRegisters.cpp
index 6e0f33695f0..b5675784e34 100644
--- a/llvm/lib/Target/Hexagon/RDFRegisters.cpp
+++ b/llvm/lib/Target/Hexagon/RDFRegisters.cpp
@@ -101,7 +101,7 @@ RegisterRef PhysicalRegisterInfo::normalize(RegisterRef RR) const {
std::set<RegisterId> PhysicalRegisterInfo::getAliasSet(RegisterId Reg) const {
// Do not include Reg in the alias set.
std::set<RegisterId> AS;
- assert(isRegMaskId(Reg) || TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(isRegMaskId(Reg) || Register::isPhysicalRegister(Reg));
if (isRegMaskId(Reg)) {
// XXX SLOW
const uint32_t *MB = getRegMaskBits(Reg);
@@ -129,8 +129,8 @@ std::set<RegisterId> PhysicalRegisterInfo::getAliasSet(RegisterId Reg) const {
}
bool PhysicalRegisterInfo::aliasRR(RegisterRef RA, RegisterRef RB) const {
- assert(TargetRegisterInfo::isPhysicalRegister(RA.Reg));
- assert(TargetRegisterInfo::isPhysicalRegister(RB.Reg));
+ assert(Register::isPhysicalRegister(RA.Reg));
+ assert(Register::isPhysicalRegister(RB.Reg));
MCRegUnitMaskIterator UMA(RA.Reg, &TRI);
MCRegUnitMaskIterator UMB(RB.Reg, &TRI);
@@ -160,7 +160,7 @@ bool PhysicalRegisterInfo::aliasRR(RegisterRef RA, RegisterRef RB) const {
}
bool PhysicalRegisterInfo::aliasRM(RegisterRef RR, RegisterRef RM) const {
- assert(TargetRegisterInfo::isPhysicalRegister(RR.Reg) && isRegMaskId(RM.Reg));
+ assert(Register::isPhysicalRegister(RR.Reg) && isRegMaskId(RM.Reg));
const uint32_t *MB = getRegMaskBits(RM.Reg);
bool Preserved = MB[RR.Reg/32] & (1u << (RR.Reg%32));
// If the lane mask information is "full", e.g. when the given lane mask
diff --git a/llvm/lib/Target/Hexagon/RDFRegisters.h b/llvm/lib/Target/Hexagon/RDFRegisters.h
index 646233bacda..4afaf80e465 100644
--- a/llvm/lib/Target/Hexagon/RDFRegisters.h
+++ b/llvm/lib/Target/Hexagon/RDFRegisters.h
@@ -99,15 +99,15 @@ namespace rdf {
const MachineFunction &mf);
static bool isRegMaskId(RegisterId R) {
- return TargetRegisterInfo::isStackSlot(R);
+ return Register::isStackSlot(R);
}
RegisterId getRegMaskId(const uint32_t *RM) const {
- return TargetRegisterInfo::index2StackSlot(RegMasks.find(RM));
+ return Register::index2StackSlot(RegMasks.find(RM));
}
const uint32_t *getRegMaskBits(RegisterId R) const {
- return RegMasks.get(TargetRegisterInfo::stackSlot2Index(R));
+ return RegMasks.get(Register::stackSlot2Index(R));
}
RegisterRef normalize(RegisterRef RR) const;
@@ -125,7 +125,7 @@ namespace rdf {
}
const BitVector &getMaskUnits(RegisterId MaskId) const {
- return MaskInfos[TargetRegisterInfo::stackSlot2Index(MaskId)].Units;
+ return MaskInfos[Register::stackSlot2Index(MaskId)].Units;
}
RegisterRef mapTo(RegisterRef RR, unsigned R) const;
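RDFRegisters makes the stack-slot trick explicit: a register-mask id is an ordinary table index pushed into the stack-slot band, so one RegisterId value can name either a physical register or a mask, and isRegMaskId() reduces to isStackSlot(). An illustrative round trip, reusing the assumed helpers from the sketches above:

#include <cassert>

static bool isStackSlot(unsigned Reg) { return int(Reg) >= (1 << 30); }
static unsigned index2StackSlot(int FI) { return unsigned(FI) + (1u << 30); }
static int stackSlot2Index(unsigned Reg) { return int(Reg - (1u << 30)); }

int main() {
  unsigned MaskId = index2StackSlot(5); // key for the 5th RegMasks entry
  assert(isStackSlot(MaskId));          // exactly what isRegMaskId() tests
  assert(stackSlot2Index(MaskId) == 5); // the table index comes back out
  return 0;
}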
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index 700a8606910..2b0e53c8e4e 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -457,7 +457,7 @@ bool LanaiInstrInfo::analyzeSelect(const MachineInstr &MI,
// return the defining instruction.
static MachineInstr *canFoldIntoSelect(unsigned Reg,
const MachineRegisterInfo &MRI) {
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return nullptr;
if (!MRI.hasOneNonDBGUse(Reg))
return nullptr;
@@ -479,7 +479,7 @@ static MachineInstr *canFoldIntoSelect(unsigned Reg,
// MI can't have any tied operands, that would conflict with predication.
if (MO.isTied())
return nullptr;
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
return nullptr;
if (MO.isDef() && !MO.isDead())
return nullptr;
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index c234c309d76..0d735c20ec2 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -358,7 +358,7 @@ unsigned Mips16InstrInfo::loadImmediate(unsigned FrameReg, int64_t Imm,
for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
MachineOperand &MO = II->getOperand(i);
if (MO.isReg() && MO.getReg() != 0 && !MO.isDef() &&
- !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ !Register::isVirtualRegister(MO.getReg()))
Candidates.reset(MO.getReg());
}
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
index 2ec12fda70c..2cb2e9461ac 100644
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -84,7 +84,7 @@ MipsInstructionSelector::MipsInstructionSelector(
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
MachineRegisterInfo &MRI) const {
Register DstReg = I.getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
return true;
const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
@@ -289,7 +289,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
const TargetRegisterClass *DefRC = nullptr;
- if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ if (Register::isPhysicalRegister(DestReg))
DefRC = TRI.getRegClass(DestReg);
else
DefRC = getRegClassForTypeOnBank(OpSize,
diff --git a/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp b/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
index 5ef07a2d283..c3853e71e7d 100644
--- a/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
+++ b/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
@@ -127,8 +127,7 @@ static MachineOperand *getCallTargetRegOpnd(MachineInstr &MI) {
MachineOperand &MO = MI.getOperand(0);
- if (!MO.isReg() || !MO.isUse() ||
- !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (!MO.isReg() || !MO.isUse() || !Register::isVirtualRegister(MO.getReg()))
return nullptr;
return &MO;
diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
index dba866d781b..fc514cd2b3b 100644
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -163,8 +163,7 @@ void MipsRegisterBankInfo::AmbiguousRegDefUseContainer::addDefUses(
MachineInstr *NonCopyInstr = skipCopiesOutgoing(&UseMI);
// Copy with many uses.
if (NonCopyInstr->getOpcode() == TargetOpcode::COPY &&
- !TargetRegisterInfo::isPhysicalRegister(
- NonCopyInstr->getOperand(0).getReg()))
+ !Register::isPhysicalRegister(NonCopyInstr->getOperand(0).getReg()))
addDefUses(NonCopyInstr->getOperand(0).getReg(), MRI);
else
DefUses.push_back(skipCopiesOutgoing(&UseMI));
@@ -186,7 +185,7 @@ MipsRegisterBankInfo::AmbiguousRegDefUseContainer::skipCopiesOutgoing(
const MachineRegisterInfo &MRI = MF.getRegInfo();
MachineInstr *Ret = MI;
while (Ret->getOpcode() == TargetOpcode::COPY &&
- !TargetRegisterInfo::isPhysicalRegister(Ret->getOperand(0).getReg()) &&
+ !Register::isPhysicalRegister(Ret->getOperand(0).getReg()) &&
MRI.hasOneUse(Ret->getOperand(0).getReg())) {
Ret = &(*MRI.use_instr_begin(Ret->getOperand(0).getReg()));
}
@@ -200,7 +199,7 @@ MipsRegisterBankInfo::AmbiguousRegDefUseContainer::skipCopiesIncoming(
const MachineRegisterInfo &MRI = MF.getRegInfo();
MachineInstr *Ret = MI;
while (Ret->getOpcode() == TargetOpcode::COPY &&
- !TargetRegisterInfo::isPhysicalRegister(Ret->getOperand(1).getReg()))
+ !Register::isPhysicalRegister(Ret->getOperand(1).getReg()))
Ret = MRI.getVRegDef(Ret->getOperand(1).getReg());
return Ret;
}
@@ -318,8 +317,7 @@ void MipsRegisterBankInfo::TypeInfoForMF::setTypes(const MachineInstr *MI,
void MipsRegisterBankInfo::TypeInfoForMF::setTypesAccordingToPhysicalRegister(
const MachineInstr *MI, const MachineInstr *CopyInst, unsigned Op) {
- assert((TargetRegisterInfo::isPhysicalRegister(
- CopyInst->getOperand(Op).getReg())) &&
+ assert((Register::isPhysicalRegister(CopyInst->getOperand(Op).getReg())) &&
"Copies of non physical registers should not be considered here.\n");
const MachineFunction &MF = *CopyInst->getMF();
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 5f38b4a3c4c..02fc2f8ffcd 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -282,7 +282,7 @@ bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO,
}
unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
DenseMap<unsigned, unsigned> &RegMap = VRegMapping[RC];
@@ -508,7 +508,7 @@ const MCSymbol *NVPTXAsmPrinter::getFunctionFrameSymbol() const {
void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
unsigned RegNo = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isVirtualRegister(RegNo)) {
+ if (Register::isVirtualRegister(RegNo)) {
OutStreamer->AddComment(Twine("implicit-def: ") +
getVirtualRegisterName(RegNo));
} else {
@@ -1653,7 +1653,7 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
// We use the per-class virtual register number in the PTX output.
unsigned int numVRs = MRI->getNumVirtRegs();
for (unsigned i = 0; i < numVRs; i++) {
- unsigned int vr = TRI->index2VirtReg(i);
+ unsigned int vr = Register::index2VirtReg(i);
const TargetRegisterClass *RC = MRI->getRegClass(vr);
DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
int n = regmap.size();
@@ -2212,7 +2212,7 @@ void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
const MachineOperand &MO = MI->getOperand(opNum);
switch (MO.getType()) {
case MachineOperand::MO_Register:
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (Register::isPhysicalRegister(MO.getReg())) {
if (MO.getReg() == NVPTX::VRDepot)
O << DEPOTNAME << getFunctionNumber();
else
diff --git a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
index 629757db870..5e6411c61ea 100644
--- a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -81,7 +81,7 @@ static bool isCVTAToLocalCombinationCandidate(MachineInstr &Root) {
auto &Op = Root.getOperand(1);
const auto &MRI = MF.getRegInfo();
MachineInstr *GenericAddrDef = nullptr;
- if (Op.isReg() && TargetRegisterInfo::isVirtualRegister(Op.getReg())) {
+ if (Op.isReg() && Register::isVirtualRegister(Op.getReg())) {
GenericAddrDef = MRI.getUniqueVRegDef(Op.getReg());
}
diff --git a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index 5e9a661f8f0..d325b078979 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -340,9 +340,10 @@ bool PPCBranchCoalescing::identicalOperands(
if (Op1.isIdenticalTo(Op2)) {
// filter out instructions with physical-register uses
- if (Op1.isReg() && TargetRegisterInfo::isPhysicalRegister(Op1.getReg())
- // If the physical register is constant then we can assume the value
- // has not changed between uses.
+ if (Op1.isReg() &&
+ Register::isPhysicalRegister(Op1.getReg())
+ // If the physical register is constant then we can assume the value
+ // has not changed between uses.
&& !(Op1.isUse() && MRI->isConstantPhysReg(Op1.getReg()))) {
LLVM_DEBUG(dbgs() << "The operands are not provably identical.\n");
return false;
@@ -355,8 +356,8 @@ bool PPCBranchCoalescing::identicalOperands(
// definition of the register produces the same value. If they produce the
// same value, consider them to be identical.
if (Op1.isReg() && Op2.isReg() &&
- TargetRegisterInfo::isVirtualRegister(Op1.getReg()) &&
- TargetRegisterInfo::isVirtualRegister(Op2.getReg())) {
+ Register::isVirtualRegister(Op1.getReg()) &&
+ Register::isVirtualRegister(Op2.getReg())) {
MachineInstr *Op1Def = MRI->getVRegDef(Op1.getReg());
MachineInstr *Op2Def = MRI->getVRegDef(Op2.getReg());
if (TII->produceSameValue(*Op1Def, *Op2Def, MRI)) {
@@ -456,7 +457,7 @@ bool PPCBranchCoalescing::canMoveToEnd(const MachineInstr &MI,
<< TargetMBB.getNumber() << "\n");
for (auto &Use : MI.uses()) {
- if (Use.isReg() && TargetRegisterInfo::isVirtualRegister(Use.getReg())) {
+ if (Use.isReg() && Register::isVirtualRegister(Use.getReg())) {
MachineInstr *DefInst = MRI->getVRegDef(Use.getReg());
if (DefInst->isPHI() && DefInst->getParent() == MI.getParent()) {
LLVM_DEBUG(dbgs() << " *** Cannot move this instruction ***\n");
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 2cb0387d9a9..645a740572c 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -371,7 +371,7 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
// by the scheduler. Detect them now.
bool HasVectorVReg = false;
for (unsigned i = 0, e = RegInfo->getNumVirtRegs(); i != e; ++i) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ unsigned Reg = Register::index2VirtReg(i);
if (RegInfo->getRegClass(Reg) == &PPC::VRRCRegClass) {
HasVectorVReg = true;
break;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 8355b1af53d..7ffbab72d95 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -187,7 +187,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
unsigned Reg = DefMO.getReg();
bool IsRegCR;
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
const MachineRegisterInfo *MRI =
&DefMI.getParent()->getParent()->getRegInfo();
IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
@@ -1671,7 +1671,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
// Look through copies unless that gets us to a physical register.
unsigned ActualSrc = TRI->lookThruCopyLike(SrcReg, MRI);
- if (TargetRegisterInfo::isVirtualRegister(ActualSrc))
+ if (Register::isVirtualRegister(ActualSrc))
SrcReg = ActualSrc;
// Get the unique definition of SrcReg.
@@ -2360,10 +2360,10 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI(
if (!MI.getOperand(i).isReg())
continue;
unsigned Reg = MI.getOperand(i).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
unsigned TrueReg = TRI->lookThruCopyLike(Reg, MRI);
- if (TargetRegisterInfo::isVirtualRegister(TrueReg)) {
+ if (Register::isVirtualRegister(TrueReg)) {
DefMI = MRI->getVRegDef(TrueReg);
if (DefMI->getOpcode() == PPC::LI || DefMI->getOpcode() == PPC::LI8) {
OpNoForForwarding = i;
@@ -3619,7 +3619,7 @@ bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI,
// If the operand at III.ZeroIsSpecialNew is a physical reg (e.g. ZERO/ZERO8), no
// need to fix up register class.
unsigned RegToModify = MI.getOperand(III.ZeroIsSpecialNew).getReg();
- if (TargetRegisterInfo::isVirtualRegister(RegToModify)) {
+ if (Register::isVirtualRegister(RegToModify)) {
const TargetRegisterClass *NewRC =
MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
&PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
@@ -3835,7 +3835,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
}
// If this is a copy from another register, we recursively check the source.
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (!Register::isVirtualRegister(SrcReg))
return false;
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
if (SrcMI != NULL)
@@ -3859,7 +3859,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
// logical operation with 16-bit immediate does not change the upper bits.
// So, we track the operand register as we do for register copy.
unsigned SrcReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (!Register::isVirtualRegister(SrcReg))
return false;
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
if (SrcMI != NULL)
@@ -3888,7 +3888,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
for (unsigned I = 1; I != E; I += D) {
if (MI.getOperand(I).isReg()) {
unsigned SrcReg = MI.getOperand(I).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (!Register::isVirtualRegister(SrcReg))
return false;
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
if (SrcMI == NULL || !isSignOrZeroExtended(*SrcMI, SignExt, Depth+1))
@@ -3913,9 +3913,9 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
unsigned SrcReg1 = MI.getOperand(1).getReg();
unsigned SrcReg2 = MI.getOperand(2).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg1) ||
- !TargetRegisterInfo::isVirtualRegister(SrcReg2))
- return false;
+ if (!Register::isVirtualRegister(SrcReg1) ||
+ !Register::isVirtualRegister(SrcReg2))
+ return false;
const MachineInstr *MISrc1 = MRI->getVRegDef(SrcReg1);
const MachineInstr *MISrc2 = MRI->getVRegDef(SrcReg2);
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index 446246358e9..1b48bbaf1f4 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -149,7 +149,7 @@ static MachineInstr *getVRegDefOrNull(MachineOperand *Op,
return nullptr;
unsigned Reg = Op->getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return nullptr;
return MRI->getVRegDef(Reg);
@@ -344,8 +344,7 @@ bool PPCMIPeephole::simplifyCode(void) {
unsigned TrueReg2 =
TRI->lookThruCopyLike(MI.getOperand(2).getReg(), MRI);
- if (TrueReg1 == TrueReg2
- && TargetRegisterInfo::isVirtualRegister(TrueReg1)) {
+ if (TrueReg1 == TrueReg2 && Register::isVirtualRegister(TrueReg1)) {
MachineInstr *DefMI = MRI->getVRegDef(TrueReg1);
unsigned DefOpc = DefMI ? DefMI->getOpcode() : 0;
@@ -358,7 +357,7 @@ bool PPCMIPeephole::simplifyCode(void) {
return false;
unsigned DefReg =
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
- if (TargetRegisterInfo::isVirtualRegister(DefReg)) {
+ if (Register::isVirtualRegister(DefReg)) {
MachineInstr *LoadMI = MRI->getVRegDef(DefReg);
if (LoadMI && LoadMI->getOpcode() == PPC::LXVDSX)
return true;
@@ -444,7 +443,7 @@ bool PPCMIPeephole::simplifyCode(void) {
unsigned OpNo = MyOpcode == PPC::XXSPLTW ? 1 : 2;
unsigned TrueReg =
TRI->lookThruCopyLike(MI.getOperand(OpNo).getReg(), MRI);
- if (!TargetRegisterInfo::isVirtualRegister(TrueReg))
+ if (!Register::isVirtualRegister(TrueReg))
break;
MachineInstr *DefMI = MRI->getVRegDef(TrueReg);
if (!DefMI)
@@ -454,7 +453,7 @@ bool PPCMIPeephole::simplifyCode(void) {
if (DefOpcode != PPC::XVCVSPSXWS && DefOpcode != PPC::XVCVSPUXWS)
return false;
unsigned ConvReg = DefMI->getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(ConvReg))
+ if (!Register::isVirtualRegister(ConvReg))
return false;
MachineInstr *Splt = MRI->getVRegDef(ConvReg);
return Splt && (Splt->getOpcode() == PPC::LXVWSX ||
@@ -507,7 +506,7 @@ bool PPCMIPeephole::simplifyCode(void) {
// If this is a DP->SP conversion fed by an FRSP, the FRSP is redundant.
unsigned TrueReg =
TRI->lookThruCopyLike(MI.getOperand(1).getReg(), MRI);
- if (!TargetRegisterInfo::isVirtualRegister(TrueReg))
+ if (!Register::isVirtualRegister(TrueReg))
break;
MachineInstr *DefMI = MRI->getVRegDef(TrueReg);
@@ -518,8 +517,8 @@ bool PPCMIPeephole::simplifyCode(void) {
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
unsigned DefsReg2 =
TRI->lookThruCopyLike(DefMI->getOperand(2).getReg(), MRI);
- if (!TargetRegisterInfo::isVirtualRegister(DefsReg1) ||
- !TargetRegisterInfo::isVirtualRegister(DefsReg2))
+ if (!Register::isVirtualRegister(DefsReg1) ||
+ !Register::isVirtualRegister(DefsReg2))
break;
MachineInstr *P1 = MRI->getVRegDef(DefsReg1);
MachineInstr *P2 = MRI->getVRegDef(DefsReg2);
@@ -567,7 +566,7 @@ bool PPCMIPeephole::simplifyCode(void) {
case PPC::EXTSH8_32_64: {
if (!EnableSExtElimination) break;
unsigned NarrowReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(NarrowReg))
+ if (!Register::isVirtualRegister(NarrowReg))
break;
MachineInstr *SrcMI = MRI->getVRegDef(NarrowReg);
@@ -611,7 +610,7 @@ bool PPCMIPeephole::simplifyCode(void) {
case PPC::EXTSW_32_64: {
if (!EnableSExtElimination) break;
unsigned NarrowReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(NarrowReg))
+ if (!Register::isVirtualRegister(NarrowReg))
break;
MachineInstr *SrcMI = MRI->getVRegDef(NarrowReg);
@@ -680,7 +679,7 @@ bool PPCMIPeephole::simplifyCode(void) {
break;
unsigned SrcReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (!Register::isVirtualRegister(SrcReg))
break;
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
@@ -696,7 +695,7 @@ bool PPCMIPeephole::simplifyCode(void) {
SrcMI = SubRegMI;
if (SubRegMI->getOpcode() == PPC::COPY) {
unsigned CopyReg = SubRegMI->getOperand(1).getReg();
- if (TargetRegisterInfo::isVirtualRegister(CopyReg))
+ if (Register::isVirtualRegister(CopyReg))
SrcMI = MRI->getVRegDef(CopyReg);
}
@@ -927,7 +926,7 @@ static unsigned getSrcVReg(unsigned Reg, MachineBasicBlock *BB1,
}
else if (Inst->isFullCopy())
NextReg = Inst->getOperand(1).getReg();
- if (NextReg == SrcReg || !TargetRegisterInfo::isVirtualRegister(NextReg))
+ if (NextReg == SrcReg || !Register::isVirtualRegister(NextReg))
break;
SrcReg = NextReg;
}
@@ -950,8 +949,7 @@ static bool eligibleForCompareElimination(MachineBasicBlock &MBB,
(*BII).getOperand(1).isReg()) {
// We optimize only if the condition code is used by exactly one BCC.
unsigned CndReg = (*BII).getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(CndReg) ||
- !MRI->hasOneNonDBGUse(CndReg))
+ if (!Register::isVirtualRegister(CndReg) || !MRI->hasOneNonDBGUse(CndReg))
return false;
MachineInstr *CMPI = MRI->getVRegDef(CndReg);
@@ -961,7 +959,7 @@ static bool eligibleForCompareElimination(MachineBasicBlock &MBB,
// We skip this BB if a physical register is used in the comparison.
for (MachineOperand &MO : CMPI->operands())
- if (MO.isReg() && !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (MO.isReg() && !Register::isVirtualRegister(MO.getReg()))
return false;
return true;
@@ -1335,7 +1333,7 @@ bool PPCMIPeephole::emitRLDICWhenLoweringJumpTables(MachineInstr &MI) {
return false;
unsigned SrcReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (!Register::isVirtualRegister(SrcReg))
return false;
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
@@ -1415,7 +1413,7 @@ bool PPCMIPeephole::combineSEXTAndSHL(MachineInstr &MI,
return false;
unsigned SrcReg = MI.getOperand(1).getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ if (!Register::isVirtualRegister(SrcReg))
return false;
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
diff --git a/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp b/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
index 8eaa6dfe2bf..f2c71913a37 100644
--- a/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
+++ b/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
@@ -535,7 +535,7 @@ MachineInstr *PPCReduceCRLogicals::lookThroughCRCopy(unsigned Reg,
unsigned &Subreg,
MachineInstr *&CpDef) {
Subreg = -1;
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return nullptr;
MachineInstr *Copy = MRI->getVRegDef(Reg);
CpDef = Copy;
@@ -543,7 +543,7 @@ MachineInstr *PPCReduceCRLogicals::lookThroughCRCopy(unsigned Reg,
return Copy;
unsigned CopySrc = Copy->getOperand(1).getReg();
Subreg = Copy->getOperand(1).getSubReg();
- if (!TargetRegisterInfo::isVirtualRegister(CopySrc)) {
+ if (!Register::isVirtualRegister(CopySrc)) {
const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
// Set the Subreg
if (CopySrc == PPC::CR0EQ || CopySrc == PPC::CR6EQ)
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 12554ea8d07..f01e476d8a7 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -391,7 +391,7 @@ bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) co
bool PPCRegisterInfo::isCallerPreservedPhysReg(unsigned PhysReg,
const MachineFunction &MF) const {
- assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
+ assert(Register::isPhysicalRegister(PhysReg));
const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
const MachineFrameInfo &MFI = MF.getFrameInfo();
if (!TM.isPPC64())
diff --git a/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp b/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp
index 719ed7b6387..fb2b26a3bf3 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp
@@ -50,7 +50,7 @@ namespace {
bool IsRegInClass(unsigned Reg, const TargetRegisterClass *RC,
MachineRegisterInfo &MRI) {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
return RC->hasSubClassEq(MRI.getRegClass(Reg));
} else if (RC->contains(Reg)) {
return true;
diff --git a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index ce78239df0a..aedbf5150cd 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -127,7 +127,7 @@ protected:
continue;
unsigned AddendSrcReg = AddendMI->getOperand(1).getReg();
- if (TargetRegisterInfo::isVirtualRegister(AddendSrcReg)) {
+ if (Register::isVirtualRegister(AddendSrcReg)) {
if (MRI.getRegClass(AddendMI->getOperand(0).getReg()) !=
MRI.getRegClass(AddendSrcReg))
continue;
@@ -208,7 +208,7 @@ protected:
// legality checks above, the live range for the addend source register
// could be extended), but it seems likely that such a trivial copy can
// be coalesced away later, and thus is not worth the effort.
- if (TargetRegisterInfo::isVirtualRegister(AddendSrcReg) &&
+ if (Register::isVirtualRegister(AddendSrcReg) &&
!LIS->getInterval(AddendSrcReg).liveAt(FMAIdx))
continue;
@@ -314,7 +314,7 @@ protected:
// Extend the live interval of the addend source (it might end at the
// copy to be removed, or somewhere in between there and here). This
// is necessary only if it is a physical register.
- if (!TargetRegisterInfo::isVirtualRegister(AddendSrcReg))
+ if (!Register::isVirtualRegister(AddendSrcReg))
for (MCRegUnitIterator Units(AddendSrcReg, TRI); Units.isValid();
++Units) {
unsigned Unit = *Units;
diff --git a/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index 44175af7f9b..aced629f9b2 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -158,7 +158,7 @@ private:
// Return true iff the given register is in the given class.
bool isRegInClass(unsigned Reg, const TargetRegisterClass *RC) {
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ if (Register::isVirtualRegister(Reg))
return RC->hasSubClassEq(MRI->getRegClass(Reg));
return RC->contains(Reg);
}
@@ -566,7 +566,7 @@ unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg,
CopySrcReg = MI->getOperand(2).getReg();
}
- if (!TargetRegisterInfo::isVirtualRegister(CopySrcReg)) {
+ if (!Register::isVirtualRegister(CopySrcReg)) {
if (!isScalarVecReg(CopySrcReg))
SwapVector[VecIdx].MentionsPhysVR = 1;
return CopySrcReg;
@@ -605,7 +605,7 @@ void PPCVSXSwapRemoval::formWebs() {
if (!isVecReg(Reg) && !isScalarVecReg(Reg))
continue;
- if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (!Register::isVirtualRegister(Reg)) {
if (!(MI->isCopy() && isScalarVecReg(Reg)))
SwapVector[EntryIdx].MentionsPhysVR = 1;
continue;
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 57c1cf4ec70..6841ce084cf 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1258,13 +1258,14 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
assert(NumOps == 3 && "Expected two source registers.");
Register DstReg = MI.getOperand(0).getReg();
Register DstPhys =
- (TRI->isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);
+ (Register::isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);
Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
: ((OpNum == 1 && MI.isCommutable())
? MI.getOperand(2).getReg()
: Register()));
if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
- TRI->isVirtualRegister(SrcReg) && DstPhys == VRM->getPhys(SrcReg))
+ Register::isVirtualRegister(SrcReg) &&
+ DstPhys == VRM->getPhys(SrcReg))
NeedsCommute = (OpNum == 1);
else
MemOpcode = -1;
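
The SystemZ hunk above pairs the predicate with VRM->getPhys(): a virtual register is resolved through the virtual-register map, while a physical register is used as-is. Below is a toy model of that resolve-if-virtual pattern; ToyVirtRegMap and resolvePhys are hypothetical names, and the encoding follows the earlier sketch:

    #include <cassert>
    #include <unordered_map>

    static bool isVirtualRegister(unsigned Reg) { return int(Reg) < 0; }

    // Hypothetical stand-in for LLVM's VirtRegMap: vreg -> assigned physreg.
    struct ToyVirtRegMap {
      std::unordered_map<unsigned, unsigned> Map;
      unsigned getPhys(unsigned VReg) const {
        auto It = Map.find(VReg);
        return It == Map.end() ? 0u : It->second; // 0 = nothing assigned
      }
    };

    // Mirrors the pattern in the hunk: virtual registers go through the map,
    // physical registers are already resolved.
    static unsigned resolvePhys(unsigned Reg, const ToyVirtRegMap &VRM) {
      return isVirtualRegister(Reg) ? VRM.getPhys(Reg) : Reg;
    }

    int main() {
      ToyVirtRegMap VRM;
      unsigned V0 = 0u | (1u << 31);   // index2VirtReg(0)
      VRM.Map[V0] = 7;                 // pretend vreg0 was allocated physreg 7
      assert(resolvePhys(V0, VRM) == 7);
      assert(resolvePhys(7, VRM) == 7); // physregs pass through unchanged
      return 0;
    }
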
diff --git a/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index e7cd6871dbb..210c0f44c2c 100644
--- a/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -169,7 +169,8 @@ SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
auto tryAddHint = [&](const MachineOperand *MO) -> void {
Register Reg = MO->getReg();
- Register PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
+ Register PhysReg =
+ Register::isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
if (PhysReg) {
if (MO->getSubReg())
PhysReg = getSubReg(PhysReg, MO->getSubReg());
@@ -385,7 +386,7 @@ bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
MEE++;
for (; MII != MEE; ++MII) {
for (const MachineOperand &MO : MII->operands())
- if (MO.isReg() && isPhysicalRegister(MO.getReg())) {
+ if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
SI.isValid(); ++SI)
if (NewRC->contains(*SI)) {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
index 7f9d41da397..38e9d339e74 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -68,7 +68,7 @@ MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const {
std::string WebAssemblyAsmPrinter::regToString(const MachineOperand &MO) {
unsigned RegNo = MO.getReg();
- assert(TargetRegisterInfo::isVirtualRegister(RegNo) &&
+ assert(Register::isVirtualRegister(RegNo) &&
"Unlowered physical register encountered during assembly printing");
assert(!MFI->isVRegStackified(RegNo));
unsigned WAReg = MFI->getWAReg(RegNo);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index dbd62179f05..515dbd5c4e7 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -221,7 +221,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
// drops to their defs.
BitVector UseEmpty(MRI.getNumVirtRegs());
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I)
- UseEmpty[I] = MRI.use_empty(TargetRegisterInfo::index2VirtReg(I));
+ UseEmpty[I] = MRI.use_empty(Register::index2VirtReg(I));
// Visit each instruction in the function.
for (MachineBasicBlock &MBB : MF) {
@@ -280,7 +280,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
Changed = true;
continue;
}
- if (UseEmpty[TargetRegisterInfo::virtReg2Index(OldReg)]) {
+ if (UseEmpty[Register::virtReg2Index(OldReg)]) {
unsigned Opc = getDropOpcode(RC);
MachineInstr *Drop =
BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc))
@@ -369,7 +369,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
// TODO: Sort the locals for better compression.
MFI.setNumLocals(CurLocal - MFI.getParams().size());
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ unsigned Reg = Register::index2VirtReg(I);
auto RL = Reg2Local.find(Reg);
if (RL == Reg2Local.end() || RL->second < MFI.getParams().size())
continue;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
index a86c9af28f0..68db96a1ce2 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
@@ -60,7 +60,7 @@ void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
  // exist. However, we need to handle both here.
auto &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *RC =
- TargetRegisterInfo::isVirtualRegister(DestReg)
+ Register::isVirtualRegister(DestReg)
? MRI.getRegClass(DestReg)
: MRI.getTargetRegisterInfo()->getMinimalPhysRegClass(DestReg);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
index 4b9ba491dee..91e5055ad59 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
@@ -96,13 +96,13 @@ public:
void stackifyVReg(unsigned VReg) {
assert(MF.getRegInfo().getUniqueVRegDef(VReg));
- auto I = TargetRegisterInfo::virtReg2Index(VReg);
+ auto I = Register::virtReg2Index(VReg);
if (I >= VRegStackified.size())
VRegStackified.resize(I + 1);
VRegStackified.set(I);
}
bool isVRegStackified(unsigned VReg) const {
- auto I = TargetRegisterInfo::virtReg2Index(VReg);
+ auto I = Register::virtReg2Index(VReg);
if (I >= VRegStackified.size())
return false;
return VRegStackified.test(I);
@@ -111,12 +111,12 @@ public:
void initWARegs();
void setWAReg(unsigned VReg, unsigned WAReg) {
assert(WAReg != UnusedReg);
- auto I = TargetRegisterInfo::virtReg2Index(VReg);
+ auto I = Register::virtReg2Index(VReg);
assert(I < WARegs.size());
WARegs[I] = WAReg;
}
unsigned getWAReg(unsigned VReg) const {
- auto I = TargetRegisterInfo::virtReg2Index(VReg);
+ auto I = Register::virtReg2Index(VReg);
assert(I < WARegs.size());
return WARegs[I];
}
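
These hunks show the other common job of the helpers: Register::virtReg2Index() converts a virtual register number into a dense zero-based index suitable for keying a side table such as VRegStackified. A minimal standalone model of the stackifyVReg()/isVRegStackified() pair, using std::vector<bool> in place of LLVM's BitVector (ToyFunctionInfo is a hypothetical name):

    #include <cassert>
    #include <vector>

    static unsigned virtReg2Index(unsigned Reg) { return Reg & ~(1u << 31); }
    static unsigned index2VirtReg(unsigned Index) { return Index | (1u << 31); }

    struct ToyFunctionInfo {
      std::vector<bool> VRegStackified; // grows on demand, keyed by vreg index

      void stackifyVReg(unsigned VReg) {
        unsigned I = virtReg2Index(VReg);
        if (I >= VRegStackified.size())
          VRegStackified.resize(I + 1);
        VRegStackified[I] = true;
      }
      bool isVRegStackified(unsigned VReg) const {
        unsigned I = virtReg2Index(VReg);
        return I < VRegStackified.size() && VRegStackified[I];
      }
    };

    int main() {
      ToyFunctionInfo MFI;
      unsigned V5 = index2VirtReg(5);
      assert(!MFI.isVRegStackified(V5)); // out-of-range reads as "not set"
      MFI.stackifyVReg(V5);
      assert(MFI.isVRegStackified(V5));
      return 0;
    }
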
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
index 8c7c3305c20..0bd30791e57 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -81,7 +81,7 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(
// Split multiple-VN LiveIntervals into multiple LiveIntervals.
SmallVector<LiveInterval *, 4> SplitLIs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ unsigned Reg = Register::index2VirtReg(I);
if (MRI.reg_nodbg_empty(Reg))
continue;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
index 3bfbf607344..799b9388097 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
@@ -95,7 +95,7 @@ bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(
// TODO: This is fairly heavy-handed; find a better approach.
//
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ unsigned Reg = Register::index2VirtReg(I);
// Skip unused registers.
if (MRI.use_nodbg_empty(Reg))
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
index 6f09c45b664..043b6f1b7d1 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -98,7 +98,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "Interesting register intervals:\n");
for (unsigned I = 0; I < NumVRegs; ++I) {
- unsigned VReg = TargetRegisterInfo::index2VirtReg(I);
+ unsigned VReg = Register::index2VirtReg(I);
if (MFI.isVRegStackified(VReg))
continue;
// Skip unused registers, which can use $drop.
@@ -157,9 +157,8 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
Changed |= Old != New;
UsedColors.set(Color);
Assignments[Color].push_back(LI);
- LLVM_DEBUG(
- dbgs() << "Assigning vreg" << TargetRegisterInfo::virtReg2Index(LI->reg)
- << " to vreg" << TargetRegisterInfo::virtReg2Index(New) << "\n");
+ LLVM_DEBUG(dbgs() << "Assigning vreg" << Register::virtReg2Index(LI->reg)
+ << " to vreg" << Register::virtReg2Index(New) << "\n");
}
if (!Changed)
return false;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
index cdca23f55b2..72e7a7cf504 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
@@ -89,7 +89,7 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
// Start the numbering for locals after the arg regs
unsigned CurReg = MFI.getParams().size();
for (unsigned VRegIdx = 0; VRegIdx < NumVRegs; ++VRegIdx) {
- unsigned VReg = TargetRegisterInfo::index2VirtReg(VRegIdx);
+ unsigned VReg = Register::index2VirtReg(VRegIdx);
// Skip unused registers.
if (MRI.use_empty(VReg))
continue;
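
Several of the WebAssembly passes touched above walk every virtual register in the function by iterating dense indices up to getNumVirtRegs() and mapping each back with Register::index2VirtReg(). A standalone sketch of that enumeration pattern, with a plain vector of use counts standing in for MachineRegisterInfo:

    #include <cstdio>
    #include <vector>

    static unsigned index2VirtReg(unsigned Index) { return Index | (1u << 31); }

    int main() {
      // Stand-in for MachineRegisterInfo: one use count per virtual register.
      std::vector<unsigned> UseCounts = {2, 0, 5}; // vreg0, vreg1, vreg2
      for (unsigned I = 0, E = (unsigned)UseCounts.size(); I != E; ++I) {
        unsigned Reg = index2VirtReg(I); // dense index -> vreg number
        if (UseCounts[I] == 0)
          continue; // skip unused registers, as the hunks above do
        std::printf("vreg%u (0x%08x) has %u uses\n", I, Reg, UseCounts[I]);
      }
      return 0;
    }
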
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
index a120a647101..12177f84c94 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -341,7 +341,7 @@ static bool isSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
!Insert->readsRegister(Reg))
continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (Register::isPhysicalRegister(Reg)) {
// Ignore ARGUMENTS; it's just used to keep the ARGUMENT_* instructions
// from moving down, and we've already checked for that.
if (Reg == WebAssembly::ARGUMENTS)
@@ -437,7 +437,7 @@ static bool oneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
if (!MO.isReg())
return false;
unsigned DefReg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(DefReg) ||
+ if (!Register::isVirtualRegister(DefReg) ||
!MFI.isVRegStackified(DefReg))
return false;
assert(MRI.hasOneNonDBGUse(DefReg));
@@ -811,7 +811,7 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
assert(Op.isUse() && "explicit_uses() should only iterate over uses");
assert(!Op.isImplicit() &&
"explicit_uses() should only iterate over explicit operands");
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
continue;
// Identify the definition for this register at this point.
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
index ea9cfc00adf..42098c86777 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
@@ -92,7 +92,7 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex(
MachineOperand &OtherMO = MI.getOperand(3 - FIOperandNum);
if (OtherMO.isReg()) {
unsigned OtherMOReg = OtherMO.getReg();
- if (TargetRegisterInfo::isVirtualRegister(OtherMOReg)) {
+ if (Register::isVirtualRegister(OtherMOReg)) {
MachineInstr *Def = MF.getRegInfo().getUniqueVRegDef(OtherMOReg);
// TODO: For now we just opportunistically do this in the case where
// the CONST_I32 happens to have exactly one def and one use. We
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp
index e9d88d4818a..8f66e18da55 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp
@@ -33,8 +33,7 @@ bool WebAssembly::isChild(const MachineInstr &MI,
if (!MO.isReg() || MO.isImplicit() || !MO.isDef())
return false;
unsigned Reg = MO.getReg();
- return TargetRegisterInfo::isVirtualRegister(Reg) &&
- MFI.isVRegStackified(Reg);
+ return Register::isVirtualRegister(Reg) && MFI.isVRegStackified(Reg);
}
bool WebAssembly::mayThrow(const MachineInstr &MI) {
diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
index 4df849a2e14..4b6f5aba9f0 100644
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -326,7 +326,7 @@ X86CallFrameOptimization::classifyInstruction(
if (!MO.isReg())
continue;
unsigned int Reg = MO.getReg();
- if (!RegInfo.isPhysicalRegister(Reg))
+ if (!Register::isPhysicalRegister(Reg))
continue;
if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
return Exit;
@@ -444,7 +444,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
if (!MO.isReg())
continue;
unsigned int Reg = MO.getReg();
- if (RegInfo.isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
UsedRegs.insert(Reg);
}
}
@@ -598,7 +598,7 @@ MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
// movl %eax, (%esp)
// call
// Get rid of those with prejudice.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return nullptr;
// Make sure this is the only use of Reg.
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
index a61fa3246f0..6e7275b7e6b 100644
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -437,7 +437,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates(
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
- auto &RDM = RegDefMaps[TargetRegisterInfo::isVirtualRegister(Reg)];
+ auto &RDM = RegDefMaps[Register::isVirtualRegister(Reg)];
if (MachineInstr *DefMI = RDM.lookup(Reg)) {
OperandToDefMap[&MO] = DefMI;
DepthInfo Info = DepthMap.lookup(DefMI);
@@ -457,7 +457,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates(
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
- RegDefMaps[TargetRegisterInfo::isVirtualRegister(Reg)][Reg] = &MI;
+ RegDefMaps[Register::isVirtualRegister(Reg)][Reg] = &MI;
}
unsigned Latency = TSchedModel.computeInstrLatency(&MI);
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 18bbfa32e11..dc82987cf60 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -220,12 +220,12 @@ public:
    // Don't allow copies to/from GR8/GR16 physical registers.
// FIXME: Is there some better way to support this?
unsigned DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
+ if (Register::isPhysicalRegister(DstReg) &&
(X86::GR8RegClass.contains(DstReg) ||
X86::GR16RegClass.contains(DstReg)))
return false;
unsigned SrcReg = MI->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+ if (Register::isPhysicalRegister(SrcReg) &&
(X86::GR8RegClass.contains(SrcReg) ||
X86::GR16RegClass.contains(SrcReg)))
return false;
@@ -241,7 +241,7 @@ public:
// Physical registers will not be converted. Assume that converting the
    // COPY to the destination domain will eventually result in an actual
// instruction.
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
return 1;
RegDomain OpDomain = getDomain(MRI->getRegClass(MO.getReg()),
@@ -436,7 +436,7 @@ void X86DomainReassignment::visitRegister(Closure &C, unsigned Reg,
if (EnclosedEdges.count(Reg))
return;
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return;
if (!MRI->hasOneDef(Reg))
@@ -594,7 +594,7 @@ void X86DomainReassignment::buildClosure(Closure &C, unsigned Reg) {
continue;
unsigned DefReg = DefOp.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(DefReg)) {
+ if (!Register::isVirtualRegister(DefReg)) {
C.setAllIllegal();
continue;
}
@@ -751,7 +751,7 @@ bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
// Go over all virtual registers and calculate a closure.
unsigned ClosureID = 0;
for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(Idx);
+ unsigned Reg = Register::index2VirtReg(Idx);
    // Currently GPR is the only supported source domain.
if (!isGPR(MRI->getRegClass(Reg)))
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index 5ce3255ea96..6523d5049d3 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -721,8 +721,9 @@ CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs(
for (MachineInstr &MI :
llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
X86::CondCode Cond = X86::getCondFromSETCC(MI);
- if (Cond != X86::COND_INVALID && !MI.mayStore() && MI.getOperand(0).isReg() &&
- TRI->isVirtualRegister(MI.getOperand(0).getReg())) {
+ if (Cond != X86::COND_INVALID && !MI.mayStore() &&
+ MI.getOperand(0).isReg() &&
+ Register::isVirtualRegister(MI.getOperand(0).getReg())) {
assert(MI.getOperand(0).isDef() &&
"A non-storing SETcc should always define a register!");
CondRegs[Cond] = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 46b31894df7..99ca08c5614 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4190,7 +4190,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
- if (!TargetRegisterInfo::isVirtualRegister(VR))
+ if (!Register::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 93c669801a7..a5cc2d1d0be 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -465,7 +465,7 @@ unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
/// Return true if register is PIC base, i.e. defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
// Don't waste compile time scanning use-def chains of physregs.
- if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
+ if (!Register::isVirtualRegister(BaseReg))
return false;
bool isPICBase = false;
for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
@@ -684,7 +684,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
isKill = Src.isKill();
assert(!Src.isUndef() && "Undef op doesn't need optimization");
- if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
+ if (Register::isVirtualRegister(NewSrc) &&
!MF.getRegInfo().constrainRegClass(NewSrc, RC))
return false;
@@ -693,7 +693,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
// This is for an LEA64_32r and incoming registers are 32-bit. One way or
// another we need to add 64-bit registers to the final MI.
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ if (Register::isPhysicalRegister(SrcReg)) {
ImplicitOp = Src;
ImplicitOp.setImplicit();
@@ -888,7 +888,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
// LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
+ if (Register::isVirtualRegister(Src.getReg()) &&
!MF.getRegInfo().constrainRegClass(Src.getReg(),
&X86::GR64_NOSPRegClass))
return nullptr;
@@ -4252,7 +4252,7 @@ unsigned X86InstrInfo::getPartialRegUpdateClearance(
// If MI is marked as reading Reg, the partial register update is wanted.
const MachineOperand &MO = MI.getOperand(0);
unsigned Reg = MO.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (MO.readsReg() || MI.readsVirtualRegister(Reg))
return 0;
} else {
@@ -4456,7 +4456,7 @@ X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
OpNum = 1;
const MachineOperand &MO = MI.getOperand(OpNum);
- if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (MO.isUndef() && Register::isPhysicalRegister(MO.getReg())) {
return UndefRegClearance;
}
return 0;
@@ -4539,7 +4539,7 @@ static void updateOperandRegConstraints(MachineFunction &MF,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
- if (!TRI.isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
auto *NewRC = MRI.constrainRegClass(
@@ -4822,7 +4822,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// value and zero-extend the top bits. Change the destination register
// to a 32-bit one.
unsigned DstReg = NewMI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
else
NewMI->getOperand(0).setSubReg(X86::sub_32bit);
@@ -7384,9 +7384,8 @@ X86InstrInfo::describeLoadedValue(const MachineInstr &MI) const {
const MachineOperand &Op1 = MI.getOperand(1);
const MachineOperand &Op2 = MI.getOperand(3);
const TargetRegisterInfo *TRI = &getRegisterInfo();
- assert(Op2.isReg() &&
- (Op2.getReg() == X86::NoRegister ||
- TargetRegisterInfo::isPhysicalRegister(Op2.getReg())));
+ assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister ||
+ Register::isPhysicalRegister(Op2.getReg())));
// Omit situations like:
// %rsi = lea %rsi, 4, ...
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 892a083f4d1..4b08ad2c61b 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -217,7 +217,7 @@ static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
}
static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
if (X86::GR64RegClass.contains(Reg))
return &X86::GR64RegClass;
if (X86::GR32RegClass.contains(Reg))
@@ -241,7 +241,7 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
+ if (Register::isPhysicalRegister(DstReg)) {
assert(I.isCopy() && "Generic operators do not allow physical registers");
if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
@@ -268,12 +268,12 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
return true;
}
- assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
+ assert((!Register::isPhysicalRegister(SrcReg) || I.isCopy()) &&
"No phys reg on generic operators");
assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+ (Register::isPhysicalRegister(SrcReg) &&
DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
"Copy with different width?!");
@@ -282,7 +282,7 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
if (SrcRegBank.getID() == X86::GPRRegBankID &&
DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
- TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ Register::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform the truncate.
const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 7f75598b065..92675ab8bcd 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -198,8 +198,7 @@ static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
static inline bool isIdenticalOp(const MachineOperand &MO1,
const MachineOperand &MO2) {
return MO1.isIdenticalTo(MO2) &&
- (!MO1.isReg() ||
- !TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
+ (!MO1.isReg() || !Register::isPhysicalRegister(MO1.getReg()));
}
#ifndef NDEBUG
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 40f5dbe57e4..2741254cbd0 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -2211,7 +2211,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
// just bail. Also check that its register class is one of the ones we
// can harden.
unsigned UseDefReg = UseMI.getOperand(0).getReg();
- if (!TRI->isVirtualRegister(UseDefReg) ||
+ if (!Register::isVirtualRegister(UseDefReg) ||
!canHardenRegister(UseDefReg))
return {};
@@ -2278,7 +2278,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
unsigned Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
DebugLoc Loc) {
assert(canHardenRegister(Reg) && "Cannot harden this register!");
- assert(TRI->isVirtualRegister(Reg) && "Cannot harden a physical register!");
+ assert(Register::isVirtualRegister(Reg) && "Cannot harden a physical register!");
auto *RC = MRI->getRegClass(Reg);
int Bytes = TRI->getRegSizeInBits(*RC) / 8;