Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86CallFrameOptimization.cpp      6
-rw-r--r--  llvm/lib/Target/X86/X86CmovConversion.cpp             4
-rw-r--r--  llvm/lib/Target/X86/X86DomainReassignment.cpp        12
-rw-r--r--  llvm/lib/Target/X86/X86FlagsCopyLowering.cpp          5
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp               2
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp                 21
-rw-r--r--  llvm/lib/Target/X86/X86InstructionSelector.cpp       10
-rw-r--r--  llvm/lib/Target/X86/X86OptimizeLEAs.cpp               3
-rw-r--r--  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp   4
9 files changed, 33 insertions, 34 deletions
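
All nine files receive the same mechanical substitution: the static register-classification helpers (isVirtualRegister, isPhysicalRegister, index2VirtReg) move from TargetRegisterInfo, and from the TRI-> instance spelling, to the llvm::Register class, with no change in behavior. For orientation, here is a minimal self-contained model of the number encoding those predicates test. It assumes LLVM's convention of tagging virtual registers with the top bit of the register number; the real helpers live in llvm/include/llvm/CodeGen/Register.h and also handle stack-slot encodings.

    // Minimal, self-contained model of the register-number encoding behind
    // these helpers. Assumption: LLVM tags virtual registers by setting the
    // top bit of the unsigned register number; this is a sketch, not the
    // real llvm/CodeGen/Register.h.
    #include <cassert>
    #include <cstdint>

    namespace model {
    constexpr uint32_t VirtualRegFlag = 1u << 31;

    inline bool isVirtualRegister(uint32_t Reg) { return Reg & VirtualRegFlag; }
    // 0 is reserved as "no register", so a physical register is any nonzero
    // number that lacks the virtual-register flag.
    inline bool isPhysicalRegister(uint32_t Reg) {
      return Reg != 0 && !(Reg & VirtualRegFlag);
    }
    inline uint32_t index2VirtReg(uint32_t Index) { return Index | VirtualRegFlag; }
    inline uint32_t virtReg2Index(uint32_t Reg) { return Reg & ~VirtualRegFlag; }
    } // namespace model

    int main() {
      uint32_t V = model::index2VirtReg(5);   // the sixth virtual register
      assert(model::isVirtualRegister(V) && !model::isPhysicalRegister(V));
      assert(model::isPhysicalRegister(17));  // a plausible physreg number
      assert(model::virtReg2Index(V) == 5);
      assert(!model::isVirtualRegister(0) && !model::isPhysicalRegister(0));
    }

Because the encoding lives in the register number itself, classification never needed a TargetRegisterInfo instance, which is what makes this move (and the TRI->-to-static rewrites below) purely mechanical.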
diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
index 4df849a2e14..4b6f5aba9f0 100644
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -326,7 +326,7 @@ X86CallFrameOptimization::classifyInstruction(
if (!MO.isReg())
continue;
unsigned int Reg = MO.getReg();
- if (!RegInfo.isPhysicalRegister(Reg))
+ if (!Register::isPhysicalRegister(Reg))
continue;
if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
return Exit;
@@ -444,7 +444,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
if (!MO.isReg())
continue;
unsigned int Reg = MO.getReg();
- if (RegInfo.isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg))
UsedRegs.insert(Reg);
}
}
@@ -598,7 +598,7 @@ MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
// movl %eax, (%esp)
// call
// Get rid of those with prejudice.
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return nullptr;
// Make sure this is the only use of Reg.
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
index a61fa3246f0..6e7275b7e6b 100644
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -437,7 +437,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates(
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
- auto &RDM = RegDefMaps[TargetRegisterInfo::isVirtualRegister(Reg)];
+ auto &RDM = RegDefMaps[Register::isVirtualRegister(Reg)];
if (MachineInstr *DefMI = RDM.lookup(Reg)) {
OperandToDefMap[&MO] = DefMI;
DepthInfo Info = DepthMap.lookup(DefMI);
@@ -457,7 +457,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates(
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
- RegDefMaps[TargetRegisterInfo::isVirtualRegister(Reg)][Reg] = &MI;
+ RegDefMaps[Register::isVirtualRegister(Reg)][Reg] = &MI;
}
unsigned Latency = TSchedModel.computeInstrLatency(&MI);
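
One detail worth noting in the hunk above: RegDefMaps is indexed directly by the predicate's result, so the bool selects between a map of physical-register defs (index 0) and a map of virtual-register defs (index 1). A minimal sketch of that idiom, with stand-in types; the pass itself uses llvm::DenseMap and MachineInstr pointers.

    // Stand-in types for the bool-indexed def-map idiom.
    #include <array>
    #include <cstdint>
    #include <unordered_map>

    struct Instr {};  // stand-in for MachineInstr

    inline bool isVirtualRegister(uint32_t Reg) { return Reg & (1u << 31); }

    // [0] holds defs of physical registers, [1] defs of virtual registers;
    // the predicate's bool result converts to exactly that 0/1 index.
    using DefMap = std::unordered_map<uint32_t, Instr *>;
    std::array<DefMap, 2> RegDefMaps;

    void recordDef(uint32_t Reg, Instr *MI) {
      RegDefMaps[isVirtualRegister(Reg)][Reg] = MI;
    }

    Instr *lookupDef(uint32_t Reg) {
      DefMap &RDM = RegDefMaps[isVirtualRegister(Reg)];
      auto It = RDM.find(Reg);
      return It == RDM.end() ? nullptr : It->second;
    }

    int main() {
      Instr I;
      recordDef(1u << 31, &I);  // record a def of the first virtual register
      return lookupDef(1u << 31) == &I ? 0 : 1;
    }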
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 18bbfa32e11..dc82987cf60 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -220,12 +220,12 @@ public:
// Don't allow copies to/from GR8/GR16 physical registers.
// FIXME: Is there some better way to support this?
unsigned DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
+ if (Register::isPhysicalRegister(DstReg) &&
(X86::GR8RegClass.contains(DstReg) ||
X86::GR16RegClass.contains(DstReg)))
return false;
unsigned SrcReg = MI->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+ if (Register::isPhysicalRegister(SrcReg) &&
(X86::GR8RegClass.contains(SrcReg) ||
X86::GR16RegClass.contains(SrcReg)))
return false;
@@ -241,7 +241,7 @@ public:
// Physical registers will not be converted. Assume that converting the
// COPY to the destination domain will eventually result in an actual
// instruction.
- if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ if (Register::isPhysicalRegister(MO.getReg()))
return 1;
RegDomain OpDomain = getDomain(MRI->getRegClass(MO.getReg()),
@@ -436,7 +436,7 @@ void X86DomainReassignment::visitRegister(Closure &C, unsigned Reg,
if (EnclosedEdges.count(Reg))
return;
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
return;
if (!MRI->hasOneDef(Reg))
@@ -594,7 +594,7 @@ void X86DomainReassignment::buildClosure(Closure &C, unsigned Reg) {
continue;
unsigned DefReg = DefOp.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(DefReg)) {
+ if (!Register::isVirtualRegister(DefReg)) {
C.setAllIllegal();
continue;
}
@@ -751,7 +751,7 @@ bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
// Go over all virtual registers and calculate a closure.
unsigned ClosureID = 0;
for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(Idx);
+ unsigned Reg = Register::index2VirtReg(Idx);
// Only GPR is currently supported as a source domain.
if (!isGPR(MRI->getRegClass(Reg)))
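
The runOnMachineFunction hunk shows the standard way to enumerate virtual registers: indices are dense in [0, getNumVirtRegs()), and index2VirtReg restores the tagged register number the rest of the API expects. A hedged sketch, reusing the top-bit assumption from the first example; NumVirtRegs stands in for MRI->getNumVirtRegs().

    // Sketch of the virtual-register enumeration pattern.
    #include <cstdint>
    #include <vector>

    constexpr uint32_t VirtualRegFlag = 1u << 31;
    inline uint32_t index2VirtReg(uint32_t Index) { return Index | VirtualRegFlag; }

    std::vector<uint32_t> allVirtRegs(uint32_t NumVirtRegs) {
      std::vector<uint32_t> Regs;
      for (uint32_t Idx = 0; Idx < NumVirtRegs; ++Idx)
        Regs.push_back(index2VirtReg(Idx));  // e.g. index 0 -> 0x80000000
      return Regs;
    }

    int main() { return allVirtRegs(8).size() == 8 ? 0 : 1; }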
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index 5ce3255ea96..6523d5049d3 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -721,8 +721,9 @@ CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs(
for (MachineInstr &MI :
llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
X86::CondCode Cond = X86::getCondFromSETCC(MI);
- if (Cond != X86::COND_INVALID && !MI.mayStore() && MI.getOperand(0).isReg() &&
- TRI->isVirtualRegister(MI.getOperand(0).getReg())) {
+ if (Cond != X86::COND_INVALID && !MI.mayStore() &&
+ MI.getOperand(0).isReg() &&
+ Register::isVirtualRegister(MI.getOperand(0).getReg())) {
assert(MI.getOperand(0).isDef() &&
"A non-storing SETcc should always define a register!");
CondRegs[Cond] = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 46b31894df7..99ca08c5614 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4190,7 +4190,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
- if (!TargetRegisterInfo::isVirtualRegister(VR))
+ if (!Register::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 93c669801a7..a5cc2d1d0be 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -465,7 +465,7 @@ unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
/// Return true if the register is a PIC base; i.e., defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
// Don't waste compile time scanning use-def chains of physregs.
- if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
+ if (!Register::isVirtualRegister(BaseReg))
return false;
bool isPICBase = false;
for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
@@ -684,7 +684,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
isKill = Src.isKill();
assert(!Src.isUndef() && "Undef op doesn't need optimization");
- if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
+ if (Register::isVirtualRegister(NewSrc) &&
!MF.getRegInfo().constrainRegClass(NewSrc, RC))
return false;
@@ -693,7 +693,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
// This is for an LEA64_32r and incoming registers are 32-bit. One way or
// another we need to add 64-bit registers to the final MI.
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ if (Register::isPhysicalRegister(SrcReg)) {
ImplicitOp = Src;
ImplicitOp.setImplicit();
@@ -888,7 +888,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
// LEA can't handle RSP.
- if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
+ if (Register::isVirtualRegister(Src.getReg()) &&
!MF.getRegInfo().constrainRegClass(Src.getReg(),
&X86::GR64_NOSPRegClass))
return nullptr;
@@ -4252,7 +4252,7 @@ unsigned X86InstrInfo::getPartialRegUpdateClearance(
// If MI is marked as reading Reg, the partial register update is wanted.
const MachineOperand &MO = MI.getOperand(0);
unsigned Reg = MO.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (Register::isVirtualRegister(Reg)) {
if (MO.readsReg() || MI.readsVirtualRegister(Reg))
return 0;
} else {
@@ -4456,7 +4456,7 @@ X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
OpNum = 1;
const MachineOperand &MO = MI.getOperand(OpNum);
- if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ if (MO.isUndef() && Register::isPhysicalRegister(MO.getReg())) {
return UndefRegClearance;
}
return 0;
@@ -4539,7 +4539,7 @@ static void updateOperandRegConstraints(MachineFunction &MF,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
- if (!TRI.isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
auto *NewRC = MRI.constrainRegClass(
@@ -4822,7 +4822,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// value and zero-extend the top bits. Change the destination register
// to a 32-bit one.
unsigned DstReg = NewMI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
else
NewMI->getOperand(0).setSubReg(X86::sub_32bit);
@@ -7384,9 +7384,8 @@ X86InstrInfo::describeLoadedValue(const MachineInstr &MI) const {
const MachineOperand &Op1 = MI.getOperand(1);
const MachineOperand &Op2 = MI.getOperand(3);
const TargetRegisterInfo *TRI = &getRegisterInfo();
- assert(Op2.isReg() &&
- (Op2.getReg() == X86::NoRegister ||
- TargetRegisterInfo::isPhysicalRegister(Op2.getReg())));
+ assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister ||
+ Register::isPhysicalRegister(Op2.getReg())));
// Omit situations like:
// %rsi = lea %rsi, 4, ...
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 892a083f4d1..4b08ad2c61b 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -217,7 +217,7 @@ static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
}
static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(Register::isPhysicalRegister(Reg));
if (X86::GR64RegClass.contains(Reg))
return &X86::GR64RegClass;
if (X86::GR32RegClass.contains(Reg))
@@ -241,7 +241,7 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
+ if (Register::isPhysicalRegister(DstReg)) {
assert(I.isCopy() && "Generic operators do not allow physical registers");
if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
@@ -268,12 +268,12 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
return true;
}
- assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
+ assert((!Register::isPhysicalRegister(SrcReg) || I.isCopy()) &&
"No phys reg on generic operators");
assert((DstSize == SrcSize ||
// Copies are a mean to setup initial types, the number of
// bits may not exactly match.
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+ (Register::isPhysicalRegister(SrcReg) &&
DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
"Copy with different width?!");
@@ -282,7 +282,7 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
if (SrcRegBank.getID() == X86::GPRRegBankID &&
DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
- TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ Register::isPhysicalRegister(SrcReg)) {
// Change the physical register to perform the truncate.
const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
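
selectCopy keeps leaning on getRegClassFromGRPhysReg, which resolves a physical general-purpose register to a register class by probing containment widest-first (GR64, then GR32, and so on). A toy sketch of that probing order follows; the register numbers and class contents are invented, not the TableGen-generated X86 classes.

    // Toy model of containment-based class lookup.
    #include <cstdint>
    #include <set>

    struct RegClass {
      std::set<uint32_t> Members;
      bool contains(uint32_t Reg) const { return Members.count(Reg) != 0; }
    };

    const RegClass *classOfGRPhysReg(uint32_t Reg, const RegClass &GR64,
                                     const RegClass &GR32, const RegClass &GR16,
                                     const RegClass &GR8) {
      if (GR64.contains(Reg)) return &GR64;  // widest first, as in the pass
      if (GR32.contains(Reg)) return &GR32;
      if (GR16.contains(Reg)) return &GR16;
      if (GR8.contains(Reg)) return &GR8;
      return nullptr;  // not a general-purpose register
    }

    int main() {
      RegClass GR64{{1, 2}}, GR32{{3, 4}}, GR16{{5}}, GR8{{6}};
      return classOfGRPhysReg(3, GR64, GR32, GR16, GR8) == &GR32 ? 0 : 1;
    }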
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 7f75598b065..92675ab8bcd 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -198,8 +198,7 @@ static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
static inline bool isIdenticalOp(const MachineOperand &MO1,
const MachineOperand &MO2) {
return MO1.isIdenticalTo(MO2) &&
- (!MO1.isReg() ||
- !TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
+ (!MO1.isReg() || !Register::isPhysicalRegister(MO1.getReg()));
}
#ifndef NDEBUG
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 40f5dbe57e4..2741254cbd0 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -2211,7 +2211,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
// just bail. Also check that its register class is one of the ones we
// can harden.
unsigned UseDefReg = UseMI.getOperand(0).getReg();
- if (!TRI->isVirtualRegister(UseDefReg) ||
+ if (!Register::isVirtualRegister(UseDefReg) ||
!canHardenRegister(UseDefReg))
return {};
@@ -2278,7 +2278,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
unsigned Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
DebugLoc Loc) {
assert(canHardenRegister(Reg) && "Cannot harden this register!");
- assert(TRI->isVirtualRegister(Reg) && "Cannot harden a physical register!");
+ assert(Register::isVirtualRegister(Reg) && "Cannot harden a physical register!");
auto *RC = MRI->getRegClass(Reg);
int Bytes = TRI->getRegSizeInBits(*RC) / 8;