Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp    |  2
-rw-r--r--  llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp   | 16
-rw-r--r--  llvm/lib/Target/X86/X86CmovConversion.cpp               | 12
-rw-r--r--  llvm/lib/Target/X86/X86DomainReassignment.cpp           | 10
-rw-r--r--  llvm/lib/Target/X86/X86FixupBWInsts.cpp                  |  4
-rw-r--r--  llvm/lib/Target/X86/X86FixupLEAs.cpp                     | 36
-rw-r--r--  llvm/lib/Target/X86/X86FlagsCopyLowering.cpp             | 28
-rw-r--r--  llvm/lib/Target/X86/X86FloatingPoint.cpp                 | 57
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp                  | 25
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp                     |  4
-rw-r--r--  llvm/lib/Target/X86/X86InstructionSelector.cpp           | 40
-rw-r--r--  llvm/lib/Target/X86/X86OptimizeLEAs.cpp                  |  7
-rw-r--r--  llvm/lib/Target/X86/X86RetpolineThunks.cpp               |  2
-rw-r--r--  llvm/lib/Target/X86/X86Subtarget.cpp                     |  6
-rw-r--r--  llvm/lib/Target/X86/X86VZeroUpper.cpp                    |  8
-rw-r--r--  llvm/lib/Target/X86/X86WinEHState.cpp                    | 12
16 files changed, 142 insertions, 127 deletions
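
Every hunk below applies the same mechanical rename: the X86 backend's debug-print statements switch from the old DEBUG(...) macro to LLVM_DEBUG(...), provided by llvm/Support/Debug.h. As a minimal sketch of the pattern (the debug type "x86-example" and the helper function are hypothetical, not part of this patch):

    #include "llvm/Support/Debug.h"   // provides LLVM_DEBUG and llvm::dbgs()

    #define DEBUG_TYPE "x86-example"  // hypothetical debug type, for illustration only

    static void reportStart() {
      // Before this patch:  DEBUG(dbgs() << "Start pass\n");
      // After this patch:
      LLVM_DEBUG(llvm::dbgs() << "Start pass\n");
    }

In asserts-enabled builds the message is printed only when -debug (or -debug-only=x86-example) is passed; in release builds the macro compiles away entirely.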
diff --git a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index b3c491b3de5..bac9d02d69c 100644
--- a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -103,7 +103,7 @@ StringRef llvm::X86Disassembler::GetInstrName(unsigned Opcode,
return MII->getName(Opcode);
}
-#define debug(s) DEBUG(Debug(__FILE__, __LINE__, s));
+#define debug(s) LLVM_DEBUG(Debug(__FILE__, __LINE__, s));
namespace llvm {
diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index e89dd497259..53fc9754590 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -407,7 +407,7 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
if (LoadBase.isReg())
getBaseOperand(NewLoad).setIsKill(false);
- DEBUG(NewLoad->dump());
+ LLVM_DEBUG(NewLoad->dump());
// If the load and store are consecutive, use the loadInst location to
// reduce register pressure.
MachineInstr *StInst = StoreInst;
@@ -428,7 +428,7 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
assert(StoreSrcVReg.isReg() && "Expected virtual register");
NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
- DEBUG(NewStore->dump());
+ LLVM_DEBUG(NewStore->dump());
}
void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst,
@@ -674,7 +674,7 @@ bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
+ LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
// Look for a load then a store to XMM/YMM which look like a memcpy
findPotentiallylBlockedCopies(MF);
@@ -711,10 +711,10 @@ bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
// into smaller copies such that each smaller store that was causing
// a store block would now be copied separately.
MachineInstr *StoreInst = LoadStoreInstPair.second;
- DEBUG(dbgs() << "Blocked load and store instructions: \n");
- DEBUG(LoadInst->dump());
- DEBUG(StoreInst->dump());
- DEBUG(dbgs() << "Replaced with:\n");
+ LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
+ LLVM_DEBUG(LoadInst->dump());
+ LLVM_DEBUG(StoreInst->dump());
+ LLVM_DEBUG(dbgs() << "Replaced with:\n");
removeRedundantBlockingStores(BlockingStoresDispSizeMap);
breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
updateKillStatus(LoadInst, StoreInst);
@@ -726,7 +726,7 @@ bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
}
ForRemoval.clear();
BlockedLoadsStoresPairs.clear();
- DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);
+ LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);
return Changed;
}
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
index 38baa90f479..f73455cc31b 100644
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -169,8 +169,8 @@ bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
if (!EnableCmovConverter)
return false;
- DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
- << "**********\n");
+ LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
+ << "**********\n");
bool Changed = false;
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
@@ -776,7 +776,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
auto *NewCMOV = NewMIs.pop_back_val();
assert(X86::getCondFromCMovOpc(NewCMOV->getOpcode()) == OppCC &&
"Last new instruction isn't the expected CMOV!");
- DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
+ LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
if (&*MIItBegin == &MI)
MIItBegin = MachineBasicBlock::iterator(NewCMOV);
@@ -784,7 +784,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
// Sink whatever instructions were needed to produce the unfolded operand
// into the false block.
for (auto *NewMI : NewMIs) {
- DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
+ LLVM_DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
FalseMBB->insert(FalseInsertionPoint, NewMI);
// Re-map any operands that are from other cmovs to the inputs for this block.
for (auto &MOp : NewMI->uses()) {
@@ -846,8 +846,8 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
.addReg(Op2Reg)
.addMBB(MBB);
(void)MIB;
- DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
- DEBUG(dbgs() << "\tTo: "; MIB->dump());
+ LLVM_DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
+ LLVM_DEBUG(dbgs() << "\tTo: "; MIB->dump());
// Add this PHI to the rewrite table.
RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index b41640f7bd7..ef854dcc2a7 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -701,8 +701,9 @@ bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
if (DisableX86DomainReassignment)
return false;
- DEBUG(dbgs() << "***** Machine Function before Domain Reassignment *****\n");
- DEBUG(MF.print(dbgs()));
+ LLVM_DEBUG(
+ dbgs() << "***** Machine Function before Domain Reassignment *****\n");
+ LLVM_DEBUG(MF.print(dbgs()));
STI = &MF.getSubtarget<X86Subtarget>();
// GPR->K is the only transformation currently supported, bail out early if no
@@ -752,8 +753,9 @@ bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
DeleteContainerSeconds(Converters);
- DEBUG(dbgs() << "***** Machine Function after Domain Reassignment *****\n");
- DEBUG(MF.print(dbgs()));
+ LLVM_DEBUG(
+ dbgs() << "***** Machine Function after Domain Reassignment *****\n");
+ LLVM_DEBUG(MF.print(dbgs()));
return Changed;
}
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index 46f13821bae..d9bf60c2c9f 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -155,13 +155,13 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
MLI = &getAnalysis<MachineLoopInfo>();
LiveRegs.init(TII->getRegisterInfo());
- DEBUG(dbgs() << "Start X86FixupBWInsts\n";);
+ LLVM_DEBUG(dbgs() << "Start X86FixupBWInsts\n";);
// Process all basic blocks.
for (auto &MBB : MF)
processBasicBlock(MF, MBB);
- DEBUG(dbgs() << "End X86FixupBWInsts\n";);
+ LLVM_DEBUG(dbgs() << "End X86FixupBWInsts\n";);
return true;
}
diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index df8c8340a61..157b07d819b 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -206,11 +206,11 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
TSM.init(&Func.getSubtarget());
TII = ST.getInstrInfo();
- DEBUG(dbgs() << "Start X86FixupLEAs\n";);
+ LLVM_DEBUG(dbgs() << "Start X86FixupLEAs\n";);
// Process all basic blocks.
for (MachineFunction::iterator I = Func.begin(), E = Func.end(); I != E; ++I)
processBasicBlock(Func, I);
- DEBUG(dbgs() << "End X86FixupLEAs\n";);
+ LLVM_DEBUG(dbgs() << "End X86FixupLEAs\n";);
return true;
}
@@ -408,9 +408,9 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
MachineInstr *NewMI = postRAConvertToLEA(MFI, MBI);
if (NewMI) {
++NumLEAs;
- DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MBI->dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MBI->dump(););
// now to replace with an equivalent LEA...
- DEBUG(dbgs() << "FixLEA: Replaced by: "; NewMI->dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: "; NewMI->dump(););
MFI->erase(MBI);
MachineBasicBlock::iterator J =
static_cast<MachineBasicBlock::iterator>(NewMI);
@@ -435,8 +435,8 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
return;
if (MI.getOperand(2).getImm() > 1)
return;
- DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
- DEBUG(dbgs() << "FixLEA: Replaced by: ";);
+ LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);
MachineInstr *NewMI = nullptr;
// Make ADD instruction for two registers writing to LEA's destination
if (SrcR1 != 0 && SrcR2 != 0) {
@@ -444,7 +444,7 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
const MachineOperand &Src = MI.getOperand(SrcR1 == DstR ? 3 : 1);
NewMI =
BuildMI(*MFI, I, MI.getDebugLoc(), ADDrr, DstR).addReg(DstR).add(Src);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
// Make ADD instruction for immediate
if (MI.getOperand(4).getImm() != 0) {
@@ -454,7 +454,7 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
NewMI = BuildMI(*MFI, I, MI.getDebugLoc(), ADDri, DstR)
.add(SrcR)
.addImm(MI.getOperand(4).getImm());
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
if (NewMI) {
MFI->erase(I);
@@ -504,8 +504,8 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(LEAOpcode));
const MCInstrDesc &ADDri = TII->get(getADDriFromLEA(LEAOpcode, Offset));
- DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MI.dump(););
- DEBUG(dbgs() << "FixLEA: Replaced by: ";);
+ LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MI.dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);
// First try to replace LEA with one or two (for the 3-op LEA case)
// add instructions:
@@ -515,11 +515,11 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
const MachineOperand &Src = DstR == BaseR ? Index : Base;
MachineInstr *NewMI =
BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Src);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
// Create ADD instruction for the Offset in case of 3-Ops LEA.
if (hasLEAOffset(Offset)) {
NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
return NewMI;
}
@@ -535,11 +535,11 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
.add(IsInefficientBase ? Base : Index)
.addImm(0)
.add(Segment);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
// Create ADD instruction for the Offset in case of 3-Ops LEA.
if (hasLEAOffset(Offset)) {
NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
return NewMI;
}
@@ -551,11 +551,11 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
if (IsScale1 && !hasLEAOffset(Offset)) {
bool BIK = Base.isKill() && BaseR != IndexR;
TII->copyPhysReg(*MFI, MI, DL, DstR, BaseR, BIK);
- DEBUG(MI.getPrevNode()->dump(););
+ LLVM_DEBUG(MI.getPrevNode()->dump(););
MachineInstr *NewMI =
BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Index);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
return NewMI;
}
// lea offset(%base,%index,scale), %dst =>
@@ -567,10 +567,10 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
.add(Index)
.add(Offset)
.add(Segment);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
NewMI = BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Base);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
return NewMI;
}
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index e31a47c5224..c18739254b0 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -338,8 +338,8 @@ static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
}
bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
- << " **********\n");
+ LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
+ << " **********\n");
auto &Subtarget = MF.getSubtarget<X86Subtarget>();
MRI = &MF.getRegInfo();
@@ -381,8 +381,9 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
// instructions. Until we have a motivating test case and fail to avoid
// it by changing other parts of LLVM's lowering, we refuse to handle
// this complex case here.
- DEBUG(dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
- CopyDefI.dump());
+ LLVM_DEBUG(
+ dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
+ CopyDefI.dump());
report_fatal_error(
"Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
}
@@ -406,7 +407,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
auto TestPos = CopyDefI.getIterator();
DebugLoc TestLoc = CopyDefI.getDebugLoc();
- DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());
+ LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());
// Scan for usage of newly set EFLAGS so we can rewrite them. We just buffer
// jumps because their usage is very constrained.
@@ -443,7 +444,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
// other lowering transformation could induce this to happen, we do
// a hard check even in non-debug builds here.
if (&TestMBB != &UseMBB && !MDT->dominates(&TestMBB, &UseMBB)) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "ERROR: Encountered use that is not dominated by our test "
"basic block! Rewriting this would require inserting PHI "
"nodes to track the flag state across the CFG.\n\nTest "
@@ -477,7 +478,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
continue;
}
- DEBUG(dbgs() << " Rewriting use: "; MI.dump());
+ LLVM_DEBUG(dbgs() << " Rewriting use: "; MI.dump());
// Check the kill flag before we rewrite as that may change it.
if (FlagUse->isKill())
@@ -570,7 +571,8 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
if (MI.getOpcode() == TargetOpcode::COPY &&
(MI.getOperand(0).getReg() == X86::EFLAGS ||
MI.getOperand(1).getReg() == X86::EFLAGS)) {
- DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: "; MI.dump());
+ LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
+ MI.dump());
llvm_unreachable("Unlowered EFLAGS copy!");
}
#endif
@@ -608,7 +610,7 @@ unsigned X86FlagsCopyLoweringPass::promoteCondToReg(
auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
TII->get(X86::getSETFromCond(Cond)), Reg);
(void)SetI;
- DEBUG(dbgs() << " save cond: "; SetI->dump());
+ LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump());
++NumSetCCsInserted;
return Reg;
}
@@ -633,7 +635,7 @@ void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
auto TestI =
BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
(void)TestI;
- DEBUG(dbgs() << " test cond: "; TestI->dump());
+ LLVM_DEBUG(dbgs() << " test cond: "; TestI->dump());
++NumTestsInserted;
}
@@ -685,7 +687,7 @@ void X86FlagsCopyLoweringPass::rewriteArithmetic(
.addReg(CondReg)
.addImm(Addend);
(void)AddI;
- DEBUG(dbgs() << " add cond: "; AddI->dump());
+ LLVM_DEBUG(dbgs() << " add cond: "; AddI->dump());
++NumAddsInserted;
FlagUse.setIsKill(true);
}
@@ -715,7 +717,7 @@ void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
Inverted ? X86::COND_E : X86::COND_NE, TRI->getRegSizeInBits(CMovRC) / 8,
!CMovI.memoperands_empty())));
FlagUse.setIsKill(true);
- DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
+ LLVM_DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
}
void X86FlagsCopyLoweringPass::rewriteCondJmp(
@@ -739,7 +741,7 @@ void X86FlagsCopyLoweringPass::rewriteCondJmp(
X86::GetCondBranchFromCond(Inverted ? X86::COND_E : X86::COND_NE)));
const int ImplicitEFLAGSOpIdx = 1;
JmpI.getOperand(ImplicitEFLAGSOpIdx).setIsKill(true);
- DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
+ LLVM_DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
}
void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp
index ffdb949918f..ae748901164 100644
--- a/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -435,7 +435,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
PrevMI = &*std::prev(I);
++NumFP; // Keep track of # of pseudo instrs
- DEBUG(dbgs() << "\nFPInst:\t" << MI);
+ LLVM_DEBUG(dbgs() << "\nFPInst:\t" << MI);
// Get dead variables list now because the MI pointer may be deleted as part
// of processing!
@@ -465,13 +465,13 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
// is in the clobber list and marked dead might not be live on the stack.
static_assert(X86::FP7 - X86::FP0 == 7, "sequential FP regnumbers");
if (Reg >= X86::FP0 && Reg <= X86::FP6 && isLive(Reg-X86::FP0)) {
- DEBUG(dbgs() << "Register FP#" << Reg-X86::FP0 << " is dead!\n");
+ LLVM_DEBUG(dbgs() << "Register FP#" << Reg - X86::FP0 << " is dead!\n");
freeStackSlotAfter(I, Reg-X86::FP0);
}
}
// Print out all of the instructions expanded to if -debug
- DEBUG({
+ LLVM_DEBUG({
MachineBasicBlock::iterator PrevI = PrevMI;
if (I == PrevI) {
dbgs() << "Just deleted pseudo instruction\n";
@@ -500,15 +500,15 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
/// setupBlockStack - Use the live bundles to set up our model of the stack
/// to match predecessors' live out stack.
void FPS::setupBlockStack() {
- DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB)
- << " derived from " << MBB->getName() << ".\n");
+ LLVM_DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB)
+ << " derived from " << MBB->getName() << ".\n");
StackTop = 0;
// Get the live-in bundle for MBB.
const LiveBundle &Bundle =
LiveBundles[Bundles->getBundle(MBB->getNumber(), false)];
if (!Bundle.Mask) {
- DEBUG(dbgs() << "Block has no FP live-ins.\n");
+ LLVM_DEBUG(dbgs() << "Block has no FP live-ins.\n");
return;
}
@@ -517,8 +517,8 @@ void FPS::setupBlockStack() {
// Push the fixed live-in registers.
for (unsigned i = Bundle.FixCount; i > 0; --i) {
- DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %fp"
- << unsigned(Bundle.FixStack[i-1]) << '\n');
+ LLVM_DEBUG(dbgs() << "Live-in st(" << (i - 1) << "): %fp"
+ << unsigned(Bundle.FixStack[i - 1]) << '\n');
pushReg(Bundle.FixStack[i-1]);
}
@@ -527,7 +527,7 @@ void FPS::setupBlockStack() {
// to be revived at the end of a short block. It might save a few instrs.
unsigned Mask = calcLiveInMask(MBB, /*RemoveFPs=*/true);
adjustLiveRegs(Mask, MBB->begin());
- DEBUG(MBB->dump());
+ LLVM_DEBUG(MBB->dump());
}
/// finishBlockStack - Revive live-outs that are implicitly defined out of
@@ -539,8 +539,8 @@ void FPS::finishBlockStack() {
if (MBB->succ_empty())
return;
- DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB)
- << " derived from " << MBB->getName() << ".\n");
+ LLVM_DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB)
+ << " derived from " << MBB->getName() << ".\n");
// Get MBB's live-out bundle.
unsigned BundleIdx = Bundles->getBundle(MBB->getNumber(), true);
@@ -552,18 +552,18 @@ void FPS::finishBlockStack() {
adjustLiveRegs(Bundle.Mask, Term);
if (!Bundle.Mask) {
- DEBUG(dbgs() << "No live-outs.\n");
+ LLVM_DEBUG(dbgs() << "No live-outs.\n");
return;
}
// Has the stack order been fixed yet?
- DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
+ LLVM_DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
if (Bundle.isFixed()) {
- DEBUG(dbgs() << "Shuffling stack to match.\n");
+ LLVM_DEBUG(dbgs() << "Shuffling stack to match.\n");
shuffleStackTop(Bundle.FixStack, Bundle.FixCount, Term);
} else {
// Not fixed yet, we get to choose.
- DEBUG(dbgs() << "Fixing stack order now.\n");
+ LLVM_DEBUG(dbgs() << "Fixing stack order now.\n");
Bundle.FixCount = StackTop;
for (unsigned i = 0; i < StackTop; ++i)
Bundle.FixStack[i] = getStackEntry(i);
@@ -895,7 +895,8 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
while (Kills && Defs) {
unsigned KReg = countTrailingZeros(Kills);
unsigned DReg = countTrailingZeros(Defs);
- DEBUG(dbgs() << "Renaming %fp" << KReg << " as imp %fp" << DReg << "\n");
+ LLVM_DEBUG(dbgs() << "Renaming %fp" << KReg << " as imp %fp" << DReg
+ << "\n");
std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
std::swap(RegMap[KReg], RegMap[DReg]);
Kills &= ~(1 << KReg);
@@ -909,7 +910,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
unsigned KReg = getStackEntry(0);
if (!(Kills & (1 << KReg)))
break;
- DEBUG(dbgs() << "Popping %fp" << KReg << "\n");
+ LLVM_DEBUG(dbgs() << "Popping %fp" << KReg << "\n");
popStackAfter(I2);
Kills &= ~(1 << KReg);
}
@@ -918,7 +919,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Manually kill the rest.
while (Kills) {
unsigned KReg = countTrailingZeros(Kills);
- DEBUG(dbgs() << "Killing %fp" << KReg << "\n");
+ LLVM_DEBUG(dbgs() << "Killing %fp" << KReg << "\n");
freeStackSlotBefore(I, KReg);
Kills &= ~(1 << KReg);
}
@@ -926,14 +927,14 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Load zeros for all the imp-defs.
while(Defs) {
unsigned DReg = countTrailingZeros(Defs);
- DEBUG(dbgs() << "Defining %fp" << DReg << " as 0\n");
+ LLVM_DEBUG(dbgs() << "Defining %fp" << DReg << " as 0\n");
BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
pushReg(DReg);
Defs &= ~(1 << DReg);
}
// Now we should have the correct registers live.
- DEBUG(dumpStack());
+ LLVM_DEBUG(dumpStack());
assert(StackTop == countPopulation(Mask) && "Live count mismatch");
}
@@ -956,7 +957,7 @@ void FPS::shuffleStackTop(const unsigned char *FixStack,
if (FixCount > 0)
moveToTop(OldReg, I);
}
- DEBUG(dumpStack());
+ LLVM_DEBUG(dumpStack());
}
@@ -1468,7 +1469,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
case TargetOpcode::IMPLICIT_DEF: {
// All FP registers must be explicitly defined, so load a 0 instead.
unsigned Reg = MI.getOperand(0).getReg() - X86::FP0;
- DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
+ LLVM_DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
BuildMI(*MBB, Inst, MI.getDebugLoc(), TII->get(X86::LD_F0));
pushReg(Reg);
break;
@@ -1573,8 +1574,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
MI.emitError("implicitly popped regs must be last on the x87 stack");
unsigned NumSTPopped = countTrailingOnes(STPopped);
- DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
- << NumSTPopped << ", and defines " << NumSTDefs << " regs.\n");
+ LLVM_DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
+ << NumSTPopped << ", and defines " << NumSTDefs
+ << " regs.\n");
#ifndef NDEBUG
// If any input operand uses constraint "f", all output register
@@ -1612,7 +1614,10 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
STUsesArray[I] = I;
shuffleStackTop(STUsesArray, NumSTUses, Inst);
- DEBUG({dbgs() << "Before asm: "; dumpStack();});
+ LLVM_DEBUG({
+ dbgs() << "Before asm: ";
+ dumpStack();
+ });
// With the stack layout fixed, rewrite the FP registers.
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -1660,7 +1665,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// We want to leave I pointing to the previous instruction, but what if we
// just erased the first instruction?
if (Inst == MBB->begin()) {
- DEBUG(dbgs() << "Inserting dummy KILL\n");
+ LLVM_DEBUG(dbgs() << "Inserting dummy KILL\n");
Inst = BuildMI(*MBB, Inst, DebugLoc(), TII->get(TargetOpcode::KILL));
} else
--Inst;
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 73455912d2c..c041d0212b0 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1308,10 +1308,10 @@ static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned Depth) {
SDLoc dl(N);
- DEBUG({
- dbgs() << "MatchAddress: ";
- AM.dump(CurDAG);
- });
+ LLVM_DEBUG({
+ dbgs() << "MatchAddress: ";
+ AM.dump(CurDAG);
+ });
// Limit recursion.
if (Depth > 5)
return matchAddressBase(N, AM);
@@ -2744,7 +2744,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
SDLoc dl(Node);
if (Node->isMachineOpcode()) {
- DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
Node->setNodeId(-1);
return; // Already selected.
}
@@ -3025,7 +3025,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
InFlag = ResLo.getValue(2);
}
ReplaceUses(SDValue(Node, 0), ResLo);
- DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
// Copy the high half of the result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
@@ -3036,7 +3037,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
InFlag = ResHi.getValue(2);
}
ReplaceUses(SDValue(Node, 1), ResHi);
- DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
CurDAG->RemoveDeadNode(Node);
@@ -3198,7 +3200,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
}
ReplaceUses(SDValue(Node, 1), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
// Copy the division (low) result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
@@ -3206,7 +3209,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
LoReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 0), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
// Copy the remainder (high) result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
@@ -3214,7 +3218,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
HiReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 1), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
CurDAG->RemoveDeadNode(Node);
return;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 31ce4bdc767..fe86cf0e8ff 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6847,8 +6847,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
report_fatal_error("Unable to copy EFLAGS physical register!");
}
- DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
- << " to " << RI.getName(DestReg) << '\n');
+ LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
+ << RI.getName(DestReg) << '\n');
llvm_unreachable("Cannot emit physreg copy instruction");
}
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 4996288a193..36d36cb11d7 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -296,8 +296,8 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
}
@@ -333,7 +333,7 @@ bool X86InstructionSelector::select(MachineInstr &I,
if (selectImpl(I, CoverageInfo))
return true;
- DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
+ LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
// TODO: This should be implemented by tblgen.
switch (I.getOpcode()) {
@@ -503,7 +503,7 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
- DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
return false;
}
@@ -675,8 +675,8 @@ bool X86InstructionSelector::selectTurnIntoCOPY(
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
I.setDesc(TII.get(X86::COPY));
@@ -700,8 +700,8 @@ bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
if (DstRB.getID() != SrcRB.getID()) {
- DEBUG(dbgs() << TII.getName(I.getOpcode())
- << " input/output on different banks\n");
+ LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
+ << " input/output on different banks\n");
return false;
}
@@ -738,8 +738,8 @@ bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << "\n");
return false;
}
@@ -792,8 +792,8 @@ bool X86InstructionSelector::selectZext(MachineInstr &I,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
@@ -894,8 +894,8 @@ bool X86InstructionSelector::selectAnyext(MachineInstr &I,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
@@ -1111,7 +1111,7 @@ bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
return false;
}
@@ -1148,7 +1148,7 @@ bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
return false;
}
@@ -1392,8 +1392,8 @@ bool X86InstructionSelector::selectImplicitDefOrPHI(
const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
}
@@ -1544,8 +1544,8 @@ bool X86InstructionSelector::selectSDiv(MachineInstr &I,
if (!RBI.constrainGenericRegister(DividentReg, *RegRC, MRI) ||
!RBI.constrainGenericRegister(DiviserReg, *RegRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 6329375720b..42db51b3cf0 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -541,7 +541,7 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
MRI->clearKillFlags(DefMI->getOperand(0).getReg());
++NumSubstLEAs;
- DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););
+ LLVM_DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););
// Change instruction operands.
MI.getOperand(MemOpNo + X86::AddrBaseReg)
@@ -553,7 +553,7 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
MI.getOperand(MemOpNo + X86::AddrSegmentReg)
.ChangeToRegister(X86::NoRegister, false);
- DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););
+ LLVM_DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););
Changed = true;
}
@@ -649,7 +649,8 @@ bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
MRI->clearKillFlags(FirstVReg);
++NumRedundantLEAs;
- DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: "; Last.dump(););
+ LLVM_DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: ";
+ Last.dump(););
// By this moment, all of the Last LEA's uses must be replaced. So we
// can freely remove it.
diff --git a/llvm/lib/Target/X86/X86RetpolineThunks.cpp b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
index f37c3fbc399..250deb3523b 100644
--- a/llvm/lib/Target/X86/X86RetpolineThunks.cpp
+++ b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
@@ -91,7 +91,7 @@ bool X86RetpolineThunks::doInitialization(Module &M) {
}
bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << getPassName() << '\n');
+ LLVM_DEBUG(dbgs() << getPassName() << '\n');
TM = &MF.getTarget();;
STI = &MF.getSubtarget<X86Subtarget>();
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
index a137cb4ed38..963016548f2 100644
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -231,9 +231,9 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
else
llvm_unreachable("Not 16-bit, 32-bit or 64-bit mode!");
- DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
- << ", 3DNowLevel " << X863DNowLevel
- << ", 64bit " << HasX86_64 << "\n");
+ LLVM_DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
+ << ", 3DNowLevel " << X863DNowLevel << ", 64bit "
+ << HasX86_64 << "\n");
assert((!In64BitMode || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
diff --git a/llvm/lib/Target/X86/X86VZeroUpper.cpp b/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 224262830b1..f882b760927 100644
--- a/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -264,8 +264,8 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
}
}
- DEBUG(dbgs() << "MBB #" << MBB.getNumber() << " exit state: "
- << getBlockExitStateName(CurState) << '\n');
+ LLVM_DEBUG(dbgs() << "MBB #" << MBB.getNumber() << " exit state: "
+ << getBlockExitStateName(CurState) << '\n');
if (CurState == EXITS_DIRTY)
for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
@@ -341,8 +341,8 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
// successors need to be added to the worklist (if they haven't been
// already).
if (BBState.ExitState == PASS_THROUGH) {
- DEBUG(dbgs() << "MBB #" << MBB.getNumber()
- << " was Pass-through, is now Dirty-out.\n");
+ LLVM_DEBUG(dbgs() << "MBB #" << MBB.getNumber()
+ << " was Pass-through, is now Dirty-out.\n");
for (MachineBasicBlock *Succ : MBB.successors())
addDirtySuccessor(*Succ);
}
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index 6d6dedc6073..dde9c734f49 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -695,10 +695,10 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
Worklist.push_back(BB);
continue;
}
- DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
- << " InitialState=" << InitialState << '\n');
- DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
- << " FinalState=" << FinalState << '\n');
+ LLVM_DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " InitialState=" << InitialState << '\n');
+ LLVM_DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " FinalState=" << FinalState << '\n');
InitialStates.insert({BB, InitialState});
FinalStates.insert({BB, FinalState});
}
@@ -743,8 +743,8 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
continue;
int PrevState = getPredState(FinalStates, F, ParentBaseState, BB);
- DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
- << " PrevState=" << PrevState << '\n');
+ LLVM_DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " PrevState=" << PrevState << '\n');
for (Instruction &I : *BB) {
CallSite CS(&I);