author    | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2018-05-14 12:53:11 +0000
committer | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2018-05-14 12:53:11 +0000
commit    | d34e60ca8532511acb8c93ef26297e349fbec86a (patch)
tree      | 1a095bc8694498d94232e81b95c1da05d462d3ec /llvm/lib/Target/AArch64
parent    | affbc99bea94e77f7ebccd8ba887e33051bd04ee (diff)
Rename DEBUG macro to LLVM_DEBUG.
The DEBUG() macro is very generic so it might clash with other projects.
The renaming was done as follows:
- git grep -l 'DEBUG' | xargs sed -i 's/\bDEBUG\s\?(/LLVM_DEBUG(/g'
- git diff -U0 master | ../clang/tools/clang-format/clang-format-diff.py -i -p1 -style LLVM
- Manual change to APInt
- Manually change DOCS as the regex doesn't match it.
In the transition period the DEBUG() macro is still present and aliased
to the LLVM_DEBUG() one.
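
For readers coming to this after the rename: LLVM_DEBUG behaves exactly like the old DEBUG. Below is a minimal sketch of the definition and of the transitional alias, abridged from llvm/include/llvm/Support/Debug.h (the real macro routes through DEBUG_WITH_TYPE and compiles away entirely in NDEBUG builds):

```cpp
// Abridged sketch; the actual definition in llvm/Support/Debug.h goes
// through DEBUG_WITH_TYPE(DEBUG_TYPE, X) and is a no-op when NDEBUG is set.
// DEBUG_TYPE must be #defined by the translation unit using the macro.
#define LLVM_DEBUG(X)                                                          \
  do {                                                                         \
    if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(DEBUG_TYPE)) {         \
      X;                                                                       \
    }                                                                          \
  } while (false)

// Transitional alias so existing DEBUG(...) call sites keep compiling:
#define DEBUG(X) LLVM_DEBUG(X)
```

Call sites change only in name, e.g. `LLVM_DEBUG(dbgs() << "...");`, and the output is still gated behind `-debug` / `-debug-only=<DEBUG_TYPE>`.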
Differential Revision: https://reviews.llvm.org/D43624
llvm-svn: 332240
Diffstat (limited to 'llvm/lib/Target/AArch64')
19 files changed, 427 insertions, 390 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp b/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp index 7de5d0ef66b..30232afaf02 100644 --- a/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp +++ b/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp @@ -116,7 +116,7 @@ INITIALIZE_PASS(AArch64A53Fix835769, "aarch64-fix-cortex-a53-835769-pass", bool AArch64A53Fix835769::runOnMachineFunction(MachineFunction &F) { - DEBUG(dbgs() << "***** AArch64A53Fix835769 *****\n"); + LLVM_DEBUG(dbgs() << "***** AArch64A53Fix835769 *****\n"); bool Changed = false; TII = F.getSubtarget().getInstrInfo(); @@ -190,7 +190,8 @@ static void insertNopBeforeInstruction(MachineBasicBlock &MBB, MachineInstr* MI, bool AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) { bool Changed = false; - DEBUG(dbgs() << "Running on MBB: " << MBB << " - scanning instructions...\n"); + LLVM_DEBUG(dbgs() << "Running on MBB: " << MBB + << " - scanning instructions...\n"); // First, scan the basic block, looking for a sequence of 2 instructions // that match the conditions under which the erratum may trigger. @@ -206,17 +207,17 @@ AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) { for (auto &MI : MBB) { MachineInstr *CurrInstr = &MI; - DEBUG(dbgs() << " Examining: " << MI); + LLVM_DEBUG(dbgs() << " Examining: " << MI); if (PrevInstr) { - DEBUG(dbgs() << " PrevInstr: " << *PrevInstr - << " CurrInstr: " << *CurrInstr - << " isFirstInstructionInSequence(PrevInstr): " - << isFirstInstructionInSequence(PrevInstr) << "\n" - << " isSecondInstructionInSequence(CurrInstr): " - << isSecondInstructionInSequence(CurrInstr) << "\n"); + LLVM_DEBUG(dbgs() << " PrevInstr: " << *PrevInstr + << " CurrInstr: " << *CurrInstr + << " isFirstInstructionInSequence(PrevInstr): " + << isFirstInstructionInSequence(PrevInstr) << "\n" + << " isSecondInstructionInSequence(CurrInstr): " + << isSecondInstructionInSequence(CurrInstr) << "\n"); if (isFirstInstructionInSequence(PrevInstr) && isSecondInstructionInSequence(CurrInstr)) { - DEBUG(dbgs() << " ** pattern found at Idx " << Idx << "!\n"); + LLVM_DEBUG(dbgs() << " ** pattern found at Idx " << Idx << "!\n"); Sequences.push_back(CurrInstr); } } @@ -225,8 +226,8 @@ AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) { ++Idx; } - DEBUG(dbgs() << "Scan complete, " << Sequences.size() - << " occurrences of pattern found.\n"); + LLVM_DEBUG(dbgs() << "Scan complete, " << Sequences.size() + << " occurrences of pattern found.\n"); // Then update the basic block, inserting nops between the detected sequences. 
for (auto &MI : Sequences) { diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp index 330237fc10b..a95476b9118 100644 --- a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp +++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp @@ -315,7 +315,7 @@ bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) { return false; bool Changed = false; - DEBUG(dbgs() << "***** AArch64A57FPLoadBalancing *****\n"); + LLVM_DEBUG(dbgs() << "***** AArch64A57FPLoadBalancing *****\n"); MRI = &F.getRegInfo(); TRI = F.getRegInfo().getTargetRegisterInfo(); @@ -330,7 +330,8 @@ bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) { bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) { bool Changed = false; - DEBUG(dbgs() << "Running on MBB: " << MBB << " - scanning instructions...\n"); + LLVM_DEBUG(dbgs() << "Running on MBB: " << MBB + << " - scanning instructions...\n"); // First, scan the basic block producing a set of chains. @@ -343,7 +344,8 @@ bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) { for (auto &MI : MBB) scanInstruction(&MI, Idx++, ActiveChains, AllChains); - DEBUG(dbgs() << "Scan complete, "<< AllChains.size() << " chains created.\n"); + LLVM_DEBUG(dbgs() << "Scan complete, " << AllChains.size() + << " chains created.\n"); // Group the chains into disjoint sets based on their liveness range. This is // a poor-man's version of graph coloring. Ideally we'd create an interference @@ -360,7 +362,7 @@ bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) { for (auto &J : AllChains) if (I != J && I->rangeOverlapsWith(*J)) EC.unionSets(I.get(), J.get()); - DEBUG(dbgs() << "Created " << EC.getNumClasses() << " disjoint sets.\n"); + LLVM_DEBUG(dbgs() << "Created " << EC.getNumClasses() << " disjoint sets.\n"); // Now we assume that every member of an equivalence class interferes // with every other member of that class, and with no members of other classes. @@ -440,7 +442,7 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV, MachineBasicBlock &MBB, int &Parity) { bool Changed = false; - DEBUG(dbgs() << "colorChainSet(): #sets=" << GV.size() << "\n"); + LLVM_DEBUG(dbgs() << "colorChainSet(): #sets=" << GV.size() << "\n"); // Sort by descending size order so that we allocate the most important // sets first. @@ -470,16 +472,18 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV, // But if we really don't care, use the chain's preferred color. C = G->getPreferredColor(); - DEBUG(dbgs() << " - Parity=" << Parity << ", Color=" - << ColorNames[(int)C] << "\n"); + LLVM_DEBUG(dbgs() << " - Parity=" << Parity + << ", Color=" << ColorNames[(int)C] << "\n"); // If we'll need a fixup FMOV, don't bother. Testing has shown that this // happens infrequently and when it does it has at least a 50% chance of // slowing code down instead of speeding it up. 
if (G->requiresFixup() && C != G->getPreferredColor()) { C = G->getPreferredColor(); - DEBUG(dbgs() << " - " << G->str() << " - not worthwhile changing; " - "color remains " << ColorNames[(int)C] << "\n"); + LLVM_DEBUG(dbgs() << " - " << G->str() + << " - not worthwhile changing; " + "color remains " + << ColorNames[(int)C] << "\n"); } Changed |= colorChain(G, C, MBB); @@ -528,17 +532,17 @@ int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C, bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C, MachineBasicBlock &MBB) { bool Changed = false; - DEBUG(dbgs() << " - colorChain(" << G->str() << ", " - << ColorNames[(int)C] << ")\n"); + LLVM_DEBUG(dbgs() << " - colorChain(" << G->str() << ", " + << ColorNames[(int)C] << ")\n"); // Try and obtain a free register of the right class. Without a register // to play with we cannot continue. int Reg = scavengeRegister(G, C, MBB); if (Reg == -1) { - DEBUG(dbgs() << "Scavenging (thus coloring) failed!\n"); + LLVM_DEBUG(dbgs() << "Scavenging (thus coloring) failed!\n"); return false; } - DEBUG(dbgs() << " - Scavenged register: " << printReg(Reg, TRI) << "\n"); + LLVM_DEBUG(dbgs() << " - Scavenged register: " << printReg(Reg, TRI) << "\n"); std::map<unsigned, unsigned> Substs; for (MachineInstr &I : *G) { @@ -586,11 +590,11 @@ bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C, assert(Substs.size() == 0 && "No substitutions should be left active!"); if (G->getKill()) { - DEBUG(dbgs() << " - Kill instruction seen.\n"); + LLVM_DEBUG(dbgs() << " - Kill instruction seen.\n"); } else { // We didn't have a kill instruction, but we didn't seem to need to change // the destination register anyway. - DEBUG(dbgs() << " - Destination register not changed.\n"); + LLVM_DEBUG(dbgs() << " - Destination register not changed.\n"); } return Changed; } @@ -611,8 +615,8 @@ void AArch64A57FPLoadBalancing::scanInstruction( // unit. unsigned DestReg = MI->getOperand(0).getReg(); - DEBUG(dbgs() << "New chain started for register " << printReg(DestReg, TRI) - << " at " << *MI); + LLVM_DEBUG(dbgs() << "New chain started for register " + << printReg(DestReg, TRI) << " at " << *MI); auto G = llvm::make_unique<Chain>(MI, Idx, getColor(DestReg)); ActiveChains[DestReg] = G.get(); @@ -631,8 +635,8 @@ void AArch64A57FPLoadBalancing::scanInstruction( maybeKillChain(MI->getOperand(0), Idx, ActiveChains); if (ActiveChains.find(AccumReg) != ActiveChains.end()) { - DEBUG(dbgs() << "Chain found for accumulator register " - << printReg(AccumReg, TRI) << " in MI " << *MI); + LLVM_DEBUG(dbgs() << "Chain found for accumulator register " + << printReg(AccumReg, TRI) << " in MI " << *MI); // For simplicity we only chain together sequences of MULs/MLAs where the // accumulator register is killed on each instruction. This means we don't @@ -641,7 +645,7 @@ void AArch64A57FPLoadBalancing::scanInstruction( // FIXME: We could extend to handle the non-kill cases for more coverage. if (MI->getOperand(3).isKill()) { // Add to chain. - DEBUG(dbgs() << "Instruction was successfully added to chain.\n"); + LLVM_DEBUG(dbgs() << "Instruction was successfully added to chain.\n"); ActiveChains[AccumReg]->add(MI, Idx, getColor(DestReg)); // Handle cases where the destination is not the same as the accumulator. 
if (DestReg != AccumReg) { @@ -651,13 +655,14 @@ void AArch64A57FPLoadBalancing::scanInstruction( return; } - DEBUG(dbgs() << "Cannot add to chain because accumulator operand wasn't " - << "marked <kill>!\n"); + LLVM_DEBUG( + dbgs() << "Cannot add to chain because accumulator operand wasn't " + << "marked <kill>!\n"); maybeKillChain(MI->getOperand(3), Idx, ActiveChains); } - DEBUG(dbgs() << "Creating new chain for dest register " - << printReg(DestReg, TRI) << "\n"); + LLVM_DEBUG(dbgs() << "Creating new chain for dest register " + << printReg(DestReg, TRI) << "\n"); auto G = llvm::make_unique<Chain>(MI, Idx, getColor(DestReg)); ActiveChains[DestReg] = G.get(); AllChains.push_back(std::move(G)); @@ -685,8 +690,8 @@ maybeKillChain(MachineOperand &MO, unsigned Idx, // If this is a KILL of a current chain, record it. if (MO.isKill() && ActiveChains.find(MO.getReg()) != ActiveChains.end()) { - DEBUG(dbgs() << "Kill seen for chain " << printReg(MO.getReg(), TRI) - << "\n"); + LLVM_DEBUG(dbgs() << "Kill seen for chain " << printReg(MO.getReg(), TRI) + << "\n"); ActiveChains[MO.getReg()]->setKill(MI, Idx, /*Immutable=*/MO.isTied()); } ActiveChains.erase(MO.getReg()); @@ -696,8 +701,8 @@ maybeKillChain(MachineOperand &MO, unsigned Idx, for (auto I = ActiveChains.begin(), E = ActiveChains.end(); I != E;) { if (MO.clobbersPhysReg(I->first)) { - DEBUG(dbgs() << "Kill (regmask) seen for chain " - << printReg(I->first, TRI) << "\n"); + LLVM_DEBUG(dbgs() << "Kill (regmask) seen for chain " + << printReg(I->first, TRI) << "\n"); I->second->setKill(MI, Idx, /*Immutable=*/true); ActiveChains.erase(I++); } else diff --git a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp index 338daecb49e..22b0c1e3b47 100644 --- a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp +++ b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp @@ -277,7 +277,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI, MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(AArch64::COPY), Dst) .addReg(Src, getKillRegState(IsKill)); - DEBUG(dbgs() << " adding copy: " << *MIB); + LLVM_DEBUG(dbgs() << " adding copy: " << *MIB); ++NumCopiesInserted; return MIB; } @@ -286,7 +286,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI, // to its equivalant AdvSIMD scalar instruction. Update inputs and outputs // to be the correct register class, minimizing cross-class copies. void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) { - DEBUG(dbgs() << "Scalar transform: " << MI); + LLVM_DEBUG(dbgs() << "Scalar transform: " << MI); MachineBasicBlock *MBB = MI.getParent(); unsigned OldOpc = MI.getOpcode(); @@ -391,7 +391,7 @@ bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) { // runOnMachineFunction - Pass entry point from PassManager. 
bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) { bool Changed = false; - DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n"); + LLVM_DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n"); if (skipFunction(mf.getFunction())) return false; diff --git a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp index 0a9167edcdb..720323f81d2 100644 --- a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp +++ b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp @@ -380,8 +380,8 @@ static bool handleMiddleInst(const MachineInstr &MI, LOHInfo &DefInfo, static void handleADRP(const MachineInstr &MI, AArch64FunctionInfo &AFI, LOHInfo &Info) { if (Info.LastADRP != nullptr) { - DEBUG(dbgs() << "Adding MCLOH_AdrpAdrp:\n" << '\t' << MI << '\t' - << *Info.LastADRP); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAdrp:\n" + << '\t' << MI << '\t' << *Info.LastADRP); AFI.addLOHDirective(MCLOH_AdrpAdrp, {&MI, Info.LastADRP}); ++NumADRPSimpleCandidate; } @@ -390,48 +390,52 @@ static void handleADRP(const MachineInstr &MI, AArch64FunctionInfo &AFI, if (Info.IsCandidate) { switch (Info.Type) { case MCLOH_AdrpAdd: - DEBUG(dbgs() << "Adding MCLOH_AdrpAdd:\n" << '\t' << MI << '\t' - << *Info.MI0); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAdd:\n" + << '\t' << MI << '\t' << *Info.MI0); AFI.addLOHDirective(MCLOH_AdrpAdd, {&MI, Info.MI0}); ++NumADRSimpleCandidate; break; case MCLOH_AdrpLdr: if (supportLoadFromLiteral(*Info.MI0)) { - DEBUG(dbgs() << "Adding MCLOH_AdrpLdr:\n" << '\t' << MI << '\t' - << *Info.MI0); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdr:\n" + << '\t' << MI << '\t' << *Info.MI0); AFI.addLOHDirective(MCLOH_AdrpLdr, {&MI, Info.MI0}); ++NumADRPToLDR; } break; case MCLOH_AdrpAddLdr: - DEBUG(dbgs() << "Adding MCLOH_AdrpAddLdr:\n" << '\t' << MI << '\t' - << *Info.MI1 << '\t' << *Info.MI0); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAddLdr:\n" + << '\t' << MI << '\t' << *Info.MI1 << '\t' + << *Info.MI0); AFI.addLOHDirective(MCLOH_AdrpAddLdr, {&MI, Info.MI1, Info.MI0}); ++NumADDToLDR; break; case MCLOH_AdrpAddStr: if (Info.MI1 != nullptr) { - DEBUG(dbgs() << "Adding MCLOH_AdrpAddStr:\n" << '\t' << MI << '\t' - << *Info.MI1 << '\t' << *Info.MI0); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAddStr:\n" + << '\t' << MI << '\t' << *Info.MI1 << '\t' + << *Info.MI0); AFI.addLOHDirective(MCLOH_AdrpAddStr, {&MI, Info.MI1, Info.MI0}); ++NumADDToSTR; } break; case MCLOH_AdrpLdrGotLdr: - DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotLdr:\n" << '\t' << MI << '\t' - << *Info.MI1 << '\t' << *Info.MI0); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotLdr:\n" + << '\t' << MI << '\t' << *Info.MI1 << '\t' + << *Info.MI0); AFI.addLOHDirective(MCLOH_AdrpLdrGotLdr, {&MI, Info.MI1, Info.MI0}); ++NumLDRToLDR; break; case MCLOH_AdrpLdrGotStr: - DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotStr:\n" << '\t' << MI << '\t' - << *Info.MI1 << '\t' << *Info.MI0); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotStr:\n" + << '\t' << MI << '\t' << *Info.MI1 << '\t' + << *Info.MI0); AFI.addLOHDirective(MCLOH_AdrpLdrGotStr, {&MI, Info.MI1, Info.MI0}); ++NumLDRToSTR; break; case MCLOH_AdrpLdrGot: - DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGot:\n" << '\t' << MI << '\t' - << *Info.MI0); + LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGot:\n" + << '\t' << MI << '\t' << *Info.MI0); AFI.addLOHDirective(MCLOH_AdrpLdrGot, {&MI, Info.MI0}); break; case MCLOH_AdrpAdrp: @@ -485,8 +489,8 @@ bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) { if (skipFunction(MF.getFunction())) return false; - DEBUG(dbgs() << "********** 
AArch64 Collect LOH **********\n" - << "Looking in function " << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n" + << "Looking in function " << MF.getName() << '\n'); LOHInfo LOHInfos[N_GPR_REGS]; AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>(); diff --git a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp index 30cefbad884..5ae787409ae 100644 --- a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp +++ b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp @@ -201,10 +201,10 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI, I->readsRegister(AArch64::NZCV, TRI)) return false; } - DEBUG(dbgs() << " Replacing instructions:\n "); - DEBUG(DefMI.print(dbgs())); - DEBUG(dbgs() << " "); - DEBUG(MI.print(dbgs())); + LLVM_DEBUG(dbgs() << " Replacing instructions:\n "); + LLVM_DEBUG(DefMI.print(dbgs())); + LLVM_DEBUG(dbgs() << " "); + LLVM_DEBUG(MI.print(dbgs())); NewCmp = convertToFlagSetting(DefMI, IsFlagSetting); NewBr = convertToCondBr(MI); @@ -260,10 +260,10 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI, I->readsRegister(AArch64::NZCV, TRI)) return false; } - DEBUG(dbgs() << " Replacing instructions:\n "); - DEBUG(DefMI.print(dbgs())); - DEBUG(dbgs() << " "); - DEBUG(MI.print(dbgs())); + LLVM_DEBUG(dbgs() << " Replacing instructions:\n "); + LLVM_DEBUG(DefMI.print(dbgs())); + LLVM_DEBUG(dbgs() << " "); + LLVM_DEBUG(MI.print(dbgs())); NewCmp = convertToFlagSetting(DefMI, IsFlagSetting); NewBr = convertToCondBr(MI); @@ -275,10 +275,10 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI, (void)NewCmp; (void)NewBr; assert(NewCmp && NewBr && "Expected new instructions."); - DEBUG(dbgs() << " with instruction:\n "); - DEBUG(NewCmp->print(dbgs())); - DEBUG(dbgs() << " "); - DEBUG(NewBr->print(dbgs())); + LLVM_DEBUG(dbgs() << " with instruction:\n "); + LLVM_DEBUG(NewCmp->print(dbgs())); + LLVM_DEBUG(dbgs() << " "); + LLVM_DEBUG(NewBr->print(dbgs())); // If this was a flag setting version of the instruction, we use the original // instruction by just clearing the dead marked on the implicit-def of NCZV. 
@@ -293,8 +293,9 @@ bool AArch64CondBrTuning::runOnMachineFunction(MachineFunction &MF) { if (skipFunction(MF.getFunction())) return false; - DEBUG(dbgs() << "********** AArch64 Conditional Branch Tuning **********\n" - << "********** Function: " << MF.getName() << '\n'); + LLVM_DEBUG( + dbgs() << "********** AArch64 Conditional Branch Tuning **********\n" + << "********** Function: " << MF.getName() << '\n'); TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo()); TRI = MF.getSubtarget().getRegisterInfo(); diff --git a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp index d14bde33d94..5064762b9f7 100644 --- a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp @@ -173,13 +173,14 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare( case AArch64::ADDSXri: { unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm()); if (!I->getOperand(2).isImm()) { - DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n'); + LLVM_DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n'); return nullptr; } else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) { - DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I << '\n'); + LLVM_DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I + << '\n'); return nullptr; } else if (!MRI->use_empty(I->getOperand(0).getReg())) { - DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n'); + LLVM_DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n'); return nullptr; } return &*I; @@ -207,7 +208,8 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare( return nullptr; } } - DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n'); + LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) + << '\n'); return nullptr; } @@ -325,8 +327,8 @@ bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI, } bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n" - << "********** Function: " << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n" + << "********** Function: " << MF.getName() << '\n'); if (skipFunction(MF.getFunction())) return false; @@ -384,15 +386,15 @@ bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) { const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm(); const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm(); - DEBUG(dbgs() << "Head branch:\n"); - DEBUG(dbgs() << "\tcondition: " - << AArch64CC::getCondCodeName(HeadCmp) << '\n'); - DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n'); + LLVM_DEBUG(dbgs() << "Head branch:\n"); + LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp) + << '\n'); + LLVM_DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n'); - DEBUG(dbgs() << "True branch:\n"); - DEBUG(dbgs() << "\tcondition: " - << AArch64CC::getCondCodeName(TrueCmp) << '\n'); - DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n'); + LLVM_DEBUG(dbgs() << "True branch:\n"); + LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(TrueCmp) + << '\n'); + LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n'); if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) || (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) && diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp 
b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp index 9ff2472dbec..8176b6fb269 100644 --- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -311,7 +311,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { return &*I; } ++NumCmpTermRejs; - DEBUG(dbgs() << "Flags not used by terminator: " << *I); + LLVM_DEBUG(dbgs() << "Flags not used by terminator: " << *I); return nullptr; } @@ -329,7 +329,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { // Check that the immediate operand is within range, ccmp wants a uimm5. // Rd = SUBSri Rn, imm, shift if (I->getOperand(3).getImm() || !isUInt<5>(I->getOperand(2).getImm())) { - DEBUG(dbgs() << "Immediate out of range for ccmp: " << *I); + LLVM_DEBUG(dbgs() << "Immediate out of range for ccmp: " << *I); ++NumImmRangeRejs; return nullptr; } @@ -340,7 +340,8 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { case AArch64::ADDSXrr: if (isDeadDef(I->getOperand(0).getReg())) return &*I; - DEBUG(dbgs() << "Can't convert compare with live destination: " << *I); + LLVM_DEBUG(dbgs() << "Can't convert compare with live destination: " + << *I); ++NumLiveDstRejs; return nullptr; case AArch64::FCMPSrr: @@ -358,18 +359,19 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { // The ccmp doesn't produce exactly the same flags as the original // compare, so reject the transform if there are uses of the flags // besides the terminators. - DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I); + LLVM_DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I); ++NumMultNZCVUses; return nullptr; } if (PRI.Defined || PRI.Clobbered) { - DEBUG(dbgs() << "Not convertible compare: " << *I); + LLVM_DEBUG(dbgs() << "Not convertible compare: " << *I); ++NumUnknNZCVDefs; return nullptr; } } - DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n'); + LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) + << '\n'); return nullptr; } @@ -383,7 +385,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB, // Reject any live-in physregs. It's probably NZCV/EFLAGS, and very hard to // get right. if (!MBB->livein_empty()) { - DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n"); + LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n"); return false; } @@ -396,14 +398,14 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB, continue; if (++InstrCount > BlockInstrLimit && !Stress) { - DEBUG(dbgs() << printMBBReference(*MBB) << " has more than " - << BlockInstrLimit << " instructions.\n"); + LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has more than " + << BlockInstrLimit << " instructions.\n"); return false; } // There shouldn't normally be any phis in a single-predecessor block. if (I.isPHI()) { - DEBUG(dbgs() << "Can't hoist: " << I); + LLVM_DEBUG(dbgs() << "Can't hoist: " << I); return false; } @@ -411,20 +413,20 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB, // speculate GOT or constant pool loads that are guaranteed not to trap, // but we don't support that for now. if (I.mayLoad()) { - DEBUG(dbgs() << "Won't speculate load: " << I); + LLVM_DEBUG(dbgs() << "Won't speculate load: " << I); return false; } // We never speculate stores, so an AA pointer isn't necessary. 
bool DontMoveAcrossStore = true; if (!I.isSafeToMove(nullptr, DontMoveAcrossStore)) { - DEBUG(dbgs() << "Can't speculate: " << I); + LLVM_DEBUG(dbgs() << "Can't speculate: " << I); return false; } // Only CmpMI is allowed to clobber the flags. if (&I != CmpMI && I.modifiesRegister(AArch64::NZCV, TRI)) { - DEBUG(dbgs() << "Clobbers flags: " << I); + LLVM_DEBUG(dbgs() << "Clobbers flags: " << I); return false; } } @@ -458,9 +460,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { return false; // The CFG topology checks out. - DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> " - << printMBBReference(*CmpBB) << " -> " - << printMBBReference(*Tail) << '\n'); + LLVM_DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> " + << printMBBReference(*CmpBB) << " -> " + << printMBBReference(*Tail) << '\n'); ++NumConsidered; // Tail is allowed to have many predecessors, but we can't handle PHIs yet. @@ -470,13 +472,13 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { // always be safe to sink the ccmp down to immediately before the CmpBB // terminators. if (!trivialTailPHIs()) { - DEBUG(dbgs() << "Can't handle phis in Tail.\n"); + LLVM_DEBUG(dbgs() << "Can't handle phis in Tail.\n"); ++NumPhiRejs; return false; } if (!Tail->livein_empty()) { - DEBUG(dbgs() << "Can't handle live-in physregs in Tail.\n"); + LLVM_DEBUG(dbgs() << "Can't handle live-in physregs in Tail.\n"); ++NumPhysRejs; return false; } @@ -484,13 +486,13 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { // CmpBB should never have PHIs since Head is its only predecessor. // FIXME: Clean them up if it happens. if (!CmpBB->empty() && CmpBB->front().isPHI()) { - DEBUG(dbgs() << "Can't handle phis in CmpBB.\n"); + LLVM_DEBUG(dbgs() << "Can't handle phis in CmpBB.\n"); ++NumPhi2Rejs; return false; } if (!CmpBB->livein_empty()) { - DEBUG(dbgs() << "Can't handle live-in physregs in CmpBB.\n"); + LLVM_DEBUG(dbgs() << "Can't handle live-in physregs in CmpBB.\n"); ++NumPhysRejs; return false; } @@ -499,7 +501,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { HeadCond.clear(); MachineBasicBlock *TBB = nullptr, *FBB = nullptr; if (TII->analyzeBranch(*Head, TBB, FBB, HeadCond)) { - DEBUG(dbgs() << "Head branch not analyzable.\n"); + LLVM_DEBUG(dbgs() << "Head branch not analyzable.\n"); ++NumHeadBranchRejs; return false; } @@ -507,13 +509,14 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { // This is weird, probably some sort of degenerate CFG, or an edge to a // landing pad. 
if (!TBB || HeadCond.empty()) { - DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch in Head.\n"); + LLVM_DEBUG( + dbgs() << "AnalyzeBranch didn't find conditional branch in Head.\n"); ++NumHeadBranchRejs; return false; } if (!parseCond(HeadCond, HeadCmpBBCC)) { - DEBUG(dbgs() << "Unsupported branch type on Head\n"); + LLVM_DEBUG(dbgs() << "Unsupported branch type on Head\n"); ++NumHeadBranchRejs; return false; } @@ -527,19 +530,20 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { CmpBBCond.clear(); TBB = FBB = nullptr; if (TII->analyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) { - DEBUG(dbgs() << "CmpBB branch not analyzable.\n"); + LLVM_DEBUG(dbgs() << "CmpBB branch not analyzable.\n"); ++NumCmpBranchRejs; return false; } if (!TBB || CmpBBCond.empty()) { - DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch in CmpBB.\n"); + LLVM_DEBUG( + dbgs() << "AnalyzeBranch didn't find conditional branch in CmpBB.\n"); ++NumCmpBranchRejs; return false; } if (!parseCond(CmpBBCond, CmpBBTailCC)) { - DEBUG(dbgs() << "Unsupported branch type on CmpBB\n"); + LLVM_DEBUG(dbgs() << "Unsupported branch type on CmpBB\n"); ++NumCmpBranchRejs; return false; } @@ -547,9 +551,10 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { if (TBB != Tail) CmpBBTailCC = AArch64CC::getInvertedCondCode(CmpBBTailCC); - DEBUG(dbgs() << "Head->CmpBB on " << AArch64CC::getCondCodeName(HeadCmpBBCC) - << ", CmpBB->Tail on " << AArch64CC::getCondCodeName(CmpBBTailCC) - << '\n'); + LLVM_DEBUG(dbgs() << "Head->CmpBB on " + << AArch64CC::getCondCodeName(HeadCmpBBCC) + << ", CmpBB->Tail on " + << AArch64CC::getCondCodeName(CmpBBTailCC) << '\n'); CmpMI = findConvertibleCompare(CmpBB); if (!CmpMI) @@ -563,9 +568,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { } void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) { - DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into " - << printMBBReference(*Head) << ":\n" - << *CmpBB); + LLVM_DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into " + << printMBBReference(*Head) << ":\n" + << *CmpBB); // All CmpBB instructions are moved into Head, and CmpBB is deleted. // Update the CFG first. @@ -710,7 +715,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) { RemovedBlocks.push_back(CmpBB); CmpBB->eraseFromParent(); - DEBUG(dbgs() << "Result:\n" << *Head); + LLVM_DEBUG(dbgs() << "Result:\n" << *Head); ++NumConverted; } @@ -860,13 +865,13 @@ bool AArch64ConditionalCompares::shouldConvert() { // If code size is the main concern if (MinSize) { int CodeSizeDelta = CmpConv.expectedCodeSizeDelta(); - DEBUG(dbgs() << "Code size delta: " << CodeSizeDelta << '\n'); + LLVM_DEBUG(dbgs() << "Code size delta: " << CodeSizeDelta << '\n'); // If we are minimizing the code size, do the conversion whatever // the cost is. 
if (CodeSizeDelta < 0) return true; if (CodeSizeDelta > 0) { - DEBUG(dbgs() << "Code size is increasing, give up on this one.\n"); + LLVM_DEBUG(dbgs() << "Code size is increasing, give up on this one.\n"); return false; } // CodeSizeDelta == 0, continue with the regular heuristics @@ -885,24 +890,24 @@ bool AArch64ConditionalCompares::shouldConvert() { Trace.getInstrCycles(*CmpConv.Head->getFirstTerminator()).Depth; unsigned CmpBBDepth = Trace.getInstrCycles(*CmpConv.CmpBB->getFirstTerminator()).Depth; - DEBUG(dbgs() << "Head depth: " << HeadDepth - << "\nCmpBB depth: " << CmpBBDepth << '\n'); + LLVM_DEBUG(dbgs() << "Head depth: " << HeadDepth + << "\nCmpBB depth: " << CmpBBDepth << '\n'); if (CmpBBDepth > HeadDepth + DelayLimit) { - DEBUG(dbgs() << "Branch delay would be larger than " << DelayLimit - << " cycles.\n"); + LLVM_DEBUG(dbgs() << "Branch delay would be larger than " << DelayLimit + << " cycles.\n"); return false; } // Check the resource depth at the bottom of CmpBB - these instructions will // be speculated. unsigned ResDepth = Trace.getResourceDepth(true); - DEBUG(dbgs() << "Resources: " << ResDepth << '\n'); + LLVM_DEBUG(dbgs() << "Resources: " << ResDepth << '\n'); // Heuristic: The speculatively executed instructions must all be able to // merge into the Head block. The Head critical path should dominate the // resource cost of the speculated instructions. if (ResDepth > HeadDepth) { - DEBUG(dbgs() << "Too many instructions to speculate.\n"); + LLVM_DEBUG(dbgs() << "Too many instructions to speculate.\n"); return false; } return true; @@ -922,8 +927,8 @@ bool AArch64ConditionalCompares::tryConvert(MachineBasicBlock *MBB) { } bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n" - << "********** Function: " << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n" + << "********** Function: " << MF.getName() << '\n'); if (skipFunction(MF.getFunction())) return false; diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp index 8e7e740da6f..2ba10d25e93 100644 --- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp +++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp @@ -136,18 +136,21 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock( // We need to skip this instruction because while it appears to have a // dead def it uses a frame index which might expand into a multi // instruction sequence during EPI. - DEBUG(dbgs() << " Ignoring, operand is frame index\n"); + LLVM_DEBUG(dbgs() << " Ignoring, operand is frame index\n"); continue; } if (MI.definesRegister(AArch64::XZR) || MI.definesRegister(AArch64::WZR)) { // It is not allowed to write to the same register (not even the zero // register) twice in a single instruction. 
- DEBUG(dbgs() << " Ignoring, XZR or WZR already used by the instruction\n"); + LLVM_DEBUG( + dbgs() + << " Ignoring, XZR or WZR already used by the instruction\n"); continue; } if (shouldSkip(MI, MF)) { - DEBUG(dbgs() << " Ignoring, Atomic instruction with acquire semantics using WZR/XZR\n"); + LLVM_DEBUG(dbgs() << " Ignoring, Atomic instruction with acquire " + "semantics using WZR/XZR\n"); continue; } @@ -163,30 +166,30 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock( (!MO.isDead() && !MRI->use_nodbg_empty(Reg))) continue; assert(!MO.isImplicit() && "Unexpected implicit def!"); - DEBUG(dbgs() << " Dead def operand #" << I << " in:\n "; - MI.print(dbgs())); + LLVM_DEBUG(dbgs() << " Dead def operand #" << I << " in:\n "; + MI.print(dbgs())); // Be careful not to change the register if it's a tied operand. if (MI.isRegTiedToUseOperand(I)) { - DEBUG(dbgs() << " Ignoring, def is tied operand.\n"); + LLVM_DEBUG(dbgs() << " Ignoring, def is tied operand.\n"); continue; } const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI, MF); unsigned NewReg; if (RC == nullptr) { - DEBUG(dbgs() << " Ignoring, register is not a GPR.\n"); + LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n"); continue; } else if (RC->contains(AArch64::WZR)) NewReg = AArch64::WZR; else if (RC->contains(AArch64::XZR)) NewReg = AArch64::XZR; else { - DEBUG(dbgs() << " Ignoring, register is not a GPR.\n"); + LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n"); continue; } - DEBUG(dbgs() << " Replacing with zero register. New:\n "); + LLVM_DEBUG(dbgs() << " Replacing with zero register. New:\n "); MO.setReg(NewReg); MO.setIsDead(); - DEBUG(MI.print(dbgs())); + LLVM_DEBUG(MI.print(dbgs())); ++NumDeadDefsReplaced; Changed = true; // Only replace one dead register, see check for zero register above. 
@@ -204,7 +207,7 @@ bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) { TRI = MF.getSubtarget().getRegisterInfo(); TII = MF.getSubtarget().getInstrInfo(); MRI = &MF.getRegInfo(); - DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n"); + LLVM_DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n"); Changed = false; for (auto &MBB : MF) processMachineBasicBlock(MBB); diff --git a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp index 4be44ecb52d..bc9a5ca97fe 100644 --- a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp +++ b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp @@ -169,7 +169,7 @@ bool FalkorMarkStridedAccesses::runOnLoop(Loop &L) { LoadI->setMetadata(FALKOR_STRIDED_ACCESS_MD, MDNode::get(LoadI->getContext(), {})); ++NumStridedLoadsMarked; - DEBUG(dbgs() << "Load: " << I << " marked as strided\n"); + LLVM_DEBUG(dbgs() << "Load: " << I << " marked as strided\n"); MadeChange = true; } } @@ -731,10 +731,10 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) { continue; bool Fixed = false; - DEBUG(dbgs() << "Attempting to fix tag collision: " << MI); + LLVM_DEBUG(dbgs() << "Attempting to fix tag collision: " << MI); if (!DebugCounter::shouldExecute(FixCounter)) { - DEBUG(dbgs() << "Skipping fix due to debug counter:\n " << MI); + LLVM_DEBUG(dbgs() << "Skipping fix due to debug counter:\n " << MI); continue; } @@ -759,8 +759,8 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) { if (TagMap.count(NewTag)) continue; - DEBUG(dbgs() << "Changing base reg to: " << printReg(ScratchReg, TRI) - << '\n'); + LLVM_DEBUG(dbgs() << "Changing base reg to: " + << printReg(ScratchReg, TRI) << '\n'); // Rewrite: // Xd = LOAD Xb, off @@ -778,8 +778,8 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) { // If the load does a pre/post increment, then insert a MOV after as // well to update the real base register. if (LdI.IsPrePost) { - DEBUG(dbgs() << "Doing post MOV of incremented reg: " - << printReg(ScratchReg, TRI) << '\n'); + LLVM_DEBUG(dbgs() << "Doing post MOV of incremented reg: " + << printReg(ScratchReg, TRI) << '\n'); MI.getOperand(0).setReg( ScratchReg); // Change tied operand pre/post update dest. BuildMI(*MBB, std::next(MachineBasicBlock::iterator(MI)), DL, diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index 2523cfdfb53..6dc5d19862a 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -1287,13 +1287,11 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters( StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui; else StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui; - DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI); - if (RPI.isPaired()) - dbgs() << ", " << printReg(Reg2, TRI); - dbgs() << ") -> fi#(" << RPI.FrameIdx; - if (RPI.isPaired()) - dbgs() << ", " << RPI.FrameIdx+1; - dbgs() << ")\n"); + LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI); + if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI); + dbgs() << ") -> fi#(" << RPI.FrameIdx; + if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1; + dbgs() << ")\n"); MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc)); if (!MRI.isReserved(Reg1)) @@ -1350,13 +1348,11 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters( LdrOpc = RPI.isPaired() ? 
AArch64::LDPXi : AArch64::LDRXui; else LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui; - DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI); - if (RPI.isPaired()) - dbgs() << ", " << printReg(Reg2, TRI); - dbgs() << ") -> fi#(" << RPI.FrameIdx; - if (RPI.isPaired()) - dbgs() << ", " << RPI.FrameIdx+1; - dbgs() << ")\n"); + LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI); + if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI); + dbgs() << ") -> fi#(" << RPI.FrameIdx; + if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1; + dbgs() << ")\n"); MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc)); if (RPI.isPaired()) { @@ -1465,10 +1461,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, } } - DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:"; - for (unsigned Reg : SavedRegs.set_bits()) - dbgs() << ' ' << printReg(Reg, RegInfo); - dbgs() << "\n";); + LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:"; + for (unsigned Reg + : SavedRegs.set_bits()) dbgs() + << ' ' << printReg(Reg, RegInfo); + dbgs() << "\n";); // If any callee-saved registers are used, the frame cannot be eliminated. unsigned NumRegsSpilled = SavedRegs.count(); @@ -1477,7 +1474,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, // The CSR spill slots have not been allocated yet, so estimateStackSize // won't include them. unsigned CFSize = MFI.estimateStackSize(MF) + 8 * NumRegsSpilled; - DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n"); + LLVM_DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n"); unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF); bool BigStack = (CFSize > EstimatedStackSizeLimit); if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) @@ -1491,8 +1488,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, // here. if (BigStack) { if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) { - DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo) - << " to get a scratch register.\n"); + LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo) + << " to get a scratch register.\n"); SavedRegs.set(UnspilledCSGPR); // MachO's compact unwind format relies on all registers being stored in // pairs, so if we need to spill one extra for BigStack, then we need to @@ -1512,8 +1509,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, unsigned Align = TRI->getSpillAlignment(RC); int FI = MFI.CreateStackObject(Size, Align, false); RS->addScavengingFrameIndex(FI); - DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI - << " as the emergency spill slot.\n"); + LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI + << " as the emergency spill slot.\n"); } } diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 3124204fc59..fd040313179 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -1553,8 +1553,9 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N, // Bail out on large immediates. This happens when no proper // combining/constant folding was performed. 
if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) { - DEBUG((dbgs() << N - << ": Found large shift immediate, this should not happen\n")); + LLVM_DEBUG( + (dbgs() << N + << ": Found large shift immediate, this should not happen\n")); return false; } @@ -1696,8 +1697,9 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0, // Missing combines/constant folding may have left us with strange // constants. if (ShlImm >= VT.getSizeInBits()) { - DEBUG((dbgs() << N - << ": Found large shift immediate, this should not happen\n")); + LLVM_DEBUG( + (dbgs() << N + << ": Found large shift immediate, this should not happen\n")); return false; } @@ -2657,7 +2659,7 @@ bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) { void AArch64DAGToDAGISel::Select(SDNode *Node) { // If we have a custom node, we already have selected! if (Node->isMachineOpcode()) { - DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); + LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); Node->setNodeId(-1); return; } @@ -2754,9 +2756,9 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) { } SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT, Node->getOperand(0)); - DEBUG(dbgs() << "ISEL: Custom selection!\n=> "); - DEBUG(Extract->dumpr(CurDAG)); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "ISEL: Custom selection!\n=> "); + LLVM_DEBUG(Extract->dumpr(CurDAG)); + LLVM_DEBUG(dbgs() << "\n"); ReplaceNode(Node, Extract.getNode()); return; } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 8dc13c6fb24..51063ee7fb4 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1442,7 +1442,8 @@ static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, static bool isLegalArithImmed(uint64_t C) { // Matches AArch64DAGToDAGISel::SelectArithImmed(). bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0); - DEBUG(dbgs() << "Is imm " << C << " legal: " << (IsLegal ? "yes\n" : "no\n")); + LLVM_DEBUG(dbgs() << "Is imm " << C + << " legal: " << (IsLegal ? 
"yes\n" : "no\n")); return IsLegal; } @@ -2644,8 +2645,8 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SDValue AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { - DEBUG(dbgs() << "Custom lowering: "); - DEBUG(Op.dump()); + LLVM_DEBUG(dbgs() << "Custom lowering: "); + LLVM_DEBUG(Op.dump()); switch (Op.getOpcode()) { default: @@ -3774,7 +3775,7 @@ SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty, template <class NodeTy> SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags) const { - DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n"); + LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n"); SDLoc DL(N); EVT Ty = getPointerTy(DAG.getDataLayout()); SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags); @@ -3787,7 +3788,7 @@ SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG, template <class NodeTy> SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags) const { - DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n"); + LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n"); SDLoc DL(N); EVT Ty = getPointerTy(DAG.getDataLayout()); const unsigned char MO_NC = AArch64II::MO_NC; @@ -3803,7 +3804,7 @@ SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG, template <class NodeTy> SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags) const { - DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n"); + LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n"); SDLoc DL(N); EVT Ty = getPointerTy(DAG.getDataLayout()); SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags); @@ -5073,7 +5074,8 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { // FIXME: We should be able to handle f128 as well with a clever lowering. 
if (Imm.isPosZero() && (VT == MVT::f64 || VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasFullFP16()))) { - DEBUG(dbgs() << "Legal fp imm: materialize 0 using the zero register\n"); + LLVM_DEBUG( + dbgs() << "Legal fp imm: materialize 0 using the zero register\n"); return true; } @@ -5094,14 +5096,17 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { } if (IsLegal) { - DEBUG(dbgs() << "Legal " << FPType << " imm value: " << ImmStrVal << "\n"); + LLVM_DEBUG(dbgs() << "Legal " << FPType << " imm value: " << ImmStrVal + << "\n"); return true; } if (!FPType.empty()) - DEBUG(dbgs() << "Illegal " << FPType << " imm value: " << ImmStrVal << "\n"); + LLVM_DEBUG(dbgs() << "Illegal " << FPType << " imm value: " << ImmStrVal + << "\n"); else - DEBUG(dbgs() << "Illegal fp imm " << ImmStrVal << ": unsupported fp type\n"); + LLVM_DEBUG(dbgs() << "Illegal fp imm " << ImmStrVal + << ": unsupported fp type\n"); return false; } @@ -5540,7 +5545,7 @@ static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) { SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const { assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); - DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n"); + LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n"); SDLoc dl(Op); EVT VT = Op.getValueType(); unsigned NumElts = VT.getVectorNumElements(); @@ -5576,10 +5581,11 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, continue; else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT || !isa<ConstantSDNode>(V.getOperand(1))) { - DEBUG(dbgs() << "Reshuffle failed: " - "a shuffle can only come from building a vector from " - "various elements of other vectors, provided their " - "indices are constant\n"); + LLVM_DEBUG( + dbgs() << "Reshuffle failed: " + "a shuffle can only come from building a vector from " + "various elements of other vectors, provided their " + "indices are constant\n"); return SDValue(); } @@ -5596,8 +5602,9 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, } if (Sources.size() > 2) { - DEBUG(dbgs() << "Reshuffle failed: currently only do something sane when at " - "most two source vectors are involved\n"); + LLVM_DEBUG( + dbgs() << "Reshuffle failed: currently only do something sane when at " + "most two source vectors are involved\n"); return SDValue(); } @@ -5643,7 +5650,8 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, assert(SrcVT.getSizeInBits() == 2 * VT.getSizeInBits()); if (Src.MaxElt - Src.MinElt >= NumSrcElts) { - DEBUG(dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n"); + LLVM_DEBUG( + dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n"); return SDValue(); } @@ -5689,10 +5697,9 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, } // Final sanity check before we try to actually produce a shuffle. - DEBUG( - for (auto Src : Sources) - assert(Src.ShuffleVec.getValueType() == ShuffleVT); - ); + LLVM_DEBUG(for (auto Src + : Sources) + assert(Src.ShuffleVec.getValueType() == ShuffleVT);); // The stars all align, our next step is to produce the mask for the shuffle. SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); @@ -5725,7 +5732,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, // Final check before we try to produce nonsense... 
if (!isShuffleMaskLegal(Mask, ShuffleVT)) { - DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n"); + LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n"); return SDValue(); } @@ -5737,12 +5744,8 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, ShuffleOps[1], Mask); SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); - DEBUG( - dbgs() << "Reshuffle, creating node: "; - Shuffle.dump(); - dbgs() << "Reshuffle, creating node: "; - V.dump(); - ); + LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump(); + dbgs() << "Reshuffle, creating node: "; V.dump();); return V; } @@ -6699,10 +6702,10 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) { DAG.getConstant(Intrin, DL, MVT::i32), X, Y, Shift.getOperand(1)); - DEBUG(dbgs() << "aarch64-lower: transformed: \n"); - DEBUG(N->dump(&DAG)); - DEBUG(dbgs() << "into: \n"); - DEBUG(ResultSLI->dump(&DAG)); + LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n"); + LLVM_DEBUG(N->dump(&DAG)); + LLVM_DEBUG(dbgs() << "into: \n"); + LLVM_DEBUG(ResultSLI->dump(&DAG)); ++NumShiftInserts; return ResultSLI; @@ -6889,13 +6892,14 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, } if (!Value.getNode()) { - DEBUG(dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n"); + LLVM_DEBUG( + dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n"); return DAG.getUNDEF(VT); } if (isOnlyLowElement) { - DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 " - "SCALAR_TO_VECTOR node\n"); + LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 " + "SCALAR_TO_VECTOR node\n"); return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); } @@ -6966,7 +6970,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, if (!isConstant) { if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT || Value.getValueType() != VT) { - DEBUG(dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n"); + LLVM_DEBUG( + dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n"); return DAG.getNode(AArch64ISD::DUP, dl, VT, Value); } @@ -6975,8 +6980,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SDValue Lane = Value.getOperand(1); Value = Value.getOperand(0); if (Value.getValueSizeInBits() == 64) { - DEBUG(dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, " - "widening it\n"); + LLVM_DEBUG( + dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, " + "widening it\n"); Value = WidenVector(Value, DAG); } @@ -6989,17 +6995,16 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, EVT EltTy = VT.getVectorElementType(); assert ((EltTy == MVT::f16 || EltTy == MVT::f32 || EltTy == MVT::f64) && "Unsupported floating-point vector type"); - DEBUG(dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int " - "BITCASTS, and try again\n"); + LLVM_DEBUG( + dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int " + "BITCASTS, and try again\n"); MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits()); for (unsigned i = 0; i < NumElts; ++i) Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i))); EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts); SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); - DEBUG( - dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: "; - Val.dump(); - ); + LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: "; + Val.dump();); Val = LowerBUILD_VECTOR(Val, DAG); if (Val.getNode()) return DAG.getNode(ISD::BITCAST, dl, VT, 
Val); @@ -7034,8 +7039,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, // This will generate a load from the constant pool. if (isConstant) { - DEBUG(dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default " - "expansion\n"); + LLVM_DEBUG( + dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default " + "expansion\n"); return SDValue(); } @@ -7052,8 +7058,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, // shuffle is valid for the target) and materialization element by element // on the stack followed by a load for everything else. if (!isConstant && !usesOnlyOneValue) { - DEBUG(dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence " - "of INSERT_VECTOR_ELT\n"); + LLVM_DEBUG( + dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence " + "of INSERT_VECTOR_ELT\n"); SDValue Vec = DAG.getUNDEF(VT); SDValue Op0 = Op.getOperand(0); @@ -7070,14 +7077,12 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, // extended (i32) and it is safe to cast them to the vector type by ignoring // the upper bits of the lowest lane (e.g. v8i8, v4i16). if (!Op0.isUndef()) { - DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n"); + LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n"); Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0); ++i; } - DEBUG( - if (i < NumElts) - dbgs() << "Creating nodes for the other vector elements:\n"; - ); + LLVM_DEBUG(if (i < NumElts) dbgs() + << "Creating nodes for the other vector elements:\n";); for (; i < NumElts; ++i) { SDValue V = Op.getOperand(i); if (V.isUndef()) @@ -7088,8 +7093,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, return Vec; } - DEBUG(dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find " - "better alternative\n"); + LLVM_DEBUG( + dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find " + "better alternative\n"); return SDValue(); } @@ -8216,15 +8222,16 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, // 12-bit optionally shifted immediates are legal for adds. bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const { if (Immed == std::numeric_limits<int64_t>::min()) { - DEBUG(dbgs() << "Illegal add imm " << Immed << ": avoid UB for INT64_MIN\n"); + LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed + << ": avoid UB for INT64_MIN\n"); return false; } // Same encoding for add/sub, just flip the sign. Immed = std::abs(Immed); bool IsLegal = ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0)); - DEBUG(dbgs() << "Is " << Immed << " legal add imm: " << - (IsLegal ? "yes" : "no") << "\n"); + LLVM_DEBUG(dbgs() << "Is " << Immed + << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n"); return IsLegal; } @@ -9028,7 +9035,8 @@ static SDValue performBitcastCombine(SDNode *N, SVT.getVectorNumElements() != VT.getVectorNumElements() * 2) return SDValue(); - DEBUG(dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n"); + LLVM_DEBUG( + dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n"); // Create the simplified form to just extract the low or high half of the // vector directly rather than bothering with the bitcasts. 
@@ -9116,7 +9124,8 @@ static SDValue performConcatVectorsCombine(SDNode *N, if (!RHSTy.isVector()) return SDValue(); - DEBUG(dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n"); + LLVM_DEBUG( + dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n"); MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(), RHSTy.getVectorNumElements() * 2); @@ -10758,7 +10767,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, SelectionDAG &DAG = DCI.DAG; switch (N->getOpcode()) { default: - DEBUG(dbgs() << "Custom combining: skipping\n"); + LLVM_DEBUG(dbgs() << "Custom combining: skipping\n"); break; case ISD::ADD: case ISD::SUB: diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp index c7ece250aa8..4d7ca2349ed 100644 --- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp @@ -174,7 +174,7 @@ static bool unsupportedBinOp(const MachineInstr &I, const AArch64RegisterInfo &TRI) { LLT Ty = MRI.getType(I.getOperand(0).getReg()); if (!Ty.isValid()) { - DEBUG(dbgs() << "Generic binop register should be typed\n"); + LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n"); return true; } @@ -182,7 +182,7 @@ static bool unsupportedBinOp(const MachineInstr &I, for (auto &MO : I.operands()) { // FIXME: Support non-register operands. if (!MO.isReg()) { - DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n"); + LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n"); return true; } @@ -191,18 +191,18 @@ static bool unsupportedBinOp(const MachineInstr &I, // bank out of the minimal class for the register. // Either way, this needs to be documented (and possibly verified). if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) { - DEBUG(dbgs() << "Generic inst has physical register operand\n"); + LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n"); return true; } const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI); if (!OpBank) { - DEBUG(dbgs() << "Generic register has no bank or class\n"); + LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n"); return true; } if (PrevOpBank && OpBank != PrevOpBank) { - DEBUG(dbgs() << "Generic inst operands have different banks\n"); + LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n"); return true; } PrevOpBank = OpBank; @@ -378,7 +378,7 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterClass *RC = getRegClassForTypeOnBank( MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true); if (!RC) { - DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n'); + LLVM_DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n'); return false; } @@ -412,8 +412,8 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, // we hit another of its use or its defs. // Copies do not have constraints. 
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index c7ece250aa8..4d7ca2349ed 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -174,7 +174,7 @@ static bool unsupportedBinOp(const MachineInstr &I,
                              const AArch64RegisterInfo &TRI) {
   LLT Ty = MRI.getType(I.getOperand(0).getReg());
   if (!Ty.isValid()) {
-    DEBUG(dbgs() << "Generic binop register should be typed\n");
+    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
     return true;
   }
@@ -182,7 +182,7 @@ static bool unsupportedBinOp(const MachineInstr &I,
   for (auto &MO : I.operands()) {
     // FIXME: Support non-register operands.
     if (!MO.isReg()) {
-      DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
+      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
       return true;
     }
@@ -191,18 +191,18 @@ static bool unsupportedBinOp(const MachineInstr &I,
     // bank out of the minimal class for the register.
     // Either way, this needs to be documented (and possibly verified).
     if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
-      DEBUG(dbgs() << "Generic inst has physical register operand\n");
+      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
       return true;
     }
     const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
     if (!OpBank) {
-      DEBUG(dbgs() << "Generic register has no bank or class\n");
+      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
       return true;
     }
     if (PrevOpBank && OpBank != PrevOpBank) {
-      DEBUG(dbgs() << "Generic inst operands have different banks\n");
+      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
     }
     PrevOpBank = OpBank;
@@ -378,7 +378,7 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
     const TargetRegisterClass *RC = getRegClassForTypeOnBank(
         MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
     if (!RC) {
-      DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
+      LLVM_DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
       return false;
     }
@@ -412,8 +412,8 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
   // we hit another of its use or its defs.
   // Copies do not have constraints.
   if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
-    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
-          << " operand\n");
+    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+                      << " operand\n");
     return false;
   }
   I.setDesc(TII.get(AArch64::COPY));
@@ -686,13 +686,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
     if (!DefRC) {
       if (!DefTy.isValid()) {
-        DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
+        LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
         return false;
       }
       const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
       DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
       if (!DefRC) {
-        DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
+        LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
         return false;
       }
     }
@@ -710,7 +710,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
   if (I.getNumOperands() != I.getNumExplicitOperands()) {
-    DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
+    LLVM_DEBUG(
+        dbgs() << "Generic instruction has unexpected implicit operands\n");
     return false;
   }
@@ -726,8 +727,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
       // We shouldn't need this on AArch64, but it would be implemented as an
       // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
       // bit being tested is < 32.
-      DEBUG(dbgs() << "G_BRCOND has type: " << Ty
-                   << ", expected at most 32-bits");
+      LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
+                        << ", expected at most 32-bits");
       return false;
     }
@@ -767,15 +768,16 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     // FIXME: Redundant check, but even less readable when factored out.
     if (isFP) {
       if (Ty != s32 && Ty != s64) {
-        DEBUG(dbgs() << "Unable to materialize FP " << Ty
-              << " constant, expected: " << s32 << " or " << s64
-              << '\n');
+        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
+                          << " constant, expected: " << s32 << " or " << s64
+                          << '\n');
         return false;
       }
       if (RB.getID() != AArch64::FPRRegBankID) {
-        DEBUG(dbgs() << "Unable to materialize FP " << Ty
-              << " constant on bank: " << RB << ", expected: FPR\n");
+        LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
+                          << " constant on bank: " << RB
+                          << ", expected: FPR\n");
         return false;
       }
@@ -786,15 +788,16 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     } else {
       // s32 and s64 are covered by tablegen.
       if (Ty != p0) {
-        DEBUG(dbgs() << "Unable to materialize integer " << Ty
-              << " constant, expected: " << s32 << ", " << s64 << ", or "
-              << p0 << '\n');
+        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
+                          << " constant, expected: " << s32 << ", " << s64
+                          << ", or " << p0 << '\n');
         return false;
       }
       if (RB.getID() != AArch64::GPRRegBankID) {
-        DEBUG(dbgs() << "Unable to materialize integer " << Ty
-              << " constant on bank: " << RB << ", expected: GPR\n");
+        LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
+                          << " constant on bank: " << RB
+                          << ", expected: GPR\n");
         return false;
       }
     }
@@ -820,7 +823,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
               .addUse(DefGPRReg);
       if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
-        DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
+        LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
         return false;
       }
@@ -908,8 +911,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
   case TargetOpcode::G_FRAME_INDEX: {
     // allocas and G_FRAME_INDEX are only supported in addrspace(0).
     if (Ty != LLT::pointer(0, 64)) {
-      DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
-            << ", expected: " << LLT::pointer(0, 64) << '\n');
+      LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
+                        << ", expected: " << LLT::pointer(0, 64) << '\n');
       return false;
     }
     I.setDesc(TII.get(AArch64::ADDXri));
@@ -980,14 +983,14 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
     if (PtrTy != LLT::pointer(0, 64)) {
-      DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
-            << ", expected: " << LLT::pointer(0, 64) << '\n');
+      LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
+                        << ", expected: " << LLT::pointer(0, 64) << '\n');
       return false;
     }
     auto &MemOp = **I.memoperands_begin();
     if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
-      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
       return false;
     }
     unsigned MemSizeInBits = MemOp.getSize() * 8;
@@ -1066,13 +1069,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
     if (RB.getID() != AArch64::GPRRegBankID) {
-      DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
+      LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
       return false;
     }
     if (Ty != LLT::scalar(64)) {
-      DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
-            << ", expected: " << LLT::scalar(64) << '\n');
+      LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
+                        << ", expected: " << LLT::scalar(64) << '\n');
       return false;
     }
@@ -1138,7 +1141,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
     if (DstRB.getID() != SrcRB.getID()) {
-      DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
+      LLVM_DEBUG(
+          dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
       return false;
     }
@@ -1155,7 +1159,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
         !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
-      DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
+      LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
       return false;
     }
@@ -1169,7 +1173,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
         SrcRC == &AArch64::GPR64RegClass) {
       I.getOperand(1).setSubReg(AArch64::sub_32);
     } else {
-      DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
+      LLVM_DEBUG(
+          dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
       return false;
     }
@@ -1192,26 +1197,28 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
     if (RBDst.getID() != AArch64::GPRRegBankID) {
-      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
+      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
+                        << ", expected: GPR\n");
       return false;
     }
     const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
     if (RBSrc.getID() != AArch64::GPRRegBankID) {
-      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
+      LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
+                        << ", expected: GPR\n");
       return false;
     }
     const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
     if (DstSize == 0) {
-      DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
+      LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
       return false;
     }
     if (DstSize != 64 && DstSize > 32) {
-      DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
-            << ", expected: 32 or 64\n");
+      LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
+                        << ", expected: 32 or 64\n");
       return false;
     }
     // At this point G_ANYEXT is just like a plain COPY, but we need
@@ -1239,8 +1246,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
     if (RB.getID() != AArch64::GPRRegBankID) {
-      DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
-            << ", expected: GPR\n");
+      LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
+                        << ", expected: GPR\n");
       return false;
     }
@@ -1248,8 +1255,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     if (DstTy == LLT::scalar(64)) {
       // FIXME: Can we avoid manually doing this?
       if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
-        DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
-              << " operand\n");
+        LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
+                          << " operand\n");
         return false;
       }
@@ -1317,8 +1324,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
   case TargetOpcode::G_SELECT: {
     if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
-      DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
-            << ", expected: " << LLT::scalar(1) << '\n');
+      LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
+                        << ", expected: " << LLT::scalar(1) << '\n');
       return false;
     }
@@ -1356,8 +1363,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
   }
   case TargetOpcode::G_ICMP: {
     if (Ty != LLT::scalar(32)) {
-      DEBUG(dbgs() << "G_ICMP result has type: " << Ty
-            << ", expected: " << LLT::scalar(32) << '\n');
+      LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
+                        << ", expected: " << LLT::scalar(32) << '\n');
       return false;
     }
@@ -1403,8 +1410,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
   case TargetOpcode::G_FCMP: {
     if (Ty != LLT::scalar(32)) {
-      DEBUG(dbgs() << "G_FCMP result has type: " << Ty
-            << ", expected: " << LLT::scalar(32) << '\n');
+      LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
+                        << ", expected: " << LLT::scalar(32) << '\n');
       return false;
     }
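Note: several hunks in the next file keep a (void)MIB; cast alongside the converted macros. The sketch below shows why such a cast is needed, under the usual NDEBUG convention and with a hypothetical MY_DEBUG standing in for LLVM_DEBUG: when every use of a variable sits inside the debug macro, builds where the macro expands to nothing would otherwise emit -Wunused-variable.

#include <iostream>

#ifndef NDEBUG
#define MY_DEBUG(X) do { X; } while (false)
#else
#define MY_DEBUG(X) do { } while (false)
#endif

static int buildInstr() { return 42; } // hypothetical helper

int main() {
  int MIB = buildInstr();
  (void)MIB; // silences -Wunused-variable when MY_DEBUG compiles to nothing
  MY_DEBUG(std::cerr << "built: " << MIB << '\n');
  return 0;
}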
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 14b9e675176..4a19ecd6910 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -706,13 +706,13 @@ AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
           .setMIFlags(I->mergeFlagsWith(*MergeMI));
   (void)MIB;
-  DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
-  DEBUG(I->print(dbgs()));
-  DEBUG(dbgs() << "    ");
-  DEBUG(MergeMI->print(dbgs()));
-  DEBUG(dbgs() << "  with instruction:\n    ");
-  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
-  DEBUG(dbgs() << "\n");
+  LLVM_DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
+  LLVM_DEBUG(I->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "    ");
+  LLVM_DEBUG(MergeMI->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
+  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "\n");
   // Erase the old instructions.
   I->eraseFromParent();
@@ -824,11 +824,12 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
   (void)MIB;
-  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
-  DEBUG(I->print(dbgs()));
-  DEBUG(dbgs() << "    ");
-  DEBUG(Paired->print(dbgs()));
-  DEBUG(dbgs() << "  with instruction:\n    ");
+  LLVM_DEBUG(
+      dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
+  LLVM_DEBUG(I->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "    ");
+  LLVM_DEBUG(Paired->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
   if (SExtIdx != -1) {
     // Generate the sign extension for the proper result of the ldp.
     // I.e., with X1, that would be:
@@ -842,8 +843,8 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
     unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
     // Update the result of LDP to use the W instead of the X variant.
     DstMO.setReg(DstRegW);
-    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
-    DEBUG(dbgs() << "\n");
+    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+    LLVM_DEBUG(dbgs() << "\n");
     // Make the machine verifier happy by providing a definition for
     // the X register.
     // Insert this definition right after the generated LDP, i.e., before
@@ -860,12 +861,12 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
             .addImm(0)
             .addImm(31);
     (void)MIBSXTW;
-    DEBUG(dbgs() << "  Extend operand:\n    ");
-    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
+    LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
+    LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
   } else {
-    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
   }
-  DEBUG(dbgs() << "\n");
+  LLVM_DEBUG(dbgs() << "\n");
   // Erase the old instructions.
   I->eraseFromParent();
@@ -903,9 +904,9 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
         break;
       }
     }
-    DEBUG(dbgs() << "Remove load instruction:\n    ");
-    DEBUG(LoadI->print(dbgs()));
-    DEBUG(dbgs() << "\n");
+    LLVM_DEBUG(dbgs() << "Remove load instruction:\n    ");
+    LLVM_DEBUG(LoadI->print(dbgs()));
+    LLVM_DEBUG(dbgs() << "\n");
     LoadI->eraseFromParent();
     return NextI;
   }
@@ -979,15 +980,15 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
     break;
   }
-  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
-  DEBUG(StoreI->print(dbgs()));
-  DEBUG(dbgs() << "    ");
-  DEBUG(LoadI->print(dbgs()));
-  DEBUG(dbgs() << "  with instructions:\n    ");
-  DEBUG(StoreI->print(dbgs()));
-  DEBUG(dbgs() << "    ");
-  DEBUG((BitExtMI)->print(dbgs()));
-  DEBUG(dbgs() << "\n");
+  LLVM_DEBUG(dbgs() << "Promoting load by replacing :\n    ");
+  LLVM_DEBUG(StoreI->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "    ");
+  LLVM_DEBUG(LoadI->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "  with instructions:\n    ");
+  LLVM_DEBUG(StoreI->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "    ");
+  LLVM_DEBUG((BitExtMI)->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "\n");
   // Erase the old instructions.
   LoadI->eraseFromParent();
@@ -1355,18 +1356,18 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
   if (IsPreIdx) {
     ++NumPreFolded;
-    DEBUG(dbgs() << "Creating pre-indexed load/store.");
+    LLVM_DEBUG(dbgs() << "Creating pre-indexed load/store.");
   } else {
     ++NumPostFolded;
-    DEBUG(dbgs() << "Creating post-indexed load/store.");
+    LLVM_DEBUG(dbgs() << "Creating post-indexed load/store.");
   }
-  DEBUG(dbgs() << "    Replacing instructions:\n    ");
-  DEBUG(I->print(dbgs()));
-  DEBUG(dbgs() << "    ");
-  DEBUG(Update->print(dbgs()));
-  DEBUG(dbgs() << "  with instruction:\n    ");
-  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
-  DEBUG(dbgs() << "\n");
+  LLVM_DEBUG(dbgs() << "    Replacing instructions:\n    ");
+  LLVM_DEBUG(I->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "    ");
+  LLVM_DEBUG(Update->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
+  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+  LLVM_DEBUG(dbgs() << "\n");
   // Erase the old instructions for the block.
   I->eraseFromParent();
diff --git a/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp b/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
index ee6703aed1e..ccf64657529 100644
--- a/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
@@ -164,10 +164,10 @@ bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd,
   LiveIntervals &LIs = G.getMetadata().LIS;
   if (TRI->isPhysicalRegister(Rd) || TRI->isPhysicalRegister(Ra)) {
-    DEBUG(dbgs() << "Rd is a physical reg:" << TRI->isPhysicalRegister(Rd)
-          << '\n');
-    DEBUG(dbgs() << "Ra is a physical reg:" << TRI->isPhysicalRegister(Ra)
-          << '\n');
+    LLVM_DEBUG(dbgs() << "Rd is a physical reg:" << TRI->isPhysicalRegister(Rd)
+                      << '\n');
+    LLVM_DEBUG(dbgs() << "Ra is a physical reg:" << TRI->isPhysicalRegister(Ra)
+                      << '\n');
     return false;
   }
@@ -247,14 +247,14 @@ void A57ChainingConstraint::addInterChainConstraint(PBQPRAGraph &G, unsigned Rd,
   // Do some Chain management
   if (Chains.count(Ra)) {
     if (Rd != Ra) {
-      DEBUG(dbgs() << "Moving acc chain from " << printReg(Ra, TRI) << " to "
-            << printReg(Rd, TRI) << '\n';);
+      LLVM_DEBUG(dbgs() << "Moving acc chain from " << printReg(Ra, TRI)
+                        << " to " << printReg(Rd, TRI) << '\n';);
       Chains.remove(Ra);
       Chains.insert(Rd);
     }
   } else {
-    DEBUG(dbgs() << "Creating new acc chain for " << printReg(Rd, TRI)
-          << '\n';);
+    LLVM_DEBUG(dbgs() << "Creating new acc chain for " << printReg(Rd, TRI)
+                      << '\n';);
     Chains.insert(Rd);
   }
@@ -279,7 +279,7 @@ void A57ChainingConstraint::addInterChainConstraint(PBQPRAGraph &G, unsigned Rd,
   assert(edge != G.invalidEdgeId() && "PBQP error ! The edge should exist !");
-  DEBUG(dbgs() << "Refining constraint !\n";);
+  LLVM_DEBUG(dbgs() << "Refining constraint !\n";);
   if (G.getEdgeNode1Id(edge) == node2) {
     std::swap(node1, node2);
@@ -329,7 +329,7 @@ void A57ChainingConstraint::apply(PBQPRAGraph &G) {
   LiveIntervals &LIs = G.getMetadata().LIS;
   TRI = MF.getSubtarget().getRegisterInfo();
-  DEBUG(MF.dump());
+  LLVM_DEBUG(MF.dump());
   for (const auto &MBB: MF) {
     Chains.clear(); // FIXME: really needed ? Could not work at MF level ?
@@ -340,8 +340,8 @@ void A57ChainingConstraint::apply(PBQPRAGraph &G) {
       for (auto r : Chains) {
        SmallVector<unsigned, 8> toDel;
        if(regJustKilledBefore(LIs, r, MI)) {
-          DEBUG(dbgs() << "Killing chain " << printReg(r, TRI) << " at ";
-                MI.print(dbgs()););
+          LLVM_DEBUG(dbgs() << "Killing chain " << printReg(r, TRI) << " at ";
+                     MI.print(dbgs()););
          toDel.push_back(r);
diff --git a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
index a8dc6e74ef6..01d8a35bbc2 100644
--- a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -119,7 +119,7 @@ public:
   /// Iterate over the functions and promote the interesting constants into
   /// global variables with module scope.
   bool runOnModule(Module &M) override {
-    DEBUG(dbgs() << getPassName() << '\n');
+    LLVM_DEBUG(dbgs() << getPassName() << '\n');
     if (skipModule(M))
       return false;
     bool Changed = false;
@@ -380,9 +380,9 @@ bool AArch64PromoteConstant::isDominated(Instruction *NewPt, Instruction *User,
         (IPI.first->getParent() != NewPt->getParent() &&
          DT.dominates(IPI.first->getParent(), NewPt->getParent()))) {
       // No need to insert this point. Just record the dominated use.
-      DEBUG(dbgs() << "Insertion point dominated by:\n");
-      DEBUG(IPI.first->print(dbgs()));
-      DEBUG(dbgs() << '\n');
+      LLVM_DEBUG(dbgs() << "Insertion point dominated by:\n");
+      LLVM_DEBUG(IPI.first->print(dbgs()));
+      LLVM_DEBUG(dbgs() << '\n');
       IPI.second.emplace_back(User, OpNo);
       return true;
     }
@@ -408,9 +408,9 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Instruction *User,
       // Instructions are in the same block.
      // By construction, NewPt is dominating the other.
      // Indeed, isDominated returned false with the exact same arguments.
-      DEBUG(dbgs() << "Merge insertion point with:\n");
-      DEBUG(IPI->first->print(dbgs()));
-      DEBUG(dbgs() << "\nat considered insertion point.\n");
+      LLVM_DEBUG(dbgs() << "Merge insertion point with:\n");
+      LLVM_DEBUG(IPI->first->print(dbgs()));
+      LLVM_DEBUG(dbgs() << "\nat considered insertion point.\n");
       appendAndTransferDominatedUses(NewPt, User, OpNo, IPI, InsertPts);
       return true;
     }
@@ -430,11 +430,11 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Instruction *User,
     }
     // else, CommonDominator is the block of NewBB, hence NewBB is the last
     // possible insertion point in that block.
-    DEBUG(dbgs() << "Merge insertion point with:\n");
-    DEBUG(IPI->first->print(dbgs()));
-    DEBUG(dbgs() << '\n');
-    DEBUG(NewPt->print(dbgs()));
-    DEBUG(dbgs() << '\n');
+    LLVM_DEBUG(dbgs() << "Merge insertion point with:\n");
+    LLVM_DEBUG(IPI->first->print(dbgs()));
+    LLVM_DEBUG(dbgs() << '\n');
+    LLVM_DEBUG(NewPt->print(dbgs()));
+    LLVM_DEBUG(dbgs() << '\n');
     appendAndTransferDominatedUses(NewPt, User, OpNo, IPI, InsertPts);
     return true;
   }
@@ -443,15 +443,15 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Instruction *User,
 void AArch64PromoteConstant::computeInsertionPoint(
     Instruction *User, unsigned OpNo, InsertionPoints &InsertPts) {
-  DEBUG(dbgs() << "Considered use, opidx " << OpNo << ":\n");
-  DEBUG(User->print(dbgs()));
-  DEBUG(dbgs() << '\n');
+  LLVM_DEBUG(dbgs() << "Considered use, opidx " << OpNo << ":\n");
+  LLVM_DEBUG(User->print(dbgs()));
+  LLVM_DEBUG(dbgs() << '\n');
   Instruction *InsertionPoint = findInsertionPoint(*User, OpNo);
-  DEBUG(dbgs() << "Considered insertion point:\n");
-  DEBUG(InsertionPoint->print(dbgs()));
-  DEBUG(dbgs() << '\n');
+  LLVM_DEBUG(dbgs() << "Considered insertion point:\n");
+  LLVM_DEBUG(InsertionPoint->print(dbgs()));
+  LLVM_DEBUG(dbgs() << '\n');
   if (isDominated(InsertionPoint, User, OpNo, InsertPts))
     return;
@@ -460,7 +460,7 @@ void AArch64PromoteConstant::computeInsertionPoint(
   if (tryAndMerge(InsertionPoint, User, OpNo, InsertPts))
     return;
-  DEBUG(dbgs() << "Keep considered insertion point\n");
+  LLVM_DEBUG(dbgs() << "Keep considered insertion point\n");
   // It is definitely useful by its own
   InsertPts[InsertionPoint].emplace_back(User, OpNo);
@@ -476,9 +476,9 @@ static void ensurePromotedGV(Function &F, Constant &C,
       *F.getParent(), C.getType(), true, GlobalValue::InternalLinkage, nullptr,
       "_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
   PC.GV->setInitializer(&C);
-  DEBUG(dbgs() << "Global replacement: ");
-  DEBUG(PC.GV->print(dbgs()));
-  DEBUG(dbgs() << '\n');
+  LLVM_DEBUG(dbgs() << "Global replacement: ");
+  LLVM_DEBUG(PC.GV->print(dbgs()));
+  LLVM_DEBUG(dbgs() << '\n');
   ++NumPromoted;
 }
@@ -495,10 +495,10 @@ void AArch64PromoteConstant::insertDefinitions(Function &F,
     // Create the load of the global variable.
     IRBuilder<> Builder(IPI.first);
     LoadInst *LoadedCst = Builder.CreateLoad(&PromotedGV);
-    DEBUG(dbgs() << "**********\n");
-    DEBUG(dbgs() << "New def: ");
-    DEBUG(LoadedCst->print(dbgs()));
-    DEBUG(dbgs() << '\n');
+    LLVM_DEBUG(dbgs() << "**********\n");
+    LLVM_DEBUG(dbgs() << "New def: ");
+    LLVM_DEBUG(LoadedCst->print(dbgs()));
+    LLVM_DEBUG(dbgs() << '\n');
     // Update the dominated uses.
     for (auto Use : IPI.second) {
@@ -507,11 +507,11 @@ void AArch64PromoteConstant::insertDefinitions(Function &F,
              findInsertionPoint(*Use.first, Use.second)) &&
          "Inserted definition does not dominate all its uses!");
 #endif
-      DEBUG({
-        dbgs() << "Use to update " << Use.second << ":";
-        Use.first->print(dbgs());
-        dbgs() << '\n';
-      });
+      LLVM_DEBUG({
+        dbgs() << "Use to update " << Use.second << ":";
+        Use.first->print(dbgs());
+        dbgs() << '\n';
+      });
       Use.first->setOperand(Use.second, LoadedCst);
       ++NumPromotedUses;
     }
@@ -523,7 +523,7 @@ void AArch64PromoteConstant::promoteConstants(
     PromotionCacheTy &PromotionCache) {
   // Promote the constants.
   for (auto U = Updates.begin(), E = Updates.end(); U != E;) {
-    DEBUG(dbgs() << "** Compute insertion points **\n");
+    LLVM_DEBUG(dbgs() << "** Compute insertion points **\n");
     auto First = U;
     Constant *C = First->C;
     InsertionPoints InsertPts;
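Note: the AArch64PromoteConstant hunks above show both multi-statement styles side by side: a run of single-statement LLVM_DEBUG calls, and the braced block form LLVM_DEBUG({ ... });. A small sketch of the difference, again with a hypothetical MY_DEBUG rather than the real macro:

#include <iostream>

#ifndef NDEBUG
#define MY_DEBUG(X) do { X; } while (false)
#else
#define MY_DEBUG(X) do { } while (false)
#endif

int main() {
  int OpNo = 2;
  // Style 1: one macro invocation per statement.
  MY_DEBUG(std::cerr << "Use to update " << OpNo << ":");
  MY_DEBUG(std::cerr << '\n');
  // Style 2: a braced compound statement, one guard around related output.
  MY_DEBUG({
    std::cerr << "Use to update " << OpNo << ":";
    std::cerr << '\n';
  });
  return 0;
}

The braced form keeps the whole group behind a single check and is treated as one unit by clang-format, which is why it survives the mechanical rename unchanged.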
diff --git a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index e5822b11432..fac9ec3e74c 100644
--- a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -427,9 +427,9 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
       }
       if (IsCopy)
-        DEBUG(dbgs() << "Remove redundant Copy : " << *MI);
+        LLVM_DEBUG(dbgs() << "Remove redundant Copy : " << *MI);
       else
-        DEBUG(dbgs() << "Remove redundant Move : " << *MI);
+        LLVM_DEBUG(dbgs() << "Remove redundant Move : " << *MI);
       MI->eraseFromParent();
       Changed = true;
@@ -473,8 +473,8 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
   // Clear kills in the range where changes were made. This is conservative,
   // but should be okay since kill markers are being phased out.
-  DEBUG(dbgs() << "Clearing kill flags.\n\tFirstUse: " << *FirstUse
-        << "\tLastChange: " << *LastChange);
+  LLVM_DEBUG(dbgs() << "Clearing kill flags.\n\tFirstUse: " << *FirstUse
+                    << "\tLastChange: " << *LastChange);
   for (MachineInstr &MMI : make_range(FirstUse, PredMBB->end()))
     MMI.clearKillInfo();
   for (MachineInstr &MMI : make_range(MBB->begin(), LastChange))
diff --git a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 2f7a597f3bf..fc7b5984fe3 100644
--- a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -91,9 +91,9 @@ bool AArch64StorePairSuppress::shouldAddSTPToBlock(const MachineBasicBlock *BB)
   if (SCDesc->isValid() && !SCDesc->isVariant()) {
     unsigned ResLenWithSTP = BBTrace.getResourceLength(None, SCDesc);
     if (ResLenWithSTP > ResLength) {
-      DEBUG(dbgs() << "  Suppress STP in BB: " << BB->getNumber()
-            << " resources " << ResLength << " -> " << ResLenWithSTP
-            << "\n");
+      LLVM_DEBUG(dbgs() << "  Suppress STP in BB: " << BB->getNumber()
+                        << " resources " << ResLength << " -> " << ResLenWithSTP
+                        << "\n");
       return false;
     }
   }
@@ -131,10 +131,10 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
   Traces = &getAnalysis<MachineTraceMetrics>();
   MinInstr = nullptr;
-  DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');
+  LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');
   if (!SchedModel.hasInstrSchedModel()) {
-    DEBUG(dbgs() << "  Skipping pass: no machine model present.\n");
+    LLVM_DEBUG(dbgs() << "  Skipping pass: no machine model present.\n");
     return false;
   }
@@ -156,7 +156,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
         if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
           break;
         // Otherwise, continue unpairing the stores in this block.
-        DEBUG(dbgs() << "Unpairing store " << MI << "\n");
+        LLVM_DEBUG(dbgs() << "Unpairing store " << MI << "\n");
         SuppressSTP = true;
         TII->suppressLdStPair(MI);
       }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 098272dc2e2..7eb4c1c9667 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -728,14 +728,14 @@ getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
   };
   int StridedLoads = countStridedLoads(L, SE);
-  DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
-        << " strided loads\n");
+  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
+                    << " strided loads\n");
   // Pick the largest power of 2 unroll count that won't result in too many
   // strided loads.
   if (StridedLoads) {
     UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
-    DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " << UP.MaxCount
-          << '\n');
+    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
+                      << UP.MaxCount << '\n');
   }
 }
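Note: for context on how these statements are enabled, here is a hedged sketch of the conventional consumer side (the pass name and DEBUG_TYPE below are illustrative, not code from this patch). LLVM_DEBUG text is compiled in only when assertions are enabled, and printed only when the tool runs with -debug, or with -debug-only=<type> matching the file's DEBUG_TYPE.

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-example" // each file defines its own before use

using namespace llvm;

void runExampleOnce() {
  // Visible with: llc -debug, or llc -debug-only=aarch64-example
  LLVM_DEBUG(dbgs() << "example: pass entry\n");
}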