author | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2018-05-14 12:53:11 +0000 |
---|---|---|
committer | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2018-05-14 12:53:11 +0000 |
commit | d34e60ca8532511acb8c93ef26297e349fbec86a (patch) | |
tree | 1a095bc8694498d94232e81b95c1da05d462d3ec /llvm/lib/Transforms | |
parent | affbc99bea94e77f7ebccd8ba887e33051bd04ee (diff) | |
Rename DEBUG macro to LLVM_DEBUG.
The DEBUG() macro is very generic, so it might clash with macro definitions in other projects.
The renaming was done as follows:
- git grep -l 'DEBUG' | xargs sed -i 's/\bDEBUG\s\?(/LLVM_DEBUG(/g'
- git diff -U0 master | ../clang/tools/clang-format/clang-format-diff.py -i -p1 -style LLVM
- Manual change to APInt
- Manually change the docs, as the regex doesn't match them.
During the transition period, the DEBUG() macro is still present and is aliased
to LLVM_DEBUG().
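A minimal sketch of what this transition-period aliasing could look like (a simplified assumption, not the verbatim contents of llvm/Support/Debug.h; the actual LLVM definitions may differ):

// Simplified sketch of the new macro and its transitional alias.
#ifndef NDEBUG
#define LLVM_DEBUG(X)                                                          \
  do {                                                                         \
    if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(DEBUG_TYPE)) {         \
      X;                                                                       \
    }                                                                          \
  } while (false)
#else
#define LLVM_DEBUG(X)                                                          \
  do {                                                                         \
  } while (false)
#endif
// Transition-period alias so existing DEBUG(...) call sites keep compiling
// until they are migrated.
#define DEBUG(X) LLVM_DEBUG(X)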
Differential Revision: https://reviews.llvm.org/D43624
llvm-svn: 332240
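For reference, the mechanical effect of the steps above on a typical call site, the same pattern that repeats throughout the diff below. The surrounding function, its name, and the logged value are a hypothetical harness added only for illustration:

#define DEBUG_TYPE "example"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical call site; only the macro name changes in the rewrite.
static void logValue(int V) {
  // Before this commit:
  //   DEBUG(dbgs() << "value: " << V << '\n');
  // After the sed rewrite plus the clang-format pass:
  LLVM_DEBUG(dbgs() << "value: " << V << '\n');
}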
Diffstat (limited to 'llvm/lib/Transforms')
111 files changed, 2573 insertions, 2347 deletions
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp index c72df2d70a4..8289b2d68f8 100644 --- a/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp +++ b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp @@ -405,9 +405,10 @@ bool TruncInstCombine::run(Function &F) { CurrentTruncInst = Worklist.pop_back_val(); if (Type *NewDstSclTy = getBestTruncatedType()) { - DEBUG(dbgs() << "ICE: TruncInstCombine reducing type of expression dag " - "dominated by: " - << CurrentTruncInst << '\n'); + LLVM_DEBUG( + dbgs() << "ICE: TruncInstCombine reducing type of expression dag " + "dominated by: " + << CurrentTruncInst << '\n'); ReduceExpressionDag(NewDstSclTy); MadeIRChange = true; } diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp index 1676e7a5736..c4172c035dc 100644 --- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp @@ -106,8 +106,8 @@ struct SuspendCrossingInfo { assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def"); bool const Result = Block[UseIndex].Kills[DefIndex]; - DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName() - << " answer is " << Result << "\n"); + LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName() + << " answer is " << Result << "\n"); return Result; } @@ -195,8 +195,8 @@ SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape) bool Changed; do { - DEBUG(dbgs() << "iteration " << ++Iteration); - DEBUG(dbgs() << "==============\n"); + LLVM_DEBUG(dbgs() << "iteration " << ++Iteration); + LLVM_DEBUG(dbgs() << "==============\n"); Changed = false; for (size_t I = 0; I < N; ++I) { @@ -240,20 +240,20 @@ SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape) Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes); if (S.Kills != SavedKills) { - DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName() - << "\n"); - DEBUG(dump("S.Kills", S.Kills)); - DEBUG(dump("SavedKills", SavedKills)); + LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName() + << "\n"); + LLVM_DEBUG(dump("S.Kills", S.Kills)); + LLVM_DEBUG(dump("SavedKills", SavedKills)); } if (S.Consumes != SavedConsumes) { - DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n"); - DEBUG(dump("S.Consume", S.Consumes)); - DEBUG(dump("SavedCons", SavedConsumes)); + LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n"); + LLVM_DEBUG(dump("S.Consume", S.Consumes)); + LLVM_DEBUG(dump("SavedCons", SavedConsumes)); } } } } while (Changed); - DEBUG(dump()); + LLVM_DEBUG(dump()); } #undef DEBUG_TYPE // "coro-suspend-crossing" @@ -821,7 +821,7 @@ static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills, for (User *U : CurrentValue->users()) { Instruction *I = cast<Instruction>(U); if (!DT.dominates(CoroBegin, I)) { - DEBUG(dbgs() << "will move: " << *I << "\n"); + LLVM_DEBUG(dbgs() << "will move: " << *I << "\n"); // TODO: Make this more robust. Currently if we run into a situation // where simple instruction move won't work we panic and @@ -906,7 +906,7 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) { break; // Rewrite materializable instructions to be materialized at the use point. 
- DEBUG(dump("Materializations", Spills)); + LLVM_DEBUG(dump("Materializations", Spills)); rewriteMaterializableInstructions(Builder, Spills); Spills.clear(); } @@ -936,7 +936,7 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) { Spills.emplace_back(&I, U); } } - DEBUG(dump("Spills", Spills)); + LLVM_DEBUG(dump("Spills", Spills)); moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin); Shape.FrameTy = buildFrameType(F, Shape, Spills); Shape.FramePtr = insertSpills(Spills, Shape); diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp index 6ab56f25e8b..e764edf7bc5 100644 --- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp @@ -654,7 +654,7 @@ getNotRelocatableInstructions(CoroBeginInst *CoroBegin, // set. do { Instruction *Current = Work.pop_back_val(); - DEBUG(dbgs() << "CoroSplit: Will not relocate: " << *Current << "\n"); + LLVM_DEBUG(dbgs() << "CoroSplit: Will not relocate: " << *Current << "\n"); DoNotRelocate.insert(Current); for (Value *U : Current->operands()) { auto *I = dyn_cast<Instruction>(U); @@ -850,8 +850,8 @@ struct CoroSplit : public CallGraphSCCPass { for (Function *F : Coroutines) { Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR); StringRef Value = Attr.getValueAsString(); - DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName() - << "' state: " << Value << "\n"); + LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName() + << "' state: " << Value << "\n"); if (Value == UNPREPARED_FOR_SPLIT) { prepareForSplit(*F, CG); continue; diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp index e159920116c..f2c2b55b1c5 100644 --- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -220,8 +220,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote, NF->setSubprogram(F->getSubprogram()); F->setSubprogram(nullptr); - DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n" - << "From: " << *F); + LLVM_DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n" + << "From: " << *F); // Recompute the parameter attributes list based on the new arguments for // the function. @@ -426,8 +426,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote, I2->setName(I->getName() + ".val"); LI->replaceAllUsesWith(&*I2); LI->eraseFromParent(); - DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName() - << "' in function '" << F->getName() << "'\n"); + LLVM_DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName() + << "' in function '" << F->getName() << "'\n"); } else { GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back()); IndicesVector Operands; @@ -453,8 +453,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote, NewName += ".val"; TheArg->setName(NewName); - DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName() - << "' of function '" << NF->getName() << "'\n"); + LLVM_DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName() + << "' of function '" << NF->getName() << "'\n"); // All of the uses must be load instructions. Replace them all with // the argument specified by ArgNo. @@ -688,11 +688,11 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByValOrInAlloca, // to do. 
if (ToPromote.find(Operands) == ToPromote.end()) { if (MaxElements > 0 && ToPromote.size() == MaxElements) { - DEBUG(dbgs() << "argpromotion not promoting argument '" - << Arg->getName() - << "' because it would require adding more " - << "than " << MaxElements - << " arguments to the function.\n"); + LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '" + << Arg->getName() + << "' because it would require adding more " + << "than " << MaxElements + << " arguments to the function.\n"); // We limit aggregate promotion to only promoting up to a fixed number // of elements of the aggregate. return false; @@ -901,11 +901,11 @@ promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter, if (isSafeToPromote) { if (StructType *STy = dyn_cast<StructType>(AgTy)) { if (MaxElements > 0 && STy->getNumElements() > MaxElements) { - DEBUG(dbgs() << "argpromotion disable promoting argument '" - << PtrArg->getName() - << "' because it would require adding more" - << " than " << MaxElements - << " arguments to the function.\n"); + LLVM_DEBUG(dbgs() << "argpromotion disable promoting argument '" + << PtrArg->getName() + << "' because it would require adding more" + << " than " << MaxElements + << " arguments to the function.\n"); continue; } diff --git a/llvm/lib/Transforms/IPO/BlockExtractor.cpp b/llvm/lib/Transforms/IPO/BlockExtractor.cpp index 9d30efdb7c8..ff5ee817da4 100644 --- a/llvm/lib/Transforms/IPO/BlockExtractor.cpp +++ b/llvm/lib/Transforms/IPO/BlockExtractor.cpp @@ -147,8 +147,9 @@ bool BlockExtractor::runOnModule(Module &M) { // Check if the module contains BB. if (BB->getParent()->getParent() != &M) report_fatal_error("Invalid basic block"); - DEBUG(dbgs() << "BlockExtractor: Extracting " << BB->getParent()->getName() - << ":" << BB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "BlockExtractor: Extracting " + << BB->getParent()->getName() << ":" << BB->getName() + << "\n"); SmallVector<BasicBlock *, 2> BlocksToExtractVec; BlocksToExtractVec.push_back(BB); if (const InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) @@ -161,8 +162,8 @@ bool BlockExtractor::runOnModule(Module &M) { // Erase the functions. if (EraseFunctions || BlockExtractorEraseFuncs) { for (Function *F : Functions) { - DEBUG(dbgs() << "BlockExtractor: Trying to delete " << F->getName() - << "\n"); + LLVM_DEBUG(dbgs() << "BlockExtractor: Trying to delete " << F->getName() + << "\n"); F->deleteBody(); } // Set linkage as ExternalLinkage to avoid erasing unreachable functions. 
diff --git a/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp index 4ef8e4b509a..31e771da3bd 100644 --- a/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp +++ b/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp @@ -529,8 +529,8 @@ void DeadArgumentEliminationPass::SurveyFunction(const Function &F) { } if (HasMustTailCalls) { - DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName() - << " has musttail calls\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName() + << " has musttail calls\n"); } if (!F.hasLocalLinkage() && (!ShouldHackArguments || F.isIntrinsic())) { @@ -538,8 +538,9 @@ void DeadArgumentEliminationPass::SurveyFunction(const Function &F) { return; } - DEBUG(dbgs() << "DeadArgumentEliminationPass - Inspecting callers for fn: " - << F.getName() << "\n"); + LLVM_DEBUG( + dbgs() << "DeadArgumentEliminationPass - Inspecting callers for fn: " + << F.getName() << "\n"); // Keep track of the number of live retvals, so we can skip checks once all // of them turn out to be live. unsigned NumLiveRetVals = 0; @@ -606,16 +607,16 @@ void DeadArgumentEliminationPass::SurveyFunction(const Function &F) { } if (HasMustTailCallers) { - DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName() - << " has musttail callers\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName() + << " has musttail callers\n"); } // Now we've inspected all callers, record the liveness of our return values. for (unsigned i = 0; i != RetCount; ++i) MarkValue(CreateRet(&F, i), RetValLiveness[i], MaybeLiveRetUses[i]); - DEBUG(dbgs() << "DeadArgumentEliminationPass - Inspecting args for fn: " - << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Inspecting args for fn: " + << F.getName() << "\n"); // Now, check all of our arguments. unsigned i = 0; @@ -674,8 +675,8 @@ void DeadArgumentEliminationPass::MarkValue(const RetOrArg &RA, Liveness L, /// mark any values that are used as this function's parameters or by its return /// values (according to Uses) live as well. void DeadArgumentEliminationPass::MarkLive(const Function &F) { - DEBUG(dbgs() << "DeadArgumentEliminationPass - Intrinsically live fn: " - << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Intrinsically live fn: " + << F.getName() << "\n"); // Mark the function as live. LiveFunctions.insert(&F); // Mark all arguments as live. @@ -696,8 +697,8 @@ void DeadArgumentEliminationPass::MarkLive(const RetOrArg &RA) { if (!LiveValues.insert(RA).second) return; // We were already marked Live. 
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Marking " - << RA.getDescription() << " live\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Marking " + << RA.getDescription() << " live\n"); PropagateLiveness(RA); } @@ -755,9 +756,9 @@ bool DeadArgumentEliminationPass::RemoveDeadStuffFromFunction(Function *F) { HasLiveReturnedArg |= PAL.hasParamAttribute(i, Attribute::Returned); } else { ++NumArgumentsEliminated; - DEBUG(dbgs() << "DeadArgumentEliminationPass - Removing argument " << i - << " (" << I->getName() << ") from " << F->getName() - << "\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Removing argument " + << i << " (" << I->getName() << ") from " + << F->getName() << "\n"); } } @@ -800,8 +801,9 @@ bool DeadArgumentEliminationPass::RemoveDeadStuffFromFunction(Function *F) { NewRetIdxs[i] = RetTypes.size() - 1; } else { ++NumRetValsEliminated; - DEBUG(dbgs() << "DeadArgumentEliminationPass - Removing return value " - << i << " from " << F->getName() << "\n"); + LLVM_DEBUG( + dbgs() << "DeadArgumentEliminationPass - Removing return value " + << i << " from " << F->getName() << "\n"); } } if (RetTypes.size() > 1) { @@ -1084,7 +1086,7 @@ PreservedAnalyses DeadArgumentEliminationPass::run(Module &M, // removed. We can do this if they never call va_start. This loop cannot be // fused with the next loop, because deleting a function invalidates // information computed while surveying other functions. - DEBUG(dbgs() << "DeadArgumentEliminationPass - Deleting dead varargs\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Deleting dead varargs\n"); for (Module::iterator I = M.begin(), E = M.end(); I != E; ) { Function &F = *I++; if (F.getFunctionType()->isVarArg()) @@ -1095,7 +1097,7 @@ PreservedAnalyses DeadArgumentEliminationPass::run(Module &M, // We assume all arguments are dead unless proven otherwise (allowing us to // determine that dead arguments passed into recursive functions are dead). 
// - DEBUG(dbgs() << "DeadArgumentEliminationPass - Determining liveness\n"); + LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Determining liveness\n"); for (auto &F : M) SurveyFunction(F); diff --git a/llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp b/llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp index 16d78e6b00c..37273f97541 100644 --- a/llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp +++ b/llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp @@ -75,8 +75,8 @@ static void addForcedAttributes(Function &F) { auto Kind = parseAttrKind(KV.second); if (Kind == Attribute::None) { - DEBUG(dbgs() << "ForcedAttribute: " << KV.second - << " unknown or not handled!\n"); + LLVM_DEBUG(dbgs() << "ForcedAttribute: " << KV.second + << " unknown or not handled!\n"); continue; } if (F.hasFnAttribute(Kind)) diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp index eef0738197b..91c855021fd 100644 --- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp +++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp @@ -1013,7 +1013,8 @@ static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) { if (!Speculative) { // Mark the function eagerly since we may discover a function // which prevents us from speculating about the entire SCC - DEBUG(dbgs() << "Eagerly marking " << F->getName() << " as nonnull\n"); + LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName() + << " as nonnull\n"); F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull); ++NumNonNullReturn; MadeChange = true; @@ -1032,7 +1033,7 @@ static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) { !F->getReturnType()->isPointerTy()) continue; - DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n"); + LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n"); F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull); ++NumNonNullReturn; MadeChange = true; @@ -1218,8 +1219,8 @@ static bool inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes) { return InstrBreaksNonConvergent(I, SCCNodes); }, [](Function &F) { - DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName() - << "\n"); + LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName() + << "\n"); F.setNotConvergent(); }, /* RequiresExactDefinition= */ false}); @@ -1239,7 +1240,8 @@ static bool inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes) { return InstrBreaksNonThrowing(I, SCCNodes); }, [](Function &F) { - DEBUG(dbgs() << "Adding nounwind attr to fn " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() + << "Adding nounwind attr to fn " << F.getName() << "\n"); F.setDoesNotThrow(); ++NumNoUnwind; }, diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp index 246d75caefa..2ac01095210 100644 --- a/llvm/lib/Transforms/IPO/FunctionImport.cpp +++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp @@ -136,7 +136,7 @@ static cl::opt<bool> static std::unique_ptr<Module> loadFile(const std::string &FileName, LLVMContext &Context) { SMDiagnostic Err; - DEBUG(dbgs() << "Loading '" << FileName << "'\n"); + LLVM_DEBUG(dbgs() << "Loading '" << FileName << "'\n"); // Metadata isn't loaded until functions are imported, to minimize // the memory overhead. std::unique_ptr<Module> Result = @@ -249,11 +249,12 @@ static void computeImportForReferencedGlobals( StringMap<FunctionImporter::ExportSetTy> *ExportLists) { for (auto &VI : Summary.refs()) { if (DefinedGVSummaries.count(VI.getGUID())) { - DEBUG(dbgs() << "Ref ignored! 
Target already in destination module.\n"); + LLVM_DEBUG( + dbgs() << "Ref ignored! Target already in destination module.\n"); continue; } - DEBUG(dbgs() << " ref -> " << VI.getGUID() << "\n"); + LLVM_DEBUG(dbgs() << " ref -> " << VI.getGUID() << "\n"); for (auto &RefSummary : VI.getSummaryList()) if (RefSummary->getSummaryKind() == GlobalValueSummary::GlobalVarKind && @@ -283,12 +284,12 @@ static void computeImportForFunction( static int ImportCount = 0; for (auto &Edge : Summary.calls()) { ValueInfo VI = Edge.first; - DEBUG(dbgs() << " edge -> " << VI.getGUID() << " Threshold:" << Threshold - << "\n"); + LLVM_DEBUG(dbgs() << " edge -> " << VI.getGUID() + << " Threshold:" << Threshold << "\n"); if (ImportCutoff >= 0 && ImportCount >= ImportCutoff) { - DEBUG(dbgs() << "ignored! import-cutoff value of " << ImportCutoff - << " reached.\n"); + LLVM_DEBUG(dbgs() << "ignored! import-cutoff value of " << ImportCutoff + << " reached.\n"); continue; } @@ -297,7 +298,7 @@ static void computeImportForFunction( continue; if (DefinedGVSummaries.count(VI.getGUID())) { - DEBUG(dbgs() << "ignored! Target already in destination module.\n"); + LLVM_DEBUG(dbgs() << "ignored! Target already in destination module.\n"); continue; } @@ -317,7 +318,8 @@ static void computeImportForFunction( auto *CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold, Summary.modulePath()); if (!CalleeSummary) { - DEBUG(dbgs() << "ignored! No qualifying callee with summary found.\n"); + LLVM_DEBUG( + dbgs() << "ignored! No qualifying callee with summary found.\n"); continue; } @@ -346,8 +348,8 @@ static void computeImportForFunction( /// a second time with a higher threshold. In this case, it is added back to /// the worklist with the new threshold. if (ProcessedThreshold && ProcessedThreshold >= AdjThreshold) { - DEBUG(dbgs() << "ignored! Target was already seen with Threshold " - << ProcessedThreshold << "\n"); + LLVM_DEBUG(dbgs() << "ignored! Target was already seen with Threshold " + << ProcessedThreshold << "\n"); continue; } bool PreviouslyImported = ProcessedThreshold != 0; @@ -398,7 +400,7 @@ static void ComputeImportForModule( // module for (auto &GVSummary : DefinedGVSummaries) { if (!Index.isGlobalValueLive(GVSummary.second)) { - DEBUG(dbgs() << "Ignores Dead GUID: " << GVSummary.first << "\n"); + LLVM_DEBUG(dbgs() << "Ignores Dead GUID: " << GVSummary.first << "\n"); continue; } auto *FuncSummary = @@ -406,7 +408,7 @@ static void ComputeImportForModule( if (!FuncSummary) // Skip import for global variables continue; - DEBUG(dbgs() << "Initialize import for " << GVSummary.first << "\n"); + LLVM_DEBUG(dbgs() << "Initialize import for " << GVSummary.first << "\n"); computeImportForFunction(*FuncSummary, Index, ImportInstrLimit, DefinedGVSummaries, Worklist, ImportList, ExportLists); @@ -469,8 +471,8 @@ void llvm::ComputeCrossModuleImport( // For each module that has function defined, compute the import/export lists. 
for (auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) { auto &ImportList = ImportLists[DefinedGVSummaries.first()]; - DEBUG(dbgs() << "Computing import for Module '" - << DefinedGVSummaries.first() << "'\n"); + LLVM_DEBUG(dbgs() << "Computing import for Module '" + << DefinedGVSummaries.first() << "'\n"); ComputeImportForModule(DefinedGVSummaries.second, Index, ImportList, &ExportLists); } @@ -492,23 +494,23 @@ void llvm::ComputeCrossModuleImport( } #ifndef NDEBUG - DEBUG(dbgs() << "Import/Export lists for " << ImportLists.size() - << " modules:\n"); + LLVM_DEBUG(dbgs() << "Import/Export lists for " << ImportLists.size() + << " modules:\n"); for (auto &ModuleImports : ImportLists) { auto ModName = ModuleImports.first(); auto &Exports = ExportLists[ModName]; unsigned NumGVS = numGlobalVarSummaries(Index, Exports); - DEBUG(dbgs() << "* Module " << ModName << " exports " - << Exports.size() - NumGVS << " functions and " << NumGVS - << " vars. Imports from " - << ModuleImports.second.size() << " modules.\n"); + LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports " + << Exports.size() - NumGVS << " functions and " << NumGVS + << " vars. Imports from " << ModuleImports.second.size() + << " modules.\n"); for (auto &Src : ModuleImports.second) { auto SrcModName = Src.first(); unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second); - DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod - << " functions imported from " << SrcModName << "\n"); - DEBUG(dbgs() << " - " << NumGVSPerMod << " global vars imported from " - << SrcModName << "\n"); + LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod + << " functions imported from " << SrcModName << "\n"); + LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod + << " global vars imported from " << SrcModName << "\n"); } } #endif @@ -518,15 +520,15 @@ void llvm::ComputeCrossModuleImport( static void dumpImportListForModule(const ModuleSummaryIndex &Index, StringRef ModulePath, FunctionImporter::ImportMapTy &ImportList) { - DEBUG(dbgs() << "* Module " << ModulePath << " imports from " - << ImportList.size() << " modules.\n"); + LLVM_DEBUG(dbgs() << "* Module " << ModulePath << " imports from " + << ImportList.size() << " modules.\n"); for (auto &Src : ImportList) { auto SrcModName = Src.first(); unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second); - DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod - << " functions imported from " << SrcModName << "\n"); - DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from " - << SrcModName << "\n"); + LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod + << " functions imported from " << SrcModName << "\n"); + LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from " + << SrcModName << "\n"); } } #endif @@ -541,7 +543,7 @@ void llvm::ComputeCrossModuleImportForModule( Index.collectDefinedFunctionsForModule(ModulePath, FunctionSummaryMap); // Compute the import list for this module. 
- DEBUG(dbgs() << "Computing import for Module '" << ModulePath << "'\n"); + LLVM_DEBUG(dbgs() << "Computing import for Module '" << ModulePath << "'\n"); ComputeImportForModule(FunctionSummaryMap, Index, ImportList); #ifndef NDEBUG @@ -601,7 +603,7 @@ void llvm::computeDeadSymbols( for (const auto &Entry : Index) for (auto &S : Entry.second.SummaryList) if (S->isLive()) { - DEBUG(dbgs() << "Live root: " << Entry.first << "\n"); + LLVM_DEBUG(dbgs() << "Live root: " << Entry.first << "\n"); Worklist.push_back(ValueInfo(/*IsAnalysis=*/false, &Entry)); ++LiveSymbols; break; @@ -667,8 +669,8 @@ void llvm::computeDeadSymbols( Index.setWithGlobalValueDeadStripping(); unsigned DeadSymbols = Index.size() - LiveSymbols; - DEBUG(dbgs() << LiveSymbols << " symbols Live, and " << DeadSymbols - << " symbols Dead \n"); + LLVM_DEBUG(dbgs() << LiveSymbols << " symbols Live, and " << DeadSymbols + << " symbols Dead \n"); NumDeadSymbols += DeadSymbols; NumLiveSymbols += LiveSymbols; } @@ -711,7 +713,8 @@ llvm::EmitImportsFiles(StringRef ModulePath, StringRef OutputFilename, } bool llvm::convertToDeclaration(GlobalValue &GV) { - DEBUG(dbgs() << "Converting to a declaration: `" << GV.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Converting to a declaration: `" << GV.getName() + << "\n"); if (Function *F = dyn_cast<Function>(&GV)) { F->deleteBody(); F->clearMetadata(); @@ -787,8 +790,9 @@ void llvm::thinLTOResolveWeakForLinkerModule( NewLinkage == GlobalValue::WeakODRLinkage) GV.setVisibility(GlobalValue::HiddenVisibility); - DEBUG(dbgs() << "ODR fixing up linkage for `" << GV.getName() << "` from " - << GV.getLinkage() << " to " << NewLinkage << "\n"); + LLVM_DEBUG(dbgs() << "ODR fixing up linkage for `" << GV.getName() + << "` from " << GV.getLinkage() << " to " << NewLinkage + << "\n"); GV.setLinkage(NewLinkage); } // Remove declarations from comdats, including available_externally @@ -865,8 +869,8 @@ static Function *replaceAliasWithAliasee(Module *SrcModule, GlobalAlias *GA) { // index. Expected<bool> FunctionImporter::importFunctions( Module &DestModule, const FunctionImporter::ImportMapTy &ImportList) { - DEBUG(dbgs() << "Starting import for Module " - << DestModule.getModuleIdentifier() << "\n"); + LLVM_DEBUG(dbgs() << "Starting import for Module " + << DestModule.getModuleIdentifier() << "\n"); unsigned ImportedCount = 0, ImportedGVCount = 0; IRMover Mover(DestModule); @@ -899,9 +903,9 @@ Expected<bool> FunctionImporter::importFunctions( continue; auto GUID = F.getGUID(); auto Import = ImportGUIDs.count(GUID); - DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function " << GUID - << " " << F.getName() << " from " - << SrcModule->getSourceFileName() << "\n"); + LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function " + << GUID << " " << F.getName() << " from " + << SrcModule->getSourceFileName() << "\n"); if (Import) { if (Error Err = F.materialize()) return std::move(Err); @@ -921,9 +925,9 @@ Expected<bool> FunctionImporter::importFunctions( continue; auto GUID = GV.getGUID(); auto Import = ImportGUIDs.count(GUID); - DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global " << GUID - << " " << GV.getName() << " from " - << SrcModule->getSourceFileName() << "\n"); + LLVM_DEBUG(dbgs() << (Import ? 
"Is" : "Not") << " importing global " + << GUID << " " << GV.getName() << " from " + << SrcModule->getSourceFileName() << "\n"); if (Import) { if (Error Err = GV.materialize()) return std::move(Err); @@ -935,9 +939,9 @@ Expected<bool> FunctionImporter::importFunctions( continue; auto GUID = GA.getGUID(); auto Import = ImportGUIDs.count(GUID); - DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias " << GUID - << " " << GA.getName() << " from " - << SrcModule->getSourceFileName() << "\n"); + LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias " + << GUID << " " << GA.getName() << " from " + << SrcModule->getSourceFileName() << "\n"); if (Import) { if (Error Err = GA.materialize()) return std::move(Err); @@ -946,9 +950,9 @@ Expected<bool> FunctionImporter::importFunctions( if (Error Err = Base->materialize()) return std::move(Err); auto *Fn = replaceAliasWithAliasee(SrcModule.get(), &GA); - DEBUG(dbgs() << "Is importing aliasee fn " << Base->getGUID() - << " " << Base->getName() << " from " - << SrcModule->getSourceFileName() << "\n"); + LLVM_DEBUG(dbgs() << "Is importing aliasee fn " << Base->getGUID() + << " " << Base->getName() << " from " + << SrcModule->getSourceFileName() << "\n"); if (EnableImportMetadata) { // Add 'thinlto_src_module' metadata for statistics and debugging. Fn->setMetadata( @@ -987,12 +991,12 @@ Expected<bool> FunctionImporter::importFunctions( NumImportedFunctions += (ImportedCount - ImportedGVCount); NumImportedGlobalVars += ImportedGVCount; - DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount - << " functions for Module " << DestModule.getModuleIdentifier() - << "\n"); - DEBUG(dbgs() << "Imported " << ImportedGVCount - << " global variables for Module " - << DestModule.getModuleIdentifier() << "\n"); + LLVM_DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount + << " functions for Module " + << DestModule.getModuleIdentifier() << "\n"); + LLVM_DEBUG(dbgs() << "Imported " << ImportedGVCount + << " global variables for Module " + << DestModule.getModuleIdentifier() << "\n"); return ImportedCount; } diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp index 1ae5dde5fc8..fa9689e7948 100644 --- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp +++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp @@ -567,7 +567,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) { if (NewGlobals.empty()) return nullptr; - DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n"); + LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n"); Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext())); @@ -799,7 +799,8 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, } if (Changed) { - DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV << "\n"); + LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV + << "\n"); ++NumGlobUses; } @@ -813,7 +814,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, CleanupConstantGlobalUsers(GV, nullptr, DL, TLI); } if (GV->use_empty()) { - DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n"); + LLVM_DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n"); Changed = true; GV->eraseFromParent(); ++NumDeleted; @@ -849,7 +850,8 @@ static GlobalVariable * OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy, ConstantInt *NElements, const DataLayout &DL, TargetLibraryInfo *TLI) { - DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << 
'\n'); + LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI + << '\n'); Type *GlobalType; if (NElements->getZExtValue() == 1) @@ -1285,7 +1287,8 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, Value *NElems, const DataLayout &DL, const TargetLibraryInfo *TLI) { - DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); + LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI + << '\n'); Type *MAT = getMallocAllocatedType(CI, TLI); StructType *STy = cast<StructType>(MAT); @@ -1624,7 +1627,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) return false; - DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n"); + LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n"); // Create the new global, initializing it to false. GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), @@ -1763,7 +1766,7 @@ static bool deleteIfDead(GlobalValue &GV, if (!Dead) return false; - DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n"); + LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n"); GV.eraseFromParent(); ++NumDeleted; return true; @@ -1929,7 +1932,7 @@ static bool processInternalGlobal( LookupDomTree)) { const DataLayout &DL = GV->getParent()->getDataLayout(); - DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n"); + LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n"); Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction ->getEntryBlock().begin()); Type *ElemTy = GV->getValueType(); @@ -1950,7 +1953,7 @@ static bool processInternalGlobal( // If the global is never loaded (but may be stored to), it is dead. // Delete it now. if (!GS.IsLoaded) { - DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n"); + LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n"); bool Changed; if (isLeakCheckerRoot(GV)) { @@ -1972,7 +1975,7 @@ static bool processInternalGlobal( } if (GS.StoredType <= GlobalStatus::InitializerStored) { - DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); + LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); GV->setConstant(true); // Clean up any obviously simplifiable users now. @@ -1980,8 +1983,8 @@ static bool processInternalGlobal( // If the global is dead now, just nuke it. if (GV->use_empty()) { - DEBUG(dbgs() << " *** Marking constant allowed us to simplify " - << "all users and delete global!\n"); + LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify " + << "all users and delete global!\n"); GV->eraseFromParent(); ++NumDeleted; return true; @@ -2009,8 +2012,8 @@ static bool processInternalGlobal( CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); if (GV->use_empty()) { - DEBUG(dbgs() << " *** Substituting initializer allowed us to " - << "simplify all users and delete global!\n"); + LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to " + << "simplify all users and delete global!\n"); GV->eraseFromParent(); ++NumDeleted; } @@ -2545,9 +2548,9 @@ static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL, ++NumCtorsEvaluated; // We succeeded at evaluation: commit the result. 
- DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" - << F->getName() << "' to " << Eval.getMutatedMemory().size() - << " stores.\n"); + LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" + << F->getName() << "' to " + << Eval.getMutatedMemory().size() << " stores.\n"); BatchCommitValueTo(Eval.getMutatedMemory()); for (GlobalVariable *GV : Eval.getInvariants()) GV->setConstant(true); diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp index 26b889e617e..e4c7af31f67 100644 --- a/llvm/lib/Transforms/IPO/Inliner.cpp +++ b/llvm/lib/Transforms/IPO/Inliner.cpp @@ -208,8 +208,8 @@ static void mergeInlinedArrayAllocas( // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare // success! - DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI - << "\n\t\tINTO: " << *AvailableAlloca << '\n'); + LLVM_DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI + << "\n\t\tINTO: " << *AvailableAlloca << '\n'); // Move affected dbg.declare calls immediately after the new alloca to // avoid the situation when a dbg.declare precedes its alloca. @@ -379,14 +379,14 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost, Function *Caller = CS.getCaller(); if (IC.isAlways()) { - DEBUG(dbgs() << " Inlining: cost=always" - << ", Call: " << *CS.getInstruction() << "\n"); + LLVM_DEBUG(dbgs() << " Inlining: cost=always" + << ", Call: " << *CS.getInstruction() << "\n"); return IC; } if (IC.isNever()) { - DEBUG(dbgs() << " NOT Inlining: cost=never" - << ", Call: " << *CS.getInstruction() << "\n"); + LLVM_DEBUG(dbgs() << " NOT Inlining: cost=never" + << ", Call: " << *CS.getInstruction() << "\n"); ORE.emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline", Call) << NV("Callee", Callee) << " not inlined into " @@ -397,9 +397,9 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost, } if (!IC) { - DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost() - << ", thres=" << IC.getThreshold() - << ", Call: " << *CS.getInstruction() << "\n"); + LLVM_DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost() + << ", thres=" << IC.getThreshold() + << ", Call: " << *CS.getInstruction() << "\n"); ORE.emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "TooCostly", Call) << NV("Callee", Callee) << " not inlined into " @@ -412,9 +412,9 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost, int TotalSecondaryCost = 0; if (shouldBeDeferred(Caller, CS, IC, TotalSecondaryCost, GetInlineCost)) { - DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() - << " Cost = " << IC.getCost() - << ", outer Cost = " << TotalSecondaryCost << '\n'); + LLVM_DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() + << " Cost = " << IC.getCost() + << ", outer Cost = " << TotalSecondaryCost << '\n'); ORE.emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "IncreaseCostInOtherContexts", Call) @@ -428,9 +428,9 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost, return None; } - DEBUG(dbgs() << " Inlining: cost=" << IC.getCost() - << ", thres=" << IC.getThreshold() - << ", Call: " << *CS.getInstruction() << '\n'); + LLVM_DEBUG(dbgs() << " Inlining: cost=" << IC.getCost() + << ", thres=" << IC.getThreshold() + << ", Call: " << *CS.getInstruction() << '\n'); return IC; } @@ -470,12 +470,12 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG, function_ref<AAResults &(Function &)> AARGetter, ImportedFunctionsInliningStatistics &ImportedFunctionsStats) { SmallPtrSet<Function *, 8> 
SCCFunctions; - DEBUG(dbgs() << "Inliner visiting SCC:"); + LLVM_DEBUG(dbgs() << "Inliner visiting SCC:"); for (CallGraphNode *Node : SCC) { Function *F = Node->getFunction(); if (F) SCCFunctions.insert(F); - DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE")); + LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE")); } // Scan through and identify all call sites ahead of time so that we only @@ -524,7 +524,7 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG, } } - DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n"); + LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n"); // If there are no calls in this function, exit early. if (CallSites.empty()) @@ -593,7 +593,7 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG, // size. This happens because IPSCCP propagates the result out of the // call and then we're left with the dead call. if (IsTriviallyDead) { - DEBUG(dbgs() << " -> Deleting dead call: " << *Instr << "\n"); + LLVM_DEBUG(dbgs() << " -> Deleting dead call: " << *Instr << "\n"); // Update the call graph by deleting the edge from Callee to Caller. CG[Caller]->removeCallEdgeFor(CS); Instr->eraseFromParent(); @@ -657,8 +657,8 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG, // callgraph references to the node, we cannot delete it yet, this // could invalidate the CGSCC iterator. CG[Callee]->getNumReferences() == 0) { - DEBUG(dbgs() << " -> Deleting dead function: " << Callee->getName() - << "\n"); + LLVM_DEBUG(dbgs() << " -> Deleting dead function: " + << Callee->getName() << "\n"); CallGraphNode *CalleeNode = CG[Callee]; // Remove any call graph edges from the callee to its callees. @@ -896,7 +896,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC, if (F.hasFnAttribute(Attribute::OptimizeNone)) continue; - DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"); // Get a FunctionAnalysisManager via a proxy for this particular node. We // do this each time we visit a node as the SCC may have changed and as @@ -948,9 +948,9 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC, // and thus hidden from the full inline history. if (CG.lookupSCC(*CG.lookup(Callee)) == C && UR.InlinedInternalEdges.count({&N, C})) { - DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node " - "previously split out of this SCC by inlining: " - << F.getName() << " -> " << Callee.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node " + "previously split out of this SCC by inlining: " + << F.getName() << " -> " << Callee.getName() << "\n"); continue; } @@ -1069,7 +1069,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC, // change. 
LazyCallGraph::SCC *OldC = C; C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR); - DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n"); + LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n"); RC = &C->getOuterRefSCC(); // If this causes an SCC to split apart into multiple smaller SCCs, there @@ -1087,8 +1087,8 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC, if (C != OldC && llvm::any_of(InlinedCallees, [&](Function *Callee) { return CG.lookupSCC(*CG.lookup(*Callee)) == OldC; })) { - DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, " - "retaining this to avoid infinite inlining.\n"); + LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, " + "retaining this to avoid infinite inlining.\n"); UR.InlinedInternalEdges.insert({&N, OldC}); } InlinedCallees.clear(); diff --git a/llvm/lib/Transforms/IPO/Internalize.cpp b/llvm/lib/Transforms/IPO/Internalize.cpp index 26db1465bb2..a6542d28dfd 100644 --- a/llvm/lib/Transforms/IPO/Internalize.cpp +++ b/llvm/lib/Transforms/IPO/Internalize.cpp @@ -192,7 +192,7 @@ bool InternalizePass::internalizeModule(Module &M, CallGraph *CG) { ExternalNode->removeOneAbstractEdgeTo((*CG)[&I]); ++NumFunctions; - DEBUG(dbgs() << "Internalizing func " << I.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Internalizing func " << I.getName() << "\n"); } // Never internalize the llvm.used symbol. It is used to implement @@ -221,7 +221,7 @@ bool InternalizePass::internalizeModule(Module &M, CallGraph *CG) { Changed = true; ++NumGlobals; - DEBUG(dbgs() << "Internalized gvar " << GV.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Internalized gvar " << GV.getName() << "\n"); } // Mark all aliases that are not in the api as internal as well. @@ -231,7 +231,7 @@ bool InternalizePass::internalizeModule(Module &M, CallGraph *CG) { Changed = true; ++NumAliases; - DEBUG(dbgs() << "Internalized alias " << GA.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Internalized alias " << GA.getName() << "\n"); } return Changed; diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp index 1f19a3b9779..d2545af8502 100644 --- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp +++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp @@ -1026,7 +1026,7 @@ void LowerTypeTestsModule::lowerTypeTestCalls( for (Metadata *TypeId : TypeIds) { // Build the bitset. BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout); - DEBUG({ + LLVM_DEBUG({ if (auto MDS = dyn_cast<MDString>(TypeId)) dbgs() << MDS->getString() << ": "; else diff --git a/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/llvm/lib/Transforms/IPO/MergeFunctions.cpp index 76b90391fbb..080e2f63449 100644 --- a/llvm/lib/Transforms/IPO/MergeFunctions.cpp +++ b/llvm/lib/Transforms/IPO/MergeFunctions.cpp @@ -407,10 +407,10 @@ bool MergeFunctions::runOnModule(Module &M) { std::vector<WeakTrackingVH> Worklist; Deferred.swap(Worklist); - DEBUG(doSanityCheck(Worklist)); + LLVM_DEBUG(doSanityCheck(Worklist)); - DEBUG(dbgs() << "size of module: " << M.size() << '\n'); - DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n'); + LLVM_DEBUG(dbgs() << "size of module: " << M.size() << '\n'); + LLVM_DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n'); // Insert functions and merge them. 
for (WeakTrackingVH &I : Worklist) { @@ -421,7 +421,7 @@ bool MergeFunctions::runOnModule(Module &M) { Changed |= insert(F); } } - DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n'); + LLVM_DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n'); } while (!Deferred.empty()); FnTree.clear(); @@ -498,19 +498,20 @@ static Value *createCast(IRBuilder<> &Builder, Value *V, Type *DestTy) { // parameter debug info, from the entry block. void MergeFunctions::eraseInstsUnrelatedToPDI( std::vector<Instruction *> &PDIUnrelatedWL) { - DEBUG(dbgs() << " Erasing instructions (in reverse order of appearance in " - "entry block) unrelated to parameter debug info from entry " - "block: {\n"); + LLVM_DEBUG( + dbgs() << " Erasing instructions (in reverse order of appearance in " + "entry block) unrelated to parameter debug info from entry " + "block: {\n"); while (!PDIUnrelatedWL.empty()) { Instruction *I = PDIUnrelatedWL.back(); - DEBUG(dbgs() << " Deleting Instruction: "); - DEBUG(I->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Deleting Instruction: "); + LLVM_DEBUG(I->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); I->eraseFromParent(); PDIUnrelatedWL.pop_back(); } - DEBUG(dbgs() << " } // Done erasing instructions unrelated to parameter " - "debug info from entry block. \n"); + LLVM_DEBUG(dbgs() << " } // Done erasing instructions unrelated to parameter " + "debug info from entry block. \n"); } // Reduce G to its entry block. @@ -543,99 +544,100 @@ void MergeFunctions::filterInstsUnrelatedToPDI( for (BasicBlock::iterator BI = GEntryBlock->begin(), BIE = GEntryBlock->end(); BI != BIE; ++BI) { if (auto *DVI = dyn_cast<DbgValueInst>(&*BI)) { - DEBUG(dbgs() << " Deciding: "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Deciding: "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); DILocalVariable *DILocVar = DVI->getVariable(); if (DILocVar->isParameter()) { - DEBUG(dbgs() << " Include (parameter): "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Include (parameter): "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); PDIRelated.insert(&*BI); } else { - DEBUG(dbgs() << " Delete (!parameter): "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Delete (!parameter): "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } } else if (auto *DDI = dyn_cast<DbgDeclareInst>(&*BI)) { - DEBUG(dbgs() << " Deciding: "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Deciding: "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); DILocalVariable *DILocVar = DDI->getVariable(); if (DILocVar->isParameter()) { - DEBUG(dbgs() << " Parameter: "); - DEBUG(DILocVar->print(dbgs())); + LLVM_DEBUG(dbgs() << " Parameter: "); + LLVM_DEBUG(DILocVar->print(dbgs())); AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress()); if (AI) { - DEBUG(dbgs() << " Processing alloca users: "); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Processing alloca users: "); + LLVM_DEBUG(dbgs() << "\n"); for (User *U : AI->users()) { if (StoreInst *SI = dyn_cast<StoreInst>(U)) { if (Value *Arg = SI->getValueOperand()) { if (dyn_cast<Argument>(Arg)) { - DEBUG(dbgs() << " Include: "); - DEBUG(AI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Include: "); + LLVM_DEBUG(AI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); PDIRelated.insert(AI); - DEBUG(dbgs() << " Include (parameter): "); - DEBUG(SI->print(dbgs())); - 
DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Include (parameter): "); + LLVM_DEBUG(SI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); PDIRelated.insert(SI); - DEBUG(dbgs() << " Include: "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Include: "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); PDIRelated.insert(&*BI); } else { - DEBUG(dbgs() << " Delete (!parameter): "); - DEBUG(SI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Delete (!parameter): "); + LLVM_DEBUG(SI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } } } else { - DEBUG(dbgs() << " Defer: "); - DEBUG(U->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Defer: "); + LLVM_DEBUG(U->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } } } else { - DEBUG(dbgs() << " Delete (alloca NULL): "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Delete (alloca NULL): "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } } else { - DEBUG(dbgs() << " Delete (!parameter): "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Delete (!parameter): "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } } else if (dyn_cast<TerminatorInst>(BI) == GEntryBlock->getTerminator()) { - DEBUG(dbgs() << " Will Include Terminator: "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Will Include Terminator: "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); PDIRelated.insert(&*BI); } else { - DEBUG(dbgs() << " Defer: "); - DEBUG(BI->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Defer: "); + LLVM_DEBUG(BI->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } } - DEBUG(dbgs() - << " Report parameter debug info related/related instructions: {\n"); + LLVM_DEBUG( + dbgs() + << " Report parameter debug info related/related instructions: {\n"); for (BasicBlock::iterator BI = GEntryBlock->begin(), BE = GEntryBlock->end(); BI != BE; ++BI) { Instruction *I = &*BI; if (PDIRelated.find(I) == PDIRelated.end()) { - DEBUG(dbgs() << " !PDIRelated: "); - DEBUG(I->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " !PDIRelated: "); + LLVM_DEBUG(I->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); PDIUnrelatedWL.push_back(I); } else { - DEBUG(dbgs() << " PDIRelated: "); - DEBUG(I->print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " PDIRelated: "); + LLVM_DEBUG(I->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } } - DEBUG(dbgs() << " }\n"); + LLVM_DEBUG(dbgs() << " }\n"); } // Replace G with a simple tail call to bitcast(F). Also (unless @@ -674,8 +676,8 @@ void MergeFunctions::writeThunk(Function *F, Function *G) { // making the function larger. 
if (F->size() == 1) { if (F->front().size() <= 2) { - DEBUG(dbgs() << "writeThunk: " << F->getName() - << " is too small to bother creating a thunk for\n"); + LLVM_DEBUG(dbgs() << "writeThunk: " << F->getName() + << " is too small to bother creating a thunk for\n"); return; } } @@ -685,13 +687,14 @@ void MergeFunctions::writeThunk(Function *F, Function *G) { BasicBlock *BB = nullptr; Function *NewG = nullptr; if (MergeFunctionsPDI) { - DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) Do not create a new " - "function as thunk; retain original: " - << G->getName() << "()\n"); + LLVM_DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) Do not create a new " + "function as thunk; retain original: " + << G->getName() << "()\n"); GEntryBlock = &G->getEntryBlock(); - DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) filter parameter related " - "debug info for " - << G->getName() << "() {\n"); + LLVM_DEBUG( + dbgs() << "writeThunk: (MergeFunctionsPDI) filter parameter related " + "debug info for " + << G->getName() << "() {\n"); filterInstsUnrelatedToPDI(GEntryBlock, PDIUnrelatedWL); GEntryBlock->getTerminator()->eraseFromParent(); BB = GEntryBlock; @@ -730,13 +733,15 @@ void MergeFunctions::writeThunk(Function *F, Function *G) { CI->setDebugLoc(CIDbgLoc); RI->setDebugLoc(RIDbgLoc); } else { - DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) No DISubprogram for " - << G->getName() << "()\n"); + LLVM_DEBUG( + dbgs() << "writeThunk: (MergeFunctionsPDI) No DISubprogram for " + << G->getName() << "()\n"); } eraseTail(G); eraseInstsUnrelatedToPDI(PDIUnrelatedWL); - DEBUG(dbgs() << "} // End of parameter related debug info filtering for: " - << G->getName() << "()\n"); + LLVM_DEBUG( + dbgs() << "} // End of parameter related debug info filtering for: " + << G->getName() << "()\n"); } else { NewG->copyAttributesFrom(G); NewG->takeName(G); @@ -745,7 +750,7 @@ void MergeFunctions::writeThunk(Function *F, Function *G) { G->eraseFromParent(); } - DEBUG(dbgs() << "writeThunk: " << H->getName() << '\n'); + LLVM_DEBUG(dbgs() << "writeThunk: " << H->getName() << '\n'); ++NumThunksWritten; } @@ -806,7 +811,8 @@ bool MergeFunctions::insert(Function *NewFunction) { if (Result.second) { assert(FNodesInTree.count(NewFunction) == 0); FNodesInTree.insert({NewFunction, Result.first}); - DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n'); + LLVM_DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() + << '\n'); return false; } @@ -827,8 +833,8 @@ bool MergeFunctions::insert(Function *NewFunction) { assert(OldF.getFunc() != F && "Must have swapped the functions."); } - DEBUG(dbgs() << " " << OldF.getFunc()->getName() - << " == " << NewFunction->getName() << '\n'); + LLVM_DEBUG(dbgs() << " " << OldF.getFunc()->getName() + << " == " << NewFunction->getName() << '\n'); Function *DeleteF = NewFunction; mergeTwoFunctions(OldF.getFunc(), DeleteF); @@ -840,7 +846,7 @@ bool MergeFunctions::insert(Function *NewFunction) { void MergeFunctions::remove(Function *F) { auto I = FNodesInTree.find(F); if (I != FNodesInTree.end()) { - DEBUG(dbgs() << "Deferred " << F->getName()<< ".\n"); + LLVM_DEBUG(dbgs() << "Deferred " << F->getName() << ".\n"); FnTree.erase(I->second); // I->second has been invalidated, remove it from the FNodesInTree map to // preserve the invariant. 
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp index 0222d249a85..945c660aa78 100644 --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -557,11 +557,11 @@ ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) { return Remark; }); } - DEBUG(dbgs() << " " << DLoc.getLine() << "." - << DIL->getBaseDiscriminator() << ":" << Inst - << " (line offset: " << LineOffset << "." - << DIL->getBaseDiscriminator() << " - weight: " << R.get() - << ")\n"); + LLVM_DEBUG(dbgs() << " " << DLoc.getLine() << "." + << DIL->getBaseDiscriminator() << ":" << Inst + << " (line offset: " << LineOffset << "." + << DIL->getBaseDiscriminator() << " - weight: " << R.get() + << ")\n"); } return R; } @@ -595,7 +595,7 @@ ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) { /// \param F The function to query. bool SampleProfileLoader::computeBlockWeights(Function &F) { bool Changed = false; - DEBUG(dbgs() << "Block weights\n"); + LLVM_DEBUG(dbgs() << "Block weights\n"); for (const auto &BB : F) { ErrorOr<uint64_t> Weight = getBlockWeight(&BB); if (Weight) { @@ -603,7 +603,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) { VisitedBlocks.insert(&BB); Changed = true; } - DEBUG(printBlockWeight(dbgs(), &BB)); + LLVM_DEBUG(printBlockWeight(dbgs(), &BB)); } return Changed; @@ -811,9 +811,9 @@ bool SampleProfileLoader::inlineHotFunctions( inlineCallInstruction(DI)) LocalChanged = true; } else { - DEBUG(dbgs() - << "\nFailed to promote indirect call to " - << CalleeFunctionName << " because " << Reason << "\n"); + LLVM_DEBUG(dbgs() + << "\nFailed to promote indirect call to " + << CalleeFunctionName << " because " << Reason << "\n"); } } } else if (CalledFunction && CalledFunction->getSubprogram() && @@ -902,14 +902,14 @@ void SampleProfileLoader::findEquivalencesFor( /// \param F The function to query. void SampleProfileLoader::findEquivalenceClasses(Function &F) { SmallVector<BasicBlock *, 8> DominatedBBs; - DEBUG(dbgs() << "\nBlock equivalence classes\n"); + LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n"); // Find equivalence sets based on dominance and post-dominance information. for (auto &BB : F) { BasicBlock *BB1 = &BB; // Compute BB1's equivalence class once. if (EquivalenceClass.count(BB1)) { - DEBUG(printBlockEquivalence(dbgs(), BB1)); + LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1)); continue; } @@ -930,7 +930,7 @@ void SampleProfileLoader::findEquivalenceClasses(Function &F) { DT->getDescendants(BB1, DominatedBBs); findEquivalencesFor(BB1, DominatedBBs, PDT.get()); - DEBUG(printBlockEquivalence(dbgs(), BB1)); + LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1)); } // Assign weights to equivalence classes. @@ -939,13 +939,14 @@ void SampleProfileLoader::findEquivalenceClasses(Function &F) { // the same number of times. Since we know that the head block in // each equivalence class has the largest weight, assign that weight // to all the blocks in that equivalence class. 
- DEBUG(dbgs() << "\nAssign the same weight to all blocks in the same class\n"); + LLVM_DEBUG( + dbgs() << "\nAssign the same weight to all blocks in the same class\n"); for (auto &BI : F) { const BasicBlock *BB = &BI; const BasicBlock *EquivBB = EquivalenceClass[BB]; if (BB != EquivBB) BlockWeights[BB] = BlockWeights[EquivBB]; - DEBUG(printBlockWeight(dbgs(), BB)); + LLVM_DEBUG(printBlockWeight(dbgs(), BB)); } } @@ -986,7 +987,7 @@ uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges, bool SampleProfileLoader::propagateThroughEdges(Function &F, bool UpdateBlockCount) { bool Changed = false; - DEBUG(dbgs() << "\nPropagation through edges\n"); + LLVM_DEBUG(dbgs() << "\nPropagation through edges\n"); for (const auto &BI : F) { const BasicBlock *BB = &BI; const BasicBlock *EC = EquivalenceClass[BB]; @@ -1058,9 +1059,9 @@ bool SampleProfileLoader::propagateThroughEdges(Function &F, if (TotalWeight > BBWeight) { BBWeight = TotalWeight; Changed = true; - DEBUG(dbgs() << "All edge weights for " << BB->getName() - << " known. Set weight for block: "; - printBlockWeight(dbgs(), BB);); + LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName() + << " known. Set weight for block: "; + printBlockWeight(dbgs(), BB);); } } else if (NumTotalEdges == 1 && EdgeWeights[SingleEdge] < BlockWeights[EC]) { @@ -1087,8 +1088,8 @@ bool SampleProfileLoader::propagateThroughEdges(Function &F, EdgeWeights[UnknownEdge] = BlockWeights[OtherEC]; VisitedEdges.insert(UnknownEdge); Changed = true; - DEBUG(dbgs() << "Set weight for edge: "; - printEdgeWeight(dbgs(), UnknownEdge)); + LLVM_DEBUG(dbgs() << "Set weight for edge: "; + printEdgeWeight(dbgs(), UnknownEdge)); } } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) { // If a block Weights 0, all its in/out edges should weight 0. @@ -1114,8 +1115,8 @@ bool SampleProfileLoader::propagateThroughEdges(Function &F, EdgeWeights[SelfReferentialEdge] = 0; VisitedEdges.insert(SelfReferentialEdge); Changed = true; - DEBUG(dbgs() << "Set self-referential edge weight to: "; - printEdgeWeight(dbgs(), SelfReferentialEdge)); + LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: "; + printEdgeWeight(dbgs(), SelfReferentialEdge)); } if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) { BlockWeights[EC] = TotalWeight; @@ -1239,7 +1240,7 @@ void SampleProfileLoader::propagateWeights(Function &F) { // Generate MD_prof metadata for every branch instruction using the // edge weights computed during propagation. - DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n"); + LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n"); LLVMContext &Ctx = F.getContext(); MDBuilder MDB(Ctx); for (auto &BI : F) { @@ -1285,10 +1286,10 @@ void SampleProfileLoader::propagateWeights(Function &F) { continue; DebugLoc BranchLoc = TI->getDebugLoc(); - DEBUG(dbgs() << "\nGetting weights for branch at line " - << ((BranchLoc) ? Twine(BranchLoc.getLine()) - : Twine("<UNKNOWN LOCATION>")) - << ".\n"); + LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line " + << ((BranchLoc) ? 
Twine(BranchLoc.getLine()) + : Twine("<UNKNOWN LOCATION>")) + << ".\n"); SmallVector<uint32_t, 4> Weights; uint32_t MaxWeight = 0; Instruction *MaxDestInst; @@ -1296,12 +1297,12 @@ void SampleProfileLoader::propagateWeights(Function &F) { BasicBlock *Succ = TI->getSuccessor(I); Edge E = std::make_pair(BB, Succ); uint64_t Weight = EdgeWeights[E]; - DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E)); + LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E)); // Use uint32_t saturated arithmetic to adjust the incoming weights, // if needed. Sample counts in profiles are 64-bit unsigned values, // but internally branch weights are expressed as 32-bit values. if (Weight > std::numeric_limits<uint32_t>::max()) { - DEBUG(dbgs() << " (saturated due to uint32_t overflow)"); + LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)"); Weight = std::numeric_limits<uint32_t>::max(); } // Weight is added by one to avoid propagation errors introduced by @@ -1322,7 +1323,7 @@ void SampleProfileLoader::propagateWeights(Function &F) { // annotation is done twice. If the first annotation already set the // weights, the second pass does not need to set it. if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) { - DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n"); + LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n"); TI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights)); ORE->emit([&]() { @@ -1331,7 +1332,7 @@ void SampleProfileLoader::propagateWeights(Function &F) { << ore::NV("CondBranchesLoc", BranchLoc); }); } else { - DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n"); + LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n"); } } } @@ -1426,8 +1427,8 @@ bool SampleProfileLoader::emitAnnotations(Function &F) { if (getFunctionLoc(F) == 0) return false; - DEBUG(dbgs() << "Line number for the first instruction in " << F.getName() - << ": " << getFunctionLoc(F) << "\n"); + LLVM_DEBUG(dbgs() << "Line number for the first instruction in " + << F.getName() << ": " << getFunctionLoc(F) << "\n"); DenseSet<GlobalValue::GUID> InlinedGUIDs; Changed |= inlineHotFunctions(F, InlinedGUIDs); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 1300a92a24c..4a01c1106d3 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -3780,8 +3780,8 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) { // Remove the convergent attr on calls when the callee is not convergent. if (CS.isConvergent() && !CalleeF->isConvergent() && !CalleeF->isIntrinsic()) { - DEBUG(dbgs() << "Removing convergent attr from instr " - << CS.getInstruction() << "\n"); + LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " + << CS.getInstruction() << "\n"); CS.setNotConvergent(); return CS.getInstruction(); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp index 16136777731..f01bfca5abe 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -697,8 +697,10 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) { // If this cast is a truncate, evaluting in a different type always // eliminates the cast, so it is always a win. 
- DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type" - " to avoid cast: " << CI << '\n'); + LLVM_DEBUG( + dbgs() << "ICE: EvaluateInDifferentType converting expression type" + " to avoid cast: " + << CI << '\n'); Value *Res = EvaluateInDifferentType(Src, DestTy, false); assert(Res->getType() == DestTy); return replaceInstUsesWith(CI, Res); @@ -1070,8 +1072,10 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) { "Can't clear more bits than in SrcTy"); // Okay, we can transform this! Insert the new expression now. - DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type" - " to avoid zero extend: " << CI << '\n'); + LLVM_DEBUG( + dbgs() << "ICE: EvaluateInDifferentType converting expression type" + " to avoid zero extend: " + << CI << '\n'); Value *Res = EvaluateInDifferentType(Src, DestTy, false); assert(Res->getType() == DestTy); @@ -1344,8 +1348,10 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) { if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) && canEvaluateSExtd(Src, DestTy)) { // Okay, we can transform this! Insert the new expression now. - DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type" - " to avoid sign extend: " << CI << '\n'); + LLVM_DEBUG( + dbgs() << "ICE: EvaluateInDifferentType converting expression type" + " to avoid sign extend: " + << CI << '\n'); Value *Res = EvaluateInDifferentType(Src, DestTy, true); assert(Res->getType() == DestTy); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h index a3a485c2111..38dc75335e2 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -536,8 +536,8 @@ public: if (&I == V) V = UndefValue::get(I.getType()); - DEBUG(dbgs() << "IC: Replacing " << I << "\n" - << " with " << *V << '\n'); + LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n" + << " with " << *V << '\n'); I.replaceAllUsesWith(V); return &I; @@ -559,7 +559,7 @@ public: /// value, we can't rely on DCE to delete the instruction. Instead, visit /// methods should return the value returned by this function. 
Instruction *eraseInstFromFunction(Instruction &I) { - DEBUG(dbgs() << "IC: ERASE " << I << '\n'); + LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n'); assert(I.use_empty() && "Cannot erase instruction that is used!"); salvageDebugInfo(I); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index b78de0fa691..41cbd647ca7 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -270,7 +270,7 @@ void PointerReplacer::findLoadAndReplace(Instruction &I) { auto *Inst = dyn_cast<Instruction>(&*U); if (!Inst) return; - DEBUG(dbgs() << "Found pointer user: " << *U << '\n'); + LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n'); if (isa<LoadInst>(Inst)) { for (auto P : Path) replace(P); @@ -405,8 +405,8 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) { Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT); if (AI.getAlignment() <= SourceAlign && isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) { - DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n'); - DEBUG(dbgs() << " memcpy = " << *Copy << '\n'); + LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n'); + LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n'); for (unsigned i = 0, e = ToDelete.size(); i != e; ++i) eraseInstFromFunction(*ToDelete[i]); Constant *TheSrc = cast<Constant>(Copy->getSource()); diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp index 0827a34006c..91850cee77f 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp @@ -1008,10 +1008,9 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { // extracted out of it. First, sort the users by their offset and size. array_pod_sort(PHIUsers.begin(), PHIUsers.end()); - DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n'; - for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i) - dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n'; - ); + LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n'; + for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i) dbgs() + << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';); // PredValues - This is a temporary used when rewriting PHI nodes. It is // hoisted out here to avoid construction/destruction thrashing. @@ -1092,8 +1091,8 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { } PredValues.clear(); - DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": " - << *EltPHI << '\n'); + LLVM_DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": " + << *EltPHI << '\n'); ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp index 4b5c96ccbf1..16782806397 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp @@ -356,8 +356,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1, // cast of lshr(shl(x,c1),c2) as well as other more complex cases. 
if (I.getOpcode() != Instruction::AShr && canEvaluateShifted(Op0, Op1C->getZExtValue(), isLeftShift, *this, &I)) { - DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression" - " to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n"); + LLVM_DEBUG( + dbgs() << "ICE: GetShiftedValue propagating shift through expression" + " to eliminate shift:\n IN: " + << *Op0 << "\n SH: " << I << "\n"); return replaceInstUsesWith( I, getShiftedValue(Op0, Op1C->getZExtValue(), isLeftShift, *this, DL)); diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index ac4e568d529..2549257bc97 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -2939,7 +2939,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { for (auto *DII : DbgUsers) { if (DII->getParent() == SrcBlock) { DII->moveBefore(&*InsertPos); - DEBUG(dbgs() << "SINK: " << *DII << '\n'); + LLVM_DEBUG(dbgs() << "SINK: " << *DII << '\n'); } } return true; @@ -2952,7 +2952,7 @@ bool InstCombiner::run() { // Check to see if we can DCE the instruction. if (isInstructionTriviallyDead(I, &TLI)) { - DEBUG(dbgs() << "IC: DCE: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "IC: DCE: " << *I << '\n'); eraseInstFromFunction(*I); ++NumDeadInst; MadeIRChange = true; @@ -2966,7 +2966,8 @@ bool InstCombiner::run() { if (!I->use_empty() && (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) { if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) { - DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I + << '\n'); // Add operands to the worklist. replaceInstUsesWith(*I, C); @@ -2985,8 +2986,8 @@ bool InstCombiner::run() { KnownBits Known = computeKnownBits(I, /*Depth*/0, I); if (Known.isConstant()) { Constant *C = ConstantInt::get(Ty, Known.getConstant()); - DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C << - " from: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C + << " from: " << *I << '\n'); // Add operands to the worklist. replaceInstUsesWith(*I, C); @@ -3025,7 +3026,7 @@ bool InstCombiner::run() { if (UserIsSuccessor && UserParent->getUniquePredecessor()) { // Okay, the CFG is simple enough, try to sink this instruction. if (TryToSinkInstruction(I, UserParent)) { - DEBUG(dbgs() << "IC: Sink: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n'); MadeIRChange = true; // We'll add uses of the sunk instruction below, but since sinking // can expose opportunities for it's *operands* add them to the @@ -3045,15 +3046,15 @@ bool InstCombiner::run() { #ifndef NDEBUG std::string OrigI; #endif - DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str();); - DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n'); + LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str();); + LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n'); if (Instruction *Result = visit(*I)) { ++NumCombined; // Should we replace the old instruction with a new one? 
if (Result != I) { - DEBUG(dbgs() << "IC: Old = " << *I << '\n' - << " New = " << *Result << '\n'); + LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n' + << " New = " << *Result << '\n'); if (I->getDebugLoc()) Result->setDebugLoc(I->getDebugLoc()); @@ -3080,8 +3081,8 @@ bool InstCombiner::run() { eraseInstFromFunction(*I); } else { - DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n' - << " New = " << *I << '\n'); + LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n' + << " New = " << *I << '\n'); // If the instruction was modified, it's possible that it is now dead. // if so, remove it. @@ -3132,7 +3133,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL, // DCE instruction if trivially dead. if (isInstructionTriviallyDead(Inst, TLI)) { ++NumDeadInst; - DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n'); salvageDebugInfo(*Inst); Inst->eraseFromParent(); MadeIRChange = true; @@ -3143,8 +3144,8 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL, if (!Inst->use_empty() && (Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0)))) if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) { - DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " - << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *Inst + << '\n'); Inst->replaceAllUsesWith(C); ++NumConstProp; if (isInstructionTriviallyDead(Inst, TLI)) @@ -3166,9 +3167,9 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL, FoldRes = C; if (FoldRes != C) { - DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst - << "\n Old = " << *C - << "\n New = " << *FoldRes << '\n'); + LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst + << "\n Old = " << *C + << "\n New = " << *FoldRes << '\n'); U = FoldRes; MadeIRChange = true; } @@ -3271,8 +3272,8 @@ static bool combineInstructionsOverFunction( int Iteration = 0; while (true) { ++Iteration; - DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " - << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " + << F.getName() << "\n"); MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist); diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 5715c6067e9..eadc9ce50a8 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -877,7 +877,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { processStaticAllocas(); if (ClDebugStack) { - DEBUG(dbgs() << F); + LLVM_DEBUG(dbgs() << F); } return true; } @@ -1617,7 +1617,7 @@ void AddressSanitizerModule::createInitializerPoisonCalls( bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { Type *Ty = G->getValueType(); - DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); + LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); if (GlobalsMD.get(G).IsBlacklisted) return false; if (!Ty->isSized()) return false; @@ -1659,7 +1659,8 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { // See https://github.com/google/sanitizers/issues/305 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx if (Section.startswith(".CRT")) { - DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n"); + LLVM_DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G + << "\n"); return false; 
} @@ -1676,7 +1677,7 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { // them. if (ParsedSegment == "__OBJC" || (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) { - DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); + LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); return false; } // See https://github.com/google/sanitizers/issues/32 @@ -1688,13 +1689,13 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { // Therefore there's no point in placing redzones into __DATA,__cfstring. // Moreover, it causes the linker to crash on OS X 10.7 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") { - DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); + LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); return false; } // The linker merges the contents of cstring_literals and removes the // trailing zeroes. if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) { - DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); + LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); return false; } } @@ -2161,7 +2162,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true; - DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); + LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); Initializers[i] = Initializer; } @@ -2195,7 +2196,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool if (HasDynamicallyInitializedGlobals) createInitializerPoisonCalls(M, ModuleName); - DEBUG(dbgs() << M); + LLVM_DEBUG(dbgs() << M); return true; } @@ -2436,7 +2437,7 @@ bool AddressSanitizer::runOnFunction(Function &F) { // Leave if the function doesn't need instrumentation. 
if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified; - DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); + LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); initializeCallbacks(*F.getParent()); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); @@ -2549,8 +2550,8 @@ bool AddressSanitizer::runOnFunction(Function &F) { if (NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty()) FunctionModified = true; - DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " " - << F << "\n"); + LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " " + << F << "\n"); return FunctionModified; } @@ -2866,7 +2867,7 @@ void FunctionStackPoisoner::processStaticAllocas() { } auto DescriptionString = ComputeASanStackFrameDescription(SVD); - DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n"); + LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n"); uint64_t LocalStackSize = L.FrameSize; bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize; @@ -3101,7 +3102,8 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) { } else if (GetElementPtrInst *EP = dyn_cast<GetElementPtrInst>(V)) { Res = findAllocaForValue(EP->getPointerOperand()); } else { - DEBUG(dbgs() << "Alloca search canceled on unknown instruction: " << *V << "\n"); + LLVM_DEBUG(dbgs() << "Alloca search canceled on unknown instruction: " << *V + << "\n"); } if (Res) AllocaForValue[V] = Res; return Res; diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp index be9a22a8681..87b3019c910 100644 --- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp +++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -62,8 +62,8 @@ static bool instrumentMemAccess(Value *Ptr, Value *InstVal, BuilderTy &IRB, GetTrapBBT GetTrapBB) { uint64_t NeededSize = DL.getTypeStoreSize(InstVal->getType()); - DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize) - << " bytes\n"); + LLVM_DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize) + << " bytes\n"); SizeOffsetEvalType SizeOffset = ObjSizeEval.compute(Ptr); diff --git a/llvm/lib/Transforms/Instrumentation/CFGMST.h b/llvm/lib/Transforms/Instrumentation/CFGMST.h index 54a36eb716a..cc9b149d0b6 100644 --- a/llvm/lib/Transforms/Instrumentation/CFGMST.h +++ b/llvm/lib/Transforms/Instrumentation/CFGMST.h @@ -97,7 +97,7 @@ public: // Edges with large weight will be put into MST first so they are less likely // to be instrumented. void buildEdges() { - DEBUG(dbgs() << "Build Edge on " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Build Edge on " << F.getName() << "\n"); const BasicBlock *Entry = &(F.getEntryBlock()); uint64_t EntryWeight = (BFI != nullptr ? BFI->getEntryFreq() : 2); @@ -107,8 +107,8 @@ public: // Add a fake edge to the entry. EntryIncoming = &addEdge(nullptr, Entry, EntryWeight); - DEBUG(dbgs() << " Edge: from fake node to " << Entry->getName() - << " w = " << EntryWeight << "\n"); + LLVM_DEBUG(dbgs() << " Edge: from fake node to " << Entry->getName() + << " w = " << EntryWeight << "\n"); // Special handling for single BB functions. 
if (succ_empty(Entry)) { @@ -138,8 +138,8 @@ public: Weight = BPI->getEdgeProbability(&*BB, TargetBB).scale(scaleFactor); auto *E = &addEdge(&*BB, TargetBB, Weight); E->IsCritical = Critical; - DEBUG(dbgs() << " Edge: from " << BB->getName() << " to " - << TargetBB->getName() << " w=" << Weight << "\n"); + LLVM_DEBUG(dbgs() << " Edge: from " << BB->getName() << " to " + << TargetBB->getName() << " w=" << Weight << "\n"); // Keep track of entry/exit edges: if (&*BB == Entry) { @@ -164,8 +164,8 @@ public: MaxExitOutWeight = BBWeight; ExitOutgoing = ExitO; } - DEBUG(dbgs() << " Edge: from " << BB->getName() << " to fake exit" - << " w = " << BBWeight << "\n"); + LLVM_DEBUG(dbgs() << " Edge: from " << BB->getName() << " to fake exit" + << " w = " << BBWeight << "\n"); } } diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp index 49f7c330be8..c8eb680bbdf 100644 --- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -316,7 +316,7 @@ namespace { ReturnBlock(1, os) { this->os = os; - DEBUG(dbgs() << "Function: " << getFunctionName(SP) << "\n"); + LLVM_DEBUG(dbgs() << "Function: " << getFunctionName(SP) << "\n"); uint32_t i = 0; for (auto &BB : *F) { @@ -384,7 +384,7 @@ namespace { for (int i = 0, e = Blocks.size() + 1; i != e; ++i) { write(0); // No flags on our blocks. } - DEBUG(dbgs() << Blocks.size() << " blocks.\n"); + LLVM_DEBUG(dbgs() << Blocks.size() << " blocks.\n"); // Emit edges between blocks. if (Blocks.empty()) return; @@ -397,8 +397,8 @@ namespace { write(Block.OutEdges.size() * 2 + 1); write(Block.Number); for (int i = 0, e = Block.OutEdges.size(); i != e; ++i) { - DEBUG(dbgs() << Block.Number << " -> " << Block.OutEdges[i]->Number - << "\n"); + LLVM_DEBUG(dbgs() << Block.Number << " -> " + << Block.OutEdges[i]->Number << "\n"); write(Block.OutEdges[i]->Number); write(0); // no flags } diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp index 75061749fbb..a1205d81c91 100644 --- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp @@ -227,7 +227,7 @@ FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel, /// /// inserts a call to __hwasan_init to the module's constructor list. 
bool HWAddressSanitizer::doInitialization(Module &M) { - DEBUG(dbgs() << "Init " << M.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n"); auto &DL = M.getDataLayout(); TargetTriple = Triple(M.getTargetTriple()); @@ -457,7 +457,7 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite, } bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) { - DEBUG(dbgs() << "Instrumenting: " << *I << "\n"); + LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n"); bool IsWrite = false; unsigned Alignment = 0; uint64_t TypeSize = 0; @@ -684,7 +684,7 @@ bool HWAddressSanitizer::runOnFunction(Function &F) { if (!F.hasFnAttribute(Attribute::SanitizeHWAddress)) return false; - DEBUG(dbgs() << "Function: " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n"); initializeCallbacks(*F.getParent()); diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp index ec035c8474b..27fb0e4393a 100644 --- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp +++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp @@ -223,12 +223,12 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite( uint64_t TotalCount, uint32_t NumCandidates) { std::vector<PromotionCandidate> Ret; - DEBUG(dbgs() << " \nWork on callsite #" << NumOfPGOICallsites << *Inst - << " Num_targets: " << ValueDataRef.size() - << " Num_candidates: " << NumCandidates << "\n"); + LLVM_DEBUG(dbgs() << " \nWork on callsite #" << NumOfPGOICallsites << *Inst + << " Num_targets: " << ValueDataRef.size() + << " Num_candidates: " << NumCandidates << "\n"); NumOfPGOICallsites++; if (ICPCSSkip != 0 && NumOfPGOICallsites <= ICPCSSkip) { - DEBUG(dbgs() << " Skip: User options.\n"); + LLVM_DEBUG(dbgs() << " Skip: User options.\n"); return Ret; } @@ -236,11 +236,11 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite( uint64_t Count = ValueDataRef[I].Count; assert(Count <= TotalCount); uint64_t Target = ValueDataRef[I].Value; - DEBUG(dbgs() << " Candidate " << I << " Count=" << Count - << " Target_func: " << Target << "\n"); + LLVM_DEBUG(dbgs() << " Candidate " << I << " Count=" << Count + << " Target_func: " << Target << "\n"); if (ICPInvokeOnly && dyn_cast<CallInst>(Inst)) { - DEBUG(dbgs() << " Not promote: User options.\n"); + LLVM_DEBUG(dbgs() << " Not promote: User options.\n"); ORE.emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst) << " Not promote: User options"; @@ -248,7 +248,7 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite( break; } if (ICPCallOnly && dyn_cast<InvokeInst>(Inst)) { - DEBUG(dbgs() << " Not promote: User option.\n"); + LLVM_DEBUG(dbgs() << " Not promote: User option.\n"); ORE.emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst) << " Not promote: User options"; @@ -256,7 +256,7 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite( break; } if (ICPCutOff != 0 && NumOfPGOICallPromotion >= ICPCutOff) { - DEBUG(dbgs() << " Not promote: Cutoff reached.\n"); + LLVM_DEBUG(dbgs() << " Not promote: Cutoff reached.\n"); ORE.emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "CutOffReached", Inst) << " Not promote: Cutoff reached"; @@ -266,7 +266,7 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite( Function *TargetFunction = Symtab->getFunction(Target); if (TargetFunction == nullptr) { - DEBUG(dbgs() << " Not promote: Cannot find the target\n"); + LLVM_DEBUG(dbgs() << " Not promote: Cannot 
find the target\n"); ORE.emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "UnableToFindTarget", Inst) << "Cannot promote indirect call: target not found"; @@ -387,7 +387,7 @@ static bool promoteIndirectCalls(Module &M, ProfileSummaryInfo *PSI, InstrProfSymtab Symtab; if (Error E = Symtab.create(M, InLTO)) { std::string SymtabFailure = toString(std::move(E)); - DEBUG(dbgs() << "Failed to create symtab: " << SymtabFailure << "\n"); + LLVM_DEBUG(dbgs() << "Failed to create symtab: " << SymtabFailure << "\n"); (void)SymtabFailure; return false; } @@ -412,12 +412,12 @@ static bool promoteIndirectCalls(Module &M, ProfileSummaryInfo *PSI, ICallPromotionFunc ICallPromotion(F, &M, &Symtab, SamplePGO, *ORE); bool FuncChanged = ICallPromotion.processFunction(PSI); if (ICPDUMPAFTER && FuncChanged) { - DEBUG(dbgs() << "\n== IR Dump After =="; F.print(dbgs())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "\n== IR Dump After =="; F.print(dbgs())); + LLVM_DEBUG(dbgs() << "\n"); } Changed |= FuncChanged; if (ICPCutOff != 0 && NumOfPGOICallPromotion >= ICPCutOff) { - DEBUG(dbgs() << " Stop: Cutoff reached.\n"); + LLVM_DEBUG(dbgs() << " Stop: Cutoff reached.\n"); break; } } diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp index 0aaf87499f6..8be1638ce40 100644 --- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp @@ -271,8 +271,8 @@ public: break; } - DEBUG(dbgs() << Promoted << " counters promoted for loop (depth=" - << L.getLoopDepth() << ")\n"); + LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth=" + << L.getLoopDepth() << ")\n"); return Promoted != 0; } diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index e18ea717ee3..2fad41a1021 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -796,9 +796,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { MS.initializeCallbacks(*F.getParent()); ActualFnStart = &F.getEntryBlock(); - DEBUG(if (!InsertChecks) - dbgs() << "MemorySanitizer is not inserting checks into '" - << F.getName() << "'\n"); + LLVM_DEBUG(if (!InsertChecks) dbgs() + << "MemorySanitizer is not inserting checks into '" + << F.getName() << "'\n"); } Value *updateOrigin(Value *V, IRBuilder<> &IRB) { @@ -901,7 +901,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true); StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment); - DEBUG(dbgs() << " STORE: " << *NewSI << "\n"); + LLVM_DEBUG(dbgs() << " STORE: " << *NewSI << "\n"); if (ClCheckAccessAddress) insertShadowCheck(Addr, NewSI); @@ -932,9 +932,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin, bool AsCall) { IRBuilder<> IRB(OrigIns); - DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n"); + LLVM_DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n"); Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB); - DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n"); + LLVM_DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n"); Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow); if (ConstantShadow) { @@ -964,7 +964,7 @@ struct MemorySanitizerVisitor : public 
InstVisitor<MemorySanitizerVisitor> { IRB.SetInsertPoint(CheckTerm); insertWarningFn(IRB, Origin); - DEBUG(dbgs() << " CHECK: " << *Cmp << "\n"); + LLVM_DEBUG(dbgs() << " CHECK: " << *Cmp << "\n"); } } @@ -975,7 +975,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Value *Origin = ShadowData.Origin; materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls); } - DEBUG(dbgs() << "DONE:\n" << F); + LLVM_DEBUG(dbgs() << "DONE:\n" << F); } /// Add MemorySanitizer instrumentation to a function. @@ -1048,7 +1048,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { for (unsigned i = 0, n = ST->getNumElements(); i < n; i++) Elements.push_back(getShadowTy(ST->getElementType(i))); StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked()); - DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n"); + LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n"); return Res; } uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy); @@ -1182,7 +1182,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void setOrigin(Value *V, Value *Origin) { if (!MS.TrackOrigins) return; assert(!OriginMap.count(V) && "Values may only have one origin"); - DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n"); + LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n"); OriginMap[V] = Origin; } @@ -1245,7 +1245,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // For instructions the shadow is already stored in the map. Value *Shadow = ShadowMap[V]; if (!Shadow) { - DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent())); + LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent())); (void)I; assert(Shadow && "No shadow for a value"); } @@ -1253,7 +1253,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } if (UndefValue *U = dyn_cast<UndefValue>(V)) { Value *AllOnes = PoisonUndef ? 
getPoisonedShadow(V) : getCleanShadow(V); - DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n"); + LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n"); (void)U; return AllOnes; } @@ -1268,7 +1268,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { const DataLayout &DL = F->getParent()->getDataLayout(); for (auto &FArg : F->args()) { if (!FArg.getType()->isSized()) { - DEBUG(dbgs() << "Arg is not sized\n"); + LLVM_DEBUG(dbgs() << "Arg is not sized\n"); continue; } unsigned Size = @@ -1300,7 +1300,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment); Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base, CopyAlign, Size); - DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n"); + LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n"); (void)Cpy; } *ShadowPtr = getCleanShadow(V); @@ -1313,8 +1313,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment); } } - DEBUG(dbgs() << " ARG: " << FArg << " ==> " << - **ShadowPtr << "\n"); + LLVM_DEBUG(dbgs() + << " ARG: " << FArg << " ==> " << **ShadowPtr << "\n"); if (MS.TrackOrigins && !Overflow) { Value *OriginPtr = getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset); @@ -2790,13 +2790,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { IRBuilder<> IRB(&I); unsigned ArgOffset = 0; - DEBUG(dbgs() << " CallSite: " << I << "\n"); + LLVM_DEBUG(dbgs() << " CallSite: " << I << "\n"); for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); ArgIt != End; ++ArgIt) { Value *A = *ArgIt; unsigned i = ArgIt - CS.arg_begin(); if (!A->getType()->isSized()) { - DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); + LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); continue; } unsigned Size = 0; @@ -2806,8 +2806,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // __msan_param_tls. 
Value *ArgShadow = getShadow(A); Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset); - DEBUG(dbgs() << " Arg#" << i << ": " << *A << - " Shadow: " << *ArgShadow << "\n"); + LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A + << " Shadow: " << *ArgShadow << "\n"); bool ArgIsInitialized = false; const DataLayout &DL = F.getParent()->getDataLayout(); if (CS.paramHasAttr(i, Attribute::ByVal)) { @@ -2836,10 +2836,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { getOriginPtrForArgument(A, IRB, ArgOffset)); (void)Store; assert(Size != 0 && Store != nullptr); - DEBUG(dbgs() << " Param:" << *Store << "\n"); + LLVM_DEBUG(dbgs() << " Param:" << *Store << "\n"); ArgOffset += alignTo(Size, 8); } - DEBUG(dbgs() << " done with call args\n"); + LLVM_DEBUG(dbgs() << " done with call args\n"); FunctionType *FT = cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0)); @@ -3046,24 +3046,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void visitExtractValueInst(ExtractValueInst &I) { IRBuilder<> IRB(&I); Value *Agg = I.getAggregateOperand(); - DEBUG(dbgs() << "ExtractValue: " << I << "\n"); + LLVM_DEBUG(dbgs() << "ExtractValue: " << I << "\n"); Value *AggShadow = getShadow(Agg); - DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); + LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); - DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); + LLVM_DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); setShadow(&I, ResShadow); setOriginForNaryOp(I); } void visitInsertValueInst(InsertValueInst &I) { IRBuilder<> IRB(&I); - DEBUG(dbgs() << "InsertValue: " << I << "\n"); + LLVM_DEBUG(dbgs() << "InsertValue: " << I << "\n"); Value *AggShadow = getShadow(I.getAggregateOperand()); Value *InsShadow = getShadow(I.getInsertedValueOperand()); - DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); - DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); + LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); + LLVM_DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); - DEBUG(dbgs() << " Res: " << *Res << "\n"); + LLVM_DEBUG(dbgs() << " Res: " << *Res << "\n"); setShadow(&I, Res); setOriginForNaryOp(I); } @@ -3078,17 +3078,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } void visitResumeInst(ResumeInst &I) { - DEBUG(dbgs() << "Resume: " << I << "\n"); + LLVM_DEBUG(dbgs() << "Resume: " << I << "\n"); // Nothing to do here. } void visitCleanupReturnInst(CleanupReturnInst &CRI) { - DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n"); + LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n"); // Nothing to do here. } void visitCatchReturnInst(CatchReturnInst &CRI) { - DEBUG(dbgs() << "CatchReturn: " << CRI << "\n"); + LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n"); // Nothing to do here. } @@ -3129,7 +3129,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // Everything else: stop propagating and check for poisoned shadow. 
if (ClDumpStrictInstructions) dumpInst(I); - DEBUG(dbgs() << "DEFAULT: " << I << "\n"); + LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n"); for (size_t i = 0, n = I.getNumOperands(); i < n; i++) { Value *Operand = I.getOperand(i); if (Operand->getType()->isSized()) diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp index 3121d102c6d..307b7eaa219 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp @@ -546,7 +546,7 @@ public: computeCFGHash(); if (!ComdatMembers.empty()) renameComdatFunction(); - DEBUG(dumpInfo("after CFGMST")); + LLVM_DEBUG(dumpInfo("after CFGMST")); NumOfPGOBB += MST.BBInfos.size(); for (auto &E : MST.AllEdges) { @@ -596,12 +596,12 @@ void FuncPGOInstrumentation<Edge, BBInfo>::computeCFGHash() { FunctionHash = (uint64_t)SIVisitor.getNumOfSelectInsts() << 56 | (uint64_t)ValueSites[IPVK_IndirectCallTarget].size() << 48 | (uint64_t)MST.AllEdges.size() << 32 | JC.getCRC(); - DEBUG(dbgs() << "Function Hash Computation for " << F.getName() << ":\n" - << " CRC = " << JC.getCRC() - << ", Selects = " << SIVisitor.getNumOfSelectInsts() - << ", Edges = " << MST.AllEdges.size() - << ", ICSites = " << ValueSites[IPVK_IndirectCallTarget].size() - << ", Hash = " << FunctionHash << "\n";); + LLVM_DEBUG(dbgs() << "Function Hash Computation for " << F.getName() << ":\n" + << " CRC = " << JC.getCRC() + << ", Selects = " << SIVisitor.getNumOfSelectInsts() + << ", Edges = " << MST.AllEdges.size() << ", ICSites = " + << ValueSites[IPVK_IndirectCallTarget].size() + << ", Hash = " << FunctionHash << "\n";); } // Check if we can safely rename this Comdat function. @@ -702,8 +702,8 @@ BasicBlock *FuncPGOInstrumentation<Edge, BBInfo>::getInstrBB(Edge *E) { // For a critical edge, we have to split. Instrument the newly // created BB. 
NumOfPGOSplit++; - DEBUG(dbgs() << "Split critical edge: " << getBBInfo(SrcBB).Index << " --> " - << getBBInfo(DestBB).Index << "\n"); + LLVM_DEBUG(dbgs() << "Split critical edge: " << getBBInfo(SrcBB).Index + << " --> " << getBBInfo(DestBB).Index << "\n"); unsigned SuccNum = GetSuccessorNumber(SrcBB, DestBB); BasicBlock *InstrBB = SplitCriticalEdge(TI, SuccNum); assert(InstrBB && "Critical edge is not split"); @@ -753,8 +753,8 @@ static void instrumentOneFunc( for (auto &I : FuncInfo.ValueSites[IPVK_IndirectCallTarget]) { CallSite CS(I); Value *Callee = CS.getCalledValue(); - DEBUG(dbgs() << "Instrument one indirect call: CallSite Index = " - << NumIndirectCallSites << "\n"); + LLVM_DEBUG(dbgs() << "Instrument one indirect call: CallSite Index = " + << NumIndirectCallSites << "\n"); IRBuilder<> Builder(I); assert(Builder.GetInsertPoint() != I->getParent()->end() && "Cannot get the Instrumentation point"); @@ -1042,14 +1042,14 @@ bool PGOUseFunc::readCounters(IndexedInstrProfReader *PGOReader) { std::vector<uint64_t> &CountFromProfile = ProfileRecord.Counts; NumOfPGOFunc++; - DEBUG(dbgs() << CountFromProfile.size() << " counts\n"); + LLVM_DEBUG(dbgs() << CountFromProfile.size() << " counts\n"); uint64_t ValueSum = 0; for (unsigned I = 0, S = CountFromProfile.size(); I < S; I++) { - DEBUG(dbgs() << " " << I << ": " << CountFromProfile[I] << "\n"); + LLVM_DEBUG(dbgs() << " " << I << ": " << CountFromProfile[I] << "\n"); ValueSum += CountFromProfile[I]; } - DEBUG(dbgs() << "SUM = " << ValueSum << "\n"); + LLVM_DEBUG(dbgs() << "SUM = " << ValueSum << "\n"); getBBInfo(nullptr).UnknownCountOutEdge = 2; getBBInfo(nullptr).UnknownCountInEdge = 2; @@ -1129,7 +1129,7 @@ void PGOUseFunc::populateCounters() { } } - DEBUG(dbgs() << "Populate counts in " << NumPasses << " passes.\n"); + LLVM_DEBUG(dbgs() << "Populate counts in " << NumPasses << " passes.\n"); #ifndef NDEBUG // Assert every BB has a valid counter. for (auto &BB : F) { @@ -1154,13 +1154,13 @@ void PGOUseFunc::populateCounters() { FuncInfo.SIVisitor.annotateSelects(F, this, &CountPosition); assert(CountPosition == ProfileCountSize); - DEBUG(FuncInfo.dumpInfo("after reading profile.")); + LLVM_DEBUG(FuncInfo.dumpInfo("after reading profile.")); } // Assign the scaled count values to the BB with multiple out edges. void PGOUseFunc::setBranchWeights() { // Generate MD_prof metadata for every branch instruction. - DEBUG(dbgs() << "\nSetting branch weights.\n"); + LLVM_DEBUG(dbgs() << "\nSetting branch weights.\n"); for (auto &BB : F) { TerminatorInst *TI = BB.getTerminator(); if (TI->getNumSuccessors() < 2) @@ -1201,7 +1201,7 @@ static bool isIndirectBrTarget(BasicBlock *BB) { } void PGOUseFunc::annotateIrrLoopHeaderWeights() { - DEBUG(dbgs() << "\nAnnotating irreducible loop header weights.\n"); + LLVM_DEBUG(dbgs() << "\nAnnotating irreducible loop header weights.\n"); // Find irr loop headers for (auto &BB : F) { // As a heuristic also annotate indrectbr targets as they have a high chance @@ -1334,9 +1334,9 @@ void PGOUseFunc::annotateValueSites(uint32_t Kind) { } for (auto &I : ValueSites) { - DEBUG(dbgs() << "Read one value site profile (kind = " << Kind - << "): Index = " << ValueSiteIndex << " out of " - << NumValueSites << "\n"); + LLVM_DEBUG(dbgs() << "Read one value site profile (kind = " << Kind + << "): Index = " << ValueSiteIndex << " out of " + << NumValueSites << "\n"); annotateValueSite(*M, *I, ProfileRecord, static_cast<InstrProfValueKind>(Kind), ValueSiteIndex, Kind == IPVK_MemOPSize ? 
MaxNumMemOPAnnotations @@ -1432,7 +1432,7 @@ static bool annotateAllFunctions( Module &M, StringRef ProfileFileName, function_ref<BranchProbabilityInfo *(Function &)> LookupBPI, function_ref<BlockFrequencyInfo *(Function &)> LookupBFI) { - DEBUG(dbgs() << "Read in profile counters: "); + LLVM_DEBUG(dbgs() << "Read in profile counters: "); auto &Ctx = M.getContext(); // Read the counter array from file. auto ReaderOrErr = IndexedInstrProfReader::create(ProfileFileName); @@ -1518,12 +1518,13 @@ static bool annotateAllFunctions( // inconsistent MST between prof-gen and prof-use. for (auto &F : HotFunctions) { F->addFnAttr(Attribute::InlineHint); - DEBUG(dbgs() << "Set inline attribute to function: " << F->getName() - << "\n"); + LLVM_DEBUG(dbgs() << "Set inline attribute to function: " << F->getName() + << "\n"); } for (auto &F : ColdFunctions) { F->addFnAttr(Attribute::Cold); - DEBUG(dbgs() << "Set cold attribute to function: " << F->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Set cold attribute to function: " << F->getName() + << "\n"); } return true; } @@ -1586,9 +1587,10 @@ void llvm::setProfMetadata(Module *M, Instruction *TI, for (const auto &ECI : EdgeCounts) Weights.push_back(scaleBranchCount(ECI, Scale)); - DEBUG(dbgs() << "Weight is: "; - for (const auto &W : Weights) { dbgs() << W << " "; } - dbgs() << "\n";); + LLVM_DEBUG(dbgs() << "Weight is: "; for (const auto &W + : Weights) { + dbgs() << W << " "; + } dbgs() << "\n";); TI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights)); if (EmitBranchProbability) { std::string BrCondStr = getBranchCondString(TI); diff --git a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp index 494aad10d2f..73d419aa6cc 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp @@ -151,8 +151,9 @@ public: if (perform(MI)) { Changed = true; ++NumOfPGOMemOPOpt; - DEBUG(dbgs() << "MemOP call: " << MI->getCalledFunction()->getName() - << "is Transformed.\n"); + LLVM_DEBUG(dbgs() << "MemOP call: " + << MI->getCalledFunction()->getName() + << "is Transformed.\n"); } } } @@ -245,9 +246,9 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) { } ArrayRef<InstrProfValueData> VDs(ValueDataArray.get(), NumVals); - DEBUG(dbgs() << "Read one memory intrinsic profile with count " << ActualCount - << "\n"); - DEBUG( + LLVM_DEBUG(dbgs() << "Read one memory intrinsic profile with count " + << ActualCount << "\n"); + LLVM_DEBUG( for (auto &VD : VDs) { dbgs() << " (" << VD.Value << "," << VD.Count << ")\n"; }); @@ -260,8 +261,8 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) { TotalCount = ActualCount; if (MemOPScaleCount) - DEBUG(dbgs() << "Scale counts: numerator = " << ActualCount - << " denominator = " << SavedTotalCount << "\n"); + LLVM_DEBUG(dbgs() << "Scale counts: numerator = " << ActualCount + << " denominator = " << SavedTotalCount << "\n"); // Keeping track of the count of the default case: uint64_t RemainCount = TotalCount; @@ -310,9 +311,9 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) { uint64_t SumForOpt = TotalCount - RemainCount; - DEBUG(dbgs() << "Optimize one memory intrinsic call to " << Version - << " Versions (covering " << SumForOpt << " out of " - << TotalCount << ")\n"); + LLVM_DEBUG(dbgs() << "Optimize one memory intrinsic call to " << Version + << " Versions (covering " << SumForOpt << " out of " + << TotalCount << ")\n"); // mem_op(..., size) // ==> @@ -331,8 +332,8 @@ bool 
MemOPSizeOpt::perform(MemIntrinsic *MI) { // merge_bb: BasicBlock *BB = MI->getParent(); - DEBUG(dbgs() << "\n\n== Basic Block Before ==\n"); - DEBUG(dbgs() << *BB << "\n"); + LLVM_DEBUG(dbgs() << "\n\n== Basic Block Before ==\n"); + LLVM_DEBUG(dbgs() << *BB << "\n"); auto OrigBBFreq = BFI.getBlockFreq(BB); BasicBlock *DefaultBB = SplitBlock(BB, MI); @@ -358,7 +359,7 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) { annotateValueSite(*Func.getParent(), *MI, VDs.slice(Version), SavedRemainCount, IPVK_MemOPSize, NumVals); - DEBUG(dbgs() << "\n\n== Basic Block After==\n"); + LLVM_DEBUG(dbgs() << "\n\n== Basic Block After==\n"); for (uint64_t SizeId : SizeIds) { BasicBlock *CaseBB = BasicBlock::Create( @@ -374,13 +375,13 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) { IRBuilder<> IRBCase(CaseBB); IRBCase.CreateBr(MergeBB); SI->addCase(CaseSizeId, CaseBB); - DEBUG(dbgs() << *CaseBB << "\n"); + LLVM_DEBUG(dbgs() << *CaseBB << "\n"); } setProfMetadata(Func.getParent(), SI, CaseCounts, MaxCount); - DEBUG(dbgs() << *BB << "\n"); - DEBUG(dbgs() << *DefaultBB << "\n"); - DEBUG(dbgs() << *MergeBB << "\n"); + LLVM_DEBUG(dbgs() << *BB << "\n"); + LLVM_DEBUG(dbgs() << *DefaultBB << "\n"); + LLVM_DEBUG(dbgs() << *MergeBB << "\n"); ORE.emit([&]() { using namespace ore; diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index b84f3916907..17168a7bd96 100644 --- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -502,7 +502,7 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I, if (Idx < 0) return false; if (IsWrite && isVtableAccess(I)) { - DEBUG(dbgs() << " VPTR : " << *I << "\n"); + LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n"); Value *StoredValue = cast<StoreInst>(I)->getValueOperand(); // StoredValue may be a vector type if we are storing several vptrs at once. // In this case, just take the first element of the vector since this is diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp index fb4eef523ba..8d3ef8fde53 100644 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp +++ b/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp @@ -103,10 +103,12 @@ bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) { // zap the pair. 
if (Push && cast<CallInst>(Inst)->getArgOperand(0) == Push) { Changed = true; - DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop " - "autorelease pair:\n" - " Pop: " << *Inst << "\n" - << " Push: " << *Push << "\n"); + LLVM_DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop " + "autorelease pair:\n" + " Pop: " + << *Inst << "\n" + << " Push: " << *Push + << "\n"); Inst->eraseFromParent(); Push->eraseFromParent(); } diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp index bab2d1c585d..55881b01b45 100644 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp +++ b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp @@ -132,16 +132,18 @@ bool ObjCARCContract::optimizeRetainCall(Function &F, Instruction *Retain) { Changed = true; ++NumPeeps; - DEBUG(dbgs() << "Transforming objc_retain => " - "objc_retainAutoreleasedReturnValue since the operand is a " - "return value.\nOld: "<< *Retain << "\n"); + LLVM_DEBUG( + dbgs() << "Transforming objc_retain => " + "objc_retainAutoreleasedReturnValue since the operand is a " + "return value.\nOld: " + << *Retain << "\n"); // We do not have to worry about tail calls/does not throw since // retain/retainRV have the same properties. Constant *Decl = EP.get(ARCRuntimeEntryPointKind::RetainRV); cast<CallInst>(Retain)->setCalledFunction(Decl); - DEBUG(dbgs() << "New: " << *Retain << "\n"); + LLVM_DEBUG(dbgs() << "New: " << *Retain << "\n"); return true; } @@ -180,16 +182,19 @@ bool ObjCARCContract::contractAutorelease( Changed = true; ++NumPeeps; - DEBUG(dbgs() << " Fusing retain/autorelease!\n" - " Autorelease:" << *Autorelease << "\n" - " Retain: " << *Retain << "\n"); + LLVM_DEBUG(dbgs() << " Fusing retain/autorelease!\n" + " Autorelease:" + << *Autorelease + << "\n" + " Retain: " + << *Retain << "\n"); Constant *Decl = EP.get(Class == ARCInstKind::AutoreleaseRV ? ARCRuntimeEntryPointKind::RetainAutoreleaseRV : ARCRuntimeEntryPointKind::RetainAutorelease); Retain->setCalledFunction(Decl); - DEBUG(dbgs() << " New RetainAutorelease: " << *Retain << "\n"); + LLVM_DEBUG(dbgs() << " New RetainAutorelease: " << *Retain << "\n"); EraseInstruction(Autorelease); return true; @@ -387,7 +392,7 @@ void ObjCARCContract::tryToContractReleaseIntoStoreStrong( Changed = true; ++NumStoreStrongs; - DEBUG( + LLVM_DEBUG( llvm::dbgs() << " Contracting retain, release into objc_storeStrong.\n" << " Old:\n" << " Store: " << *Store << "\n" @@ -414,7 +419,8 @@ void ObjCARCContract::tryToContractReleaseIntoStoreStrong( // we can set the tail flag once we know it's safe. 
StoreStrongCalls.insert(StoreStrong); - DEBUG(llvm::dbgs() << " New Store Strong: " << *StoreStrong << "\n"); + LLVM_DEBUG(llvm::dbgs() << " New Store Strong: " << *StoreStrong + << "\n"); if (&*Iter == Retain) ++Iter; if (&*Iter == Store) ++Iter; @@ -472,8 +478,8 @@ bool ObjCARCContract::tryToPeepholeInstruction( } while (IsNoopInstruction(&*BBI)); if (&*BBI == GetArgRCIdentityRoot(Inst)) { - DEBUG(dbgs() << "Adding inline asm marker for the return value " - "optimization.\n"); + LLVM_DEBUG(dbgs() << "Adding inline asm marker for the return value " + "optimization.\n"); Changed = true; InlineAsm *IA = InlineAsm::get( FunctionType::get(Type::getVoidTy(Inst->getContext()), @@ -495,8 +501,8 @@ bool ObjCARCContract::tryToPeepholeInstruction( Changed = true; new StoreInst(Null, CI->getArgOperand(0), CI); - DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n" - << " New = " << *Null << "\n"); + LLVM_DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n" + << " New = " << *Null << "\n"); CI->replaceAllUsesWith(Null); CI->eraseFromParent(); @@ -547,7 +553,7 @@ bool ObjCARCContract::runOnFunction(Function &F) { isFuncletEHPersonality(classifyEHPersonality(F.getPersonalityFn()))) BlockColors = colorEHFunclets(F); - DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n"); + LLVM_DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n"); // Track whether it's ok to mark objc_storeStrong calls with the "tail" // keyword. Be conservative if the function has variadic arguments. @@ -565,7 +571,7 @@ bool ObjCARCContract::runOnFunction(Function &F) { for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E;) { Instruction *Inst = &*I++; - DEBUG(dbgs() << "Visiting: " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n"); // First try to peephole Inst. If there is nothing further we can do in // terms of undoing objc-arc-expand, process the next inst. diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp index fab9845facc..6a345ef56e1 100644 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp +++ b/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp @@ -91,12 +91,13 @@ bool ObjCARCExpand::runOnFunction(Function &F) { bool Changed = false; - DEBUG(dbgs() << "ObjCARCExpand: Visiting Function: " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "ObjCARCExpand: Visiting Function: " << F.getName() + << "\n"); for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ++I) { Instruction *Inst = &*I; - DEBUG(dbgs() << "ObjCARCExpand: Visiting: " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << "ObjCARCExpand: Visiting: " << *Inst << "\n"); switch (GetBasicARCInstKind(Inst)) { case ARCInstKind::Retain: @@ -111,8 +112,10 @@ bool ObjCARCExpand::runOnFunction(Function &F) { // emitted here. We'll redo them in the contract pass. 
Changed = true; Value *Value = cast<CallInst>(Inst)->getArgOperand(0); - DEBUG(dbgs() << "ObjCARCExpand: Old = " << *Inst << "\n" - " New = " << *Value << "\n"); + LLVM_DEBUG(dbgs() << "ObjCARCExpand: Old = " << *Inst + << "\n" + " New = " + << *Value << "\n"); Inst->replaceAllUsesWith(Value); break; } @@ -121,7 +124,7 @@ bool ObjCARCExpand::runOnFunction(Function &F) { } } - DEBUG(dbgs() << "ObjCARCExpand: Finished List.\n\n"); + LLVM_DEBUG(dbgs() << "ObjCARCExpand: Finished List.\n\n"); return Changed; } diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index 7df2fe52cae..db09cc3487a 100644 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -423,7 +423,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) { // Dump the pointers we are tracking. OS << " TopDown State:\n"; if (!BBInfo.hasTopDownPtrs()) { - DEBUG(dbgs() << " NONE!\n"); + LLVM_DEBUG(dbgs() << " NONE!\n"); } else { for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end(); I != E; ++I) { @@ -443,7 +443,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) { OS << " BottomUp State:\n"; if (!BBInfo.hasBottomUpPtrs()) { - DEBUG(dbgs() << " NONE!\n"); + LLVM_DEBUG(dbgs() << " NONE!\n"); } else { for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end(); I != E; ++I) { @@ -613,8 +613,8 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) { Changed = true; ++NumPeeps; - DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n" - << "Erasing " << *RetainRV << "\n"); + LLVM_DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n" + << "Erasing " << *RetainRV << "\n"); EraseInstruction(&*I); EraseInstruction(RetainRV); @@ -626,14 +626,15 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) { Changed = true; ++NumPeeps; - DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => " - "objc_retain since the operand is not a return value.\n" - "Old = " << *RetainRV << "\n"); + LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => " + "objc_retain since the operand is not a return value.\n" + "Old = " + << *RetainRV << "\n"); Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain); cast<CallInst>(RetainRV)->setCalledFunction(NewDecl); - DEBUG(dbgs() << "New = " << *RetainRV << "\n"); + LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n"); return false; } @@ -671,10 +672,12 @@ void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Changed = true; ++NumPeeps; - DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => " - "objc_autorelease since its operand is not used as a return " - "value.\n" - "Old = " << *AutoreleaseRV << "\n"); + LLVM_DEBUG( + dbgs() << "Transforming objc_autoreleaseReturnValue => " + "objc_autorelease since its operand is not used as a return " + "value.\n" + "Old = " + << *AutoreleaseRV << "\n"); CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV); Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease); @@ -682,7 +685,7 @@ void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease. 
Class = ARCInstKind::Autorelease; - DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n"); + LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n"); } namespace { @@ -713,7 +716,7 @@ CloneCallInstForBB(CallInst &CI, BasicBlock &BB, /// Visit each call, one at a time, and make simplifications without doing any /// additional analysis. void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { - DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n"); + LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n"); // Reset all the flags in preparation for recomputing them. UsedInThisFunction = 0; @@ -728,7 +731,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { ARCInstKind Class = GetBasicARCInstKind(Inst); - DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n"); switch (Class) { default: break; @@ -744,7 +747,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { case ARCInstKind::NoopCast: Changed = true; ++NumNoops; - DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n"); EraseInstruction(Inst); continue; @@ -762,8 +765,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { Constant::getNullValue(Ty), CI); Value *NewValue = UndefValue::get(CI->getType()); - DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior." - "\nOld = " << *CI << "\nNew = " << *NewValue << "\n"); + LLVM_DEBUG( + dbgs() << "A null pointer-to-weak-pointer is undefined behavior." + "\nOld = " + << *CI << "\nNew = " << *NewValue << "\n"); CI->replaceAllUsesWith(NewValue); CI->eraseFromParent(); continue; @@ -782,8 +787,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { CI); Value *NewValue = UndefValue::get(CI->getType()); - DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior." - "\nOld = " << *CI << "\nNew = " << *NewValue << "\n"); + LLVM_DEBUG( + dbgs() << "A null pointer-to-weak-pointer is undefined behavior." + "\nOld = " + << *CI << "\nNew = " << *NewValue << "\n"); CI->replaceAllUsesWith(NewValue); CI->eraseFromParent(); @@ -818,9 +825,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), MDNode::get(C, None)); - DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) " - "since x is otherwise unused.\nOld: " << *Call << "\nNew: " - << *NewCall << "\n"); + LLVM_DEBUG( + dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) " + "since x is otherwise unused.\nOld: " + << *Call << "\nNew: " << *NewCall << "\n"); EraseInstruction(Call); Inst = NewCall; @@ -832,8 +840,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { // a tail keyword. if (IsAlwaysTail(Class)) { Changed = true; - DEBUG(dbgs() << "Adding tail keyword to function since it can never be " - "passed stack args: " << *Inst << "\n"); + LLVM_DEBUG( + dbgs() << "Adding tail keyword to function since it can never be " + "passed stack args: " + << *Inst << "\n"); cast<CallInst>(Inst)->setTailCall(); } @@ -841,16 +851,16 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { // semantics of ARC truly do not do so. if (IsNeverTail(Class)) { Changed = true; - DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst << - "\n"); + LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst + << "\n"); cast<CallInst>(Inst)->setTailCall(false); } // Set nounwind as needed. 
if (IsNoThrow(Class)) { Changed = true; - DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst - << "\n"); + LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " + << *Inst << "\n"); cast<CallInst>(Inst)->setDoesNotThrow(); } @@ -865,8 +875,8 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { if (IsNullOrUndef(Arg)) { Changed = true; ++NumNoops; - DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst - << "\n"); + LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst + << "\n"); EraseInstruction(Inst); continue; } @@ -967,14 +977,15 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { Clone->setArgOperand(0, Op); Clone->insertBefore(InsertPos); - DEBUG(dbgs() << "Cloning " - << *CInst << "\n" - "And inserting clone at " << *InsertPos << "\n"); + LLVM_DEBUG(dbgs() << "Cloning " << *CInst + << "\n" + "And inserting clone at " + << *InsertPos << "\n"); Worklist.push_back(std::make_pair(Clone, Incoming)); } } // Erase the original call. - DEBUG(dbgs() << "Erasing: " << *CInst << "\n"); + LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n"); EraseInstruction(CInst); continue; } @@ -1151,7 +1162,7 @@ bool ObjCARCOpt::VisitInstructionBottomUp( ARCInstKind Class = GetARCInstKind(Inst); const Value *Arg = nullptr; - DEBUG(dbgs() << " Class: " << Class << "\n"); + LLVM_DEBUG(dbgs() << " Class: " << Class << "\n"); switch (Class) { case ARCInstKind::Release: { @@ -1174,7 +1185,7 @@ bool ObjCARCOpt::VisitInstructionBottomUp( // Don't do retain+release tracking for ARCInstKind::RetainRV, because // it's better to let it remain as the first instruction after a call. if (Class != ARCInstKind::RetainRV) { - DEBUG(dbgs() << " Matching with: " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n"); Retains[Inst] = S.GetRRInfo(); } S.ClearSequenceProgress(); @@ -1216,7 +1227,7 @@ bool ObjCARCOpt::VisitInstructionBottomUp( bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates, BlotMapVector<Value *, RRInfo> &Retains) { - DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n"); + LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n"); bool NestingDetected = false; BBState &MyStates = BBStates[BB]; @@ -1239,8 +1250,9 @@ bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB, } } - DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n" - << "Performing Dataflow:\n"); + LLVM_DEBUG(dbgs() << "Before:\n" + << BBStates[BB] << "\n" + << "Performing Dataflow:\n"); // Visit all the instructions, bottom-up. 
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) { @@ -1250,7 +1262,7 @@ bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB, if (isa<InvokeInst>(Inst)) continue; - DEBUG(dbgs() << " Visiting " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << " Visiting " << *Inst << "\n"); NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates); } @@ -1265,7 +1277,7 @@ bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB, NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates); } - DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n"); + LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n"); return NestingDetected; } @@ -1278,7 +1290,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, ARCInstKind Class = GetARCInstKind(Inst); const Value *Arg = nullptr; - DEBUG(dbgs() << " Class: " << Class << "\n"); + LLVM_DEBUG(dbgs() << " Class: " << Class << "\n"); switch (Class) { case ARCInstKind::RetainBlock: @@ -1304,7 +1316,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, if (S.MatchWithRelease(MDKindCache, Inst)) { // If we succeed, copy S's RRInfo into the Release -> {Retain Set // Map}. Then we clear S. - DEBUG(dbgs() << " Matching with: " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n"); Releases[Inst] = S.GetRRInfo(); S.ClearSequenceProgress(); } @@ -1344,7 +1356,7 @@ bool ObjCARCOpt::VisitTopDown(BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates, DenseMap<Value *, RRInfo> &Releases) { - DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n"); + LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n"); bool NestingDetected = false; BBState &MyStates = BBStates[BB]; @@ -1366,20 +1378,21 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB, } } - DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n" - << "Performing Dataflow:\n"); + LLVM_DEBUG(dbgs() << "Before:\n" + << BBStates[BB] << "\n" + << "Performing Dataflow:\n"); // Visit all the instructions, top-down. for (Instruction &Inst : *BB) { - DEBUG(dbgs() << " Visiting " << Inst << "\n"); + LLVM_DEBUG(dbgs() << " Visiting " << Inst << "\n"); NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates); } - DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n" - << BBStates[BB] << "\n\n"); + LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n" + << BBStates[BB] << "\n\n"); CheckForCFGHazards(BB, BBStates, MyStates); - DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n"); + LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n"); return NestingDetected; } @@ -1502,7 +1515,7 @@ void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove, Type *ArgTy = Arg->getType(); Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext())); - DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n"); + LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n"); // Insert the new retain and release calls. for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) { @@ -1513,8 +1526,10 @@ void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove, Call->setDoesNotThrow(); Call->setTailCall(); - DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n" - "At insertion point: " << *InsertPt << "\n"); + LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call + << "\n" + "At insertion point: " + << *InsertPt << "\n"); } for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) { Value *MyArg = ArgTy == ParamTy ? 
Arg : @@ -1528,20 +1543,22 @@ void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove, if (ReleasesToMove.IsTailCallRelease) Call->setTailCall(); - DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n" - "At insertion point: " << *InsertPt << "\n"); + LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call + << "\n" + "At insertion point: " + << *InsertPt << "\n"); } // Delete the original retain and release calls. for (Instruction *OrigRetain : RetainsToMove.Calls) { Retains.blot(OrigRetain); DeadInsts.push_back(OrigRetain); - DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n"); + LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n"); } for (Instruction *OrigRelease : ReleasesToMove.Calls) { Releases.erase(OrigRelease); DeadInsts.push_back(OrigRelease); - DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n"); + LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n"); } } @@ -1747,7 +1764,7 @@ bool ObjCARCOpt::PerformCodePlacement( DenseMap<const BasicBlock *, BBState> &BBStates, BlotMapVector<Value *, RRInfo> &Retains, DenseMap<Value *, RRInfo> &Releases, Module *M) { - DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n"); + LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n"); bool AnyPairsCompletelyEliminated = false; SmallVector<Instruction *, 8> DeadInsts; @@ -1761,7 +1778,7 @@ bool ObjCARCOpt::PerformCodePlacement( Instruction *Retain = cast<Instruction>(V); - DEBUG(dbgs() << "Visiting: " << *Retain << "\n"); + LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n"); Value *Arg = GetArgRCIdentityRoot(Retain); @@ -1806,7 +1823,7 @@ bool ObjCARCOpt::PerformCodePlacement( /// Weak pointer optimizations. void ObjCARCOpt::OptimizeWeakCalls(Function &F) { - DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n"); + LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n"); // First, do memdep-style RLE and S2L optimizations. We can't use memdep // itself because it uses AliasAnalysis and we need to do provenance @@ -1814,7 +1831,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) { for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) { Instruction *Inst = &*I++; - DEBUG(dbgs() << "Visiting: " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n"); ARCInstKind Class = GetBasicARCInstKind(Inst); if (Class != ARCInstKind::LoadWeak && @@ -2073,7 +2090,7 @@ void ObjCARCOpt::OptimizeReturns(Function &F) { if (!F.getReturnType()->isPointerTy()) return; - DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n"); + LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n"); SmallPtrSet<Instruction *, 4> DependingInstructions; SmallPtrSet<const BasicBlock *, 4> Visited; @@ -2082,7 +2099,7 @@ void ObjCARCOpt::OptimizeReturns(Function &F) { if (!Ret) continue; - DEBUG(dbgs() << "Visiting: " << *Ret << "\n"); + LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n"); const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0)); @@ -2120,8 +2137,8 @@ void ObjCARCOpt::OptimizeReturns(Function &F) { // If so, we can zap the retain and autorelease. 
Changed = true; ++NumRets; - DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " - << *Autorelease << "\n"); + LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease + << "\n"); EraseInstruction(Retain); EraseInstruction(Autorelease); } @@ -2181,8 +2198,9 @@ bool ObjCARCOpt::runOnFunction(Function &F) { Changed = false; - DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>" - "\n"); + LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() + << " >>>" + "\n"); PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults()); @@ -2230,7 +2248,7 @@ bool ObjCARCOpt::runOnFunction(Function &F) { } #endif - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "\n"); return Changed; } diff --git a/llvm/lib/Transforms/ObjCARC/PtrState.cpp b/llvm/lib/Transforms/ObjCARC/PtrState.cpp index e1774b88fd3..b6c48529de4 100644 --- a/llvm/lib/Transforms/ObjCARC/PtrState.cpp +++ b/llvm/lib/Transforms/ObjCARC/PtrState.cpp @@ -126,22 +126,23 @@ bool RRInfo::Merge(const RRInfo &Other) { //===----------------------------------------------------------------------===// void PtrState::SetKnownPositiveRefCount() { - DEBUG(dbgs() << " Setting Known Positive.\n"); + LLVM_DEBUG(dbgs() << " Setting Known Positive.\n"); KnownPositiveRefCount = true; } void PtrState::ClearKnownPositiveRefCount() { - DEBUG(dbgs() << " Clearing Known Positive.\n"); + LLVM_DEBUG(dbgs() << " Clearing Known Positive.\n"); KnownPositiveRefCount = false; } void PtrState::SetSeq(Sequence NewSeq) { - DEBUG(dbgs() << " Old: " << GetSeq() << "; New: " << NewSeq << "\n"); + LLVM_DEBUG(dbgs() << " Old: " << GetSeq() << "; New: " << NewSeq + << "\n"); Seq = NewSeq; } void PtrState::ResetSequenceProgress(Sequence NewSeq) { - DEBUG(dbgs() << " Resetting sequence progress.\n"); + LLVM_DEBUG(dbgs() << " Resetting sequence progress.\n"); SetSeq(NewSeq); Partial = false; RRI.clear(); @@ -184,7 +185,8 @@ bool BottomUpPtrState::InitBottomUp(ARCMDKindCache &Cache, Instruction *I) { // simple and avoids adding overhead for the non-nested case. bool NestingDetected = false; if (GetSeq() == S_Release || GetSeq() == S_MovableRelease) { - DEBUG(dbgs() << " Found nested releases (i.e. a release pair)\n"); + LLVM_DEBUG( + dbgs() << " Found nested releases (i.e. a release pair)\n"); NestingDetected = true; } @@ -234,8 +236,8 @@ bool BottomUpPtrState::HandlePotentialAlterRefCount(Instruction *Inst, if (!CanAlterRefCount(Inst, Ptr, PA, Class)) return false; - DEBUG(dbgs() << " CanAlterRefCount: Seq: " << S << "; " << *Ptr - << "\n"); + LLVM_DEBUG(dbgs() << " CanAlterRefCount: Seq: " << S << "; " + << *Ptr << "\n"); switch (S) { case S_Use: SetSeq(S_CanRelease); @@ -277,26 +279,26 @@ void BottomUpPtrState::HandlePotentialUse(BasicBlock *BB, Instruction *Inst, case S_Release: case S_MovableRelease: if (CanUse(Inst, Ptr, PA, Class)) { - DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " << *Ptr - << "\n"); + LLVM_DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " + << *Ptr << "\n"); SetSeqAndInsertReverseInsertPt(S_Use); } else if (Seq == S_Release && IsUser(Class)) { - DEBUG(dbgs() << " PreciseReleaseUse: Seq: " << GetSeq() << "; " - << *Ptr << "\n"); + LLVM_DEBUG(dbgs() << " PreciseReleaseUse: Seq: " << GetSeq() + << "; " << *Ptr << "\n"); // Non-movable releases depend on any possible objc pointer use. 
SetSeqAndInsertReverseInsertPt(S_Stop); } else if (const auto *Call = getreturnRVOperand(*Inst, Class)) { if (CanUse(Call, Ptr, PA, GetBasicARCInstKind(Call))) { - DEBUG(dbgs() << " ReleaseUse: Seq: " << GetSeq() << "; " - << *Ptr << "\n"); + LLVM_DEBUG(dbgs() << " ReleaseUse: Seq: " << GetSeq() << "; " + << *Ptr << "\n"); SetSeqAndInsertReverseInsertPt(S_Stop); } } break; case S_Stop: if (CanUse(Inst, Ptr, PA, Class)) { - DEBUG(dbgs() << " PreciseStopUse: Seq: " << GetSeq() << "; " - << *Ptr << "\n"); + LLVM_DEBUG(dbgs() << " PreciseStopUse: Seq: " << GetSeq() + << "; " << *Ptr << "\n"); SetSeq(S_Use); } break; @@ -377,8 +379,8 @@ bool TopDownPtrState::HandlePotentialAlterRefCount(Instruction *Inst, Class != ARCInstKind::IntrinsicUser) return false; - DEBUG(dbgs() << " CanAlterRefCount: Seq: " << GetSeq() << "; " << *Ptr - << "\n"); + LLVM_DEBUG(dbgs() << " CanAlterRefCount: Seq: " << GetSeq() << "; " + << *Ptr << "\n"); ClearKnownPositiveRefCount(); switch (GetSeq()) { case S_Retain: @@ -410,8 +412,8 @@ void TopDownPtrState::HandlePotentialUse(Instruction *Inst, const Value *Ptr, case S_CanRelease: if (!CanUse(Inst, Ptr, PA, Class)) return; - DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " << *Ptr - << "\n"); + LLVM_DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " + << *Ptr << "\n"); SetSeq(S_Use); return; case S_Retain: diff --git a/llvm/lib/Transforms/Scalar/ADCE.cpp b/llvm/lib/Transforms/Scalar/ADCE.cpp index 03a44849645..ce09a477b5f 100644 --- a/llvm/lib/Transforms/Scalar/ADCE.cpp +++ b/llvm/lib/Transforms/Scalar/ADCE.cpp @@ -298,8 +298,8 @@ void AggressiveDeadCodeElimination::initialize() { auto &Info = BlockInfo[BB]; // Real function return if (isa<ReturnInst>(Info.Terminator)) { - DEBUG(dbgs() << "post-dom root child is a return: " << BB->getName() - << '\n';); + LLVM_DEBUG(dbgs() << "post-dom root child is a return: " << BB->getName() + << '\n';); continue; } @@ -356,7 +356,7 @@ void AggressiveDeadCodeElimination::markLiveInstructions() { // where we need to mark the inputs as live. while (!Worklist.empty()) { Instruction *LiveInst = Worklist.pop_back_val(); - DEBUG(dbgs() << "work live: "; LiveInst->dump();); + LLVM_DEBUG(dbgs() << "work live: "; LiveInst->dump();); for (Use &OI : LiveInst->operands()) if (Instruction *Inst = dyn_cast<Instruction>(OI)) @@ -378,7 +378,7 @@ void AggressiveDeadCodeElimination::markLive(Instruction *I) { if (Info.Live) return; - DEBUG(dbgs() << "mark live: "; I->dump()); + LLVM_DEBUG(dbgs() << "mark live: "; I->dump()); Info.Live = true; Worklist.push_back(I); @@ -402,7 +402,7 @@ void AggressiveDeadCodeElimination::markLive(Instruction *I) { void AggressiveDeadCodeElimination::markLive(BlockInfoType &BBInfo) { if (BBInfo.Live) return; - DEBUG(dbgs() << "mark block live: " << BBInfo.BB->getName() << '\n'); + LLVM_DEBUG(dbgs() << "mark block live: " << BBInfo.BB->getName() << '\n'); BBInfo.Live = true; if (!BBInfo.CFLive) { BBInfo.CFLive = true; @@ -463,7 +463,7 @@ void AggressiveDeadCodeElimination::markLiveBranchesFromControlDependences() { if (BlocksWithDeadTerminators.empty()) return; - DEBUG({ + LLVM_DEBUG({ dbgs() << "new live blocks:\n"; for (auto *BB : NewLiveBlocks) dbgs() << "\t" << BB->getName() << '\n'; @@ -487,7 +487,7 @@ void AggressiveDeadCodeElimination::markLiveBranchesFromControlDependences() { // Dead terminators which control live blocks are now marked live. 
for (auto *BB : IDFBlocks) { - DEBUG(dbgs() << "live control in: " << BB->getName() << '\n'); + LLVM_DEBUG(dbgs() << "live control in: " << BB->getName() << '\n'); markLive(BB->getTerminator()); } } @@ -501,7 +501,7 @@ bool AggressiveDeadCodeElimination::removeDeadInstructions() { // Updates control and dataflow around dead blocks updateDeadRegions(); - DEBUG({ + LLVM_DEBUG({ for (Instruction &I : instructions(F)) { // Check if the instruction is alive. if (isLive(&I)) @@ -555,7 +555,7 @@ bool AggressiveDeadCodeElimination::removeDeadInstructions() { // A dead region is the set of dead blocks with a common live post-dominator. void AggressiveDeadCodeElimination::updateDeadRegions() { - DEBUG({ + LLVM_DEBUG({ dbgs() << "final dead terminator blocks: " << '\n'; for (auto *BB : BlocksWithDeadTerminators) dbgs() << '\t' << BB->getName() @@ -607,8 +607,9 @@ void AggressiveDeadCodeElimination::updateDeadRegions() { // It might have happened that the same successor appeared multiple times // and the CFG edge wasn't really removed. if (Succ != PreferredSucc->BB) { - DEBUG(dbgs() << "ADCE: (Post)DomTree edge enqueued for deletion" - << BB->getName() << " -> " << Succ->getName() << "\n"); + LLVM_DEBUG(dbgs() << "ADCE: (Post)DomTree edge enqueued for deletion" + << BB->getName() << " -> " << Succ->getName() + << "\n"); DeletedEdges.push_back({DominatorTree::Delete, BB, Succ}); } } @@ -652,7 +653,7 @@ void AggressiveDeadCodeElimination::makeUnconditional(BasicBlock *BB, InstInfo[PredTerm].Live = true; return; } - DEBUG(dbgs() << "making unconditional " << BB->getName() << '\n'); + LLVM_DEBUG(dbgs() << "making unconditional " << BB->getName() << '\n'); NumBranchesRemoved += 1; IRBuilder<> Builder(PredTerm); auto *NewTerm = Builder.CreateBr(Target); diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp index b84528271a7..fa7bcec677f 100644 --- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp +++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp @@ -98,8 +98,8 @@ static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV, const SCEV *DiffAlign = SE->getMulExpr(DiffAlignDiv, AlignSCEV); const SCEV *DiffUnitsSCEV = SE->getMinusSCEV(DiffAlign, DiffSCEV); - DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is " << - *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n"); + LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is " + << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n"); if (const SCEVConstant *ConstDUSCEV = dyn_cast<SCEVConstant>(DiffUnitsSCEV)) { @@ -139,12 +139,12 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV, // address. This address is displaced by the provided offset. 
DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV); - DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to " << - *AlignSCEV << " and offset " << *OffSCEV << - " using diff " << *DiffSCEV << "\n"); + LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to " + << *AlignSCEV << " and offset " << *OffSCEV + << " using diff " << *DiffSCEV << "\n"); unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE); - DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n"); + LLVM_DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n"); if (NewAlignment) { return NewAlignment; @@ -160,8 +160,8 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV, const SCEV *DiffStartSCEV = DiffARSCEV->getStart(); const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE); - DEBUG(dbgs() << "\ttrying start/inc alignment using start " << - *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n"); + LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start " + << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n"); // Now compute the new alignment using the displacement to the value in the // first iteration, and also the alignment using the per-iteration delta. @@ -170,26 +170,26 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV, NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE); unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE); - DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n"); - DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n"); + LLVM_DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n"); + LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n"); if (!NewAlignment || !NewIncAlignment) { return 0; } else if (NewAlignment > NewIncAlignment) { if (NewAlignment % NewIncAlignment == 0) { - DEBUG(dbgs() << "\tnew start/inc alignment: " << - NewIncAlignment << "\n"); + LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment + << "\n"); return NewIncAlignment; } } else if (NewIncAlignment > NewAlignment) { if (NewIncAlignment % NewAlignment == 0) { - DEBUG(dbgs() << "\tnew start/inc alignment: " << - NewAlignment << "\n"); + LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment + << "\n"); return NewAlignment; } } else if (NewIncAlignment == NewAlignment) { - DEBUG(dbgs() << "\tnew start/inc alignment: " << - NewAlignment << "\n"); + LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment + << "\n"); return NewAlignment; } } @@ -339,7 +339,7 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) { unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE); - DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";); + LLVM_DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";); if (NewDestAlignment > MI->getDestAlignment()) { MI->setDestAlignment(NewDestAlignment); ++NumMemIntAlignChanged; @@ -351,7 +351,7 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) { unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE); - DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";); + LLVM_DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";); if (NewSrcAlignment > MTI->getSourceAlignment()) { MTI->setSourceAlignment(NewSrcAlignment); diff --git a/llvm/lib/Transforms/Scalar/BDCE.cpp b/llvm/lib/Transforms/Scalar/BDCE.cpp index c39e7443030..dd763847501 100644 --- 
a/llvm/lib/Transforms/Scalar/BDCE.cpp +++ b/llvm/lib/Transforms/Scalar/BDCE.cpp @@ -100,7 +100,7 @@ static bool bitTrackingDCE(Function &F, DemandedBits &DB) { // For live instructions that have all dead bits, first make them dead by // replacing all uses with something else. Then, if they don't need to // remain live (because they have side effects, etc.) we can remove them. - DEBUG(dbgs() << "BDCE: Trivializing: " << I << " (all bits dead)\n"); + LLVM_DEBUG(dbgs() << "BDCE: Trivializing: " << I << " (all bits dead)\n"); clearAssumptionsOfUsers(&I, DB); diff --git a/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp b/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp index dac52a6e3b7..098ad920a76 100644 --- a/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp +++ b/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp @@ -316,7 +316,7 @@ static void splitCallSite( if (!IsMustTailCall && !Instr->use_empty()) CallPN = PHINode::Create(Instr->getType(), Preds.size(), "phi.call"); - DEBUG(dbgs() << "split call-site : " << *Instr << " into \n"); + LLVM_DEBUG(dbgs() << "split call-site : " << *Instr << " into \n"); assert(Preds.size() == 2 && "The ValueToValueMaps array has size 2."); // ValueToValueMapTy is neither copy nor moveable, so we use a simple array @@ -344,8 +344,8 @@ static void splitCallSite( ++ArgNo; } } - DEBUG(dbgs() << " " << *NewCI << " in " << SplitBlock->getName() - << "\n"); + LLVM_DEBUG(dbgs() << " " << *NewCI << " in " << SplitBlock->getName() + << "\n"); if (CallPN) CallPN->addIncoming(NewCI, SplitBlock); diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp index 470e687a722..7f710761611 100644 --- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp +++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp @@ -132,8 +132,8 @@ bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) { if (skipFunction(Fn)) return false; - DEBUG(dbgs() << "********** Begin Constant Hoisting **********\n"); - DEBUG(dbgs() << "********** Function: " << Fn.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Begin Constant Hoisting **********\n"); + LLVM_DEBUG(dbgs() << "********** Function: " << Fn.getName() << '\n'); bool MadeChange = Impl.runImpl(Fn, getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn), @@ -144,11 +144,11 @@ bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) { Fn.getEntryBlock()); if (MadeChange) { - DEBUG(dbgs() << "********** Function after Constant Hoisting: " - << Fn.getName() << '\n'); - DEBUG(dbgs() << Fn); + LLVM_DEBUG(dbgs() << "********** Function after Constant Hoisting: " + << Fn.getName() << '\n'); + LLVM_DEBUG(dbgs() << Fn); } - DEBUG(dbgs() << "********** End Constant Hoisting **********\n"); + LLVM_DEBUG(dbgs() << "********** End Constant Hoisting **********\n"); return MadeChange; } @@ -364,14 +364,13 @@ void ConstantHoistingPass::collectConstantCandidates( Itr->second = ConstCandVec.size() - 1; } ConstCandVec[Itr->second].addUser(Inst, Idx, Cost); - DEBUG(if (isa<ConstantInt>(Inst->getOperand(Idx))) - dbgs() << "Collect constant " << *ConstInt << " from " << *Inst + LLVM_DEBUG(if (isa<ConstantInt>(Inst->getOperand(Idx))) dbgs() + << "Collect constant " << *ConstInt << " from " << *Inst << " with cost " << Cost << '\n'; - else - dbgs() << "Collect constant " << *ConstInt << " indirectly from " - << *Inst << " via " << *Inst->getOperand(Idx) << " with cost " - << Cost << '\n'; - ); + else dbgs() << "Collect constant " << *ConstInt + << " indirectly from " << *Inst << " via " + << 
*Inst->getOperand(Idx) << " with cost " << Cost + << '\n';); } } @@ -501,20 +500,21 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S, return NumUses; } - DEBUG(dbgs() << "== Maximize constants in range ==\n"); + LLVM_DEBUG(dbgs() << "== Maximize constants in range ==\n"); int MaxCost = -1; for (auto ConstCand = S; ConstCand != E; ++ConstCand) { auto Value = ConstCand->ConstInt->getValue(); Type *Ty = ConstCand->ConstInt->getType(); int Cost = 0; NumUses += ConstCand->Uses.size(); - DEBUG(dbgs() << "= Constant: " << ConstCand->ConstInt->getValue() << "\n"); + LLVM_DEBUG(dbgs() << "= Constant: " << ConstCand->ConstInt->getValue() + << "\n"); for (auto User : ConstCand->Uses) { unsigned Opcode = User.Inst->getOpcode(); unsigned OpndIdx = User.OpndIdx; Cost += TTI->getIntImmCost(Opcode, OpndIdx, Value, Ty); - DEBUG(dbgs() << "Cost: " << Cost << "\n"); + LLVM_DEBUG(dbgs() << "Cost: " << Cost << "\n"); for (auto C2 = S; C2 != E; ++C2) { Optional<APInt> Diff = calculateOffsetDiff( @@ -524,18 +524,18 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S, const int ImmCosts = TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.getValue(), Ty); Cost -= ImmCosts; - DEBUG(dbgs() << "Offset " << Diff.getValue() << " " - << "has penalty: " << ImmCosts << "\n" - << "Adjusted cost: " << Cost << "\n"); + LLVM_DEBUG(dbgs() << "Offset " << Diff.getValue() << " " + << "has penalty: " << ImmCosts << "\n" + << "Adjusted cost: " << Cost << "\n"); } } } - DEBUG(dbgs() << "Cumulative cost: " << Cost << "\n"); + LLVM_DEBUG(dbgs() << "Cumulative cost: " << Cost << "\n"); if (Cost > MaxCost) { MaxCost = Cost; MaxCostItr = ConstCand; - DEBUG(dbgs() << "New candidate: " << MaxCostItr->ConstInt->getValue() - << "\n"); + LLVM_DEBUG(dbgs() << "New candidate: " << MaxCostItr->ConstInt->getValue() + << "\n"); } } return NumUses; @@ -641,19 +641,20 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base, Mat = BinaryOperator::Create(Instruction::Add, Base, Offset, "const_mat", InsertionPt); - DEBUG(dbgs() << "Materialize constant (" << *Base->getOperand(0) - << " + " << *Offset << ") in BB " - << Mat->getParent()->getName() << '\n' << *Mat << '\n'); + LLVM_DEBUG(dbgs() << "Materialize constant (" << *Base->getOperand(0) + << " + " << *Offset << ") in BB " + << Mat->getParent()->getName() << '\n' + << *Mat << '\n'); Mat->setDebugLoc(ConstUser.Inst->getDebugLoc()); } Value *Opnd = ConstUser.Inst->getOperand(ConstUser.OpndIdx); // Visit constant integer. if (isa<ConstantInt>(Opnd)) { - DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); + LLVM_DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, Mat) && Offset) Mat->eraseFromParent(); - DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); + LLVM_DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); return; } @@ -669,13 +670,13 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base, ClonedCastInst->insertAfter(CastInst); // Use the same debug location as the original cast instruction. 
ClonedCastInst->setDebugLoc(CastInst->getDebugLoc()); - DEBUG(dbgs() << "Clone instruction: " << *CastInst << '\n' - << "To : " << *ClonedCastInst << '\n'); + LLVM_DEBUG(dbgs() << "Clone instruction: " << *CastInst << '\n' + << "To : " << *ClonedCastInst << '\n'); } - DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); + LLVM_DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ClonedCastInst); - DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); + LLVM_DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); return; } @@ -689,15 +690,15 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base, // Use the same debug location as the instruction we are about to update. ConstExprInst->setDebugLoc(ConstUser.Inst->getDebugLoc()); - DEBUG(dbgs() << "Create instruction: " << *ConstExprInst << '\n' - << "From : " << *ConstExpr << '\n'); - DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); + LLVM_DEBUG(dbgs() << "Create instruction: " << *ConstExprInst << '\n' + << "From : " << *ConstExpr << '\n'); + LLVM_DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ConstExprInst)) { ConstExprInst->eraseFromParent(); if (Offset) Mat->eraseFromParent(); } - DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); + LLVM_DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); return; } } @@ -720,9 +721,9 @@ bool ConstantHoistingPass::emitBaseConstants() { Base->setDebugLoc(IP->getDebugLoc()); - DEBUG(dbgs() << "Hoist constant (" << *ConstInfo.BaseConstant - << ") to BB " << IP->getParent()->getName() << '\n' - << *Base << '\n'); + LLVM_DEBUG(dbgs() << "Hoist constant (" << *ConstInfo.BaseConstant + << ") to BB " << IP->getParent()->getName() << '\n' + << *Base << '\n'); // Emit materialization code for all rebased constants. unsigned Uses = 0; diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp index a85daaa0780..15df482652c 100644 --- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp +++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp @@ -228,7 +228,7 @@ static bool processPHI(PHINode *P, LazyValueInfo *LVI, V = SI->getTrueValue(); } - DEBUG(dbgs() << "CVP: Threading PHI over " << *SI << '\n'); + LLVM_DEBUG(dbgs() << "CVP: Threading PHI over " << *SI << '\n'); } P->setIncomingValue(i, V); diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp index 1d56a6e8aaa..fd2cacf461b 100644 --- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -420,9 +420,10 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later, // Insert our part of the overlap into the map. auto &IM = IOL[DepWrite]; - DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " << - int64_t(EarlierOff + Earlier.Size) << ") Later [" << - LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n"); + LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff + << ", " << int64_t(EarlierOff + Earlier.Size) + << ") Later [" << LaterOff << ", " + << int64_t(LaterOff + Later.Size) << ")\n"); // Make sure that we only insert non-overlapping intervals and combine // adjacent intervals. 
The intervals are stored in the map with the ending @@ -459,11 +460,11 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later, ILI = IM.begin(); if (ILI->second <= EarlierOff && ILI->first >= int64_t(EarlierOff + Earlier.Size)) { - DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" << - EarlierOff << ", " << - int64_t(EarlierOff + Earlier.Size) << - ") Composite Later [" << - ILI->second << ", " << ILI->first << ")\n"); + LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" + << EarlierOff << ", " + << int64_t(EarlierOff + Earlier.Size) + << ") Composite Later [" << ILI->second << ", " + << ILI->first << ")\n"); ++NumCompletePartials; return OW_Complete; } @@ -474,10 +475,11 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later, if (EnablePartialStoreMerging && LaterOff >= EarlierOff && int64_t(EarlierOff + Earlier.Size) > LaterOff && uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) { - DEBUG(dbgs() << "DSE: Partial overwrite an earlier load [" << EarlierOff - << ", " << int64_t(EarlierOff + Earlier.Size) - << ") by a later store [" << LaterOff << ", " - << int64_t(LaterOff + Later.Size) << ")\n"); + LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load [" + << EarlierOff << ", " + << int64_t(EarlierOff + Earlier.Size) + << ") by a later store [" << LaterOff << ", " + << int64_t(LaterOff + Later.Size) << ")\n"); // TODO: Maybe come up with a better name? return OW_PartialEarlierWithFullLater; } @@ -677,8 +679,9 @@ static bool handleFree(CallInst *F, AliasAnalysis *AA, if (!AA->isMustAlias(F->getArgOperand(0), DepPointer)) break; - DEBUG(dbgs() << "DSE: Dead Store to soon to be freed memory:\n DEAD: " - << *Dependency << '\n'); + LLVM_DEBUG( + dbgs() << "DSE: Dead Store to soon to be freed memory:\n DEAD: " + << *Dependency << '\n'); // DCE instructions only used to calculate that store. BasicBlock::iterator BBI(Dependency); @@ -787,15 +790,16 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA, if (AllDead) { Instruction *Dead = &*BBI; - DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: " - << *Dead << "\n Objects: "; - for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(), - E = Pointers.end(); I != E; ++I) { - dbgs() << **I; - if (std::next(I) != E) - dbgs() << ", "; - } - dbgs() << '\n'); + LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: " + << *Dead << "\n Objects: "; + for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(), + E = Pointers.end(); + I != E; ++I) { + dbgs() << **I; + if (std::next(I) != E) + dbgs() << ", "; + } dbgs() + << '\n'); // DCE instructions only used to calculate that store. deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects); @@ -807,8 +811,8 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA, // Remove any dead non-memory-mutating instructions. if (isInstructionTriviallyDead(&*BBI, TLI)) { - DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n DEAD: " - << *&*BBI << '\n'); + LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n DEAD: " + << *&*BBI << '\n'); deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects); ++NumFastOther; MadeChange = true; @@ -917,10 +921,10 @@ static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset, return false; } - DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW " - << (IsOverwriteEnd ? 
"END" : "BEGIN") << ": " << *EarlierWrite - << "\n KILLER (offset " << LaterOffset << ", " << EarlierSize - << ")\n"); + LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW " + << (IsOverwriteEnd ? "END" : "BEGIN") << ": " + << *EarlierWrite << "\n KILLER (offset " << LaterOffset + << ", " << EarlierSize << ")\n"); Value *EarlierWriteLength = EarlierIntrinsic->getLength(); Value *TrimmedLength = @@ -1025,8 +1029,9 @@ static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI, if (SI->getPointerOperand() == DepLoad->getPointerOperand() && isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) { - DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n LOAD: " - << *DepLoad << "\n STORE: " << *SI << '\n'); + LLVM_DEBUG( + dbgs() << "DSE: Remove Store Of Load from same pointer:\n LOAD: " + << *DepLoad << "\n STORE: " << *SI << '\n'); deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering); ++NumRedundantStores; @@ -1042,7 +1047,7 @@ static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI, if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) && memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) { - DEBUG( + LLVM_DEBUG( dbgs() << "DSE: Remove null store to the calloc'ed object:\n DEAD: " << *Inst << "\n OBJECT: " << *UnderlyingPointer << '\n'); @@ -1173,8 +1178,8 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA, OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset, DepWrite, IOL, *AA); if (OR == OW_Complete) { - DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " - << *DepWrite << "\n KILLER: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DepWrite + << "\n KILLER: " << *Inst << '\n'); // Delete the store and now-dead instructions that feed it. deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering); @@ -1232,9 +1237,9 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA, // store, shifted appropriately. APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount); - DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *DepWrite - << "\n Later: " << *Inst - << "\n Merged Value: " << Merged << '\n'); + LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *DepWrite + << "\n Later: " << *Inst + << "\n Merged Value: " << Merged << '\n'); auto *SI = new StoreInst( ConstantInt::get(Earlier->getValueOperand()->getType(), Merged), diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp index 4380812968a..d19ceab3889 100644 --- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp +++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp @@ -728,11 +728,11 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { ? ConstantInt::getTrue(BB->getContext()) : ConstantInt::getFalse(BB->getContext()); AvailableValues.insert(CondInst, TorF); - DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '" - << CondInst->getName() << "' as " << *TorF << " in " - << BB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '" + << CondInst->getName() << "' as " << *TorF << " in " + << BB->getName() << "\n"); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); } else { // Replace all dominated uses with the known value. 
if (unsigned Count = replaceDominatedUsesWith( @@ -758,9 +758,9 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { // Dead instructions should just be removed. if (isInstructionTriviallyDead(Inst, &TLI)) { - DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n'); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); continue; } salvageDebugInfo(*Inst); @@ -779,16 +779,17 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { auto *CondI = dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0)); if (CondI && SimpleValue::canHandle(CondI)) { - DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst + << '\n'); AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext())); } else - DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n'); continue; } // Skip sideeffect intrinsics, for the same reason as assume intrinsics. if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) { - DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n'); continue; } @@ -826,7 +827,8 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { // Is the condition known to be true? if (isa<ConstantInt>(KnownCond) && cast<ConstantInt>(KnownCond)->isOne()) { - DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() + << "EarlyCSE removing guard: " << *Inst << '\n'); removeMSSA(Inst); Inst->eraseFromParent(); Changed = true; @@ -851,9 +853,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { // If the instruction can be simplified (e.g. X+0 = X) then replace it with // its simpler value. if (Value *V = SimplifyInstruction(Inst, SQ)) { - DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V + << '\n'); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); } else { bool Killed = false; if (!Inst->use_empty()) { @@ -877,9 +880,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { if (SimpleValue::canHandle(Inst)) { // See if the instruction has an available value. If so, use it. 
if (Value *V = AvailableValues.lookup(Inst)) { - DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V + << '\n'); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); continue; } if (auto *I = dyn_cast<Instruction>(V)) @@ -937,10 +941,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { InVal.DefInst, Inst))) { Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType()); if (Op != nullptr) { - DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst - << " to: " << *InVal.DefInst << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst + << " to: " << *InVal.DefInst << '\n'); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); continue; } if (!Inst->use_empty()) @@ -980,10 +984,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { if (InVal.first != nullptr && isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first, Inst)) { - DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst - << " to: " << *InVal.first << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst + << " to: " << *InVal.first << '\n'); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); continue; } if (!Inst->use_empty()) @@ -1036,9 +1040,9 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { MemInst.getPointerOperand() || MSSA) && "can't have an intervening store if not using MemorySSA!"); - DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n'); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); continue; } removeMSSA(Inst); @@ -1071,10 +1075,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { !LastStoreMemInst.isVolatile() && "Violated invariant"); if (LastStoreMemInst.isMatchingMemLoc(MemInst)) { - DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore - << " due to: " << *Inst << '\n'); + LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore + << " due to: " << *Inst << '\n'); if (!DebugCounter::shouldExecute(CSECounter)) { - DEBUG(dbgs() << "Skipping due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); } else { removeMSSA(LastStore); LastStore->eraseFromParent(); diff --git a/llvm/lib/Transforms/Scalar/Float2Int.cpp b/llvm/lib/Transforms/Scalar/Float2Int.cpp index b105ece8dc7..f2828e80bc5 100644 --- a/llvm/lib/Transforms/Scalar/Float2Int.cpp +++ b/llvm/lib/Transforms/Scalar/Float2Int.cpp @@ -138,7 +138,7 @@ void Float2IntPass::findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots) { // Helper - mark I as having been traversed, having range R. 
void Float2IntPass::seen(Instruction *I, ConstantRange R) { - DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n"); + LLVM_DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n"); auto IT = SeenInsts.find(I); if (IT != SeenInsts.end()) IT->second = std::move(R); @@ -359,7 +359,7 @@ bool Float2IntPass::validateAndTransform() { for (User *U : I->users()) { Instruction *UI = dyn_cast<Instruction>(U); if (!UI || SeenInsts.find(UI) == SeenInsts.end()) { - DEBUG(dbgs() << "F2I: Failing because of " << *U << "\n"); + LLVM_DEBUG(dbgs() << "F2I: Failing because of " << *U << "\n"); Fail = true; break; } @@ -380,7 +380,7 @@ bool Float2IntPass::validateAndTransform() { // lower limits, plus one so it can be signed. unsigned MinBW = std::max(R.getLower().getMinSignedBits(), R.getUpper().getMinSignedBits()) + 1; - DEBUG(dbgs() << "F2I: MinBitwidth=" << MinBW << ", R: " << R << "\n"); + LLVM_DEBUG(dbgs() << "F2I: MinBitwidth=" << MinBW << ", R: " << R << "\n"); // If we've run off the realms of the exactly representable integers, // the floating point result will differ from an integer approximation. @@ -391,11 +391,12 @@ bool Float2IntPass::validateAndTransform() { unsigned MaxRepresentableBits = APFloat::semanticsPrecision(ConvertedToTy->getFltSemantics()) - 1; if (MinBW > MaxRepresentableBits) { - DEBUG(dbgs() << "F2I: Value not guaranteed to be representable!\n"); + LLVM_DEBUG(dbgs() << "F2I: Value not guaranteed to be representable!\n"); continue; } if (MinBW > 64) { - DEBUG(dbgs() << "F2I: Value requires more than 64 bits to represent!\n"); + LLVM_DEBUG( + dbgs() << "F2I: Value requires more than 64 bits to represent!\n"); continue; } @@ -490,7 +491,7 @@ void Float2IntPass::cleanup() { } bool Float2IntPass::runImpl(Function &F) { - DEBUG(dbgs() << "F2I: Looking at function " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "F2I: Looking at function " << F.getName() << "\n"); // Clear out all state. ECs = EquivalenceClasses<Instruction*>(); SeenInsts.clear(); diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index 59b87e9a77d..ff3c2f9fbd2 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -784,9 +784,10 @@ Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI, if (Res->getType() != LoadTy) { Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL); - DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " " - << *getSimpleValue() << '\n' - << *Res << '\n' << "\n\n\n"); + LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset + << " " << *getSimpleValue() << '\n' + << *Res << '\n' + << "\n\n\n"); } } else if (isCoercedLoadValue()) { LoadInst *Load = getCoercedLoadValue(); @@ -800,20 +801,21 @@ Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI, // but then there all of the operations based on it would need to be // rehashed. Just leave the dead load around. 
gvn.getMemDep().removeInstruction(Load); - DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " " - << *getCoercedLoadValue() << '\n' - << *Res << '\n' - << "\n\n\n"); + LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset + << " " << *getCoercedLoadValue() << '\n' + << *Res << '\n' + << "\n\n\n"); } } else if (isMemIntrinValue()) { Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, InsertPt, DL); - DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset - << " " << *getMemIntrinValue() << '\n' - << *Res << '\n' << "\n\n\n"); + LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset + << " " << *getMemIntrinValue() << '\n' + << *Res << '\n' + << "\n\n\n"); } else { assert(isUndefValue() && "Should be UndefVal"); - DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";); + LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";); return UndefValue::get(LoadTy); } assert(Res && "failed to materialize?"); @@ -915,13 +917,11 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo, } } // Nothing known about this clobber, have to be conservative - DEBUG( - // fast print dep, using operator<< on instruction is too slow. - dbgs() << "GVN: load "; - LI->printAsOperand(dbgs()); - Instruction *I = DepInfo.getInst(); - dbgs() << " is clobbered by " << *I << '\n'; - ); + LLVM_DEBUG( + // fast print dep, using operator<< on instruction is too slow. + dbgs() << "GVN: load "; LI->printAsOperand(dbgs()); + Instruction *I = DepInfo.getInst(); + dbgs() << " is clobbered by " << *I << '\n';); if (ORE->allowExtraAnalysis(DEBUG_TYPE)) reportMayClobberedLoad(LI, DepInfo, DT, ORE); @@ -979,12 +979,10 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo, } // Unknown def - must be conservative - DEBUG( - // fast print dep, using operator<< on instruction is too slow. - dbgs() << "GVN: load "; - LI->printAsOperand(dbgs()); - dbgs() << " has unknown def " << *DepInst << '\n'; - ); + LLVM_DEBUG( + // fast print dep, using operator<< on instruction is too slow. + dbgs() << "GVN: load "; LI->printAsOperand(dbgs()); + dbgs() << " has unknown def " << *DepInst << '\n';); return false; } @@ -1114,9 +1112,9 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, // If any predecessor block is an EH pad that does not allow non-PHI // instructions before the terminator, we can't PRE the load. 
if (Pred->getTerminator()->isEHPad()) { - DEBUG(dbgs() - << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '" - << Pred->getName() << "': " << *LI << '\n'); + LLVM_DEBUG( + dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '" + << Pred->getName() << "': " << *LI << '\n'); return false; } @@ -1126,15 +1124,16 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, if (Pred->getTerminator()->getNumSuccessors() != 1) { if (isa<IndirectBrInst>(Pred->getTerminator())) { - DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '" - << Pred->getName() << "': " << *LI << '\n'); + LLVM_DEBUG( + dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '" + << Pred->getName() << "': " << *LI << '\n'); return false; } if (LoadBB->isEHPad()) { - DEBUG(dbgs() - << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '" - << Pred->getName() << "': " << *LI << '\n'); + LLVM_DEBUG( + dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '" + << Pred->getName() << "': " << *LI << '\n'); return false; } @@ -1162,8 +1161,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB); assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!"); PredLoads[NewPred] = nullptr; - DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->" - << LoadBB->getName() << '\n'); + LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->" + << LoadBB->getName() << '\n'); } // Check if the load can safely be moved to all the unavailable predecessors. @@ -1187,8 +1186,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, // If we couldn't find or insert a computation of this phi translated value, // we fail PRE. if (!LoadPtr) { - DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " - << *LI->getPointerOperand() << "\n"); + LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " + << *LI->getPointerOperand() << "\n"); CanDoPRE = false; break; } @@ -1209,10 +1208,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, // Okay, we can eliminate this load by inserting a reload in the predecessor // and using PHI construction to get the value in the other predecessors, do // it. - DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n'); - DEBUG(if (!NewInsts.empty()) - dbgs() << "INSERTED " << NewInsts.size() << " INSTS: " - << *NewInsts.back() << '\n'); + LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n'); + LLVM_DEBUG(if (!NewInsts.empty()) dbgs() + << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back() + << '\n'); // Assign value numbers to the new instructions. for (Instruction *I : NewInsts) { @@ -1263,7 +1262,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred, NewLoad)); MD->invalidateCachedPointerInfo(LoadPtr); - DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n'); + LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n'); } // Perform PHI construction. @@ -1321,11 +1320,8 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { // clobber in the current block. Reject this early. 
if (NumDeps == 1 && !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) { - DEBUG( - dbgs() << "GVN: non-local load "; - LI->printAsOperand(dbgs()); - dbgs() << " has unknown dependencies\n"; - ); + LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs()); + dbgs() << " has unknown dependencies\n";); return false; } @@ -1354,7 +1350,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { // load, then it is fully redundant and we can use PHI insertion to compute // its value. Insert PHIs and remove the fully redundant value now. if (UnavailableBlocks.empty()) { - DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); + LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); // Perform PHI construction. Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this); @@ -1507,12 +1503,10 @@ bool GVN::processLoad(LoadInst *L) { // Only handle the local case below if (!Dep.isDef() && !Dep.isClobber()) { // This might be a NonFuncLocal or an Unknown - DEBUG( - // fast print dep, using operator<< on instruction is too slow. - dbgs() << "GVN: load "; - L->printAsOperand(dbgs()); - dbgs() << " has unknown dependence\n"; - ); + LLVM_DEBUG( + // fast print dep, using operator<< on instruction is too slow. + dbgs() << "GVN: load "; L->printAsOperand(dbgs()); + dbgs() << " has unknown dependence\n";); return false; } @@ -1696,8 +1690,8 @@ bool GVN::replaceOperandsWithConsts(Instruction *Instr) const { if (it != ReplaceWithConstMap.end()) { assert(!isa<Constant>(Operand) && "Replacing constants with constants is invalid"); - DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " << *it->second - << " in instruction " << *Instr << '\n'); + LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " + << *it->second << " in instruction " << *Instr << '\n'); Instr->setOperand(OpNum, it->second); Changed = true; } @@ -2039,7 +2033,7 @@ bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT, unsigned Iteration = 0; while (ShouldContinue) { - DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); + LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); ShouldContinue = iterateOnFunction(F); Changed |= ShouldContinue; ++Iteration; @@ -2105,10 +2099,10 @@ bool GVN::processBlock(BasicBlock *BB) { const Instruction *MaybeFirstICF = FirstImplicitControlFlowInsts.lookup(BB); for (auto *I : InstrsToErase) { assert(I->getParent() == BB && "Removing instruction from wrong block?"); - DEBUG(dbgs() << "GVN removed: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n'); salvageDebugInfo(*I); if (MD) MD->removeInstruction(I); - DEBUG(verifyRemoved(I)); + LLVM_DEBUG(verifyRemoved(I)); if (MaybeFirstICF == I) { // We have erased the first ICF in block. The map needs to be updated. InvalidateImplicitCF = true; @@ -2290,7 +2284,7 @@ bool GVN::performScalarPRE(Instruction *CurInst) { PREInstr = CurInst->clone(); if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) { // If we failed insertion, make sure we remove the instruction. 
- DEBUG(verifyRemoved(PREInstr)); + LLVM_DEBUG(verifyRemoved(PREInstr)); PREInstr->deleteValue(); return false; } @@ -2328,10 +2322,10 @@ bool GVN::performScalarPRE(Instruction *CurInst) { VN.erase(CurInst); removeFromLeaderTable(ValNo, CurInst, CurrentBlock); - DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); + LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); if (MD) MD->removeInstruction(CurInst); - DEBUG(verifyRemoved(CurInst)); + LLVM_DEBUG(verifyRemoved(CurInst)); bool InvalidateImplicitCF = FirstImplicitControlFlowInsts.lookup(CurInst->getParent()) == CurInst; // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp index a8b33d19d17..6994d964a9e 100644 --- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp +++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp @@ -622,7 +622,7 @@ private: // Iterate in reverse order to keep lower ranked values on the top. for (std::pair<VNType, Instruction *> &VI : reverse(it1->second)) { // Get the value of instruction I - DEBUG(dbgs() << "\nPushing on stack: " << *VI.second); + LLVM_DEBUG(dbgs() << "\nPushing on stack: " << *VI.second); RenameStack[VI.first].push_back(VI.second); } } @@ -636,7 +636,7 @@ private: if (P == CHIBBs.end()) { continue; } - DEBUG(dbgs() << "\nLooking at CHIs in: " << Pred->getName();); + LLVM_DEBUG(dbgs() << "\nLooking at CHIs in: " << Pred->getName();); // A CHI is found (BB -> Pred is an edge in the CFG) // Pop the stack until Top(V) = Ve. auto &VCHI = P->second; @@ -651,9 +651,9 @@ private: DT->properlyDominates(Pred, si->second.back()->getParent())) { C.Dest = BB; // Assign the edge C.I = si->second.pop_back_val(); // Assign the argument - DEBUG(dbgs() << "\nCHI Inserted in BB: " << C.Dest->getName() - << *C.I << ", VN: " << C.VN.first << ", " - << C.VN.second); + LLVM_DEBUG(dbgs() + << "\nCHI Inserted in BB: " << C.Dest->getName() << *C.I + << ", VN: " << C.VN.first << ", " << C.VN.second); } // Move to next CHI of a different value It = std::find_if(It, VCHI.end(), @@ -798,8 +798,8 @@ private: // Ignore spurious PDFs. 
if (DT->properlyDominates(IDFB, V[i]->getParent())) { OutValue[IDFB].push_back(C); - DEBUG(dbgs() << "\nInsertion a CHI for BB: " << IDFB->getName() - << ", for Insn: " << *V[i]); + LLVM_DEBUG(dbgs() << "\nInsertion a CHI for BB: " << IDFB->getName() + << ", for Insn: " << *V[i]); } } } diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp index 4368b68582e..9d8a6d1c552 100644 --- a/llvm/lib/Transforms/Scalar/GVNSink.cpp +++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp @@ -561,7 +561,8 @@ public: GVNSink() = default; bool run(Function &F) { - DEBUG(dbgs() << "GVNSink: running on function @" << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "GVNSink: running on function @" << F.getName() + << "\n"); unsigned NumSunk = 0; ReversePostOrderTraversal<Function*> RPOT(&F); @@ -629,15 +630,15 @@ Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking( LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum, ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents) { auto Insts = *LRI; - DEBUG(dbgs() << " -- Analyzing instruction set: [\n"; for (auto *I - : Insts) { + LLVM_DEBUG(dbgs() << " -- Analyzing instruction set: [\n"; for (auto *I + : Insts) { I->dump(); } dbgs() << " ]\n";); DenseMap<uint32_t, unsigned> VNums; for (auto *I : Insts) { uint32_t N = VN.lookupOrAdd(I); - DEBUG(dbgs() << " VN=" << Twine::utohexstr(N) << " for" << *I << "\n"); + LLVM_DEBUG(dbgs() << " VN=" << Twine::utohexstr(N) << " for" << *I << "\n"); if (N == ~0U) return None; VNums[N]++; @@ -749,8 +750,8 @@ Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking( } unsigned GVNSink::sinkBB(BasicBlock *BBEnd) { - DEBUG(dbgs() << "GVNSink: running on basic block "; - BBEnd->printAsOperand(dbgs()); dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "GVNSink: running on basic block "; + BBEnd->printAsOperand(dbgs()); dbgs() << "\n"); SmallVector<BasicBlock *, 4> Preds; for (auto *B : predecessors(BBEnd)) { auto *T = B->getTerminator(); @@ -794,23 +795,23 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) { Candidates.begin(), Candidates.end(), [](const SinkingInstructionCandidate &A, const SinkingInstructionCandidate &B) { return A > B; }); - DEBUG(dbgs() << " -- Sinking candidates:\n"; for (auto &C - : Candidates) dbgs() - << " " << C << "\n";); + LLVM_DEBUG(dbgs() << " -- Sinking candidates:\n"; for (auto &C + : Candidates) dbgs() + << " " << C << "\n";); // Pick the top candidate, as long it is positive! if (Candidates.empty() || Candidates.front().Cost <= 0) return 0; auto C = Candidates.front(); - DEBUG(dbgs() << " -- Sinking: " << C << "\n"); + LLVM_DEBUG(dbgs() << " -- Sinking: " << C << "\n"); BasicBlock *InsertBB = BBEnd; if (C.Blocks.size() < NumOrigPreds) { - DEBUG(dbgs() << " -- Splitting edge to "; BBEnd->printAsOperand(dbgs()); - dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " -- Splitting edge to "; + BBEnd->printAsOperand(dbgs()); dbgs() << "\n"); InsertBB = SplitBlockPredecessors(BBEnd, C.Blocks, ".gvnsink.split"); if (!InsertBB) { - DEBUG(dbgs() << " -- FAILED to split edge!\n"); + LLVM_DEBUG(dbgs() << " -- FAILED to split edge!\n"); // Edge couldn't be split. 
return 0; } diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp index 55d2f6ee81c..ad1598d7b8b 100644 --- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp +++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp @@ -302,9 +302,9 @@ bool GuardWideningImpl::eliminateGuardViaWidening( for (auto *Candidate : make_range(I, E)) { auto Score = computeWideningScore(GuardInst, GuardInstLoop, Candidate, CurLoop); - DEBUG(dbgs() << "Score between " << *GuardInst->getArgOperand(0) - << " and " << *Candidate->getArgOperand(0) << " is " - << scoreTypeToString(Score) << "\n"); + LLVM_DEBUG(dbgs() << "Score between " << *GuardInst->getArgOperand(0) + << " and " << *Candidate->getArgOperand(0) << " is " + << scoreTypeToString(Score) << "\n"); if (Score > BestScoreSoFar) { BestScoreSoFar = Score; BestSoFar = Candidate; @@ -313,15 +313,16 @@ bool GuardWideningImpl::eliminateGuardViaWidening( } if (BestScoreSoFar == WS_IllegalOrNegative) { - DEBUG(dbgs() << "Did not eliminate guard " << *GuardInst << "\n"); + LLVM_DEBUG(dbgs() << "Did not eliminate guard " << *GuardInst << "\n"); return false; } assert(BestSoFar != GuardInst && "Should have never visited same guard!"); assert(DT.dominates(BestSoFar, GuardInst) && "Should be!"); - DEBUG(dbgs() << "Widening " << *GuardInst << " into " << *BestSoFar - << " with score " << scoreTypeToString(BestScoreSoFar) << "\n"); + LLVM_DEBUG(dbgs() << "Widening " << *GuardInst << " into " << *BestSoFar + << " with score " << scoreTypeToString(BestScoreSoFar) + << "\n"); widenGuard(BestSoFar, GuardInst->getArgOperand(0)); GuardInst->setArgOperand(0, ConstantInt::getTrue(GuardInst->getContext())); EliminatedGuards.push_back(GuardInst); diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp index ae0c3eb243e..86b0269e1f7 100644 --- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -210,8 +210,8 @@ bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) { if (FromBase == ToBase) return true; - DEBUG(dbgs() << "INDVARS: GEP rewrite bail out " - << *FromBase << " != " << *ToBase << "\n"); + LLVM_DEBUG(dbgs() << "INDVARS: GEP rewrite bail out " << *FromBase + << " != " << *ToBase << "\n"); return false; } @@ -653,8 +653,9 @@ void IndVarSimplify::rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { Value *ExitVal = expandSCEVIfNeeded(Rewriter, ExitValue, L, Inst, PN->getType()); - DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n' - << " LoopVal = " << *Inst << "\n"); + LLVM_DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal + << '\n' + << " LoopVal = " << *Inst << "\n"); if (!isValidRewrite(Inst, ExitVal)) { DeadInsts.push_back(ExitVal); @@ -1084,7 +1085,7 @@ Instruction *WidenIV::cloneBitwiseIVUser(NarrowIVDefUse DU) { Instruction *NarrowDef = DU.NarrowDef; Instruction *WideDef = DU.WideDef; - DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n"); + LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n"); // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything // about the narrow operand yet so must insert a [sz]ext. 
It is probably loop @@ -1115,7 +1116,7 @@ Instruction *WidenIV::cloneArithmeticIVUser(NarrowIVDefUse DU, Instruction *NarrowDef = DU.NarrowDef; Instruction *WideDef = DU.WideDef; - DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n"); + LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n"); unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1; @@ -1315,8 +1316,8 @@ WidenIV::WidenedRecTy WidenIV::getWideRecurrence(NarrowIVDefUse DU) { /// This IV user cannot be widen. Replace this use of the original narrow IV /// with a truncation of the new wide IV to isolate and eliminate the narrow IV. static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT, LoopInfo *LI) { - DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef - << " for user " << *DU.NarrowUse << "\n"); + LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user " + << *DU.NarrowUse << "\n"); IRBuilder<> Builder( getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI)); Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType()); @@ -1396,8 +1397,8 @@ Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) { Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType()); UsePhi->replaceAllUsesWith(Trunc); DeadInsts.emplace_back(UsePhi); - DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi - << " to " << *WidePhi << "\n"); + LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to " + << *WidePhi << "\n"); } return nullptr; } @@ -1428,15 +1429,16 @@ Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) { // A wider extend was hidden behind a narrower one. This may induce // another round of IV widening in which the intermediate IV becomes // dead. It should be very rare. - DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi - << " not wide enough to subsume " << *DU.NarrowUse << "\n"); + LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi + << " not wide enough to subsume " << *DU.NarrowUse + << "\n"); DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef); NewDef = DU.NarrowUse; } } if (NewDef != DU.NarrowUse) { - DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse - << " replaced by " << *DU.WideDef << "\n"); + LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse + << " replaced by " << *DU.WideDef << "\n"); ++NumElimExt; DU.NarrowUse->replaceAllUsesWith(NewDef); DeadInsts.emplace_back(DU.NarrowUse); @@ -1491,8 +1493,9 @@ Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) { // absolutely guarantee it. Hence the following failsafe check. In rare cases // where it fails, we simply throw away the newly created wide use. if (WideAddRec.first != SE->getSCEV(WideUse)) { - DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse - << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first << "\n"); + LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": " + << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first + << "\n"); DeadInsts.emplace_back(WideUse); return nullptr; } @@ -1597,7 +1600,7 @@ PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) { WideInc->setDebugLoc(OrigInc->getDebugLoc()); } - DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n"); + LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n"); ++NumWidened; // Traverse the def-use chain using a worklist starting at the original IV. 
@@ -2231,12 +2234,12 @@ linearFunctionTestReplace(Loop *L, else P = ICmpInst::ICMP_EQ; - DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n" - << " LHS:" << *CmpIndVar << '\n' - << " op:\t" - << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n" - << " RHS:\t" << *ExitCnt << "\n" - << " IVCount:\t" << *IVCount << "\n"); + LLVM_DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n" + << " LHS:" << *CmpIndVar << '\n' + << " op:\t" << (P == ICmpInst::ICMP_NE ? "!=" : "==") + << "\n" + << " RHS:\t" << *ExitCnt << "\n" + << " IVCount:\t" << *IVCount << "\n"); IRBuilder<> Builder(BI); @@ -2272,7 +2275,7 @@ linearFunctionTestReplace(Loop *L, NewLimit = Start + Count; ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit); - DEBUG(dbgs() << " Widen RHS:\t" << *ExitCnt << "\n"); + LLVM_DEBUG(dbgs() << " Widen RHS:\t" << *ExitCnt << "\n"); } else { // We try to extend trip count first. If that doesn't work we truncate IV. // Zext(trunc(IV)) == IV implies equivalence of the following two: diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp index 79004ddba18..92dc21456e5 100644 --- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp +++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp @@ -716,12 +716,13 @@ static bool isSafeDecreasingBound(const SCEV *Start, assert(SE.isKnownNegative(Step) && "expecting negative step"); - DEBUG(dbgs() << "irce: isSafeDecreasingBound with:\n"); - DEBUG(dbgs() << "irce: Start: " << *Start << "\n"); - DEBUG(dbgs() << "irce: Step: " << *Step << "\n"); - DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n"); - DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred) << "\n"); - DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n"); + LLVM_DEBUG(dbgs() << "irce: isSafeDecreasingBound with:\n"); + LLVM_DEBUG(dbgs() << "irce: Start: " << *Start << "\n"); + LLVM_DEBUG(dbgs() << "irce: Step: " << *Step << "\n"); + LLVM_DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n"); + LLVM_DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred) + << "\n"); + LLVM_DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n"); bool IsSigned = ICmpInst::isSigned(Pred); // The predicate that we need to check that the induction variable lies @@ -763,12 +764,13 @@ static bool isSafeIncreasingBound(const SCEV *Start, if (!SE.isAvailableAtLoopEntry(BoundSCEV, L)) return false; - DEBUG(dbgs() << "irce: isSafeIncreasingBound with:\n"); - DEBUG(dbgs() << "irce: Start: " << *Start << "\n"); - DEBUG(dbgs() << "irce: Step: " << *Step << "\n"); - DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n"); - DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred) << "\n"); - DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n"); + LLVM_DEBUG(dbgs() << "irce: isSafeIncreasingBound with:\n"); + LLVM_DEBUG(dbgs() << "irce: Start: " << *Start << "\n"); + LLVM_DEBUG(dbgs() << "irce: Step: " << *Step << "\n"); + LLVM_DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n"); + LLVM_DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred) + << "\n"); + LLVM_DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n"); bool IsSigned = ICmpInst::isSigned(Pred); // The predicate that we need to check that the induction variable lies @@ -1473,7 +1475,7 @@ bool LoopConstrainer::run() { bool IsSignedPredicate = MainLoopStructure.IsSignedPredicate; Optional<SubRanges> MaybeSR = 
calculateSubRanges(IsSignedPredicate); if (!MaybeSR.hasValue()) { - DEBUG(dbgs() << "irce: could not compute subranges\n"); + LLVM_DEBUG(dbgs() << "irce: could not compute subranges\n"); return false; } @@ -1509,17 +1511,18 @@ bool LoopConstrainer::run() { IsSignedPredicate)) ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS); else { - DEBUG(dbgs() << "irce: could not prove no-overflow when computing " - << "preloop exit limit. HighLimit = " << *(*SR.HighLimit) - << "\n"); + LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing " + << "preloop exit limit. HighLimit = " + << *(*SR.HighLimit) << "\n"); return false; } } if (!isSafeToExpandAt(ExitPreLoopAtSCEV, InsertPt, SE)) { - DEBUG(dbgs() << "irce: could not prove that it is safe to expand the" - << " preloop exit limit " << *ExitPreLoopAtSCEV - << " at block " << InsertPt->getParent()->getName() << "\n"); + LLVM_DEBUG(dbgs() << "irce: could not prove that it is safe to expand the" + << " preloop exit limit " << *ExitPreLoopAtSCEV + << " at block " << InsertPt->getParent()->getName() + << "\n"); return false; } @@ -1537,17 +1540,18 @@ bool LoopConstrainer::run() { IsSignedPredicate)) ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS); else { - DEBUG(dbgs() << "irce: could not prove no-overflow when computing " - << "mainloop exit limit. LowLimit = " << *(*SR.LowLimit) - << "\n"); + LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing " + << "mainloop exit limit. LowLimit = " + << *(*SR.LowLimit) << "\n"); return false; } } if (!isSafeToExpandAt(ExitMainLoopAtSCEV, InsertPt, SE)) { - DEBUG(dbgs() << "irce: could not prove that it is safe to expand the" - << " main loop exit limit " << *ExitMainLoopAtSCEV - << " at block " << InsertPt->getParent()->getName() << "\n"); + LLVM_DEBUG(dbgs() << "irce: could not prove that it is safe to expand the" + << " main loop exit limit " << *ExitMainLoopAtSCEV + << " at block " << InsertPt->getParent()->getName() + << "\n"); return false; } @@ -1826,13 +1830,13 @@ bool IRCELegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) { bool InductiveRangeCheckElimination::run( Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop) { if (L->getBlocks().size() >= LoopSizeCutoff) { - DEBUG(dbgs() << "irce: giving up constraining loop, too large\n"); + LLVM_DEBUG(dbgs() << "irce: giving up constraining loop, too large\n"); return false; } BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) { - DEBUG(dbgs() << "irce: loop has no preheader, leaving\n"); + LLVM_DEBUG(dbgs() << "irce: loop has no preheader, leaving\n"); return false; } @@ -1855,7 +1859,7 @@ bool InductiveRangeCheckElimination::run( IRC.print(OS); }; - DEBUG(PrintRecognizedRangeChecks(dbgs())); + LLVM_DEBUG(PrintRecognizedRangeChecks(dbgs())); if (PrintRangeChecks) PrintRecognizedRangeChecks(errs()); @@ -1864,8 +1868,8 @@ bool InductiveRangeCheckElimination::run( Optional<LoopStructure> MaybeLoopStructure = LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason); if (!MaybeLoopStructure.hasValue()) { - DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason - << "\n";); + LLVM_DEBUG(dbgs() << "irce: could not parse loop structure: " + << FailureReason << "\n";); return false; } LoopStructure LS = MaybeLoopStructure.getValue(); @@ -1915,7 +1919,7 @@ bool InductiveRangeCheckElimination::run( L->print(dbgs()); }; - DEBUG(PrintConstrainedLoopInfo()); + LLVM_DEBUG(PrintConstrainedLoopInfo()); if (PrintChangedLoops) PrintConstrainedLoopInfo(); diff --git 
a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp index 454ea254b88..e6fea56f264 100644 --- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp +++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp @@ -653,13 +653,13 @@ void InferAddressSpaces::inferAddressSpaces( // Tries to update the address space of the stack top according to the // address spaces of its operands. - DEBUG(dbgs() << "Updating the address space of\n " << *V << '\n'); + LLVM_DEBUG(dbgs() << "Updating the address space of\n " << *V << '\n'); Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace); if (!NewAS.hasValue()) continue; // If any updates are made, grabs its users to the worklist because // their address spaces can also be possibly updated. - DEBUG(dbgs() << " to " << NewAS.getValue() << '\n'); + LLVM_DEBUG(dbgs() << " to " << NewAS.getValue() << '\n'); (*InferredAddrSpace)[V] = NewAS.getValue(); for (Value *User : V->users()) { @@ -901,15 +901,15 @@ bool InferAddressSpaces::rewriteWithNewAddressSpaces( if (NewV == nullptr) continue; - DEBUG(dbgs() << "Replacing the uses of " << *V - << "\n with\n " << *NewV << '\n'); + LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n with\n " + << *NewV << '\n'); if (Constant *C = dyn_cast<Constant>(V)) { Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), C->getType()); if (C != Replace) { - DEBUG(dbgs() << "Inserting replacement const cast: " - << Replace << ": " << *Replace << '\n'); + LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace + << ": " << *Replace << '\n'); C->replaceAllUsesWith(Replace); V = Replace; } diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp index 5f379253ea5..cf9919ebf9d 100644 --- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp +++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp @@ -340,7 +340,7 @@ bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_, DeferredDominance *DDT_, bool HasProfileData_, std::unique_ptr<BlockFrequencyInfo> BFI_, std::unique_ptr<BranchProbabilityInfo> BPI_) { - DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n"); + LLVM_DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n"); TLI = TLI_; LVI = LVI_; AA = AA_; @@ -386,8 +386,9 @@ bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_, if (pred_empty(&BB)) { // When ProcessBlock makes BB unreachable it doesn't bother to fix up // the instructions in it. We must remove BB to prevent invalid IR. - DEBUG(dbgs() << " JT: Deleting dead block '" << BB.getName() - << "' with terminator: " << *BB.getTerminator() << '\n'); + LLVM_DEBUG(dbgs() << " JT: Deleting dead block '" << BB.getName() + << "' with terminator: " << *BB.getTerminator() + << '\n'); LoopHeaders.erase(&BB); LVI->eraseBlock(&BB); DeleteDeadBlock(&BB, DDT); @@ -1084,8 +1085,8 @@ bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) { Updates.push_back({DominatorTree::Delete, BB, Succ}); } - DEBUG(dbgs() << " In block '" << BB->getName() - << "' folding undef terminator: " << *BBTerm << '\n'); + LLVM_DEBUG(dbgs() << " In block '" << BB->getName() + << "' folding undef terminator: " << *BBTerm << '\n'); BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm); BBTerm->eraseFromParent(); DDT->applyUpdates(Updates); @@ -1096,8 +1097,9 @@ bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) { // terminator to an unconditional branch. This can occur due to threading in // other blocks. 
if (getKnownConstant(Condition, Preference)) { - DEBUG(dbgs() << " In block '" << BB->getName() - << "' folding terminator: " << *BB->getTerminator() << '\n'); + LLVM_DEBUG(dbgs() << " In block '" << BB->getName() + << "' folding terminator: " << *BB->getTerminator() + << '\n'); ++NumFolds; ConstantFoldTerminator(BB, true, nullptr, DDT); return true; @@ -1574,12 +1576,12 @@ bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB, assert(!PredValues.empty() && "ComputeValueKnownInPredecessors returned true with no values"); - DEBUG(dbgs() << "IN BB: " << *BB; - for (const auto &PredValue : PredValues) { - dbgs() << " BB '" << BB->getName() << "': FOUND condition = " - << *PredValue.first - << " for pred '" << PredValue.second->getName() << "'.\n"; - }); + LLVM_DEBUG(dbgs() << "IN BB: " << *BB; for (const auto &PredValue + : PredValues) { + dbgs() << " BB '" << BB->getName() + << "': FOUND condition = " << *PredValue.first << " for pred '" + << PredValue.second->getName() << "'.\n"; + }); // Decide what we want to thread through. Convert our list of known values to // a list of known destinations for each pred. This also discards duplicate @@ -1901,15 +1903,15 @@ bool JumpThreadingPass::ThreadEdge(BasicBlock *BB, BasicBlock *SuccBB) { // If threading to the same block as we come from, we would infinite loop. if (SuccBB == BB) { - DEBUG(dbgs() << " Not threading across BB '" << BB->getName() - << "' - would thread to self!\n"); + LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName() + << "' - would thread to self!\n"); return false; } // If threading this would thread across a loop header, don't thread the edge. // See the comments above FindLoopHeaders for justifications and caveats. if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { - DEBUG({ + LLVM_DEBUG({ bool BBIsHeader = LoopHeaders.count(BB); bool SuccIsHeader = LoopHeaders.count(SuccBB); dbgs() << " Not threading across " @@ -1923,8 +1925,8 @@ bool JumpThreadingPass::ThreadEdge(BasicBlock *BB, unsigned JumpThreadCost = getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold); if (JumpThreadCost > BBDupThreshold) { - DEBUG(dbgs() << " Not threading BB '" << BB->getName() - << "' - Cost is too high: " << JumpThreadCost << "\n"); + LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName() + << "' - Cost is too high: " << JumpThreadCost << "\n"); return false; } @@ -1933,16 +1935,16 @@ bool JumpThreadingPass::ThreadEdge(BasicBlock *BB, if (PredBBs.size() == 1) PredBB = PredBBs[0]; else { - DEBUG(dbgs() << " Factoring out " << PredBBs.size() - << " common predecessors.\n"); + LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() + << " common predecessors.\n"); PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm"); } // And finally, do it! - DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() << "' to '" - << SuccBB->getName() << "' with cost: " << JumpThreadCost - << ", across block:\n " - << *BB << "\n"); + LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() + << "' to '" << SuccBB->getName() + << "' with cost: " << JumpThreadCost + << ", across block:\n " << *BB << "\n"); if (DDT->pending()) LVI->disableDT(); @@ -2235,17 +2237,17 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred( // cause us to transform this into an irreducible loop, don't do this. // See the comments above FindLoopHeaders for justifications and caveats. 
if (LoopHeaders.count(BB)) { - DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName() - << "' into predecessor block '" << PredBBs[0]->getName() - << "' - it might create an irreducible loop!\n"); + LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName() + << "' into predecessor block '" << PredBBs[0]->getName() + << "' - it might create an irreducible loop!\n"); return false; } unsigned DuplicationCost = getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold); if (DuplicationCost > BBDupThreshold) { - DEBUG(dbgs() << " Not duplicating BB '" << BB->getName() - << "' - Cost is too high: " << DuplicationCost << "\n"); + LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName() + << "' - Cost is too high: " << DuplicationCost << "\n"); return false; } @@ -2255,17 +2257,18 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred( if (PredBBs.size() == 1) PredBB = PredBBs[0]; else { - DEBUG(dbgs() << " Factoring out " << PredBBs.size() - << " common predecessors.\n"); + LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() + << " common predecessors.\n"); PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm"); } Updates.push_back({DominatorTree::Delete, PredBB, BB}); // Okay, we decided to do this! Clone all the instructions in BB onto the end // of PredBB. - DEBUG(dbgs() << " Duplicating block '" << BB->getName() << "' into end of '" - << PredBB->getName() << "' to eliminate branch on phi. Cost: " - << DuplicationCost << " block is:" << *BB << "\n"); + LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName() + << "' into end of '" << PredBB->getName() + << "' to eliminate branch on phi. Cost: " + << DuplicationCost << " block is:" << *BB << "\n"); // Unless PredBB ends with an unconditional branch, split the edge so that we // can just clone the bits from BB into the end of the new PredBB. @@ -2357,7 +2360,7 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred( if (UsesToRename.empty()) continue; - DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n"); + LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n"); // We found a use of I outside of BB. Rename all uses of I that are outside // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks @@ -2368,7 +2371,7 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred( while (!UsesToRename.empty()) SSAUpdate.RewriteUse(*UsesToRename.pop_back_val()); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "\n"); } // PredBB no longer jumps to BB, remove entries in the PHI node for the edge @@ -2658,8 +2661,8 @@ bool JumpThreadingPass::ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard, BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween( BB, PredUnguardedBlock, Guard, UnguardedMapping); assert(UnguardedBlock && "Could not create the unguarded block?"); - DEBUG(dbgs() << "Moved guard " << *Guard << " to block " - << GuardedBlock->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block " + << GuardedBlock->getName() << "\n"); // DuplicateInstructionsInSplitBetween inserts a new block "BB.split" between // PredBB and BB. We need to perform two inserts and one delete for each of // the above calls to update Dominators. 
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp index b9d10d9f4c8..8c0747782d7 100644 --- a/llvm/lib/Transforms/Scalar/LICM.cpp +++ b/llvm/lib/Transforms/Scalar/LICM.cpp @@ -392,7 +392,7 @@ bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI, // If the instruction is dead, we would try to sink it because it isn't // used in the loop, instead, just delete it. if (isInstructionTriviallyDead(&I, TLI)) { - DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); + LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); salvageDebugInfo(I); ++II; CurAST->deleteValue(&I); @@ -461,7 +461,8 @@ bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI, // just fold it. if (Constant *C = ConstantFoldInstruction( &I, I.getModule()->getDataLayout(), TLI)) { - DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n'); + LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C + << '\n'); CurAST->copyValue(&I, C); I.replaceAllUsesWith(C); if (isInstructionTriviallyDead(&I, TLI)) { @@ -927,7 +928,7 @@ static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, bool FreeInLoop) { - DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n"); + LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n"); ORE->emit([&]() { return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I) << "sinking " << ore::NV("Inst", &I); @@ -1029,8 +1030,8 @@ static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) { auto *Preheader = CurLoop->getLoopPreheader(); - DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I - << "\n"); + LLVM_DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I + << "\n"); ORE->emit([&]() { return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting " << ore::NV("Inst", &I); @@ -1410,8 +1411,8 @@ bool llvm::promoteLoopAccessesToScalars( return false; // Otherwise, this is safe to promote, lets do it! - DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr - << '\n'); + LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr + << '\n'); ORE->emit([&]() { return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar", LoopUses[0]) diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp index c804115f415..3b41b5d96c8 100644 --- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp +++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp @@ -244,9 +244,9 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) { if (ItersAhead > getMaxPrefetchIterationsAhead()) return MadeChange; - DEBUG(dbgs() << "Prefetching " << ItersAhead - << " iterations ahead (loop size: " << LoopSize << ") in " - << L->getHeader()->getParent()->getName() << ": " << *L); + LLVM_DEBUG(dbgs() << "Prefetching " << ItersAhead + << " iterations ahead (loop size: " << LoopSize << ") in " + << L->getHeader()->getParent()->getName() << ": " << *L); SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads; for (const auto BB : L->blocks()) { @@ -320,8 +320,8 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) { ConstantInt::get(I32, MemI->mayReadFromMemory() ? 
0 : 1), ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)}); ++NumPrefetches; - DEBUG(dbgs() << " Access: " << *PtrValue << ", SCEV: " << *LSCEV - << "\n"); + LLVM_DEBUG(dbgs() << " Access: " << *PtrValue << ", SCEV: " << *LSCEV + << "\n"); ORE->emit([&]() { return OptimizationRemark(DEBUG_TYPE, "Prefetched", MemI) << "prefetched memory access"; diff --git a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp index 15cd1086f20..d412025d7e9 100644 --- a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp +++ b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp @@ -142,14 +142,15 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT, // of trouble. BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader || !L->hasDedicatedExits()) { - DEBUG(dbgs() - << "Deletion requires Loop with preheader and dedicated exits.\n"); + LLVM_DEBUG( + dbgs() + << "Deletion requires Loop with preheader and dedicated exits.\n"); return LoopDeletionResult::Unmodified; } // We can't remove loops that contain subloops. If the subloops were dead, // they would already have been removed in earlier executions of this pass. if (L->begin() != L->end()) { - DEBUG(dbgs() << "Loop contains subloops.\n"); + LLVM_DEBUG(dbgs() << "Loop contains subloops.\n"); return LoopDeletionResult::Unmodified; } @@ -157,7 +158,7 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT, BasicBlock *ExitBlock = L->getUniqueExitBlock(); if (ExitBlock && isLoopNeverExecuted(L)) { - DEBUG(dbgs() << "Loop is proven to never execute, delete it!"); + LLVM_DEBUG(dbgs() << "Loop is proven to never execute, delete it!"); // Set incoming value to undef for phi nodes in the exit block. for (PHINode &P : ExitBlock->phis()) { std::fill(P.incoming_values().begin(), P.incoming_values().end(), @@ -178,13 +179,13 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT, // block will be branched to, or trying to preserve the branching logic in // a loop invariant manner. if (!ExitBlock) { - DEBUG(dbgs() << "Deletion requires single exit block\n"); + LLVM_DEBUG(dbgs() << "Deletion requires single exit block\n"); return LoopDeletionResult::Unmodified; } // Finally, we have to check that the loop really is dead. bool Changed = false; if (!isLoopDead(L, SE, ExitingBlocks, ExitBlock, Changed, Preheader)) { - DEBUG(dbgs() << "Loop is not invariant, cannot delete.\n"); + LLVM_DEBUG(dbgs() << "Loop is not invariant, cannot delete.\n"); return Changed ? LoopDeletionResult::Modified : LoopDeletionResult::Unmodified; } @@ -193,12 +194,12 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT, // They could be infinite, in which case we'd be changing program behavior. const SCEV *S = SE.getMaxBackedgeTakenCount(L); if (isa<SCEVCouldNotCompute>(S)) { - DEBUG(dbgs() << "Could not compute SCEV MaxBackedgeTakenCount.\n"); + LLVM_DEBUG(dbgs() << "Could not compute SCEV MaxBackedgeTakenCount.\n"); return Changed ? 
LoopDeletionResult::Modified : LoopDeletionResult::Unmodified; } - DEBUG(dbgs() << "Loop is invariant, delete it!"); + LLVM_DEBUG(dbgs() << "Loop is invariant, delete it!"); deleteDeadLoop(L, &DT, &SE, &LI); ++NumDeleted; @@ -209,8 +210,8 @@ PreservedAnalyses LoopDeletionPass::run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &Updater) { - DEBUG(dbgs() << "Analyzing Loop for deletion: "); - DEBUG(L.dump()); + LLVM_DEBUG(dbgs() << "Analyzing Loop for deletion: "); + LLVM_DEBUG(L.dump()); std::string LoopName = L.getName(); auto Result = deleteLoopIfDead(&L, AR.DT, AR.SE, AR.LI); if (Result == LoopDeletionResult::Unmodified) @@ -255,8 +256,8 @@ bool LoopDeletionLegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) { ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE(); LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); - DEBUG(dbgs() << "Analyzing Loop for deletion: "); - DEBUG(L->dump()); + LLVM_DEBUG(dbgs() << "Analyzing Loop for deletion: "); + LLVM_DEBUG(L->dump()); LoopDeletionResult Result = deleteLoopIfDead(L, DT, SE, LI); diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp index a4da0940e33..06083a4f508 100644 --- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp +++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp @@ -362,9 +362,11 @@ public: std::tie(LoadToPart, NewElt) = LoadToPartition.insert(std::make_pair(Inst, PartI)); if (!NewElt) { - DEBUG(dbgs() << "Merging partitions due to this load in multiple " - << "partitions: " << PartI << ", " - << LoadToPart->second << "\n" << *Inst << "\n"); + LLVM_DEBUG(dbgs() + << "Merging partitions due to this load in multiple " + << "partitions: " << PartI << ", " << LoadToPart->second + << "\n" + << *Inst << "\n"); auto PartJ = I; do { @@ -602,7 +604,7 @@ public: const SmallVectorImpl<Dependence> &Dependences) { Accesses.append(Instructions.begin(), Instructions.end()); - DEBUG(dbgs() << "Backward dependences:\n"); + LLVM_DEBUG(dbgs() << "Backward dependences:\n"); for (auto &Dep : Dependences) if (Dep.isPossiblyBackward()) { // Note that the designations source and destination follow the program @@ -611,7 +613,7 @@ public: ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd; --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd; - DEBUG(Dep.print(dbgs(), 2, Instructions)); + LLVM_DEBUG(Dep.print(dbgs(), 2, Instructions)); } } @@ -632,8 +634,9 @@ public: bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) { assert(L->empty() && "Only process inner loops."); - DEBUG(dbgs() << "\nLDist: In \"" << L->getHeader()->getParent()->getName() - << "\" checking " << *L << "\n"); + LLVM_DEBUG(dbgs() << "\nLDist: In \"" + << L->getHeader()->getParent()->getName() + << "\" checking " << *L << "\n"); if (!L->getExitBlock()) return fail("MultipleExitBlocks", "multiple exit blocks"); @@ -705,7 +708,7 @@ public: for (auto *Inst : DefsUsedOutside) Partitions.addToNewNonCyclicPartition(Inst); - DEBUG(dbgs() << "Seeded partitions:\n" << Partitions); + LLVM_DEBUG(dbgs() << "Seeded partitions:\n" << Partitions); if (Partitions.getSize() < 2) return fail("CantIsolateUnsafeDeps", "cannot isolate unsafe dependencies"); @@ -713,20 +716,20 @@ public: // Run the merge heuristics: Merge non-cyclic adjacent partitions since we // should be able to vectorize these together. 
Partitions.mergeBeforePopulating(); - DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions); + LLVM_DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions); if (Partitions.getSize() < 2) return fail("CantIsolateUnsafeDeps", "cannot isolate unsafe dependencies"); // Now, populate the partitions with non-memory operations. Partitions.populateUsedSet(); - DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions); + LLVM_DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions); // In order to preserve original lexical order for loads, keep them in the // partition that we set up in the MemoryInstructionDependences loop. if (Partitions.mergeToAvoidDuplicatedLoads()) { - DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n" - << Partitions); + LLVM_DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n" + << Partitions); if (Partitions.getSize() < 2) return fail("CantIsolateUnsafeDeps", "cannot isolate unsafe dependencies"); @@ -740,7 +743,7 @@ public: return fail("TooManySCEVRuntimeChecks", "too many SCEV run-time checks needed.\n"); - DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n"); + LLVM_DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n"); // We're done forming the partitions set up the reverse mapping from // instructions to partitions. Partitions.setupPartitionIdOnInstructions(); @@ -759,8 +762,8 @@ public: RtPtrChecking); if (!Pred.isAlwaysTrue() || !Checks.empty()) { - DEBUG(dbgs() << "\nPointers:\n"); - DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks)); + LLVM_DEBUG(dbgs() << "\nPointers:\n"); + LLVM_DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks)); LoopVersioning LVer(*LAI, L, LI, DT, SE, false); LVer.setAliasChecks(std::move(Checks)); LVer.setSCEVChecks(LAI->getPSE().getUnionPredicate()); @@ -775,8 +778,8 @@ public: // Now, we remove the instruction from each loop that don't belong to that // partition. Partitions.removeUnusedInsts(); - DEBUG(dbgs() << "\nAfter removing unused Instrs:\n"); - DEBUG(Partitions.printBlocks()); + LLVM_DEBUG(dbgs() << "\nAfter removing unused Instrs:\n"); + LLVM_DEBUG(Partitions.printBlocks()); if (LDistVerify) { LI->verify(*DT); @@ -798,7 +801,7 @@ public: LLVMContext &Ctx = F->getContext(); bool Forced = isForced().getValueOr(false); - DEBUG(dbgs() << "Skipping; " << Message << "\n"); + LLVM_DEBUG(dbgs() << "Skipping; " << Message << "\n"); // With Rpass-missed report that distribution failed. 
ORE->emit([&]() { diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp index 5bfb44bc151..abd6a81da1e 100644 --- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -310,9 +310,9 @@ bool LoopIdiomRecognize::runOnCountableLoop() { SmallVector<BasicBlock *, 8> ExitBlocks; CurLoop->getUniqueExitBlocks(ExitBlocks); - DEBUG(dbgs() << "loop-idiom Scanning: F[" - << CurLoop->getHeader()->getParent()->getName() << "] Loop %" - << CurLoop->getHeader()->getName() << "\n"); + LLVM_DEBUG(dbgs() << "loop-idiom Scanning: F[" + << CurLoop->getHeader()->getParent()->getName() + << "] Loop %" << CurLoop->getHeader()->getName() << "\n"); bool MadeChange = false; @@ -936,8 +936,9 @@ bool LoopIdiomRecognize::processLoopStridedStore( NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes}); } - DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n" - << " from store to: " << *Ev << " at: " << *TheStore << "\n"); + LLVM_DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n" + << " from store to: " << *Ev << " at: " << *TheStore + << "\n"); NewCall->setDebugLoc(TheStore->getDebugLoc()); // Okay, the memset has been formed. Zap the original store and anything that @@ -1067,9 +1068,10 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI, } NewCall->setDebugLoc(SI->getDebugLoc()); - DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n" - << " from load ptr=" << *LoadEv << " at: " << *LI << "\n" - << " from store ptr=" << *StoreEv << " at: " << *SI << "\n"); + LLVM_DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n" + << " from load ptr=" << *LoadEv << " at: " << *LI << "\n" + << " from store ptr=" << *StoreEv << " at: " << *SI + << "\n"); // Okay, the memcpy has been formed. Zap the original store and anything that // feeds into it. @@ -1085,9 +1087,9 @@ bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset, bool IsLoopMemset) { if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) { if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) { - DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName() - << " : LIR " << (IsMemset ? "Memset" : "Memcpy") - << " avoided: multi-block top-level loop\n"); + LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName() + << " : LIR " << (IsMemset ? "Memset" : "Memcpy") + << " avoided: multi-block top-level loop\n"); return true; } } diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp index 74c8a5a543e..9addb04e81b 100644 --- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp +++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp @@ -77,8 +77,8 @@ static const unsigned MaxLoopNestDepth = 10; static void printDepMatrix(CharMatrix &DepMatrix) { for (auto &Row : DepMatrix) { for (auto D : Row) - DEBUG(dbgs() << D << " "); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << D << " "); + LLVM_DEBUG(dbgs() << "\n"); } } #endif @@ -107,8 +107,8 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level, } } - DEBUG(dbgs() << "Found " << MemInstr.size() - << " Loads and Stores to analyze\n"); + LLVM_DEBUG(dbgs() << "Found " << MemInstr.size() + << " Loads and Stores to analyze\n"); ValueVector::iterator I, IE, J, JE; @@ -125,11 +125,11 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level, // Track Output, Flow, and Anti dependencies. 
if (auto D = DI->depends(Src, Dst, true)) { assert(D->isOrdered() && "Expected an output, flow or anti dep."); - DEBUG(StringRef DepType = - D->isFlow() ? "flow" : D->isAnti() ? "anti" : "output"; - dbgs() << "Found " << DepType - << " dependency between Src and Dst\n" - << " Src:" << *Src << "\n Dst:" << *Dst << '\n'); + LLVM_DEBUG(StringRef DepType = + D->isFlow() ? "flow" : D->isAnti() ? "anti" : "output"; + dbgs() << "Found " << DepType + << " dependency between Src and Dst\n" + << " Src:" << *Src << "\n Dst:" << *Dst << '\n'); unsigned Levels = D->getLevels(); char Direction; for (unsigned II = 1; II <= Levels; ++II) { @@ -169,8 +169,8 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level, DepMatrix.push_back(Dep); if (DepMatrix.size() > MaxMemInstrCount) { - DEBUG(dbgs() << "Cannot handle more than " << MaxMemInstrCount - << " dependencies inside loop\n"); + LLVM_DEBUG(dbgs() << "Cannot handle more than " << MaxMemInstrCount + << " dependencies inside loop\n"); return false; } } @@ -272,9 +272,9 @@ static bool isLegalToInterChangeLoops(CharMatrix &DepMatrix, } static void populateWorklist(Loop &L, SmallVector<LoopVector, 8> &V) { - DEBUG(dbgs() << "Calling populateWorklist on Func: " - << L.getHeader()->getParent()->getName() << " Loop: %" - << L.getHeader()->getName() << '\n'); + LLVM_DEBUG(dbgs() << "Calling populateWorklist on Func: " + << L.getHeader()->getParent()->getName() << " Loop: %" + << L.getHeader()->getName() << '\n'); LoopVector LoopList; Loop *CurrentLoop = &L; const std::vector<Loop *> *Vec = &CurrentLoop->getSubLoops(); @@ -478,7 +478,7 @@ struct LoopInterchange : public FunctionPass { for (Loop *L : *LI) populateWorklist(*L, Worklist); - DEBUG(dbgs() << "Worklist size = " << Worklist.size() << "\n"); + LLVM_DEBUG(dbgs() << "Worklist size = " << Worklist.size() << "\n"); bool Changed = true; while (!Worklist.empty()) { LoopVector LoopList = Worklist.pop_back_val(); @@ -491,15 +491,15 @@ struct LoopInterchange : public FunctionPass { for (Loop *L : LoopList) { const SCEV *ExitCountOuter = SE->getBackedgeTakenCount(L); if (ExitCountOuter == SE->getCouldNotCompute()) { - DEBUG(dbgs() << "Couldn't compute backedge count\n"); + LLVM_DEBUG(dbgs() << "Couldn't compute backedge count\n"); return false; } if (L->getNumBackEdges() != 1) { - DEBUG(dbgs() << "NumBackEdges is not equal to 1\n"); + LLVM_DEBUG(dbgs() << "NumBackEdges is not equal to 1\n"); return false; } if (!L->getExitingBlock()) { - DEBUG(dbgs() << "Loop doesn't have unique exit block\n"); + LLVM_DEBUG(dbgs() << "Loop doesn't have unique exit block\n"); return false; } } @@ -516,37 +516,38 @@ struct LoopInterchange : public FunctionPass { bool Changed = false; unsigned LoopNestDepth = LoopList.size(); if (LoopNestDepth < 2) { - DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n"); + LLVM_DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n"); return false; } if (LoopNestDepth > MaxLoopNestDepth) { - DEBUG(dbgs() << "Cannot handle loops of depth greater than " - << MaxLoopNestDepth << "\n"); + LLVM_DEBUG(dbgs() << "Cannot handle loops of depth greater than " + << MaxLoopNestDepth << "\n"); return false; } if (!isComputableLoopNest(LoopList)) { - DEBUG(dbgs() << "Not valid loop candidate for interchange\n"); + LLVM_DEBUG(dbgs() << "Not valid loop candidate for interchange\n"); return false; } - DEBUG(dbgs() << "Processing LoopList of size = " << LoopNestDepth << "\n"); + LLVM_DEBUG(dbgs() << "Processing LoopList of size = " << LoopNestDepth + << "\n"); CharMatrix 
DependencyMatrix; Loop *OuterMostLoop = *(LoopList.begin()); if (!populateDependencyMatrix(DependencyMatrix, LoopNestDepth, OuterMostLoop, DI)) { - DEBUG(dbgs() << "Populating dependency matrix failed\n"); + LLVM_DEBUG(dbgs() << "Populating dependency matrix failed\n"); return false; } #ifdef DUMP_DEP_MATRICIES - DEBUG(dbgs() << "Dependence before interchange\n"); + LLVM_DEBUG(dbgs() << "Dependence before interchange\n"); printDepMatrix(DependencyMatrix); #endif // Get the Outermost loop exit. BasicBlock *LoopNestExit = OuterMostLoop->getExitBlock(); if (!LoopNestExit) { - DEBUG(dbgs() << "OuterMostLoop needs an unique exit block"); + LLVM_DEBUG(dbgs() << "OuterMostLoop needs an unique exit block"); return false; } @@ -563,7 +564,7 @@ struct LoopInterchange : public FunctionPass { // Update the DependencyMatrix interChangeDependencies(DependencyMatrix, i, i - 1); #ifdef DUMP_DEP_MATRICIES - DEBUG(dbgs() << "Dependence after interchange\n"); + LLVM_DEBUG(dbgs() << "Dependence after interchange\n"); printDepMatrix(DependencyMatrix); #endif Changed |= Interchanged; @@ -574,21 +575,21 @@ struct LoopInterchange : public FunctionPass { bool processLoop(LoopVector LoopList, unsigned InnerLoopId, unsigned OuterLoopId, BasicBlock *LoopNestExit, std::vector<std::vector<char>> &DependencyMatrix) { - DEBUG(dbgs() << "Processing Inner Loop Id = " << InnerLoopId - << " and OuterLoopId = " << OuterLoopId << "\n"); + LLVM_DEBUG(dbgs() << "Processing Inner Loop Id = " << InnerLoopId + << " and OuterLoopId = " << OuterLoopId << "\n"); Loop *InnerLoop = LoopList[InnerLoopId]; Loop *OuterLoop = LoopList[OuterLoopId]; LoopInterchangeLegality LIL(OuterLoop, InnerLoop, SE, LI, DT, PreserveLCSSA, ORE); if (!LIL.canInterchangeLoops(InnerLoopId, OuterLoopId, DependencyMatrix)) { - DEBUG(dbgs() << "Not interchanging loops. Cannot prove legality.\n"); + LLVM_DEBUG(dbgs() << "Not interchanging loops. Cannot prove legality.\n"); return false; } - DEBUG(dbgs() << "Loops are legal to interchange\n"); + LLVM_DEBUG(dbgs() << "Loops are legal to interchange\n"); LoopInterchangeProfitability LIP(OuterLoop, InnerLoop, SE, ORE); if (!LIP.isProfitable(InnerLoopId, OuterLoopId, DependencyMatrix)) { - DEBUG(dbgs() << "Interchanging loops not profitable.\n"); + LLVM_DEBUG(dbgs() << "Interchanging loops not profitable.\n"); return false; } @@ -602,7 +603,7 @@ struct LoopInterchange : public FunctionPass { LoopInterchangeTransform LIT(OuterLoop, InnerLoop, SE, LI, DT, LoopNestExit, LIL.hasInnerLoopReduction()); LIT.transform(); - DEBUG(dbgs() << "Loops interchanged.\n"); + LLVM_DEBUG(dbgs() << "Loops interchanged.\n"); LoopsInterchanged++; return true; } @@ -651,7 +652,7 @@ bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) { BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch(); - DEBUG(dbgs() << "Checking if loops are tightly nested\n"); + LLVM_DEBUG(dbgs() << "Checking if loops are tightly nested\n"); // A perfectly nested loop will not have any branch in between the outer and // inner block i.e. 
outer header will branch to either inner preheader and @@ -665,14 +666,14 @@ bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) { if (Succ != InnerLoopPreHeader && Succ != OuterLoopLatch) return false; - DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch\n"); + LLVM_DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch\n"); // We do not have any basic block in between now make sure the outer header // and outer loop latch doesn't contain any unsafe instructions. if (containsUnsafeInstructionsInHeader(OuterLoopHeader) || containsUnsafeInstructionsInLatch(OuterLoopLatch)) return false; - DEBUG(dbgs() << "Loops are perfectly nested\n"); + LLVM_DEBUG(dbgs() << "Loops are perfectly nested\n"); // We have a perfect loop nest. return true; } @@ -714,7 +715,7 @@ bool LoopInterchangeLegality::findInductionAndReductions( else if (RecurrenceDescriptor::isReductionPHI(&PHI, L, RD)) Reductions.push_back(&PHI); else { - DEBUG( + LLVM_DEBUG( dbgs() << "Failed to recognize PHI as an induction or reduction.\n"); return false; } @@ -750,8 +751,9 @@ bool LoopInterchangeLegality::currentLimitations() { OuterLoop->getExitingBlock() != OuterLoop->getLoopLatch() || !isa<BranchInst>(InnerLoopLatch->getTerminator()) || !isa<BranchInst>(OuterLoop->getLoopLatch()->getTerminator())) { - DEBUG(dbgs() << "Loops where the latch is not the exiting block are not" - << " supported currently.\n"); + LLVM_DEBUG( + dbgs() << "Loops where the latch is not the exiting block are not" + << " supported currently.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "ExitingNotLatch", OuterLoop->getStartLoc(), @@ -766,8 +768,9 @@ bool LoopInterchangeLegality::currentLimitations() { SmallVector<PHINode *, 8> Inductions; SmallVector<PHINode *, 8> Reductions; if (!findInductionAndReductions(InnerLoop, Inductions, Reductions)) { - DEBUG(dbgs() << "Only inner loops with induction or reduction PHI nodes " - << "are supported currently.\n"); + LLVM_DEBUG( + dbgs() << "Only inner loops with induction or reduction PHI nodes " + << "are supported currently.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedPHIInner", InnerLoop->getStartLoc(), @@ -780,8 +783,9 @@ bool LoopInterchangeLegality::currentLimitations() { // TODO: Currently we handle only loops with 1 induction variable. if (Inductions.size() != 1) { - DEBUG(dbgs() << "We currently only support loops with 1 induction variable." - << "Failed to interchange due to current limitation\n"); + LLVM_DEBUG( + dbgs() << "We currently only support loops with 1 induction variable." + << "Failed to interchange due to current limitation\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "MultiInductionInner", InnerLoop->getStartLoc(), @@ -797,8 +801,9 @@ bool LoopInterchangeLegality::currentLimitations() { InnerInductionVar = Inductions.pop_back_val(); Reductions.clear(); if (!findInductionAndReductions(OuterLoop, Inductions, Reductions)) { - DEBUG(dbgs() << "Only outer loops with induction or reduction PHI nodes " - << "are supported currently.\n"); + LLVM_DEBUG( + dbgs() << "Only outer loops with induction or reduction PHI nodes " + << "are supported currently.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedPHIOuter", OuterLoop->getStartLoc(), @@ -812,8 +817,8 @@ bool LoopInterchangeLegality::currentLimitations() { // Outer loop cannot have reduction because then loops will not be tightly // nested. 
if (!Reductions.empty()) { - DEBUG(dbgs() << "Outer loops with reductions are not supported " - << "currently.\n"); + LLVM_DEBUG(dbgs() << "Outer loops with reductions are not supported " + << "currently.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "ReductionsOuter", OuterLoop->getStartLoc(), @@ -825,8 +830,8 @@ bool LoopInterchangeLegality::currentLimitations() { } // TODO: Currently we handle only loops with 1 induction variable. if (Inductions.size() != 1) { - DEBUG(dbgs() << "Loops with more than 1 induction variables are not " - << "supported currently.\n"); + LLVM_DEBUG(dbgs() << "Loops with more than 1 induction variables are not " + << "supported currently.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "MultiIndutionOuter", OuterLoop->getStartLoc(), @@ -839,7 +844,7 @@ bool LoopInterchangeLegality::currentLimitations() { // TODO: Triangular loops are not handled for now. if (!isLoopStructureUnderstood(InnerInductionVar)) { - DEBUG(dbgs() << "Loop structure not understood by pass\n"); + LLVM_DEBUG(dbgs() << "Loop structure not understood by pass\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedStructureInner", InnerLoop->getStartLoc(), @@ -852,7 +857,8 @@ bool LoopInterchangeLegality::currentLimitations() { // TODO: We only handle LCSSA PHI's corresponding to reduction for now. BasicBlock *InnerExit = InnerLoop->getExitBlock(); if (!containsSafePHI(InnerExit, false)) { - DEBUG(dbgs() << "Can only handle LCSSA PHIs in inner loops currently.\n"); + LLVM_DEBUG( + dbgs() << "Can only handle LCSSA PHIs in inner loops currently.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "NoLCSSAPHIOuterInner", InnerLoop->getStartLoc(), @@ -882,8 +888,9 @@ bool LoopInterchangeLegality::currentLimitations() { dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(0)); if (!InnerIndexVarInc) { - DEBUG(dbgs() << "Did not find an instruction to increment the induction " - << "variable.\n"); + LLVM_DEBUG( + dbgs() << "Did not find an instruction to increment the induction " + << "variable.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "NoIncrementInInner", InnerLoop->getStartLoc(), @@ -907,8 +914,8 @@ bool LoopInterchangeLegality::currentLimitations() { // We found an instruction. If this is not induction variable then it is not // safe to split this loop latch. if (!I.isIdenticalTo(InnerIndexVarInc)) { - DEBUG(dbgs() << "Found unsupported instructions between induction " - << "variable increment and branch.\n"); + LLVM_DEBUG(dbgs() << "Found unsupported instructions between induction " + << "variable increment and branch.\n"); ORE->emit([&]() { return OptimizationRemarkMissed( DEBUG_TYPE, "UnsupportedInsBetweenInduction", @@ -925,7 +932,7 @@ bool LoopInterchangeLegality::currentLimitations() { // The loop latch ended and we didn't find the induction variable return as // current limitation. 
if (!FoundInduction) { - DEBUG(dbgs() << "Did not find the induction variable.\n"); + LLVM_DEBUG(dbgs() << "Did not find the induction variable.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "NoIndutionVariable", InnerLoop->getStartLoc(), @@ -978,9 +985,9 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId, unsigned OuterLoopId, CharMatrix &DepMatrix) { if (!isLegalToInterChangeLoops(DepMatrix, InnerLoopId, OuterLoopId)) { - DEBUG(dbgs() << "Failed interchange InnerLoopId = " << InnerLoopId - << " and OuterLoopId = " << OuterLoopId - << " due to dependence\n"); + LLVM_DEBUG(dbgs() << "Failed interchange InnerLoopId = " << InnerLoopId + << " and OuterLoopId = " << OuterLoopId + << " due to dependence\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "Dependence", InnerLoop->getStartLoc(), @@ -996,8 +1003,9 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId, // readnone functions do not prevent interchanging. if (CI->doesNotReadMemory()) continue; - DEBUG(dbgs() << "Loops with call instructions cannot be interchanged " - << "safely."); + LLVM_DEBUG( + dbgs() << "Loops with call instructions cannot be interchanged " + << "safely."); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "CallInst", CI->getDebugLoc(), @@ -1033,13 +1041,13 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId, // TODO: The loops could not be interchanged due to current limitations in the // transform module. if (currentLimitations()) { - DEBUG(dbgs() << "Not legal because of current transform limitation\n"); + LLVM_DEBUG(dbgs() << "Not legal because of current transform limitation\n"); return false; } // Check if the loops are tightly nested. if (!tightlyNested(OuterLoop, InnerLoop)) { - DEBUG(dbgs() << "Loops not tightly nested\n"); + LLVM_DEBUG(dbgs() << "Loops not tightly nested\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "NotTightlyNested", InnerLoop->getStartLoc(), @@ -1051,7 +1059,7 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId, } if (!areLoopExitPHIsSupported(OuterLoop, InnerLoop)) { - DEBUG(dbgs() << "Found unsupported PHI nodes in outer loop exit.\n"); + LLVM_DEBUG(dbgs() << "Found unsupported PHI nodes in outer loop exit.\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedExitPHI", OuterLoop->getStartLoc(), @@ -1145,7 +1153,7 @@ bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId, // of induction variables in the instruction and allows reordering if number // of bad orders is more than good. int Cost = getInstrOrderCost(); - DEBUG(dbgs() << "Cost = " << Cost << "\n"); + LLVM_DEBUG(dbgs() << "Cost = " << Cost << "\n"); if (Cost < -LoopInterchangeCostThreshold) return true; @@ -1258,10 +1266,10 @@ bool LoopInterchangeTransform::transform() { if (InnerLoop->getSubLoops().empty()) { BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); - DEBUG(dbgs() << "Calling Split Inner Loop\n"); + LLVM_DEBUG(dbgs() << "Calling Split Inner Loop\n"); PHINode *InductionPHI = getInductionVariable(InnerLoop, SE); if (!InductionPHI) { - DEBUG(dbgs() << "Failed to find the point to split loop latch \n"); + LLVM_DEBUG(dbgs() << "Failed to find the point to split loop latch \n"); return false; } @@ -1279,16 +1287,16 @@ bool LoopInterchangeTransform::transform() { // incremented/decremented. // TODO: This splitting logic may not work always. Fix this. 
splitInnerLoopLatch(InnerIndexVar); - DEBUG(dbgs() << "splitInnerLoopLatch done\n"); + LLVM_DEBUG(dbgs() << "splitInnerLoopLatch done\n"); // Splits the inner loops phi nodes out into a separate basic block. splitInnerLoopHeader(); - DEBUG(dbgs() << "splitInnerLoopHeader done\n"); + LLVM_DEBUG(dbgs() << "splitInnerLoopHeader done\n"); } Transformed |= adjustLoopLinks(); if (!Transformed) { - DEBUG(dbgs() << "adjustLoopLinks failed\n"); + LLVM_DEBUG(dbgs() << "adjustLoopLinks failed\n"); return false; } @@ -1322,8 +1330,8 @@ void LoopInterchangeTransform::splitInnerLoopHeader() { } } - DEBUG(dbgs() << "Output of splitInnerLoopHeader InnerLoopHeaderSucc & " - "InnerLoopHeader\n"); + LLVM_DEBUG(dbgs() << "Output of splitInnerLoopHeader InnerLoopHeaderSucc & " + "InnerLoopHeader\n"); } /// Move all instructions except the terminator from FromBB right before @@ -1370,7 +1378,7 @@ static void updateSuccessor(BranchInst *BI, BasicBlock *OldBB, } bool LoopInterchangeTransform::adjustLoopBranches() { - DEBUG(dbgs() << "adjustLoopBranches called\n"); + LLVM_DEBUG(dbgs() << "adjustLoopBranches called\n"); std::vector<DominatorTree::UpdateType> DTUpdates; // Adjust the loop preheader diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp index a7c27662aa0..7f882191d1f 100644 --- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp +++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp @@ -285,9 +285,11 @@ public: Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) { if (LoadToSingleCand[Cand.Load] != &Cand) { - DEBUG(dbgs() << "Removing from candidates: \n" << Cand - << " The load may have multiple stores forwarding to " - << "it\n"); + LLVM_DEBUG( + dbgs() << "Removing from candidates: \n" + << Cand + << " The load may have multiple stores forwarding to " + << "it\n"); return true; } return false; @@ -395,8 +397,9 @@ public: return false; }); - DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size() << "):\n"); - DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks)); + LLVM_DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size() + << "):\n"); + LLVM_DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks)); return Checks; } @@ -440,8 +443,8 @@ public: /// Top-level driver for each loop: find store->load forwarding /// candidates, add run-time checks and perform transformation. bool processLoop() { - DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName() - << "\" checking " << *L << "\n"); + LLVM_DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName() + << "\" checking " << *L << "\n"); // Look for store-to-load forwarding cases across the // backedge. E.g.: @@ -480,7 +483,7 @@ public: SmallVector<StoreToLoadForwardingCandidate, 4> Candidates; unsigned NumForwarding = 0; for (const StoreToLoadForwardingCandidate Cand : StoreToLoadDependences) { - DEBUG(dbgs() << "Candidate " << Cand); + LLVM_DEBUG(dbgs() << "Candidate " << Cand); // Make sure that the stored values is available everywhere in the loop in // the next iteration. @@ -499,9 +502,10 @@ public: continue; ++NumForwarding; - DEBUG(dbgs() - << NumForwarding - << ". Valid store-to-load forwarding across the loop backedge\n"); + LLVM_DEBUG( + dbgs() + << NumForwarding + << ". Valid store-to-load forwarding across the loop backedge\n"); Candidates.push_back(Cand); } if (Candidates.empty()) @@ -514,25 +518,26 @@ public: // Too many checks are likely to outweigh the benefits of forwarding. 
if (Checks.size() > Candidates.size() * CheckPerElim) { - DEBUG(dbgs() << "Too many run-time checks needed.\n"); + LLVM_DEBUG(dbgs() << "Too many run-time checks needed.\n"); return false; } if (LAI.getPSE().getUnionPredicate().getComplexity() > LoadElimSCEVCheckThreshold) { - DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n"); + LLVM_DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n"); return false; } if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) { if (L->getHeader()->getParent()->optForSize()) { - DEBUG(dbgs() << "Versioning is needed but not allowed when optimizing " - "for size.\n"); + LLVM_DEBUG( + dbgs() << "Versioning is needed but not allowed when optimizing " + "for size.\n"); return false; } if (!L->isLoopSimplifyForm()) { - DEBUG(dbgs() << "Loop is not is loop-simplify form"); + LLVM_DEBUG(dbgs() << "Loop is not is loop-simplify form"); return false; } diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp index 6102890bc5e..561ceea1d88 100644 --- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp +++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp @@ -411,11 +411,11 @@ LoopPredication::generateLoopLatchCheck(Type *RangeCheckType) { if (!NewLatchCheck.IV) return None; NewLatchCheck.Limit = SE->getTruncateExpr(LatchCheck.Limit, RangeCheckType); - DEBUG(dbgs() << "IV of type: " << *LatchType - << "can be represented as range check type:" << *RangeCheckType - << "\n"); - DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n"); - DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n"); + LLVM_DEBUG(dbgs() << "IV of type: " << *LatchType + << "can be represented as range check type:" + << *RangeCheckType << "\n"); + LLVM_DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n"); + LLVM_DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n"); return NewLatchCheck; } @@ -448,15 +448,15 @@ Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop( SE->getMinusSCEV(LatchStart, SE->getOne(Ty))); if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) || !CanExpand(LatchLimit) || !CanExpand(RHS)) { - DEBUG(dbgs() << "Can't expand limit check!\n"); + LLVM_DEBUG(dbgs() << "Can't expand limit check!\n"); return None; } auto LimitCheckPred = ICmpInst::getFlippedStrictnessPredicate(LatchCheck.Pred); - DEBUG(dbgs() << "LHS: " << *LatchLimit << "\n"); - DEBUG(dbgs() << "RHS: " << *RHS << "\n"); - DEBUG(dbgs() << "Pred: " << LimitCheckPred << "\n"); + LLVM_DEBUG(dbgs() << "LHS: " << *LatchLimit << "\n"); + LLVM_DEBUG(dbgs() << "RHS: " << *RHS << "\n"); + LLVM_DEBUG(dbgs() << "Pred: " << LimitCheckPred << "\n"); Instruction *InsertAt = Preheader->getTerminator(); auto *LimitCheck = @@ -475,16 +475,16 @@ Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop( const SCEV *LatchLimit = LatchCheck.Limit; if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) || !CanExpand(LatchLimit)) { - DEBUG(dbgs() << "Can't expand limit check!\n"); + LLVM_DEBUG(dbgs() << "Can't expand limit check!\n"); return None; } // The decrement of the latch check IV should be the same as the // rangeCheckIV. auto *PostDecLatchCheckIV = LatchCheck.IV->getPostIncExpr(*SE); if (RangeCheck.IV != PostDecLatchCheckIV) { - DEBUG(dbgs() << "Not the same. PostDecLatchCheckIV: " - << *PostDecLatchCheckIV - << " and RangeCheckIV: " << *RangeCheck.IV << "\n"); + LLVM_DEBUG(dbgs() << "Not the same. 
PostDecLatchCheckIV: " + << *PostDecLatchCheckIV + << " and RangeCheckIV: " << *RangeCheck.IV << "\n"); return None; } @@ -508,8 +508,8 @@ Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop( Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander, IRBuilder<> &Builder) { - DEBUG(dbgs() << "Analyzing ICmpInst condition:\n"); - DEBUG(ICI->dump()); + LLVM_DEBUG(dbgs() << "Analyzing ICmpInst condition:\n"); + LLVM_DEBUG(ICI->dump()); // parseLoopStructure guarantees that the latch condition is: // ++i <pred> latchLimit, where <pred> is u<, u<=, s<, or s<=. @@ -517,34 +517,34 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI, // i u< guardLimit auto RangeCheck = parseLoopICmp(ICI); if (!RangeCheck) { - DEBUG(dbgs() << "Failed to parse the loop latch condition!\n"); + LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n"); return None; } - DEBUG(dbgs() << "Guard check:\n"); - DEBUG(RangeCheck->dump()); + LLVM_DEBUG(dbgs() << "Guard check:\n"); + LLVM_DEBUG(RangeCheck->dump()); if (RangeCheck->Pred != ICmpInst::ICMP_ULT) { - DEBUG(dbgs() << "Unsupported range check predicate(" << RangeCheck->Pred - << ")!\n"); + LLVM_DEBUG(dbgs() << "Unsupported range check predicate(" + << RangeCheck->Pred << ")!\n"); return None; } auto *RangeCheckIV = RangeCheck->IV; if (!RangeCheckIV->isAffine()) { - DEBUG(dbgs() << "Range check IV is not affine!\n"); + LLVM_DEBUG(dbgs() << "Range check IV is not affine!\n"); return None; } auto *Step = RangeCheckIV->getStepRecurrence(*SE); // We cannot just compare with latch IV step because the latch and range IVs // may have different types. if (!isSupportedStep(Step)) { - DEBUG(dbgs() << "Range check and latch have IVs different steps!\n"); + LLVM_DEBUG(dbgs() << "Range check and latch have IVs different steps!\n"); return None; } auto *Ty = RangeCheckIV->getType(); auto CurrLatchCheckOpt = generateLoopLatchCheck(Ty); if (!CurrLatchCheckOpt) { - DEBUG(dbgs() << "Failed to generate a loop latch check " - "corresponding to range type: " - << *Ty << "\n"); + LLVM_DEBUG(dbgs() << "Failed to generate a loop latch check " + "corresponding to range type: " + << *Ty << "\n"); return None; } @@ -555,7 +555,7 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI, CurrLatchCheck.IV->getStepRecurrence(*SE)->getType() && "Range and latch steps should be of same type!"); if (Step != CurrLatchCheck.IV->getStepRecurrence(*SE)) { - DEBUG(dbgs() << "Range and latch have different step values!\n"); + LLVM_DEBUG(dbgs() << "Range and latch have different step values!\n"); return None; } @@ -571,8 +571,8 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI, bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard, SCEVExpander &Expander) { - DEBUG(dbgs() << "Processing guard:\n"); - DEBUG(Guard->dump()); + LLVM_DEBUG(dbgs() << "Processing guard:\n"); + LLVM_DEBUG(Guard->dump()); IRBuilder<> Builder(cast<Instruction>(Preheader->getTerminator())); @@ -625,7 +625,7 @@ bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard, LastCheck = Builder.CreateAnd(LastCheck, Check); Guard->setOperand(0, LastCheck); - DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n"); + LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n"); return true; } @@ -634,7 +634,7 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() { BasicBlock *LoopLatch = L->getLoopLatch(); if (!LoopLatch) { - DEBUG(dbgs() << "The loop doesn't have a single 
latch!\n"); + LLVM_DEBUG(dbgs() << "The loop doesn't have a single latch!\n"); return None; } @@ -645,7 +645,7 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() { if (!match(LoopLatch->getTerminator(), m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)), TrueDest, FalseDest))) { - DEBUG(dbgs() << "Failed to match the latch terminator!\n"); + LLVM_DEBUG(dbgs() << "Failed to match the latch terminator!\n"); return None; } assert((TrueDest == L->getHeader() || FalseDest == L->getHeader()) && @@ -655,20 +655,20 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() { auto Result = parseLoopICmp(Pred, LHS, RHS); if (!Result) { - DEBUG(dbgs() << "Failed to parse the loop latch condition!\n"); + LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n"); return None; } // Check affine first, so if it's not we don't try to compute the step // recurrence. if (!Result->IV->isAffine()) { - DEBUG(dbgs() << "The induction variable is not affine!\n"); + LLVM_DEBUG(dbgs() << "The induction variable is not affine!\n"); return None; } auto *Step = Result->IV->getStepRecurrence(*SE); if (!isSupportedStep(Step)) { - DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n"); + LLVM_DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n"); return None; } @@ -684,8 +684,8 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() { }; if (IsUnsupportedPredicate(Step, Result->Pred)) { - DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred - << ")!\n"); + LLVM_DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred + << ")!\n"); return None; } return Result; @@ -751,11 +751,11 @@ bool LoopPredication::isLoopProfitableToPredicate() { // less than one, can invert the definition of profitable loop predication. 
float ScaleFactor = LatchExitProbabilityScale; if (ScaleFactor < 1) { - DEBUG( + LLVM_DEBUG( dbgs() << "Ignored user setting for loop-predication-latch-probability-scale: " << LatchExitProbabilityScale << "\n"); - DEBUG(dbgs() << "The value is set to 1.0\n"); + LLVM_DEBUG(dbgs() << "The value is set to 1.0\n"); ScaleFactor = 1.0; } const auto LatchProbabilityThreshold = @@ -778,8 +778,8 @@ bool LoopPredication::isLoopProfitableToPredicate() { bool LoopPredication::runOnLoop(Loop *Loop) { L = Loop; - DEBUG(dbgs() << "Analyzing "); - DEBUG(L->dump()); + LLVM_DEBUG(dbgs() << "Analyzing "); + LLVM_DEBUG(L->dump()); Module *M = L->getHeader()->getModule(); @@ -800,11 +800,11 @@ bool LoopPredication::runOnLoop(Loop *Loop) { return false; LatchCheck = *LatchCheckOpt; - DEBUG(dbgs() << "Latch check:\n"); - DEBUG(LatchCheck.dump()); + LLVM_DEBUG(dbgs() << "Latch check:\n"); + LLVM_DEBUG(LatchCheck.dump()); if (!isLoopProfitableToPredicate()) { - DEBUG(dbgs()<< "Loop not profitable to predicate!\n"); + LLVM_DEBUG(dbgs() << "Loop not profitable to predicate!\n"); return false; } // Collect all the guards into a vector and process later, so as not diff --git a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp index 1f693467647..83dd196f20e 100644 --- a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp +++ b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp @@ -644,14 +644,14 @@ void LoopReroll::collectPossibleIVs(Loop *L, if (IncSCEV->getValue()->isZero() || AInt.uge(MaxInc)) continue; IVToIncMap[&*I] = IncSCEV->getValue()->getSExtValue(); - DEBUG(dbgs() << "LRR: Possible IV: " << *I << " = " << *PHISCEV - << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Possible IV: " << *I << " = " << *PHISCEV + << "\n"); if (isLoopControlIV(L, &*I)) { assert(!LoopControlIV && "Found two loop control only IV"); LoopControlIV = &(*I); - DEBUG(dbgs() << "LRR: Possible loop control only IV: " << *I << " = " - << *PHISCEV << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Possible loop control only IV: " << *I + << " = " << *PHISCEV << "\n"); } else PossibleIVs.push_back(&*I); } @@ -718,8 +718,8 @@ void LoopReroll::collectPossibleReductions(Loop *L, if (!SLR.valid()) continue; - DEBUG(dbgs() << "LRR: Possible reduction: " << *I << " (with " << - SLR.size() << " chained instructions)\n"); + LLVM_DEBUG(dbgs() << "LRR: Possible reduction: " << *I << " (with " + << SLR.size() << " chained instructions)\n"); Reductions.addSLR(SLR); } } @@ -857,7 +857,8 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) { BaseUsers.push_back(II); continue; } else { - DEBUG(dbgs() << "LRR: Aborting due to non-instruction: " << *I << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Aborting due to non-instruction: " << *I + << "\n"); return false; } } @@ -879,7 +880,7 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) { // away. 
if (BaseUsers.size()) { if (Roots.find(0) != Roots.end()) { - DEBUG(dbgs() << "LRR: Multiple roots found for base - aborting!\n"); + LLVM_DEBUG(dbgs() << "LRR: Multiple roots found for base - aborting!\n"); return false; } Roots[0] = Base; @@ -895,9 +896,9 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) { if (KV.first == 0) continue; if (!KV.second->hasNUses(NumBaseUses)) { - DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: " - << "#Base=" << NumBaseUses << ", #Root=" << - KV.second->getNumUses() << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: " + << "#Base=" << NumBaseUses + << ", #Root=" << KV.second->getNumUses() << "\n"); return false; } } @@ -1025,13 +1026,14 @@ bool LoopReroll::DAGRootTracker::findRoots() { // Ensure all sets have the same size. if (RootSets.empty()) { - DEBUG(dbgs() << "LRR: Aborting because no root sets found!\n"); + LLVM_DEBUG(dbgs() << "LRR: Aborting because no root sets found!\n"); return false; } for (auto &V : RootSets) { if (V.Roots.empty() || V.Roots.size() != RootSets[0].Roots.size()) { - DEBUG(dbgs() - << "LRR: Aborting because not all root sets have the same size\n"); + LLVM_DEBUG( + dbgs() + << "LRR: Aborting because not all root sets have the same size\n"); return false; } } @@ -1039,13 +1041,14 @@ bool LoopReroll::DAGRootTracker::findRoots() { Scale = RootSets[0].Roots.size() + 1; if (Scale > IL_MaxRerollIterations) { - DEBUG(dbgs() << "LRR: Aborting - too many iterations found. " - << "#Found=" << Scale << ", #Max=" << IL_MaxRerollIterations - << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Aborting - too many iterations found. " + << "#Found=" << Scale + << ", #Max=" << IL_MaxRerollIterations << "\n"); return false; } - DEBUG(dbgs() << "LRR: Successfully found roots: Scale=" << Scale << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Successfully found roots: Scale=" << Scale + << "\n"); return true; } @@ -1079,7 +1082,7 @@ bool LoopReroll::DAGRootTracker::collectUsedInstructions(SmallInstructionSet &Po // While we're here, check the use sets are the same size. if (V.size() != VBase.size()) { - DEBUG(dbgs() << "LRR: Aborting - use sets are different sizes\n"); + LLVM_DEBUG(dbgs() << "LRR: Aborting - use sets are different sizes\n"); return false; } @@ -1236,17 +1239,17 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { // set. for (auto &KV : Uses) { if (KV.second.count() != 1 && !isIgnorableInst(KV.first)) { - DEBUG(dbgs() << "LRR: Aborting - instruction is not used in 1 iteration: " - << *KV.first << " (#uses=" << KV.second.count() << ")\n"); + LLVM_DEBUG( + dbgs() << "LRR: Aborting - instruction is not used in 1 iteration: " + << *KV.first << " (#uses=" << KV.second.count() << ")\n"); return false; } } - DEBUG( - for (auto &KV : Uses) { - dbgs() << "LRR: " << KV.second.find_first() << "\t" << *KV.first << "\n"; - } - ); + LLVM_DEBUG(for (auto &KV + : Uses) { + dbgs() << "LRR: " << KV.second.find_first() << "\t" << *KV.first << "\n"; + }); for (unsigned Iter = 1; Iter < Scale; ++Iter) { // In addition to regular aliasing information, we need to look for @@ -1305,8 +1308,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { if (TryIt == Uses.end() || TryIt == RootIt || instrDependsOn(TryIt->first, RootIt, TryIt)) { - DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << - " vs. " << *RootInst << "\n"); + LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " + << *BaseInst << " vs. 
" << *RootInst << "\n"); return false; } @@ -1342,8 +1345,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { // root instruction, does not also belong to the base set or the set of // some other root instruction. if (RootIt->second.count() > 1) { - DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << - " vs. " << *RootInst << " (prev. case overlap)\n"); + LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst + << " vs. " << *RootInst << " (prev. case overlap)\n"); return false; } @@ -1353,8 +1356,9 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { if (RootInst->mayReadFromMemory()) for (auto &K : AST) { if (K.aliasesUnknownInst(RootInst, *AA)) { - DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << - " vs. " << *RootInst << " (depends on future store)\n"); + LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " + << *BaseInst << " vs. " << *RootInst + << " (depends on future store)\n"); return false; } } @@ -1367,9 +1371,9 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { !isSafeToSpeculativelyExecute(BaseInst)) || (!isUnorderedLoadStore(RootInst) && !isSafeToSpeculativelyExecute(RootInst)))) { - DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << - " vs. " << *RootInst << - " (side effects prevent reordering)\n"); + LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst + << " vs. " << *RootInst + << " (side effects prevent reordering)\n"); return false; } @@ -1420,8 +1424,9 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { BaseInst->getOperand(!j) == Op2) { Swapped = true; } else { - DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst - << " vs. " << *RootInst << " (operand " << j << ")\n"); + LLVM_DEBUG(dbgs() + << "LRR: iteration root match failed at " << *BaseInst + << " vs. " << *RootInst << " (operand " << j << ")\n"); return false; } } @@ -1434,8 +1439,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { hasUsesOutsideLoop(BaseInst, L)) || (!PossibleRedLastSet.count(RootInst) && hasUsesOutsideLoop(RootInst, L))) { - DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << - " vs. " << *RootInst << " (uses outside loop)\n"); + LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst + << " vs. 
" << *RootInst << " (uses outside loop)\n"); return false; } @@ -1452,8 +1457,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { "Mismatched set sizes!"); } - DEBUG(dbgs() << "LRR: Matched all iteration increments for " << - *IV << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Matched all iteration increments for " << *IV + << "\n"); return true; } @@ -1465,7 +1470,7 @@ void LoopReroll::DAGRootTracker::replace(const SCEV *IterCount) { J != JE;) { unsigned I = Uses[&*J].find_first(); if (I > 0 && I < IL_All) { - DEBUG(dbgs() << "LRR: removing: " << *J << "\n"); + LLVM_DEBUG(dbgs() << "LRR: removing: " << *J << "\n"); J++->eraseFromParent(); continue; } @@ -1618,17 +1623,17 @@ bool LoopReroll::ReductionTracker::validateSelected() { int Iter = PossibleRedIter[J]; if (Iter != PrevIter && Iter != PrevIter + 1 && !PossibleReds[i].getReducedValue()->isAssociative()) { - DEBUG(dbgs() << "LRR: Out-of-order non-associative reduction: " << - J << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Out-of-order non-associative reduction: " + << J << "\n"); return false; } if (Iter != PrevIter) { if (Count != BaseCount) { - DEBUG(dbgs() << "LRR: Iteration " << PrevIter << - " reduction use count " << Count << - " is not equal to the base use count " << - BaseCount << "\n"); + LLVM_DEBUG(dbgs() + << "LRR: Iteration " << PrevIter << " reduction use count " + << Count << " is not equal to the base use count " + << BaseCount << "\n"); return false; } @@ -1724,8 +1729,8 @@ bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header, if (!DAGRoots.findRoots()) return false; - DEBUG(dbgs() << "LRR: Found all root induction increments for: " << - *IV << "\n"); + LLVM_DEBUG(dbgs() << "LRR: Found all root induction increments for: " << *IV + << "\n"); if (!DAGRoots.validate(Reductions)) return false; @@ -1753,9 +1758,9 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) { PreserveLCSSA = mustPreserveAnalysisID(LCSSAID); BasicBlock *Header = L->getHeader(); - DEBUG(dbgs() << "LRR: F[" << Header->getParent()->getName() << - "] Loop %" << Header->getName() << " (" << - L->getNumBlocks() << " block(s))\n"); + LLVM_DEBUG(dbgs() << "LRR: F[" << Header->getParent()->getName() << "] Loop %" + << Header->getName() << " (" << L->getNumBlocks() + << " block(s))\n"); // For now, we'll handle only single BB loops. if (L->getNumBlocks() > 1) @@ -1766,8 +1771,8 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) { const SCEV *LIBETC = SE->getBackedgeTakenCount(L); const SCEV *IterCount = SE->getAddExpr(LIBETC, SE->getOne(LIBETC->getType())); - DEBUG(dbgs() << "\n Before Reroll:\n" << *(L->getHeader()) << "\n"); - DEBUG(dbgs() << "LRR: iteration count = " << *IterCount << "\n"); + LLVM_DEBUG(dbgs() << "\n Before Reroll:\n" << *(L->getHeader()) << "\n"); + LLVM_DEBUG(dbgs() << "LRR: iteration count = " << *IterCount << "\n"); // First, we need to find the induction variable with respect to which we can // reroll (there may be several possible options). 
@@ -1777,7 +1782,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) { collectPossibleIVs(L, PossibleIVs); if (PossibleIVs.empty()) { - DEBUG(dbgs() << "LRR: No possible IVs found\n"); + LLVM_DEBUG(dbgs() << "LRR: No possible IVs found\n"); return false; } @@ -1792,7 +1797,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) { Changed = true; break; } - DEBUG(dbgs() << "\n After Reroll:\n" << *(L->getHeader()) << "\n"); + LLVM_DEBUG(dbgs() << "\n After Reroll:\n" << *(L->getHeader()) << "\n"); // Trip count of L has changed so SE must be re-evaluated. if (Changed) diff --git a/llvm/lib/Transforms/Scalar/LoopSink.cpp b/llvm/lib/Transforms/Scalar/LoopSink.cpp index a2983e60b8f..731506d9107 100644 --- a/llvm/lib/Transforms/Scalar/LoopSink.cpp +++ b/llvm/lib/Transforms/Scalar/LoopSink.cpp @@ -224,11 +224,11 @@ static bool sinkInstruction(Loop &L, Instruction &I, } // Replaces uses of I with IC in blocks dominated by N replaceDominatedUsesWith(&I, IC, DT, N); - DEBUG(dbgs() << "Sinking a clone of " << I << " To: " << N->getName() - << '\n'); + LLVM_DEBUG(dbgs() << "Sinking a clone of " << I << " To: " << N->getName() + << '\n'); NumLoopSunkCloned++; } - DEBUG(dbgs() << "Sinking " << I << " To: " << MoveBB->getName() << '\n'); + LLVM_DEBUG(dbgs() << "Sinking " << I << " To: " << MoveBB->getName() << '\n'); NumLoopSunk++; I.moveBefore(&*MoveBB->getFirstInsertionPt()); diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 4c0b3cc808c..b46dc749390 100644 --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -2424,8 +2424,8 @@ LSRInstance::OptimizeLoopTermCond() { } } - DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " - << *Cond << '\n'); + LLVM_DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " + << *Cond << '\n'); // It's possible for the setcc instruction to be anywhere in the loop, and // possible for it to have multiple users. If it is not immediately before @@ -2666,7 +2666,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() { if (Types.size() == 1) Types.clear(); - DEBUG(print_factors_and_types(dbgs())); + LLVM_DEBUG(print_factors_and_types(dbgs())); } /// Helper for CollectChains that finds an IV operand (computed by an AddRec in @@ -2797,10 +2797,9 @@ isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users, return false; if (!Users.empty()) { - DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n"; - for (Instruction *Inst : Users) { - dbgs() << " " << *Inst << "\n"; - }); + LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n"; + for (Instruction *Inst + : Users) { dbgs() << " " << *Inst << "\n"; }); return false; } assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); @@ -2853,8 +2852,8 @@ isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users, // the stride. 
cost -= NumReusedIncrements; - DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost - << "\n"); + LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost + << "\n"); return cost < 0; } @@ -2907,7 +2906,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, if (isa<PHINode>(UserInst)) return; if (NChains >= MaxChains && !StressIVChain) { - DEBUG(dbgs() << "IV Chain Limit\n"); + LLVM_DEBUG(dbgs() << "IV Chain Limit\n"); return; } LastIncExpr = OperExpr; @@ -2920,11 +2919,11 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), OperExprBase)); ChainUsersVec.resize(NChains); - DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst - << ") IV=" << *LastIncExpr << "\n"); + LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst + << ") IV=" << *LastIncExpr << "\n"); } else { - DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst - << ") IV+" << *LastIncExpr << "\n"); + LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst + << ") IV+" << *LastIncExpr << "\n"); // Add this IV user to the end of the chain. IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr)); } @@ -2994,7 +2993,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, /// loop latch. This will discover chains on side paths, but requires /// maintaining multiple copies of the Chains state. void LSRInstance::CollectChains() { - DEBUG(dbgs() << "Collecting IV Chains.\n"); + LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n"); SmallVector<ChainUsers, 8> ChainUsersVec; SmallVector<BasicBlock *,8> LatchPath; @@ -3063,10 +3062,10 @@ void LSRInstance::CollectChains() { void LSRInstance::FinalizeChain(IVChain &Chain) { assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); - DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); + LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); for (const IVInc &Inc : Chain) { - DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n"); + LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n"); auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand); assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand"); IVIncSet.insert(UseI); @@ -3123,11 +3122,11 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, } if (IVOpIter == IVOpEnd) { // Gracefully give up on this chain. 
- DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); + LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); return; } - DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); + LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); Type *IVTy = IVSrc->getType(); Type *IntTy = SE.getEffectiveSCEVType(IVTy); const SCEV *LeftOverExpr = nullptr; @@ -3203,7 +3202,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { find(UserInst->operands(), U.getOperandValToReplace()); assert(UseI != UserInst->op_end() && "cannot find IV operand"); if (IVIncSet.count(UseI)) { - DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n'); + LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n'); continue; } @@ -3279,7 +3278,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() { } } - DEBUG(print_fixups(dbgs())); + LLVM_DEBUG(print_fixups(dbgs())); } /// Insert a formula for the given expression into the given use, separating out @@ -3995,10 +3994,11 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { if (Imms.size() == 1) continue; - DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':'; - for (const auto &Entry : Imms) - dbgs() << ' ' << Entry.first; - dbgs() << '\n'); + LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':'; + for (const auto &Entry + : Imms) dbgs() + << ' ' << Entry.first; + dbgs() << '\n'); // Examine each offset. for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end(); @@ -4010,7 +4010,8 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { if (!isa<SCEVConstant>(OrigReg) && UsedByIndicesMap[Reg].count() == 1) { - DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n'); + LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg + << '\n'); continue; } @@ -4159,9 +4160,9 @@ LSRInstance::GenerateAllReuseFormulae() { GenerateCrossUseConstantOffsets(); - DEBUG(dbgs() << "\n" - "After generating reuse formulae:\n"; - print_uses(dbgs())); + LLVM_DEBUG(dbgs() << "\n" + "After generating reuse formulae:\n"; + print_uses(dbgs())); } /// If there are multiple formulae with the same set of registers used @@ -4183,7 +4184,8 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; - DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); + dbgs() << '\n'); bool Any = false; for (size_t FIdx = 0, NumForms = LU.Formulae.size(); @@ -4207,8 +4209,8 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { // as the basis of rediscovering the desired formula that uses an AddRec // corresponding to the existing phi. Once all formulae have been // generated, these initial losers may be pruned. 
- DEBUG(dbgs() << " Filtering loser "; F.print(dbgs()); - dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " Filtering loser "; F.print(dbgs()); + dbgs() << "\n"); } else { SmallVector<const SCEV *, 4> Key; @@ -4235,10 +4237,10 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { CostBest.RateFormula(TTI, Best, Regs, VisitedRegs, L, SE, DT, LU); if (CostF.isLess(CostBest, TTI)) std::swap(F, Best); - DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); - dbgs() << "\n" - " in favor of formula "; Best.print(dbgs()); - dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); + dbgs() << "\n" + " in favor of formula "; + Best.print(dbgs()); dbgs() << '\n'); } #ifndef NDEBUG ChangedFormulae = true; @@ -4257,11 +4259,11 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { BestFormulae.clear(); } - DEBUG(if (ChangedFormulae) { - dbgs() << "\n" - "After filtering out undesirable candidates:\n"; - print_uses(dbgs()); - }); + LLVM_DEBUG(if (ChangedFormulae) { + dbgs() << "\n" + "After filtering out undesirable candidates:\n"; + print_uses(dbgs()); + }); } // This is a rough guess that seems to work fairly well. @@ -4290,11 +4292,11 @@ size_t LSRInstance::EstimateSearchSpaceComplexity() const { /// register pressure); remove it to simplify the system. void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { - DEBUG(dbgs() << "The search space is too complex.\n"); + LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); - DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " - "which use a superset of registers used by other " - "formulae.\n"); + LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " + "which use a superset of registers used by other " + "formulae.\n"); for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; @@ -4312,7 +4314,8 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { NewF.BaseRegs.erase(NewF.BaseRegs.begin() + (I - F.BaseRegs.begin())); if (LU.HasFormulaWithSameRegs(NewF)) { - DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); + dbgs() << '\n'); LU.DeleteFormula(F); --i; --e; @@ -4327,8 +4330,8 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { NewF.BaseRegs.erase(NewF.BaseRegs.begin() + (I - F.BaseRegs.begin())); if (LU.HasFormulaWithSameRegs(NewF)) { - DEBUG(dbgs() << " Deleting "; F.print(dbgs()); - dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); + dbgs() << '\n'); LU.DeleteFormula(F); --i; --e; @@ -4343,8 +4346,7 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { LU.RecomputeRegs(LUIdx, RegUses); } - DEBUG(dbgs() << "After pre-selection:\n"; - print_uses(dbgs())); + LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } } @@ -4354,9 +4356,10 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { if (EstimateSearchSpaceComplexity() < ComplexityLimit) return; - DEBUG(dbgs() << "The search space is too complex.\n" - "Narrowing the search space by assuming that uses separated " - "by a constant offset will use the same registers.\n"); + LLVM_DEBUG( + dbgs() << "The search space is too complex.\n" + "Narrowing the search space by assuming that uses separated " + "by a constant offset will use the same registers.\n"); // This is especially useful for unrolled loops. 
@@ -4374,7 +4377,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { LU.Kind, LU.AccessTy)) continue; - DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n'); LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; @@ -4382,7 +4385,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { for (LSRFixup &Fixup : LU.Fixups) { Fixup.Offset += F.BaseOffset; LUThatHas->pushFixup(Fixup); - DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n'); + LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n'); } // Delete formulae from the new use which are no longer legal. @@ -4391,8 +4394,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { Formula &F = LUThatHas->Formulae[i]; if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset, LUThatHas->Kind, LUThatHas->AccessTy, F)) { - DEBUG(dbgs() << " Deleting "; F.print(dbgs()); - dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); LUThatHas->DeleteFormula(F); --i; --e; @@ -4411,7 +4413,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { } } - DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); + LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } /// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that @@ -4419,15 +4421,14 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { /// eliminate. void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){ if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { - DEBUG(dbgs() << "The search space is too complex.\n"); + LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); - DEBUG(dbgs() << "Narrowing the search space by re-filtering out " - "undesirable dedicated registers.\n"); + LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out " + "undesirable dedicated registers.\n"); FilterOutUndesirableDedicatedRegisters(); - DEBUG(dbgs() << "After pre-selection:\n"; - print_uses(dbgs())); + LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } } @@ -4444,9 +4445,10 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() { if (EstimateSearchSpaceComplexity() < ComplexityLimit) return; - DEBUG(dbgs() << "The search space is too complex.\n" - "Narrowing the search space by choosing the best Formula " - "from the Formulae with the same Scale and ScaledReg.\n"); + LLVM_DEBUG( + dbgs() << "The search space is too complex.\n" + "Narrowing the search space by choosing the best Formula " + "from the Formulae with the same Scale and ScaledReg.\n"); // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse. using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>; @@ -4460,7 +4462,8 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() { for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; - DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); + dbgs() << '\n'); // Return true if Formula FA is better than Formula FB. 
auto IsBetterThan = [&](Formula &FA, Formula &FB) { @@ -4504,10 +4507,10 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() { Formula &Best = LU.Formulae[P.first->second]; if (IsBetterThan(F, Best)) std::swap(F, Best); - DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); - dbgs() << "\n" - " in favor of formula "; - Best.print(dbgs()); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); + dbgs() << "\n" + " in favor of formula "; + Best.print(dbgs()); dbgs() << '\n'); #ifndef NDEBUG ChangedFormulae = true; #endif @@ -4523,7 +4526,7 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() { BestFormulae.clear(); } - DEBUG(if (ChangedFormulae) { + LLVM_DEBUG(if (ChangedFormulae) { dbgs() << "\n" "After filtering out undesirable candidates:\n"; print_uses(dbgs()); @@ -4582,7 +4585,7 @@ void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() { // Used in each formula of a solution (in example above this is reg(c)). // We can skip them in calculations. SmallPtrSet<const SCEV *, 4> UniqRegs; - DEBUG(dbgs() << "The search space is too complex.\n"); + LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); // Map each register to probability of not selecting DenseMap <const SCEV *, float> RegNumMap; @@ -4602,7 +4605,8 @@ void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() { RegNumMap.insert(std::make_pair(Reg, PNotSel)); } - DEBUG(dbgs() << "Narrowing the search space by deleting costly formulas\n"); + LLVM_DEBUG( + dbgs() << "Narrowing the search space by deleting costly formulas\n"); // Delete formulas where registers number expectation is high. for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { @@ -4644,26 +4648,25 @@ void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() { MinIdx = i; } } - DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs()); - dbgs() << " with min reg num " << FMinRegNum << '\n'); + LLVM_DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs()); + dbgs() << " with min reg num " << FMinRegNum << '\n'); if (MinIdx != 0) std::swap(LU.Formulae[MinIdx], LU.Formulae[0]); while (LU.Formulae.size() != 1) { - DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs()); - dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs()); + dbgs() << '\n'); LU.Formulae.pop_back(); } LU.RecomputeRegs(LUIdx, RegUses); assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula"); Formula &F = LU.Formulae[0]; - DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n'); // When we choose the formula, the regs become unique. UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); if (F.ScaledReg) UniqRegs.insert(F.ScaledReg); } - DEBUG(dbgs() << "After pre-selection:\n"; - print_uses(dbgs())); + LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } /// Pick a register which seems likely to be profitable, and then in any use @@ -4676,7 +4679,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { // Ok, we have too many of formulae on our hands to conveniently handle. // Use a rough heuristic to thin out the list. 
- DEBUG(dbgs() << "The search space is too complex.\n"); + LLVM_DEBUG(dbgs() << "The search space is too complex.\n"); // Pick the register which is used by the most LSRUses, which is likely // to be a good reuse register candidate. @@ -4697,8 +4700,8 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { } } - DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best - << " will yield profitable reuse.\n"); + LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best + << " will yield profitable reuse.\n"); Taken.insert(Best); // In any use with formulae which references this register, delete formulae @@ -4711,7 +4714,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { Formula &F = LU.Formulae[i]; if (!F.referencesReg(Best)) { - DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); LU.DeleteFormula(F); --e; --i; @@ -4725,8 +4728,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { LU.RecomputeRegs(LUIdx, RegUses); } - DEBUG(dbgs() << "After pre-selection:\n"; - print_uses(dbgs())); + LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } } @@ -4808,11 +4810,11 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, if (F.getNumRegs() == 1 && Workspace.size() == 1) VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); } else { - DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); - dbgs() << ".\n Regs:"; - for (const SCEV *S : NewRegs) - dbgs() << ' ' << *S; - dbgs() << '\n'); + LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); + dbgs() << ".\n Regs:"; for (const SCEV *S + : NewRegs) dbgs() + << ' ' << *S; + dbgs() << '\n'); SolutionCost = NewCost; Solution = Workspace; @@ -4837,22 +4839,22 @@ void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { SolveRecurse(Solution, SolutionCost, Workspace, CurCost, CurRegs, VisitedRegs); if (Solution.empty()) { - DEBUG(dbgs() << "\nNo Satisfactory Solution\n"); + LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n"); return; } // Ok, we've now made all our decisions. 
- DEBUG(dbgs() << "\n" - "The chosen solution requires "; SolutionCost.print(dbgs()); - dbgs() << ":\n"; - for (size_t i = 0, e = Uses.size(); i != e; ++i) { - dbgs() << " "; - Uses[i].print(dbgs()); - dbgs() << "\n" - " "; - Solution[i]->print(dbgs()); - dbgs() << '\n'; - }); + LLVM_DEBUG(dbgs() << "\n" + "The chosen solution requires "; + SolutionCost.print(dbgs()); dbgs() << ":\n"; + for (size_t i = 0, e = Uses.size(); i != e; ++i) { + dbgs() << " "; + Uses[i].print(dbgs()); + dbgs() << "\n" + " "; + Solution[i]->print(dbgs()); + dbgs() << '\n'; + }); assert(Solution.size() == Uses.size() && "Malformed solution!"); } @@ -5326,7 +5328,8 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, for (const IVStrideUse &U : IU) { if (++NumUsers > MaxIVUsers) { (void)U; - DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U << "\n"); + LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U + << "\n"); return; } // Bail out if we have a PHI on an EHPad that gets a value from a @@ -5359,9 +5362,9 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, } #endif // DEBUG - DEBUG(dbgs() << "\nLSR on loop "; - L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false); - dbgs() << ":\n"); + LLVM_DEBUG(dbgs() << "\nLSR on loop "; + L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false); + dbgs() << ":\n"); // First, perform some low-level loop optimizations. OptimizeShadowIV(); @@ -5372,7 +5375,7 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, // Skip nested loops until we can model them better with formulae. if (!L->empty()) { - DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); + LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); return; } @@ -5383,8 +5386,8 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, CollectLoopInvariantFixupsAndFormulae(); assert(!Uses.empty() && "IVUsers reported at least one use"); - DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; - print_uses(dbgs())); + LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; + print_uses(dbgs())); // Now use the reuse data to generate a bunch of interesting ways // to formulate the values needed for the uses. diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp index 822f880f222..bbd1d3baa92 100644 --- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -406,9 +406,9 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost( // First accumulate the cost of this instruction. if (!Cost.IsFree) { UnrolledCost += TTI.getUserCost(I); - DEBUG(dbgs() << "Adding cost of instruction (iteration " << Iteration - << "): "); - DEBUG(I->dump()); + LLVM_DEBUG(dbgs() << "Adding cost of instruction (iteration " + << Iteration << "): "); + LLVM_DEBUG(I->dump()); } // We must count the cost of every operand which is not free, @@ -443,14 +443,14 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost( assert(L->isLCSSAForm(DT) && "Must have loops in LCSSA form to track live-out values."); - DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n"); + LLVM_DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n"); // Simulate execution of each iteration of the loop counting instructions, // which would be simplified. // Since the same load will take different values on different iterations, // we literally have to go through all loop's iterations. 
for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) { - DEBUG(dbgs() << " Analyzing iteration " << Iteration << "\n"); + LLVM_DEBUG(dbgs() << " Analyzing iteration " << Iteration << "\n"); // Prepare for the iteration by collecting any simplified entry or backedge // inputs. @@ -525,10 +525,10 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost( // If unrolled body turns out to be too big, bail out. if (UnrolledCost > MaxUnrolledLoopSize) { - DEBUG(dbgs() << " Exceeded threshold.. exiting.\n" - << " UnrolledCost: " << UnrolledCost - << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize - << "\n"); + LLVM_DEBUG(dbgs() << " Exceeded threshold.. exiting.\n" + << " UnrolledCost: " << UnrolledCost + << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize + << "\n"); return None; } } @@ -581,8 +581,8 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost( // If we found no optimization opportunities on the first iteration, we // won't find them on later ones too. if (UnrolledCost == RolledDynamicCost) { - DEBUG(dbgs() << " No opportunities found.. exiting.\n" - << " UnrolledCost: " << UnrolledCost << "\n"); + LLVM_DEBUG(dbgs() << " No opportunities found.. exiting.\n" + << " UnrolledCost: " << UnrolledCost << "\n"); return None; } } @@ -603,9 +603,9 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost( } } - DEBUG(dbgs() << "Analysis finished:\n" - << "UnrolledCost: " << UnrolledCost << ", " - << "RolledDynamicCost: " << RolledDynamicCost << "\n"); + LLVM_DEBUG(dbgs() << "Analysis finished:\n" + << "UnrolledCost: " << UnrolledCost << ", " + << "RolledDynamicCost: " << RolledDynamicCost << "\n"); return {{UnrolledCost, RolledDynamicCost}}; } @@ -808,8 +808,8 @@ static bool computeUnrollCount( if (TripCount) { UP.Partial |= ExplicitUnroll; if (!UP.Partial) { - DEBUG(dbgs() << " will not try to unroll partially because " - << "-unroll-allow-partial not given\n"); + LLVM_DEBUG(dbgs() << " will not try to unroll partially because " + << "-unroll-allow-partial not given\n"); UP.Count = 0; return false; } @@ -896,8 +896,9 @@ static bool computeUnrollCount( // Reduce count based on the type of unrolling and the threshold values. UP.Runtime |= PragmaEnableUnroll || PragmaCount > 0 || UserUnrollCount; if (!UP.Runtime) { - DEBUG(dbgs() << " will not try to unroll loop with runtime trip count " - << "-unroll-runtime not given\n"); + LLVM_DEBUG( + dbgs() << " will not try to unroll loop with runtime trip count " + << "-unroll-runtime not given\n"); UP.Count = 0; return false; } @@ -917,12 +918,13 @@ static bool computeUnrollCount( if (!UP.AllowRemainder && UP.Count != 0 && (TripMultiple % UP.Count) != 0) { while (UP.Count != 0 && TripMultiple % UP.Count != 0) UP.Count >>= 1; - DEBUG(dbgs() << "Remainder loop is restricted (that could architecture " - "specific or because the loop contains a convergent " - "instruction), so unroll count must divide the trip " - "multiple, " - << TripMultiple << ". Reducing unroll count from " - << OrigCount << " to " << UP.Count << ".\n"); + LLVM_DEBUG( + dbgs() << "Remainder loop is restricted (that could architecture " + "specific or because the loop contains a convergent " + "instruction), so unroll count must divide the trip " + "multiple, " + << TripMultiple << ". 
Reducing unroll count from " << OrigCount + << " to " << UP.Count << ".\n"); using namespace ore; @@ -944,7 +946,8 @@ static bool computeUnrollCount( if (UP.Count > UP.MaxCount) UP.Count = UP.MaxCount; - DEBUG(dbgs() << " partially unrolling with count: " << UP.Count << "\n"); + LLVM_DEBUG(dbgs() << " partially unrolling with count: " << UP.Count + << "\n"); if (UP.Count < 2) UP.Count = 0; return ExplicitUnroll; @@ -957,12 +960,13 @@ static LoopUnrollResult tryToUnrollLoop( Optional<unsigned> ProvidedCount, Optional<unsigned> ProvidedThreshold, Optional<bool> ProvidedAllowPartial, Optional<bool> ProvidedRuntime, Optional<bool> ProvidedUpperBound, Optional<bool> ProvidedAllowPeeling) { - DEBUG(dbgs() << "Loop Unroll: F[" << L->getHeader()->getParent()->getName() - << "] Loop %" << L->getHeader()->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Loop Unroll: F[" + << L->getHeader()->getParent()->getName() << "] Loop %" + << L->getHeader()->getName() << "\n"); if (HasUnrollDisablePragma(L)) return LoopUnrollResult::Unmodified; if (!L->isLoopSimplifyForm()) { - DEBUG( + LLVM_DEBUG( dbgs() << " Not unrolling loop which is not in loop-simplify form.\n"); return LoopUnrollResult::Unmodified; } @@ -984,14 +988,14 @@ static LoopUnrollResult tryToUnrollLoop( unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent, TTI, EphValues, UP.BEInsns); - DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n"); + LLVM_DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n"); if (NotDuplicatable) { - DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable" - << " instructions.\n"); + LLVM_DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable" + << " instructions.\n"); return LoopUnrollResult::Unmodified; } if (NumInlineCandidates != 0) { - DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n"); + LLVM_DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n"); return LoopUnrollResult::Unmodified; } diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp index 91a30088fa0..b530f7c5dbd 100644 --- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp +++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -298,9 +298,9 @@ bool LUAnalysisCache::countLoop(const Loop *L, const TargetTransformInfo &TTI, MaxSize -= Props.SizeEstimation * Props.CanBeUnswitchedCount; if (Metrics.notDuplicatable) { - DEBUG(dbgs() << "NOT unswitching loop %" - << L->getHeader()->getName() << ", contents cannot be " - << "duplicated!\n"); + LLVM_DEBUG(dbgs() << "NOT unswitching loop %" << L->getHeader()->getName() + << ", contents cannot be " + << "duplicated!\n"); return false; } } @@ -856,20 +856,20 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val, TerminatorInst *TI) { // Check to see if it would be profitable to unswitch current loop. if (!BranchesInfo.CostAllowsUnswitching()) { - DEBUG(dbgs() << "NOT unswitching loop %" - << currentLoop->getHeader()->getName() - << " at non-trivial condition '" << *Val - << "' == " << *LoopCond << "\n" - << ". Cost too high.\n"); + LLVM_DEBUG(dbgs() << "NOT unswitching loop %" + << currentLoop->getHeader()->getName() + << " at non-trivial condition '" << *Val + << "' == " << *LoopCond << "\n" + << ". 
Cost too high.\n"); return false; } if (hasBranchDivergence && getAnalysis<DivergenceAnalysis>().isDivergent(LoopCond)) { - DEBUG(dbgs() << "NOT unswitching loop %" - << currentLoop->getHeader()->getName() - << " at non-trivial condition '" << *Val - << "' == " << *LoopCond << "\n" - << ". Condition is divergent.\n"); + LLVM_DEBUG(dbgs() << "NOT unswitching loop %" + << currentLoop->getHeader()->getName() + << " at non-trivial condition '" << *Val + << "' == " << *LoopCond << "\n" + << ". Condition is divergent.\n"); return false; } @@ -970,11 +970,11 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val, void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val, BasicBlock *ExitBlock, TerminatorInst *TI) { - DEBUG(dbgs() << "loop-unswitch: Trivial-Unswitch loop %" - << loopHeader->getName() << " [" << L->getBlocks().size() - << " blocks] in Function " - << L->getHeader()->getParent()->getName() << " on cond: " << *Val - << " == " << *Cond << "\n"); + LLVM_DEBUG(dbgs() << "loop-unswitch: Trivial-Unswitch loop %" + << loopHeader->getName() << " [" << L->getBlocks().size() + << " blocks] in Function " + << L->getHeader()->getParent()->getName() + << " on cond: " << *Val << " == " << *Cond << "\n"); // First step, split the preheader, so that we know that there is a safe place // to insert the conditional branch. We will change loopPreheader to have a @@ -1196,10 +1196,10 @@ void LoopUnswitch::SplitExitEdges(Loop *L, void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val, Loop *L, TerminatorInst *TI) { Function *F = loopHeader->getParent(); - DEBUG(dbgs() << "loop-unswitch: Unswitching loop %" - << loopHeader->getName() << " [" << L->getBlocks().size() - << " blocks] in Function " << F->getName() - << " when '" << *Val << "' == " << *LIC << "\n"); + LLVM_DEBUG(dbgs() << "loop-unswitch: Unswitching loop %" + << loopHeader->getName() << " [" << L->getBlocks().size() + << " blocks] in Function " << F->getName() << " when '" + << *Val << "' == " << *LIC << "\n"); if (auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>()) SEWP->getSE().forgetLoop(L); @@ -1355,7 +1355,7 @@ static void RemoveFromWorklist(Instruction *I, static void ReplaceUsesOfWith(Instruction *I, Value *V, std::vector<Instruction*> &Worklist, Loop *L, LPPassManager *LPM) { - DEBUG(dbgs() << "Replace with '" << *V << "': " << *I << "\n"); + LLVM_DEBUG(dbgs() << "Replace with '" << *V << "': " << *I << "\n"); // Add uses to the worklist, which may be dead now. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) @@ -1524,7 +1524,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) { // Simple DCE. if (isInstructionTriviallyDead(I)) { - DEBUG(dbgs() << "Remove dead instruction '" << *I << "\n"); + LLVM_DEBUG(dbgs() << "Remove dead instruction '" << *I << "\n"); // Add uses to the worklist, which may be dead now. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) @@ -1557,8 +1557,8 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) { if (!SinglePred) continue; // Nothing to do. assert(SinglePred == Pred && "CFG broken"); - DEBUG(dbgs() << "Merging blocks: " << Pred->getName() << " <- " - << Succ->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Merging blocks: " << Pred->getName() << " <- " + << Succ->getName() << "\n"); // Resolve any single entry PHI nodes in Succ. 
while (PHINode *PN = dyn_cast<PHINode>(Succ->begin())) diff --git a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp index e0e2c1938aa..06e86081e8a 100644 --- a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp +++ b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp @@ -246,48 +246,47 @@ private: bool LoopVersioningLICM::legalLoopStructure() { // Loop must be in loop simplify form. if (!CurLoop->isLoopSimplifyForm()) { - DEBUG( - dbgs() << " loop is not in loop-simplify form.\n"); + LLVM_DEBUG(dbgs() << " loop is not in loop-simplify form.\n"); return false; } // Loop should be innermost loop, if not return false. if (!CurLoop->getSubLoops().empty()) { - DEBUG(dbgs() << " loop is not innermost\n"); + LLVM_DEBUG(dbgs() << " loop is not innermost\n"); return false; } // Loop should have a single backedge, if not return false. if (CurLoop->getNumBackEdges() != 1) { - DEBUG(dbgs() << " loop has multiple backedges\n"); + LLVM_DEBUG(dbgs() << " loop has multiple backedges\n"); return false; } // Loop must have a single exiting block, if not return false. if (!CurLoop->getExitingBlock()) { - DEBUG(dbgs() << " loop has multiple exiting block\n"); + LLVM_DEBUG(dbgs() << " loop has multiple exiting block\n"); return false; } // We only handle bottom-tested loop, i.e. loop in which the condition is // checked at the end of each iteration. With that we can assume that all // instructions in the loop are executed the same number of times. if (CurLoop->getExitingBlock() != CurLoop->getLoopLatch()) { - DEBUG(dbgs() << " loop is not bottom tested\n"); + LLVM_DEBUG(dbgs() << " loop is not bottom tested\n"); return false; } // Parallel loops must not have aliasing loop-invariant memory accesses. // Hence we don't need to version anything in this case. if (CurLoop->isAnnotatedParallel()) { - DEBUG(dbgs() << " Parallel loop is not worth versioning\n"); + LLVM_DEBUG(dbgs() << " Parallel loop is not worth versioning\n"); return false; } // Loop depth more then LoopDepthThreshold are not allowed if (CurLoop->getLoopDepth() > LoopDepthThreshold) { - DEBUG(dbgs() << " loop depth is more then threshold\n"); + LLVM_DEBUG(dbgs() << " loop depth is more then threshold\n"); return false; } // We need to be able to compute the loop trip count in order // to generate the bound checks. const SCEV *ExitCount = SE->getBackedgeTakenCount(CurLoop); if (ExitCount == SE->getCouldNotCompute()) { - DEBUG(dbgs() << " loop does not has trip count\n"); + LLVM_DEBUG(dbgs() << " loop does not has trip count\n"); return false; } return true; @@ -335,18 +334,18 @@ bool LoopVersioningLICM::legalLoopMemoryAccesses() { } // Ensure types should be of same type. if (!TypeSafety) { - DEBUG(dbgs() << " Alias tracker type safety failed!\n"); + LLVM_DEBUG(dbgs() << " Alias tracker type safety failed!\n"); return false; } // Ensure loop body shouldn't be read only. if (!HasMod) { - DEBUG(dbgs() << " No memory modified in loop body\n"); + LLVM_DEBUG(dbgs() << " No memory modified in loop body\n"); return false; } // Make sure alias set has may alias case. // If there no alias memory ambiguity, return false. 
if (!HasMayAlias) { - DEBUG(dbgs() << " No ambiguity in memory access.\n"); + LLVM_DEBUG(dbgs() << " No ambiguity in memory access.\n"); return false; } return true; @@ -362,12 +361,12 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) { assert(I != nullptr && "Null instruction found!"); // Check function call safety if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) { - DEBUG(dbgs() << " Unsafe call site found.\n"); + LLVM_DEBUG(dbgs() << " Unsafe call site found.\n"); return false; } // Avoid loops with possiblity of throw if (I->mayThrow()) { - DEBUG(dbgs() << " May throw instruction found in loop body\n"); + LLVM_DEBUG(dbgs() << " May throw instruction found in loop body\n"); return false; } // If current instruction is load instructions @@ -375,7 +374,7 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) { if (I->mayReadFromMemory()) { LoadInst *Ld = dyn_cast<LoadInst>(I); if (!Ld || !Ld->isSimple()) { - DEBUG(dbgs() << " Found a non-simple load.\n"); + LLVM_DEBUG(dbgs() << " Found a non-simple load.\n"); return false; } LoadAndStoreCounter++; @@ -389,7 +388,7 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) { else if (I->mayWriteToMemory()) { StoreInst *St = dyn_cast<StoreInst>(I); if (!St || !St->isSimple()) { - DEBUG(dbgs() << " Found a non-simple store.\n"); + LLVM_DEBUG(dbgs() << " Found a non-simple store.\n"); return false; } LoadAndStoreCounter++; @@ -428,13 +427,14 @@ bool LoopVersioningLICM::legalLoopInstructions() { LAI = &LAA->getInfo(CurLoop); // Check LoopAccessInfo for need of runtime check. if (LAI->getRuntimePointerChecking()->getChecks().empty()) { - DEBUG(dbgs() << " LAA: Runtime check not found !!\n"); + LLVM_DEBUG(dbgs() << " LAA: Runtime check not found !!\n"); return false; } // Number of runtime-checks should be less then RuntimeMemoryCheckThreshold if (LAI->getNumRuntimePointerChecks() > VectorizerParams::RuntimeMemoryCheckThreshold) { - DEBUG(dbgs() << " LAA: Runtime checks are more than threshold !!\n"); + LLVM_DEBUG( + dbgs() << " LAA: Runtime checks are more than threshold !!\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "RuntimeCheck", CurLoop->getStartLoc(), @@ -448,23 +448,25 @@ bool LoopVersioningLICM::legalLoopInstructions() { } // Loop should have at least one invariant load or store instruction. if (!InvariantCounter) { - DEBUG(dbgs() << " Invariant not found !!\n"); + LLVM_DEBUG(dbgs() << " Invariant not found !!\n"); return false; } // Read only loop not allowed. if (IsReadOnlyLoop) { - DEBUG(dbgs() << " Found a read-only loop!\n"); + LLVM_DEBUG(dbgs() << " Found a read-only loop!\n"); return false; } // Profitablity check: // Check invariant threshold, should be in limit. 
if (InvariantCounter * 100 < InvariantThreshold * LoadAndStoreCounter) { - DEBUG(dbgs() - << " Invariant load & store are less then defined threshold\n"); - DEBUG(dbgs() << " Invariant loads & stores: " - << ((InvariantCounter * 100) / LoadAndStoreCounter) << "%\n"); - DEBUG(dbgs() << " Invariant loads & store threshold: " - << InvariantThreshold << "%\n"); + LLVM_DEBUG( + dbgs() + << " Invariant load & store are less then defined threshold\n"); + LLVM_DEBUG(dbgs() << " Invariant loads & stores: " + << ((InvariantCounter * 100) / LoadAndStoreCounter) + << "%\n"); + LLVM_DEBUG(dbgs() << " Invariant loads & store threshold: " + << InvariantThreshold << "%\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "InvariantThreshold", CurLoop->getStartLoc(), @@ -497,16 +499,16 @@ bool LoopVersioningLICM::isLoopAlreadyVisited() { /// Return true if legal else returns false. bool LoopVersioningLICM::isLegalForVersioning() { using namespace ore; - DEBUG(dbgs() << "Loop: " << *CurLoop); + LLVM_DEBUG(dbgs() << "Loop: " << *CurLoop); // Make sure not re-visiting same loop again. if (isLoopAlreadyVisited()) { - DEBUG( + LLVM_DEBUG( dbgs() << " Revisiting loop in LoopVersioningLICM not allowed.\n\n"); return false; } // Check loop structure leagality. if (!legalLoopStructure()) { - DEBUG( + LLVM_DEBUG( dbgs() << " Loop structure not suitable for LoopVersioningLICM\n\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "IllegalLoopStruct", @@ -518,14 +520,16 @@ bool LoopVersioningLICM::isLegalForVersioning() { } // Check loop instruction leagality. if (!legalLoopInstructions()) { - DEBUG(dbgs() - << " Loop instructions not suitable for LoopVersioningLICM\n\n"); + LLVM_DEBUG( + dbgs() + << " Loop instructions not suitable for LoopVersioningLICM\n\n"); return false; } // Check loop memory access leagality. if (!legalLoopMemoryAccesses()) { - DEBUG(dbgs() - << " Loop memory access not suitable for LoopVersioningLICM\n\n"); + LLVM_DEBUG( + dbgs() + << " Loop memory access not suitable for LoopVersioningLICM\n\n"); ORE->emit([&]() { return OptimizationRemarkMissed(DEBUG_TYPE, "IllegalLoopMemoryAccess", CurLoop->getStartLoc(), @@ -535,7 +539,7 @@ bool LoopVersioningLICM::isLegalForVersioning() { return false; } // Loop versioning is feasible, return true. 
- DEBUG(dbgs() << " Loop Versioning found to be beneficial\n\n"); + LLVM_DEBUG(dbgs() << " Loop Versioning found to be beneficial\n\n"); ORE->emit([&]() { return OptimizationRemark(DEBUG_TYPE, "IsLegalForVersioning", CurLoop->getStartLoc(), CurLoop->getHeader()) diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp index e21d09e65a1..437c70745d3 100644 --- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -479,10 +479,10 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst, AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment); - DEBUG(dbgs() << "Replace stores:\n"; - for (Instruction *SI : Range.TheStores) - dbgs() << *SI << '\n'; - dbgs() << "With: " << *AMemSet << '\n'); + LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI + : Range.TheStores) dbgs() + << *SI << '\n'; + dbgs() << "With: " << *AMemSet << '\n'); if (!Range.TheStores.empty()) AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc()); @@ -603,7 +603,7 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P, // We made it, we need to lift for (auto *I : llvm::reverse(ToLift)) { - DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n"); + LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n"); I->moveBefore(P); } @@ -680,8 +680,8 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { LI->getPointerOperand(), findLoadAlignment(DL, LI), Size, SI->isVolatile()); - DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI - << " => " << *M << "\n"); + LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => " + << *M << "\n"); MD->removeInstruction(SI); SI->eraseFromParent(); @@ -770,7 +770,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, Align, SI->isVolatile()); - DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n"); + LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n"); MD->removeInstruction(SI); SI->eraseFromParent(); @@ -1294,8 +1294,8 @@ bool MemCpyOptPass::processMemMove(MemMoveInst *M) { MemoryLocation::getForSource(M))) return false; - DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M - << "\n"); + LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M + << "\n"); // If not, then we know we can transform this. Type *ArgTys[3] = { M->getRawDest()->getType(), @@ -1377,9 +1377,9 @@ bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) { TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(), "tmpcast", CS.getInstruction()); - DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n" - << " " << *MDep << "\n" - << " " << *CS.getInstruction() << "\n"); + LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n" + << " " << *MDep << "\n" + << " " << *CS.getInstruction() << "\n"); // Otherwise we're good! Update the byval argument. 
CS.setArgument(ArgNo, TmpCast); diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp index bb1b8a53110..57dd2292dfb 100644 --- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp +++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp @@ -76,25 +76,25 @@ struct BCEAtom { BCEAtom visitICmpLoadOperand(Value *const Val) { BCEAtom Result; if (auto *const LoadI = dyn_cast<LoadInst>(Val)) { - DEBUG(dbgs() << "load\n"); + LLVM_DEBUG(dbgs() << "load\n"); if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) { - DEBUG(dbgs() << "used outside of block\n"); + LLVM_DEBUG(dbgs() << "used outside of block\n"); return {}; } if (LoadI->isVolatile()) { - DEBUG(dbgs() << "volatile\n"); + LLVM_DEBUG(dbgs() << "volatile\n"); return {}; } Value *const Addr = LoadI->getOperand(0); if (auto *const GEP = dyn_cast<GetElementPtrInst>(Addr)) { - DEBUG(dbgs() << "GEP\n"); + LLVM_DEBUG(dbgs() << "GEP\n"); if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) { - DEBUG(dbgs() << "used outside of block\n"); + LLVM_DEBUG(dbgs() << "used outside of block\n"); return {}; } const auto &DL = GEP->getModule()->getDataLayout(); if (!isDereferenceablePointer(GEP, DL)) { - DEBUG(dbgs() << "not dereferenceable\n"); + LLVM_DEBUG(dbgs() << "not dereferenceable\n"); // We need to make sure that we can do comparison in any order, so we // require memory to be unconditionnally dereferencable. return {}; @@ -251,13 +251,13 @@ BCECmpBlock visitICmp(const ICmpInst *const CmpI, // If there are any other uses of the comparison, we cannot merge it with // other comparisons as we would create an orphan use of the value. if (!CmpI->hasOneUse()) { - DEBUG(dbgs() << "cmp has several uses\n"); + LLVM_DEBUG(dbgs() << "cmp has several uses\n"); return {}; } if (CmpI->getPredicate() == ExpectedPredicate) { - DEBUG(dbgs() << "cmp " - << (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne") - << "\n"); + LLVM_DEBUG(dbgs() << "cmp " + << (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne") + << "\n"); auto Lhs = visitICmpLoadOperand(CmpI->getOperand(0)); if (!Lhs.Base()) return {}; auto Rhs = visitICmpLoadOperand(CmpI->getOperand(1)); @@ -275,7 +275,7 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block, if (Block->empty()) return {}; auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator()); if (!BranchI) return {}; - DEBUG(dbgs() << "branch\n"); + LLVM_DEBUG(dbgs() << "branch\n"); if (BranchI->isUnconditional()) { // In this case, we expect an incoming value which is the result of the // comparison. This is the last link in the chain of comparisons (note @@ -283,7 +283,7 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block, // can be reordered). auto *const CmpI = dyn_cast<ICmpInst>(Val); if (!CmpI) return {}; - DEBUG(dbgs() << "icmp\n"); + LLVM_DEBUG(dbgs() << "icmp\n"); auto Result = visitICmp(CmpI, ICmpInst::ICMP_EQ); Result.CmpI = CmpI; Result.BranchI = BranchI; @@ -292,12 +292,12 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block, // In this case, we expect a constant incoming value (the comparison is // chained). 
const auto *const Const = dyn_cast<ConstantInt>(Val); - DEBUG(dbgs() << "const\n"); + LLVM_DEBUG(dbgs() << "const\n"); if (!Const->isZero()) return {}; - DEBUG(dbgs() << "false\n"); + LLVM_DEBUG(dbgs() << "false\n"); auto *const CmpI = dyn_cast<ICmpInst>(BranchI->getCondition()); if (!CmpI) return {}; - DEBUG(dbgs() << "icmp\n"); + LLVM_DEBUG(dbgs() << "icmp\n"); assert(BranchI->getNumSuccessors() == 2 && "expecting a cond branch"); BasicBlock *const FalseBlock = BranchI->getSuccessor(1); auto Result = visitICmp( @@ -311,12 +311,13 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block, static inline void enqueueBlock(std::vector<BCECmpBlock> &Comparisons, BCECmpBlock &Comparison) { - DEBUG(dbgs() << "Block '" << Comparison.BB->getName() << "': Found cmp of " - << Comparison.SizeBits() << " bits between " - << Comparison.Lhs().Base() << " + " << Comparison.Lhs().Offset - << " and " << Comparison.Rhs().Base() << " + " - << Comparison.Rhs().Offset << "\n"); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "Block '" << Comparison.BB->getName() + << "': Found cmp of " << Comparison.SizeBits() + << " bits between " << Comparison.Lhs().Base() << " + " + << Comparison.Lhs().Offset << " and " + << Comparison.Rhs().Base() << " + " + << Comparison.Rhs().Offset << "\n"); + LLVM_DEBUG(dbgs() << "\n"); Comparisons.push_back(Comparison); } @@ -367,12 +368,12 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi) Block, Phi.getParent()); Comparison.BB = Block; if (!Comparison.IsValid()) { - DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n"); + LLVM_DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n"); return; } if (Comparison.doesOtherWork()) { - DEBUG(dbgs() << "block '" << Comparison.BB->getName() - << "' does extra work besides compare\n"); + LLVM_DEBUG(dbgs() << "block '" << Comparison.BB->getName() + << "' does extra work besides compare\n"); if (Comparisons.empty()) { // This is the initial block in the chain, in case this block does other // work, we can try to split the block and move the irrelevant @@ -388,13 +389,15 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi) // // NOTE: we only handle block with single predecessor for now. if (Comparison.canSplit()) { - DEBUG(dbgs() << "Split initial block '" << Comparison.BB->getName() - << "' that does extra work besides compare\n"); + LLVM_DEBUG(dbgs() + << "Split initial block '" << Comparison.BB->getName() + << "' that does extra work besides compare\n"); Comparison.RequireSplit = true; enqueueBlock(Comparisons, Comparison); } else { - DEBUG(dbgs() << "ignoring initial block '" << Comparison.BB->getName() - << "' that does extra work besides compare\n"); + LLVM_DEBUG(dbgs() + << "ignoring initial block '" << Comparison.BB->getName() + << "' that does extra work besides compare\n"); } continue; } @@ -428,7 +431,7 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi) // It is possible we have no suitable comparison to merge. 
if (Comparisons.empty()) { - DEBUG(dbgs() << "chain with no BCE basic blocks, no merge\n"); + LLVM_DEBUG(dbgs() << "chain with no BCE basic blocks, no merge\n"); return; } EntryBlock_ = Comparisons[0].BB; @@ -549,7 +552,7 @@ void BCECmpChain::mergeComparisons(ArrayRef<BCECmpBlock> Comparisons, if (C != Comparisons.end()) C->split(EntryBlock_); - DEBUG(dbgs() << "Merging " << Comparisons.size() << " comparisons\n"); + LLVM_DEBUG(dbgs() << "Merging " << Comparisons.size() << " comparisons\n"); const auto TotalSize = std::accumulate(Comparisons.begin(), Comparisons.end(), 0, [](int Size, const BCECmpBlock &C) { @@ -594,17 +597,17 @@ void BCECmpChain::mergeComparisons(ArrayRef<BCECmpBlock> Comparisons, } else { assert(Comparisons.size() == 1); // There are no blocks to merge, but we still need to update the branches. - DEBUG(dbgs() << "Only one comparison, updating branches\n"); + LLVM_DEBUG(dbgs() << "Only one comparison, updating branches\n"); if (NextBBInChain) { if (FirstComparison.BranchI->isConditional()) { - DEBUG(dbgs() << "conditional -> conditional\n"); + LLVM_DEBUG(dbgs() << "conditional -> conditional\n"); // Just update the "true" target, the "false" target should already be // the phi block. assert(FirstComparison.BranchI->getSuccessor(1) == Phi.getParent()); FirstComparison.BranchI->setSuccessor(0, NextBBInChain); Phi.addIncoming(ConstantInt::getFalse(Context), BB); } else { - DEBUG(dbgs() << "unconditional -> conditional\n"); + LLVM_DEBUG(dbgs() << "unconditional -> conditional\n"); // Replace the unconditional branch by a conditional one. FirstComparison.BranchI->eraseFromParent(); IRBuilder<> Builder(BB); @@ -614,14 +617,14 @@ void BCECmpChain::mergeComparisons(ArrayRef<BCECmpBlock> Comparisons, } } else { if (FirstComparison.BranchI->isConditional()) { - DEBUG(dbgs() << "conditional -> unconditional\n"); + LLVM_DEBUG(dbgs() << "conditional -> unconditional\n"); // Replace the conditional branch by an unconditional one. FirstComparison.BranchI->eraseFromParent(); IRBuilder<> Builder(BB); Builder.CreateBr(Phi.getParent()); Phi.addIncoming(FirstComparison.CmpI, BB); } else { - DEBUG(dbgs() << "unconditional -> unconditional\n"); + LLVM_DEBUG(dbgs() << "unconditional -> unconditional\n"); Phi.addIncoming(FirstComparison.CmpI, BB); } } @@ -639,22 +642,22 @@ std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi, if (CurBlock->hasAddressTaken()) { // Somebody is jumping to the block through an address, all bets are // off. - DEBUG(dbgs() << "skip: block " << BlockIndex - << " has its address taken\n"); + LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex + << " has its address taken\n"); return {}; } Blocks[BlockIndex] = CurBlock; auto *SinglePredecessor = CurBlock->getSinglePredecessor(); if (!SinglePredecessor) { // The block has two or more predecessors. - DEBUG(dbgs() << "skip: block " << BlockIndex - << " has two or more predecessors\n"); + LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex + << " has two or more predecessors\n"); return {}; } if (Phi.getBasicBlockIndex(SinglePredecessor) < 0) { // The block does not link back to the phi. 
- DEBUG(dbgs() << "skip: block " << BlockIndex - << " does not link back to the phi\n"); + LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex + << " does not link back to the phi\n"); return {}; } CurBlock = SinglePredecessor; @@ -664,9 +667,9 @@ std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi, } bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) { - DEBUG(dbgs() << "processPhi()\n"); + LLVM_DEBUG(dbgs() << "processPhi()\n"); if (Phi.getNumIncomingValues() <= 1) { - DEBUG(dbgs() << "skip: only one incoming value in phi\n"); + LLVM_DEBUG(dbgs() << "skip: only one incoming value in phi\n"); return false; } // We are looking for something that has the following structure: @@ -690,7 +693,7 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) { if (isa<ConstantInt>(Phi.getIncomingValue(I))) continue; if (LastBlock) { // There are several non-constant values. - DEBUG(dbgs() << "skip: several non-constant values\n"); + LLVM_DEBUG(dbgs() << "skip: several non-constant values\n"); return false; } if (!isa<ICmpInst>(Phi.getIncomingValue(I)) || @@ -701,7 +704,7 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) { // producing block more than once. // // This is an uncommon case, so we bail. - DEBUG( + LLVM_DEBUG( dbgs() << "skip: non-constant value not from cmp or not from last block.\n"); return false; @@ -710,11 +713,11 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) { } if (!LastBlock) { // There is no non-constant block. - DEBUG(dbgs() << "skip: no non-constant block\n"); + LLVM_DEBUG(dbgs() << "skip: no non-constant block\n"); return false; } if (LastBlock->getSingleSuccessor() != Phi.getParent()) { - DEBUG(dbgs() << "skip: last block non-phi successor\n"); + LLVM_DEBUG(dbgs() << "skip: last block non-phi successor\n"); return false; } @@ -724,7 +727,7 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) { BCECmpChain CmpChain(Blocks, Phi); if (CmpChain.size() < 2) { - DEBUG(dbgs() << "skip: only one compare block\n"); + LLVM_DEBUG(dbgs() << "skip: only one compare block\n"); return false; } @@ -759,7 +762,7 @@ class MergeICmps : public FunctionPass { PreservedAnalyses MergeICmps::runImpl(Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI) { - DEBUG(dbgs() << "MergeICmpsPass: " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "MergeICmpsPass: " << F.getName() << "\n"); // We only try merging comparisons if the target wants to expand memcmp later. // The rationale is to avoid turning small chains into memcmp calls. diff --git a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp index 07d45144758..72ab175deca 100644 --- a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp +++ b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp @@ -180,7 +180,7 @@ bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start, /// StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1, StoreInst *Store0) { - DEBUG(dbgs() << "can Sink? : "; Store0->dump(); dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "can Sink? 
: "; Store0->dump(); dbgs() << "\n"); BasicBlock *BB0 = Store0->getParent(); for (Instruction &Inst : reverse(*BB1)) { auto *Store1 = dyn_cast<StoreInst>(&Inst); @@ -229,9 +229,9 @@ bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0, if (A0 && A1 && A0->isIdenticalTo(A1) && A0->hasOneUse() && (A0->getParent() == S0->getParent()) && A1->hasOneUse() && (A1->getParent() == S1->getParent()) && isa<GetElementPtrInst>(A0)) { - DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump(); - dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n"; - dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump(); + dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n"; + dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n"); // Hoist the instruction. BasicBlock::iterator InsertPt = BB->getFirstInsertionPt(); // Intersect optional metadata. @@ -313,7 +313,7 @@ bool MergedLoadStoreMotion::mergeStores(BasicBlock *T) { break; RBI = Pred0->rbegin(); RBE = Pred0->rend(); - DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump()); + LLVM_DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump()); } } return MergedStores; @@ -323,7 +323,7 @@ bool MergedLoadStoreMotion::run(Function &F, AliasAnalysis &AA) { this->AA = &AA; bool Changed = false; - DEBUG(dbgs() << "Instruction Merger\n"); + LLVM_DEBUG(dbgs() << "Instruction Merger\n"); // Merge unconditional branches, allowing PRE to catch more // optimization opportunities. diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp index 369f59ec3c3..0cf9979b40a 100644 --- a/llvm/lib/Transforms/Scalar/NewGVN.cpp +++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp @@ -221,13 +221,13 @@ private: Components.resize(Components.size() + 1); auto &Component = Components.back(); Component.insert(I); - DEBUG(dbgs() << "Component root is " << *I << "\n"); + LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n"); InComponent.insert(I); ValueToComponent[I] = ComponentID; // Pop a component off the stack and label it. 
while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) { auto *Member = Stack.back(); - DEBUG(dbgs() << "Component member is " << *Member << "\n"); + LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n"); Component.insert(Member); InComponent.insert(Member); ValueToComponent[Member] = ComponentID; @@ -1068,8 +1068,8 @@ const Expression *NewGVN::checkSimplificationResults(Expression *E, return nullptr; if (auto *C = dyn_cast<Constant>(V)) { if (I) - DEBUG(dbgs() << "Simplified " << *I << " to " - << " constant " << *C << "\n"); + LLVM_DEBUG(dbgs() << "Simplified " << *I << " to " + << " constant " << *C << "\n"); NumGVNOpsSimplified++; assert(isa<BasicExpression>(E) && "We should always have had a basic expression here"); @@ -1077,8 +1077,8 @@ const Expression *NewGVN::checkSimplificationResults(Expression *E, return createConstantExpression(C); } else if (isa<Argument>(V) || isa<GlobalVariable>(V)) { if (I) - DEBUG(dbgs() << "Simplified " << *I << " to " - << " variable " << *V << "\n"); + LLVM_DEBUG(dbgs() << "Simplified " << *I << " to " + << " variable " << *V << "\n"); deleteExpression(E); return createVariableExpression(V); } @@ -1101,8 +1101,8 @@ const Expression *NewGVN::checkSimplificationResults(Expression *E, } if (I) - DEBUG(dbgs() << "Simplified " << *I << " to " - << " expression " << *CC->getDefiningExpr() << "\n"); + LLVM_DEBUG(dbgs() << "Simplified " << *I << " to " + << " expression " << *CC->getDefiningExpr() << "\n"); NumGVNOpsSimplified++; deleteExpression(E); return CC->getDefiningExpr(); @@ -1422,8 +1422,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr, if (Offset >= 0) { if (auto *C = dyn_cast<Constant>( lookupOperandLeader(DepSI->getValueOperand()))) { - DEBUG(dbgs() << "Coercing load from store " << *DepSI << " to constant " - << *C << "\n"); + LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI + << " to constant " << *C << "\n"); return createConstantExpression( getConstantStoreValueForLoad(C, Offset, LoadType, DL)); } @@ -1438,8 +1438,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr, if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI))) if (auto *PossibleConstant = getConstantLoadValueForLoad(C, Offset, LoadType, DL)) { - DEBUG(dbgs() << "Coercing load from load " << *LI << " to constant " - << *PossibleConstant << "\n"); + LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI + << " to constant " << *PossibleConstant << "\n"); return createConstantExpression(PossibleConstant); } } @@ -1448,8 +1448,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr, if (Offset >= 0) { if (auto *PossibleConstant = getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) { - DEBUG(dbgs() << "Coercing load from meminst " << *DepMI - << " to constant " << *PossibleConstant << "\n"); + LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI + << " to constant " << *PossibleConstant << "\n"); return createConstantExpression(PossibleConstant); } } @@ -1530,7 +1530,7 @@ NewGVN::performSymbolicPredicateInfoEvaluation(Instruction *I) const { if (!PI) return nullptr; - DEBUG(dbgs() << "Found predicate info from instruction !\n"); + LLVM_DEBUG(dbgs() << "Found predicate info from instruction !\n"); auto *PWC = dyn_cast<PredicateWithCondition>(PI); if (!PWC) @@ -1570,7 +1570,7 @@ NewGVN::performSymbolicPredicateInfoEvaluation(Instruction *I) const { return nullptr; if (CopyOf != Cmp->getOperand(0) && CopyOf != Cmp->getOperand(1)) { - DEBUG(dbgs() << "Copy is not of any condition 
operands!\n"); + LLVM_DEBUG(dbgs() << "Copy is not of any condition operands!\n"); return nullptr; } Value *FirstOp = lookupOperandLeader(Cmp->getOperand(0)); @@ -1653,10 +1653,11 @@ bool NewGVN::setMemoryClass(const MemoryAccess *From, CongruenceClass *NewClass) { assert(NewClass && "Every MemoryAccess should be getting mapped to a non-null class"); - DEBUG(dbgs() << "Setting " << *From); - DEBUG(dbgs() << " equivalent to congruence class "); - DEBUG(dbgs() << NewClass->getID() << " with current MemoryAccess leader "); - DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n"); + LLVM_DEBUG(dbgs() << "Setting " << *From); + LLVM_DEBUG(dbgs() << " equivalent to congruence class "); + LLVM_DEBUG(dbgs() << NewClass->getID() + << " with current MemoryAccess leader "); + LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n"); auto LookupResult = MemoryAccessToClass.find(From); bool Changed = false; @@ -1674,11 +1675,11 @@ bool NewGVN::setMemoryClass(const MemoryAccess *From, OldClass->setMemoryLeader(nullptr); } else { OldClass->setMemoryLeader(getNextMemoryLeader(OldClass)); - DEBUG(dbgs() << "Memory class leader change for class " - << OldClass->getID() << " to " - << *OldClass->getMemoryLeader() - << " due to removal of a memory member " << *From - << "\n"); + LLVM_DEBUG(dbgs() << "Memory class leader change for class " + << OldClass->getID() << " to " + << *OldClass->getMemoryLeader() + << " due to removal of a memory member " << *From + << "\n"); markMemoryLeaderChangeTouched(OldClass); } } @@ -1754,12 +1755,13 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps, // If it has undef at this point, it means there are no-non-undef arguments, // and thus, the value of the phi node must be undef. if (HasUndef) { - DEBUG(dbgs() << "PHI Node " << *I - << " has no non-undef arguments, valuing it as undef\n"); + LLVM_DEBUG( + dbgs() << "PHI Node " << *I + << " has no non-undef arguments, valuing it as undef\n"); return createConstantExpression(UndefValue::get(I->getType())); } - DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n"); + LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n"); deleteExpression(E); return createDeadExpression(); } @@ -1798,8 +1800,8 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps, InstrToDFSNum(AllSameValue) > InstrToDFSNum(I)) return E; NumGVNPhisAllSame++; - DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue - << "\n"); + LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue + << "\n"); deleteExpression(E); return createVariableOrConstant(AllSameValue); } @@ -2092,7 +2094,7 @@ void NewGVN::markUsersTouched(Value *V) { } void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const { - DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n"); + LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n"); MemoryToUsers[To].insert(U); } @@ -2228,8 +2230,9 @@ void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I, (isa<StoreInst>(I) && NewClass->getStoreCount() == 1)); NewClass->setMemoryLeader(InstMA); // Mark it touched if we didn't just create a singleton - DEBUG(dbgs() << "Memory class leader change for class " << NewClass->getID() - << " due to new memory instruction becoming leader\n"); + LLVM_DEBUG(dbgs() << "Memory class leader change for class " + << NewClass->getID() + << " due to new memory instruction becoming leader\n"); markMemoryLeaderChangeTouched(NewClass); } setMemoryClass(InstMA, NewClass); @@ -2237,10 
+2240,10 @@ void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I, if (OldClass->getMemoryLeader() == InstMA) { if (!OldClass->definesNoMemory()) { OldClass->setMemoryLeader(getNextMemoryLeader(OldClass)); - DEBUG(dbgs() << "Memory class leader change for class " - << OldClass->getID() << " to " - << *OldClass->getMemoryLeader() - << " due to removal of old leader " << *InstMA << "\n"); + LLVM_DEBUG(dbgs() << "Memory class leader change for class " + << OldClass->getID() << " to " + << *OldClass->getMemoryLeader() + << " due to removal of old leader " << *InstMA << "\n"); markMemoryLeaderChangeTouched(OldClass); } else OldClass->setMemoryLeader(nullptr); @@ -2277,9 +2280,10 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, NewClass->setStoredValue(SE->getStoredValue()); markValueLeaderChangeTouched(NewClass); // Shift the new class leader to be the store - DEBUG(dbgs() << "Changing leader of congruence class " - << NewClass->getID() << " from " << *NewClass->getLeader() - << " to " << *SI << " because store joined class\n"); + LLVM_DEBUG(dbgs() << "Changing leader of congruence class " + << NewClass->getID() << " from " + << *NewClass->getLeader() << " to " << *SI + << " because store joined class\n"); // If we changed the leader, we have to mark it changed because we don't // know what it will do to symbolic evaluation. NewClass->setLeader(SI); @@ -2299,8 +2303,8 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, // See if we destroyed the class or need to swap leaders. if (OldClass->empty() && OldClass != TOPClass) { if (OldClass->getDefiningExpr()) { - DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr() - << " from table\n"); + LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr() + << " from table\n"); // We erase it as an exact expression to make sure we don't just erase an // equivalent one. auto Iter = ExpressionToClass.find_as( @@ -2317,8 +2321,8 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, // When the leader changes, the value numbering of // everything may change due to symbolization changes, so we need to // reprocess. - DEBUG(dbgs() << "Value class leader change for class " << OldClass->getID() - << "\n"); + LLVM_DEBUG(dbgs() << "Value class leader change for class " + << OldClass->getID() << "\n"); ++NumGVNLeaderChanges; // Destroy the stored value if there are no more stores to represent it. 
// Note that this is basically clean up for the expression removal that @@ -2381,12 +2385,14 @@ void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) { "VariableExpression should have been handled already"); EClass = NewClass; - DEBUG(dbgs() << "Created new congruence class for " << *I - << " using expression " << *E << " at " << NewClass->getID() - << " and leader " << *(NewClass->getLeader())); + LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I + << " using expression " << *E << " at " + << NewClass->getID() << " and leader " + << *(NewClass->getLeader())); if (NewClass->getStoredValue()) - DEBUG(dbgs() << " and stored value " << *(NewClass->getStoredValue())); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " and stored value " + << *(NewClass->getStoredValue())); + LLVM_DEBUG(dbgs() << "\n"); } else { EClass = lookupResult.first->second; if (isa<ConstantExpression>(E)) @@ -2404,8 +2410,8 @@ void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) { bool ClassChanged = IClass != EClass; bool LeaderChanged = LeaderChanges.erase(I); if (ClassChanged || LeaderChanged) { - DEBUG(dbgs() << "New class " << EClass->getID() << " for expression " << *E - << "\n"); + LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression " + << *E << "\n"); if (ClassChanged) { moveValueToNewCongruenceClass(I, E, IClass, EClass); markPhiOfOpsChanged(E); @@ -2443,13 +2449,15 @@ void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) { if (ReachableEdges.insert({From, To}).second) { // If this block wasn't reachable before, all instructions are touched. if (ReachableBlocks.insert(To).second) { - DEBUG(dbgs() << "Block " << getBlockName(To) << " marked reachable\n"); + LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) + << " marked reachable\n"); const auto &InstRange = BlockInstRange.lookup(To); TouchedInstructions.set(InstRange.first, InstRange.second); } else { - DEBUG(dbgs() << "Block " << getBlockName(To) - << " was reachable, but new edge {" << getBlockName(From) - << "," << getBlockName(To) << "} to it found\n"); + LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) + << " was reachable, but new edge {" + << getBlockName(From) << "," << getBlockName(To) + << "} to it found\n"); // We've made an edge reachable to an existing block, which may // impact predicates. 
Otherwise, only mark the phi nodes as touched, as @@ -2496,12 +2504,12 @@ void NewGVN::processOutgoingEdges(TerminatorInst *TI, BasicBlock *B) { BasicBlock *FalseSucc = BR->getSuccessor(1); if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) { if (CI->isOne()) { - DEBUG(dbgs() << "Condition for Terminator " << *TI - << " evaluated to true\n"); + LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI + << " evaluated to true\n"); updateReachableEdge(B, TrueSucc); } else if (CI->isZero()) { - DEBUG(dbgs() << "Condition for Terminator " << *TI - << " evaluated to false\n"); + LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI + << " evaluated to false\n"); updateReachableEdge(B, FalseSucc); } } else { @@ -2686,8 +2694,8 @@ Value *NewGVN::findLeaderForInst(Instruction *TransInst, auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB); if (!FoundVal) { ExpressionToPhiOfOps[E].insert(OrigInst); - DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst - << " in block " << getBlockName(PredBB) << "\n"); + LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst + << " in block " << getBlockName(PredBB) << "\n"); return nullptr; } if (auto *SI = dyn_cast<StoreInst>(FoundVal)) @@ -2736,15 +2744,16 @@ NewGVN::makePossiblePHIOfOps(Instruction *I, auto *ValuePHI = RealToTemp.lookup(Op); if (!ValuePHI) continue; - DEBUG(dbgs() << "Found possible dependent phi of ops\n"); + LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n"); Op = ValuePHI; } OpPHI = cast<PHINode>(Op); if (!SamePHIBlock) { SamePHIBlock = getBlockForValue(OpPHI); } else if (SamePHIBlock != getBlockForValue(OpPHI)) { - DEBUG(dbgs() - << "PHIs for operands are not all in the same block, aborting\n"); + LLVM_DEBUG( + dbgs() + << "PHIs for operands are not all in the same block, aborting\n"); return nullptr; } // No point in doing this for one-operand phis. 
@@ -2812,25 +2821,26 @@ NewGVN::makePossiblePHIOfOps(Instruction *I, } Deps.insert(CurrentDeps.begin(), CurrentDeps.end()); } else { - DEBUG(dbgs() << "Skipping phi of ops operand for incoming block " - << getBlockName(PredBB) - << " because the block is unreachable\n"); + LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block " + << getBlockName(PredBB) + << " because the block is unreachable\n"); FoundVal = UndefValue::get(I->getType()); RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I)); } PHIOps.push_back({FoundVal, PredBB}); - DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in " - << getBlockName(PredBB) << "\n"); + LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in " + << getBlockName(PredBB) << "\n"); } for (auto Dep : Deps) addAdditionalUsers(Dep, I); sortPHIOps(PHIOps); auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock); if (isa<ConstantExpression>(E) || isa<VariableExpression>(E)) { - DEBUG(dbgs() - << "Not creating real PHI of ops because it simplified to existing " - "value or constant\n"); + LLVM_DEBUG( + dbgs() + << "Not creating real PHI of ops because it simplified to existing " + "value or constant\n"); return E; } auto *ValuePHI = RealToTemp.lookup(I); @@ -2855,7 +2865,8 @@ NewGVN::makePossiblePHIOfOps(Instruction *I, } } RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I)); - DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I << "\n"); + LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I + << "\n"); return E; } @@ -2927,8 +2938,9 @@ void NewGVN::initializeCongruenceClasses(Function &F) { void NewGVN::cleanupTables() { for (unsigned i = 0, e = CongruenceClasses.size(); i != e; ++i) { - DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID() - << " has " << CongruenceClasses[i]->size() << " members\n"); + LLVM_DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID() + << " has " << CongruenceClasses[i]->size() + << " members\n"); // Make sure we delete the congruence class (probably worth switching to // a unique_ptr at some point. delete CongruenceClasses[i]; @@ -2998,7 +3010,7 @@ std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B, // we change its DFS number so that it doesn't get value numbered. if (isInstructionTriviallyDead(&I, TLI)) { InstrDFS[&I] = 0; - DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n"); + LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n"); markInstructionForDeletion(&I); continue; } @@ -3064,9 +3076,10 @@ void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) { [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; }); if (AllEqual) - DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue << "\n"); + LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue + << "\n"); else - DEBUG(dbgs() << "Memory Phi value numbered to itself\n"); + LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n"); // If it's equal to something, it's in that class. Otherwise, it has to be in // a class where it is the leader (other things may be equivalent to it, but // it needs to start off in its own class, which means it must have been the @@ -3085,7 +3098,7 @@ void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) { // Value number a single instruction, symbolically evaluating, performing // congruence finding, and updating mappings. 
void NewGVN::valueNumberInstruction(Instruction *I) { - DEBUG(dbgs() << "Processing instruction " << *I << "\n"); + LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n"); if (!I->isTerminator()) { const Expression *Symbolized = nullptr; SmallPtrSet<Value *, 2> Visited; @@ -3271,7 +3284,7 @@ void NewGVN::verifyMemoryCongruency() const { // and redoing the iteration to see if anything changed. void NewGVN::verifyIterationSettled(Function &F) { #ifndef NDEBUG - DEBUG(dbgs() << "Beginning iteration verification\n"); + LLVM_DEBUG(dbgs() << "Beginning iteration verification\n"); if (DebugCounter::isCounterSet(VNCounter)) DebugCounter::setCounterValue(VNCounter, StartingVNCounter); @@ -3389,9 +3402,9 @@ void NewGVN::iterateTouchedInstructions() { // If it's not reachable, erase any touched instructions and move on. if (!BlockReachable) { TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second); - DEBUG(dbgs() << "Skipping instructions in block " - << getBlockName(CurrBlock) - << " because it is unreachable\n"); + LLVM_DEBUG(dbgs() << "Skipping instructions in block " + << getBlockName(CurrBlock) + << " because it is unreachable\n"); continue; } updateProcessedCount(CurrBlock); @@ -3401,7 +3414,7 @@ void NewGVN::iterateTouchedInstructions() { TouchedInstructions.reset(InstrNum); if (auto *MP = dyn_cast<MemoryPhi>(V)) { - DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n"); + LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n"); valueNumberMemoryPhi(MP); } else if (auto *I = dyn_cast<Instruction>(V)) { valueNumberInstruction(I); @@ -3471,8 +3484,8 @@ bool NewGVN::runGVN() { // Initialize the touched instructions to include the entry block. const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock()); TouchedInstructions.set(InstRange.first, InstRange.second); - DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock()) - << " marked reachable\n"); + LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock()) + << " marked reachable\n"); ReachableBlocks.insert(&F.getEntryBlock()); iterateTouchedInstructions(); @@ -3497,8 +3510,8 @@ bool NewGVN::runGVN() { }; for (auto &BB : make_filter_range(F, UnreachableBlockPred)) { - DEBUG(dbgs() << "We believe block " << getBlockName(&BB) - << " is unreachable\n"); + LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB) + << " is unreachable\n"); deleteInstructionsInBlock(&BB); Changed = true; } @@ -3720,7 +3733,7 @@ static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { } void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) { - DEBUG(dbgs() << " BasicBlock Dead:" << *BB); + LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB); ++NumGVNBlocksDeleted; // Delete the instructions backwards, as it has a reduced likelihood of having @@ -3747,12 +3760,12 @@ void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) { } void NewGVN::markInstructionForDeletion(Instruction *I) { - DEBUG(dbgs() << "Marking " << *I << " for deletion\n"); + LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n"); InstructionsToErase.insert(I); } void NewGVN::replaceInstruction(Instruction *I, Value *V) { - DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n"); + LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n"); patchAndReplaceAllUsesWith(I, V); // We save the actual erasing to avoid invalidating memory // dependencies until we are done with everything. 
@@ -3878,9 +3891,10 @@ bool NewGVN::eliminateInstructions(Function &F) { auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) { for (auto &Operand : PHI->incoming_values()) if (!ReachableEdges.count({PHI->getIncomingBlock(Operand), BB})) { - DEBUG(dbgs() << "Replacing incoming value of " << PHI << " for block " - << getBlockName(PHI->getIncomingBlock(Operand)) - << " with undef due to it being unreachable\n"); + LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI + << " for block " + << getBlockName(PHI->getIncomingBlock(Operand)) + << " with undef due to it being unreachable\n"); Operand.set(UndefValue::get(PHI->getType())); } }; @@ -3912,7 +3926,8 @@ bool NewGVN::eliminateInstructions(Function &F) { // Map to store the use counts DenseMap<const Value *, unsigned int> UseCounts; for (auto *CC : reverse(CongruenceClasses)) { - DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID() << "\n"); + LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID() + << "\n"); // Track the equivalent store info so we can decide whether to try // dead store elimination. SmallVector<ValueDFS, 8> PossibleDeadStores; @@ -3950,8 +3965,8 @@ bool NewGVN::eliminateInstructions(Function &F) { MembersLeft.insert(Member); continue; } - DEBUG(dbgs() << "Found replacement " << *(Leader) << " for " << *Member - << "\n"); + LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for " + << *Member << "\n"); auto *I = cast<Instruction>(Member); assert(Leader != I && "About to accidentally remove our leader"); replaceInstruction(I, Leader); @@ -3991,24 +4006,24 @@ bool NewGVN::eliminateInstructions(Function &F) { // remove from temp instruction list. AllTempInstructions.erase(PN); auto *DefBlock = getBlockForValue(Def); - DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def - << " into block " - << getBlockName(getBlockForValue(Def)) << "\n"); + LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def + << " into block " + << getBlockName(getBlockForValue(Def)) << "\n"); PN->insertBefore(&DefBlock->front()); Def = PN; NumGVNPHIOfOpsEliminations++; } if (EliminationStack.empty()) { - DEBUG(dbgs() << "Elimination Stack is empty\n"); + LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n"); } else { - DEBUG(dbgs() << "Elimination Stack Top DFS numbers are (" - << EliminationStack.dfs_back().first << "," - << EliminationStack.dfs_back().second << ")\n"); + LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are (" + << EliminationStack.dfs_back().first << "," + << EliminationStack.dfs_back().second << ")\n"); } - DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << "," - << MemberDFSOut << ")\n"); + LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << "," + << MemberDFSOut << ")\n"); // First, we see if we are out of scope or empty. If so, // and there equivalences, we try to replace the top of // stack with equivalences (if it's on the stack, it must @@ -4090,8 +4105,9 @@ bool NewGVN::eliminateInstructions(Function &F) { // Don't replace our existing users with ourselves. if (U->get() == DominatingLeader) continue; - DEBUG(dbgs() << "Found replacement " << *DominatingLeader << " for " - << *U->get() << " in " << *(U->getUser()) << "\n"); + LLVM_DEBUG(dbgs() + << "Found replacement " << *DominatingLeader << " for " + << *U->get() << " in " << *(U->getUser()) << "\n"); // If we replaced something in an instruction, handle the patching of // metadata. 
Skip this if we are replacing predicateinfo with its @@ -4157,8 +4173,8 @@ bool NewGVN::eliminateInstructions(Function &F) { (void)Leader; assert(DT->dominates(Leader->getParent(), Member->getParent())); // Member is dominater by Leader, and thus dead - DEBUG(dbgs() << "Marking dead store " << *Member - << " that is dominated by " << *Leader << "\n"); + LLVM_DEBUG(dbgs() << "Marking dead store " << *Member + << " that is dominated by " << *Leader << "\n"); markInstructionForDeletion(Member); CC->erase(Member); ++NumGVNDeadStores; diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp index 6b1ae2c3cab..a06c424f82b 100644 --- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp +++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp @@ -323,7 +323,7 @@ bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) { // avoiding the runtime cost of the actual safepoint. if (!AllBackedges) { if (mustBeFiniteCountedLoop(L, SE, Pred)) { - DEBUG(dbgs() << "skipping safepoint placement in finite loop\n"); + LLVM_DEBUG(dbgs() << "skipping safepoint placement in finite loop\n"); FiniteExecution++; continue; } @@ -332,7 +332,9 @@ bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) { // Note: This is only semantically legal since we won't do any further // IPO or inlining before the actual call insertion.. If we hadn't, we // might latter loose this call safepoint. - DEBUG(dbgs() << "skipping safepoint placement due to unconditional call\n"); + LLVM_DEBUG( + dbgs() + << "skipping safepoint placement due to unconditional call\n"); CallInLoop++; continue; } @@ -348,7 +350,7 @@ bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) { // variables) and branches to the true header TerminatorInst *Term = Pred->getTerminator(); - DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term); + LLVM_DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term); PollLocations.push_back(Term); } diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp index 0d1d57d6486..bb9802a66ea 100644 --- a/llvm/lib/Transforms/Scalar/Reassociate.cpp +++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp @@ -169,8 +169,8 @@ void ReassociatePass::BuildRankMap(Function &F, // Assign distinct ranks to function arguments. for (auto &Arg : F.args()) { ValueRankMap[&Arg] = ++Rank; - DEBUG(dbgs() << "Calculated Rank[" << Arg.getName() << "] = " << Rank - << "\n"); + LLVM_DEBUG(dbgs() << "Calculated Rank[" << Arg.getName() << "] = " << Rank + << "\n"); } // Traverse basic blocks in ReversePostOrder @@ -210,7 +210,8 @@ unsigned ReassociatePass::getRank(Value *V) { !BinaryOperator::isFNeg(I)) ++Rank; - DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank << "\n"); + LLVM_DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank + << "\n"); return ValueRankMap[I] = Rank; } @@ -445,7 +446,7 @@ using RepeatedValue = std::pair<Value*, APInt>; /// type and thus make the expression bigger. static bool LinearizeExprTree(BinaryOperator *I, SmallVectorImpl<RepeatedValue> &Ops) { - DEBUG(dbgs() << "LINEARIZE: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "LINEARIZE: " << *I << '\n'); unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits(); unsigned Opcode = I->getOpcode(); assert(I->isAssociative() && I->isCommutative() && @@ -494,14 +495,14 @@ static bool LinearizeExprTree(BinaryOperator *I, for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx) { // Visit operands. 
Value *Op = I->getOperand(OpIdx); APInt Weight = P.second; // Number of paths to this operand. - DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n"); + LLVM_DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n"); assert(!Op->use_empty() && "No uses, so how did we get to it?!"); // If this is a binary operation of the right kind with only one use then // add its operands to the expression. if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { assert(Visited.insert(Op).second && "Not first visit!"); - DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n"); + LLVM_DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n"); Worklist.push_back(std::make_pair(BO, Weight)); continue; } @@ -514,7 +515,8 @@ static bool LinearizeExprTree(BinaryOperator *I, if (!Op->hasOneUse()) { // This value has uses not accounted for by the expression, so it is // not safe to modify. Mark it as being a leaf. - DEBUG(dbgs() << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n"); + LLVM_DEBUG(dbgs() + << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n"); LeafOrder.push_back(Op); Leaves[Op] = Weight; continue; @@ -540,7 +542,7 @@ static bool LinearizeExprTree(BinaryOperator *I, // to the expression, then no longer consider it to be a leaf and add // its operands to the expression. if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { - DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n"); + LLVM_DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n"); Worklist.push_back(std::make_pair(BO, It->second)); Leaves.erase(It); continue; @@ -573,9 +575,10 @@ static bool LinearizeExprTree(BinaryOperator *I, if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) if ((Opcode == Instruction::Mul && BinaryOperator::isNeg(BO)) || (Opcode == Instruction::FMul && BinaryOperator::isFNeg(BO))) { - DEBUG(dbgs() << "MORPH LEAF: " << *Op << " (" << Weight << ") TO "); + LLVM_DEBUG(dbgs() + << "MORPH LEAF: " << *Op << " (" << Weight << ") TO "); BO = LowerNegateToMultiply(BO); - DEBUG(dbgs() << *BO << '\n'); + LLVM_DEBUG(dbgs() << *BO << '\n'); Worklist.push_back(std::make_pair(BO, Weight)); Changed = true; continue; @@ -583,7 +586,7 @@ static bool LinearizeExprTree(BinaryOperator *I, // Failed to morph into an expression of the right type. This really is // a leaf. - DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n"); + LLVM_DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n"); assert(!isReassociableOp(Op, Opcode) && "Value was morphed?"); LeafOrder.push_back(Op); Leaves[Op] = Weight; @@ -675,9 +678,9 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I, if (NewLHS == OldRHS && NewRHS == OldLHS) { // The order of the operands was reversed. Swap them. - DEBUG(dbgs() << "RA: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n'); Op->swapOperands(); - DEBUG(dbgs() << "TO: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n'); MadeChange = true; ++NumChanged; break; @@ -685,7 +688,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I, // The new operation differs non-trivially from the original. Overwrite // the old operands with the new ones. 
- DEBUG(dbgs() << "RA: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n'); if (NewLHS != OldLHS) { BinaryOperator *BO = isReassociableOp(OldLHS, Opcode); if (BO && !NotRewritable.count(BO)) @@ -698,7 +701,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I, NodesToRewrite.push_back(BO); Op->setOperand(1, NewRHS); } - DEBUG(dbgs() << "TO: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n'); ExpressionChanged = Op; MadeChange = true; @@ -711,7 +714,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I, // while the right-hand side will be the current element of Ops. Value *NewRHS = Ops[i].Op; if (NewRHS != Op->getOperand(1)) { - DEBUG(dbgs() << "RA: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n'); if (NewRHS == Op->getOperand(0)) { // The new right-hand side was already present as the left operand. If // we are lucky then swapping the operands will sort out both of them. @@ -724,7 +727,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I, Op->setOperand(1, NewRHS); ExpressionChanged = Op; } - DEBUG(dbgs() << "TO: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n'); MadeChange = true; ++NumChanged; } @@ -756,9 +759,9 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I, NewOp = NodesToRewrite.pop_back_val(); } - DEBUG(dbgs() << "RA: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n'); Op->setOperand(0, NewOp); - DEBUG(dbgs() << "TO: " << *Op << '\n'); + LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n'); ExpressionChanged = Op; MadeChange = true; ++NumChanged; @@ -941,7 +944,7 @@ static BinaryOperator *BreakUpSubtract(Instruction *Sub, Sub->replaceAllUsesWith(New); New->setDebugLoc(Sub->getDebugLoc()); - DEBUG(dbgs() << "Negated: " << *New << '\n'); + LLVM_DEBUG(dbgs() << "Negated: " << *New << '\n'); return New; } @@ -1427,7 +1430,8 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I, ++NumFound; } while (i != Ops.size() && Ops[i].Op == TheOp); - DEBUG(dbgs() << "\nFACTORING [" << NumFound << "]: " << *TheOp << '\n'); + LLVM_DEBUG(dbgs() << "\nFACTORING [" << NumFound << "]: " << *TheOp + << '\n'); ++NumFactor; // Insert a new multiply. @@ -1565,7 +1569,8 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I, // If any factor occurred more than one time, we can pull it out. if (MaxOcc > 1) { - DEBUG(dbgs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << '\n'); + LLVM_DEBUG(dbgs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal + << '\n'); ++NumFactor; // Create a new instruction that uses the MaxOccVal twice. If we don't do @@ -1888,7 +1893,7 @@ void ReassociatePass::RecursivelyEraseDeadInsts(Instruction *I, /// Zap the given instruction, adding interesting operands to the work list. void ReassociatePass::EraseInst(Instruction *I) { assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!"); - DEBUG(dbgs() << "Erasing dead inst: "; I->dump()); + LLVM_DEBUG(dbgs() << "Erasing dead inst: "; I->dump()); SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end()); // Erase the dead instruction. @@ -2139,7 +2144,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) { ValueEntry(getRank(E.first), E.first)); } - DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n'); // Now that we have linearized the tree to a list and have gathered all of // the operands and their ranks, sort the operands by their rank. 
Use a @@ -2157,7 +2162,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) { return; // This expression tree simplified to something that isn't a tree, // eliminate it. - DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n'); + LLVM_DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n'); I->replaceAllUsesWith(V); if (Instruction *VI = dyn_cast<Instruction>(V)) if (I->getDebugLoc()) @@ -2188,7 +2193,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) { } } - DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n'); + LLVM_DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n'); if (Ops.size() == 1) { if (Ops[0].Op == I) diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp index bf851ef2e71..342ba9bc145 100644 --- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp +++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp @@ -616,8 +616,8 @@ static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) { Value *&Cached = Cache[I]; if (!Cached) { Cached = findBaseDefiningValue(I).BDV; - DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> " - << Cached->getName() << "\n"); + LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> " + << Cached->getName() << "\n"); } assert(Cache[I] != nullptr); return Cached; @@ -848,9 +848,9 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) { } #ifndef NDEBUG - DEBUG(dbgs() << "States after initialization:\n"); + LLVM_DEBUG(dbgs() << "States after initialization:\n"); for (auto Pair : States) { - DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); + LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); } #endif @@ -923,9 +923,9 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) { } #ifndef NDEBUG - DEBUG(dbgs() << "States after meet iteration:\n"); + LLVM_DEBUG(dbgs() << "States after meet iteration:\n"); for (auto Pair : States) { - DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); + LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); } #endif @@ -1124,10 +1124,11 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) { assert(BDV && Base); assert(!isKnownBaseResult(BDV) && "why did it get added?"); - DEBUG(dbgs() << "Updating base value cache" - << " for: " << BDV->getName() << " from: " - << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none") - << " to: " << Base->getName() << "\n"); + LLVM_DEBUG( + dbgs() << "Updating base value cache" + << " for: " << BDV->getName() << " from: " + << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none") + << " to: " << Base->getName() << "\n"); if (Cache.count(BDV)) { assert(isKnownBaseResult(Base) && diff --git a/llvm/lib/Transforms/Scalar/SCCP.cpp b/llvm/lib/Transforms/Scalar/SCCP.cpp index 5a6697fd9fe..27c51f07f1f 100644 --- a/llvm/lib/Transforms/Scalar/SCCP.cpp +++ b/llvm/lib/Transforms/Scalar/SCCP.cpp @@ -259,7 +259,7 @@ public: bool MarkBlockExecutable(BasicBlock *BB) { if (!BBExecutable.insert(BB).second) return false; - DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n'); + LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n'); BBWorkList.push_back(BB); // Add the block to the work list! return true; } @@ -415,7 +415,7 @@ private: // the users of the instruction are updated later. 
void markConstant(LatticeVal &IV, Value *V, Constant *C) { if (!IV.markConstant(C)) return; - DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n'); + LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n'); pushToWorkList(IV, V); } @@ -428,7 +428,7 @@ private: assert(!V->getType()->isStructTy() && "structs should use mergeInValue"); LatticeVal &IV = ValueState[V]; IV.markForcedConstant(C); - DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n'); + LLVM_DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n'); pushToWorkList(IV, V); } @@ -438,11 +438,10 @@ private: void markOverdefined(LatticeVal &IV, Value *V) { if (!IV.markOverdefined()) return; - DEBUG(dbgs() << "markOverdefined: "; - if (auto *F = dyn_cast<Function>(V)) - dbgs() << "Function '" << F->getName() << "'\n"; - else - dbgs() << *V << '\n'); + LLVM_DEBUG(dbgs() << "markOverdefined: "; + if (auto *F = dyn_cast<Function>(V)) dbgs() + << "Function '" << F->getName() << "'\n"; + else dbgs() << *V << '\n'); // Only instructions go on the work list pushToWorkList(IV, V); } @@ -540,8 +539,8 @@ private: // If the destination is already executable, we just made an *edge* // feasible that wasn't before. Revisit the PHI nodes in the block // because they have potentially new operands. - DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() - << " -> " << Dest->getName() << '\n'); + LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() + << " -> " << Dest->getName() << '\n'); for (PHINode &PN : Dest->phis()) visitPHINode(PN); @@ -612,7 +611,7 @@ private: void visitInstruction(Instruction &I) { // All the instructions we don't do any special handling for just // go to overdefined. - DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n'); + LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n'); markOverdefined(&I); } }; @@ -699,7 +698,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI, return; } - DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n'); + LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n'); llvm_unreachable("SCCP: Don't know how to handle this terminator!"); } @@ -759,7 +758,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) { return Addr->getBasicBlock() == To; } - DEBUG(dbgs() << "Unknown terminator instruction: " << *TI << '\n'); + LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << *TI << '\n'); llvm_unreachable("SCCP: Don't know how to handle this terminator!"); } @@ -1260,7 +1259,7 @@ void SCCPSolver::Solve() { while (!OverdefinedInstWorkList.empty()) { Value *I = OverdefinedInstWorkList.pop_back_val(); - DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n'); // "I" got into the work list because it either made the transition from // bottom to constant, or to overdefined. @@ -1278,7 +1277,7 @@ void SCCPSolver::Solve() { while (!InstWorkList.empty()) { Value *I = InstWorkList.pop_back_val(); - DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n'); + LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n'); // "I" got into the work list because it made the transition from undef to // constant. @@ -1298,7 +1297,7 @@ void SCCPSolver::Solve() { BasicBlock *BB = BBWorkList.back(); BBWorkList.pop_back(); - DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n'); + LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n'); // Notify all instructions in this basic block that they are newly // executable. 
@@ -1645,9 +1644,9 @@ static bool tryToReplaceWithConstantRange(SCCPSolver &Solver, Value *V) { Constant *C = A.getCompare(Icmp->getPredicate(), Icmp->getType(), B); if (C) { Icmp->replaceAllUsesWith(C); - DEBUG(dbgs() << "Replacing " << *Icmp << " with " << *C - << ", because of range information " << A << " " << B - << "\n"); + LLVM_DEBUG(dbgs() << "Replacing " << *Icmp << " with " << *C + << ", because of range information " << A << " " << B + << "\n"); Icmp->eraseFromParent(); Changed = true; } @@ -1699,12 +1698,12 @@ static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) { if (F) Solver.AddMustTailCallee(F); - DEBUG(dbgs() << " Can\'t treat the result of musttail call : " << *CI - << " as a constant\n"); + LLVM_DEBUG(dbgs() << " Can\'t treat the result of musttail call : " << *CI + << " as a constant\n"); return false; } - DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n'); + LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n'); // Replaces all of the uses of a variable with uses of the constant. V->replaceAllUsesWith(Const); @@ -1715,7 +1714,7 @@ static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) { // and return true if the function was modified. static bool runSCCP(Function &F, const DataLayout &DL, const TargetLibraryInfo *TLI) { - DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n"); + LLVM_DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n"); SCCPSolver Solver(DL, TLI); // Mark the first block of the function as being executable. @@ -1729,7 +1728,7 @@ static bool runSCCP(Function &F, const DataLayout &DL, bool ResolvedUndefs = true; while (ResolvedUndefs) { Solver.Solve(); - DEBUG(dbgs() << "RESOLVING UNDEFs\n"); + LLVM_DEBUG(dbgs() << "RESOLVING UNDEFs\n"); ResolvedUndefs = Solver.ResolvedUndefsIn(F); } @@ -1741,7 +1740,7 @@ static bool runSCCP(Function &F, const DataLayout &DL, for (BasicBlock &BB : F) { if (!Solver.isBlockExecutable(&BB)) { - DEBUG(dbgs() << " BasicBlock Dead:" << BB); + LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << BB); ++NumDeadBlocks; NumInstRemoved += removeAllNonTerminatorAndEHPadInstructions(&BB); @@ -1837,15 +1836,15 @@ static void findReturnsToZap(Function &F, // There is a non-removable musttail call site of this function. Zapping // returns is not allowed. 
if (Solver.isMustTailCallee(&F)) { - DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName() - << " due to present musttail call of it\n"); + LLVM_DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName() + << " due to present musttail call of it\n"); return; } for (BasicBlock &BB : F) { if (CallInst *CI = BB.getTerminatingMustTailCall()) { - DEBUG(dbgs() << "Can't zap return of the block due to present " - << "musttail call : " << *CI << "\n"); + LLVM_DEBUG(dbgs() << "Can't zap return of the block due to present " + << "musttail call : " << *CI << "\n"); (void)CI; return; } @@ -1900,7 +1899,7 @@ bool llvm::runIPSCCP(Module &M, const DataLayout &DL, while (ResolvedUndefs) { Solver.Solve(); - DEBUG(dbgs() << "RESOLVING UNDEFS\n"); + LLVM_DEBUG(dbgs() << "RESOLVING UNDEFS\n"); ResolvedUndefs = false; for (Function &F : M) ResolvedUndefs |= Solver.ResolvedUndefsIn(F); @@ -1930,7 +1929,7 @@ bool llvm::runIPSCCP(Module &M, const DataLayout &DL, for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { if (!Solver.isBlockExecutable(&*BB)) { - DEBUG(dbgs() << " BasicBlock Dead:" << *BB); + LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB); ++NumDeadBlocks; NumInstRemoved += @@ -2028,7 +2027,8 @@ bool llvm::runIPSCCP(Module &M, const DataLayout &DL, GlobalVariable *GV = I->first; assert(!I->second.isOverdefined() && "Overdefined values should have been taken out of the map!"); - DEBUG(dbgs() << "Found that GV '" << GV->getName() << "' is constant!\n"); + LLVM_DEBUG(dbgs() << "Found that GV '" << GV->getName() + << "' is constant!\n"); while (!GV->use_empty()) { StoreInst *SI = cast<StoreInst>(GV->user_back()); SI->eraseFromParent(); diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index b4200da99cc..2db08b72972 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -683,11 +683,12 @@ private: // Completely skip uses which have a zero size or start either before or // past the end of the allocation. if (Size == 0 || Offset.uge(AllocSize)) { - DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset - << " which has zero size or starts outside of the " - << AllocSize << " byte alloca:\n" - << " alloca: " << AS.AI << "\n" - << " use: " << I << "\n"); + LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" + << Offset + << " which has zero size or starts outside of the " + << AllocSize << " byte alloca:\n" + << " alloca: " << AS.AI << "\n" + << " use: " << I << "\n"); return markAsDead(I); } @@ -702,10 +703,11 @@ private: // them, and so have to record at least the information here. assert(AllocSize >= BeginOffset); // Established above. if (Size > AllocSize - BeginOffset) { - DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset - << " to remain within the " << AllocSize << " byte alloca:\n" - << " alloca: " << AS.AI << "\n" - << " use: " << I << "\n"); + LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" + << Offset << " to remain within the " << AllocSize + << " byte alloca:\n" + << " alloca: " << AS.AI << "\n" + << " use: " << I << "\n"); EndOffset = AllocSize; } @@ -805,11 +807,11 @@ private: // FIXME: We should instead consider the pointer to have escaped if this // function is being instrumented for addressing bugs or race conditions. 
if (Size > AllocSize || Offset.ugt(AllocSize - Size)) { - DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset - << " which extends past the end of the " << AllocSize - << " byte alloca:\n" - << " alloca: " << AS.AI << "\n" - << " use: " << SI << "\n"); + LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" + << Offset << " which extends past the end of the " + << AllocSize << " byte alloca:\n" + << " alloca: " << AS.AI << "\n" + << " use: " << SI << "\n"); return markAsDead(SI); } @@ -1236,7 +1238,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) { } static void speculatePHINodeLoads(PHINode &PN) { - DEBUG(dbgs() << " original: " << PN << "\n"); + LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); Type *LoadTy = cast<PointerType>(PN.getType())->getElementType(); IRBuilderTy PHIBuilder(&PN); @@ -1274,7 +1276,7 @@ static void speculatePHINodeLoads(PHINode &PN) { NewPN->addIncoming(Load, Pred); } - DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); + LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); PN.eraseFromParent(); } @@ -1314,7 +1316,7 @@ static bool isSafeSelectToSpeculate(SelectInst &SI) { } static void speculateSelectInstLoads(SelectInst &SI) { - DEBUG(dbgs() << " original: " << SI << "\n"); + LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); IRBuilderTy IRB(&SI); Value *TV = SI.getTrueValue(); @@ -1345,7 +1347,7 @@ static void speculateSelectInstLoads(SelectInst &SI) { Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, LI->getName() + ".sroa.speculated"); - DEBUG(dbgs() << " speculated to: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n"); LI->replaceAllUsesWith(V); LI->eraseFromParent(); } @@ -2068,7 +2070,7 @@ static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, IntegerType *Ty, uint64_t Offset, const Twine &Name) { - DEBUG(dbgs() << " start: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); IntegerType *IntTy = cast<IntegerType>(V->getType()); assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && "Element extends past full value"); @@ -2077,13 +2079,13 @@ static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); if (ShAmt) { V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); - DEBUG(dbgs() << " shifted: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); } assert(Ty->getBitWidth() <= IntTy->getBitWidth() && "Cannot extract to a larger integer!"); if (Ty != IntTy) { V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); - DEBUG(dbgs() << " trunced: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n"); } return V; } @@ -2094,10 +2096,10 @@ static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, IntegerType *Ty = cast<IntegerType>(V->getType()); assert(Ty->getBitWidth() <= IntTy->getBitWidth() && "Cannot insert a larger integer!"); - DEBUG(dbgs() << " start: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); if (Ty != IntTy) { V = IRB.CreateZExt(V, IntTy, Name + ".ext"); - DEBUG(dbgs() << " extended: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " extended: " << *V << "\n"); } assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && "Element store outside of alloca store"); @@ -2106,15 +2108,15 @@ static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, ShAmt = 8 * 
(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); if (ShAmt) { V = IRB.CreateShl(V, ShAmt, Name + ".shift"); - DEBUG(dbgs() << " shifted: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); } if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); - DEBUG(dbgs() << " masked: " << *Old << "\n"); + LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n"); V = IRB.CreateOr(Old, V, Name + ".insert"); - DEBUG(dbgs() << " inserted: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n"); } return V; } @@ -2131,7 +2133,7 @@ static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, if (NumElements == 1) { V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), Name + ".extract"); - DEBUG(dbgs() << " extract: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " extract: " << *V << "\n"); return V; } @@ -2141,7 +2143,7 @@ static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, Mask.push_back(IRB.getInt32(i)); V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), ConstantVector::get(Mask), Name + ".extract"); - DEBUG(dbgs() << " shuffle: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n"); return V; } @@ -2155,7 +2157,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, // Single element to insert. V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex), Name + ".insert"); - DEBUG(dbgs() << " insert: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " insert: " << *V << "\n"); return V; } @@ -2180,7 +2182,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, Mask.push_back(UndefValue::get(IRB.getInt32Ty())); V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), ConstantVector::get(Mask), Name + ".expand"); - DEBUG(dbgs() << " shuffle: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n"); Mask.clear(); for (unsigned i = 0; i != VecTy->getNumElements(); ++i) @@ -2188,7 +2190,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend"); - DEBUG(dbgs() << " blend: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " blend: " << *V << "\n"); return V; } @@ -2291,9 +2293,9 @@ public: IsSplittable = I->isSplittable(); IsSplit = BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; - DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); - DEBUG(AS.printSlice(dbgs(), I, "")); - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); + LLVM_DEBUG(AS.printSlice(dbgs(), I, "")); + LLVM_DEBUG(dbgs() << "\n"); // Compute the intersecting offset range. assert(BeginOffset < NewAllocaEndOffset); @@ -2323,7 +2325,7 @@ private: // Every instruction which can end up as a user must have a rewrite rule. bool visitInstruction(Instruction &I) { - DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); + LLVM_DEBUG(dbgs() << " !!!! 
Cannot rewrite: " << I << "\n"); llvm_unreachable("No rewrite rule for this instruction!"); } @@ -2427,7 +2429,7 @@ private: } bool visitLoadInst(LoadInst &LI) { - DEBUG(dbgs() << " original: " << LI << "\n"); + LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); Value *OldOp = LI.getOperand(0); assert(OldOp == OldPtr); @@ -2527,7 +2529,7 @@ private: Pass.DeadInsts.insert(&LI); deleteIfTriviallyDead(OldOp); - DEBUG(dbgs() << " to: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *V << "\n"); return !LI.isVolatile() && !IsPtrAdjusted; } @@ -2554,7 +2556,7 @@ private: Store->setAAMetadata(AATags); Pass.DeadInsts.insert(&SI); - DEBUG(dbgs() << " to: " << *Store << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); return true; } @@ -2575,12 +2577,12 @@ private: if (AATags) Store->setAAMetadata(AATags); Pass.DeadInsts.insert(&SI); - DEBUG(dbgs() << " to: " << *Store << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); return true; } bool visitStoreInst(StoreInst &SI) { - DEBUG(dbgs() << " original: " << SI << "\n"); + LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); Value *OldOp = SI.getOperand(1); assert(OldOp == OldPtr); @@ -2648,7 +2650,7 @@ private: Pass.DeadInsts.insert(&SI); deleteIfTriviallyDead(OldOp); - DEBUG(dbgs() << " to: " << *NewSI << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n"); return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); } @@ -2682,12 +2684,12 @@ private: /// Compute a vector splat for a given element value. Value *getVectorSplat(Value *V, unsigned NumElements) { V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); - DEBUG(dbgs() << " splat: " << *V << "\n"); + LLVM_DEBUG(dbgs() << " splat: " << *V << "\n"); return V; } bool visitMemSetInst(MemSetInst &II) { - DEBUG(dbgs() << " original: " << II << "\n"); + LLVM_DEBUG(dbgs() << " original: " << II << "\n"); assert(II.getRawDest() == OldPtr); AAMDNodes AATags; @@ -2726,7 +2728,7 @@ private: getSliceAlign(), II.isVolatile()); if (AATags) New->setAAMetadata(AATags); - DEBUG(dbgs() << " to: " << *New << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); return false; } @@ -2792,7 +2794,7 @@ private: II.isVolatile()); if (AATags) New->setAAMetadata(AATags); - DEBUG(dbgs() << " to: " << *New << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); return !II.isVolatile(); } @@ -2800,7 +2802,7 @@ private: // Rewriting of memory transfer instructions can be a bit tricky. We break // them into two categories: split intrinsics and unsplit intrinsics. 
- DEBUG(dbgs() << " original: " << II << "\n"); + LLVM_DEBUG(dbgs() << " original: " << II << "\n"); AAMDNodes AATags; II.getAAMetadata(AATags); @@ -2829,7 +2831,7 @@ private: II.setSourceAlignment(SliceAlign); } - DEBUG(dbgs() << " to: " << II << "\n"); + LLVM_DEBUG(dbgs() << " to: " << II << "\n"); deleteIfTriviallyDead(OldPtr); return false; } @@ -2912,7 +2914,7 @@ private: Size, II.isVolatile()); if (AATags) New->setAAMetadata(AATags); - DEBUG(dbgs() << " to: " << *New << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); return false; } @@ -2984,14 +2986,14 @@ private: IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); if (AATags) Store->setAAMetadata(AATags); - DEBUG(dbgs() << " to: " << *Store << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); return !II.isVolatile(); } bool visitIntrinsicInst(IntrinsicInst &II) { assert(II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end); - DEBUG(dbgs() << " original: " << II << "\n"); + LLVM_DEBUG(dbgs() << " original: " << II << "\n"); assert(II.getArgOperand(1) == OldPtr); // Record this instruction for deletion. @@ -3019,13 +3021,13 @@ private: New = IRB.CreateLifetimeEnd(Ptr, Size); (void)New; - DEBUG(dbgs() << " to: " << *New << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); return true; } bool visitPHINode(PHINode &PN) { - DEBUG(dbgs() << " original: " << PN << "\n"); + LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); @@ -3044,7 +3046,7 @@ private: // Replace the operands which were using the old pointer. std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); - DEBUG(dbgs() << " to: " << PN << "\n"); + LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); deleteIfTriviallyDead(OldPtr); // PHIs can't be promoted on their own, but often can be speculated. We @@ -3055,7 +3057,7 @@ private: } bool visitSelectInst(SelectInst &SI) { - DEBUG(dbgs() << " original: " << SI << "\n"); + LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && "Pointer isn't an operand!"); assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); @@ -3068,7 +3070,7 @@ private: if (SI.getOperand(2) == OldPtr) SI.setOperand(2, NewPtr); - DEBUG(dbgs() << " to: " << SI << "\n"); + LLVM_DEBUG(dbgs() << " to: " << SI << "\n"); deleteIfTriviallyDead(OldPtr); // Selects can't be promoted on their own, but often can be speculated. We @@ -3104,7 +3106,7 @@ public: /// Rewrite loads and stores through a pointer and all pointers derived from /// it. bool rewrite(Instruction &I) { - DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); + LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); enqueueUsers(I); bool Changed = false; while (!Queue.empty()) { @@ -3218,7 +3220,7 @@ private: if (AATags) Load->setAAMetadata(AATags); Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); - DEBUG(dbgs() << " to: " << *Load << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); } }; @@ -3228,7 +3230,7 @@ private: return false; // We have an aggregate being loaded, split it apart. 
- DEBUG(dbgs() << " original: " << LI << "\n"); + LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); AAMDNodes AATags; LI.getAAMetadata(AATags); LoadOpSplitter Splitter(&LI, *U, AATags); @@ -3259,7 +3261,7 @@ private: StoreInst *Store = IRB.CreateStore(ExtractValue, InBoundsGEP); if (AATags) Store->setAAMetadata(AATags); - DEBUG(dbgs() << " to: " << *Store << "\n"); + LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); } }; @@ -3271,7 +3273,7 @@ private: return false; // We have an aggregate being stored, split it apart. - DEBUG(dbgs() << " original: " << SI << "\n"); + LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); AAMDNodes AATags; SI.getAAMetadata(AATags); StoreOpSplitter Splitter(&SI, *U, AATags); @@ -3470,7 +3472,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, /// /// \returns true if any changes are made. bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { - DEBUG(dbgs() << "Pre-splitting loads and stores\n"); + LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n"); // Track the loads and stores which are candidates for pre-splitting here, in // the order they first appear during the partition scan. These give stable @@ -3502,7 +3504,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { // maybe it would make it more principled? SmallPtrSet<LoadInst *, 8> UnsplittableLoads; - DEBUG(dbgs() << " Searching for candidate loads and stores\n"); + LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n"); for (auto &P : AS.partitions()) { for (Slice &S : P) { Instruction *I = cast<Instruction>(S.getUse()->getUser()); @@ -3557,7 +3559,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { } // Record the initial split. - DEBUG(dbgs() << " Candidate: " << *I << "\n"); + LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); auto &Offsets = SplitOffsetsMap[I]; assert(Offsets.Splits.empty() && "Should not have splits the first time we see an instruction!"); @@ -3617,10 +3619,11 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { if (LoadOffsets.Splits == StoreOffsets.Splits) return false; - DEBUG(dbgs() - << " Mismatched splits for load and store:\n" - << " " << *LI << "\n" - << " " << *SI << "\n"); + LLVM_DEBUG( + dbgs() + << " Mismatched splits for load and store:\n" + << " " << *LI << "\n" + << " " << *SI << "\n"); // We've found a store and load that we need to split // with mismatched relative splits. Just give up on them @@ -3693,7 +3696,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); IRB.SetInsertPoint(LI); - DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); + LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); int Idx = 0, Size = Offsets.Splits.size(); @@ -3718,9 +3721,9 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), /*IsSplittable*/ false)); - DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() - << ", " << NewSlices.back().endOffset() << "): " << *PLoad - << "\n"); + LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() + << ", " << NewSlices.back().endOffset() + << "): " << *PLoad << "\n"); // See if we've handled all the splits. 
if (Idx >= Size) @@ -3740,14 +3743,15 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { StoreInst *SI = cast<StoreInst>(LU); if (!Stores.empty() && SplitOffsetsMap.count(SI)) { DeferredStores = true; - DEBUG(dbgs() << " Deferred splitting of store: " << *SI << "\n"); + LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI + << "\n"); continue; } Value *StoreBasePtr = SI->getPointerOperand(); IRB.SetInsertPoint(SI); - DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); + LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { LoadInst *PLoad = SplitLoads[Idx]; @@ -3763,7 +3767,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { PartPtrTy, StoreBasePtr->getName() + "."), getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access); - DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); + LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); } // We want to immediately iterate on any allocas impacted by splitting @@ -3812,7 +3816,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { Value *LoadBasePtr = LI->getPointerOperand(); Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); - DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); + LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); // Check whether we have an already split load. auto SplitLoadsMapI = SplitLoadsMap.find(LI); @@ -3822,7 +3826,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { assert(SplitLoads->size() == Offsets.Splits.size() + 1 && "Too few split loads for the number of splits in the store!"); } else { - DEBUG(dbgs() << " of load: " << *LI << "\n"); + LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n"); } uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); @@ -3862,11 +3866,11 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, &PStore->getOperandUse(PStore->getPointerOperandIndex()), /*IsSplittable*/ false)); - DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() - << ", " << NewSlices.back().endOffset() << "): " << *PStore - << "\n"); + LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() + << ", " << NewSlices.back().endOffset() + << "): " << *PStore << "\n"); if (!SplitLoads) { - DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); + LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); } // See if we've finished all the splits. @@ -3921,10 +3925,10 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { // sequence. AS.insert(NewSlices); - DEBUG(dbgs() << " Pre-split slices:\n"); + LLVM_DEBUG(dbgs() << " Pre-split slices:\n"); #ifndef NDEBUG for (auto I = AS.begin(), E = AS.end(); I != E; ++I) - DEBUG(AS.print(dbgs(), I, " ")); + LLVM_DEBUG(AS.print(dbgs(), I, " ")); #endif // Finally, don't try to promote any allocas that new require re-splitting. 
@@ -4008,9 +4012,9 @@ AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS, ++NumNewAllocas; } - DEBUG(dbgs() << "Rewriting alloca partition " - << "[" << P.beginOffset() << "," << P.endOffset() - << ") to: " << *NewAI << "\n"); + LLVM_DEBUG(dbgs() << "Rewriting alloca partition " + << "[" << P.beginOffset() << "," << P.endOffset() + << ") to: " << *NewAI << "\n"); // Track the high watermark on the worklist as it is only relevant for // promoted allocas. We will reset it to this point if the alloca is not in @@ -4269,7 +4273,7 @@ void SROA::clobberUse(Use &U) { /// the slices of the alloca, and then hands it off to be split and /// rewritten as needed. bool SROA::runOnAlloca(AllocaInst &AI) { - DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); + LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); ++NumAllocasAnalyzed; // Special case dead allocas, as they're trivial. @@ -4293,7 +4297,7 @@ bool SROA::runOnAlloca(AllocaInst &AI) { // Build the slices using a recursive instruction-visiting builder. AllocaSlices AS(DL, AI); - DEBUG(AS.print(dbgs())); + LLVM_DEBUG(AS.print(dbgs())); if (AS.isEscaped()) return Changed; @@ -4321,11 +4325,11 @@ bool SROA::runOnAlloca(AllocaInst &AI) { Changed |= splitAlloca(AI, AS); - DEBUG(dbgs() << " Speculating PHIs\n"); + LLVM_DEBUG(dbgs() << " Speculating PHIs\n"); while (!SpeculatablePHIs.empty()) speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); - DEBUG(dbgs() << " Speculating Selects\n"); + LLVM_DEBUG(dbgs() << " Speculating Selects\n"); while (!SpeculatableSelects.empty()) speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); @@ -4346,7 +4350,7 @@ bool SROA::deleteDeadInstructions( bool Changed = false; while (!DeadInsts.empty()) { Instruction *I = DeadInsts.pop_back_val(); - DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); + LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); // If the instruction is an alloca, find the possible dbg.declare connected // to it, and remove it too. 
We must do this before calling RAUW or we will @@ -4385,7 +4389,7 @@ bool SROA::promoteAllocas(Function &F) { NumPromoted += PromotableAllocas.size(); - DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); + LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); PromoteMemToReg(PromotableAllocas, *DT, AC); PromotableAllocas.clear(); return true; @@ -4393,7 +4397,7 @@ bool SROA::promoteAllocas(Function &F) { PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT, AssumptionCache &RunAC) { - DEBUG(dbgs() << "SROA function: " << F.getName() << "\n"); + LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n"); C = &F.getContext(); DT = &RunDT; AC = &RunAC; diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp index 1cd61bfffc2..09b91781e9d 100644 --- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp +++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp @@ -184,7 +184,7 @@ static void rewritePHINodesForExitAndUnswitchedBlocks(BasicBlock &ExitBB, static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT, LoopInfo &LI) { assert(BI.isConditional() && "Can only unswitch a conditional branch!"); - DEBUG(dbgs() << " Trying to unswitch branch: " << BI << "\n"); + LLVM_DEBUG(dbgs() << " Trying to unswitch branch: " << BI << "\n"); Value *LoopCond = BI.getCondition(); @@ -212,8 +212,8 @@ static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT, if (!areLoopExitPHIsLoopInvariant(L, *ParentBB, *LoopExitBB)) return false; - DEBUG(dbgs() << " unswitching trivial branch when: " << CondVal - << " == " << LoopCond << "\n"); + LLVM_DEBUG(dbgs() << " unswitching trivial branch when: " << CondVal + << " == " << LoopCond << "\n"); // Split the preheader, so that we know that there is a safe place to insert // the conditional branch. We will change the preheader to have a conditional @@ -292,7 +292,7 @@ static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT, /// branch. Still more cleanup can be done with some simplify-cfg like pass. static bool unswitchTrivialSwitch(Loop &L, SwitchInst &SI, DominatorTree &DT, LoopInfo &LI) { - DEBUG(dbgs() << " Trying to unswitch switch: " << SI << "\n"); + LLVM_DEBUG(dbgs() << " Trying to unswitch switch: " << SI << "\n"); Value *LoopCond = SI.getCondition(); // If this isn't switching on an invariant condition, we can't unswitch it. @@ -316,7 +316,7 @@ static bool unswitchTrivialSwitch(Loop &L, SwitchInst &SI, DominatorTree &DT, else if (ExitCaseIndices.empty()) return false; - DEBUG(dbgs() << " unswitching trivial cases...\n"); + LLVM_DEBUG(dbgs() << " unswitching trivial cases...\n"); SmallVector<std::pair<ConstantInt *, BasicBlock *>, 4> ExitCases; ExitCases.reserve(ExitCaseIndices.size()); @@ -1798,8 +1798,9 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI)) return Changed; - DEBUG(dbgs() << "Considering " << UnswitchCandidates.size() - << " non-trivial loop invariant conditions for unswitching.\n"); + LLVM_DEBUG( + dbgs() << "Considering " << UnswitchCandidates.size() + << " non-trivial loop invariant conditions for unswitching.\n"); // Given that unswitching these terminators will require duplicating parts of // the loop, so we need to be able to model that cost. 
Compute the ephemeral @@ -1835,7 +1836,7 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, assert(LoopCost >= 0 && "Must not have negative loop costs!"); BBCostMap[BB] = Cost; } - DEBUG(dbgs() << " Total loop cost: " << LoopCost << "\n"); + LLVM_DEBUG(dbgs() << " Total loop cost: " << LoopCost << "\n"); // Now we find the best candidate by searching for the one with the following // properties in order: @@ -1889,8 +1890,8 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, int BestUnswitchCost; for (TerminatorInst *CandidateTI : UnswitchCandidates) { int CandidateCost = ComputeUnswitchedCost(CandidateTI); - DEBUG(dbgs() << " Computed cost of " << CandidateCost - << " for unswitch candidate: " << *CandidateTI << "\n"); + LLVM_DEBUG(dbgs() << " Computed cost of " << CandidateCost + << " for unswitch candidate: " << *CandidateTI << "\n"); if (!BestUnswitchTI || CandidateCost < BestUnswitchCost) { BestUnswitchTI = CandidateTI; BestUnswitchCost = CandidateCost; @@ -1898,14 +1899,14 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC, } if (BestUnswitchCost < UnswitchThreshold) { - DEBUG(dbgs() << " Trying to unswitch non-trivial (cost = " - << BestUnswitchCost << ") branch: " << *BestUnswitchTI - << "\n"); + LLVM_DEBUG(dbgs() << " Trying to unswitch non-trivial (cost = " + << BestUnswitchCost << ") branch: " << *BestUnswitchTI + << "\n"); Changed |= unswitchInvariantBranch(L, cast<BranchInst>(*BestUnswitchTI), DT, LI, AC, NonTrivialUnswitchCB); } else { - DEBUG(dbgs() << "Cannot unswitch, lowest cost found: " << BestUnswitchCost - << "\n"); + LLVM_DEBUG(dbgs() << "Cannot unswitch, lowest cost found: " + << BestUnswitchCost << "\n"); } return Changed; @@ -1917,7 +1918,8 @@ PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM, Function &F = *L.getHeader()->getParent(); (void)F; - DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L << "\n"); + LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L + << "\n"); // Save the current loop name in a variable so that we can report it even // after it has been deleted. @@ -1977,7 +1979,8 @@ bool SimpleLoopUnswitchLegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) { Function &F = *L->getHeader()->getParent(); - DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << *L << "\n"); + LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << *L + << "\n"); auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); diff --git a/llvm/lib/Transforms/Scalar/Sink.cpp b/llvm/lib/Transforms/Scalar/Sink.cpp index 81176288049..ca6b93e0b4a 100644 --- a/llvm/lib/Transforms/Scalar/Sink.cpp +++ b/llvm/lib/Transforms/Scalar/Sink.cpp @@ -187,11 +187,9 @@ static bool SinkInstruction(Instruction *Inst, if (!SuccToSinkTo) return false; - DEBUG(dbgs() << "Sink" << *Inst << " ("; - Inst->getParent()->printAsOperand(dbgs(), false); - dbgs() << " -> "; - SuccToSinkTo->printAsOperand(dbgs(), false); - dbgs() << ")\n"); + LLVM_DEBUG(dbgs() << "Sink" << *Inst << " ("; + Inst->getParent()->printAsOperand(dbgs(), false); dbgs() << " -> "; + SuccToSinkTo->printAsOperand(dbgs(), false); dbgs() << ")\n"); // Move the instruction. 
Inst->moveBefore(&*SuccToSinkTo->getFirstInsertionPt()); @@ -244,7 +242,7 @@ static bool iterativelySinkInstructions(Function &F, DominatorTree &DT, do { MadeChange = false; - DEBUG(dbgs() << "Sinking iteration " << NumSinkIter << "\n"); + LLVM_DEBUG(dbgs() << "Sinking iteration " << NumSinkIter << "\n"); // Process all basic blocks. for (BasicBlock &I : F) MadeChange |= ProcessBlock(I, DT, LI, AA); diff --git a/llvm/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp b/llvm/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp index 23156d5a4d8..3b0971804e6 100644 --- a/llvm/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp +++ b/llvm/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp @@ -64,7 +64,7 @@ isSafeToSpeculatePHIUsers(PHINode &PN, DominatorTree &DT, // block. We should consider using actual post-dominance here in the // future. if (UI->getParent() != PhiBB) { - DEBUG(dbgs() << " Unsafe: use in a different BB: " << *UI << "\n"); + LLVM_DEBUG(dbgs() << " Unsafe: use in a different BB: " << *UI << "\n"); return false; } @@ -75,7 +75,7 @@ isSafeToSpeculatePHIUsers(PHINode &PN, DominatorTree &DT, // probably change this to do at least a limited scan of the intervening // instructions and allow handling stores in easily proven safe cases. if (mayBeMemoryDependent(*UI)) { - DEBUG(dbgs() << " Unsafe: can't speculate use: " << *UI << "\n"); + LLVM_DEBUG(dbgs() << " Unsafe: can't speculate use: " << *UI << "\n"); return false; } @@ -126,8 +126,8 @@ isSafeToSpeculatePHIUsers(PHINode &PN, DominatorTree &DT, // If when we directly test whether this is safe it fails, bail. if (UnsafeSet.count(OpI) || ParentBB != PhiBB || mayBeMemoryDependent(*OpI)) { - DEBUG(dbgs() << " Unsafe: can't speculate transitive use: " << *OpI - << "\n"); + LLVM_DEBUG(dbgs() << " Unsafe: can't speculate transitive use: " + << *OpI << "\n"); // Record the stack of instructions which reach this node as unsafe // so we prune subsequent searches. UnsafeSet.insert(OpI); @@ -229,7 +229,7 @@ static bool isSafeAndProfitableToSpeculateAroundPHI( NonFreeMat |= MatCost != TTI.TCC_Free; } if (!NonFreeMat) { - DEBUG(dbgs() << " Free: " << PN << "\n"); + LLVM_DEBUG(dbgs() << " Free: " << PN << "\n"); // No profit in free materialization. return false; } @@ -237,7 +237,7 @@ static bool isSafeAndProfitableToSpeculateAroundPHI( // Now check that the uses of this PHI can actually be speculated, // otherwise we'll still have to materialize the PHI value. if (!isSafeToSpeculatePHIUsers(PN, DT, PotentialSpecSet, UnsafeSet)) { - DEBUG(dbgs() << " Unsafe PHI: " << PN << "\n"); + LLVM_DEBUG(dbgs() << " Unsafe PHI: " << PN << "\n"); return false; } @@ -288,9 +288,13 @@ static bool isSafeAndProfitableToSpeculateAroundPHI( // just bail. We're only interested in cases where folding the incoming // constants is at least break-even on all paths. 
if (FoldedCost > MatCost) { - DEBUG(dbgs() << " Not profitable to fold imm: " << *IncomingC << "\n" - " Materializing cost: " << MatCost << "\n" - " Accumulated folded cost: " << FoldedCost << "\n"); + LLVM_DEBUG(dbgs() << " Not profitable to fold imm: " << *IncomingC + << "\n" + " Materializing cost: " + << MatCost + << "\n" + " Accumulated folded cost: " + << FoldedCost << "\n"); return false; } } @@ -310,8 +314,8 @@ static bool isSafeAndProfitableToSpeculateAroundPHI( "less that its materialized cost, " "the sum must be as well."); - DEBUG(dbgs() << " Cost savings " << (TotalMatCost - TotalFoldedCost) - << ": " << PN << "\n"); + LLVM_DEBUG(dbgs() << " Cost savings " << (TotalMatCost - TotalFoldedCost) + << ": " << PN << "\n"); CostSavingsMap[&PN] = TotalMatCost - TotalFoldedCost; return true; } @@ -489,9 +493,13 @@ findProfitablePHIs(ArrayRef<PHINode *> PNs, // and zero out the cost of everything it depends on. int CostSavings = CostSavingsMap.find(PN)->second; if (SpecCost > CostSavings) { - DEBUG(dbgs() << " Not profitable, speculation cost: " << *PN << "\n" - " Cost savings: " << CostSavings << "\n" - " Speculation cost: " << SpecCost << "\n"); + LLVM_DEBUG(dbgs() << " Not profitable, speculation cost: " << *PN + << "\n" + " Cost savings: " + << CostSavings + << "\n" + " Speculation cost: " + << SpecCost << "\n"); continue; } @@ -545,7 +553,7 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs, SmallPtrSetImpl<Instruction *> &PotentialSpecSet, SmallSetVector<BasicBlock *, 16> &PredSet, DominatorTree &DT) { - DEBUG(dbgs() << " Speculating around " << SpecPNs.size() << " PHIs!\n"); + LLVM_DEBUG(dbgs() << " Speculating around " << SpecPNs.size() << " PHIs!\n"); NumPHIsSpeculated += SpecPNs.size(); // Split any critical edges so that we have a block to hoist into. @@ -558,8 +566,8 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs, CriticalEdgeSplittingOptions(&DT).setMergeIdenticalEdges()); if (NewPredBB) { ++NumEdgesSplit; - DEBUG(dbgs() << " Split critical edge from: " << PredBB->getName() - << "\n"); + LLVM_DEBUG(dbgs() << " Split critical edge from: " << PredBB->getName() + << "\n"); SpecPreds.push_back(NewPredBB); } else { assert(PredBB->getSingleSuccessor() == ParentBB && @@ -593,8 +601,9 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs, int NumSpecInsts = SpecList.size() * SpecPreds.size(); int NumRedundantInsts = NumSpecInsts - SpecList.size(); - DEBUG(dbgs() << " Inserting " << NumSpecInsts << " speculated instructions, " - << NumRedundantInsts << " redundancies\n"); + LLVM_DEBUG(dbgs() << " Inserting " << NumSpecInsts + << " speculated instructions, " << NumRedundantInsts + << " redundancies\n"); NumSpeculatedInstructions += NumSpecInsts; NumNewRedundantInstructions += NumRedundantInsts; @@ -716,7 +725,7 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs, /// true when at least some speculation occurs. static bool tryToSpeculatePHIs(SmallVectorImpl<PHINode *> &PNs, DominatorTree &DT, TargetTransformInfo &TTI) { - DEBUG(dbgs() << "Evaluating phi nodes for speculation:\n"); + LLVM_DEBUG(dbgs() << "Evaluating phi nodes for speculation:\n"); // Savings in cost from speculating around a PHI node. SmallDenseMap<PHINode *, int, 16> CostSavingsMap; @@ -745,7 +754,7 @@ static bool tryToSpeculatePHIs(SmallVectorImpl<PHINode *> &PNs, PNs.end()); // If no PHIs were profitable, skip. 
if (PNs.empty()) { - DEBUG(dbgs() << " No safe and profitable PHIs found!\n"); + LLVM_DEBUG(dbgs() << " No safe and profitable PHIs found!\n"); return false; } @@ -763,13 +772,13 @@ static bool tryToSpeculatePHIs(SmallVectorImpl<PHINode *> &PNs, // differently. if (isa<IndirectBrInst>(PredBB->getTerminator()) || isa<InvokeInst>(PredBB->getTerminator())) { - DEBUG(dbgs() << " Invalid: predecessor terminator: " << PredBB->getName() - << "\n"); + LLVM_DEBUG(dbgs() << " Invalid: predecessor terminator: " + << PredBB->getName() << "\n"); return false; } } if (PredSet.size() < 2) { - DEBUG(dbgs() << " Unimportant: phi with only one predecessor\n"); + LLVM_DEBUG(dbgs() << " Unimportant: phi with only one predecessor\n"); return false; } diff --git a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp index a7c308b5987..488c4df2b19 100644 --- a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp +++ b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp @@ -151,8 +151,8 @@ namespace llvm { bool SpeculativeExecutionPass::runImpl(Function &F, TargetTransformInfo *TTI) { if (OnlyIfDivergentTarget && !TTI->hasBranchDivergence()) { - DEBUG(dbgs() << "Not running SpeculativeExecution because " - "TTI->hasBranchDivergence() is false.\n"); + LLVM_DEBUG(dbgs() << "Not running SpeculativeExecution because " + "TTI->hasBranchDivergence() is false.\n"); return false; } diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp index 25816f70c56..f0ddc30beff 100644 --- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp +++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp @@ -489,10 +489,10 @@ void StructurizeCFG::collectInfos() { Visited.clear(); for (RegionNode *RN : reverse(Order)) { - DEBUG(dbgs() << "Visiting: " - << (RN->isSubRegion() ? "SubRegion with entry: " : "") - << RN->getEntry()->getName() << " Loop Depth: " - << LI->getLoopDepth(RN->getEntry()) << "\n"); + LLVM_DEBUG(dbgs() << "Visiting: " + << (RN->isSubRegion() ? "SubRegion with entry: " : "") + << RN->getEntry()->getName() << " Loop Depth: " + << LI->getLoopDepth(RN->getEntry()) << "\n"); // Analyze all the conditions leading to a node gatherPredicates(RN); @@ -901,8 +901,8 @@ static bool hasOnlyUniformBranches(Region *R, unsigned UniformMDKindID, if (!DA.isUniform(Br)) return false; - DEBUG(dbgs() << "BB: " << Br->getParent()->getName() - << " has uniform terminator\n"); + LLVM_DEBUG(dbgs() << "BB: " << Br->getParent()->getName() + << " has uniform terminator\n"); } else { // Explicitly refuse to treat regions as uniform if they have non-uniform // subregions. We cannot rely on DivergenceAnalysis for branches in @@ -943,7 +943,8 @@ bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) { DA = &getAnalysis<DivergenceAnalysis>(); if (hasOnlyUniformBranches(R, UniformMDKindID, *DA)) { - DEBUG(dbgs() << "Skipping region with uniform control flow: " << *R << '\n'); + LLVM_DEBUG(dbgs() << "Skipping region with uniform control flow: " << *R + << '\n'); // Mark all direct child block terminators as having been treated as // uniform. 
To account for a possible future in which non-uniform diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp index 37ea4375a4c..f8cd6c17a5a 100644 --- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -302,7 +302,7 @@ static bool markTails(Function &F, bool &AllCallsAreTailCalls, if (Visited[CI->getParent()] != ESCAPED) { // If the escape point was part way through the block, calls after the // escape point wouldn't have been put into DeferredTails. - DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n"); + LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n"); CI->setTailCall(); Modified = true; } else { @@ -699,8 +699,8 @@ static bool foldReturnAndProcessPred( BranchInst *BI = UncondBranchPreds.pop_back_val(); BasicBlock *Pred = BI->getParent(); if (CallInst *CI = findTRECandidate(BI, CannotTailCallElimCallsMarkedTail, TTI)){ - DEBUG(dbgs() << "FOLDING: " << *BB - << "INTO UNCOND BRANCH PRED: " << *Pred); + LLVM_DEBUG(dbgs() << "FOLDING: " << *BB + << "INTO UNCOND BRANCH PRED: " << *Pred); ReturnInst *RI = FoldReturnIntoUncondBranch(Ret, BB, Pred); // Cleanup: if all predecessors of BB have been eliminated by diff --git a/llvm/lib/Transforms/Utils/AddDiscriminators.cpp b/llvm/lib/Transforms/Utils/AddDiscriminators.cpp index 9a4996e5475..e3ef4236222 100644 --- a/llvm/lib/Transforms/Utils/AddDiscriminators.cpp +++ b/llvm/lib/Transforms/Utils/AddDiscriminators.cpp @@ -210,9 +210,9 @@ static bool addDiscriminators(Function &F) { // it in 1 byte ULEB128 representation. unsigned Discriminator = R.second ? ++LDM[L] : LDM[L]; I.setDebugLoc(DIL->setBaseDiscriminator(Discriminator)); - DEBUG(dbgs() << DIL->getFilename() << ":" << DIL->getLine() << ":" - << DIL->getColumn() << ":" << Discriminator << " " << I - << "\n"); + LLVM_DEBUG(dbgs() << DIL->getFilename() << ":" << DIL->getLine() << ":" + << DIL->getColumn() << ":" << Discriminator << " " << I + << "\n"); Changed = true; } } diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp index 3d7149b49bc..f31dab9f96a 100644 --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -205,7 +205,7 @@ buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT, // Make sure that the first block is not a landing pad. if (BB == Result.front()) { if (BB->isEHPad()) { - DEBUG(dbgs() << "The first block cannot be an unwind block\n"); + LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n"); return {}; } continue; @@ -215,8 +215,9 @@ buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT, // the subgraph which is being extracted. 
for (auto *PBB : predecessors(BB)) if (!Result.count(PBB)) { - DEBUG(dbgs() << "No blocks in this region may have entries from " - "outside the region except for the first block!\n"); + LLVM_DEBUG( + dbgs() << "No blocks in this region may have entries from " + "outside the region except for the first block!\n"); return {}; } } @@ -623,8 +624,8 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs, BasicBlock *newHeader, Function *oldFunction, Module *M) { - DEBUG(dbgs() << "inputs: " << inputs.size() << "\n"); - DEBUG(dbgs() << "outputs: " << outputs.size() << "\n"); + LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n"); + LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n"); // This function returns unsigned, outputs will go back by reference. switch (NumExitBlocks) { @@ -638,20 +639,20 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs, // Add the types of the input values to the function's argument list for (Value *value : inputs) { - DEBUG(dbgs() << "value used in func: " << *value << "\n"); + LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n"); paramTy.push_back(value->getType()); } // Add the types of the output values to the function's argument list. for (Value *output : outputs) { - DEBUG(dbgs() << "instr used in func: " << *output << "\n"); + LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n"); if (AggregateArgs) paramTy.push_back(output->getType()); else paramTy.push_back(PointerType::getUnqual(output->getType())); } - DEBUG({ + LLVM_DEBUG({ dbgs() << "Function type: " << *RetTy << " f("; for (Type *i : paramTy) dbgs() << *i << ", "; @@ -1277,7 +1278,7 @@ Function *CodeExtractor::extractCodeRegion() { } } - DEBUG(if (verifyFunction(*newFunction)) - report_fatal_error("verifyFunction failed!")); + LLVM_DEBUG(if (verifyFunction(*newFunction)) + report_fatal_error("verifyFunction failed!")); return newFunction; } diff --git a/llvm/lib/Transforms/Utils/CtorUtils.cpp b/llvm/lib/Transforms/Utils/CtorUtils.cpp index 82b67c29310..9a0240144d0 100644 --- a/llvm/lib/Transforms/Utils/CtorUtils.cpp +++ b/llvm/lib/Transforms/Utils/CtorUtils.cpp @@ -138,7 +138,7 @@ bool optimizeGlobalCtorsList(Module &M, if (!F) continue; - DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n"); + LLVM_DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n"); // We cannot simplify external ctor functions. if (F->empty()) diff --git a/llvm/lib/Transforms/Utils/Evaluator.cpp b/llvm/lib/Transforms/Utils/Evaluator.cpp index cb5280992fb..05a318a3f22 100644 --- a/llvm/lib/Transforms/Utils/Evaluator.cpp +++ b/llvm/lib/Transforms/Utils/Evaluator.cpp @@ -226,22 +226,23 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, while (true) { Constant *InstResult = nullptr; - DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n"); + LLVM_DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n"); if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { if (!SI->isSimple()) { - DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n"); + LLVM_DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n"); return false; // no volatile/atomic accesses. 
} Constant *Ptr = getVal(SI->getOperand(1)); if (auto *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI)) { - DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr); + LLVM_DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr); Ptr = FoldedPtr; - DEBUG(dbgs() << "; To: " << *Ptr << "\n"); + LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n"); } if (!isSimpleEnoughPointerToCommit(Ptr)) { // If this is too complex for us to commit, reject it. - DEBUG(dbgs() << "Pointer is too complex for us to evaluate store."); + LLVM_DEBUG( + dbgs() << "Pointer is too complex for us to evaluate store."); return false; } @@ -250,14 +251,15 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, // If this might be too difficult for the backend to handle (e.g. the addr // of one global variable divided by another) then we can't commit it. if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) { - DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val - << "\n"); + LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. " + << *Val << "\n"); return false; } if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { if (CE->getOpcode() == Instruction::BitCast) { - DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n"); + LLVM_DEBUG(dbgs() + << "Attempting to resolve bitcast on constant ptr.\n"); // If we're evaluating a store through a bitcast, then we need // to pull the bitcast off the pointer type and push it onto the // stored value. @@ -287,14 +289,14 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, // If we can't improve the situation by introspecting NewTy, // we have to give up. } else { - DEBUG(dbgs() << "Failed to bitcast constant ptr, can not " - "evaluate.\n"); + LLVM_DEBUG(dbgs() << "Failed to bitcast constant ptr, can not " + "evaluate.\n"); return false; } } Val = NewVal; - DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n"); + LLVM_DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n"); } } @@ -303,37 +305,37 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, InstResult = ConstantExpr::get(BO->getOpcode(), getVal(BO->getOperand(0)), getVal(BO->getOperand(1))); - DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult - << "\n"); + LLVM_DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " + << *InstResult << "\n"); } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { InstResult = ConstantExpr::getCompare(CI->getPredicate(), getVal(CI->getOperand(0)), getVal(CI->getOperand(1))); - DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult - << "\n"); + LLVM_DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult + << "\n"); } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { InstResult = ConstantExpr::getCast(CI->getOpcode(), getVal(CI->getOperand(0)), CI->getType()); - DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult - << "\n"); + LLVM_DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult + << "\n"); } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)), getVal(SI->getOperand(1)), getVal(SI->getOperand(2))); - DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult - << "\n"); + LLVM_DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult + << "\n"); } else if (auto *EVI = dyn_cast<ExtractValueInst>(CurInst)) { InstResult = ConstantExpr::getExtractValue( getVal(EVI->getAggregateOperand()), EVI->getIndices()); - DEBUG(dbgs() << "Found an ExtractValueInst! 
Simplifying: " << *InstResult - << "\n"); + LLVM_DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: " + << *InstResult << "\n"); } else if (auto *IVI = dyn_cast<InsertValueInst>(CurInst)) { InstResult = ConstantExpr::getInsertValue( getVal(IVI->getAggregateOperand()), getVal(IVI->getInsertedValueOperand()), IVI->getIndices()); - DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: " << *InstResult - << "\n"); + LLVM_DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: " + << *InstResult << "\n"); } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { Constant *P = getVal(GEP->getOperand(0)); SmallVector<Constant*, 8> GEPOps; @@ -343,31 +345,33 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, InstResult = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), P, GEPOps, cast<GEPOperator>(GEP)->isInBounds()); - DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult - << "\n"); + LLVM_DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult << "\n"); } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { if (!LI->isSimple()) { - DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n"); + LLVM_DEBUG( + dbgs() << "Found a Load! Not a simple load, can not evaluate.\n"); return false; // no volatile/atomic accesses. } Constant *Ptr = getVal(LI->getOperand(0)); if (auto *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI)) { Ptr = FoldedPtr; - DEBUG(dbgs() << "Found a constant pointer expression, constant " - "folding: " << *Ptr << "\n"); + LLVM_DEBUG(dbgs() << "Found a constant pointer expression, constant " + "folding: " + << *Ptr << "\n"); } InstResult = ComputeLoadResult(Ptr); if (!InstResult) { - DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load." - "\n"); + LLVM_DEBUG( + dbgs() << "Failed to compute load result. Can not evaluate load." + "\n"); return false; // Could not evaluate load. } - DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n"); + LLVM_DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n"); } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { if (AI->isArrayAllocation()) { - DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n"); + LLVM_DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n"); return false; // Cannot handle array allocs. } Type *Ty = AI->getAllocatedType(); @@ -375,28 +379,28 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty), AI->getName())); InstResult = AllocaTmps.back().get(); - DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); + LLVM_DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) { CallSite CS(&*CurInst); // Debug info can safely be ignored here. if (isa<DbgInfoIntrinsic>(CS.getInstruction())) { - DEBUG(dbgs() << "Ignoring debug info.\n"); + LLVM_DEBUG(dbgs() << "Ignoring debug info.\n"); ++CurInst; continue; } // Cannot handle inline asm. 
if (isa<InlineAsm>(CS.getCalledValue())) { - DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); + LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); return false; } if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) { if (MSI->isVolatile()) { - DEBUG(dbgs() << "Can not optimize a volatile memset " << - "intrinsic.\n"); + LLVM_DEBUG(dbgs() << "Can not optimize a volatile memset " + << "intrinsic.\n"); return false; } Constant *Ptr = getVal(MSI->getDest()); @@ -404,7 +408,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, Constant *DestVal = ComputeLoadResult(getVal(Ptr)); if (Val->isNullValue() && DestVal && DestVal->isNullValue()) { // This memset is a no-op. - DEBUG(dbgs() << "Ignoring no-op memset.\n"); + LLVM_DEBUG(dbgs() << "Ignoring no-op memset.\n"); ++CurInst; continue; } @@ -412,7 +416,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) { - DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n"); + LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n"); ++CurInst; continue; } @@ -421,7 +425,8 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, // We don't insert an entry into Values, as it doesn't have a // meaningful return value. if (!II->use_empty()) { - DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n"); + LLVM_DEBUG(dbgs() + << "Found unused invariant_start. Can't evaluate.\n"); return false; } ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0)); @@ -433,34 +438,35 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, Size->getValue().getLimitedValue() >= DL.getTypeStoreSize(ElemTy)) { Invariants.insert(GV); - DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV - << "\n"); + LLVM_DEBUG(dbgs() << "Found a global var that is an invariant: " + << *GV << "\n"); } else { - DEBUG(dbgs() << "Found a global var, but can not treat it as an " - "invariant.\n"); + LLVM_DEBUG(dbgs() + << "Found a global var, but can not treat it as an " + "invariant.\n"); } } // Continue even if we do nothing. ++CurInst; continue; } else if (II->getIntrinsicID() == Intrinsic::assume) { - DEBUG(dbgs() << "Skipping assume intrinsic.\n"); + LLVM_DEBUG(dbgs() << "Skipping assume intrinsic.\n"); ++CurInst; continue; } else if (II->getIntrinsicID() == Intrinsic::sideeffect) { - DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n"); + LLVM_DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n"); ++CurInst; continue; } - DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n"); + LLVM_DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n"); return false; } // Resolve function pointers. Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue())); if (!Callee || Callee->isInterposable()) { - DEBUG(dbgs() << "Can not resolve function pointer.\n"); + LLVM_DEBUG(dbgs() << "Can not resolve function pointer.\n"); return false; // Cannot resolve. } @@ -472,15 +478,15 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, // If this is a function we can constant fold, do it. if (Constant *C = ConstantFoldCall(CS, Callee, Formals, TLI)) { InstResult = C; - DEBUG(dbgs() << "Constant folded function call. Result: " << - *InstResult << "\n"); + LLVM_DEBUG(dbgs() << "Constant folded function call. 
Result: " + << *InstResult << "\n"); } else { - DEBUG(dbgs() << "Can not constant fold function call.\n"); + LLVM_DEBUG(dbgs() << "Can not constant fold function call.\n"); return false; } } else { if (Callee->getFunctionType()->isVarArg()) { - DEBUG(dbgs() << "Can not constant fold vararg function call.\n"); + LLVM_DEBUG(dbgs() << "Can not constant fold vararg function call.\n"); return false; } @@ -488,21 +494,22 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, // Execute the call, if successful, use the return value. ValueStack.emplace_back(); if (!EvaluateFunction(Callee, RetVal, Formals)) { - DEBUG(dbgs() << "Failed to evaluate function.\n"); + LLVM_DEBUG(dbgs() << "Failed to evaluate function.\n"); return false; } ValueStack.pop_back(); InstResult = RetVal; if (InstResult) { - DEBUG(dbgs() << "Successfully evaluated function. Result: " - << *InstResult << "\n\n"); + LLVM_DEBUG(dbgs() << "Successfully evaluated function. Result: " + << *InstResult << "\n\n"); } else { - DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n"); + LLVM_DEBUG(dbgs() + << "Successfully evaluated function. Result: 0\n\n"); } } } else if (isa<TerminatorInst>(CurInst)) { - DEBUG(dbgs() << "Found a terminator instruction.\n"); + LLVM_DEBUG(dbgs() << "Found a terminator instruction.\n"); if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { if (BI->isUnconditional()) { @@ -529,17 +536,18 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, NextBB = nullptr; } else { // invoke, unwind, resume, unreachable. - DEBUG(dbgs() << "Can not handle terminator."); + LLVM_DEBUG(dbgs() << "Can not handle terminator."); return false; // Cannot handle this terminator. } // We succeeded at evaluating this block! - DEBUG(dbgs() << "Successfully evaluated block.\n"); + LLVM_DEBUG(dbgs() << "Successfully evaluated block.\n"); return true; } else { // Did not know how to evaluate this! - DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction." - "\n"); + LLVM_DEBUG( + dbgs() << "Failed to evaluate block due to unhandled instruction." + "\n"); return false; } @@ -553,7 +561,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, // If we just processed an invoke, we finished evaluating the block. if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) { NextBB = II->getNormalDest(); - DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n"); + LLVM_DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n"); return true; } @@ -592,7 +600,7 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal, while (true) { BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings. 
- DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n"); + LLVM_DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n"); if (!EvaluateBlock(CurInst, NextBB)) return false; diff --git a/llvm/lib/Transforms/Utils/FlattenCFG.cpp b/llvm/lib/Transforms/Utils/FlattenCFG.cpp index a1adc31e499..1b8bb05fc91 100644 --- a/llvm/lib/Transforms/Utils/FlattenCFG.cpp +++ b/llvm/lib/Transforms/Utils/FlattenCFG.cpp @@ -312,7 +312,7 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder) { new UnreachableInst(CB->getContext(), CB); } while (Iteration); - DEBUG(dbgs() << "Use parallel and/or in:\n" << *FirstCondBlock); + LLVM_DEBUG(dbgs() << "Use parallel and/or in:\n" << *FirstCondBlock); return true; } @@ -469,7 +469,7 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) { // Remove \param SecondEntryBlock SecondEntryBlock->dropAllReferences(); SecondEntryBlock->eraseFromParent(); - DEBUG(dbgs() << "If conditions merged into:\n" << *FirstEntryBlock); + LLVM_DEBUG(dbgs() << "If conditions merged into:\n" << *FirstEntryBlock); return true; } diff --git a/llvm/lib/Transforms/Utils/FunctionComparator.cpp b/llvm/lib/Transforms/Utils/FunctionComparator.cpp index 75539428b68..aada9f542bd 100644 --- a/llvm/lib/Transforms/Utils/FunctionComparator.cpp +++ b/llvm/lib/Transforms/Utils/FunctionComparator.cpp @@ -377,7 +377,7 @@ int FunctionComparator::cmpConstants(const Constant *L, } } default: // Unknown constant, abort. - DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n"); + LLVM_DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n"); llvm_unreachable("Constant ValueID not recognized."); return -1; } diff --git a/llvm/lib/Transforms/Utils/LibCallsShrinkWrap.cpp b/llvm/lib/Transforms/Utils/LibCallsShrinkWrap.cpp index 80fb9cb1aae..9832a6f24e1 100644 --- a/llvm/lib/Transforms/Utils/LibCallsShrinkWrap.cpp +++ b/llvm/lib/Transforms/Utils/LibCallsShrinkWrap.cpp @@ -79,11 +79,11 @@ public: bool perform() { bool Changed = false; for (auto &CI : WorkList) { - DEBUG(dbgs() << "CDCE calls: " << CI->getCalledFunction()->getName() - << "\n"); + LLVM_DEBUG(dbgs() << "CDCE calls: " << CI->getCalledFunction()->getName() + << "\n"); if (perform(CI)) { Changed = true; - DEBUG(dbgs() << "Transformed\n"); + LLVM_DEBUG(dbgs() << "Transformed\n"); } } return Changed; @@ -421,7 +421,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI, const LibFunc &Func) { // FIXME: LibFunc_powf and powl TBD. if (Func != LibFunc_pow) { - DEBUG(dbgs() << "Not handled powf() and powl()\n"); + LLVM_DEBUG(dbgs() << "Not handled powf() and powl()\n"); return nullptr; } @@ -433,7 +433,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI, if (ConstantFP *CF = dyn_cast<ConstantFP>(Base)) { double D = CF->getValueAPF().convertToDouble(); if (D < 1.0f || D > APInt::getMaxValue(8).getZExtValue()) { - DEBUG(dbgs() << "Not handled pow(): constant base out of range\n"); + LLVM_DEBUG(dbgs() << "Not handled pow(): constant base out of range\n"); return nullptr; } @@ -447,7 +447,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI, // If the Base value coming from an integer type. 
Instruction *I = dyn_cast<Instruction>(Base); if (!I) { - DEBUG(dbgs() << "Not handled pow(): FP type base\n"); + LLVM_DEBUG(dbgs() << "Not handled pow(): FP type base\n"); return nullptr; } unsigned Opcode = I->getOpcode(); @@ -461,7 +461,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI, else if (BW == 32) UpperV = 32.0f; else { - DEBUG(dbgs() << "Not handled pow(): type too wide\n"); + LLVM_DEBUG(dbgs() << "Not handled pow(): type too wide\n"); return nullptr; } @@ -477,7 +477,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI, Value *Cond0 = BBBuilder.CreateFCmp(CmpInst::FCMP_OLE, Base, V0); return BBBuilder.CreateOr(Cond0, Cond); } - DEBUG(dbgs() << "Not handled pow(): base not from integer convert\n"); + LLVM_DEBUG(dbgs() << "Not handled pow(): base not from integer convert\n"); return nullptr; } @@ -496,9 +496,9 @@ void LibCallsShrinkWrap::shrinkWrapCI(CallInst *CI, Value *Cond) { SuccBB->setName("cdce.end"); CI->removeFromParent(); CallBB->getInstList().insert(CallBB->getFirstInsertionPt(), CI); - DEBUG(dbgs() << "== Basic Block After =="); - DEBUG(dbgs() << *CallBB->getSinglePredecessor() << *CallBB - << *CallBB->getSingleSuccessor() << "\n"); + LLVM_DEBUG(dbgs() << "== Basic Block After =="); + LLVM_DEBUG(dbgs() << *CallBB->getSinglePredecessor() << *CallBB + << *CallBB->getSingleSuccessor() << "\n"); } // Perform the transformation to a single candidate. diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index 75847cc783a..5eaeccb5a29 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -740,8 +740,8 @@ static bool CanMergeValues(Value *First, Value *Second) { static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) { assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!"); - DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into " - << Succ->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into " + << Succ->getName() << "\n"); // Shortcut, if there is only a single predecessor it must be BB and merging // is always safe if (Succ->getSinglePredecessor()) return true; @@ -764,10 +764,11 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) { if (BBPreds.count(IBB) && !CanMergeValues(BBPN->getIncomingValueForBlock(IBB), PN->getIncomingValue(PI))) { - DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in " - << Succ->getName() << " is conflicting with " - << BBPN->getName() << " with regard to common predecessor " - << IBB->getName() << "\n"); + LLVM_DEBUG(dbgs() + << "Can't fold, phi node " << PN->getName() << " in " + << Succ->getName() << " is conflicting with " + << BBPN->getName() << " with regard to common predecessor " + << IBB->getName() << "\n"); return false; } } @@ -780,9 +781,10 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) { BasicBlock *IBB = PN->getIncomingBlock(PI); if (BBPreds.count(IBB) && !CanMergeValues(Val, PN->getIncomingValue(PI))) { - DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in " - << Succ->getName() << " is conflicting with regard to common " - << "predecessor " << IBB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() + << " in " << Succ->getName() + << " is conflicting with regard to common " + << "predecessor " << IBB->getName() << "\n"); return false; } } @@ -970,7 +972,7 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, } } - 
DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB); + LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB); std::vector<DominatorTree::UpdateType> Updates; if (DDT) { @@ -1530,7 +1532,7 @@ void llvm::salvageDebugInfo(Instruction &I) { DIExpression::prependOpcodes(DIExpr, Ops, DIExpression::WithStackValue); DII->setOperand(0, wrapMD(I.getOperand(0))); DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr)); - DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); + LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); }; auto applyOffset = [&](DbgInfoIntrinsic *DII, uint64_t Offset) { @@ -1553,7 +1555,7 @@ void llvm::salvageDebugInfo(Instruction &I) { MetadataAsValue *CastSrc = wrapMD(I.getOperand(0)); for (auto *DII : DbgUsers) { DII->setOperand(0, CastSrc); - DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); + LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); } } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { unsigned BitWidth = @@ -1620,7 +1622,7 @@ void llvm::salvageDebugInfo(Instruction &I) { DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref); DII->setOperand(0, AddrMD); DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr)); - DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); + LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); } } } @@ -2083,8 +2085,8 @@ static unsigned replaceDominatedUsesWith(Value *From, Value *To, if (!Dominates(Root, U)) continue; U.set(To); - DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as " - << *To << " in " << *U << "\n"); + LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName() + << "' as " << *To << " in " << *U << "\n"); ++Count; } return Count; diff --git a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp index e178c99b456..1678781a41e 100644 --- a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp +++ b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp @@ -235,15 +235,16 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { CodeMetrics Metrics; Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues); if (Metrics.notDuplicatable) { - DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable" - << " instructions: "; - L->dump()); + LLVM_DEBUG( + dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable" + << " instructions: "; + L->dump()); return false; } if (Metrics.convergent) { - DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent " - "instructions: "; - L->dump()); + LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent " + "instructions: "; + L->dump()); return false; } if (Metrics.NumInsts > MaxHeaderSize) @@ -266,7 +267,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { if (SE) SE->forgetTopmostLoop(L); - DEBUG(dbgs() << "LoopRotation: rotating "; L->dump()); + LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump()); // Find new Loop header. NewHeader is a Header's one and only successor // that is inside loop. Header's other successor is outside the @@ -477,7 +478,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { // emitted code isn't too gross in this common case. 
MergeBlockIntoPredecessor(OrigHeader, DT, LI); - DEBUG(dbgs() << "LoopRotation: into "; L->dump()); + LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump()); ++NumRotated; return true; @@ -580,8 +581,8 @@ bool LoopRotate::simplifyLoopLatch(Loop *L) { if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L)) return false; - DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into " - << LastExit->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into " + << LastExit->getName() << "\n"); // Hoist the instructions from Latch into LastExit. LastExit->getInstList().splice(BI->getIterator(), Latch->getInstList(), diff --git a/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/llvm/lib/Transforms/Utils/LoopSimplify.cpp index d70fc4ac028..b33f4f8a810 100644 --- a/llvm/lib/Transforms/Utils/LoopSimplify.cpp +++ b/llvm/lib/Transforms/Utils/LoopSimplify.cpp @@ -141,8 +141,8 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, DominatorTree *DT, if (!PreheaderBB) return nullptr; - DEBUG(dbgs() << "LoopSimplify: Creating pre-header " - << PreheaderBB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "LoopSimplify: Creating pre-header " + << PreheaderBB->getName() << "\n"); // Make sure that NewBB is put someplace intelligent, which doesn't mess up // code layout too horribly. @@ -242,7 +242,7 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader, OuterLoopPreds.push_back(PN->getIncomingBlock(i)); } } - DEBUG(dbgs() << "LoopSimplify: Splitting out a new outer loop\n"); + LLVM_DEBUG(dbgs() << "LoopSimplify: Splitting out a new outer loop\n"); // If ScalarEvolution is around and knows anything about values in // this loop, tell it to forget them, because we're about to @@ -371,8 +371,8 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader, BranchInst *BETerminator = BranchInst::Create(Header, BEBlock); BETerminator->setDebugLoc(Header->getFirstNonPHI()->getDebugLoc()); - DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block " - << BEBlock->getName() << "\n"); + LLVM_DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block " + << BEBlock->getName() << "\n"); // Move the new backedge block to right after the last backedge block. Function::iterator InsertPos = ++BackedgeBlocks.back()->getIterator(); @@ -484,8 +484,8 @@ ReprocessLoop: // Delete each unique out-of-loop (and thus dead) predecessor. for (BasicBlock *P : BadPreds) { - DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor " - << P->getName() << "\n"); + LLVM_DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor " + << P->getName() << "\n"); // Zap the dead pred's terminator and replace it with unreachable. TerminatorInst *TI = P->getTerminator(); @@ -504,8 +504,9 @@ ReprocessLoop: if (BI->isConditional()) { if (UndefValue *Cond = dyn_cast<UndefValue>(BI->getCondition())) { - DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in " - << ExitingBlock->getName() << "\n"); + LLVM_DEBUG(dbgs() + << "LoopSimplify: Resolving \"br i1 undef\" to exit in " + << ExitingBlock->getName() << "\n"); BI->setCondition(ConstantInt::get(Cond->getType(), !L->contains(BI->getSuccessor(0)))); @@ -641,8 +642,8 @@ ReprocessLoop: // Success. The block is now dead, so remove it from the loop, // update the dominator tree and delete it. 
- DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block " - << ExitingBlock->getName() << "\n"); + LLVM_DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block " + << ExitingBlock->getName() << "\n"); assert(pred_begin(ExitingBlock) == pred_end(ExitingBlock)); Changed = true; diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp index 980f0f73ddf..ad50d9601ec 100644 --- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp @@ -110,7 +110,7 @@ foldBlockIntoPredecessor(BasicBlock *BB, LoopInfo *LI, ScalarEvolution *SE, if (OnlyPred->getTerminator()->getNumSuccessors() != 1) return nullptr; - DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred); + LLVM_DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred); // Resolve any PHI nodes at the start of the block. They are all // guaranteed to have exactly one entry if they exist, unless there are @@ -297,19 +297,19 @@ LoopUnrollResult llvm::UnrollLoop( BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) { - DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n"); + LLVM_DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n"); return LoopUnrollResult::Unmodified; } BasicBlock *LatchBlock = L->getLoopLatch(); if (!LatchBlock) { - DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n"); + LLVM_DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n"); return LoopUnrollResult::Unmodified; } // Loops with indirectbr cannot be cloned. if (!L->isSafeToClone()) { - DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n"); + LLVM_DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n"); return LoopUnrollResult::Unmodified; } @@ -322,8 +322,9 @@ LoopUnrollResult llvm::UnrollLoop( if (!BI || BI->isUnconditional()) { // The loop-rotate pass can be helpful to avoid this in many cases. - DEBUG(dbgs() << - " Can't unroll; loop not terminated by a conditional branch.\n"); + LLVM_DEBUG( + dbgs() + << " Can't unroll; loop not terminated by a conditional branch.\n"); return LoopUnrollResult::Unmodified; } @@ -332,22 +333,22 @@ LoopUnrollResult llvm::UnrollLoop( }; if (!CheckSuccessors(0, 1) && !CheckSuccessors(1, 0)) { - DEBUG(dbgs() << "Can't unroll; only loops with one conditional latch" - " exiting the loop can be unrolled\n"); + LLVM_DEBUG(dbgs() << "Can't unroll; only loops with one conditional latch" + " exiting the loop can be unrolled\n"); return LoopUnrollResult::Unmodified; } if (Header->hasAddressTaken()) { // The loop-rotate pass can be helpful to avoid this in many cases. - DEBUG(dbgs() << - " Won't unroll loop: address of header block is taken.\n"); + LLVM_DEBUG( + dbgs() << " Won't unroll loop: address of header block is taken.\n"); return LoopUnrollResult::Unmodified; } if (TripCount != 0) - DEBUG(dbgs() << " Trip Count = " << TripCount << "\n"); + LLVM_DEBUG(dbgs() << " Trip Count = " << TripCount << "\n"); if (TripMultiple != 1) - DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n"); + LLVM_DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n"); // Effectively "DCE" unrolled iterations that are beyond the tripcount // and will never be executed. @@ -356,7 +357,7 @@ LoopUnrollResult llvm::UnrollLoop( // Don't enter the unroll code if there is nothing to do. 
if (TripCount == 0 && Count < 2 && PeelCount == 0) { - DEBUG(dbgs() << "Won't unroll; almost nothing to do\n"); + LLVM_DEBUG(dbgs() << "Won't unroll; almost nothing to do\n"); return LoopUnrollResult::Unmodified; } @@ -407,7 +408,7 @@ LoopUnrollResult llvm::UnrollLoop( // Loops containing convergent instructions must have a count that divides // their TripMultiple. - DEBUG( + LLVM_DEBUG( { bool HasConvergent = false; for (auto &BB : L->blocks()) @@ -430,9 +431,8 @@ LoopUnrollResult llvm::UnrollLoop( if (Force) RuntimeTripCount = false; else { - DEBUG( - dbgs() << "Wont unroll; remainder loop could not be generated" - "when assuming runtime trip count\n"); + LLVM_DEBUG(dbgs() << "Wont unroll; remainder loop could not be generated" + "when assuming runtime trip count\n"); return LoopUnrollResult::Unmodified; } } @@ -451,8 +451,8 @@ LoopUnrollResult llvm::UnrollLoop( using namespace ore; // Report the unrolling decision. if (CompletelyUnroll) { - DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName() - << " with trip count " << TripCount << "!\n"); + LLVM_DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName() + << " with trip count " << TripCount << "!\n"); if (ORE) ORE->emit([&]() { return OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(), @@ -461,8 +461,8 @@ LoopUnrollResult llvm::UnrollLoop( << NV("UnrollCount", TripCount) << " iterations"; }); } else if (PeelCount) { - DEBUG(dbgs() << "PEELING loop %" << Header->getName() - << " with iteration count " << PeelCount << "!\n"); + LLVM_DEBUG(dbgs() << "PEELING loop %" << Header->getName() + << " with iteration count " << PeelCount << "!\n"); if (ORE) ORE->emit([&]() { return OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(), @@ -478,29 +478,29 @@ LoopUnrollResult llvm::UnrollLoop( << NV("UnrollCount", Count); }; - DEBUG(dbgs() << "UNROLLING loop %" << Header->getName() - << " by " << Count); + LLVM_DEBUG(dbgs() << "UNROLLING loop %" << Header->getName() << " by " + << Count); if (TripMultiple == 0 || BreakoutTrip != TripMultiple) { - DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip); + LLVM_DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip); if (ORE) ORE->emit([&]() { return DiagBuilder() << " with a breakout at trip " << NV("BreakoutTrip", BreakoutTrip); }); } else if (TripMultiple != 1) { - DEBUG(dbgs() << " with " << TripMultiple << " trips per branch"); + LLVM_DEBUG(dbgs() << " with " << TripMultiple << " trips per branch"); if (ORE) ORE->emit([&]() { return DiagBuilder() << " with " << NV("TripMultiple", TripMultiple) << " trips per branch"; }); } else if (RuntimeTripCount) { - DEBUG(dbgs() << " with run-time trip count"); + LLVM_DEBUG(dbgs() << " with run-time trip count"); if (ORE) ORE->emit( [&]() { return DiagBuilder() << " with run-time trip count"; }); } - DEBUG(dbgs() << "!\n"); + LLVM_DEBUG(dbgs() << "!\n"); } // We are going to make changes to this loop. SCEV may be keeping cached info diff --git a/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp b/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp index 96d9acddb1a..13794c53f24 100644 --- a/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp @@ -253,8 +253,8 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize, // If the user provided a peel count, use that. 
bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0; if (UserPeelCount) { - DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount - << " iterations.\n"); + LLVM_DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount + << " iterations.\n"); UP.PeelCount = UnrollForcePeelCount; return; } @@ -298,8 +298,9 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize, DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount); // Consider max peel count limitation. assert(DesiredPeelCount > 0 && "Wrong loop size estimation?"); - DEBUG(dbgs() << "Peel " << DesiredPeelCount << " iteration(s) to turn" - << " some Phis into invariants.\n"); + LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCount + << " iteration(s) to turn" + << " some Phis into invariants.\n"); UP.PeelCount = DesiredPeelCount; return; } @@ -320,20 +321,22 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize, if (!PeelCount) return; - DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount - << "\n"); + LLVM_DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount + << "\n"); if (*PeelCount) { if ((*PeelCount <= UnrollPeelMaxCount) && (LoopSize * (*PeelCount + 1) <= UP.Threshold)) { - DEBUG(dbgs() << "Peeling first " << *PeelCount << " iterations.\n"); + LLVM_DEBUG(dbgs() << "Peeling first " << *PeelCount + << " iterations.\n"); UP.PeelCount = *PeelCount; return; } - DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n"); - DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n"); - DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1) << "\n"); - DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n"); + LLVM_DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n"); + LLVM_DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n"); + LLVM_DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1) + << "\n"); + LLVM_DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n"); } } } diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp index 0db7c0467dc..1b0e3a72c2d 100644 --- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp @@ -418,8 +418,9 @@ canSafelyUnrollMultiExitLoop(Loop *L, SmallVectorImpl<BasicBlock *> &OtherExits, // UnrollRuntimeMultiExit is true. This will need updating the logic in // connectEpilog/connectProlog. if (!LatchExit->getSinglePredecessor()) { - DEBUG(dbgs() << "Bailout for multi-exit handling when latch exit has >1 " - "predecessor.\n"); + LLVM_DEBUG( + dbgs() << "Bailout for multi-exit handling when latch exit has >1 " + "predecessor.\n"); return false; } // FIXME: We bail out of multi-exit unrolling when epilog loop is generated @@ -528,14 +529,14 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA) { - DEBUG(dbgs() << "Trying runtime unrolling on Loop: \n"); - DEBUG(L->dump()); - DEBUG(UseEpilogRemainder ? dbgs() << "Using epilog remainder.\n" : - dbgs() << "Using prolog remainder.\n"); + LLVM_DEBUG(dbgs() << "Trying runtime unrolling on Loop: \n"); + LLVM_DEBUG(L->dump()); + LLVM_DEBUG(UseEpilogRemainder ? dbgs() << "Using epilog remainder.\n" + : dbgs() << "Using prolog remainder.\n"); // Make sure the loop is in canonical form. 
if (!L->isLoopSimplifyForm()) { - DEBUG(dbgs() << "Not in simplify form!\n"); + LLVM_DEBUG(dbgs() << "Not in simplify form!\n"); return false; } @@ -561,7 +562,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, // Support only single exit and exiting block unless multi-exit loop unrolling is enabled. if (!isMultiExitUnrollingEnabled && (!L->getExitingBlock() || OtherExits.size())) { - DEBUG( + LLVM_DEBUG( dbgs() << "Multiple exit/exiting blocks in loop and multi-exit unrolling not " "enabled!\n"); @@ -581,7 +582,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, const SCEV *BECountSC = SE->getExitCount(L, Latch); if (isa<SCEVCouldNotCompute>(BECountSC) || !BECountSC->getType()->isIntegerTy()) { - DEBUG(dbgs() << "Could not compute exit block SCEV\n"); + LLVM_DEBUG(dbgs() << "Could not compute exit block SCEV\n"); return false; } @@ -591,7 +592,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, const SCEV *TripCountSC = SE->getAddExpr(BECountSC, SE->getConstant(BECountSC->getType(), 1)); if (isa<SCEVCouldNotCompute>(TripCountSC)) { - DEBUG(dbgs() << "Could not compute trip count SCEV.\n"); + LLVM_DEBUG(dbgs() << "Could not compute trip count SCEV.\n"); return false; } @@ -601,15 +602,16 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, SCEVExpander Expander(*SE, DL, "loop-unroll"); if (!AllowExpensiveTripCount && Expander.isHighCostExpansion(TripCountSC, L, PreHeaderBR)) { - DEBUG(dbgs() << "High cost for expanding trip count scev!\n"); + LLVM_DEBUG(dbgs() << "High cost for expanding trip count scev!\n"); return false; } // This constraint lets us deal with an overflowing trip count easily; see the // comment on ModVal below. if (Log2_32(Count) > BEWidth) { - DEBUG(dbgs() - << "Count failed constraint on overflow trip count calculation.\n"); + LLVM_DEBUG( + dbgs() + << "Count failed constraint on overflow trip count calculation.\n"); return false; } @@ -896,7 +898,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, } if (remainderLoop && UnrollRemainder) { - DEBUG(dbgs() << "Unrolling remainder loop\n"); + LLVM_DEBUG(dbgs() << "Unrolling remainder loop\n"); UnrollLoop(remainderLoop, /*Count*/ Count - 1, /*TripCount*/ Count - 1, /*Force*/ false, /*AllowRuntime*/ false, /*AllowExpensiveTripCount*/ false, /*PreserveCondBr*/ true, diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp index cec34b09f20..46af120a428 100644 --- a/llvm/lib/Transforms/Utils/LoopUtils.cpp +++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp @@ -555,47 +555,48 @@ bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop, if (AddReductionVar(Phi, RK_IntegerAdd, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_IntegerMult, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_IntegerOr, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_IntegerAnd, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found an AND reduction PHI." 
<< *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_IntegerXor, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_IntegerMinMax, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found a MINMAX reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found a MINMAX reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_FloatMult, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_FloatAdd, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n"); return true; } if (AddReductionVar(Phi, RK_FloatMinMax, TheLoop, HasFunNoNaNAttr, RedDes, DB, AC, DT)) { - DEBUG(dbgs() << "Found an float MINMAX reduction PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "Found an float MINMAX reduction PHI." << *Phi + << "\n"); return true; } // Not a reduction of known type. @@ -1052,7 +1053,7 @@ bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop, AR = PSE.getAsAddRec(Phi); if (!AR) { - DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n"); + LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n"); return false; } @@ -1086,14 +1087,15 @@ bool InductionDescriptor::isInductionPHI( const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev); if (!AR) { - DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n"); + LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n"); return false; } if (AR->getLoop() != TheLoop) { // FIXME: We should treat this as a uniform. Unfortunately, we // don't currently know how to handled uniform PHIs. 
- DEBUG(dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n"); + LLVM_DEBUG( + dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n"); return false; } @@ -1174,11 +1176,12 @@ bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI, BB, InLoopPredecessors, ".loopexit", DT, LI, PreserveLCSSA); if (!NewExitBB) - DEBUG(dbgs() << "WARNING: Can't create a dedicated exit block for loop: " - << *L << "\n"); + LLVM_DEBUG( + dbgs() << "WARNING: Can't create a dedicated exit block for loop: " + << *L << "\n"); else - DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block " - << NewExitBB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block " + << NewExitBB->getName() << "\n"); return true; }; diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp index 8643ad8cb00..441c5fd8b5a 100644 --- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp +++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp @@ -242,14 +242,13 @@ LowerSwitch::switchConvert(CaseItr Begin, CaseItr End, ConstantInt *LowerBound, unsigned Mid = Size / 2; std::vector<CaseRange> LHS(Begin, Begin + Mid); - DEBUG(dbgs() << "LHS: " << LHS << "\n"); + LLVM_DEBUG(dbgs() << "LHS: " << LHS << "\n"); std::vector<CaseRange> RHS(Begin + Mid, End); - DEBUG(dbgs() << "RHS: " << RHS << "\n"); + LLVM_DEBUG(dbgs() << "RHS: " << RHS << "\n"); CaseRange &Pivot = *(Begin + Mid); - DEBUG(dbgs() << "Pivot ==> " - << Pivot.Low->getValue() - << " -" << Pivot.High->getValue() << "\n"); + LLVM_DEBUG(dbgs() << "Pivot ==> " << Pivot.Low->getValue() << " -" + << Pivot.High->getValue() << "\n"); // NewLowerBound here should never be the integer minimal value. // This is because it is computed from a case range that is never @@ -271,20 +270,14 @@ LowerSwitch::switchConvert(CaseItr Begin, CaseItr End, ConstantInt *LowerBound, NewUpperBound = LHS.back().High; } - DEBUG(dbgs() << "LHS Bounds ==> "; - if (LowerBound) { - dbgs() << LowerBound->getSExtValue(); - } else { - dbgs() << "NONE"; - } - dbgs() << " - " << NewUpperBound->getSExtValue() << "\n"; - dbgs() << "RHS Bounds ==> "; - dbgs() << NewLowerBound->getSExtValue() << " - "; - if (UpperBound) { - dbgs() << UpperBound->getSExtValue() << "\n"; - } else { - dbgs() << "NONE\n"; - }); + LLVM_DEBUG(dbgs() << "LHS Bounds ==> "; if (LowerBound) { + dbgs() << LowerBound->getSExtValue(); + } else { dbgs() << "NONE"; } dbgs() << " - " + << NewUpperBound->getSExtValue() << "\n"; + dbgs() << "RHS Bounds ==> "; + dbgs() << NewLowerBound->getSExtValue() << " - "; if (UpperBound) { + dbgs() << UpperBound->getSExtValue() << "\n"; + } else { dbgs() << "NONE\n"; }); // Create a new node that checks if the value is < pivot. Go to the // left branch if it is and right branch if not. @@ -440,9 +433,9 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI, // Prepare cases vector. CaseVector Cases; unsigned numCmps = Clusterify(Cases, SI); - DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size() - << ". Total compares: " << numCmps << "\n"); - DEBUG(dbgs() << "Cases: " << Cases << "\n"); + LLVM_DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size() + << ". 
Total compares: " << numCmps << "\n"); + LLVM_DEBUG(dbgs() << "Cases: " << Cases << "\n"); (void)numCmps; ConstantInt *LowerBound = nullptr; diff --git a/llvm/lib/Transforms/Utils/PredicateInfo.cpp b/llvm/lib/Transforms/Utils/PredicateInfo.cpp index 62235895a1a..6da3c7a90b6 100644 --- a/llvm/lib/Transforms/Utils/PredicateInfo.cpp +++ b/llvm/lib/Transforms/Utils/PredicateInfo.cpp @@ -625,15 +625,15 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) { // we want to. bool PossibleCopy = VD.PInfo != nullptr; if (RenameStack.empty()) { - DEBUG(dbgs() << "Rename Stack is empty\n"); + LLVM_DEBUG(dbgs() << "Rename Stack is empty\n"); } else { - DEBUG(dbgs() << "Rename Stack Top DFS numbers are (" - << RenameStack.back().DFSIn << "," - << RenameStack.back().DFSOut << ")\n"); + LLVM_DEBUG(dbgs() << "Rename Stack Top DFS numbers are (" + << RenameStack.back().DFSIn << "," + << RenameStack.back().DFSOut << ")\n"); } - DEBUG(dbgs() << "Current DFS numbers are (" << VD.DFSIn << "," - << VD.DFSOut << ")\n"); + LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << VD.DFSIn << "," + << VD.DFSOut << ")\n"); bool ShouldPush = (VD.Def || PossibleCopy); bool OutOfScope = !stackIsInScope(RenameStack, VD); @@ -652,7 +652,7 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) { if (VD.Def || PossibleCopy) continue; if (!DebugCounter::shouldExecute(RenameCounter)) { - DEBUG(dbgs() << "Skipping execution due to debug counter\n"); + LLVM_DEBUG(dbgs() << "Skipping execution due to debug counter\n"); continue; } ValueDFS &Result = RenameStack.back(); @@ -663,8 +663,9 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) { if (!Result.Def) Result.Def = materializeStack(Counter, RenameStack, Op); - DEBUG(dbgs() << "Found replacement " << *Result.Def << " for " - << *VD.U->get() << " in " << *(VD.U->getUser()) << "\n"); + LLVM_DEBUG(dbgs() << "Found replacement " << *Result.Def << " for " + << *VD.U->get() << " in " << *(VD.U->getUser()) + << "\n"); assert(DT.dominates(cast<Instruction>(Result.Def), *VD.U) && "Predicateinfo def should have dominated this use"); VD.U->set(Result.Def); diff --git a/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/llvm/lib/Transforms/Utils/SSAUpdater.cpp index b2231d68a30..ca184ed7c4e 100644 --- a/llvm/lib/Transforms/Utils/SSAUpdater.cpp +++ b/llvm/lib/Transforms/Utils/SSAUpdater.cpp @@ -178,7 +178,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) { // If the client wants to know about all new instructions, tell it. if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI); - DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n"); + LLVM_DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n"); return InsertedPHI; } diff --git a/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp b/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp index 61ceb9c9277..397bac2940a 100644 --- a/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp +++ b/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp @@ -40,8 +40,8 @@ static BasicBlock *getUserBB(Use *U) { /// AddAvailableValue or AddUse calls. 
unsigned SSAUpdaterBulk::AddVariable(StringRef Name, Type *Ty) { unsigned Var = Rewrites.size(); - DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": initialized with Ty = " << *Ty - << ", Name = " << Name << "\n"); + LLVM_DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": initialized with Ty = " + << *Ty << ", Name = " << Name << "\n"); RewriteInfo RI(Name, Ty); Rewrites.push_back(RI); return Var; @@ -51,8 +51,9 @@ unsigned SSAUpdaterBulk::AddVariable(StringRef Name, Type *Ty) { /// specified value. void SSAUpdaterBulk::AddAvailableValue(unsigned Var, BasicBlock *BB, Value *V) { assert(Var < Rewrites.size() && "Variable not found!"); - DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": added new available value" - << *V << " in " << BB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "SSAUpdater: Var=" << Var + << ": added new available value" << *V << " in " + << BB->getName() << "\n"); Rewrites[Var].Defines[BB] = V; } @@ -60,8 +61,8 @@ void SSAUpdaterBulk::AddAvailableValue(unsigned Var, BasicBlock *BB, Value *V) { /// rewritten value when RewriteAllUses is called. void SSAUpdaterBulk::AddUse(unsigned Var, Use *U) { assert(Var < Rewrites.size() && "Variable not found!"); - DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": added a use" << *U->get() - << " in " << getUserBB(U)->getName() << "\n"); + LLVM_DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": added a use" << *U->get() + << " in " << getUserBB(U)->getName() << "\n"); Rewrites[Var].Uses.push_back(U); } @@ -134,7 +135,8 @@ void SSAUpdaterBulk::RewriteAllUses(DominatorTree *DT, // this set for computing iterated dominance frontier (IDF). // The IDF blocks are the blocks where we need to insert new phi-nodes. ForwardIDFCalculator IDF(*DT); - DEBUG(dbgs() << "SSAUpdater: rewriting " << R.Uses.size() << " use(s)\n"); + LLVM_DEBUG(dbgs() << "SSAUpdater: rewriting " << R.Uses.size() + << " use(s)\n"); SmallPtrSet<BasicBlock *, 2> DefBlocks; for (auto &Def : R.Defines) @@ -181,8 +183,8 @@ void SSAUpdaterBulk::RewriteAllUses(DominatorTree *DT, // Notify that users of the existing value that it is being replaced. if (OldVal != V && OldVal->hasValueHandle()) ValueHandleBase::ValueIsRAUWd(OldVal, V); - DEBUG(dbgs() << "SSAUpdater: replacing " << *OldVal << " with " << *V - << "\n"); + LLVM_DEBUG(dbgs() << "SSAUpdater: replacing " << *OldVal << " with " << *V + << "\n"); U->set(V); } } diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index 28d2606e9ac..833e13f890d 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -845,9 +845,9 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor( // Remove PHI node entries for the dead edge. 
ThisCases[0].Dest->removePredecessor(TI->getParent()); - DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() - << "Through successor TI: " << *TI << "Leaving: " << *NI - << "\n"); + LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() + << "Through successor TI: " << *TI << "Leaving: " << *NI + << "\n"); EraseTerminatorInstAndDCECond(TI); return true; @@ -859,8 +859,8 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor( for (unsigned i = 0, e = PredCases.size(); i != e; ++i) DeadCases.insert(PredCases[i].Value); - DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() - << "Through successor TI: " << *TI); + LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() + << "Through successor TI: " << *TI); // Collect branch weights into a vector. SmallVector<uint32_t, 8> Weights; @@ -886,7 +886,7 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor( if (HasWeight && Weights.size() >= 2) setBranchWeights(SI, Weights); - DEBUG(dbgs() << "Leaving: " << *TI << "\n"); + LLVM_DEBUG(dbgs() << "Leaving: " << *TI << "\n"); return true; } @@ -927,9 +927,9 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor( Instruction *NI = Builder.CreateBr(TheRealDest); (void)NI; - DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() - << "Through successor TI: " << *TI << "Leaving: " << *NI - << "\n"); + LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() + << "Through successor TI: " << *TI << "Leaving: " << *NI + << "\n"); EraseTerminatorInstAndDCECond(TI); return true; @@ -1739,7 +1739,8 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) { LockstepReverseIterator LRI(UnconditionalPreds); while (LRI.isValid() && canSinkInstructions(*LRI, PHIOperands)) { - DEBUG(dbgs() << "SINK: instruction can be sunk: " << *(*LRI)[0] << "\n"); + LLVM_DEBUG(dbgs() << "SINK: instruction can be sunk: " << *(*LRI)[0] + << "\n"); InstructionsToSink.insert((*LRI).begin(), (*LRI).end()); ++ScanIdx; --LRI; @@ -1751,7 +1752,7 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) { for (auto *V : PHIOperands[I]) if (InstructionsToSink.count(V) == 0) ++NumPHIdValues; - DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n"); + LLVM_DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n"); unsigned NumPHIInsts = NumPHIdValues / UnconditionalPreds.size(); if ((NumPHIdValues % UnconditionalPreds.size()) != 0) NumPHIInsts++; @@ -1779,7 +1780,7 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) { if (!Profitable) return false; - DEBUG(dbgs() << "SINK: Splitting edge\n"); + LLVM_DEBUG(dbgs() << "SINK: Splitting edge\n"); // We have a conditional edge and we're going to sink some instructions. // Insert a new block postdominating all blocks we're going to sink from. if (!SplitBlockPredecessors(BB, UnconditionalPreds, ".sink.split")) @@ -1801,16 +1802,17 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) { // and never actually sink it which means we produce more PHIs than intended. // This is unlikely in practice though. for (unsigned SinkIdx = 0; SinkIdx != ScanIdx; ++SinkIdx) { - DEBUG(dbgs() << "SINK: Sink: " - << *UnconditionalPreds[0]->getTerminator()->getPrevNode() - << "\n"); + LLVM_DEBUG(dbgs() << "SINK: Sink: " + << *UnconditionalPreds[0]->getTerminator()->getPrevNode() + << "\n"); // Because we've sunk every instruction in turn, the current instruction to // sink is always at index 0. 
LRI.reset(); if (!ProfitableToSinkInstruction(LRI)) { // Too many PHIs would be created. - DEBUG(dbgs() << "SINK: stopping here, too many PHIs would be created!\n"); + LLVM_DEBUG( + dbgs() << "SINK: stopping here, too many PHIs would be created!\n"); break; } @@ -2053,7 +2055,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB, return false; // If we get here, we can hoist the instruction and if-convert. - DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";); + LLVM_DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";); // Insert a select of the value of the speculated store. if (SpeculatedStoreValue) { @@ -2359,8 +2361,9 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI, } } - DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond << " T: " - << IfTrue->getName() << " F: " << IfFalse->getName() << "\n"); + LLVM_DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond + << " T: " << IfTrue->getName() + << " F: " << IfFalse->getName() << "\n"); // If we can still promote the PHI nodes after this gauntlet of tests, // do all of the PHI's now. @@ -2484,9 +2487,9 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, (void)RI; - DEBUG(dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:" - << "\n " << *BI << "NewRet = " << *RI - << "TRUEBLOCK: " << *TrueSucc << "FALSEBLOCK: " << *FalseSucc); + LLVM_DEBUG(dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:" + << "\n " << *BI << "NewRet = " << *RI << "TRUEBLOCK: " + << *TrueSucc << "FALSEBLOCK: " << *FalseSucc); EraseTerminatorInstAndDCECond(BI); @@ -2659,7 +2662,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) { continue; } - DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB); + LLVM_DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB); IRBuilder<> Builder(PBI); // If we need to invert the condition in the pred block to match, do so now. @@ -3282,8 +3285,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, // Finally, if everything is ok, fold the branches to logical ops. BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1); - DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent() - << "AND: " << *BI->getParent()); + LLVM_DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent() + << "AND: " << *BI->getParent()); // If OtherDest *is* BB, then BB is a basic block with a single conditional // branch in it, where one edge (OtherDest) goes back to itself but the other @@ -3301,7 +3304,7 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, OtherDest = InfLoopBlock; } - DEBUG(dbgs() << *PBI->getParent()->getParent()); + LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent()); // BI may have other predecessors. Because of this, we leave // it alone, but modify PBI. @@ -3385,8 +3388,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, } } - DEBUG(dbgs() << "INTO: " << *PBI->getParent()); - DEBUG(dbgs() << *PBI->getParent()->getParent()); + LLVM_DEBUG(dbgs() << "INTO: " << *PBI->getParent()); + LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent()); // This basic block is probably dead. We know it has at least // one fewer predecessor. @@ -3686,9 +3689,9 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder, BasicBlock *BB = BI->getParent(); - DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size() - << " cases into SWITCH. 
BB is:\n" - << *BB); + LLVM_DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size() + << " cases into SWITCH. BB is:\n" + << *BB); // If there are any extra values that couldn't be folded into the switch // then we evaluate them with an explicit branch first. Split the block @@ -3711,8 +3714,8 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder, // for the edge we just added. AddPredecessorToBlock(EdgeBB, BB, NewBB); - DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase - << "\nEXTRABB = " << *BB); + LLVM_DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase + << "\nEXTRABB = " << *BB); BB = NewBB; } @@ -3743,7 +3746,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder, // Erase the old branch instruction. EraseTerminatorInstAndDCECond(BI); - DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n'); + LLVM_DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n'); return true; } @@ -4071,8 +4074,8 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) { if (!UncondBranchPreds.empty() && DupRet) { while (!UncondBranchPreds.empty()) { BasicBlock *Pred = UncondBranchPreds.pop_back_val(); - DEBUG(dbgs() << "FOLDING: " << *BB - << "INTO UNCOND BRANCH PRED: " << *Pred); + LLVM_DEBUG(dbgs() << "FOLDING: " << *BB + << "INTO UNCOND BRANCH PRED: " << *Pred); (void)FoldReturnIntoUncondBranch(RI, BB, Pred); } @@ -4396,7 +4399,8 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC, if (Known.Zero.intersects(CaseVal) || !Known.One.isSubsetOf(CaseVal) || (CaseVal.getMinSignedBits() > MaxSignificantBitsInCond)) { DeadCases.push_back(Case.getCaseValue()); - DEBUG(dbgs() << "SimplifyCFG: switch case " << CaseVal << " is dead.\n"); + LLVM_DEBUG(dbgs() << "SimplifyCFG: switch case " << CaseVal + << " is dead.\n"); } } @@ -4412,7 +4416,7 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC, if (HasDefault && DeadCases.empty() && NumUnknownBits < 64 /* avoid overflow */ && SI->getNumCases() == (1ULL << NumUnknownBits)) { - DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n"); + LLVM_DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n"); BasicBlock *NewDefault = SplitBlockPredecessors(SI->getDefaultDest(), SI->getParent(), ""); SI->setDefaultDest(&*NewDefault); @@ -5996,7 +6000,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { // or that just have themself as a predecessor. These are unreachable. if ((pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) || BB->getSinglePredecessor() == BB) { - DEBUG(dbgs() << "Removing BB: \n" << *BB); + LLVM_DEBUG(dbgs() << "Removing BB: \n" << *BB); DeleteDeadBlock(BB); return true; } diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp index ad1faea0a7a..a31403907ff 100644 --- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp @@ -147,8 +147,8 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) if (SE->getSCEV(UseInst) != FoldedExpr) return nullptr; - DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand - << " -> " << *UseInst << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand + << " -> " << *UseInst << '\n'); UseInst->setOperand(OperIdx, IVSrc); assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper"); @@ -221,7 +221,7 @@ bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp, // for now. 
return false; - DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n'); ICmp->setPredicate(InvariantPredicate); ICmp->setOperand(0, NewLHS); ICmp->setOperand(1, NewRHS); @@ -252,11 +252,11 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) { if (SE->isKnownPredicate(Pred, S, X)) { ICmp->replaceAllUsesWith(ConstantInt::getTrue(ICmp->getContext())); DeadInsts.emplace_back(ICmp); - DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n'); } else if (SE->isKnownPredicate(ICmpInst::getInversePredicate(Pred), S, X)) { ICmp->replaceAllUsesWith(ConstantInt::getFalse(ICmp->getContext())); DeadInsts.emplace_back(ICmp); - DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n'); } else if (makeIVComparisonInvariant(ICmp, IVOperand)) { // fallthrough to end of function } else if (ICmpInst::isSigned(OriginalPred) && @@ -267,7 +267,8 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) { // we turn the instruction's predicate to its unsigned version. Note that // we cannot rely on Pred here unless we check if we have swapped it. assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?"); - DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp + << '\n'); ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred)); } else return; @@ -293,7 +294,7 @@ bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) { SDiv->getName() + ".udiv", SDiv); UDiv->setIsExact(SDiv->isExact()); SDiv->replaceAllUsesWith(UDiv); - DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n'); ++NumSimplifiedSDiv; Changed = true; DeadInsts.push_back(SDiv); @@ -309,7 +310,7 @@ void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) { auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D, Rem->getName() + ".urem", Rem); Rem->replaceAllUsesWith(URem); - DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n'); ++NumSimplifiedSRem; Changed = true; DeadInsts.emplace_back(Rem); @@ -318,7 +319,7 @@ void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) { // i % n --> i if i is in [0,n). 
void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) { Rem->replaceAllUsesWith(Rem->getOperand(0)); - DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n'); ++NumElimRem; Changed = true; DeadInsts.emplace_back(Rem); @@ -332,7 +333,7 @@ void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) { SelectInst *Sel = SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem); Rem->replaceAllUsesWith(Sel); - DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n'); ++NumElimRem; Changed = true; DeadInsts.emplace_back(Rem); @@ -548,8 +549,8 @@ bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) { auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP); I->replaceAllUsesWith(Invariant); - DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I - << " with loop invariant: " << *S << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I + << " with loop invariant: " << *S << '\n'); ++NumFoldedUser; Changed = true; DeadInsts.emplace_back(I); @@ -589,7 +590,7 @@ bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst, if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand)) return false; - DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n'); + LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n'); UseInst->replaceAllUsesWith(IVOperand); ++NumElimIdentity; diff --git a/llvm/lib/Transforms/Utils/SplitModule.cpp b/llvm/lib/Transforms/Utils/SplitModule.cpp index 39a4e565c2e..f8d758c5498 100644 --- a/llvm/lib/Transforms/Utils/SplitModule.cpp +++ b/llvm/lib/Transforms/Utils/SplitModule.cpp @@ -101,7 +101,8 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap, // At this point module should have the proper mix of globals and locals. // As we attempt to partition this module, we must not change any // locals to globals. - DEBUG(dbgs() << "Partition module with (" << M->size() << ")functions\n"); + LLVM_DEBUG(dbgs() << "Partition module with (" << M->size() + << ")functions\n"); ClusterMapType GVtoClusterMap; ComdatMembersType ComdatMembers; @@ -194,16 +195,17 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap, unsigned CurrentClusterSize = BalancinQueue.top().second; BalancinQueue.pop(); - DEBUG(dbgs() << "Root[" << CurrentClusterID << "] cluster_size(" << I.first - << ") ----> " << I.second->getData()->getName() << "\n"); + LLVM_DEBUG(dbgs() << "Root[" << CurrentClusterID << "] cluster_size(" + << I.first << ") ----> " << I.second->getData()->getName() + << "\n"); for (ClusterMapType::member_iterator MI = GVtoClusterMap.findLeader(I.second); MI != GVtoClusterMap.member_end(); ++MI) { if (!Visited.insert(*MI).second) continue; - DEBUG(dbgs() << "----> " << (*MI)->getName() - << ((*MI)->hasLocalLinkage() ? " l " : " e ") << "\n"); + LLVM_DEBUG(dbgs() << "----> " << (*MI)->getName() + << ((*MI)->hasLocalLinkage() ? 
" l " : " e ") << "\n"); Visited.insert(*MI); ClusterIDMap[*MI] = CurrentClusterID; CurrentClusterSize++; diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp index c3feea6a0a4..5f71bdcf597 100644 --- a/llvm/lib/Transforms/Utils/VNCoercion.cpp +++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp @@ -389,8 +389,8 @@ Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, NewLoad->takeName(SrcVal); NewLoad->setAlignment(SrcVal->getAlignment()); - DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n"); - DEBUG(dbgs() << "TO: " << *NewLoad << "\n"); + LLVM_DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n"); + LLVM_DEBUG(dbgs() << "TO: " << *NewLoad << "\n"); // Replace uses of the original load with the wider load. On a big endian // system, we need to shift down to get the relevant bits. diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp index c1d3f925515..a6acf3e558a 100644 --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -510,7 +510,7 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) { SmallVector<Instruction *, 16> ChainInstrs; bool IsLoadChain = isa<LoadInst>(Chain[0]); - DEBUG({ + LLVM_DEBUG({ for (Instruction *I : Chain) { if (IsLoadChain) assert(isa<LoadInst>(I) && @@ -532,11 +532,12 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) { Intrinsic::sideeffect) { // Ignore llvm.sideeffect calls. } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) { - DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I << '\n'); + LLVM_DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I + << '\n'); break; } else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) { - DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I - << '\n'); + LLVM_DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I + << '\n'); break; } } @@ -588,7 +589,7 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) { if (!AA.isNoAlias(MemoryLocation::get(MemInstr), MemoryLocation::get(ChainInstr))) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "LSV: Found alias:\n" " Aliasing instruction and pointer:\n" << " " << *MemInstr << '\n' @@ -744,7 +745,7 @@ bool Vectorizer::vectorizeChains(InstrListMap &Map) { if (Size < 2) continue; - DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n"); + LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n"); // Process the stores in chunks of 64. for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) { @@ -758,7 +759,8 @@ bool Vectorizer::vectorizeChains(InstrListMap &Map) { } bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) { - DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() << " instructions.\n"); + LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() + << " instructions.\n"); SmallVector<int, 16> Heads, Tails; int ConsecutiveChain[64]; @@ -894,14 +896,14 @@ bool Vectorizer::vectorizeStoreChain( // vector factor, break it into two pieces. unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy); if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) { - DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor." - " Creating two separate arrays.\n"); + LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor." 
+ " Creating two separate arrays.\n"); return vectorizeStoreChain(Chain.slice(0, TargetVF), InstructionsProcessed) | vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed); } - DEBUG({ + LLVM_DEBUG({ dbgs() << "LSV: Stores to vectorize:\n"; for (Instruction *I : Chain) dbgs() << " " << *I << "\n"; @@ -1042,8 +1044,8 @@ bool Vectorizer::vectorizeLoadChain( // vector factor, break it into two pieces. unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy); if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) { - DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor." - " Creating two separate arrays.\n"); + LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor." + " Creating two separate arrays.\n"); return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) | vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed); } @@ -1066,7 +1068,7 @@ bool Vectorizer::vectorizeLoadChain( Alignment = NewAlign; } - DEBUG({ + LLVM_DEBUG({ dbgs() << "LSV: Loads to vectorize:\n"; for (Instruction *I : Chain) I->dump(); @@ -1149,7 +1151,7 @@ bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace, bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(), SzInBytes * 8, AddressSpace, Alignment, &Fast); - DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows - << " and fast? " << Fast << "\n";); + LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows + << " and fast? " << Fast << "\n";); return !Allows || !Fast; } diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp index d1fd2eb68a8..697bc1b448d 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -98,26 +98,26 @@ LoopVectorizeHints::LoopVectorizeHints(const Loop *L, bool DisableInterleaving, // consider the loop to have been already vectorized because there's // nothing more that we can do. IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1; - DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs() - << "LV: Interleaving disabled by the pass manager\n"); + LLVM_DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs() + << "LV: Interleaving disabled by the pass manager\n"); } bool LoopVectorizeHints::allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const { if (getForce() == LoopVectorizeHints::FK_Disabled) { - DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n"); emitRemarkWithHints(); return false; } if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) { - DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n"); emitRemarkWithHints(); return false; } if (getIsVectorized() == 1) { - DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n"); // FIXME: Add interleave.disable metadata. This will allow // vectorize.disable to be used without disabling the pass and errors // to differentiate between disabled vectorization and a width of 1. 
@@ -223,7 +223,7 @@ void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) { if (H->validate(Val)) H->Value = Val; else - DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n"); + LLVM_DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n"); break; } } @@ -309,7 +309,7 @@ bool LoopVectorizationRequirements::doesNotMeet( << "loop not vectorized: cannot prove it is safe to reorder " "memory operations"; }); - DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); + LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); Failed = true; } @@ -350,7 +350,7 @@ static bool isUniformLoop(Loop *Lp, Loop *OuterLp) { // 1. PHINode *IV = Lp->getCanonicalInductionVariable(); if (!IV) { - DEBUG(dbgs() << "LV: Canonical IV not found.\n"); + LLVM_DEBUG(dbgs() << "LV: Canonical IV not found.\n"); return false; } @@ -358,14 +358,15 @@ static bool isUniformLoop(Loop *Lp, Loop *OuterLp) { BasicBlock *Latch = Lp->getLoopLatch(); auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator()); if (!LatchBr || LatchBr->isUnconditional()) { - DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n"); + LLVM_DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n"); return false; } // 3. auto *LatchCmp = dyn_cast<CmpInst>(LatchBr->getCondition()); if (!LatchCmp) { - DEBUG(dbgs() << "LV: Loop latch condition is not a compare instruction.\n"); + LLVM_DEBUG( + dbgs() << "LV: Loop latch condition is not a compare instruction.\n"); return false; } @@ -374,7 +375,7 @@ static bool isUniformLoop(Loop *Lp, Loop *OuterLp) { Value *IVUpdate = IV->getIncomingValueForBlock(Latch); if (!(CondOp0 == IVUpdate && OuterLp->isLoopInvariant(CondOp1)) && !(CondOp1 == IVUpdate && OuterLp->isLoopInvariant(CondOp0))) { - DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n"); + LLVM_DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n"); return false; } @@ -441,7 +442,7 @@ static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, Instruction *UI = cast<Instruction>(U); // This user may be a reduction exit value. if (!TheLoop->contains(UI)) { - DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); + LLVM_DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); return true; } } @@ -474,7 +475,7 @@ bool LoopVectorizationLegality::canVectorizeOuterLoop() { // not supported yet. auto *Br = dyn_cast<BranchInst>(BB->getTerminator()); if (!Br) { - DEBUG(dbgs() << "LV: Unsupported basic block terminator.\n"); + LLVM_DEBUG(dbgs() << "LV: Unsupported basic block terminator.\n"); ORE->emit(createMissedAnalysis("CFGNotUnderstood") << "loop control flow is not understood by vectorizer"); if (DoExtraAnalysis) @@ -490,7 +491,7 @@ bool LoopVectorizationLegality::canVectorizeOuterLoop() { !TheLoop->isLoopInvariant(Br->getCondition()) && !LI->isLoopHeader(Br->getSuccessor(0)) && !LI->isLoopHeader(Br->getSuccessor(1))) { - DEBUG(dbgs() << "LV: Unsupported conditional branch.\n"); + LLVM_DEBUG(dbgs() << "LV: Unsupported conditional branch.\n"); ORE->emit(createMissedAnalysis("CFGNotUnderstood") << "loop control flow is not understood by vectorizer"); if (DoExtraAnalysis) @@ -504,8 +505,9 @@ bool LoopVectorizationLegality::canVectorizeOuterLoop() { // simple outer loops scenarios with uniform nested loops. 
if (!isUniformLoopNest(TheLoop /*loop nest*/, TheLoop /*context outer loop*/)) { - DEBUG(dbgs() - << "LV: Not vectorizing: Outer loop contains divergent loops.\n"); + LLVM_DEBUG( + dbgs() + << "LV: Not vectorizing: Outer loop contains divergent loops.\n"); ORE->emit(createMissedAnalysis("CFGNotUnderstood") << "loop control flow is not understood by vectorizer"); if (DoExtraAnalysis) @@ -565,7 +567,7 @@ void LoopVectorizationLegality::addInductionPhi( AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch())); } - DEBUG(dbgs() << "LV: Found an induction variable.\n"); + LLVM_DEBUG(dbgs() << "LV: Found an induction variable.\n"); } bool LoopVectorizationLegality::canVectorizeInstrs() { @@ -587,7 +589,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { !PhiTy->isPointerTy()) { ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi) << "loop control flow is not understood by vectorizer"); - DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n"); + LLVM_DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n"); return false; } @@ -609,7 +611,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { if (Phi->getNumIncomingValues() != 2) { ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi) << "control flow not understood by vectorizer"); - DEBUG(dbgs() << "LV: Found an invalid PHI.\n"); + LLVM_DEBUG(dbgs() << "LV: Found an invalid PHI.\n"); return false; } @@ -647,7 +649,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi) << "value that could not be identified as " "reduction is used outside the loop"); - DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n"); return false; } // end of PHI handling @@ -662,7 +664,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) { ORE->emit(createMissedAnalysis("CantVectorizeCall", CI) << "call instruction cannot be vectorized"); - DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); + LLVM_DEBUG( + dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); return false; } @@ -674,7 +677,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI) << "intrinsic instruction cannot be vectorized"); - DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); + LLVM_DEBUG(dbgs() + << "LV: Found unvectorizable intrinsic " << *CI << "\n"); return false; } } @@ -686,7 +690,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { isa<ExtractElementInst>(I)) { ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I) << "instruction return type cannot be vectorized"); - DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); + LLVM_DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); return false; } @@ -706,7 +710,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { // semantics. 
} else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) && !I.isFast()) { - DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); + LLVM_DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); Hints->setPotentiallyUnsafe(); } @@ -721,7 +725,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() { } if (!PrimaryInduction) { - DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); + LLVM_DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); if (Inductions.empty()) { ORE->emit(createMissedAnalysis("NoInductionVariable") << "loop induction variable could not be identified"); @@ -753,7 +757,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() { if (LAI->hasStoreToLoopInvariantAddress()) { ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress") << "write to a loop invariant address could not be vectorized"); - DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); + LLVM_DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); return false; } @@ -903,7 +907,7 @@ bool LoopVectorizationLegality::canVectorizeLoopCFG(Loop *Lp, // We must have a loop in canonical form. Loops with indirectbr in them cannot // be canonicalized. if (!Lp->getLoopPreheader()) { - DEBUG(dbgs() << "LV: Loop doesn't have a legal pre-header.\n"); + LLVM_DEBUG(dbgs() << "LV: Loop doesn't have a legal pre-header.\n"); ORE->emit(createMissedAnalysis("CFGNotUnderstood") << "loop control flow is not understood by vectorizer"); if (DoExtraAnalysis) @@ -989,8 +993,8 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) { } // We need to have a loop header. - DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() - << '\n'); + LLVM_DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() + << '\n'); // Specific checks for outer loops. We skip the remaining legal checks at this // point because they don't support outer loops. @@ -998,13 +1002,13 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) { assert(UseVPlanNativePath && "VPlan-native path is not enabled."); if (!canVectorizeOuterLoop()) { - DEBUG(dbgs() << "LV: Not vectorizing: Unsupported outer loop.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Unsupported outer loop.\n"); // TODO: Implement DoExtraAnalysis when subsequent legal checks support // outer loops. return false; } - DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n"); + LLVM_DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n"); return Result; } @@ -1012,7 +1016,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) { // Check if we can if-convert non-single-bb loops. unsigned NumBlocks = TheLoop->getNumBlocks(); if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { - DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); + LLVM_DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); if (DoExtraAnalysis) Result = false; else @@ -1021,7 +1025,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) { // Check if we can vectorize the instructions and CFG in this loop. if (!canVectorizeInstrs()) { - DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); + LLVM_DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); if (DoExtraAnalysis) Result = false; else @@ -1030,18 +1034,18 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) { // Go over each instruction and look at memory deps. 
if (!canVectorizeMemory()) { - DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); + LLVM_DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); if (DoExtraAnalysis) Result = false; else return false; } - DEBUG(dbgs() << "LV: We can vectorize this loop" - << (LAI->getRuntimePointerChecking()->Need - ? " (with a runtime bound check)" - : "") - << "!\n"); + LLVM_DEBUG(dbgs() << "LV: We can vectorize this loop" + << (LAI->getRuntimePointerChecking()->Need + ? " (with a runtime bound check)" + : "") + << "!\n"); unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) @@ -1051,7 +1055,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) { ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks") << "Too many SCEV assumptions need to be made and checked " << "at runtime"); - DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); + LLVM_DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); if (DoExtraAnalysis) Result = false; else diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index eefaf22d028..a65dc09baa6 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1628,20 +1628,20 @@ static bool isExplicitVecOuterLoop(Loop *OuterLp, Function *Fn = OuterLp->getHeader()->getParent(); if (!Hints.allowVectorization(Fn, OuterLp, false /*AlwaysVectorize*/)) { - DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); + LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); return false; } if (!Hints.getWidth()) { - DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n"); emitMissedWarning(Fn, OuterLp, Hints, ORE); return false; } if (Hints.getInterleave() > 1) { // TODO: Interleave support is future work. - DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " - "outer loops.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " + "outer loops.\n"); emitMissedWarning(Fn, OuterLp, Hints, ORE); return false; } @@ -4123,7 +4123,7 @@ void InnerLoopVectorizer::widenInstruction(Instruction &I) { default: // This instruction is not vectorized by simple widening. - DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); + LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); llvm_unreachable("Unhandled instruction!"); } // end of switch. } @@ -4235,7 +4235,7 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { } for (auto *I : ScalarPtrs) if (!PossibleNonScalarPtrs.count(I)) { - DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); Worklist.insert(I); } @@ -4252,8 +4252,9 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { continue; Worklist.insert(Ind); Worklist.insert(IndUpdate); - DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); - DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate + << "\n"); } // Insert the forced scalars. 
@@ -4280,7 +4281,7 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { isScalarUse(J, Src)); })) { Worklist.insert(Src); - DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); } } @@ -4320,8 +4321,9 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { // The induction variable and its update instruction will remain scalar. Worklist.insert(Ind); Worklist.insert(IndUpdate); - DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); - DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate + << "\n"); } Scalars[VF].insert(Worklist.begin(), Worklist.end()); @@ -4413,7 +4415,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { Worklist.insert(Cmp); - DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); } // Holds consecutive and consecutive-like pointers. Consecutive-like pointers @@ -4474,7 +4476,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { // aren't also identified as possibly non-uniform. for (auto *V : ConsecutiveLikePtrs) if (!PossibleNonUniformPtrs.count(V)) { - DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); Worklist.insert(V); } @@ -4497,7 +4499,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { isUniformDecision(J, VF)); })) { Worklist.insert(OI); - DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); } } } @@ -4542,8 +4544,9 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { // The induction variable and its update instruction will remain uniform. Worklist.insert(Ind); Worklist.insert(IndUpdate); - DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); - DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); + LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate + << "\n"); } Uniforms[VF].insert(Worklist.begin(), Worklist.end()); @@ -4630,7 +4633,7 @@ void InterleavedAccessInfo::collectConstStrideAccesses( // with other accesses that may precede it in program order. Note that a // bottom-up order does not imply that WAW dependences should not be checked. void InterleavedAccessInfo::analyzeInterleaving() { - DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); + LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); const ValueToValueMap &Strides = LAI->getSymbolicStrides(); // Holds all accesses with a constant stride. 
@@ -4672,7 +4675,8 @@ void InterleavedAccessInfo::analyzeInterleaving() { if (isStrided(DesB.Stride)) { Group = getInterleaveGroup(B); if (!Group) { - DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); + LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B + << '\n'); Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); } if (B->mayWriteToMemory()) @@ -4775,8 +4779,9 @@ void InterleavedAccessInfo::analyzeInterleaving() { // Try to insert A into B's group. if (Group->insertMember(A, IndexA, DesA.Align)) { - DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' - << " into the interleave group with" << *B << '\n'); + LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' + << " into the interleave group with" << *B + << '\n'); InterleaveGroupMap[A] = Group; // Set the first load in program order as the insert position. @@ -4789,8 +4794,9 @@ void InterleavedAccessInfo::analyzeInterleaving() { // Remove interleaved store groups with gaps. for (InterleaveGroup *Group : StoreGroups) if (Group->getNumMembers() != Group->getFactor()) { - DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due " - "to gaps.\n"); + LLVM_DEBUG( + dbgs() << "LV: Invalidate candidate interleaved store group due " + "to gaps.\n"); releaseGroup(Group); } // Remove interleaved groups with gaps (currently only loads) whose memory @@ -4822,8 +4828,9 @@ void InterleavedAccessInfo::analyzeInterleaving() { Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0)); if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false, /*ShouldCheckWrap=*/true)) { - DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " - "first group member potentially pointer-wrapping.\n"); + LLVM_DEBUG( + dbgs() << "LV: Invalidate candidate interleaved group due to " + "first group member potentially pointer-wrapping.\n"); releaseGroup(Group); continue; } @@ -4832,8 +4839,9 @@ void InterleavedAccessInfo::analyzeInterleaving() { Value *LastMemberPtr = getLoadStorePointerOperand(LastMember); if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false, /*ShouldCheckWrap=*/true)) { - DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " - "last group member potentially pointer-wrapping.\n"); + LLVM_DEBUG( + dbgs() << "LV: Invalidate candidate interleaved group due to " + "last group member potentially pointer-wrapping.\n"); releaseGroup(Group); } } else { @@ -4843,12 +4851,14 @@ void InterleavedAccessInfo::analyzeInterleaving() { // to look for a member at index factor - 1, since every group must have // a member at index zero. if (Group->isReverse()) { - DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " - "a reverse access with gaps.\n"); + LLVM_DEBUG( + dbgs() << "LV: Invalidate candidate interleaved group due to " + "a reverse access with gaps.\n"); releaseGroup(Group); continue; } - DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); + LLVM_DEBUG( + dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); RequiresScalarEpilogue = true; } } @@ -4858,7 +4868,8 @@ Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { // TODO: It may by useful to do since it's still likely to be dynamically // uniform if the target can skip. 
- DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target"); + LLVM_DEBUG( + dbgs() << "LV: Not inserting runtime ptr check for divergent target"); ORE->emit( createMissedAnalysis("CantVersionLoopWithDivergentTarget") @@ -4876,20 +4887,22 @@ Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { << "runtime pointer checks needed. Enable vectorization of this " "loop with '#pragma clang loop vectorize(enable)' when " "compiling with -Os/-Oz"); - DEBUG(dbgs() - << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); + LLVM_DEBUG( + dbgs() + << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); return None; } // If we optimize the program for size, avoid creating the tail loop. - DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); + LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); // If we don't know the precise trip count, don't try to vectorize. if (TC < 2) { ORE->emit( createMissedAnalysis("UnknownLoopCountComplexCFG") << "unable to calculate the loop count due to complex control flow"); - DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); + LLVM_DEBUG( + dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); return None; } @@ -4907,7 +4920,8 @@ Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { "same time. Enable vectorization of this loop " "with '#pragma clang loop vectorize(enable)' " "when compiling with -Os/-Oz"); - DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); + LLVM_DEBUG( + dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); return None; } @@ -4932,23 +4946,23 @@ LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, unsigned MaxVectorSize = WidestRegister / WidestType; - DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " - << WidestType << " bits.\n"); - DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister - << " bits.\n"); + LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType + << " / " << WidestType << " bits.\n"); + LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " + << WidestRegister << " bits.\n"); assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" " into one vector!"); if (MaxVectorSize == 0) { - DEBUG(dbgs() << "LV: The target has no vector registers.\n"); + LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); MaxVectorSize = 1; return MaxVectorSize; } else if (ConstTripCount && ConstTripCount < MaxVectorSize && isPowerOf2_32(ConstTripCount)) { // We need to clamp the VF to be the ConstTripCount. There is no point in // choosing a higher viable VF as done in the loop below. 
- DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " - << ConstTripCount << "\n"); + LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " + << ConstTripCount << "\n"); MaxVectorSize = ConstTripCount; return MaxVectorSize; } @@ -4977,8 +4991,8 @@ LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, } if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { if (MaxVF < MinVF) { - DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF - << ") with target's minimum: " << MinVF << '\n'); + LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF + << ") with target's minimum: " << MinVF << '\n'); MaxVF = MinVF; } } @@ -4991,7 +5005,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { float Cost = expectedCost(1).first; const float ScalarCost = Cost; unsigned Width = 1; - DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); + LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; // Ignore scalar width, because the user explicitly wants vectorization. @@ -5006,10 +5020,10 @@ LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { // the vector elements. VectorizationCostTy C = expectedCost(i); float VectorCost = C.first / (float)i; - DEBUG(dbgs() << "LV: Vector loop of width " << i - << " costs: " << (int)VectorCost << ".\n"); + LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i + << " costs: " << (int)VectorCost << ".\n"); if (!C.second && !ForceVectorization) { - DEBUG( + LLVM_DEBUG( dbgs() << "LV: Not considering vector loop of width " << i << " because it will not generate any vector instructions.\n"); continue; @@ -5023,15 +5037,16 @@ LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { if (!EnableCondStoresVectorization && NumPredStores) { ORE->emit(createMissedAnalysis("ConditionalStore") << "store that is conditionally executed prevents vectorization"); - DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); + LLVM_DEBUG( + dbgs() << "LV: No vectorization. There are conditional stores.\n"); Width = 1; Cost = ScalarCost; } - DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() - << "LV: Vectorization seems to be not beneficial, " - << "but was forced by a user.\n"); - DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); + LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() + << "LV: Vectorization seems to be not beneficial, " + << "but was forced by a user.\n"); + LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; return Factor; } @@ -5123,8 +5138,8 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, return 1; unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); - DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters - << " registers\n"); + LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters + << " registers\n"); if (VF == 1) { if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) @@ -5182,7 +5197,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, // Interleave if we vectorized this loop and there is a reduction that could // benefit from interleaving. 
if (VF > 1 && !Legal->getReductionVars()->empty()) { - DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); + LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); return IC; } @@ -5193,7 +5208,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, // We want to interleave small loops in order to reduce the loop overhead and // potentially expose ILP opportunities. - DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); + LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { // We assume that the cost overhead is 1 and we use the cost model // to estimate the cost of the loop and interleave until the cost of the @@ -5221,11 +5236,12 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, if (EnableLoadStoreRuntimeInterleave && std::max(StoresIC, LoadsIC) > SmallIC) { - DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n"); + LLVM_DEBUG( + dbgs() << "LV: Interleaving to saturate store or load ports.\n"); return std::max(StoresIC, LoadsIC); } - DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); + LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); return SmallIC; } @@ -5233,11 +5249,11 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, // this point) that could benefit from interleaving. bool HasReductions = !Legal->getReductionVars()->empty(); if (TTI.enableAggressiveInterleaving(HasReductions)) { - DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); + LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); return IC; } - DEBUG(dbgs() << "LV: Not Interleaving.\n"); + LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); return 1; } @@ -5327,7 +5343,7 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { SmallVector<RegisterUsage, 8> RUs(VFs.size()); SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); - DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); + LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); // A lambda that gets the register usage for the given type and VF. auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { @@ -5372,8 +5388,8 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { MaxUsages[j] = std::max(MaxUsages[j], RegUsage); } - DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " - << OpenIntervals.size() << '\n'); + LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " + << OpenIntervals.size() << '\n'); // Add the current instruction to the list of open intervals. 
OpenIntervals.insert(I); @@ -5388,9 +5404,10 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { Invariant += GetRegUsage(Inst->getType(), VFs[i]); } - DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); - DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); - DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); + LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); + LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); + LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant + << '\n'); RU.LoopInvariantRegs = Invariant; RU.MaxLocalUsers = MaxUsages[i]; @@ -5587,8 +5604,9 @@ LoopVectorizationCostModel::expectedCost(unsigned VF) { BlockCost.first += C.first; BlockCost.second |= C.second; - DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " - << VF << " For instruction: " << I << '\n'); + LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first + << " for VF " << VF << " For instruction: " << I + << '\n'); } // If we are vectorizing a predicated block, it will have been @@ -6247,14 +6265,15 @@ LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize, assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); assert(UserVF && "Expected UserVF for outer loop vectorization."); assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); - DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); + LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); buildVPlans(UserVF, UserVF); return {UserVF, 0}; } - DEBUG(dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " - "VPlan-native path.\n"); + LLVM_DEBUG( + dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " + "VPlan-native path.\n"); return NoVectorization; } @@ -6268,13 +6287,13 @@ LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { return NoVectorization; if (UserVF) { - DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); + LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); // Collect the instructions (and their associated costs) that will be more // profitable to scalarize. CM.selectUserVectorizationFactor(UserVF); buildVPlans(UserVF, UserVF); - DEBUG(printPlans(dbgs())); + LLVM_DEBUG(printPlans(dbgs())); return {UserVF, 0}; } @@ -6292,7 +6311,7 @@ LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { } buildVPlans(1, MaxVF); - DEBUG(printPlans(dbgs())); + LLVM_DEBUG(printPlans(dbgs())); if (MaxVF == 1) return NoVectorization; @@ -6301,7 +6320,8 @@ LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { } void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { - DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n'); + LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF + << '\n'); BestVF = VF; BestUF = UF; @@ -6777,11 +6797,11 @@ VPBasicBlock *LoopVectorizationPlanner::handleReplication( // Finalize the recipe for Instr, first if it is not predicated. 
if (!IsPredicated) { - DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); + LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); VPBB->appendRecipe(Recipe); return VPBB; } - DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); + LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); assert(VPBB->getSuccessors().empty() && "VPBB has successors when handling predicated replication."); // Record predicated instructions for above packing optimizations. @@ -6906,8 +6926,9 @@ LoopVectorizationPlanner::buildVPlan(VFRange &Range, // should follow. auto SAIt = SinkAfter.find(Instr); if (SAIt != SinkAfter.end()) { - DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second - << " to vectorize a 1st order recurrence.\n"); + LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" + << *SAIt->second + << " to vectorize a 1st order recurrence.\n"); SinkAfterInverse[SAIt->second] = Instr; continue; } @@ -7208,21 +7229,22 @@ bool LoopVectorizePass::processLoop(Loop *L) { const std::string DebugLocStr = getDebugLocString(L); #endif /* NDEBUG */ - DEBUG(dbgs() << "\nLV: Checking a loop in \"" - << L->getHeader()->getParent()->getName() << "\" from " - << DebugLocStr << "\n"); + LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" + << L->getHeader()->getParent()->getName() << "\" from " + << DebugLocStr << "\n"); LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); - DEBUG(dbgs() << "LV: Loop hints:" - << " force=" - << (Hints.getForce() == LoopVectorizeHints::FK_Disabled - ? "disabled" - : (Hints.getForce() == LoopVectorizeHints::FK_Enabled - ? "enabled" - : "?")) - << " width=" << Hints.getWidth() - << " unroll=" << Hints.getInterleave() << "\n"); + LLVM_DEBUG( + dbgs() << "LV: Loop hints:" + << " force=" + << (Hints.getForce() == LoopVectorizeHints::FK_Disabled + ? "disabled" + : (Hints.getForce() == LoopVectorizeHints::FK_Enabled + ? "enabled" + : "?")) + << " width=" << Hints.getWidth() + << " unroll=" << Hints.getInterleave() << "\n"); // Function containing loop Function *F = L->getHeader()->getParent(); @@ -7236,7 +7258,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { // benefit from vectorization, respectively. if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { - DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); + LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); return false; } @@ -7247,7 +7269,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE, &Requirements, &Hints, DB, AC); if (!LVL.canVectorize(EnableVPlanNativePath)) { - DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); emitMissedWarning(F, L, Hints, ORE); return false; } @@ -7297,13 +7319,13 @@ bool LoopVectorizePass::processLoop(Loop *L) { } if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) { - DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " - << "This loop is worth vectorizing only if no scalar " - << "iteration overheads are incurred."); + LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" + << "This loop is worth vectorizing only if no scalar " + << "iteration overheads are incurred."); if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) - DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); + LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); else { - DEBUG(dbgs() << "\n"); + LLVM_DEBUG(dbgs() << "\n"); // Loops with a very small trip count are considered for vectorization // under OptForSize, thereby making sure the cost of their loop body is // dominant, free of runtime guards and scalar iteration overheads. @@ -7316,8 +7338,8 @@ bool LoopVectorizePass::processLoop(Loop *L) { // an integer loop and the vector instructions selected are purely integer // vector instructions? if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { - DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" - "attribute is used.\n"); + LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" + "attribute is used.\n"); ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "NoImplicitFloat", L) << "loop not vectorized due to NoImplicitFloat attribute"); @@ -7331,7 +7353,8 @@ bool LoopVectorizePass::processLoop(Loop *L) { // additional fp-math flags can help. if (Hints.isPotentiallyUnsafe() && TTI->isFPVectorizationPotentiallyUnsafe()) { - DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); + LLVM_DEBUG( + dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); ORE->emit( createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L) << "loop not vectorized due to unsafe FP support."); @@ -7375,14 +7398,14 @@ bool LoopVectorizePass::processLoop(Loop *L) { std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; bool VectorizeLoop = true, InterleaveLoop = true; if (Requirements.doesNotMeet(F, L, Hints)) { - DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " - "requirements.\n"); + LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " + "requirements.\n"); emitMissedWarning(F, L, Hints, ORE); return false; } if (VF.Width == 1) { - DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); + LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); VecDiagMsg = std::make_pair( "VectorizationNotBeneficial", "the cost-model indicates that vectorization is not beneficial"); @@ -7391,7 +7414,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { if (IC == 1 && UserIC <= 1) { // Tell the user interleaving is not beneficial. - DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); + LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); IntDiagMsg = std::make_pair( "InterleavingNotBeneficial", "the cost-model indicates that interleaving is not beneficial"); @@ -7403,8 +7426,8 @@ bool LoopVectorizePass::processLoop(Loop *L) { } } else if (IC > 1 && UserIC == 1) { // Tell the user interleaving is beneficial, but it explicitly disabled. 
- DEBUG(dbgs() - << "LV: Interleaving is beneficial but is explicitly disabled."); + LLVM_DEBUG( + dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); IntDiagMsg = std::make_pair( "InterleavingBeneficialButDisabled", "the cost-model indicates that interleaving is beneficial " @@ -7431,24 +7454,24 @@ bool LoopVectorizePass::processLoop(Loop *L) { }); return false; } else if (!VectorizeLoop && InterleaveLoop) { - DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); + LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); ORE->emit([&]() { return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, L->getStartLoc(), L->getHeader()) << VecDiagMsg.second; }); } else if (VectorizeLoop && !InterleaveLoop) { - DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " - << DebugLocStr << '\n'); + LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width + << ") in " << DebugLocStr << '\n'); ORE->emit([&]() { return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, L->getStartLoc(), L->getHeader()) << IntDiagMsg.second; }); } else if (VectorizeLoop && InterleaveLoop) { - DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " - << DebugLocStr << '\n'); - DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); + LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width + << ") in " << DebugLocStr << '\n'); + LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); } LVP.setBestPlan(VF.Width, IC); @@ -7495,7 +7518,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { // Mark the loop as already vectorized to avoid vectorizing again. Hints.setAlreadyVectorized(); - DEBUG(verifyFunction(*L->getHeader()->getParent())); + LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); return true; } diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 639a0525624..2f9fcc7ec1a 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -1059,7 +1059,7 @@ private: template <typename ReadyListType> void schedule(ScheduleData *SD, ReadyListType &ReadyList) { SD->IsScheduled = true; - DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); + LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); ScheduleData *BundleMember = SD; while (BundleMember) { @@ -1082,8 +1082,8 @@ private: assert(!DepBundle->IsScheduled && "already scheduled bundle gets ready"); ReadyList.insert(DepBundle); - DEBUG(dbgs() - << "SLP: gets ready (def): " << *DepBundle << "\n"); + LLVM_DEBUG(dbgs() + << "SLP: gets ready (def): " << *DepBundle << "\n"); } }); } @@ -1096,8 +1096,8 @@ private: assert(!DepBundle->IsScheduled && "already scheduled bundle gets ready"); ReadyList.insert(DepBundle); - DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle - << "\n"); + LLVM_DEBUG(dbgs() + << "SLP: gets ready (mem): " << *DepBundle << "\n"); } } BundleMember = BundleMember->NextInBundle; @@ -1122,7 +1122,8 @@ private: doForAllOpcodes(I, [&](ScheduleData *SD) { if (SD->isSchedulingEntity() && SD->isReady()) { ReadyList.insert(SD); - DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n"); + LLVM_DEBUG(dbgs() + << "SLP: initially in ready list: " << *I << "\n"); } }); } @@ -1398,12 +1399,12 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, // Check if the scalar is externally used as an extra arg. 
auto ExtI = ExternallyUsedValues.find(Scalar); if (ExtI != ExternallyUsedValues.end()) { - DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " << - Lane << " from " << *Scalar << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " + << Lane << " from " << *Scalar << ".\n"); ExternalUses.emplace_back(Scalar, nullptr, FoundLane); } for (User *U : Scalar->users()) { - DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); Instruction *UserInst = dyn_cast<Instruction>(U); if (!UserInst) @@ -1417,8 +1418,8 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, // be used. if (UseScalar != U || !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { - DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U - << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U + << ".\n"); assert(!UseEntry->NeedToGather && "Bad state"); continue; } @@ -1428,8 +1429,8 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, if (is_contained(UserIgnoreList, UserInst)) continue; - DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " << - Lane << " from " << *Scalar << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " + << Lane << " from " << *Scalar << ".\n"); ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); } } @@ -1442,28 +1443,28 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, InstructionsState S = getSameOpcode(VL); if (Depth == RecursionMaxDepth) { - DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } // Don't handle vectors. if (S.OpValue->getType()->isVectorTy()) { - DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) if (SI->getValueOperand()->getType()->isVectorTy()) { - DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } // If all of the operands are identical or constant we have a simple solution. if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.Opcode) { - DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1474,8 +1475,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, // Don't vectorize ephemeral values. for (unsigned i = 0, e = VL.size(); i != e; ++i) { if (EphValues.count(VL[i])) { - DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] << - ") is ephemeral.\n"); + LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] + << ") is ephemeral.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1483,16 +1484,17 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, // Check if this is a duplicate of another entry. if (TreeEntry *E = getTreeEntry(S.OpValue)) { - DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); if (!E->isSame(VL)) { - DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } // Record the reuse of the tree node. 
FIXME, currently this is only used to // properly draw the graph rather than for the actual vectorization. E->UserTreeIndices.push_back(UserTreeIdx); - DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue + << ".\n"); return; } @@ -1502,8 +1504,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (!I) continue; if (getTreeEntry(I)) { - DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] << - ") is already in tree.\n"); + LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] + << ") is already in tree.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1513,7 +1515,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, // we need to gather the scalars. for (unsigned i = 0, e = VL.size(); i != e; ++i) { if (MustGather.count(VL[i])) { - DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1527,7 +1529,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (!DT->isReachableFromEntry(BB)) { // Don't go into unreachable blocks. They may contain instructions with // dependency cycles which confuse the final scheduling. - DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); + LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1545,9 +1547,9 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (UniqueValues.size() == VL.size()) { ReuseShuffleIndicies.clear(); } else { - DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); + LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); if (UniqueValues.size() <= 1 || !llvm::isPowerOf2_32(UniqueValues.size())) { - DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); + LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); newTreeEntry(VL, false, UserTreeIdx); return; } @@ -1561,14 +1563,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, BlockScheduling &BS = *BSRef.get(); if (!BS.tryScheduleBundle(VL, this, VL0)) { - DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); + LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); assert((!BS.getScheduleData(VL0) || !BS.getScheduleData(VL0)->isPartOfBundle()) && "tryScheduleBundle should cancelScheduling on failure"); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); return; } - DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); + LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); unsigned ShuffleOrOp = S.IsAltShuffle ? 
(unsigned) Instruction::ShuffleVector : S.Opcode; @@ -1582,7 +1584,9 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, TerminatorInst *Term = dyn_cast<TerminatorInst>( cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i))); if (Term) { - DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); + LLVM_DEBUG( + dbgs() + << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); return; @@ -1590,7 +1594,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, } newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { ValueList Operands; @@ -1608,14 +1612,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, OrdersType CurrentOrder; bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); if (Reuse) { - DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); + LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); ++NumOpsWantToKeepOriginalOrder; newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, ReuseShuffleIndicies); return; } if (!CurrentOrder.empty()) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " "with order"; for (unsigned Idx : CurrentOrder) @@ -1631,7 +1635,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, StoredCurrentOrderAndNum->getFirst()); return; } - DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); newTreeEntry(VL, /*Vectorized=*/false, UserTreeIdx, ReuseShuffleIndicies); BS.cancelScheduling(VL, VL0); return; @@ -1649,7 +1653,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, DL->getTypeAllocSizeInBits(ScalarTy)) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); return; } @@ -1662,7 +1666,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (!L->isSimple()) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); return; } *POIter = L->getPointerOperand(); @@ -1693,20 +1697,20 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, ++NumOpsWantToKeepOriginalOrder; newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a vector of loads.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); } else { // Need to reorder. 
auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first; ++I->getSecond(); newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, ReuseShuffleIndicies, I->getFirst()); - DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); } return; } } - DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); return; @@ -1729,12 +1733,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (Ty != SrcTy || !isValidElementType(Ty)) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n"); + LLVM_DEBUG(dbgs() + << "SLP: Gathering casts with different src types.\n"); return; } } newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a vector of casts.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { ValueList Operands; @@ -1757,13 +1762,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, Cmp->getOperand(0)->getType() != ComparedTy) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); + LLVM_DEBUG(dbgs() + << "SLP: Gathering cmp with different predicate.\n"); return; } } newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a vector of compares.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { ValueList Operands; @@ -1795,7 +1801,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, case Instruction::Or: case Instruction::Xor: newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a vector of bin op.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of bin op.\n"); // Sort operands of the instructions so that each side is more likely to // have the same opcode. @@ -1821,7 +1827,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, // We don't combine GEPs with complicated (nested) indexing. 
for (unsigned j = 0; j < VL.size(); ++j) { if (cast<Instruction>(VL[j])->getNumOperands() != 2) { - DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); + LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); return; @@ -1834,7 +1840,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, for (unsigned j = 0; j < VL.size(); ++j) { Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType(); if (Ty0 != CurTy) { - DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); + LLVM_DEBUG(dbgs() + << "SLP: not-vectorizable GEP (different types).\n"); BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); return; @@ -1845,8 +1852,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, for (unsigned j = 0; j < VL.size(); ++j) { auto Op = cast<Instruction>(VL[j])->getOperand(1); if (!isa<ConstantInt>(Op)) { - DEBUG( - dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); + LLVM_DEBUG(dbgs() + << "SLP: not-vectorizable GEP (non-constant indexes).\n"); BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); return; @@ -1854,7 +1861,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, } newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); for (unsigned i = 0, e = 2; i < e; ++i) { ValueList Operands; // Prepare the operand vector. @@ -1871,12 +1878,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); + LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); return; } newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a vector of stores.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); ValueList Operands; for (Value *j : VL) @@ -1894,7 +1901,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (!isTriviallyVectorizable(ID)) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); + LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); return; } Function *Int = CI->getCalledFunction(); @@ -1908,8 +1915,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, !CI->hasIdenticalOperandBundleSchema(*CI2)) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i] - << "\n"); + LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i] + << "\n"); return; } // ctlz,cttz and powi are special intrinsics whose second argument @@ -1919,9 +1926,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (A1I != A1J) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI - << " argument "<< A1I<<"!=" << A1J - << "\n"); + LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI + << " argument " << A1I << "!=" << A1J << "\n"); return; } } @@ -1932,8 +1938,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 
CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!=" - << *VL[i] << '\n'); + LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" + << *CI << "!=" << *VL[i] << '\n'); return; } } @@ -1956,11 +1962,11 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, if (!S.IsAltShuffle) { BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); + LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); return; } newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); + LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); // Reorder operands if reordering would enable vectorization. if (isa<BinaryOperator>(VL0)) { @@ -1984,7 +1990,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, default: BS.cancelScheduling(VL, VL0); newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); - DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); + LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); return; } } @@ -2411,9 +2417,9 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF, VecTy->getNumElements()); - DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost - << " (" << VecCallCost << "-" << ScalarCallCost << ")" - << " for " << *CI << "\n"); + LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost + << " (" << VecCallCost << "-" << ScalarCallCost << ")" + << " for " << *CI << "\n"); return ReuseShuffleCost + VecCallCost - ScalarCallCost; } @@ -2465,8 +2471,8 @@ int BoUpSLP::getEntryCost(TreeEntry *E) { } bool BoUpSLP::isFullyVectorizableTinyTree() { - DEBUG(dbgs() << "SLP: Check whether the tree with height " << - VectorizableTree.size() << " is fully vectorizable .\n"); + LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " + << VectorizableTree.size() << " is fully vectorizable .\n"); // We only handle trees of heights 1 and 2. 
if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather) @@ -2536,7 +2542,7 @@ int BoUpSLP::getSpillCost() { LiveValues.insert(cast<Instruction>(&*J)); } - DEBUG({ + LLVM_DEBUG({ dbgs() << "SLP: #LV: " << LiveValues.size(); for (auto *X : LiveValues) dbgs() << " " << X->getName(); @@ -2575,8 +2581,8 @@ int BoUpSLP::getSpillCost() { int BoUpSLP::getTreeCost() { int Cost = 0; - DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << - VectorizableTree.size() << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " + << VectorizableTree.size() << ".\n"); unsigned BundleWidth = VectorizableTree[0].Scalars.size(); @@ -2603,8 +2609,9 @@ int BoUpSLP::getTreeCost() { continue; int C = getEntryCost(&TE); - DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " - << *TE.Scalars[0] << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C + << " for bundle that starts with " << *TE.Scalars[0] + << ".\n"); Cost += C; } @@ -2649,7 +2656,7 @@ int BoUpSLP::getTreeCost() { << "SLP: Extract Cost = " << ExtractCost << ".\n" << "SLP: Total Cost = " << Cost << ".\n"; } - DEBUG(dbgs() << Str); + LLVM_DEBUG(dbgs() << Str); if (ViewSLPTree) ViewGraph(this, "SLP" + F->getName(), false, Str); @@ -3080,7 +3087,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { IRBuilder<>::InsertPointGuard Guard(Builder); if (E->VectorizedValue) { - DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); return E->VectorizedValue; } @@ -3240,7 +3247,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { Value *InVec = vectorizeTree(INVL); if (E->VectorizedValue) { - DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); return E->VectorizedValue; } @@ -3268,7 +3275,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { Value *R = vectorizeTree(RHSV); if (E->VectorizedValue) { - DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); return E->VectorizedValue; } @@ -3303,7 +3310,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { Value *False = vectorizeTree(FalseVec); if (E->VectorizedValue) { - DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); return E->VectorizedValue; } @@ -3351,7 +3358,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { Value *RHS = vectorizeTree(RHSVL); if (E->VectorizedValue) { - DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); return E->VectorizedValue; } @@ -3509,7 +3516,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { } Value *OpVec = vectorizeTree(OpVL); - DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); + LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); OpVecs.push_back(OpVec); } @@ -3547,7 +3554,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { Value *RHS = vectorizeTree(RHSVL); if (E->VectorizedValue) { - DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); return E->VectorizedValue; } @@ -3627,7 +3634,8 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { VectorizableTree[0].VectorizedValue = Trunc; } - DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n"); + LLVM_DEBUG(dbgs() << "SLP: 
Extracting " << ExternalUses.size() + << " values .\n"); // If necessary, sign-extend or zero-extend ScalarRoot to the larger type // specified by ScalarType. @@ -3713,7 +3721,7 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { User->replaceUsesOfWith(Scalar, Ex); } - DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); } // For each vectorized value: @@ -3734,7 +3742,7 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { if (!Ty->isVoidTy()) { #ifndef NDEBUG for (User *U : Scalar->users()) { - DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); // It is legal to replace users in the ignorelist by undef. assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) && @@ -3744,7 +3752,7 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { Value *Undef = UndefValue::get(Ty); Scalar->replaceAllUsesWith(Undef); } - DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); eraseInstruction(cast<Instruction>(Scalar)); } } @@ -3755,8 +3763,8 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { } void BoUpSLP::optimizeGatherSequence() { - DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() - << " gather sequences instructions.\n"); + LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() + << " gather sequences instructions.\n"); // LICM InsertElementInst sequences. for (Instruction *I : GatherSeq) { if (!isa<InsertElementInst>(I) && !isa<ShuffleVectorInst>(I)) @@ -3849,7 +3857,7 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, ScheduleData *PrevInBundle = nullptr; ScheduleData *Bundle = nullptr; bool ReSchedule = false; - DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n"); + LLVM_DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n"); // Make sure that the scheduling region contains all // instructions of the bundle. @@ -3866,8 +3874,8 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, // A bundle member was scheduled as single instruction before and now // needs to be scheduled as part of the bundle. We just get rid of the // existing schedule. 
- DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember - << " was already scheduled\n"); + LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember + << " was already scheduled\n"); ReSchedule = true; } assert(BundleMember->isSchedulingEntity() && @@ -3902,8 +3910,8 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, initialFillReadyList(ReadyInsts); } - DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " - << BB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " + << BB->getName() << "\n"); calculateDependencies(Bundle, true, SLP); @@ -3933,7 +3941,7 @@ void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, return; ScheduleData *Bundle = getScheduleData(OpValue); - DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); + LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); assert(!Bundle->IsScheduled && "Can't cancel bundle which is already scheduled"); assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && @@ -3992,7 +4000,7 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, if (isOneOf(OpValue, I) != I) CheckSheduleForI(I); assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); - DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); + LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); return true; } // Search up and down at the same time, because we don't know if the new @@ -4004,7 +4012,7 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, BasicBlock::iterator LowerEnd = BB->end(); while (true) { if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { - DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); + LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); return false; } @@ -4014,7 +4022,8 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, ScheduleStart = I; if (isOneOf(OpValue, I) != I) CheckSheduleForI(I); - DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); + LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I + << "\n"); return true; } UpIter++; @@ -4027,7 +4036,8 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, if (isOneOf(OpValue, I) != I) CheckSheduleForI(I); assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); - DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); + LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I + << "\n"); return true; } DownIter++; @@ -4091,7 +4101,8 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, assert(isInSchedulingRegion(BundleMember)); if (!BundleMember->hasValidDependencies()) { - DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); + LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember + << "\n"); BundleMember->Dependencies = 0; BundleMember->resetUnscheduledDeps(); @@ -4192,7 +4203,8 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, } if (InsertInReadyList && SD->isReady()) { ReadyInsts.push_back(SD); - DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"); + LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst + << "\n"); } } } @@ -4215,7 +4227,7 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) { if (!BS->ScheduleStart) return; - DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); + LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << 
"\n"); BS->resetSchedule(); @@ -4648,7 +4660,7 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, if (F.hasFnAttribute(Attribute::NoImplicitFloat)) return false; - DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); // Use the bottom up slp vectorizer to construct chains that start with // store instructions. @@ -4663,8 +4675,8 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, // Vectorize trees that end at stores. if (!Stores.empty()) { - DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() - << " underlying objects.\n"); + LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() + << " underlying objects.\n"); Changed |= vectorizeStoreChains(R); } @@ -4675,16 +4687,16 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, // is primarily intended to catch gather-like idioms ending at // non-consecutive loads. if (!GEPs.empty()) { - DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() - << " underlying objects.\n"); + LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() + << " underlying objects.\n"); Changed |= vectorizeGEPIndices(BB, R); } } if (Changed) { R.optimizeGatherSequence(); - DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); - DEBUG(verifyFunction(F)); + LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); + LLVM_DEBUG(verifyFunction(F)); } return Changed; } @@ -4705,8 +4717,8 @@ static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, unsigned VecRegSize) { const unsigned ChainLen = Chain.size(); - DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen - << "\n"); + LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen + << "\n"); const unsigned Sz = R.getVectorElementSize(Chain[0]); const unsigned VF = VecRegSize / Sz; @@ -4724,8 +4736,8 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) continue; - DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i - << "\n"); + LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i + << "\n"); ArrayRef<Value *> Operands = Chain.slice(i, VF); R.buildTree(Operands); @@ -4736,9 +4748,10 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, int Cost = R.getTreeCost(); - DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); + LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF + << "\n"); if (Cost < -SLPCostThreshold) { - DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); + LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); using namespace ore; @@ -4883,8 +4896,8 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, if (VL.size() < 2) return false; - DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size() - << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " + << VL.size() << ".\n"); // Check that all of the parts are scalar instructions of the same type. 
Instruction *I0 = dyn_cast<Instruction>(VL[0]); @@ -4969,8 +4982,8 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth)) continue; - DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " - << "\n"); + LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " + << "\n"); ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); R.buildTree(Ops); @@ -4995,7 +5008,7 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, MinCost = std::min(MinCost, Cost); if (Cost < -SLPCostThreshold) { - DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", cast<Instruction>(Ops[0])) << "SLP vectorized with cost " << ore::NV("Cost", Cost) @@ -5752,8 +5765,8 @@ public: break; } - DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost - << ". (HorRdx)\n"); + LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" + << Cost << ". (HorRdx)\n"); V.getORE()->emit([&]() { return OptimizationRemark( SV_NAME, "VectorizedHorizontalReduction", cast<Instruction>(VL[0])) @@ -5874,11 +5887,11 @@ private: } ScalarReduxCost *= (ReduxWidth - 1); - DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost - << " for reduction that starts with " << *FirstReducedVal - << " (It is a " - << (IsPairwiseReduction ? "pairwise" : "splitting") - << " reduction)\n"); + LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost + << " for reduction that starts with " << *FirstReducedVal + << " (It is a " + << (IsPairwiseReduction ? "pairwise" : "splitting") + << " reduction)\n"); return VecReduxCost - ScalarReduxCost; } @@ -6144,7 +6157,7 @@ bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, if (!findBuildAggregate(IVI, BuildVectorOpds)) return false; - DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); + LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); // Aggregate value is unlikely to be processed in vector register, we need to // extract scalars into scalar registers, so NeedExtraction is set true. return tryToVectorizeList(BuildVectorOpds, R); @@ -6234,8 +6247,8 @@ bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { // Try to vectorize them. unsigned NumElts = (SameTypeIt - IncIt); - DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts - << ")\n"); + LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" + << NumElts << ")\n"); // The order in which the phi nodes appear in the program does not matter. // So allow tryToVectorizeList to reorder them if it is beneficial. This // is done when there are exactly two elements since tryToVectorizeList @@ -6336,8 +6349,8 @@ bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { if (Entry.second.size() < 2) continue; - DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " - << Entry.second.size() << ".\n"); + LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " + << Entry.second.size() << ".\n"); // We process the getelementptr list in chunks of 16 (like we do for // stores) to minimize compile-time. 
@@ -6419,8 +6432,8 @@ bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
 if (it->second.size() < 2)
 continue;
- DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
- << it->second.size() << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
+ << it->second.size() << ".\n");
 // Process the stores in chunks of 16.
 // TODO: The limit of 16 inhibits greater vectorization factors.
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 7146fcc098b..50c71a32385 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -116,7 +116,7 @@ VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
 BasicBlock *PrevBB = CFG.PrevBB;
 BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
 PrevBB->getParent(), CFG.LastBB);
- DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');
 // Hook up the new basic block to its predecessors.
 for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
@@ -125,7 +125,7 @@ VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
 BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];
 assert(PredBB && "Predecessor basic-block not found building successor.");
 auto *PredBBTerminator = PredBB->getTerminator();
- DEBUG(dbgs() << "LV: draw edge from" << PredBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: draw edge from" << PredBB->getName() << '\n');
 if (isa<UnreachableInst>(PredBBTerminator)) {
 assert(PredVPSuccessors.size() == 1 &&
 "Predecessor ending w/o branch must have single successor.");
@@ -175,8 +175,8 @@ void VPBasicBlock::execute(VPTransformState *State) {
 }
 // 2. Fill the IR basic block with IR instructions.
- DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
- << " in BB:" << NewBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
+ << " in BB:" << NewBB->getName() << '\n');
 State->CFG.VPBB2IRBB[this] = NewBB;
 State->CFG.PrevVPBB = this;
@@ -184,7 +184,7 @@ void VPBasicBlock::execute(VPTransformState *State) {
 for (VPRecipeBase &Recipe : Recipes)
 Recipe.execute(*State);
- DEBUG(dbgs() << "LV: filled BB:" << *NewBB);
+ LLVM_DEBUG(dbgs() << "LV: filled BB:" << *NewBB);
 }
 void VPRegionBlock::execute(VPTransformState *State) {
@@ -193,7 +193,7 @@ void VPRegionBlock::execute(VPTransformState *State) {
 if (!isReplicator()) {
 // Visit the VPBlocks connected to "this", starting from it.
 for (VPBlockBase *Block : RPOT) {
- DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
 Block->execute(State);
 }
 return;
@@ -210,7 +210,7 @@ void VPRegionBlock::execute(VPTransformState *State) {
 State->Instance->Lane = Lane;
 // Visit the VPBlocks connected to \p this, starting from it.
 for (VPBlockBase *Block : RPOT) {
- DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
 Block->execute(State);
 }
 }
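For context, the hunks above only rename the macro; a minimal sketch of how the renamed macro is typically used in a pass follows. The pass name "my-pass" and the helper function are illustrative assumptions, not taken from this patch.

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical debug type; selects this output under -debug-only=my-pass.
#define DEBUG_TYPE "my-pass"

static void reportValue(int Value) {
  // LLVM_DEBUG expands to nothing in NDEBUG builds; otherwise the statement
  // runs only when -debug or -debug-only=my-pass is passed to the tool.
  LLVM_DEBUG(dbgs() << "my-pass: processing value " << Value << '\n');
}

#undef DEBUG_TYPE

The call sites changed in this diff follow the same pattern, with the stream expression wrapped in a single LLVM_DEBUG(...) invocation.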