Diffstat (limited to 'llvm/lib')

 llvm/lib/Analysis/TargetTransformInfo.cpp          |   4
 llvm/lib/Target/PowerPC/PPCCallingConv.td          |  53
 llvm/lib/Target/PowerPC/PPCFastISel.cpp            |   2
 llvm/lib/Target/PowerPC/PPCFrameLowering.cpp       |  14
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp        |  17
 llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp        |  18
 llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp |  13
 llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h   |   2
 llvm/lib/Transforms/IPO/GlobalOpt.cpp              | 164

9 files changed, 275 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index b744cae51ed..e046984ffc3 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -226,6 +226,10 @@ bool TargetTransformInfo::shouldBuildLookupTablesForConstant(Constant *C) const
   return TTIImpl->shouldBuildLookupTablesForConstant(C);
 }
 
+bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
+  return TTIImpl->useColdCCForColdCall(F);
+}
+
 unsigned TargetTransformInfo::
 getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const {
   return TTIImpl->getScalarizationOverhead(Ty, Insert, Extract);
diff --git a/llvm/lib/Target/PowerPC/PPCCallingConv.td b/llvm/lib/Target/PowerPC/PPCCallingConv.td
index a4f4c8688cc..d7d2cad1e5f 100644
--- a/llvm/lib/Target/PowerPC/PPCCallingConv.td
+++ b/llvm/lib/Target/PowerPC/PPCCallingConv.td
@@ -45,6 +45,29 @@ def RetCC_PPC64_AnyReg : CallingConv<[
   CCCustom<"CC_PPC_AnyReg_Error">
 ]>;
 
+// Return-value convention for PowerPC coldcc.
+def RetCC_PPC_Cold : CallingConv<[
+  // Use the same return registers as RetCC_PPC, but limited to only
+  // one return value. The remaining return values will be saved to
+  // the stack.
+  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
+  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,
+
+  CCIfType<[i32], CCAssignToReg<[R3]>>,
+  CCIfType<[i64], CCAssignToReg<[X3]>>,
+  CCIfType<[i128], CCAssignToReg<[X3]>>,
+
+  CCIfType<[f32], CCAssignToReg<[F1]>>,
+  CCIfType<[f64], CCAssignToReg<[F1]>>,
+
+  CCIfType<[v4f64, v4f32, v4i1],
+           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1]>>>,
+
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
+           CCIfSubtarget<"hasAltivec()",
+                         CCAssignToReg<[V2]>>>
+]>;
+
 // Return-value convention for PowerPC
 def RetCC_PPC : CallingConv<[
   CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
@@ -271,6 +294,36 @@ def CSR_SVR464_R2_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2_Altivec)>
 
 def CSR_NoRegs : CalleeSavedRegs<(add)>;
 
+// The coldcc calling convention marks most registers as non-volatile.
+// Do not include r1, since the stack pointer is never considered a CSR.
+// Do not include r2, since it is the TOC register and is added depending
+// on whether or not the function uses the TOC and is a non-leaf.
+// Do not include r0, r11, and r13, as they are optional in functional
+// linkage and their values may be altered by inter-library calls.
+// Do not include r12, as it is used as a scratch register.
+// Do not include the return registers r3, f1, and v2.
+def CSR_SVR32_ColdCC : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
+                                            (sequence "R%u", 14, 31),
+                                            F0, (sequence "F%u", 2, 31),
+                                            (sequence "CR%u", 0, 7))>;
+
+def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
+                                                (sequence "V%u", 0, 1),
+                                                (sequence "V%u", 3, 31))>;
+
+def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
+                                            (sequence "X%u", 14, 31),
+                                            F0, (sequence "F%u", 2, 31),
+                                            (sequence "CR%u", 0, 7))>;
+
+def CSR_SVR64_ColdCC_R2 : CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;
+
+def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
+                                                (sequence "V%u", 0, 1),
+                                                (sequence "V%u", 3, 31))>;
+
+def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec, X2)>;
+
 def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                     (sequence "X%u", 14, 31),
                                     (sequence "F%u", 0, 31),
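[Note] The new CSR lists and RetCC_PPC_Cold only apply to functions and call sites that actually carry the coldcc convention. As a minimal standalone sketch of how such IR is produced with the LLVM C++ API (not part of this patch; the function names are made up):

    // Sketch: build an internal coldcc function and a matching call site.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("coldcc-demo", Ctx);
      IRBuilder<> B(Ctx);

      // An internal function marked coldcc; on PPC it now preserves almost
      // every register, so a cold call clobbers very little caller state.
      FunctionType *FTy = FunctionType::get(B.getInt64Ty(), /*isVarArg=*/false);
      Function *Cold = Function::Create(FTy, GlobalValue::InternalLinkage,
                                        "rarely_taken_path", &M);
      Cold->setCallingConv(CallingConv::Cold);
      B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", Cold));
      B.CreateRet(B.getInt64(0));

      // The call site must carry the same calling convention as the callee.
      Function *Caller = Function::Create(FTy, GlobalValue::ExternalLinkage,
                                          "caller", &M);
      B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", Caller));
      CallInst *CI = B.CreateCall(Cold);
      CI->setCallingConv(CallingConv::Cold);
      B.CreateRet(CI);

      M.print(outs(), nullptr); // "coldcc" appears on both def and call
    }
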
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index 402e29cdff7..03083908d84 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -206,6 +206,8 @@ CCAssignFn *PPCFastISel::usePPC32CCs(unsigned Flag) {
     return CC_PPC32_SVR4_ByVal;
   else if (Flag == 3)
     return CC_PPC32_SVR4_VarArg;
+  else if (Flag == 4)
+    return RetCC_PPC_Cold;
   else
     return RetCC_PPC;
 }
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 7902da20a01..bdda9d13ad8 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1950,7 +1950,14 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
     bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;
 
     // Add the callee-saved register as live-in; it's killed at the spill.
-    MBB.addLiveIn(Reg);
+    // Do not do this for callee-saved registers that are live-in to the
+    // function, because they are already marked live-in and this would add
+    // them a second time. It is an error to add the same register to the
+    // set more than once.
+    const MachineRegisterInfo &MRI = MF->getRegInfo();
+    bool IsLiveIn = MRI.isLiveIn(Reg);
+    if (!IsLiveIn)
+      MBB.addLiveIn(Reg);
 
     if (CRSpilled && IsCRField) {
       CRMIB.addReg(Reg, RegState::ImplicitKill);
@@ -1980,7 +1987,10 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
       }
     } else {
       const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+      // Use !IsLiveIn for the kill flag. We must not kill registers that
+      // are live into this function before their last use, because that
+      // would leave those uses reading an undefined register.
-      TII.storeRegToStackSlot(MBB, MI, Reg, true,
+      TII.storeRegToStackSlot(MBB, MI, Reg, !IsLiveIn,
                               CSI[i].getFrameIdx(), RC, TRI);
     }
   }
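[Note] The two PPCFrameLowering changes enforce a single invariant, which matters now that coldcc makes argument registers callee-saved (so a CSR can also be a function live-in): a register already live into the function must not be re-added as a block live-in, and must not be marked killed by its CSR spill. A small standalone model of that rule (toy types and assumed register numbers, not LLVM code):

    #include <cassert>
    #include <set>

    struct Block {
      std::set<unsigned> LiveIns;
      void addLiveIn(unsigned Reg) {
        // Adding the same register twice is an error, as in MachineBasicBlock.
        assert(LiveIns.insert(Reg).second && "register already live-in");
      }
    };

    // Returns the kill flag to use when spilling a callee-saved register.
    bool spillCSR(Block &Entry, const std::set<unsigned> &FuncLiveIns,
                  unsigned Reg) {
      bool IsLiveIn = FuncLiveIns.count(Reg) != 0;
      if (!IsLiveIn)
        Entry.addLiveIn(Reg); // only add when not already a function live-in
      return !IsLiveIn;       // only kill values that are dead on entry
    }

    int main() {
      Block Entry;
      std::set<unsigned> FuncLiveIns = {3}; // e.g. r3 carries an argument
      bool KillR3 = spillCSR(Entry, FuncLiveIns, 3);   // false: r3 stays live
      bool KillR14 = spillCSR(Entry, FuncLiveIns, 14); // true: safe to kill
      assert(!KillR3 && KillR14);
    }
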
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 3c09ab8d755..34f7fc99f58 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -4939,7 +4939,11 @@ SDValue PPCTargetLowering::LowerCallResult(
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                     *DAG.getContext());
-  CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
+
+  CCRetInfo.AnalyzeCallResult(
+      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
+               ? RetCC_PPC_Cold
+               : RetCC_PPC);
 
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
@@ -5159,6 +5163,7 @@ SDValue PPCTargetLowering::LowerCall_32SVR4(
   // of the 32-bit SVR4 ABI stack frame layout.
 
   assert((CallConv == CallingConv::C ||
+          CallConv == CallingConv::Cold ||
           CallConv == CallingConv::Fast) && "Unknown calling convention!");
 
   unsigned PtrByteSize = 4;
@@ -6420,7 +6425,10 @@ PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                   LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
-  return CCInfo.CheckReturn(Outs, RetCC_PPC);
+  return CCInfo.CheckReturn(
+      Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
+                ? RetCC_PPC_Cold
+                : RetCC_PPC);
 }
 
 SDValue
@@ -6432,7 +6440,10 @@ PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                  *DAG.getContext());
-  CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
+  CCInfo.AnalyzeReturn(Outs,
+                       (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
+                           ? RetCC_PPC_Cold
+                           : RetCC_PPC);
 
   SDValue Flag;
   SmallVector<SDValue, 4> RetOps(1, Chain);
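[Note] The same SVR4/coldcc ternary is now spelled out three times, in LowerCallResult, CanLowerReturn, and LowerReturn. A hypothetical follow-up cleanup (the helper name is made up; this is a sketch as it might appear inside PPCISelLowering.cpp, not part of the patch) could factor the choice into one place:

    // Hypothetical helper; coldcc only has a dedicated return convention
    // on SVR4 targets, everything else falls back to RetCC_PPC.
    static CCAssignFn *getReturnCC(const PPCSubtarget &Subtarget,
                                   CallingConv::ID CallConv) {
      return (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                 ? RetCC_PPC_Cold
                 : RetCC_PPC;
    }

Each of the three call sites would then pass getReturnCC(Subtarget, CallConv).
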
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 6b62a82ef7b..a938bb98ce1 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -144,6 +144,17 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   // On PPC64, we might need to save r2 (but only if it is not reserved).
   bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2);
 
+  if (MF->getFunction().getCallingConv() == CallingConv::Cold) {
+    return TM.isPPC64()
+               ? (Subtarget.hasAltivec()
+                      ? (SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
+                                : CSR_SVR64_ColdCC_Altivec_SaveList)
+                      : (SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
+                                : CSR_SVR64_ColdCC_SaveList))
+               : (Subtarget.hasAltivec() ? CSR_SVR32_ColdCC_Altivec_SaveList
+                                         : CSR_SVR32_ColdCC_SaveList);
+  }
+
   return TM.isPPC64()
              ? (Subtarget.hasAltivec()
                     ? (SaveR2 ? CSR_SVR464_R2_Altivec_SaveList
@@ -196,6 +207,13 @@ PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                             : (Subtarget.hasAltivec() ? CSR_Darwin32_Altivec_RegMask
                                                       : CSR_Darwin32_RegMask);
 
+  if (CC == CallingConv::Cold) {
+    return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
+                                                  : CSR_SVR64_ColdCC_RegMask)
+                        : (Subtarget.hasAltivec() ? CSR_SVR32_ColdCC_Altivec_RegMask
+                                                  : CSR_SVR32_ColdCC_RegMask);
+  }
+
   return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_SVR464_Altivec_RegMask
                                                 : CSR_SVR464_RegMask)
                       : (Subtarget.hasAltivec() ? CSR_SVR432_Altivec_RegMask
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index aa4073f7ea0..226c75f704f 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -27,6 +27,11 @@ static cl::opt<unsigned>
 CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
               cl::desc("The loop prefetch cache line size"));
 
+static cl::opt<bool>
+EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
+                cl::desc("Enable using coldcc calling conv for cold "
+                         "internal functions"));
+
 //===----------------------------------------------------------------------===//
 //
 // PPC cost model.
@@ -215,6 +220,14 @@ void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
   BaseT::getUnrollingPreferences(L, SE, UP);
 }
 
+// This function returns true to allow using the coldcc calling convention.
+// Returning true results in coldcc being used for functions which are cold at
+// all call sites when the callers of the functions are not calling any other
+// non-coldcc functions.
+bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
+  return EnablePPCColdCC;
+}
+
 bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
   // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
   // on combining the loads generated for consecutive accesses, and failure to
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
index b42dae4a025..2ee2b3eb808 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -61,7 +61,7 @@ public:
   /// \name Vector TTI Implementations
   /// @{
-
+  bool useColdCCForColdCall(Function &F);
   bool enableAggressiveInterleaving(bool LoopHasReductions);
   const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
       bool IsZeroCmp) const;
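[Note] useColdCCForColdCall is an opt-in TTI hook: the base implementation returns false, and PPC gates it behind the hidden -ppc-enable-coldcc flag. As a hedged sketch of what another backend could do with the same hook (MyTTIImpl is hypothetical and does not exist in LLVM), any target-specific policy fits here:

    // Hypothetical out-of-tree target: only use coldcc when the function is
    // already being optimized for size, where the caller-side spill savings
    // matter most. Sketch only.
    bool MyTTIImpl::useColdCCForColdCall(Function &F) {
      return F.hasFnAttribute(Attribute::MinSize);
    }
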
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 65dcd281009..73f351b7c16 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -22,9 +22,11 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
@@ -55,6 +57,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
@@ -88,6 +91,21 @@ STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
 STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
 STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
 STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
+STATISTIC(NumInternalFunc, "Number of internal functions");
+STATISTIC(NumColdCC, "Number of functions marked coldcc");
+
+static cl::opt<bool>
+    EnableColdCCStressTest("enable-coldcc-stress-test",
+                           cl::desc("Enable stress test of coldcc by adding "
+                                    "calling conv to all internal functions."),
+                           cl::init(false), cl::Hidden);
+
+static cl::opt<int> ColdCCRelFreq(
+    "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
+    cl::desc(
+        "Maximum block frequency, expressed as a percentage of caller's "
+        "entry frequency, for a call site to be considered cold for enabling "
+        "coldcc"));
 
 /// Is this global variable possibly used by a leak checker as a root?  If so,
 /// we might not really want to eliminate the stores to it.
@@ -2097,20 +2115,114 @@ static void RemoveNestAttribute(Function *F) {
 /// idea here is that we don't want to mess with the convention if the user
 /// explicitly requested something with performance implications like coldcc,
 /// GHC, or anyregcc.
-static bool isProfitableToMakeFastCC(Function *F) {
+static bool hasChangeableCC(Function *F) {
   CallingConv::ID CC = F->getCallingConv();
 
   // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
   return CC == CallingConv::C || CC == CallingConv::X86_ThisCall;
 }
 
+/// Return true if the block containing the call site has a BlockFrequency of
+/// less than ColdCCRelFreq% of the entry block.
+static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) {
+  const BranchProbability ColdProb(ColdCCRelFreq, 100);
+  auto CallSiteBB = CS.getInstruction()->getParent();
+  auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
+  auto CallerEntryFreq =
+      CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock()));
+  return CallSiteFreq < CallerEntryFreq * ColdProb;
+}
+
+// This function checks if the input function F is cold at all call sites. It
+// also looks at each call site's containing function, returning false if the
+// caller contains other non-cold calls. The input vector AllCallsCold contains
+// a list of functions that only have call sites in cold blocks.
+static bool
+isValidCandidateForColdCC(Function &F,
+                          function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
+                          const std::vector<Function *> &AllCallsCold) {
+
+  if (F.user_empty())
+    return false;
+
+  for (User *U : F.users()) {
+    if (isa<BlockAddress>(U))
+      continue;
+
+    CallSite CS(cast<Instruction>(U));
+    Function *CallerFunc = CS.getInstruction()->getParent()->getParent();
+    BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
+    if (!isColdCallSite(CS, CallerBFI))
+      return false;
+    auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc);
+    if (It == AllCallsCold.end())
+      return false;
+  }
+  return true;
+}
+
+static void changeCallSitesToColdCC(Function *F) {
+  for (User *U : F->users()) {
+    if (isa<BlockAddress>(U))
+      continue;
+    CallSite CS(cast<Instruction>(U));
+    CS.setCallingConv(CallingConv::Cold);
+  }
+}
+
+// This function iterates over all the call instructions in the input Function
+// and checks that all call sites are in cold blocks and are allowed to use the
+// coldcc calling convention.
+static bool
+hasOnlyColdCalls(Function &F,
+                 function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      if (CallInst *CI = dyn_cast<CallInst>(&I)) {
+        CallSite CS(cast<Instruction>(CI));
+        // Skip over inline asm instructions, since they aren't function calls.
+        if (CI->isInlineAsm())
+          continue;
+        Function *CalledFn = CI->getCalledFunction();
+        if (!CalledFn)
+          return false;
+        if (!CalledFn->hasLocalLinkage())
+          return false;
+        // Skip over intrinsics, since they won't remain as function calls.
+        if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
+          continue;
+        // Check if it's valid to use the coldcc calling convention.
+        if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
+            CalledFn->hasAddressTaken())
+          return false;
+        BlockFrequencyInfo &CallerBFI = GetBFI(F);
+        if (!isColdCallSite(CS, CallerBFI))
+          return false;
+      }
+    }
+  }
+  return true;
+}
+
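[Note] isColdCallSite is a pure frequency comparison: with the default coldcc-rel-freq of 2, a call site counts as cold when its block runs less than 2% as often as the caller's entry block. A standalone model of that arithmetic, with assumed frequencies rather than values from a real profile:

    #include <cstdint>
    #include <iostream>

    int main() {
      const uint64_t EntryFreq = 1000;  // caller entry block frequency
      const uint64_t CallSiteFreq = 15; // frequency of the call's block
      const unsigned ColdCCRelFreq = 2; // percent, the cl::opt default

      // Mirrors: CallSiteFreq < CallerEntryFreq * BranchProbability(2, 100)
      bool IsCold = CallSiteFreq * 100 < EntryFreq * ColdCCRelFreq;
      std::cout << (IsCold ? "cold: 15 < 2% of 1000 (= 20)\n" : "not cold\n");
    }
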
 static bool
 OptimizeFunctions(Module &M, TargetLibraryInfo *TLI,
+                  function_ref<TargetTransformInfo &(Function &)> GetTTI,
+                  function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
                   function_ref<DominatorTree &(Function &)> LookupDomTree,
                   SmallSet<const Comdat *, 8> &NotDiscardableComdats) {
+
   bool Changed = false;
+
+  std::vector<Function *> AllCallsCold;
+  for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) {
+    Function *F = &*FI++;
+    if (hasOnlyColdCalls(*F, GetBFI))
+      AllCallsCold.push_back(F);
+  }
+
   // Optimize functions.
   for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
     Function *F = &*FI++;
+
     // Functions without names cannot be referenced outside this module.
     if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
       F->setLinkage(GlobalValue::InternalLinkage);
@@ -2142,7 +2254,25 @@ OptimizeFunctions(Module &M, TargetLibraryInfo *TLI,
     if (!F->hasLocalLinkage())
       continue;
-    if (isProfitableToMakeFastCC(F) && !F->isVarArg() &&
-        !F->hasAddressTaken()) {
+
+    if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) {
+      NumInternalFunc++;
+      TargetTransformInfo &TTI = GetTTI(*F);
+      // Change the calling convention to coldcc if either stress testing is
+      // enabled or the target would like to use coldcc on functions which are
+      // cold at all call sites and the callers contain no other non-coldcc
+      // calls.
+      if (EnableColdCCStressTest ||
+          (isValidCandidateForColdCC(*F, GetBFI, AllCallsCold) &&
+           TTI.useColdCCForColdCall(*F))) {
+        F->setCallingConv(CallingConv::Cold);
+        changeCallSitesToColdCC(F);
+        Changed = true;
+        NumColdCC++;
+      }
+    }
+
+    if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) {
       // If this function has a calling convention worth changing, is not a
       // varargs function, and is only called directly, promote it to use the
@@ -2620,6 +2750,8 @@ static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
 
 static bool optimizeGlobalsInModule(
     Module &M, const DataLayout &DL, TargetLibraryInfo *TLI,
+    function_ref<TargetTransformInfo &(Function &)> GetTTI,
+    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
     function_ref<DominatorTree &(Function &)> LookupDomTree) {
   SmallSet<const Comdat *, 8> NotDiscardableComdats;
   bool Changed = false;
@@ -2642,8 +2774,8 @@ static bool optimizeGlobalsInModule(
     NotDiscardableComdats.insert(C);
 
   // Delete functions that are trivially dead, ccc -> fastcc
-  LocalChange |=
-      OptimizeFunctions(M, TLI, LookupDomTree, NotDiscardableComdats);
+  LocalChange |= OptimizeFunctions(M, TLI, GetTTI, GetBFI, LookupDomTree,
+                                   NotDiscardableComdats);
 
   // Optimize global_ctors list.
   LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
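[Note] The AllCallsCold vector is deliberately built in a pre-pass, before any calling conventions change: once a call site is flipped to coldcc, hasOnlyColdCalls would no longer accept its caller. One small design observation: isValidCandidateForColdCC does a linear std::find over AllCallsCold for every user of every candidate. A hypothetical variant (not in the patch) could build a set once and query it in O(1):

    // Assumes #include "llvm/ADT/SmallPtrSet.h"; build once, after the
    // hasOnlyColdCalls pre-pass:
    SmallPtrSet<const Function *, 16> AllCallsColdSet(AllCallsCold.begin(),
                                                      AllCallsCold.end());

    // ...then inside isValidCandidateForColdCC, instead of std::find:
    if (!AllCallsColdSet.count(CallerFunc))
      return false;
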
@@ -2680,7 +2812,15 @@ PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
   auto LookupDomTree = [&FAM](Function &F) -> DominatorTree &{
     return FAM.getResult<DominatorTreeAnalysis>(F);
   };
-  if (!optimizeGlobalsInModule(M, DL, &TLI, LookupDomTree))
+  auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
+    return FAM.getResult<TargetIRAnalysis>(F);
+  };
+
+  auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
+    return FAM.getResult<BlockFrequencyAnalysis>(F);
+  };
+
+  if (!optimizeGlobalsInModule(M, DL, &TLI, GetTTI, GetBFI, LookupDomTree))
     return PreservedAnalyses::all();
   return PreservedAnalyses::none();
 }
@@ -2703,11 +2843,21 @@ struct GlobalOptLegacyPass : public ModulePass {
     auto LookupDomTree = [this](Function &F) -> DominatorTree & {
       return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
     };
-    return optimizeGlobalsInModule(M, DL, TLI, LookupDomTree);
+
+    auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
+      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+    };
+
+    auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & {
+      return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI();
+    };
+
+    return optimizeGlobalsInModule(M, DL, TLI, GetTTI, GetBFI, LookupDomTree);
   }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<TargetLibraryInfoWrapperPass>();
+    AU.addRequired<TargetTransformInfoWrapperPass>();
+    AU.addRequired<BlockFrequencyInfoWrapperPass>();
     AU.addRequired<DominatorTreeWrapperPass>();
   }
 };
@@ -2719,6 +2869,8 @@ char GlobalOptLegacyPass::ID = 0;
 INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
                       "Global Variable Optimizer", false, false)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
 INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
                     "Global Variable Optimizer", false, false)
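[Note] On the new-pass-manager side, the FAM captured by the GetTTI/GetBFI lambdas comes from the module-to-function analysis proxy, which sits just above this hunk. A sketch of that surrounding context (reconstructed from the standard pattern for module passes consuming function analyses; check the actual file before relying on it):

    // Sketch: where FAM comes from in GlobalOptPass::run (outside the hunk).
    PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
      auto &FAM =
          AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
      // GetTTI / GetBFI capture FAM as shown above, so BFI and TTI are
      // computed lazily, only for functions GlobalOpt actually inspects.
      // ...
    }
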

