| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2018-07-10 14:03:41 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2018-07-10 14:03:41 +0000 |
| commit | a680199a966b518e1cc7444aa025a4fcda8b1ba4 | |
| tree | 6ee8b80e9bcd57af6f18ede6aa24d0656d0e37d1 | |
| parent | 3467fac0918e16dc6a176150f6094629e593e4c3 | |
Reapply "AMDGPU: Force inlining if LDS global address is used"
This reverts commit r336623
llvm-svn: 336675
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp | 108 |
| -rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 12 |
| -rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h | 1 |
3 files changed, 95 insertions, 26 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
index c27425443ab..d4bbb2c1eb8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
@@ -14,6 +14,9 @@
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
+#include "Utils/AMDGPUBaseInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Transforms/Utils/Cloning.h"
 
@@ -30,13 +33,18 @@ static cl::opt<bool> StressCalls(
 class AMDGPUAlwaysInline : public ModulePass {
   bool GlobalOpt;
 
+  void recursivelyVisitUsers(GlobalValue &GV,
+                             SmallPtrSetImpl<Function *> &FuncsToAlwaysInline);
 public:
   static char ID;
 
   AMDGPUAlwaysInline(bool GlobalOpt = false) : ModulePass(ID), GlobalOpt(GlobalOpt) { }
   bool runOnModule(Module &M) override;
-  StringRef getPassName() const override { return "AMDGPU Always Inline Pass"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
 };
 
 } // End anonymous namespace
 
@@ -46,15 +54,53 @@ INITIALIZE_PASS(AMDGPUAlwaysInline, "amdgpu-always-inline",
 
 char AMDGPUAlwaysInline::ID = 0;
 
+void AMDGPUAlwaysInline::recursivelyVisitUsers(
+  GlobalValue &GV,
+  SmallPtrSetImpl<Function *> &FuncsToAlwaysInline) {
+  SmallVector<User *, 16> Stack;
+
+  SmallPtrSet<const Value *, 8> Visited;
+
+  for (User *U : GV.users())
+    Stack.push_back(U);
+
+  while (!Stack.empty()) {
+    User *U = Stack.pop_back_val();
+    if (!Visited.insert(U).second)
+      continue;
+
+    if (Instruction *I = dyn_cast<Instruction>(U)) {
+      Function *F = I->getParent()->getParent();
+      if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
+        FuncsToAlwaysInline.insert(F);
+        Stack.push_back(F);
+      }
+
+      // No need to look at further users, but we do need to inline any callers.
+      continue;
+    }
+
+    for (User *UU : U->users())
+      Stack.push_back(UU);
+  }
+}
+
 bool AMDGPUAlwaysInline::runOnModule(Module &M) {
+  AMDGPUAS AMDGPUAS = AMDGPU::getAMDGPUAS(M);
+
   std::vector<GlobalAlias*> AliasesToRemove;
-  std::vector<Function *> FuncsToClone;
+
+  SmallPtrSet<Function *, 8> FuncsToAlwaysInline;
+  SmallPtrSet<Function *, 8> FuncsToNoInline;
 
   for (GlobalAlias &A : M.aliases()) {
     if (Function* F = dyn_cast<Function>(A.getAliasee())) {
       A.replaceAllUsesWith(F);
       AliasesToRemove.push_back(&A);
     }
+
+    // FIXME: If the aliasee isn't a function, it's some kind of constant expr
+    // cast that won't be inlined through.
   }
 
   if (GlobalOpt) {
@@ -63,31 +109,51 @@ bool AMDGPUAlwaysInline::runOnModule(Module &M) {
     }
   }
 
-  auto NewAttr = StressCalls ? Attribute::NoInline : Attribute::AlwaysInline;
-  auto IncompatAttr
-    = StressCalls ? Attribute::AlwaysInline : Attribute::NoInline;
-
-  for (Function &F : M) {
-    if (!F.hasLocalLinkage() && !F.isDeclaration() && !F.use_empty() &&
-        !F.hasFnAttribute(IncompatAttr))
-      FuncsToClone.push_back(&F);
-  }
-
-  for (Function *F : FuncsToClone) {
-    ValueToValueMapTy VMap;
-    Function *NewFunc = CloneFunction(F, VMap);
-    NewFunc->setLinkage(GlobalValue::InternalLinkage);
-    F->replaceAllUsesWith(NewFunc);
+  // Always force inlining of any function that uses an LDS global address. This
+  // is something of a workaround because we don't have a way of supporting LDS
+  // objects defined in functions. LDS is always allocated by a kernel, and it
+  // is difficult to manage LDS usage if a function may be used by multiple
+  // kernels.
+  //
+  // OpenCL doesn't allow declaring LDS in non-kernels, so in practice this
+  // should only appear when IPO passes manages to move LDs defined in a kernel
+  // into a single user function.
+
+  for (GlobalVariable &GV : M.globals()) {
+    // TODO: Region address
+    unsigned AS = GV.getType()->getAddressSpace();
+    if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS.REGION_ADDRESS)
+      continue;
+
+    recursivelyVisitUsers(GV, FuncsToAlwaysInline);
   }
 
-  for (Function &F : M) {
-    if (F.hasLocalLinkage() && !F.hasFnAttribute(IncompatAttr)) {
-      F.addFnAttr(NewAttr);
+  if (!AMDGPUTargetMachine::EnableFunctionCalls || StressCalls) {
+    auto IncompatAttr
+      = StressCalls ? Attribute::AlwaysInline : Attribute::NoInline;
+
+    for (Function &F : M) {
+      if (!F.isDeclaration() && !F.use_empty() &&
+          !F.hasFnAttribute(IncompatAttr)) {
+        if (StressCalls) {
+          if (!FuncsToAlwaysInline.count(&F))
+            FuncsToNoInline.insert(&F);
+        } else
+          FuncsToAlwaysInline.insert(&F);
+      }
     }
   }
-  return false;
+
+  for (Function *F : FuncsToAlwaysInline)
+    F->addFnAttr(Attribute::AlwaysInline);
+
+  for (Function *F : FuncsToNoInline)
+    F->addFnAttr(Attribute::NoInline);
+
+  return !FuncsToAlwaysInline.empty() || !FuncsToNoInline.empty();
 }
 
 ModulePass *llvm::createAMDGPUAlwaysInlinePass(bool GlobalOpt) {
   return new AMDGPUAlwaysInline(GlobalOpt);
 }
+
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 22ea16049bf..b4066102553 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -117,11 +117,12 @@ static cl::opt<bool, true> LateCFGStructurize(
   cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
   cl::Hidden);
 
-static cl::opt<bool> EnableAMDGPUFunctionCalls(
+static cl::opt<bool, true> EnableAMDGPUFunctionCalls(
   "amdgpu-function-calls",
-  cl::Hidden, cl::desc("Enable AMDGPU function call support"),
-  cl::init(false));
+  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
+  cl::init(false),
+  cl::Hidden);
 
 // Enable lib calls simplifications
 static cl::opt<bool> EnableLibCallSimplify(
@@ -311,9 +312,10 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
   initAsmInfo();
 }
 
-AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
-
 bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
+bool AMDGPUTargetMachine::EnableFunctionCalls = false;
+
+AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
 
 StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
   Attribute GPUAttr = F.getFnAttribute("target-cpu");
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index 4dcb1afd313..50b219d639c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -41,6 +41,7 @@ protected:
 
 public:
   static bool EnableLateStructurizeCFG;
+  static bool EnableFunctionCalls;
 
   AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
                       StringRef FS, TargetOptions Options,
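As a reading aid, the sketch below models the traversal that the new `recursivelyVisitUsers` performs: starting from the users of an LDS global, walk the use graph with an explicit stack and a visited set, and record every enclosing non-entry function so it can later be tagged always-inline; when a function is recorded, the function itself is also pushed so its call sites, and therefore its non-kernel callers, get pulled in too. This is a minimal standalone model, not code from the patch: the `Node` type and the `lds_var`/`helper`/`wrapper`/`kernel_main` names are hypothetical stand-ins for LLVM's `User`/`Instruction`/`Function` classes.

```cpp
// Simplified, self-contained model of the worklist traversal in
// AMDGPUAlwaysInline::recursivelyVisitUsers(). LLVM types are mimicked by a
// toy Node graph; nothing here uses the real LLVM API.
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct Node {
  std::string Name;
  bool IsInstruction;    // true if this value lives inside a function body
  Node *ParentFunction;  // enclosing function node (instructions only)
  bool IsKernel;         // meaningful for function nodes (entry points)
  std::vector<Node *> Users;
};

// Collect the name of every non-kernel function that transitively uses GV.
std::unordered_set<std::string> collectFuncsToAlwaysInline(const Node &GV) {
  std::unordered_set<std::string> FuncsToAlwaysInline;
  std::vector<Node *> Stack(GV.Users.begin(), GV.Users.end());
  std::unordered_set<Node *> Visited;

  while (!Stack.empty()) {
    Node *U = Stack.back();
    Stack.pop_back();
    if (!Visited.insert(U).second)
      continue;  // already handled

    if (U->IsInstruction) {
      Node *F = U->ParentFunction;
      if (!F->IsKernel) {
        FuncsToAlwaysInline.insert(F->Name);
        // Also walk the function's own users (its call sites), so every
        // non-kernel caller up the chain gets marked as well.
        Stack.push_back(F);
      }
      // No need to look at further users of the instruction itself.
      continue;
    }

    // Constant expressions and similar wrappers: look through to their users.
    for (Node *UU : U->Users)
      Stack.push_back(UU);
  }
  return FuncsToAlwaysInline;
}

int main() {
  // lds_var is addressed through a constant-expression cast inside helper(),
  // helper() is called by wrapper(), and wrapper() is called by the kernel.
  Node KernelMain{"kernel_main", false, nullptr, /*IsKernel=*/true, {}};
  Node Wrapper{"wrapper", false, nullptr, false, {}};
  Node Helper{"helper", false, nullptr, false, {}};
  Node CallWrapper{"call wrapper", true, &KernelMain, false, {}};
  Node CallHelper{"call helper", true, &Wrapper, false, {}};
  Node LoadLds{"load", true, &Helper, false, {}};
  Node Cast{"addrspacecast", false, nullptr, false, {}};
  Node LdsVar{"lds_var", false, nullptr, false, {}};

  LdsVar.Users = {&Cast};
  Cast.Users = {&LoadLds};
  Helper.Users = {&CallHelper};    // helper's only user is its call site
  Wrapper.Users = {&CallWrapper};  // wrapper is called from the kernel

  // Prints "helper" and "wrapper"; kernel_main is an entry point and is
  // never forced to inline.
  for (const std::string &F : collectFuncsToAlwaysInline(LdsVar))
    std::cout << "force alwaysinline: " << F << "\n";
}
```

In the patch itself the collected functions simply receive `Attribute::AlwaysInline` (or `NoInline` under the stress-calls option); the pass only sets attributes, and its return value now reports whether the module was changed.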
