author    | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-24 19:52:05 +0000
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-03-24 19:52:05 +0000
commit    | b8f8dbc227e8d08d6685bb2bc3131ac86e3ac24e (patch)
tree      | bc699fbcc8daebe4e572a7841766102ebba64788 /llvm/lib
parent    | cbc69712da58c6e9a4d72fe37e829bb27409d3b8 (diff)
download  | bcm5719-llvm-b8f8dbc227e8d08d6685bb2bc3131ac86e3ac24e.tar.gz, bcm5719-llvm-b8f8dbc227e8d08d6685bb2bc3131ac86e3ac24e.zip
AMDGPU: Unify divergent function exits.
StructurizeCFG can't handle cases where multiple
returns create regions with multiple exits.

Create a copy of UnifyFunctionExitNodes that only
unifies divergent exit nodes, skipping exit nodes
whose branch sources are uniform.
llvm-svn: 298729
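To make the failure mode concrete, here is a minimal, hypothetical IR sketch (not part of this commit; function and value names are invented) of a kernel in which a branch on the divergent work-item ID reaches two separate return blocks, i.e. exactly the multi-exit region StructurizeCFG cannot handle:

```llvm
; Hypothetical input: a divergent branch leads to two return blocks,
; so the region below 'entry' has two exit nodes.
define amdgpu_kernel void @divergent_exits(i32 addrspace(1)* %out) {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()   ; divergent value
  %cond = icmp eq i32 %tid, 0
  br i1 %cond, label %then, label %else          ; divergent branch

then:                                            ; divergent exit #1
  store i32 1, i32 addrspace(1)* %out
  ret void

else:                                            ; divergent exit #2
  store i32 2, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()
```

After the new pass runs, 'then' and 'else' would both branch to a single UnifiedReturnBlock holding the only ret, so StructurizeCFG sees a single-exit region; exits reached only through uniform branches are left alone so they can still use scalar branching.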
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPU.h                          |   3 |
-rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp           |   5 |
-rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp | 225 |
-rw-r--r-- | llvm/lib/Target/AMDGPU/CMakeLists.txt                    |   1 |
-rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrFormats.td                 |  12 |
-rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrInfo.cpp                   |   8 |
-rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstructions.td                 |  15 |
7 files changed, 254 insertions, 15 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index 3e1e64a032e..99d71f50374 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -123,6 +123,9 @@ extern char &SIDebuggerInsertNopsID;
 void initializeSIInsertWaitsPass(PassRegistry&);
 extern char &SIInsertWaitsID;
 
+void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry&);
+extern char &AMDGPUUnifyDivergentExitNodesID;
+
 ImmutablePass *createAMDGPUAAWrapperPass();
 void initializeAMDGPUAAWrapperPassPass(PassRegistry&);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 93c28149621..46e458a2e38 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -132,6 +132,7 @@ extern "C" void LLVMInitializeAMDGPUTarget() {
   initializeSIInsertSkipsPass(*PR);
   initializeSIDebuggerInsertNopsPass(*PR);
   initializeSIOptimizeExecMaskingPass(*PR);
+  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
   initializeAMDGPUAAWrapperPassPass(*PR);
 }
 
@@ -673,6 +674,10 @@ bool GCNPassConfig::addPreISel() {
   // supported.
   const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
   addPass(createAMDGPUAnnotateKernelFeaturesPass(&TM));
+
+  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
+  // regions formed by them.
+  addPass(&AMDGPUUnifyDivergentExitNodesID);
   addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
   addPass(createSinkingPass());
   addPass(createSITypeRewriter());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
new file mode 100644
index 00000000000..309913f87fb
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
@@ -0,0 +1,225 @@
+//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a variant of the UnifyDivergentExitNodes pass. Rather than ensuring
+// there is at most one ret and one unreachable instruction, it ensures there is
+// at most one divergent exiting block.
+//
+// StructurizeCFG can't deal with multi-exit regions formed by branches to
+// multiple return nodes. It is not desirable to structurize regions with
+// uniform branches, so unifying those to the same return block as divergent
+// branches inhibits use of scalar branching. It still can't deal with the case
+// where one branch goes to return, and one unreachable. Replace unreachable in
+// this case with a return.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"
+
+namespace {
+
+class AMDGPUUnifyDivergentExitNodes : public FunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+  AMDGPUUnifyDivergentExitNodes() : FunctionPass(ID) {
+    initializeAMDGPUUnifyDivergentExitNodesPass(*PassRegistry::getPassRegistry());
+  }
+
+  // We can preserve non-critical-edgeness when we unify function exit nodes
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnFunction(Function &F) override;
+};
+
+}
+
+char AMDGPUUnifyDivergentExitNodes::ID = 0;
+INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
+                      "Unify divergent function exit nodes", false, false)
+INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
+INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
+                    "Unify divergent function exit nodes", false, false)
+
+char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;
+
+void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const{
+  // TODO: Preserve dominator tree.
+  AU.addRequired<PostDominatorTreeWrapperPass>();
+
+  AU.addRequired<DivergenceAnalysis>();
+
+  // No divergent values are changed, only blocks and branch edges.
+  AU.addPreserved<DivergenceAnalysis>();
+
+  // We preserve the non-critical-edgeness property
+  AU.addPreservedID(BreakCriticalEdgesID);
+
+  // This is a cluster of orthogonal Transforms
+  AU.addPreservedID(LowerSwitchID);
+  FunctionPass::getAnalysisUsage(AU);
+
+  AU.addRequired<TargetTransformInfoWrapperPass>();
+}
+
+/// \returns true if \p BB is reachable through only uniform branches.
+/// XXX - Is there a more efficient way to find this?
+static bool isUniformlyReached(const DivergenceAnalysis &DA,
+                               BasicBlock &BB) {
+  SmallVector<BasicBlock *, 8> Stack;
+  SmallPtrSet<BasicBlock *, 8> Visited;
+
+  for (BasicBlock *Pred : predecessors(&BB))
+    Stack.push_back(Pred);
+
+  while (!Stack.empty()) {
+    BasicBlock *Top = Stack.pop_back_val();
+    if (!DA.isUniform(Top->getTerminator()))
+      return false;
+
+    for (BasicBlock *Pred : predecessors(Top)) {
+      if (Visited.insert(Pred).second)
+        Stack.push_back(Pred);
+    }
+  }
+
+  return true;
+}
+
+static BasicBlock *unifyReturnBlockSet(Function &F,
+                                       ArrayRef<BasicBlock *> ReturningBlocks,
+                                       const TargetTransformInfo &TTI,
+                                       StringRef Name) {
+  // Otherwise, we need to insert a new basic block into the function, add a PHI
+  // nodes (if the function returns values), and convert all of the return
+  // instructions into unconditional branches.
+  //
+  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);
+
+  PHINode *PN = nullptr;
+  if (F.getReturnType()->isVoidTy()) {
+    ReturnInst::Create(F.getContext(), nullptr, NewRetBlock);
+  } else {
+    // If the function doesn't return void... add a PHI node to the block...
+    PN = PHINode::Create(F.getReturnType(), ReturningBlocks.size(),
+                         "UnifiedRetVal");
+    NewRetBlock->getInstList().push_back(PN);
+    ReturnInst::Create(F.getContext(), PN, NewRetBlock);
+  }
+
+  // Loop over all of the blocks, replacing the return instruction with an
+  // unconditional branch.
+  //
+  for (BasicBlock *BB : ReturningBlocks) {
+    // Add an incoming element to the PHI node for every return instruction that
+    // is merging into this new block...
+    if (PN)
+      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);
+
+    BB->getInstList().pop_back();  // Remove the return insn
+    BranchInst::Create(NewRetBlock, BB);
+  }
+
+  for (BasicBlock *BB : ReturningBlocks) {
+    // Cleanup possible branch to unconditional branch to the return.
+    SimplifyCFG(BB, TTI, 2);
+  }
+
+  return NewRetBlock;
+}
+
+bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
+  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
+  if (PDT.getRoots().size() <= 1)
+    return false;
+
+  DivergenceAnalysis &DA = getAnalysis<DivergenceAnalysis>();
+
+  // Loop over all of the blocks in a function, tracking all of the blocks that
+  // return.
+  //
+  SmallVector<BasicBlock *, 4> ReturningBlocks;
+  SmallVector<BasicBlock *, 4> UnreachableBlocks;
+
+  for (BasicBlock *BB : PDT.getRoots()) {
+    if (isa<ReturnInst>(BB->getTerminator())) {
+      if (!isUniformlyReached(DA, *BB))
+        ReturningBlocks.push_back(BB);
+    } else if (isa<UnreachableInst>(BB->getTerminator())) {
+      if (!isUniformlyReached(DA, *BB))
+        UnreachableBlocks.push_back(BB);
+    }
+  }
+
+  if (!UnreachableBlocks.empty()) {
+    BasicBlock *UnreachableBlock = nullptr;
+
+    if (UnreachableBlocks.size() == 1) {
+      UnreachableBlock = UnreachableBlocks.front();
+    } else {
+      UnreachableBlock = BasicBlock::Create(F.getContext(),
+                                            "UnifiedUnreachableBlock", &F);
+      new UnreachableInst(F.getContext(), UnreachableBlock);
+
+      for (BasicBlock *BB : UnreachableBlocks) {
+        BB->getInstList().pop_back();  // Remove the unreachable inst.
+        BranchInst::Create(UnreachableBlock, BB);
+      }
+    }
+
+    if (!ReturningBlocks.empty()) {
+      // Don't create a new unreachable inst if we have a return. The
+      // structurizer/annotator can't handle the multiple exits
+
+      Type *RetTy = F.getReturnType();
+      Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
+      UnreachableBlock->getInstList().pop_back();  // Remove the unreachable inst.
+
+      Function *UnreachableIntrin =
+        Intrinsic::getDeclaration(F.getParent(), Intrinsic::amdgcn_unreachable);
+
+      // Insert a call to an intrinsic tracking that this is an unreachable
+      // point, in case we want to kill the active lanes or something later.
+      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);
+
+      // Don't create a scalar trap. We would only want to trap if this code was
+      // really reached, but a scalar trap would happen even if no lanes
+      // actually reached here.
+      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
+      ReturningBlocks.push_back(UnreachableBlock);
+    }
+  }
+
+  // Now handle return blocks.
+  if (ReturningBlocks.empty())
+    return false; // No blocks return
+
+  if (ReturningBlocks.size() == 1)
+    return false; // Already has a single return block
+
+  const TargetTransformInfo &TTI
+    = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+
+  unifyReturnBlockSet(F, ReturningBlocks, TTI, "UnifiedReturnBlock");
+  return true;
+}
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index ffdcadd3394..689fc972621 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -58,6 +58,7 @@ add_llvm_target(AMDGPUCodeGen
   AMDGPUInstrInfo.cpp
   AMDGPUPromoteAlloca.cpp
   AMDGPURegisterInfo.cpp
+  AMDGPUUnifyDivergentExitNodes.cpp
   GCNHazardRecognizer.cpp
   GCNSchedStrategy.cpp
   R600ClauseMergePass.cpp
diff --git a/llvm/lib/Target/AMDGPU/SIInstrFormats.td b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
index 1db22120f21..b83a1fe187e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrFormats.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
@@ -138,19 +138,19 @@ class InstSI <dag outs, dag ins, string asm = "",
   let AsmVariantName = AMDGPUAsmVariants.Default;
 }
 
-class PseudoInstSI<dag outs, dag ins, list<dag> pattern = []>
-  : InstSI<outs, ins, "", pattern> {
+class PseudoInstSI<dag outs, dag ins, list<dag> pattern = [], string asm = "">
+  : InstSI<outs, ins, asm, pattern> {
   let isPseudo = 1;
   let isCodeGenOnly = 1;
 }
 
-class SPseudoInstSI<dag outs, dag ins, list<dag> pattern = []>
-  : PseudoInstSI<outs, ins, pattern> {
+class SPseudoInstSI<dag outs, dag ins, list<dag> pattern = [], string asm = "">
+  : PseudoInstSI<outs, ins, pattern, asm> {
   let SALU = 1;
 }
 
-class VPseudoInstSI<dag outs, dag ins, list<dag> pattern = []>
-  : PseudoInstSI<outs, ins, pattern> {
+class VPseudoInstSI<dag outs, dag ins, list<dag> pattern = [], string asm = "">
+  : PseudoInstSI<outs, ins, pattern, asm> {
   let VALU = 1;
   let Uses = [EXEC];
 }
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 40d35bf393d..e2e0895f899 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3803,16 +3803,11 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
   if (DescSize != 0 && DescSize != 4)
     return DescSize;
 
-  if (Opc == AMDGPU::WAVE_BARRIER)
-    return 0;
-
   // 4-byte instructions may have a 32-bit literal encoded after them. Check
   // operands that coud ever be literals.
   if (isVALU(MI) || isSALU(MI)) {
-    if (isFixedSize(MI)) {
-      assert(DescSize == 4);
+    if (isFixedSize(MI))
       return DescSize;
-    }
 
     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
     if (Src0Idx == -1)
@@ -3835,7 +3830,6 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
     return 4;
 
   switch (Opc) {
-  case AMDGPU::SI_MASK_BRANCH:
   case TargetOpcode::IMPLICIT_DEF:
   case TargetOpcode::KILL:
   case TargetOpcode::DBG_VALUE:
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index cc766489f2e..9e343aab71e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -152,6 +152,8 @@ def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
   let mayStore = 1;
   let isBarrier = 1;
   let isConvergent = 1;
+  let FixedSize = 1;
+  let Size = 0;
 }
 
 // SI pseudo instructions. These are used by the CFG structurizer pass
@@ -159,14 +161,15 @@ def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
 
 // Dummy terminator instruction to use after control flow instructions
 // replaced with exec mask operations.
-def SI_MASK_BRANCH : PseudoInstSI <
+def SI_MASK_BRANCH : VPseudoInstSI <
   (outs), (ins brtarget:$target)> {
   let isBranch = 0;
   let isTerminator = 1;
   let isBarrier = 0;
-  let Uses = [EXEC];
   let SchedRW = [];
   let hasNoSchedulingInfo = 1;
+  let FixedSize = 1;
+  let Size = 0;
 }
 
 let isTerminator = 1 in {
@@ -260,6 +263,14 @@ def SI_PS_LIVE : PseudoInstSI <
   let SALU = 1;
 }
 
+def SI_MASKED_UNREACHABLE : SPseudoInstSI <(outs), (ins),
+  [(int_amdgcn_unreachable)],
+  "; divergent unreachable"> {
+  let Size = 0;
+  let hasNoSchedulingInfo = 1;
+  let FixedSize = 1;
+}
+
 // Used as an isel pseudo to directly emit initialization with an
 // s_mov_b32 rather than a copy of another initialized
 // register. MachineCSE skips copies, and we don't want to have to
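As a rough sketch of the return-plus-unreachable case handled above (hypothetical IR, not taken from the commit or its tests), consider a kernel where one divergent path ends in unreachable while another returns; the comments describe what runOnFunction would do with it:

```llvm
; Hypothetical input: one divergent exit returns, the other is unreachable.
define amdgpu_kernel void @ret_and_unreachable(i32 addrspace(1)* %out) {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %cond = icmp eq i32 %tid, 0
  br i1 %cond, label %dead, label %exit

dead:                                 ; divergent path ending in unreachable
  unreachable

exit:                                 ; divergent path ending in a return
  store i32 0, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()

; The pass would rewrite 'dead' to roughly:
;   dead:
;     call void @llvm.amdgcn.unreachable()
;     ret void
; and then merge both returns into a UnifiedReturnBlock, so only one kind of
; exit remains. The llvm.amdgcn.unreachable call selects to the new
; SI_MASKED_UNREACHABLE pseudo ("; divergent unreachable", Size = 0) rather
; than a scalar trap, which would fire even when no lanes actually get here.
```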