author     Chandler Carruth <chandlerc@gmail.com>    2017-02-09 23:24:13 +0000
committer  Chandler Carruth <chandlerc@gmail.com>    2017-02-09 23:24:13 +0000
commit     aaad9f84be2a6a3eb8202ed4eaa5e5e2021d055e (patch)
tree       c26c36e00c5b92fe6eb07426ef44608f4d515838 /llvm/lib/Analysis/CGSCCPassManager.cpp
parent     b6afd2070a0c29475a60acd68be6ad3b183e448a (diff)
[PM/LCG] Teach the LazyCallGraph how to replace a function without
disturbing the graph or having to update edges.

This is motivated by porting argument promotion to the new pass
manager. Because of how LLVM IR Function objects work, in order to
change their signature a new object needs to be created. This is
efficient and straightforward in the IR but previously was very hard to
implement in LCG. We could easily replace the function a node in the
graph represents. The challenging part is how to handle updating the
edges in the graph.

LCG previously used an edge to a raw function to represent a node that
had not yet been scanned for calls and references. This was the core of
its laziness. However, that model causes this kind of update to be very
hard:
1) The keys to look up an edge need to be `Function*`s that would all
   need to be updated when we update the node.
2) There will be some unknown number of edges that haven't transitioned
   from `Function*` edges to `Node*` edges.

All of this complexity isn't necessary. Instead, we can always build a
node around any function, always point edges at it, and always use it
as the key to look up an edge. To maintain the laziness, we need to
sink the *edges* of a node into a secondary object and explicitly model
transitioning a node from empty to populated by scanning the function.
This design seems much cleaner in a number of ways, but importantly
there is now exactly *one* place where the `Function*` has to be
updated!

Some other cleanups that fall out of this include having something to
model the *entry* edges more accurately. Rather than hand-rolling parts
of the node in the graph itself, we have an explicit `EdgeSequence`
object that gives us exactly the functionality needed. We also have a
consistent place to define the edge iterators and can use them for both
the entry edges and the internal edges of the graph.

The API used to model the separation between a node and its edges is
intentionally very thin, as most clients are expected to deal with
nodes that have populated edges. We model this exactly as an optional
does, with an additional method to populate the edges when that is a
reasonable thing for a client to do. This is based on API design
suggestions from Richard Smith and David Blaikie; credit goes to them
for helping pick how to model this without it being either too explicit
or too implicit.

The patch is somewhat noisy due to shifting around iterator types and
new syntax for walking the edges of a node, but most of the
functionality change is in the `Edge`, `EdgeSequence`, and `Node`
types.

Differential Revision: https://reviews.llvm.org/D29577

llvm-svn: 294653
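Since the commit message describes the new design abstractly, the following is a minimal toy sketch of its shape. This is not LLVM's actual LazyCallGraph code (which lives in llvm/include/llvm/Analysis/LazyCallGraph.h): `Node`, `Edge`, `EdgeSequence`, `lookup`, `getNode`, `getKind`, and `isCall` appear in the commit message or the diff below, but the populate method's name, `replaceFunction`, `EdgeIndexMap`, and the standard-library containers are illustrative stand-ins.

    // Toy sketch of the design described above; not LLVM's real code.
    #include <cassert>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct Function {}; // stand-in for llvm::Function

    struct Node;

    // An edge always points at a Node, never at a raw Function.
    struct Edge {
      enum Kind { Ref, Call };
      Node *Target;
      Kind K;
      Node &getNode() { return *Target; }
      Kind getKind() const { return K; }
      bool isCall() const { return K == Call; }
    };

    // The *edges* of a node, sunk into a secondary object so a Node can
    // exist, be pointed at, and serve as a lookup key before it has been
    // scanned. Entry edges and node edges can share these iterators.
    struct EdgeSequence {
      std::vector<Edge> Edges;
      std::unordered_map<Node *, size_t> EdgeIndexMap; // keyed by Node*
      Edge *lookup(Node &Target) {
        auto It = EdgeIndexMap.find(&Target);
        return It == EdgeIndexMap.end() ? nullptr : &Edges[It->second];
      }
      std::vector<Edge>::iterator begin() { return Edges.begin(); }
      std::vector<Edge>::iterator end() { return Edges.end(); }
    };

    // Modeled like an optional over its EdgeSequence: dereferencing
    // asserts the node is populated; populate() performs the lazy scan.
    struct Node {
      Function *F; // the ONE place the Function* has to be updated
      std::unique_ptr<EdgeSequence> Edges;
      explicit Node(Function &Fn) : F(&Fn) {}
      explicit operator bool() const { return Edges != nullptr; }
      EdgeSequence &operator*() {
        assert(Edges && "dereferencing an unpopulated node");
        return *Edges;
      }
      EdgeSequence *operator->() { return &operator*(); }
      EdgeSequence &populate() {
        if (!Edges)
          Edges.reset(new EdgeSequence()); // a real graph would scan F here
        return *Edges;
      }
      // Invented for this sketch: swapping the function touches exactly
      // one field, since no edge key involves a Function*.
      void replaceFunction(Function &NewF) { F = &NewF; }
    };

Once a node is populated, the diff's new syntax falls out naturally: `*N` yields its EdgeSequence, `for (Edge &E : *N)` walks the edges, and `N->lookup(TargetN)` keys the lookup on a node rather than a function.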
Diffstat (limited to 'llvm/lib/Analysis/CGSCCPassManager.cpp')
-rw-r--r--  llvm/lib/Analysis/CGSCCPassManager.cpp | 54
1 file changed, 27 insertions(+), 27 deletions(-)
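Against the toy types sketched above, the mechanical change the hunks below apply throughout updateCGAndAnalysisManagerForFunctionPass has one shape: resolve the callee's `Node` once via the graph, then key every edge lookup and every bookkeeping set on `Node*` instead of `Function*`. The following is a hedged approximation, not the file's exact code: `retainCallEdge` is an invented name and the standard containers stand in for the real `SmallPtrSet`/`SmallSetVector`.

    #include <cassert>
    #include <unordered_set>
    #include <vector>
    // Reuses Node and Edge from the sketch above.

    void retainCallEdge(Node &N, Node &CalleeN,
                        std::unordered_set<Node *> &RetainedEdges,
                        std::vector<Node *> &PromotedRefTargets) {
      // Was: const Edge *E = N.lookup(*Callee);  -- keyed by Function*.
      Edge *E = N->lookup(CalleeN);
      assert(E && "function passes must not introduce *new* call edges");
      RetainedEdges.insert(&CalleeN);
      if (!E->isCall())                         // a ref edge now used as a call
        PromotedRefTargets.push_back(&CalleeN); // promoted to a call edge later
    }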
diff --git a/llvm/lib/Analysis/CGSCCPassManager.cpp b/llvm/lib/Analysis/CGSCCPassManager.cpp
index 7cede981313..9d4521221f4 100644
--- a/llvm/lib/Analysis/CGSCCPassManager.cpp
+++ b/llvm/lib/Analysis/CGSCCPassManager.cpp
@@ -274,9 +274,9 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// demoted edges.
SmallVector<Constant *, 16> Worklist;
SmallPtrSet<Constant *, 16> Visited;
- SmallPtrSet<Function *, 16> RetainedEdges;
- SmallSetVector<Function *, 4> PromotedRefTargets;
- SmallSetVector<Function *, 4> DemotedCallTargets;
+ SmallPtrSet<Node *, 16> RetainedEdges;
+ SmallSetVector<Node *, 4> PromotedRefTargets;
+ SmallSetVector<Node *, 4> DemotedCallTargets;
// First walk the function and handle all called functions. We do this first
// because if there is a single call edge, whether there are ref edges is
@@ -285,7 +285,8 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
if (auto CS = CallSite(&I))
if (Function *Callee = CS.getCalledFunction())
if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
- const Edge *E = N.lookup(*Callee);
+ Node &CalleeN = *G.lookup(*Callee);
+ Edge *E = N->lookup(CalleeN);
// FIXME: We should really handle adding new calls. While it will
// make downstream usage more complex, there is no fundamental
// limitation and it will allow passes within the CGSCC to be a bit
@@ -294,9 +295,9 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
assert(E && "No function transformations should introduce *new* "
"call edges! Any new calls should be modeled as "
"promoted existing ref edges!");
- RetainedEdges.insert(Callee);
+ RetainedEdges.insert(&CalleeN);
if (!E->isCall())
- PromotedRefTargets.insert(Callee);
+ PromotedRefTargets.insert(&CalleeN);
}
// Now walk all references.
@@ -307,24 +308,25 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
Worklist.push_back(C);
LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Referee) {
- const Edge *E = N.lookup(Referee);
+ Node &RefereeN = *G.lookup(Referee);
+ Edge *E = N->lookup(RefereeN);
// FIXME: Similarly to new calls, we also currently preclude
// introducing new references. See above for details.
assert(E && "No function transformations should introduce *new* ref "
"edges! Any new ref edges would require IPO which "
"function passes aren't allowed to do!");
- RetainedEdges.insert(&Referee);
+ RetainedEdges.insert(&RefereeN);
if (E->isCall())
- DemotedCallTargets.insert(&Referee);
+ DemotedCallTargets.insert(&RefereeN);
});
// First remove all of the edges that are no longer present in this function.
// We have to build a list of dead targets first and then remove them as the
// data structures will all be invalidated by removing them.
SmallVector<PointerIntPair<Node *, 1, Edge::Kind>, 4> DeadTargets;
- for (Edge &E : N)
- if (!RetainedEdges.count(&E.getFunction()))
- DeadTargets.push_back({E.getNode(), E.getKind()});
+ for (Edge &E : *N)
+ if (!RetainedEdges.count(&E.getNode()))
+ DeadTargets.push_back({&E.getNode(), E.getKind()});
for (auto DeadTarget : DeadTargets) {
Node &TargetN = *DeadTarget.getPointer();
bool IsCall = DeadTarget.getInt() == Edge::Call;
@@ -398,9 +400,8 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// Next demote all the call edges that are now ref edges. This helps make
// the SCCs small which should minimize the work below as we don't want to
// form cycles that this would break.
- for (Function *RefTarget : DemotedCallTargets) {
- Node &TargetN = *G.lookup(*RefTarget);
- SCC &TargetC = *G.lookupSCC(TargetN);
+ for (Node *RefTarget : DemotedCallTargets) {
+ SCC &TargetC = *G.lookupSCC(*RefTarget);
RefSCC &TargetRC = TargetC.getOuterRefSCC();
// The easy case is when the target RefSCC is not this RefSCC. This is
@@ -408,10 +409,10 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
if (&TargetRC != RC) {
assert(RC->isAncestorOf(TargetRC) &&
"Cannot potentially form RefSCC cycles here!");
- RC->switchOutgoingEdgeToRef(N, TargetN);
+ RC->switchOutgoingEdgeToRef(N, *RefTarget);
if (DebugLogging)
dbgs() << "Switch outgoing call edge to a ref edge from '" << N
- << "' to '" << TargetN << "'\n";
+ << "' to '" << *RefTarget << "'\n";
continue;
}
@@ -419,7 +420,7 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// some SCCs.
if (C != &TargetC) {
// For separate SCCs this is trivial.
- RC->switchTrivialInternalEdgeToRef(N, TargetN);
+ RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
continue;
}
@@ -431,14 +432,13 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// structure is changed.
AM.invalidate(*C, PreservedAnalyses::none());
// Now update the call graph.
- C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G,
- N, C, AM, UR, DebugLogging);
+ C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
+ C, AM, UR, DebugLogging);
}
// Now promote ref edges into call edges.
- for (Function *CallTarget : PromotedRefTargets) {
- Node &TargetN = *G.lookup(*CallTarget);
- SCC &TargetC = *G.lookupSCC(TargetN);
+ for (Node *CallTarget : PromotedRefTargets) {
+ SCC &TargetC = *G.lookupSCC(*CallTarget);
RefSCC &TargetRC = TargetC.getOuterRefSCC();
// The easy case is when the target RefSCC is not this RefSCC. This is
@@ -446,22 +446,22 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
if (&TargetRC != RC) {
assert(RC->isAncestorOf(TargetRC) &&
"Cannot potentially form RefSCC cycles here!");
- RC->switchOutgoingEdgeToCall(N, TargetN);
+ RC->switchOutgoingEdgeToCall(N, *CallTarget);
if (DebugLogging)
dbgs() << "Switch outgoing ref edge to a call edge from '" << N
- << "' to '" << TargetN << "'\n";
+ << "' to '" << *CallTarget << "'\n";
continue;
}
if (DebugLogging)
dbgs() << "Switch an internal ref edge to a call edge from '" << N
- << "' to '" << TargetN << "'\n";
+ << "' to '" << *CallTarget << "'\n";
// Otherwise we are switching an internal ref edge to a call edge. This
// may merge away some SCCs, and we add those to the UpdateResult. We also
// need to make sure to update the worklist in the event SCCs have moved
// before the current one in the post-order sequence.
auto InitialSCCIndex = RC->find(*C) - RC->begin();
- auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, TargetN);
+ auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, *CallTarget);
if (!InvalidatedSCCs.empty()) {
C = &TargetC;
assert(G.lookupSCC(N) == C && "Failed to update current SCC!");