path: root/clang/lib/StaticAnalyzer/Frontend
author     Anna Zaks <ganna@apple.com>   2012-12-21 17:27:01 +0000
committer  Anna Zaks <ganna@apple.com>   2012-12-21 17:27:01 +0000
commit     1ee76c1baee3dac926c5100c1ecded466273852b (patch)
tree       da4c5c6e6b5ebef3a462830a79e7f91f7e310719 /clang/lib/StaticAnalyzer/Frontend
parent     a6795968e6f4182ae67b3b7925bf8bac9d994073 (diff)
[analyzer] Re-apply r170826 and make the dumping of the CallGraph
deterministic.

Commit message for r170826:
[analyzer] Traverse the Call Graph in topological order.

Modify the call graph by removing the parentless nodes. Instead, all
nodes are children of root to ensure they are all reachable.

Remove the tracking of nodes that are "top level" or global. This
information is not used and can be obtained from the Decls stored inside
CallGraphNodes.

Instead of the existing ordering hacks, analyze the functions in
topological order over the Call Graph.

Together with the addition of devirtualizable ObjC message sends and
blocks to the call graph, this gives around a 6% performance improvement
on several large ObjC benchmarks.

llvm-svn: 170906
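The property the commit relies on is that a reverse post-order (RPO) over an
acyclic graph is a topological order, so callers are visited before their
callees. Below is a minimal, self-contained C++ sketch of that idea; it is
not part of the patch, and the toy call graph and function names ("root",
"main", "helper", "logger") are made up for illustration.

// Illustrative sketch, not from the patch: a DFS post-order emits a node
// only after all of its callees, so reversing it yields callers first.
#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

typedef std::map<std::string, std::vector<std::string> > Graph;

static void postOrder(const Graph &G, const std::string &N,
                      std::map<std::string, bool> &Visited,
                      std::vector<std::string> &Out) {
  if (Visited[N]) return;
  Visited[N] = true;
  Graph::const_iterator It = G.find(N);
  if (It != G.end())
    for (size_t i = 0; i < It->second.size(); ++i)
      postOrder(G, It->second[i], Visited, Out);
  Out.push_back(N); // post-order: emitted only after all callees
}

int main() {
  Graph CG;
  CG["root"].push_back("main");     // abstract root -> main
  CG["main"].push_back("helper");
  CG["main"].push_back("logger");
  CG["helper"].push_back("logger"); // logger is reachable two ways
  std::map<std::string, bool> Visited;
  std::vector<std::string> PO;
  postOrder(CG, "root", Visited, PO);
  std::reverse(PO.begin(), PO.end()); // RPO == topological order here
  for (size_t i = 0; i < PO.size(); ++i)
    std::cout << PO[i] << "\n";       // root, main, helper, logger
  return 0;
}

In the analyzer this ordering is what makes the "do not reanalyze a
previously inlined function" heuristic effective: by the time the traversal
reaches a callee, its caller has usually already been analyzed and may have
inlined it, so the callee is in the Visited set and can be skipped.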
Diffstat (limited to 'clang/lib/StaticAnalyzer/Frontend')
-rw-r--r--  clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 62
1 file changed, 18 insertions(+), 44 deletions(-)
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 025891adf8e..07aa5b074ae 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -39,6 +39,7 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Timer.h"
@@ -417,61 +418,34 @@ AnalysisConsumer::getInliningModeForFunction(const Decl *D,
}
void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
- // Otherwise, use the Callgraph to derive the order.
- // Build the Call Graph.
- CallGraph CG;
-
- // Add all the top level declarations to the graph.
+ // Build the Call Graph by adding all the top level declarations to the graph.
// Note: CallGraph can trigger deserialization of more items from a pch
// (through HandleInterestingDecl); triggering additions to LocalTUDecls.
// We rely on random access to add the initially processed Decls to CG.
+ CallGraph CG;
for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
CG.addToCallGraph(LocalTUDecls[i]);
}
- // Find the top level nodes - children of root + the unreachable (parentless)
- // nodes.
- llvm::SmallVector<CallGraphNode*, 24> TopLevelFunctions;
- for (CallGraph::nodes_iterator TI = CG.parentless_begin(),
- TE = CG.parentless_end(); TI != TE; ++TI) {
- TopLevelFunctions.push_back(*TI);
- NumFunctionTopLevel++;
- }
- CallGraphNode *Entry = CG.getRoot();
- for (CallGraphNode::iterator I = Entry->begin(),
- E = Entry->end(); I != E; ++I) {
- TopLevelFunctions.push_back(*I);
- NumFunctionTopLevel++;
- }
-
- // Make sure the nodes are sorted in order reverse of their definition in the
- // translation unit. This step is very important for performance. It ensures
- // that we analyze the root functions before the externally available
- // subroutines.
- std::deque<CallGraphNode*> BFSQueue;
- for (llvm::SmallVector<CallGraphNode*, 24>::reverse_iterator
- TI = TopLevelFunctions.rbegin(), TE = TopLevelFunctions.rend();
- TI != TE; ++TI)
- BFSQueue.push_back(*TI);
-
- // BFS over all of the functions, while skipping the ones inlined into
- // the previously processed functions. Use external Visited set, which is
- // also modified when we inline a function.
+ // Walk over all of the call graph nodes in topological order, so that we
+ // analyze parents before the children. Skip the functions inlined into
+ // the previously processed functions. Use external Visited set to identify
+ // inlined functions. The topological order allows the "do not reanalyze
+ // previously inlined function" performance heuristic to be triggered more
+ // often.
SetOfConstDecls Visited;
SetOfConstDecls VisitedAsTopLevel;
- while(!BFSQueue.empty()) {
- CallGraphNode *N = BFSQueue.front();
- BFSQueue.pop_front();
-
- // Push the children into the queue.
- for (CallGraphNode::const_iterator CI = N->begin(),
- CE = N->end(); CI != CE; ++CI) {
- if (!shouldSkipFunction((*CI)->getDecl(), Visited, VisitedAsTopLevel))
- BFSQueue.push_back(*CI);
- }
+ llvm::ReversePostOrderTraversal<clang::CallGraph*> RPOT(&CG);
+ for (llvm::ReversePostOrderTraversal<clang::CallGraph*>::rpo_iterator
+ I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
+ NumFunctionTopLevel++;
+ CallGraphNode *N = *I;
Decl *D = N->getDecl();
- assert(D);
+
+ // Skip the abstract root node.
+ if (!D)
+ continue;
// Skip the functions which have been processed already or previously
// inlined.
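The traversal idiom added by this patch generalizes to any graph type with a
GraphTraits specialization, not just clang::CallGraph. Below is a hedged
usage sketch, not from this commit, showing the same iterator pattern over a
function's CFG of basic blocks; the header paths are the modern ones (the
2012 tree kept the CFG traits in llvm/Support/CFG.h).

// Sketch: visit every basic block of F before its successors (back edges
// aside), using the same rpo_iterator pattern as the patch above.
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"      // GraphTraits<Function*> specialization
#include "llvm/IR/Function.h"

void visitInRPO(llvm::Function &F) {
  llvm::ReversePostOrderTraversal<llvm::Function *> RPOT(&F);
  for (llvm::ReversePostOrderTraversal<llvm::Function *>::rpo_iterator
         I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
    llvm::BasicBlock *BB = *I;
    (void)BB; // per-block analysis would go here
  }
}

Note that ReversePostOrderTraversal materializes the full post-order when
constructed and then iterates it in reverse, which is also why the patch can
simply count NumFunctionTopLevel once per node as it walks the order.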