path: root/llvm/lib/Bitcode
author     Duncan P. N. Exon Smith <dexonsmith@apple.com>  2016-04-23 04:42:39 +0000
committer  Duncan P. N. Exon Smith <dexonsmith@apple.com>  2016-04-23 04:42:39 +0000
commit     1483fff271d01ee16c55c0a4c678aac9fde45951 (patch)
tree       a428659a633b9cfe3ba5221674056e3bedb3d198 /llvm/lib/Bitcode
parent     004eb55feb1f0a44afe462b48061c9b1ab78eab0 (diff)
BitcodeWriter: Emit distinct nodes before uniqued nodes
When an operand of a distinct node hasn't been read yet, the reader can use a DistinctMDOperandPlaceholder. This is much cheaper than forward referencing from a uniqued node.

Change ValueEnumerator::organizeMetadata to partition distinct nodes and uniqued nodes to reduce the overhead of cycles broken by distinct nodes.

Mehdi measured this for me; this removes most of the RAUW from the importing step of -flto=thin, even after a WIP patch that removes string-based DITypeRefs (introducing many more cycles to the metadata graph).

llvm-svn: 267276
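To illustrate the ordering the patch introduces, here is a minimal standalone sketch (not LLVM code; the Kind enum, Item struct, and typeOrder helper are invented for illustration) of the same sort-key partitioning: strings first, then leaf constants, then distinct nodes, then uniqued nodes, with the original enumeration ID as a tie-breaker.

// Standalone sketch of the partitioning scheme; names are hypothetical.
#include <algorithm>
#include <cstdio>
#include <tuple>
#include <vector>

enum class Kind { String, Constant, DistinctNode, UniquedNode };

// Mirrors the role of getMetadataTypeOrder(): strings first, then leaf
// constants, then distinct nodes, then uniqued nodes.
static unsigned typeOrder(Kind K) {
  switch (K) {
  case Kind::String:       return 0;
  case Kind::Constant:     return 1;
  case Kind::DistinctNode: return 2;
  case Kind::UniquedNode:  return 3;
  }
  return 3;
}

struct Item {
  unsigned F;  // owning function index (0 = module level)
  Kind K;      // metadata kind
  unsigned ID; // original enumeration order, used as a tie-breaker
};

int main() {
  std::vector<Item> Order = {
      {0, Kind::UniquedNode, 0},
      {0, Kind::DistinctNode, 1},
      {0, Kind::String, 2},
      {0, Kind::Constant, 3},
  };
  // Same comparator shape as the patched organizeMetadata(): sort by
  // (function, type order, ID).
  std::sort(Order.begin(), Order.end(), [](const Item &LHS, const Item &RHS) {
    return std::make_tuple(LHS.F, typeOrder(LHS.K), LHS.ID) <
           std::make_tuple(RHS.F, typeOrder(RHS.K), RHS.ID);
  });
  for (const Item &I : Order)
    std::printf("ID %u (order %u)\n", I.ID, typeOrder(I.K));
  // Prints IDs 2, 3, 1, 0: string, constant, distinct, uniqued.
}

Because every ID is unique, the comparator already yields a deterministic order without std::stable_sort, which is the same argument the patched code below relies on.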
Diffstat (limited to 'llvm/lib/Bitcode')
-rw-r--r--  llvm/lib/Bitcode/Writer/ValueEnumerator.cpp  |  24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index 4de0d261c57..72b0048b031 100644
--- a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -647,6 +647,22 @@ void ValueEnumerator::EnumerateFunctionLocalMetadata(
EnumerateValue(Local->getValue());
}
+static unsigned getMetadataTypeOrder(const Metadata *MD) {
+ // Strings are emitted in bulk and must come first.
+ if (isa<MDString>(MD))
+ return 0;
+
+ // ConstantAsMetadata doesn't reference anything. We may as well shuffle it
+ // to the front since we can detect it.
+ auto *N = dyn_cast<MDNode>(MD);
+ if (!N)
+ return 1;
+
+ // The reader is fast at forward references for distinct node operands, but
+ // slow when uniqued operands are unresolved.
+ return N->isDistinct() ? 2 : 3;
+}
+
void ValueEnumerator::organizeMetadata() {
assert(MetadataMap.size() == MDs.size() &&
"Metadata map and vector out of sync");
@@ -668,14 +684,10 @@ void ValueEnumerator::organizeMetadata() {
// be unique, the result of std::sort will be deterministic. There's no need
// for std::stable_sort.
std::sort(Order.begin(), Order.end(), [this](MDIndex LHS, MDIndex RHS) {
- return std::make_tuple(LHS.F, !isa<MDString>(LHS.get(MDs)), LHS.ID) <
- std::make_tuple(RHS.F, !isa<MDString>(RHS.get(MDs)), RHS.ID);
+ return std::make_tuple(LHS.F, getMetadataTypeOrder(LHS.get(MDs)), LHS.ID) <
+ std::make_tuple(RHS.F, getMetadataTypeOrder(RHS.get(MDs)), RHS.ID);
});
- // Return early if nothing is moving to functions and there are no strings.
- if (!Order.back().F && !isa<MDString>(Order.front().get(MDs)))
- return;
-
// Rebuild MDs, index the metadata ranges for each function in FunctionMDs,
// and fix up MetadataMap.
std::vector<const Metadata *> OldMDs = std::move(MDs);