| author | Mehdi Amini <mehdi.amini@apple.com> | 2017-01-18 18:36:21 +0000 |
|---|---|---|
| committer | Mehdi Amini <mehdi.amini@apple.com> | 2017-01-18 18:36:21 +0000 |
| commit | 67d2cc1fadcf6283258df5033a9913c24154a4ca | |
| tree | 77017d35e9b8f75f062d0e56874c352fb62d7240 /llvm | |
| parent | 19b911cb75f2c37e1e16ea4e7b2046df96c8e5e4 | |
[ThinLTO] Add a recursive step in Metadata lazy-loading
Summary:
Without this, we stress the RAUW of uniqued nodes, which is a costly
operation. This change limits the number of RAUWs performed during
lazy-loading and is very effective on the total link time of opt with
ThinLTO.

Before:
  real 4m4.587s   user 15m3.401s   sys 0m23.616s

After:
  real 3m25.261s  user 12m22.132s  sys 0m24.152s
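To make the cost concrete, here is a minimal sketch (illustrative only, not code from this patch) that uses the public MDNode API to build a uniqued node on top of a temporary forward reference and then resolve it with RAUW; re-uniquing every user of the temporary is the work the recursive step avoids by loading the operand up front.

```cpp
// Illustrative only: the RAUW-on-uniqued-metadata pattern that the recursive
// lazy-loading step is designed to avoid.
#include "llvm/ADT/None.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;

  // The reader needs an operand that has not been parsed yet, so it hands out
  // a temporary node and builds the (uniqued) user on top of it.
  TempMDTuple TempOp = MDNode::getTemporary(Ctx, None);
  Metadata *UserOps[] = {TempOp.get()};
  MDNode *User = MDNode::get(Ctx, UserOps); // uniqued node referencing the temp

  // Once the real operand is parsed, the temporary is RAUW'd away. Every
  // uniqued node that referenced it (here, User) must be re-uniqued; on a
  // large ThinLTO link that re-uniquing is what dominates the time above.
  MDNode *RealOp = MDNode::get(Ctx, None);
  TempOp->replaceAllUsesWith(RealOp);
  (void)User;
  return 0;
}
```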
Reviewers: tejohnson, pcc
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D28751
llvm-svn: 292420
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/Bitcode/Reader/MetadataLoader.cpp | 21 |
|---|---|---|
| -rw-r--r-- | llvm/test/ThinLTO/X86/lazyload_metadata.ll | 6 |

2 files changed, 20 insertions(+), 7 deletions(-)
```diff
diff --git a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
index 47019c50ba0..f76f7f8ef40 100644
--- a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
+++ b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -768,13 +768,12 @@ void MetadataLoader::MetadataLoaderImpl::lazyLoadOneMetadata(
     unsigned ID, PlaceholderQueue &Placeholders) {
   assert(ID < (MDStringRef.size()) + GlobalMetadataBitPosIndex.size());
   assert(ID >= MDStringRef.size() && "Unexpected lazy-loading of MDString");
-#ifndef NDEBUG
   // Lookup first if the metadata hasn't already been loaded.
   if (auto *MD = MetadataList.lookup(ID)) {
     auto *N = dyn_cast_or_null<MDNode>(MD);
-    assert(N && N->isTemporary() && "Lazy loading an already loaded metadata");
+    if (!N->isTemporary())
+      return;
   }
-#endif
   SmallVector<uint64_t, 64> Record;
   StringRef Blob;
   IndexCursor.JumpToBit(GlobalMetadataBitPosIndex[ID - MDStringRef.size()]);
@@ -827,8 +826,22 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
   auto getMD = [&](unsigned ID) -> Metadata * {
     if (ID < MDStringRef.size())
       return lazyLoadOneMDString(ID);
-    if (!IsDistinct)
+    if (!IsDistinct) {
+      if (auto *MD = MetadataList.lookup(ID))
+        return MD;
+      // If lazy-loading is enabled, we try recursively to load the operand
+      // instead of creating a temporary.
+      if (ID < (MDStringRef.size() + GlobalMetadataBitPosIndex.size())) {
+        // Create a temporary for the node that is referencing the operand we
+        // will lazy-load. It is needed before recursing in case there are
+        // uniquing cycles.
+        MetadataList.getMetadataFwdRef(NextMetadataNo);
+        lazyLoadOneMetadata(ID, Placeholders);
+        return MetadataList.lookup(ID);
+      }
+      // Return a temporary.
       return MetadataList.getMetadataFwdRef(ID);
+    }
     if (auto *MD = MetadataList.getMetadataIfResolved(ID))
       return MD;
     return &Placeholders.getPlaceholderOp(ID);
diff --git a/llvm/test/ThinLTO/X86/lazyload_metadata.ll b/llvm/test/ThinLTO/X86/lazyload_metadata.ll
index 3c4345831aa..7bd3e641bc7 100644
--- a/llvm/test/ThinLTO/X86/lazyload_metadata.ll
+++ b/llvm/test/ThinLTO/X86/lazyload_metadata.ll
@@ -17,7 +17,7 @@
 ; RUN:          -o /dev/null -disable-ondemand-mds-loading -stats \
 ; RUN:  2>&1 | FileCheck %s -check-prefix=NOTLAZY
 ; NOTLAZY: 58 bitcode-reader - Number of Metadata records loaded
-; NOTLAZY: 8 bitcode-reader - Number of MDStrings loaded
+; NOTLAZY: 6 bitcode-reader - Number of MDStrings loaded
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
@@ -48,7 +48,7 @@ define void @globalfunc3(i32 %arg) {
 !3 = !{!"3"}
 !4 = !{!"4"}
 !5 = !{!"5"}
-!6 = !{!"6"}
+!6 = !{!9}
 !7 = !{!"7"}
 !8 = !{!"8"}
-!9 = !{!"9"}
+!9 = !{!6}
```
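Note the ordering in the new getMD path: a forward reference for the node currently being parsed (NextMetadataNo) is registered via MetadataList.getMetadataFwdRef before recursing into lazyLoadOneMetadata, and that ordering is what lets a uniquing cycle such as the !6 <-> !9 pair added to the test terminate. The standalone sketch below models the same ordering with hypothetical Node and Loader types (not the LLVM classes), purely to show why the placeholder must exist before the recursive call.

```cpp
#include <cassert>
#include <cstdio>
#include <map>
#include <memory>
#include <vector>

// Hypothetical stand-ins (not LLVM types) for a reader that materializes
// nodes on demand; they only exist to show the "register a forward reference
// before recursing" ordering used by the patch.
struct Node {
  int ID = 0;
  std::vector<Node *> Operands;
  bool Materialized = false; // false = still just a forward reference
};

struct Loader {
  // On-disk shape: each record lists the IDs of its operands.
  std::map<int, std::vector<int>> Records;
  std::map<int, std::unique_ptr<Node>> Loaded;

  Node *getFwdRef(int ID) {
    auto &Slot = Loaded[ID];
    if (!Slot) {
      Slot = std::make_unique<Node>();
      Slot->ID = ID;
    }
    return Slot.get();
  }

  Node *loadOne(int ID) {
    // Register a forward reference for this node *before* touching operands,
    // so a cycle such as 6 -> 9 -> 6 finds it and stops recursing.
    Node *N = getFwdRef(ID);
    if (N->Materialized)
      return N;
    N->Materialized = true; // mark before the recursive calls to break cycles
    for (int OpID : Records.at(ID))
      N->Operands.push_back(loadOne(OpID)); // the recursive step
    return N;
  }
};

int main() {
  Loader L;
  L.Records = {{6, {9}}, {9, {6}}}; // mirrors !6 = !{!9} and !9 = !{!6}
  Node *Six = L.loadOne(6);
  assert(Six->Operands.size() == 1 && Six->Operands[0]->ID == 9);
  assert(Six->Operands[0]->Operands[0] == Six); // the cycle is closed
  std::printf("node %d loaded with cyclic operand %d\n", Six->ID,
              Six->Operands[0]->ID);
  return 0;
}
```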

