path: root/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
author    Zachary Turner <zturner@google.com>    2017-05-04 23:53:54 +0000
committer Zachary Turner <zturner@google.com>    2017-05-04 23:53:54 +0000
commit    bedc85fb4b9dbea4f2a8e5ca5ec374e9c233fe33 (patch)
tree      e76416bbb24d106b270df57ee34409eb8d1d199c /llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
parent    1eb9a0297cb639323e97ca7f02123b035bf0b24b (diff)
[pdb] Don't verify TPI hash values up front.
Verifying the hash values as we are currently doing results in iterating every type record before the user even tries to access the first one, and the API user has no control over, or ability to hook into, this process. As a result, when the user wants to iterate over types to print or index them, a second iteration over the same list of types is required. When there are upwards of 1,000,000 type records, this is obviously quite undesirable.

This patch moves the verification out of TpiStream, and llvm-pdbdump hooks a hash verification visitor into its normal dumping process. So we still verify the hash records, but without requiring a second iteration over the type stream.

Differential Revision: https://reviews.llvm.org/D32873

llvm-svn: 302206
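Conceptually, the removed up-front check is replaced by letting the consumer fold verification into whatever pass it already makes over the type stream. The sketch below illustrates the idea using the visitor types that appear in the deleted verifyHashValues() (TpiHashVerifier, TypeDeserializer, TypeVisitorCallbackPipeline, CVTypeVisitor). It is a minimal sketch, not the actual llvm-pdbdump code; the function name, the DumpCallbacks parameter, and the exact header paths are assumptions.

// Minimal sketch of single-pass dump-plus-verify; names and header paths
// are illustrative assumptions, not the llvm-pdbdump implementation.
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/DebugInfo/PDB/Native/TpiHashing.h" // assumed location of TpiHashVerifier
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Error.h"

using namespace llvm;
using namespace llvm::codeview;
using namespace llvm::pdb;

// Types is the TPI record array; HashValues and NumHashBuckets come from the
// TpiStream accessors visible in this diff (getHashValues, getNumHashBuckets).
static Error dumpAndVerifyTpi(CVTypeArray &Types,
                              FixedStreamArray<support::ulittle32_t> HashValues,
                              uint32_t NumHashBuckets,
                              TypeVisitorCallbacks &DumpCallbacks) {
  TpiHashVerifier Verifier(HashValues, NumHashBuckets);
  TypeDeserializer Deserializer;

  // Chain deserialization, hash verification, and the caller's dumping
  // callbacks so every record is visited exactly once.
  TypeVisitorCallbackPipeline Pipeline;
  Pipeline.addCallbackToPipeline(Deserializer);
  Pipeline.addCallbackToPipeline(Verifier);
  Pipeline.addCallbackToPipeline(DumpCallbacks);

  CVTypeVisitor Visitor(Pipeline);
  return Visitor.visitTypeStream(Types);
}

A dumper passes its own TypeVisitorCallbacks subclass as DumpCallbacks, so the hash check rides along with the dump instead of forcing the extra up-front pass that the deleted verifyHashValues() performed during reload().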
Diffstat (limited to 'llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp')
-rw-r--r--  llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp  26
1 file changed, 3 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp b/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
index 5fef3edf8c2..c0999d93dbb 100644
--- a/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
+++ b/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
@@ -39,20 +39,6 @@ TpiStream::TpiStream(const PDBFile &File,
TpiStream::~TpiStream() = default;
-// Verifies that a given type record matches with a given hash value.
-// Currently we only verify SRC_LINE records.
-Error TpiStream::verifyHashValues() {
- TpiHashVerifier Verifier(HashValues, Header->NumHashBuckets);
- TypeDeserializer Deserializer;
-
- TypeVisitorCallbackPipeline Pipeline;
- Pipeline.addCallbackToPipeline(Deserializer);
- Pipeline.addCallbackToPipeline(Verifier);
-
- CVTypeVisitor Visitor(Pipeline);
- return Visitor.visitTypeStream(TypeRecords);
-}
-
Error TpiStream::reload() {
BinaryStreamReader Reader(*Stream);
@@ -98,7 +84,7 @@ Error TpiStream::reload() {
// There should be a hash value for every type record, or no hashes at all.
uint32_t NumHashValues =
Header->HashValueBuffer.Length / sizeof(ulittle32_t);
- if (NumHashValues != NumTypeRecords() && NumHashValues != 0)
+ if (NumHashValues != getNumTypeRecords() && NumHashValues != 0)
return make_error<RawError>(
raw_error_code::corrupt_file,
"TPI hash count does not match with the number of type records.");
@@ -122,12 +108,6 @@ Error TpiStream::reload() {
}
HashStream = std::move(HS);
-
- // TPI hash table is a parallel array for the type records.
- // Verify that the hash values match with type records.
- if (NumHashValues > 0)
- if (auto EC = verifyHashValues())
- return EC;
}
return Error::success();
@@ -142,7 +122,7 @@ uint32_t TpiStream::TypeIndexBegin() const { return Header->TypeIndexBegin; }
uint32_t TpiStream::TypeIndexEnd() const { return Header->TypeIndexEnd; }
-uint32_t TpiStream::NumTypeRecords() const {
+uint32_t TpiStream::getNumTypeRecords() const {
return TypeIndexEnd() - TypeIndexBegin();
}
@@ -154,7 +134,7 @@ uint16_t TpiStream::getTypeHashStreamAuxIndex() const {
return Header->HashAuxStreamIndex;
}
-uint32_t TpiStream::NumHashBuckets() const { return Header->NumHashBuckets; }
+uint32_t TpiStream::getNumHashBuckets() const { return Header->NumHashBuckets; }
uint32_t TpiStream::getHashKeySize() const { return Header->HashKeySize; }
FixedStreamArray<support::ulittle32_t> TpiStream::getHashValues() const {