summaryrefslogtreecommitdiffstats
path: root/llvm/lib/DebugInfo/PDB/Raw
diff options
context:
space:
mode:
authorZachary Turner <zturner@google.com>2016-05-27 03:51:53 +0000
committerZachary Turner <zturner@google.com>2016-05-27 03:51:53 +0000
commitb393d9535981685f2741107784eeab8e650c567c (patch)
tree56331204fbef0028f0c5d0f5ace58aeeb5ced9cb /llvm/lib/DebugInfo/PDB/Raw
parent18e9102a852193adda32906270fc5fe0bfe73fcb (diff)
downloadbcm5719-llvm-b393d9535981685f2741107784eeab8e650c567c.tar.gz
bcm5719-llvm-b393d9535981685f2741107784eeab8e650c567c.zip
[codeview] Remove StreamReader copying method.
Since we want to move toward zero-copy access to stream data, we want to remove all instances of copying operations. So get rid of some of those here.

Differential Revision: http://reviews.llvm.org/D20720
Reviewed By: ruiu
llvm-svn: 270960
Diffstat (limited to 'llvm/lib/DebugInfo/PDB/Raw')
-rw-r--r--llvm/lib/DebugInfo/PDB/Raw/NameHashTable.cpp9
-rw-r--r--llvm/lib/DebugInfo/PDB/Raw/PublicsStream.cpp50
2 files changed, 15 insertions, 44 deletions
diff --git a/llvm/lib/DebugInfo/PDB/Raw/NameHashTable.cpp b/llvm/lib/DebugInfo/PDB/Raw/NameHashTable.cpp
index 7eae7489a0e..7206907d600 100644
--- a/llvm/lib/DebugInfo/PDB/Raw/NameHashTable.cpp
+++ b/llvm/lib/DebugInfo/PDB/Raw/NameHashTable.cpp
@@ -105,11 +105,9 @@ Error NameHashTable::load(codeview::StreamReader &Stream) {
if (auto EC = Stream.readObject(HashCount))
return EC;
- std::vector<support::ulittle32_t> BucketArray(*HashCount);
- if (auto EC = Stream.readArray<support::ulittle32_t>(BucketArray))
+ if (auto EC = Stream.readArray(IDs, *HashCount))
return make_error<RawError>(raw_error_code::corrupt_file,
"Could not read bucket array");
- IDs.assign(BucketArray.begin(), BucketArray.end());
if (Stream.bytesRemaining() < sizeof(support::ulittle32_t))
return make_error<RawError>(raw_error_code::corrupt_file,
@@ -154,4 +152,7 @@ uint32_t NameHashTable::getIDForString(StringRef Str) const {
return IDs[0];
}
-ArrayRef<uint32_t> NameHashTable::name_ids() const { return IDs; }
+codeview::FixedStreamArray<support::ulittle32_t>
+NameHashTable::name_ids() const {
+ return IDs;
+}
diff --git a/llvm/lib/DebugInfo/PDB/Raw/PublicsStream.cpp b/llvm/lib/DebugInfo/PDB/Raw/PublicsStream.cpp
index aba6a147afc..8d4c58e6569 100644
--- a/llvm/lib/DebugInfo/PDB/Raw/PublicsStream.cpp
+++ b/llvm/lib/DebugInfo/PDB/Raw/PublicsStream.cpp
@@ -70,21 +70,6 @@ struct PublicsStream::GSIHashHeader {
ulittle32_t NumBuckets;
};
-// This is HRFile.
-struct PublicsStream::HashRecord {
- ulittle32_t Off; // Offset in the symbol record stream
- ulittle32_t CRef;
-};
-
-// This struct is defined as "SO" in langapi/include/pdb.h.
-namespace {
-struct SectionOffset {
- ulittle32_t Off;
- ulittle16_t Isect;
- char Padding[2];
-};
-}
-
PublicsStream::PublicsStream(PDBFile &File, uint32_t StreamNum)
: Pdb(File), StreamNum(StreamNum), Stream(StreamNum, File) {}
@@ -116,18 +101,18 @@ Error PublicsStream::reload() {
"Publics Stream does not contain a header.");
// An array of HashRecord follows. Read them.
- if (HashHdr->HrSize % sizeof(HashRecord))
+ if (HashHdr->HrSize % sizeof(PSHashRecord))
return make_error<RawError>(raw_error_code::corrupt_file,
"Invalid HR array size.");
- HashRecords.resize(HashHdr->HrSize / sizeof(HashRecord));
- if (auto EC = Reader.readArray<HashRecord>(HashRecords))
+ uint32_t NumHashRecords = HashHdr->HrSize / sizeof(PSHashRecord);
+ if (auto EC = Reader.readArray(HashRecords, NumHashRecords))
return make_error<RawError>(raw_error_code::corrupt_file,
"Could not read an HR array");
// A bitmap of a fixed length follows.
size_t BitmapSizeInBits = alignTo(IPHR_HASH + 1, 32);
- std::vector<uint8_t> Bitmap(BitmapSizeInBits / 8);
- if (auto EC = Reader.readArray<uint8_t>(Bitmap))
+ uint32_t NumBitmapEntries = BitmapSizeInBits / 8;
+ if (auto EC = Reader.readBytes(NumBitmapEntries, Bitmap))
return make_error<RawError>(raw_error_code::corrupt_file,
"Could not read a bitmap.");
for (uint8_t B : Bitmap)
@@ -139,40 +124,25 @@ Error PublicsStream::reload() {
// corrupted streams.
// Hash buckets follow.
- std::vector<ulittle32_t> TempHashBuckets(NumBuckets);
- if (auto EC = Reader.readArray<ulittle32_t>(TempHashBuckets))
+ if (auto EC = Reader.readArray(HashBuckets, NumBuckets))
return make_error<RawError>(raw_error_code::corrupt_file,
"Hash buckets corrupted.");
- HashBuckets.resize(NumBuckets);
- std::copy(TempHashBuckets.begin(), TempHashBuckets.end(),
- HashBuckets.begin());
// Something called "address map" follows.
- std::vector<ulittle32_t> TempAddressMap(Header->AddrMap / sizeof(uint32_t));
- if (auto EC = Reader.readArray<ulittle32_t>(TempAddressMap))
+ uint32_t NumAddressMapEntries = Header->AddrMap / sizeof(uint32_t);
+ if (auto EC = Reader.readArray(AddressMap, NumAddressMapEntries))
return make_error<RawError>(raw_error_code::corrupt_file,
"Could not read an address map.");
- AddressMap.resize(Header->AddrMap / sizeof(uint32_t));
- std::copy(TempAddressMap.begin(), TempAddressMap.end(), AddressMap.begin());
// Something called "thunk map" follows.
- std::vector<ulittle32_t> TempThunkMap(Header->NumThunks);
- ThunkMap.resize(Header->NumThunks);
- if (auto EC = Reader.readArray<ulittle32_t>(TempThunkMap))
+ if (auto EC = Reader.readArray(ThunkMap, Header->NumThunks))
return make_error<RawError>(raw_error_code::corrupt_file,
"Could not read a thunk map.");
- ThunkMap.resize(Header->NumThunks);
- std::copy(TempThunkMap.begin(), TempThunkMap.end(), ThunkMap.begin());
// Something called "section map" follows.
- std::vector<SectionOffset> Offsets(Header->NumSections);
- if (auto EC = Reader.readArray<SectionOffset>(Offsets))
+ if (auto EC = Reader.readArray(SectionOffsets, Header->NumSections))
return make_error<RawError>(raw_error_code::corrupt_file,
"Could not read a section map.");
- for (auto &SO : Offsets) {
- SectionOffsets.push_back(SO.Off);
- SectionOffsets.push_back(SO.Isect);
- }
if (Reader.bytesRemaining() > 0)
return make_error<RawError>(raw_error_code::corrupt_file,
OpenPOWER on IntegriCloud