From c7bece56faa5eef1c3d141d0c0b0b68b28a9aed2 Mon Sep 17 00:00:00 2001
From: Greg Clayton
Date: Fri, 25 Jan 2013 18:06:21 +0000
Subject: Major fixes to allow reading files that are over 4GB.

The main problem was that the DataExtractor used 32 bit offsets as a data
cursor, and since we mmap all of our object files, a core file larger than
4GB could push that cursor past the 4GB boundary.

So I defined a new "lldb::offset_t" which should be used for all file offsets.

After making this change, I temporarily enabled warnings for data loss and for
unexpected implicit conversions and found a ton of things that I fixed.

Any functions that take an index should use "size_t" for indexes and should
also return "size_t" for sizes of collections.

llvm-svn: 173463
---
 lldb/source/Core/Address.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'lldb/source/Core/Address.cpp')

diff --git a/lldb/source/Core/Address.cpp b/lldb/source/Core/Address.cpp
index 9d4d8706c5a..5c9ed1a51fb 100644
--- a/lldb/source/Core/Address.cpp
+++ b/lldb/source/Core/Address.cpp
@@ -86,7 +86,7 @@ ReadUIntMax64 (ExecutionContextScope *exe_scope, const Address &address, uint32_
         if (GetByteOrderAndAddressSize (exe_scope, address, byte_order, addr_size))
         {
             DataExtractor data (&buf, sizeof(buf), byte_order, addr_size);
-            uint32_t offset = 0;
+            lldb::offset_t offset = 0;
             uval64 = data.GetU64(&offset);
         }
         else
@@ -696,8 +696,8 @@ Address::Dump (Stream *s, ExecutionContextScope *exe_scope, DumpStyle style, Dum
                                        stop_if_block_is_inlined_function,
                                        &variable_list);
 
-            uint32_t num_variables = variable_list.GetSize();
-            for (uint32_t var_idx = 0; var_idx < num_variables; ++var_idx)
+            const size_t num_variables = variable_list.GetSize();
+            for (size_t var_idx = 0; var_idx < num_variables; ++var_idx)
             {
                 Variable *var = variable_list.GetVariableAtIndex (var_idx).get();
                 if (var && var->LocationIsValidForAddress (*this))
--
cgit v1.2.3
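
For context, below is a minimal, self-contained sketch of the pattern this patch moves to: a 64 bit offset used as the data cursor instead of uint32_t, and size_t used for collection indexes and sizes. MiniExtractor and its GetU64 method are hypothetical stand-ins for illustration only, not LLDB's actual DataExtractor API; the offset_t typedef here assumes the role that lldb::offset_t plays in the patch.

    // Hypothetical illustration of a 64 bit data cursor; not LLDB code.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    typedef uint64_t offset_t;  // stand-in for lldb::offset_t: offsets past 4GB never wrap

    class MiniExtractor
    {
    public:
        MiniExtractor (const uint8_t *bytes, offset_t size) : m_bytes (bytes), m_size (size) {}

        // Reads a uint64_t (host byte order) at *cursor and advances the cursor.
        // Taking offset_t instead of uint32_t keeps the cursor valid for data
        // that lives beyond the 4GB boundary of an mmap'ed file.
        uint64_t GetU64 (offset_t *cursor) const
        {
            if (m_size < sizeof(uint64_t) || *cursor > m_size - sizeof(uint64_t))
                return 0;  // out of bounds: leave the cursor unchanged
            uint64_t value = 0;
            std::memcpy (&value, m_bytes + *cursor, sizeof(value));
            *cursor += sizeof(value);
            return value;
        }

    private:
        const uint8_t *m_bytes;
        offset_t m_size;
    };

    int main ()
    {
        std::vector<uint8_t> buf (16, 0);
        buf[0] = 0x2a;  // value 42 on a little-endian host

        MiniExtractor data (buf.data(), buf.size());
        offset_t offset = 0;  // 64 bit cursor, as in the patch
        uint64_t uval64 = data.GetU64 (&offset);
        std::printf ("value=%llu next offset=%llu\n",
                     (unsigned long long) uval64, (unsigned long long) offset);

        // Indexes and collection sizes use size_t, mirroring the loop change above.
        const size_t num_elements = buf.size();
        for (size_t idx = 0; idx < num_elements; ++idx) { /* ... */ }
        return 0;
    }

The same shape appears in both hunks of the patch: the cursor handed to GetU64 is widened to 64 bits, and the loop over variables switches its count and index to size_t so no implicit narrowing occurs.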