author    Greg Clayton <gclayton@apple.com>  2012-02-22 04:37:26 +0000
committer Greg Clayton <gclayton@apple.com>  2012-02-22 04:37:26 +0000
commit    a9f40ad80a68f753e510aedf77b49983b8064ffe (patch)
tree      ba59f0d5178d663f8bea4232f6e0824601b5a449 /lldb/source
parent    337cfaf75734ebef1ac1d400399eacbbb866ac4d (diff)
download  bcm5719-llvm-a9f40ad80a68f753e510aedf77b49983b8064ffe.tar.gz
          bcm5719-llvm-a9f40ad80a68f753e510aedf77b49983b8064ffe.zip
To improve stepping performance, I added the ability to outlaw all memory accesses
to the __PAGEZERO segment on Darwin. The dynamic loader now correctly doesn't slide __PAGEZERO, and it also registers it as an invalid region of memory. This lets us avoid making any memory requests to the local or remote debug session for addresses in this region. Stepping performance improves when uninitialized local variables point into __PAGEZERO, because we no longer issue the memory read or write request at all.

llvm-svn: 151128
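The core idea in miniature: keep a sorted list of address ranges that are known to be inaccessible, and consult it before any memory request leaves the debugger. The following is a simplified, hypothetical standalone sketch of that bookkeeping, not the actual LLDB MemoryCache/RangeArray classes this patch touches:

// Hypothetical sketch (not LLDB's classes): a sorted list of
// inaccessible ranges checked before issuing any real memory read.
#include <algorithm>
#include <cstdint>
#include <vector>

struct InvalidRange {
    uint64_t base;
    uint64_t size;
};

class InvalidRegionCache {
    std::vector<InvalidRange> m_ranges; // kept sorted by base address
public:
    void Add(uint64_t base, uint64_t size) {
        if (size == 0)
            return; // mirrors the byte_size > 0 guard in AddInvalidRange below
        m_ranges.push_back({base, size});
        std::sort(m_ranges.begin(), m_ranges.end(),
                  [](const InvalidRange &a, const InvalidRange &b) {
                      return a.base < b.base;
                  });
    }
    // True if addr lies inside any registered range; the caller should
    // then fail the access without ever talking to the debuggee.
    bool Contains(uint64_t addr) const {
        for (const InvalidRange &r : m_ranges)
            if (addr >= r.base && addr - r.base < r.size)
                return true;
        return false;
    }
};

On 64-bit Darwin the __PAGEZERO segment typically spans [0x0, 0x100000000), so reading through an uninitialized pointer such as 0x10 can now fail immediately inside the debugger instead of costing a round trip to debugserver.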
Diffstat (limited to 'lldb/source')
-rw-r--r--  lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOSXDYLD.cpp | 79
-rw-r--r--  lldb/source/Target/Memory.cpp | 51
-rw-r--r--  lldb/source/Target/Process.cpp | 13
3 files changed, 116 insertions(+), 27 deletions(-)
diff --git a/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOSXDYLD.cpp b/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOSXDYLD.cpp
index e77f948cba8..4f78e3e88e8 100644
--- a/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOSXDYLD.cpp
+++ b/lldb/source/Plugins/DynamicLoader/MacOSX-DYLD/DynamicLoaderMacOSXDYLD.cpp
@@ -433,39 +433,80 @@ DynamicLoaderMacOSXDYLD::UpdateImageLoadAddress (Module *module, DYLDImageInfo&
SectionList *section_list = image_object_file->GetSectionList ();
if (section_list)
{
+ std::vector<uint32_t> inaccessible_segment_indexes;
// We now know the slide amount, so go through all sections
// and update the load addresses with the correct values.
uint32_t num_segments = info.segments.size();
for (uint32_t i=0; i<num_segments; ++i)
{
+ // Only load a segment if it has protections. Things like
+ // __PAGEZERO don't have any protections, and they shouldn't
+ // be slid.
SectionSP section_sp(section_list->FindSectionByName(info.segments[i].name));
- const addr_t new_section_load_addr = info.segments[i].vmaddr + info.slide;
- static ConstString g_section_name_LINKEDIT ("__LINKEDIT");
- if (section_sp)
+ if (info.segments[i].maxprot == 0)
+ {
+ inaccessible_segment_indexes.push_back(i);
+ }
+ else
{
- // Don't ever load any __LINKEDIT sections since the ones in the shared
- // cached will be coalesced into a single section and we will get warnings
- // about multiple sections mapping to the same address.
- if (section_sp->GetName() != g_section_name_LINKEDIT)
+ const addr_t new_section_load_addr = info.segments[i].vmaddr + info.slide;
+ static ConstString g_section_name_LINKEDIT ("__LINKEDIT");
+
+ if (section_sp)
{
- const addr_t old_section_load_addr = m_process->GetTarget().GetSectionLoadList().GetSectionLoadAddress (section_sp.get());
- if (old_section_load_addr == LLDB_INVALID_ADDRESS ||
- old_section_load_addr != new_section_load_addr)
+ // Don't ever load any __LINKEDIT sections since the ones in the shared
+ // cache will be coalesced into a single section and we will get warnings
+ // about multiple sections mapping to the same address.
+ if (section_sp->GetName() != g_section_name_LINKEDIT)
{
- if (m_process->GetTarget().GetSectionLoadList().SetSectionLoadAddress (section_sp.get(), new_section_load_addr))
- changed = true;
+ const addr_t old_section_load_addr = m_process->GetTarget().GetSectionLoadList().GetSectionLoadAddress (section_sp.get());
+ if (old_section_load_addr == LLDB_INVALID_ADDRESS ||
+ old_section_load_addr != new_section_load_addr)
+ {
+ if (m_process->GetTarget().GetSectionLoadList().SetSectionLoadAddress (section_sp.get(), new_section_load_addr))
+ changed = true;
+ }
}
}
+ else
+ {
+ Host::SystemLog (Host::eSystemLogWarning,
+ "warning: unable to find and load segment named '%s' at 0x%llx in '%s/%s' in macosx dynamic loader plug-in.\n",
+ info.segments[i].name.AsCString("<invalid>"),
+ (uint64_t)new_section_load_addr,
+ image_object_file->GetFileSpec().GetDirectory().AsCString(),
+ image_object_file->GetFileSpec().GetFilename().AsCString());
+ }
}
- else
+ }
+
+ // If we loaded the file (it changed) and we have segments that
+ // are not readable or writable, add them to the invalid memory
+ // region cache for the process. This will typically only be
+ // the __PAGEZERO segment in the main executable. We might be able
+ // to apply this more generally to other segments that have no
+ // protections in the future, but for now we are going to just
+ // do __PAGEZERO.
+ if (changed && !inaccessible_segment_indexes.empty())
+ {
+ for (uint32_t i=0; i<inaccessible_segment_indexes.size(); ++i)
{
- Host::SystemLog (Host::eSystemLogWarning,
- "warning: unable to find and load segment named '%s' at 0x%llx in '%s/%s' in macosx dynamic loader plug-in.\n",
- info.segments[i].name.AsCString("<invalid>"),
- (uint64_t)new_section_load_addr,
- image_object_file->GetFileSpec().GetDirectory().AsCString(),
- image_object_file->GetFileSpec().GetFilename().AsCString());
+ const uint32_t seg_idx = inaccessible_segment_indexes[i];
+ SectionSP section_sp(section_list->FindSectionByName(info.segments[seg_idx].name));
+
+ if (section_sp)
+ {
+ static ConstString g_pagezero_section_name("__PAGEZERO");
+ if (g_pagezero_section_name == section_sp->GetName())
+ {
+ // __PAGEZERO never slides...
+ const lldb::addr_t vmaddr = info.segments[seg_idx].vmaddr;
+ const lldb::addr_t vmsize = info.segments[seg_idx].vmsize;
+ Process::LoadRange pagezero_range (vmaddr, vmsize);
+ m_process->AddInvalidMemoryRegion(pagezero_range);
+ }
+ }
}
}
}
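For context on the maxprot == 0 test above: the value comes straight from the segment's Mach-O load command, and __PAGEZERO is the canonical segment with no permitted access. A hedged sketch of inspecting it directly (Darwin-only; assumes `header` points at a valid, fully mapped 64-bit image; field names are from <mach-o/loader.h>):

// Walk the load commands of a 64-bit Mach-O header and report segments
// whose maximum protection is 0 (no access ever allowed), e.g. __PAGEZERO.
#include <mach-o/loader.h>
#include <cstdint>
#include <cstdio>

void ReportInaccessibleSegments(const mach_header_64 *header) {
    const load_command *cmd =
        reinterpret_cast<const load_command *>(header + 1);
    for (uint32_t i = 0; i < header->ncmds; ++i) {
        if (cmd->cmd == LC_SEGMENT_64) {
            const segment_command_64 *seg =
                reinterpret_cast<const segment_command_64 *>(cmd);
            // maxprot == 0: never slide it, never try to read or write it.
            if (seg->maxprot == 0)
                printf("%.16s: [0x%llx, 0x%llx)\n", seg->segname,
                       (unsigned long long)seg->vmaddr,
                       (unsigned long long)(seg->vmaddr + seg->vmsize));
        }
        cmd = reinterpret_cast<const load_command *>(
            reinterpret_cast<const uint8_t *>(cmd) + cmd->cmdsize);
    }
}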
diff --git a/lldb/source/Target/Memory.cpp b/lldb/source/Target/Memory.cpp
index 3f5567b7c8f..fb05c9723c3 100644
--- a/lldb/source/Target/Memory.cpp
+++ b/lldb/source/Target/Memory.cpp
@@ -26,8 +26,9 @@ using namespace lldb_private;
MemoryCache::MemoryCache(Process &process) :
m_process (process),
m_cache_line_byte_size (512),
- m_cache_mutex (Mutex::eMutexTypeRecursive),
- m_cache ()
+ m_mutex (Mutex::eMutexTypeRecursive),
+ m_cache (),
+ m_invalid_ranges ()
{
}
@@ -41,7 +42,7 @@ MemoryCache::~MemoryCache()
void
MemoryCache::Clear()
{
- Mutex::Locker locker (m_cache_mutex);
+ Mutex::Locker locker (m_mutex);
m_cache.clear();
}
@@ -56,7 +57,7 @@ MemoryCache::Flush (addr_t addr, size_t size)
const addr_t flush_start_addr = addr - (addr % cache_line_byte_size);
const addr_t flush_end_addr = end_addr - (end_addr % cache_line_byte_size);
- Mutex::Locker locker (m_cache_mutex);
+ Mutex::Locker locker (m_mutex);
if (m_cache.empty())
return;
@@ -64,12 +65,43 @@ MemoryCache::Flush (addr_t addr, size_t size)
for (addr_t curr_addr = flush_start_addr; curr_addr <= flush_end_addr; curr_addr += cache_line_byte_size)
{
- collection::iterator pos = m_cache.find (curr_addr);
+ BlockMap::iterator pos = m_cache.find (curr_addr);
if (pos != m_cache.end())
m_cache.erase(pos);
}
}
+void
+MemoryCache::AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
+{
+ if (byte_size > 0)
+ {
+ Mutex::Locker locker (m_mutex);
+ InvalidRanges::Entry range (base_addr, byte_size);
+ m_invalid_ranges.Append(range);
+ m_invalid_ranges.Sort();
+ }
+}
+
+bool
+MemoryCache::RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
+{
+ if (byte_size > 0)
+ {
+ Mutex::Locker locker (m_mutex);
+ const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
+ if (idx != UINT32_MAX)
+ {
+ const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex (idx);
+ if (entry->GetRangeBase() == base_addr && entry->GetByteSize() == byte_size)
+ return m_invalid_ranges.RemoveEntrtAtIndex (idx);
+ }
+ }
+ return false;
+}
+
+
+
size_t
MemoryCache::Read (addr_t addr,
void *dst,
@@ -83,12 +115,15 @@ MemoryCache::Read (addr_t addr,
uint8_t *dst_buf = (uint8_t *)dst;
addr_t curr_addr = addr - (addr % cache_line_byte_size);
addr_t cache_offset = addr - curr_addr;
- Mutex::Locker locker (m_cache_mutex);
+ Mutex::Locker locker (m_mutex);
while (bytes_left > 0)
{
- collection::const_iterator pos = m_cache.find (curr_addr);
- collection::const_iterator end = m_cache.end ();
+ if (m_invalid_ranges.FindEntryThatContains(curr_addr))
+ return dst_len - bytes_left;
+
+ BlockMap::const_iterator pos = m_cache.find (curr_addr);
+ BlockMap::const_iterator end = m_cache.end ();
if (pos != end)
{
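The Read() change above checks each cache-line address against the invalid-range list and returns the bytes copied so far on the first hit, so a read that strays into __PAGEZERO ends early with a partial result rather than a remote request. Because m_invalid_ranges is re-sorted on every Append, containment lookups can binary-search; below is a minimal standalone sketch of a FindEntryThatContains-style lookup (hypothetical helper, not LLDB's RangeArray):

#include <algorithm>
#include <cstdint>
#include <vector>

struct RangeEntry {
    uint64_t base;
    uint64_t size;
};

// Binary search over ranges sorted by base address: locate the last
// range starting at or below addr, then test whether addr is inside it.
// Returns nullptr when no range contains the address.
const RangeEntry *FindEntryThatContains(const std::vector<RangeEntry> &ranges,
                                        uint64_t addr) {
    auto it = std::upper_bound(
        ranges.begin(), ranges.end(), addr,
        [](uint64_t a, const RangeEntry &e) { return a < e.base; });
    if (it == ranges.begin())
        return nullptr; // every range starts above addr
    --it;               // candidate: greatest base <= addr
    return (addr - it->base < it->size) ? &*it : nullptr;
}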
diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp
index e959e407001..d0b7a2b8a53 100644
--- a/lldb/source/Target/Process.cpp
+++ b/lldb/source/Target/Process.cpp
@@ -4451,6 +4451,19 @@ Process::GetThreadStatus (Stream &strm,
return num_thread_infos_dumped;
}
+void
+Process::AddInvalidMemoryRegion (const LoadRange &region)
+{
+ m_memory_cache.AddInvalidRange(region.GetRangeBase(), region.GetByteSize());
+}
+
+bool
+Process::RemoveInvalidMemoryRange (const LoadRange &region)
+{
+ return m_memory_cache.RemoveInvalidRange(region.GetRangeBase(), region.GetByteSize());
+}
+
+
//--------------------------------------------------------------
// class Process::SettingsController
//--------------------------------------------------------------
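Taken together, a dynamic loader plug-in only needs the two thin wrappers added above. A hedged usage sketch mirroring the DynamicLoaderMacOSXDYLD hunk (vmaddr, vmsize, and m_process are assumed to come from the surrounding plug-in code):

// Hypothetical call site: register __PAGEZERO's unslid range so the
// memory cache refuses all reads and writes into it for this session.
Process::LoadRange pagezero_range(vmaddr, vmsize); // e.g. base 0x0, size 4 GiB on 64-bit
m_process->AddInvalidMemoryRegion(pagezero_range);

// If the module is later unloaded or re-slid, the region can be dropped:
m_process->RemoveInvalidMemoryRange(pagezero_range);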