path: root/lldb/source/Target/Memory.cpp
author     Greg Clayton <gclayton@apple.com>  2012-02-22 04:37:26 +0000
committer  Greg Clayton <gclayton@apple.com>  2012-02-22 04:37:26 +0000
commit     a9f40ad80a68f753e510aedf77b49983b8064ffe (patch)
tree       ba59f0d5178d663f8bea4232f6e0824601b5a449  /lldb/source/Target/Memory.cpp
parent     337cfaf75734ebef1ac1d400399eacbbb866ac4d (diff)
For stepping performance I added the ability to outlaw all memory accesses
to the __PAGEZERO segment on Darwin. The dynamic loader now correctly doesn't slide __PAGEZERO and also registers it as an invalid region of memory. This lets us avoid making any memory requests from the local or remote debug session for addresses in this region. Stepping performance improves when uninitialized local variables point into __PAGEZERO, because we no longer issue the memory read or write request at all. llvm-svn: 151128
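To illustrate how the new invalid-range API could be driven, here is a minimal sketch (not part of this patch) of a caller registering __PAGEZERO with the memory cache. The helper name and the base/size constants are illustrative assumptions, not code from this commit; only MemoryCache::AddInvalidRange comes from the patch below.

    #include "lldb/Target/Memory.h"   // declares lldb_private::MemoryCache

    // Hypothetical helper: mark __PAGEZERO as an invalid region so that
    // MemoryCache::Read() returns early instead of issuing a memory request.
    static void
    MarkPageZeroInvalid (lldb_private::MemoryCache &cache)
    {
        const lldb::addr_t pagezero_base = 0;            // assumed __PAGEZERO base address
        const lldb::addr_t pagezero_size = 0x100000000;  // assumed 4 GiB size on 64-bit Darwin

        // After this call, any cached read that falls inside
        // [pagezero_base, pagezero_base + pagezero_size) is rejected
        // before a local or remote memory request is made.
        cache.AddInvalidRange (pagezero_base, pagezero_size);
    }

A matching call to RemoveInvalidRange with the same base and size would undo the registration, which is why RemoveInvalidRange only removes an entry on an exact match.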
Diffstat (limited to 'lldb/source/Target/Memory.cpp')
-rw-r--r--  lldb/source/Target/Memory.cpp  51
1 file changed, 43 insertions(+), 8 deletions(-)
diff --git a/lldb/source/Target/Memory.cpp b/lldb/source/Target/Memory.cpp
index 3f5567b7c8f..fb05c9723c3 100644
--- a/lldb/source/Target/Memory.cpp
+++ b/lldb/source/Target/Memory.cpp
@@ -26,8 +26,9 @@ using namespace lldb_private;
MemoryCache::MemoryCache(Process &process) :
m_process (process),
m_cache_line_byte_size (512),
- m_cache_mutex (Mutex::eMutexTypeRecursive),
- m_cache ()
+ m_mutex (Mutex::eMutexTypeRecursive),
+ m_cache (),
+ m_invalid_ranges ()
{
}
@@ -41,7 +42,7 @@ MemoryCache::~MemoryCache()
void
MemoryCache::Clear()
{
- Mutex::Locker locker (m_cache_mutex);
+ Mutex::Locker locker (m_mutex);
m_cache.clear();
}
@@ -56,7 +57,7 @@ MemoryCache::Flush (addr_t addr, size_t size)
const addr_t flush_start_addr = addr - (addr % cache_line_byte_size);
const addr_t flush_end_addr = end_addr - (end_addr % cache_line_byte_size);
- Mutex::Locker locker (m_cache_mutex);
+ Mutex::Locker locker (m_mutex);
if (m_cache.empty())
return;
@@ -64,12 +65,43 @@ MemoryCache::Flush (addr_t addr, size_t size)
for (addr_t curr_addr = flush_start_addr; curr_addr <= flush_end_addr; curr_addr += cache_line_byte_size)
{
- collection::iterator pos = m_cache.find (curr_addr);
+ BlockMap::iterator pos = m_cache.find (curr_addr);
if (pos != m_cache.end())
m_cache.erase(pos);
}
}
+void
+MemoryCache::AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
+{
+ if (byte_size > 0)
+ {
+ Mutex::Locker locker (m_mutex);
+ InvalidRanges::Entry range (base_addr, byte_size);
+ m_invalid_ranges.Append(range);
+ m_invalid_ranges.Sort();
+ }
+}
+
+bool
+MemoryCache::RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
+{
+ if (byte_size > 0)
+ {
+ Mutex::Locker locker (m_mutex);
+ const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
+ if (idx != UINT32_MAX)
+ {
+ const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex (idx);
+ if (entry->GetRangeBase() == base_addr && entry->GetByteSize() == byte_size)
+ return m_invalid_ranges.RemoveEntrtAtIndex (idx);
+ }
+ }
+ return false;
+}
+
+
+
size_t
MemoryCache::Read (addr_t addr,
void *dst,
@@ -83,12 +115,15 @@ MemoryCache::Read (addr_t addr,
uint8_t *dst_buf = (uint8_t *)dst;
addr_t curr_addr = addr - (addr % cache_line_byte_size);
addr_t cache_offset = addr - curr_addr;
- Mutex::Locker locker (m_cache_mutex);
+ Mutex::Locker locker (m_mutex);
while (bytes_left > 0)
{
- collection::const_iterator pos = m_cache.find (curr_addr);
- collection::const_iterator end = m_cache.end ();
+ if (m_invalid_ranges.FindEntryThatContains(curr_addr))
+ return dst_len - bytes_left;
+
+ BlockMap::const_iterator pos = m_cache.find (curr_addr);
+ BlockMap::const_iterator end = m_cache.end ();
if (pos != end)
{