| author | Dan Liew <dan@su-root.co.uk> | 2018-12-28 19:30:51 +0000 |
|---|---|---|
| committer | Dan Liew <dan@su-root.co.uk> | 2018-12-28 19:30:51 +0000 |
| commit | 8c11fb3ed419a8c48f0f36af6e0a621aa75be320 | |
| tree | 1725a89afdd72dd48f0cd41cb27940833c60f19e | |
| parent | bae11e79990a97133cfced28796a5695d823015c | |
Introduce `LocalAddressSpaceView::LoadWritable(...)` and make the `Load(...)` method return a const pointer.
Summary:
This is a follow-up to r346956 (https://reviews.llvm.org/D53975).
The purpose of this change is to allow implementers of the
`AddressSpaceView` to distinguish between callers that want
read-only memory and callers that want writable memory. Being able to
distinguish these cases allows implementations to optimize for each
and also provides a way to work around possible platform
restrictions (e.g. the low-level platform interface for reading
out-of-process memory may place memory in read-only pages).
For allocator enumeration, read-only access is sufficient in almost all
cases, so we make `Load(...)` adopt this new requirement and introduce
the `LoadWritable(...)` variants for the cases where memory needs to be
writable.
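
To make the split concrete, here is a minimal sketch of what the in-process `LocalAddressSpaceView` can reduce both calls to. The `uptr` alias is filled in with `uintptr_t` so the sketch is self-contained; in the local case both functions are identities differing only in constness, and the distinction only starts to pay off for out-of-process implementations:

```cpp
#include <cstdint>

// sanitizer_common spells this type `uptr`; uintptr_t is the closest
// standard equivalent, used here so the sketch stands alone.
using uptr = uintptr_t;

struct LocalAddressSpaceView {
  // Read-only view of `num_elements` objects of type T at `target_address`.
  // In-process memory is directly addressable, so this is an identity
  // function that only adds constness.
  template <typename T>
  static const T *Load(const T *target_address, uptr num_elements = 1) {
    (void)num_elements;
    return target_address;
  }

  // Writable view; callers must use this variant when they intend to
  // mutate the loaded memory.
  template <typename T>
  static T *LoadWritable(T *target_address, uptr num_elements = 1) {
    (void)num_elements;
    return target_address;
  }
};
```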
The behaviour of `LoadWritable(...)` documented in the comments is
deliberately very restrictive, so that it will be possible in the future
to implement a simple write-cache (i.e. just a map from a target address
to a writable region of memory). These restrictions can be loosened
later, if necessary, by implementing a more sophisticated
write-cache.
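
As an illustration of why the restrictions are phrased this way, the sketch below shows the kind of simple write-cache they permit: a map from target address to a single locally writable copy, so repeated `LoadWritable(...)` calls on the same address return the same buffer. `CachingAddressSpaceView` and `ReadRemote` are hypothetical names for this sketch, not part of the sanitizer interface:

```cpp
#include <cstdint>
#include <cstring>
#include <map>
#include <vector>

using uptr = uintptr_t;

// Stand-in for the platform's out-of-process read primitive; it reads
// local memory here only so that the sketch compiles and runs.
static const void *ReadRemote(const void *addr, uptr /*size*/) { return addr; }

struct CachingAddressSpaceView {
  template <typename T>
  static const T *Load(const T *target_address, uptr num_elements = 1) {
    // Read-only loads need no cache entry; the platform may even hand
    // back pointers into read-only pages.
    return static_cast<const T *>(
        ReadRemote(target_address, sizeof(T) * num_elements));
  }

  template <typename T>
  static T *LoadWritable(T *target_address, uptr num_elements = 1) {
    const uptr size = sizeof(T) * num_elements;
    const uptr key = reinterpret_cast<uptr>(target_address);
    auto it = Cache().find(key);
    if (it == Cache().end()) {
      // First LoadWritable() of this address: snapshot the target bytes
      // into a private writable buffer and remember it.
      std::vector<char> copy(size);
      std::memcpy(copy.data(), ReadRemote(target_address, size), size);
      it = Cache().emplace(key, std::move(copy)).first;
    }
    // Later calls for the same address return the same buffer, as the
    // documented restrictions require.
    return reinterpret_cast<T *>(it->second.data());
  }

 private:
  // target address -> locally writable copy of that memory
  static std::map<uptr, std::vector<char>> &Cache() {
    static std::map<uptr, std::vector<char>> cache;
    return cache;
  }
};
```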
rdar://problem/45284065
Reviewers: kcc, cryptoad, eugenis, kubamracek, george.karpenkov
Subscribers: #sanitizers, llvm-commits
Differential Revision: https://reviews.llvm.org/D54879
llvm-svn: 350136
Diffstat (limited to 'compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h')
| -rw-r--r-- | compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h | 10 |
|---|---|---|

1 file changed, 5 insertions, 5 deletions
```diff
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index e628a796471..0c8505c34c8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -204,10 +204,10 @@ class LargeMmapAllocator {
   void EnsureSortedChunks() {
     if (chunks_sorted_) return;
-    Header **chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
     Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
     for (uptr i = 0; i < n_chunks_; i++)
-      AddressSpaceView::Load(chunks[i])->chunk_idx = i;
+      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
     chunks_sorted_ = true;
   }
@@ -275,9 +275,9 @@ class LargeMmapAllocator {
   // The allocator must be locked when calling this function.
   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     EnsureSortedChunks();  // Avoid doing the sort while iterating.
-    Header **chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
     for (uptr i = 0; i < n_chunks_; i++) {
-      Header *t = chunks[i];
+      const Header *t = chunks[i];
       callback(reinterpret_cast<uptr>(GetUser(t)), arg);
       // Consistency check: verify that the array did not change.
       CHECK_EQ(chunks[i], t);
@@ -301,7 +301,7 @@ class LargeMmapAllocator {
     return GetHeader(reinterpret_cast<uptr>(p));
   }
-  void *GetUser(Header *h) {
+  void *GetUser(const Header *h) {
     CHECK(IsAligned((uptr)h, page_size_));
     return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
   }
```
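
A small, self-contained illustration (not part of the patch) of what the const propagation in this diff buys: once `Load(...)` returns const pointers, a mutation that slips into a read-only path such as `ForEachChunk` fails to compile instead of silently writing through the view. `ReadOnlyWalk` and this simplified `Header` are stand-ins for the allocator's real types:

```cpp
#include <cstdint>
using uptr = uintptr_t;

// Minimal stand-in for the allocator's chunk header.
struct Header { uptr chunk_idx; };

void ReadOnlyWalk(const Header *const *chunks, uptr n) {
  for (uptr i = 0; i < n; i++) {
    const Header *t = chunks[i];
    // t->chunk_idx = i;  // error: read-only object; use LoadWritable() instead
    (void)t;
  }
}
```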

