author    Kostya Kortchinsky <kostyak@google.com>    2018-09-19 19:50:35 +0000
committer Kostya Kortchinsky <kostyak@google.com>    2018-09-19 19:50:35 +0000
commit    851a7c9b2b9e4f273f968885bd1df88fcca92d59 (patch)
tree      390322d31367f44efe10d489daaf24b5ea042e3d
parent    c62ab611732099180ae3e5d3774ed43747b40051 (diff)
[sanitizer][fuchsia] Fix VMAR leak
Summary:
Destroy and close a range's vmar if all of its memory was unmapped.

This addresses a performance regression caused by the proliferation of vmars
for Secondary-backed allocations with Scudo on Fuchsia. When a Secondary-backed
allocation was freed, the associated `ReservedAddressRange` went away after
unmapping the entirety of the mapping, but without properly getting rid of the
associated vmar (which was created specifically for that mapping). This
resulted in a growing number of defunct vmars, which in turn slowed down
further vmar allocations. This appears to solve ZX-2560/ZX-2642, at least on
QEMU.

Reviewers: flowerhack, mcgrathr, phosek, mseaborn

Reviewed By: mcgrathr

Subscribers: kubamracek, delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D52242

llvm-svn: 342584
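For readers unfamiliar with Zircon VMARs, here is a minimal sketch (not the
sanitizer code itself) of the lifecycle the summary describes: a child vmar is
created to reserve an address range, a VMO is mapped into it, and once the
whole range is released the vmar has to be destroyed and its handle closed, or
it lingers as a defunct region. The syscalls below are real Zircon APIs as
currently documented; the flag choices, error handling, and the MapAndRelease
helper are illustrative assumptions, not the patch's code.

#include <stdint.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

// Sketch: reserve a range with a child vmar, map into it, then tear it down.
bool MapAndRelease(uint64_t size) {
  zx_handle_t vmar = ZX_HANDLE_INVALID;
  zx_vaddr_t vmar_base = 0;
  // Reserve an address range by creating a dedicated child vmar, as the
  // Fuchsia implementation of ReservedAddressRange does.
  if (zx_vmar_allocate(zx_vmar_root_self(),
                       ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
                       /*offset=*/0, size, &vmar, &vmar_base) != ZX_OK)
    return false;

  zx_handle_t vmo = ZX_HANDLE_INVALID;
  if (zx_vmo_create(size, 0, &vmo) != ZX_OK) {
    zx_vmar_destroy(vmar);
    zx_handle_close(vmar);
    return false;
  }

  zx_vaddr_t addr = 0;
  zx_status_t status =
      zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                  /*vmar_offset=*/0, vmo, /*vmo_offset=*/0, size, &addr);
  zx_handle_close(vmo);  // The mapping keeps the VMO contents alive.
  if (status != ZX_OK) {
    zx_vmar_destroy(vmar);
    zx_handle_close(vmar);
    return false;
  }

  // ... use the memory at addr ...

  // Releasing the whole range: zx_vmar_unmap() alone would remove the pages
  // but leave a defunct child vmar behind, which is the leak this patch fixes.
  // zx_vmar_destroy() unmaps everything in the region, and zx_handle_close()
  // then lets the kernel reclaim it.
  zx_vmar_destroy(vmar);
  zx_handle_close(vmar);
  return true;
}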
-rw-r--r--    compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc    22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc
index de641e86c27..9c54e1ed3eb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -277,14 +277,22 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
 
 void ReservedAddressRange::Unmap(uptr addr, uptr size) {
   CHECK_LE(size, size_);
-  if (addr == reinterpret_cast<uptr>(base_))
-    // If we unmap the whole range, just null out the base.
-    base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size);
-  else
+  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
+  if (addr == reinterpret_cast<uptr>(base_)) {
+    if (size == size_) {
+      // Destroying the vmar effectively unmaps the whole mapping.
+      _zx_vmar_destroy(vmar);
+      _zx_handle_close(vmar);
+      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
+      DecreaseTotalMmap(size);
+      return;
+    }
+  } else {
     CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
-  size_ -= size;
-  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size,
-                 static_cast<zx_handle_t>(os_handle_));
+  }
+  // Partial unmapping does not affect the fact that the initial range is still
+  // reserved, and the resulting unmapped memory can't be reused.
+  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
 }
 
 // This should never be called.
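As a follow-up on the new semantics, here is a hypothetical caller-side sketch
assuming the sanitizer_common ReservedAddressRange interface of Init/Map/Unmap;
only Unmap() appears in the diff above, so the Init()/Map() signatures and the
kSize/name values are assumptions. The point it illustrates: a single full
unmap starting at the base now destroys and closes the dedicated vmar, while a
partial unmap leaves the reservation and its vmar in place.

// Hypothetical usage sketch; Init()/Map() signatures are assumptions.
const uptr kSize = 1 << 20;

void FullRelease() {
  ReservedAddressRange range;
  uptr base = range.Init(kSize, "example");  // reserves a dedicated child vmar
  range.Map(base, kSize);                    // commits memory inside it
  // Releasing the entire range in one call destroys the vmar and closes its
  // handle, which is the fix in this patch.
  range.Unmap(base, kSize);
}

void PartialRelease() {
  ReservedAddressRange range;
  uptr base = range.Init(kSize, "example");
  range.Map(base, kSize);
  // A partial unmap must trim the front or the back of the range; the range
  // stays reserved and the vmar stays alive, per the comment in the diff.
  range.Unmap(base + kSize / 2, kSize / 2);
}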