From ae1fc9baae4ebfdb30c7f6a5440e6064d3f9f09c Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Thu, 1 Aug 2019 14:01:30 +0000 Subject: compiler-rt: Rename .cc file in lib/lsan to .cpp Like r367463, but for lsan. llvm-svn: 367561 --- compiler-rt/lib/lsan/CMakeLists.txt | 27 +- compiler-rt/lib/lsan/lsan.cc | 135 --- compiler-rt/lib/lsan/lsan.cpp | 135 +++ compiler-rt/lib/lsan/lsan_allocator.cc | 353 -------- compiler-rt/lib/lsan/lsan_allocator.cpp | 353 ++++++++ compiler-rt/lib/lsan/lsan_common.cc | 904 --------------------- compiler-rt/lib/lsan/lsan_common.cpp | 904 +++++++++++++++++++++ compiler-rt/lib/lsan/lsan_common_linux.cc | 140 ---- compiler-rt/lib/lsan/lsan_common_linux.cpp | 140 ++++ compiler-rt/lib/lsan/lsan_common_mac.cc | 202 ----- compiler-rt/lib/lsan/lsan_common_mac.cpp | 202 +++++ compiler-rt/lib/lsan/lsan_interceptors.cc | 465 ----------- compiler-rt/lib/lsan/lsan_interceptors.cpp | 465 +++++++++++ compiler-rt/lib/lsan/lsan_linux.cc | 32 - compiler-rt/lib/lsan/lsan_linux.cpp | 32 + compiler-rt/lib/lsan/lsan_mac.cc | 191 ----- compiler-rt/lib/lsan/lsan_mac.cpp | 191 +++++ compiler-rt/lib/lsan/lsan_malloc_mac.cc | 59 -- compiler-rt/lib/lsan/lsan_malloc_mac.cpp | 59 ++ compiler-rt/lib/lsan/lsan_preinit.cc | 21 - compiler-rt/lib/lsan/lsan_preinit.cpp | 21 + compiler-rt/lib/lsan/lsan_thread.cc | 162 ---- compiler-rt/lib/lsan/lsan_thread.cpp | 162 ++++ .../lib/sanitizer_common/scripts/check_lint.sh | 2 +- 24 files changed, 2680 insertions(+), 2677 deletions(-) delete mode 100644 compiler-rt/lib/lsan/lsan.cc create mode 100644 compiler-rt/lib/lsan/lsan.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_allocator.cc create mode 100644 compiler-rt/lib/lsan/lsan_allocator.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_common.cc create mode 100644 compiler-rt/lib/lsan/lsan_common.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_common_linux.cc create mode 100644 compiler-rt/lib/lsan/lsan_common_linux.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_common_mac.cc create mode 100644 compiler-rt/lib/lsan/lsan_common_mac.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_interceptors.cc create mode 100644 compiler-rt/lib/lsan/lsan_interceptors.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_linux.cc create mode 100644 compiler-rt/lib/lsan/lsan_linux.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_mac.cc create mode 100644 compiler-rt/lib/lsan/lsan_mac.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_malloc_mac.cc create mode 100644 compiler-rt/lib/lsan/lsan_malloc_mac.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_preinit.cc create mode 100644 compiler-rt/lib/lsan/lsan_preinit.cpp delete mode 100644 compiler-rt/lib/lsan/lsan_thread.cc create mode 100644 compiler-rt/lib/lsan/lsan_thread.cpp diff --git a/compiler-rt/lib/lsan/CMakeLists.txt b/compiler-rt/lib/lsan/CMakeLists.txt index 34f686135ac..65d47476939 100644 --- a/compiler-rt/lib/lsan/CMakeLists.txt +++ b/compiler-rt/lib/lsan/CMakeLists.txt @@ -4,26 +4,29 @@ set(LSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS}) append_rtti_flag(OFF LSAN_CFLAGS) set(LSAN_COMMON_SOURCES - lsan_common.cc - lsan_common_linux.cc - lsan_common_mac.cc) + lsan_common.cpp + lsan_common_linux.cpp + lsan_common_mac.cpp + ) set(LSAN_SOURCES - lsan.cc - lsan_allocator.cc - lsan_linux.cc - lsan_interceptors.cc - lsan_mac.cc - lsan_malloc_mac.cc - lsan_preinit.cc - lsan_thread.cc) + lsan.cpp + lsan_allocator.cpp + lsan_linux.cpp + lsan_interceptors.cpp + lsan_mac.cpp + lsan_malloc_mac.cpp + lsan_preinit.cpp + lsan_thread.cpp + ) set(LSAN_HEADERS lsan.h lsan_allocator.h 
lsan_common.h lsan_flags.inc - lsan_thread.h) + lsan_thread.h + ) set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/compiler-rt/lib/lsan/lsan.cc b/compiler-rt/lib/lsan/lsan.cc deleted file mode 100644 index 68697a6363e..00000000000 --- a/compiler-rt/lib/lsan/lsan.cc +++ /dev/null @@ -1,135 +0,0 @@ -//=-- lsan.cc -------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer. -// Standalone LSan RTL. -// -//===----------------------------------------------------------------------===// - -#include "lsan.h" - -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_flag_parser.h" -#include "sanitizer_common/sanitizer_stacktrace.h" -#include "lsan_allocator.h" -#include "lsan_common.h" -#include "lsan_thread.h" - -bool lsan_inited; -bool lsan_init_is_running; - -namespace __lsan { - -///// Interface to the common LSan module. ///// -bool WordIsPoisoned(uptr addr) { - return false; -} - -} // namespace __lsan - -void __sanitizer::BufferedStackTrace::UnwindImpl( - uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) { - using namespace __lsan; - uptr stack_top = 0, stack_bottom = 0; - ThreadContext *t; - if (StackTrace::WillUseFastUnwind(request_fast) && - (t = CurrentThreadContext())) { - stack_top = t->stack_end(); - stack_bottom = t->stack_begin(); - } - if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) { - if (StackTrace::WillUseFastUnwind(request_fast)) - Unwind(max_depth, pc, bp, nullptr, stack_top, stack_bottom, true); - else - Unwind(max_depth, pc, 0, context, 0, 0, false); - } -} - -using namespace __lsan; // NOLINT - -static void InitializeFlags() { - // Set all the default values. - SetCommonFlagsDefaults(); - { - CommonFlags cf; - cf.CopyFrom(*common_flags()); - cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH"); - cf.malloc_context_size = 30; - cf.intercept_tls_get_addr = true; - cf.detect_leaks = true; - cf.exitcode = 23; - OverrideCommonFlags(cf); - } - - Flags *f = flags(); - f->SetDefaults(); - - FlagParser parser; - RegisterLsanFlags(&parser, f); - RegisterCommonFlags(&parser); - - // Override from user-specified string. 
- const char *lsan_default_options = MaybeCallLsanDefaultOptions(); - parser.ParseString(lsan_default_options); - parser.ParseStringFromEnv("LSAN_OPTIONS"); - - SetVerbosity(common_flags()->verbosity); - - if (Verbosity()) ReportUnrecognizedFlags(); - - if (common_flags()->help) parser.PrintFlagDescriptions(); - - __sanitizer_set_report_path(common_flags()->log_path); -} - -static void OnStackUnwind(const SignalContext &sig, const void *, - BufferedStackTrace *stack) { - stack->Unwind(sig.pc, sig.bp, sig.context, - common_flags()->fast_unwind_on_fatal); -} - -static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) { - HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind, - nullptr); -} - -extern "C" void __lsan_init() { - CHECK(!lsan_init_is_running); - if (lsan_inited) - return; - lsan_init_is_running = true; - SanitizerToolName = "LeakSanitizer"; - CacheBinaryName(); - AvoidCVE_2016_2143(); - InitializeFlags(); - InitCommonLsan(); - InitializeAllocator(); - ReplaceSystemMalloc(); - InitTlsSize(); - InitializeInterceptors(); - InitializeThreadRegistry(); - InstallDeadlySignalHandlers(LsanOnDeadlySignal); - u32 tid = ThreadCreate(0, 0, true); - CHECK_EQ(tid, 0); - ThreadStart(tid, GetTid()); - SetCurrentThread(tid); - - if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) - Atexit(DoLeakCheck); - - InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); - - lsan_inited = true; - lsan_init_is_running = false; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_print_stack_trace() { - GET_STACK_TRACE_FATAL; - stack.Print(); -} diff --git a/compiler-rt/lib/lsan/lsan.cpp b/compiler-rt/lib/lsan/lsan.cpp new file mode 100644 index 00000000000..5b5f6198a69 --- /dev/null +++ b/compiler-rt/lib/lsan/lsan.cpp @@ -0,0 +1,135 @@ +//=-- lsan.cpp ------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan RTL. +// +//===----------------------------------------------------------------------===// + +#include "lsan.h" + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "lsan_allocator.h" +#include "lsan_common.h" +#include "lsan_thread.h" + +bool lsan_inited; +bool lsan_init_is_running; + +namespace __lsan { + +///// Interface to the common LSan module. ///// +bool WordIsPoisoned(uptr addr) { + return false; +} + +} // namespace __lsan + +void __sanitizer::BufferedStackTrace::UnwindImpl( + uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) { + using namespace __lsan; + uptr stack_top = 0, stack_bottom = 0; + ThreadContext *t; + if (StackTrace::WillUseFastUnwind(request_fast) && + (t = CurrentThreadContext())) { + stack_top = t->stack_end(); + stack_bottom = t->stack_begin(); + } + if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) { + if (StackTrace::WillUseFastUnwind(request_fast)) + Unwind(max_depth, pc, bp, nullptr, stack_top, stack_bottom, true); + else + Unwind(max_depth, pc, 0, context, 0, 0, false); + } +} + +using namespace __lsan; // NOLINT + +static void InitializeFlags() { + // Set all the default values. 
+ SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH"); + cf.malloc_context_size = 30; + cf.intercept_tls_get_addr = true; + cf.detect_leaks = true; + cf.exitcode = 23; + OverrideCommonFlags(cf); + } + + Flags *f = flags(); + f->SetDefaults(); + + FlagParser parser; + RegisterLsanFlags(&parser, f); + RegisterCommonFlags(&parser); + + // Override from user-specified string. + const char *lsan_default_options = MaybeCallLsanDefaultOptions(); + parser.ParseString(lsan_default_options); + parser.ParseStringFromEnv("LSAN_OPTIONS"); + + SetVerbosity(common_flags()->verbosity); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) parser.PrintFlagDescriptions(); + + __sanitizer_set_report_path(common_flags()->log_path); +} + +static void OnStackUnwind(const SignalContext &sig, const void *, + BufferedStackTrace *stack) { + stack->Unwind(sig.pc, sig.bp, sig.context, + common_flags()->fast_unwind_on_fatal); +} + +static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) { + HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind, + nullptr); +} + +extern "C" void __lsan_init() { + CHECK(!lsan_init_is_running); + if (lsan_inited) + return; + lsan_init_is_running = true; + SanitizerToolName = "LeakSanitizer"; + CacheBinaryName(); + AvoidCVE_2016_2143(); + InitializeFlags(); + InitCommonLsan(); + InitializeAllocator(); + ReplaceSystemMalloc(); + InitTlsSize(); + InitializeInterceptors(); + InitializeThreadRegistry(); + InstallDeadlySignalHandlers(LsanOnDeadlySignal); + u32 tid = ThreadCreate(0, 0, true); + CHECK_EQ(tid, 0); + ThreadStart(tid, GetTid()); + SetCurrentThread(tid); + + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) + Atexit(DoLeakCheck); + + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + + lsan_inited = true; + lsan_init_is_running = false; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_stack_trace() { + GET_STACK_TRACE_FATAL; + stack.Print(); +} diff --git a/compiler-rt/lib/lsan/lsan_allocator.cc b/compiler-rt/lib/lsan/lsan_allocator.cc deleted file mode 100644 index 8b13e4c028d..00000000000 --- a/compiler-rt/lib/lsan/lsan_allocator.cc +++ /dev/null @@ -1,353 +0,0 @@ -//=-- lsan_allocator.cc ---------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer. -// See lsan_allocator.h for details. 
-// -//===----------------------------------------------------------------------===// - -#include "lsan_allocator.h" - -#include "sanitizer_common/sanitizer_allocator.h" -#include "sanitizer_common/sanitizer_allocator_checks.h" -#include "sanitizer_common/sanitizer_allocator_interface.h" -#include "sanitizer_common/sanitizer_allocator_report.h" -#include "sanitizer_common/sanitizer_errno.h" -#include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_stacktrace.h" -#include "lsan_common.h" - -extern "C" void *memset(void *ptr, int value, uptr num); - -namespace __lsan { -#if defined(__i386__) || defined(__arm__) -static const uptr kMaxAllowedMallocSize = 1UL << 30; -#elif defined(__mips64) || defined(__aarch64__) -static const uptr kMaxAllowedMallocSize = 4UL << 30; -#else -static const uptr kMaxAllowedMallocSize = 8UL << 30; -#endif - -static Allocator allocator; - -void InitializeAllocator() { - SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); - allocator.InitLinkerInitialized( - common_flags()->allocator_release_to_os_interval_ms); -} - -void AllocatorThreadFinish() { - allocator.SwallowCache(GetAllocatorCache()); -} - -static ChunkMetadata *Metadata(const void *p) { - return reinterpret_cast(allocator.GetMetaData(p)); -} - -static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { - if (!p) return; - ChunkMetadata *m = Metadata(p); - CHECK(m); - m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked; - m->stack_trace_id = StackDepotPut(stack); - m->requested_size = size; - atomic_store(reinterpret_cast(m), 1, memory_order_relaxed); -} - -static void RegisterDeallocation(void *p) { - if (!p) return; - ChunkMetadata *m = Metadata(p); - CHECK(m); - atomic_store(reinterpret_cast(m), 0, memory_order_relaxed); -} - -static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) { - if (AllocatorMayReturnNull()) { - Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size); - return nullptr; - } - ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack); -} - -void *Allocate(const StackTrace &stack, uptr size, uptr alignment, - bool cleared) { - if (size == 0) - size = 1; - if (size > kMaxAllowedMallocSize) - return ReportAllocationSizeTooBig(size, stack); - void *p = allocator.Allocate(GetAllocatorCache(), size, alignment); - if (UNLIKELY(!p)) { - SetAllocatorOutOfMemory(); - if (AllocatorMayReturnNull()) - return nullptr; - ReportOutOfMemory(size, &stack); - } - // Do not rely on the allocator to clear the memory (it's slow). 
- if (cleared && allocator.FromPrimary(p)) - memset(p, 0, size); - RegisterAllocation(stack, p, size); - if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size); - RunMallocHooks(p, size); - return p; -} - -static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { - if (AllocatorMayReturnNull()) - return nullptr; - ReportCallocOverflow(nmemb, size, &stack); - } - size *= nmemb; - return Allocate(stack, size, 1, true); -} - -void Deallocate(void *p) { - if (&__sanitizer_free_hook) __sanitizer_free_hook(p); - RunFreeHooks(p); - RegisterDeallocation(p); - allocator.Deallocate(GetAllocatorCache(), p); -} - -void *Reallocate(const StackTrace &stack, void *p, uptr new_size, - uptr alignment) { - RegisterDeallocation(p); - if (new_size > kMaxAllowedMallocSize) { - allocator.Deallocate(GetAllocatorCache(), p); - return ReportAllocationSizeTooBig(new_size, stack); - } - p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment); - RegisterAllocation(stack, p, new_size); - return p; -} - -void GetAllocatorCacheRange(uptr *begin, uptr *end) { - *begin = (uptr)GetAllocatorCache(); - *end = *begin + sizeof(AllocatorCache); -} - -uptr GetMallocUsableSize(const void *p) { - ChunkMetadata *m = Metadata(p); - if (!m) return 0; - return m->requested_size; -} - -int lsan_posix_memalign(void **memptr, uptr alignment, uptr size, - const StackTrace &stack) { - if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { - if (AllocatorMayReturnNull()) - return errno_EINVAL; - ReportInvalidPosixMemalignAlignment(alignment, &stack); - } - void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory); - if (UNLIKELY(!ptr)) - // OOM error is already taken care of by Allocate. - return errno_ENOMEM; - CHECK(IsAligned((uptr)ptr, alignment)); - *memptr = ptr; - return 0; -} - -void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) { - if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { - errno = errno_EINVAL; - if (AllocatorMayReturnNull()) - return nullptr; - ReportInvalidAlignedAllocAlignment(size, alignment, &stack); - } - return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); -} - -void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) { - if (UNLIKELY(!IsPowerOfTwo(alignment))) { - errno = errno_EINVAL; - if (AllocatorMayReturnNull()) - return nullptr; - ReportInvalidAllocationAlignment(alignment, &stack); - } - return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); -} - -void *lsan_malloc(uptr size, const StackTrace &stack) { - return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory)); -} - -void lsan_free(void *p) { - Deallocate(p); -} - -void *lsan_realloc(void *p, uptr size, const StackTrace &stack) { - return SetErrnoOnNull(Reallocate(stack, p, size, 1)); -} - -void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size, - const StackTrace &stack) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { - errno = errno_ENOMEM; - if (AllocatorMayReturnNull()) - return nullptr; - ReportReallocArrayOverflow(nmemb, size, &stack); - } - return lsan_realloc(ptr, nmemb * size, stack); -} - -void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) { - return SetErrnoOnNull(Calloc(nmemb, size, stack)); -} - -void *lsan_valloc(uptr size, const StackTrace &stack) { - return SetErrnoOnNull( - Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory)); -} - -void *lsan_pvalloc(uptr size, const StackTrace &stack) { - 
uptr PageSize = GetPageSizeCached(); - if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { - errno = errno_ENOMEM; - if (AllocatorMayReturnNull()) - return nullptr; - ReportPvallocOverflow(size, &stack); - } - // pvalloc(0) should allocate one page. - size = size ? RoundUpTo(size, PageSize) : PageSize; - return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory)); -} - -uptr lsan_mz_size(const void *p) { - return GetMallocUsableSize(p); -} - -///// Interface to the common LSan module. ///// - -void LockAllocator() { - allocator.ForceLock(); -} - -void UnlockAllocator() { - allocator.ForceUnlock(); -} - -void GetAllocatorGlobalRange(uptr *begin, uptr *end) { - *begin = (uptr)&allocator; - *end = *begin + sizeof(allocator); -} - -uptr PointsIntoChunk(void* p) { - uptr addr = reinterpret_cast(p); - uptr chunk = reinterpret_cast(allocator.GetBlockBeginFastLocked(p)); - if (!chunk) return 0; - // LargeMmapAllocator considers pointers to the meta-region of a chunk to be - // valid, but we don't want that. - if (addr < chunk) return 0; - ChunkMetadata *m = Metadata(reinterpret_cast(chunk)); - CHECK(m); - if (!m->allocated) - return 0; - if (addr < chunk + m->requested_size) - return chunk; - if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr)) - return chunk; - return 0; -} - -uptr GetUserBegin(uptr chunk) { - return chunk; -} - -LsanMetadata::LsanMetadata(uptr chunk) { - metadata_ = Metadata(reinterpret_cast(chunk)); - CHECK(metadata_); -} - -bool LsanMetadata::allocated() const { - return reinterpret_cast(metadata_)->allocated; -} - -ChunkTag LsanMetadata::tag() const { - return reinterpret_cast(metadata_)->tag; -} - -void LsanMetadata::set_tag(ChunkTag value) { - reinterpret_cast(metadata_)->tag = value; -} - -uptr LsanMetadata::requested_size() const { - return reinterpret_cast(metadata_)->requested_size; -} - -u32 LsanMetadata::stack_trace_id() const { - return reinterpret_cast(metadata_)->stack_trace_id; -} - -void ForEachChunk(ForEachChunkCallback callback, void *arg) { - allocator.ForEachChunk(callback, arg); -} - -IgnoreObjectResult IgnoreObjectLocked(const void *p) { - void *chunk = allocator.GetBlockBegin(p); - if (!chunk || p < chunk) return kIgnoreObjectInvalid; - ChunkMetadata *m = Metadata(chunk); - CHECK(m); - if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) { - if (m->tag == kIgnored) - return kIgnoreObjectAlreadyIgnored; - m->tag = kIgnored; - return kIgnoreObjectSuccess; - } else { - return kIgnoreObjectInvalid; - } -} -} // namespace __lsan - -using namespace __lsan; - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -uptr __sanitizer_get_current_allocated_bytes() { - uptr stats[AllocatorStatCount]; - allocator.GetStats(stats); - return stats[AllocatorStatAllocated]; -} - -SANITIZER_INTERFACE_ATTRIBUTE -uptr __sanitizer_get_heap_size() { - uptr stats[AllocatorStatCount]; - allocator.GetStats(stats); - return stats[AllocatorStatMapped]; -} - -SANITIZER_INTERFACE_ATTRIBUTE -uptr __sanitizer_get_free_bytes() { return 0; } - -SANITIZER_INTERFACE_ATTRIBUTE -uptr __sanitizer_get_unmapped_bytes() { return 0; } - -SANITIZER_INTERFACE_ATTRIBUTE -uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } - -SANITIZER_INTERFACE_ATTRIBUTE -int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; } - -SANITIZER_INTERFACE_ATTRIBUTE -uptr __sanitizer_get_allocated_size(const void *p) { - return GetMallocUsableSize(p); -} - -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -// Provide default (no-op) implementation of malloc 
hooks. -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_malloc_hook(void *ptr, uptr size) { - (void)ptr; - (void)size; -} -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_free_hook(void *ptr) { - (void)ptr; -} -#endif -} // extern "C" diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp new file mode 100644 index 00000000000..66a81ab350e --- /dev/null +++ b/compiler-rt/lib/lsan/lsan_allocator.cpp @@ -0,0 +1,353 @@ +//=-- lsan_allocator.cpp --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// See lsan_allocator.h for details. +// +//===----------------------------------------------------------------------===// + +#include "lsan_allocator.h" + +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_allocator_report.h" +#include "sanitizer_common/sanitizer_errno.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "lsan_common.h" + +extern "C" void *memset(void *ptr, int value, uptr num); + +namespace __lsan { +#if defined(__i386__) || defined(__arm__) +static const uptr kMaxAllowedMallocSize = 1UL << 30; +#elif defined(__mips64) || defined(__aarch64__) +static const uptr kMaxAllowedMallocSize = 4UL << 30; +#else +static const uptr kMaxAllowedMallocSize = 8UL << 30; +#endif + +static Allocator allocator; + +void InitializeAllocator() { + SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); + allocator.InitLinkerInitialized( + common_flags()->allocator_release_to_os_interval_ms); +} + +void AllocatorThreadFinish() { + allocator.SwallowCache(GetAllocatorCache()); +} + +static ChunkMetadata *Metadata(const void *p) { + return reinterpret_cast(allocator.GetMetaData(p)); +} + +static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { + if (!p) return; + ChunkMetadata *m = Metadata(p); + CHECK(m); + m->tag = DisabledInThisThread() ? 
kIgnored : kDirectlyLeaked; + m->stack_trace_id = StackDepotPut(stack); + m->requested_size = size; + atomic_store(reinterpret_cast(m), 1, memory_order_relaxed); +} + +static void RegisterDeallocation(void *p) { + if (!p) return; + ChunkMetadata *m = Metadata(p); + CHECK(m); + atomic_store(reinterpret_cast(m), 0, memory_order_relaxed); +} + +static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) { + if (AllocatorMayReturnNull()) { + Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size); + return nullptr; + } + ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack); +} + +void *Allocate(const StackTrace &stack, uptr size, uptr alignment, + bool cleared) { + if (size == 0) + size = 1; + if (size > kMaxAllowedMallocSize) + return ReportAllocationSizeTooBig(size, stack); + void *p = allocator.Allocate(GetAllocatorCache(), size, alignment); + if (UNLIKELY(!p)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, &stack); + } + // Do not rely on the allocator to clear the memory (it's slow). + if (cleared && allocator.FromPrimary(p)) + memset(p, 0, size); + RegisterAllocation(stack, p, size); + if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size); + RunMallocHooks(p, size); + return p; +} + +static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, &stack); + } + size *= nmemb; + return Allocate(stack, size, 1, true); +} + +void Deallocate(void *p) { + if (&__sanitizer_free_hook) __sanitizer_free_hook(p); + RunFreeHooks(p); + RegisterDeallocation(p); + allocator.Deallocate(GetAllocatorCache(), p); +} + +void *Reallocate(const StackTrace &stack, void *p, uptr new_size, + uptr alignment) { + RegisterDeallocation(p); + if (new_size > kMaxAllowedMallocSize) { + allocator.Deallocate(GetAllocatorCache(), p); + return ReportAllocationSizeTooBig(new_size, stack); + } + p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment); + RegisterAllocation(stack, p, new_size); + return p; +} + +void GetAllocatorCacheRange(uptr *begin, uptr *end) { + *begin = (uptr)GetAllocatorCache(); + *end = *begin + sizeof(AllocatorCache); +} + +uptr GetMallocUsableSize(const void *p) { + ChunkMetadata *m = Metadata(p); + if (!m) return 0; + return m->requested_size; +} + +int lsan_posix_memalign(void **memptr, uptr alignment, uptr size, + const StackTrace &stack) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, &stack); + } + void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory); + if (UNLIKELY(!ptr)) + // OOM error is already taken care of by Allocate. 
+ return errno_ENOMEM; + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, &stack); + } + return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); +} + +void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) { + if (UNLIKELY(!IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, &stack); + } + return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); +} + +void *lsan_malloc(uptr size, const StackTrace &stack) { + return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory)); +} + +void lsan_free(void *p) { + Deallocate(p); +} + +void *lsan_realloc(void *p, uptr size, const StackTrace &stack) { + return SetErrnoOnNull(Reallocate(stack, p, size, 1)); +} + +void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size, + const StackTrace &stack) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + ReportReallocArrayOverflow(nmemb, size, &stack); + } + return lsan_realloc(ptr, nmemb * size, stack); +} + +void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) { + return SetErrnoOnNull(Calloc(nmemb, size, stack)); +} + +void *lsan_valloc(uptr size, const StackTrace &stack) { + return SetErrnoOnNull( + Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory)); +} + +void *lsan_pvalloc(uptr size, const StackTrace &stack) { + uptr PageSize = GetPageSizeCached(); + if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + ReportPvallocOverflow(size, &stack); + } + // pvalloc(0) should allocate one page. + size = size ? RoundUpTo(size, PageSize) : PageSize; + return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory)); +} + +uptr lsan_mz_size(const void *p) { + return GetMallocUsableSize(p); +} + +///// Interface to the common LSan module. ///// + +void LockAllocator() { + allocator.ForceLock(); +} + +void UnlockAllocator() { + allocator.ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&allocator; + *end = *begin + sizeof(allocator); +} + +uptr PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast(p); + uptr chunk = reinterpret_cast(allocator.GetBlockBeginFastLocked(p)); + if (!chunk) return 0; + // LargeMmapAllocator considers pointers to the meta-region of a chunk to be + // valid, but we don't want that. 
+ if (addr < chunk) return 0; + ChunkMetadata *m = Metadata(reinterpret_cast(chunk)); + CHECK(m); + if (!m->allocated) + return 0; + if (addr < chunk + m->requested_size) + return chunk; + if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr)) + return chunk; + return 0; +} + +uptr GetUserBegin(uptr chunk) { + return chunk; +} + +LsanMetadata::LsanMetadata(uptr chunk) { + metadata_ = Metadata(reinterpret_cast(chunk)); + CHECK(metadata_); +} + +bool LsanMetadata::allocated() const { + return reinterpret_cast(metadata_)->allocated; +} + +ChunkTag LsanMetadata::tag() const { + return reinterpret_cast(metadata_)->tag; +} + +void LsanMetadata::set_tag(ChunkTag value) { + reinterpret_cast(metadata_)->tag = value; +} + +uptr LsanMetadata::requested_size() const { + return reinterpret_cast(metadata_)->requested_size; +} + +u32 LsanMetadata::stack_trace_id() const { + return reinterpret_cast(metadata_)->stack_trace_id; +} + +void ForEachChunk(ForEachChunkCallback callback, void *arg) { + allocator.ForEachChunk(callback, arg); +} + +IgnoreObjectResult IgnoreObjectLocked(const void *p) { + void *chunk = allocator.GetBlockBegin(p); + if (!chunk || p < chunk) return kIgnoreObjectInvalid; + ChunkMetadata *m = Metadata(chunk); + CHECK(m); + if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) { + if (m->tag == kIgnored) + return kIgnoreObjectAlreadyIgnored; + m->tag = kIgnored; + return kIgnoreObjectSuccess; + } else { + return kIgnoreObjectInvalid; + } +} +} // namespace __lsan + +using namespace __lsan; + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_current_allocated_bytes() { + uptr stats[AllocatorStatCount]; + allocator.GetStats(stats); + return stats[AllocatorStatAllocated]; +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_heap_size() { + uptr stats[AllocatorStatCount]; + allocator.GetStats(stats); + return stats[AllocatorStatMapped]; +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_free_bytes() { return 0; } + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_unmapped_bytes() { return 0; } + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } + +SANITIZER_INTERFACE_ATTRIBUTE +int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; } + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_allocated_size(const void *p) { + return GetMallocUsableSize(p); +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +// Provide default (no-op) implementation of malloc hooks. +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_malloc_hook(void *ptr, uptr size) { + (void)ptr; + (void)size; +} +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_free_hook(void *ptr) { + (void)ptr; +} +#endif +} // extern "C" diff --git a/compiler-rt/lib/lsan/lsan_common.cc b/compiler-rt/lib/lsan/lsan_common.cc deleted file mode 100644 index 7c842a152d5..00000000000 --- a/compiler-rt/lib/lsan/lsan_common.cc +++ /dev/null @@ -1,904 +0,0 @@ -//=-- lsan_common.cc ------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer. -// Implementation of common leak checking functionality. 
-// -//===----------------------------------------------------------------------===// - -#include "lsan_common.h" - -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flag_parser.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_placement_new.h" -#include "sanitizer_common/sanitizer_procmaps.h" -#include "sanitizer_common/sanitizer_report_decorator.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_stacktrace.h" -#include "sanitizer_common/sanitizer_suppressions.h" -#include "sanitizer_common/sanitizer_thread_registry.h" -#include "sanitizer_common/sanitizer_tls_get_addr.h" - -#if CAN_SANITIZE_LEAKS -namespace __lsan { - -// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and -// also to protect the global list of root regions. -BlockingMutex global_mutex(LINKER_INITIALIZED); - -Flags lsan_flags; - -void DisableCounterUnderflow() { - if (common_flags()->detect_leaks) { - Report("Unmatched call to __lsan_enable().\n"); - Die(); - } -} - -void Flags::SetDefaults() { -#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; -#include "lsan_flags.inc" -#undef LSAN_FLAG -} - -void RegisterLsanFlags(FlagParser *parser, Flags *f) { -#define LSAN_FLAG(Type, Name, DefaultValue, Description) \ - RegisterFlag(parser, #Name, Description, &f->Name); -#include "lsan_flags.inc" -#undef LSAN_FLAG -} - -#define LOG_POINTERS(...) \ - do { \ - if (flags()->log_pointers) Report(__VA_ARGS__); \ - } while (0) - -#define LOG_THREADS(...) \ - do { \ - if (flags()->log_threads) Report(__VA_ARGS__); \ - } while (0) - -ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; -static SuppressionContext *suppression_ctx = nullptr; -static const char kSuppressionLeak[] = "leak"; -static const char *kSuppressionTypes[] = { kSuppressionLeak }; -static const char kStdSuppressions[] = -#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT - // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT - // definition. - "leak:*pthread_exit*\n" -#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT -#if SANITIZER_MAC - // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173 - "leak:*_os_trace*\n" -#endif - // TLS leak in some glibc versions, described in - // https://sourceware.org/bugzilla/show_bug.cgi?id=12650. - "leak:*tls_get_addr*\n"; - -void InitializeSuppressions() { - CHECK_EQ(nullptr, suppression_ctx); - suppression_ctx = new (suppression_placeholder) // NOLINT - SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); - suppression_ctx->ParseFromFile(flags()->suppressions); - if (&__lsan_default_suppressions) - suppression_ctx->Parse(__lsan_default_suppressions()); - suppression_ctx->Parse(kStdSuppressions); -} - -static SuppressionContext *GetSuppressionContext() { - CHECK(suppression_ctx); - return suppression_ctx; -} - -static InternalMmapVector *root_regions; - -InternalMmapVector const *GetRootRegions() { return root_regions; } - -void InitializeRootRegions() { - CHECK(!root_regions); - ALIGNED(64) static char placeholder[sizeof(InternalMmapVector)]; - root_regions = new (placeholder) InternalMmapVector(); // NOLINT -} - -const char *MaybeCallLsanDefaultOptions() { - return (&__lsan_default_options) ? 
__lsan_default_options() : ""; -} - -void InitCommonLsan() { - InitializeRootRegions(); - if (common_flags()->detect_leaks) { - // Initialization which can fail or print warnings should only be done if - // LSan is actually enabled. - InitializeSuppressions(); - InitializePlatformSpecificModules(); - } -} - -class Decorator: public __sanitizer::SanitizerCommonDecorator { - public: - Decorator() : SanitizerCommonDecorator() { } - const char *Error() { return Red(); } - const char *Leak() { return Blue(); } -}; - -static inline bool CanBeAHeapPointer(uptr p) { - // Since our heap is located in mmap-ed memory, we can assume a sensible lower - // bound on heap addresses. - const uptr kMinAddress = 4 * 4096; - if (p < kMinAddress) return false; -#if defined(__x86_64__) - // Accept only canonical form user-space addresses. - return ((p >> 47) == 0); -#elif defined(__mips64) - return ((p >> 40) == 0); -#elif defined(__aarch64__) - unsigned runtimeVMA = - (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); - return ((p >> runtimeVMA) == 0); -#else - return true; -#endif -} - -// Scans the memory range, looking for byte patterns that point into allocator -// chunks. Marks those chunks with |tag| and adds them to |frontier|. -// There are two usage modes for this function: finding reachable chunks -// (|tag| = kReachable) and finding indirectly leaked chunks -// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, -// so |frontier| = 0. -void ScanRangeForPointers(uptr begin, uptr end, - Frontier *frontier, - const char *region_type, ChunkTag tag) { - CHECK(tag == kReachable || tag == kIndirectlyLeaked); - const uptr alignment = flags()->pointer_alignment(); - LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end); - uptr pp = begin; - if (pp % alignment) - pp = pp + alignment - pp % alignment; - for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT - void *p = *reinterpret_cast(pp); - if (!CanBeAHeapPointer(reinterpret_cast(p))) continue; - uptr chunk = PointsIntoChunk(p); - if (!chunk) continue; - // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. - if (chunk == begin) continue; - LsanMetadata m(chunk); - if (m.tag() == kReachable || m.tag() == kIgnored) continue; - - // Do this check relatively late so we can log only the interesting cases. 
- if (!flags()->use_poisoned && WordIsPoisoned(pp)) { - LOG_POINTERS( - "%p is poisoned: ignoring %p pointing into chunk %p-%p of size " - "%zu.\n", - pp, p, chunk, chunk + m.requested_size(), m.requested_size()); - continue; - } - - m.set_tag(tag); - LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p, - chunk, chunk + m.requested_size(), m.requested_size()); - if (frontier) - frontier->push_back(chunk); - } -} - -// Scans a global range for pointers -void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) { - uptr allocator_begin = 0, allocator_end = 0; - GetAllocatorGlobalRange(&allocator_begin, &allocator_end); - if (begin <= allocator_begin && allocator_begin < end) { - CHECK_LE(allocator_begin, allocator_end); - CHECK_LE(allocator_end, end); - if (begin < allocator_begin) - ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL", - kReachable); - if (allocator_end < end) - ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable); - } else { - ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable); - } -} - -void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { - Frontier *frontier = reinterpret_cast(arg); - ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); -} - -// Scans thread data (stacks and TLS) for heap pointers. -static void ProcessThreads(SuspendedThreadsList const &suspended_threads, - Frontier *frontier) { - InternalMmapVector registers(suspended_threads.RegisterCount()); - uptr registers_begin = reinterpret_cast(registers.data()); - uptr registers_end = - reinterpret_cast(registers.data() + registers.size()); - for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { - tid_t os_id = static_cast(suspended_threads.GetThreadID(i)); - LOG_THREADS("Processing thread %d.\n", os_id); - uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; - DTLS *dtls; - bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, - &tls_begin, &tls_end, - &cache_begin, &cache_end, &dtls); - if (!thread_found) { - // If a thread can't be found in the thread registry, it's probably in the - // process of destruction. Log this event and move on. - LOG_THREADS("Thread %d not found in registry.\n", os_id); - continue; - } - uptr sp; - PtraceRegistersStatus have_registers = - suspended_threads.GetRegistersAndSP(i, registers.data(), &sp); - if (have_registers != REGISTERS_AVAILABLE) { - Report("Unable to get registers from thread %d.\n", os_id); - // If unable to get SP, consider the entire stack to be reachable unless - // GetRegistersAndSP failed with ESRCH. - if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue; - sp = stack_begin; - } - - if (flags()->use_registers && have_registers) - ScanRangeForPointers(registers_begin, registers_end, frontier, - "REGISTERS", kReachable); - - if (flags()->use_stacks) { - LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp); - if (sp < stack_begin || sp >= stack_end) { - // SP is outside the recorded stack range (e.g. the thread is running a - // signal handler on alternate stack, or swapcontext was used). - // Again, consider the entire stack range to be reachable. 
- LOG_THREADS("WARNING: stack pointer not in stack range.\n"); - uptr page_size = GetPageSizeCached(); - int skipped = 0; - while (stack_begin < stack_end && - !IsAccessibleMemoryRange(stack_begin, 1)) { - skipped++; - stack_begin += page_size; - } - LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", - skipped, stack_begin, stack_end); - } else { - // Shrink the stack range to ignore out-of-scope values. - stack_begin = sp; - } - ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", - kReachable); - ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier); - } - - if (flags()->use_tls) { - if (tls_begin) { - LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end); - // If the tls and cache ranges don't overlap, scan full tls range, - // otherwise, only scan the non-overlapping portions - if (cache_begin == cache_end || tls_end < cache_begin || - tls_begin > cache_end) { - ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); - } else { - if (tls_begin < cache_begin) - ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", - kReachable); - if (tls_end > cache_end) - ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", - kReachable); - } - } - if (dtls && !DTLSInDestruction(dtls)) { - for (uptr j = 0; j < dtls->dtv_size; ++j) { - uptr dtls_beg = dtls->dtv[j].beg; - uptr dtls_end = dtls_beg + dtls->dtv[j].size; - if (dtls_beg < dtls_end) { - LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end); - ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS", - kReachable); - } - } - } else { - // We are handling a thread with DTLS under destruction. Log about - // this and continue. - LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id); - } - } - } -} - -void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, - uptr region_begin, uptr region_end, bool is_readable) { - uptr intersection_begin = Max(root_region.begin, region_begin); - uptr intersection_end = Min(region_end, root_region.begin + root_region.size); - if (intersection_begin >= intersection_end) return; - LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n", - root_region.begin, root_region.begin + root_region.size, - region_begin, region_end, - is_readable ? "readable" : "unreadable"); - if (is_readable) - ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT", - kReachable); -} - -static void ProcessRootRegion(Frontier *frontier, - const RootRegion &root_region) { - MemoryMappingLayout proc_maps(/*cache_enabled*/ true); - MemoryMappedSegment segment; - while (proc_maps.Next(&segment)) { - ScanRootRegion(frontier, root_region, segment.start, segment.end, - segment.IsReadable()); - } -} - -// Scans root regions for heap pointers. -static void ProcessRootRegions(Frontier *frontier) { - if (!flags()->use_root_regions) return; - CHECK(root_regions); - for (uptr i = 0; i < root_regions->size(); i++) { - ProcessRootRegion(frontier, (*root_regions)[i]); - } -} - -static void FloodFillTag(Frontier *frontier, ChunkTag tag) { - while (frontier->size()) { - uptr next_chunk = frontier->back(); - frontier->pop_back(); - LsanMetadata m(next_chunk); - ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, - "HEAP", tag); - } -} - -// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks -// which are reachable from it as indirectly leaked. 
-static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { - chunk = GetUserBegin(chunk); - LsanMetadata m(chunk); - if (m.allocated() && m.tag() != kReachable) { - ScanRangeForPointers(chunk, chunk + m.requested_size(), - /* frontier */ nullptr, "HEAP", kIndirectlyLeaked); - } -} - -// ForEachChunk callback. If chunk is marked as ignored, adds its address to -// frontier. -static void CollectIgnoredCb(uptr chunk, void *arg) { - CHECK(arg); - chunk = GetUserBegin(chunk); - LsanMetadata m(chunk); - if (m.allocated() && m.tag() == kIgnored) { - LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", - chunk, chunk + m.requested_size(), m.requested_size()); - reinterpret_cast(arg)->push_back(chunk); - } -} - -static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) { - CHECK(stack_id); - StackTrace stack = map->Get(stack_id); - // The top frame is our malloc/calloc/etc. The next frame is the caller. - if (stack.size >= 2) - return stack.trace[1]; - return 0; -} - -struct InvalidPCParam { - Frontier *frontier; - StackDepotReverseMap *stack_depot_reverse_map; - bool skip_linker_allocations; -}; - -// ForEachChunk callback. If the caller pc is invalid or is within the linker, -// mark as reachable. Called by ProcessPlatformSpecificAllocations. -static void MarkInvalidPCCb(uptr chunk, void *arg) { - CHECK(arg); - InvalidPCParam *param = reinterpret_cast(arg); - chunk = GetUserBegin(chunk); - LsanMetadata m(chunk); - if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) { - u32 stack_id = m.stack_trace_id(); - uptr caller_pc = 0; - if (stack_id > 0) - caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map); - // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark - // it as reachable, as we can't properly report its allocation stack anyway. - if (caller_pc == 0 || (param->skip_linker_allocations && - GetLinker()->containsAddress(caller_pc))) { - m.set_tag(kReachable); - param->frontier->push_back(chunk); - } - } -} - -// On Linux, treats all chunks allocated from ld-linux.so as reachable, which -// covers dynamically allocated TLS blocks, internal dynamic loader's loaded -// modules accounting etc. -// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. -// They are allocated with a __libc_memalign() call in allocate_and_init() -// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those -// blocks, but we can make sure they come from our own allocator by intercepting -// __libc_memalign(). On top of that, there is no easy way to reach them. Their -// addresses are stored in a dynamically allocated array (the DTV) which is -// referenced from the static TLS. Unfortunately, we can't just rely on the DTV -// being reachable from the static TLS, and the dynamic TLS being reachable from -// the DTV. This is because the initial DTV is allocated before our interception -// mechanism kicks in, and thus we don't recognize it as allocated memory. We -// can't special-case it either, since we don't know its size. -// Our solution is to include in the root set all allocations made from -// ld-linux.so (which is where allocate_and_init() is implemented). This is -// guaranteed to include all dynamic TLS blocks (and possibly other allocations -// which we don't care about). -// On all other platforms, this simply checks to ensure that the caller pc is -// valid before reporting chunks as leaked. 
-void ProcessPC(Frontier *frontier) { - StackDepotReverseMap stack_depot_reverse_map; - InvalidPCParam arg; - arg.frontier = frontier; - arg.stack_depot_reverse_map = &stack_depot_reverse_map; - arg.skip_linker_allocations = - flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr; - ForEachChunk(MarkInvalidPCCb, &arg); -} - -// Sets the appropriate tag on each chunk. -static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { - // Holds the flood fill frontier. - Frontier frontier; - - ForEachChunk(CollectIgnoredCb, &frontier); - ProcessGlobalRegions(&frontier); - ProcessThreads(suspended_threads, &frontier); - ProcessRootRegions(&frontier); - FloodFillTag(&frontier, kReachable); - - CHECK_EQ(0, frontier.size()); - ProcessPC(&frontier); - - // The check here is relatively expensive, so we do this in a separate flood - // fill. That way we can skip the check for chunks that are reachable - // otherwise. - LOG_POINTERS("Processing platform-specific allocations.\n"); - ProcessPlatformSpecificAllocations(&frontier); - FloodFillTag(&frontier, kReachable); - - // Iterate over leaked chunks and mark those that are reachable from other - // leaked chunks. - LOG_POINTERS("Scanning leaked chunks.\n"); - ForEachChunk(MarkIndirectlyLeakedCb, nullptr); -} - -// ForEachChunk callback. Resets the tags to pre-leak-check state. -static void ResetTagsCb(uptr chunk, void *arg) { - (void)arg; - chunk = GetUserBegin(chunk); - LsanMetadata m(chunk); - if (m.allocated() && m.tag() != kIgnored) - m.set_tag(kDirectlyLeaked); -} - -static void PrintStackTraceById(u32 stack_trace_id) { - CHECK(stack_trace_id); - StackDepotGet(stack_trace_id).Print(); -} - -// ForEachChunk callback. Aggregates information about unreachable chunks into -// a LeakReport. -static void CollectLeaksCb(uptr chunk, void *arg) { - CHECK(arg); - LeakReport *leak_report = reinterpret_cast(arg); - chunk = GetUserBegin(chunk); - LsanMetadata m(chunk); - if (!m.allocated()) return; - if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { - u32 resolution = flags()->resolution; - u32 stack_trace_id = 0; - if (resolution > 0) { - StackTrace stack = StackDepotGet(m.stack_trace_id()); - stack.size = Min(stack.size, resolution); - stack_trace_id = StackDepotPut(stack); - } else { - stack_trace_id = m.stack_trace_id(); - } - leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(), - m.tag()); - } -} - -static void PrintMatchedSuppressions() { - InternalMmapVector matched; - GetSuppressionContext()->GetMatched(&matched); - if (!matched.size()) - return; - const char *line = "-----------------------------------------------------"; - Printf("%s\n", line); - Printf("Suppressions used:\n"); - Printf(" count bytes template\n"); - for (uptr i = 0; i < matched.size(); i++) - Printf("%7zu %10zu %s\n", static_cast(atomic_load_relaxed( - &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ); - Printf("%s\n\n", line); -} - -struct CheckForLeaksParam { - bool success; - LeakReport leak_report; -}; - -static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) { - const InternalMmapVector &suspended_threads = - *(const InternalMmapVector *)arg; - if (tctx->status == ThreadStatusRunning) { - uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(), - tctx->os_id, CompareLess()); - if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id) - Report("Running thread %d was not suspended. 
False leaks are possible.\n", - tctx->os_id); - }; -} - -static void ReportUnsuspendedThreads( - const SuspendedThreadsList &suspended_threads) { - InternalMmapVector threads(suspended_threads.ThreadCount()); - for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i) - threads[i] = suspended_threads.GetThreadID(i); - - Sort(threads.data(), threads.size()); - - GetThreadRegistryLocked()->RunCallbackForEachThreadLocked( - &ReportIfNotSuspended, &threads); -} - -static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, - void *arg) { - CheckForLeaksParam *param = reinterpret_cast(arg); - CHECK(param); - CHECK(!param->success); - ReportUnsuspendedThreads(suspended_threads); - ClassifyAllChunks(suspended_threads); - ForEachChunk(CollectLeaksCb, ¶m->leak_report); - // Clean up for subsequent leak checks. This assumes we did not overwrite any - // kIgnored tags. - ForEachChunk(ResetTagsCb, nullptr); - param->success = true; -} - -static bool CheckForLeaks() { - if (&__lsan_is_turned_off && __lsan_is_turned_off()) - return false; - EnsureMainThreadIDIsCorrect(); - CheckForLeaksParam param; - param.success = false; - LockThreadRegistry(); - LockAllocator(); - DoStopTheWorld(CheckForLeaksCallback, ¶m); - UnlockAllocator(); - UnlockThreadRegistry(); - - if (!param.success) { - Report("LeakSanitizer has encountered a fatal error.\n"); - Report( - "HINT: For debugging, try setting environment variable " - "LSAN_OPTIONS=verbosity=1:log_threads=1\n"); - Report( - "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n"); - Die(); - } - param.leak_report.ApplySuppressions(); - uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount(); - if (unsuppressed_count > 0) { - Decorator d; - Printf("\n" - "=================================================================" - "\n"); - Printf("%s", d.Error()); - Report("ERROR: LeakSanitizer: detected memory leaks\n"); - Printf("%s", d.Default()); - param.leak_report.ReportTopLeaks(flags()->max_leaks); - } - if (common_flags()->print_suppressions) - PrintMatchedSuppressions(); - if (unsuppressed_count > 0) { - param.leak_report.PrintSummary(); - return true; - } - return false; -} - -static bool has_reported_leaks = false; -bool HasReportedLeaks() { return has_reported_leaks; } - -void DoLeakCheck() { - BlockingMutexLock l(&global_mutex); - static bool already_done; - if (already_done) return; - already_done = true; - has_reported_leaks = CheckForLeaks(); - if (has_reported_leaks) HandleLeaks(); -} - -static int DoRecoverableLeakCheck() { - BlockingMutexLock l(&global_mutex); - bool have_leaks = CheckForLeaks(); - return have_leaks ? 1 : 0; -} - -void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); } - -static Suppression *GetSuppressionForAddr(uptr addr) { - Suppression *s = nullptr; - - // Suppress by module name. - SuppressionContext *suppressions = GetSuppressionContext(); - if (const char *module_name = - Symbolizer::GetOrInit()->GetModuleNameForPc(addr)) - if (suppressions->Match(module_name, kSuppressionLeak, &s)) - return s; - - // Suppress by file or function name. 
- SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); - for (SymbolizedStack *cur = frames; cur; cur = cur->next) { - if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) || - suppressions->Match(cur->info.file, kSuppressionLeak, &s)) { - break; - } - } - frames->ClearAll(); - return s; -} - -static Suppression *GetSuppressionForStack(u32 stack_trace_id) { - StackTrace stack = StackDepotGet(stack_trace_id); - for (uptr i = 0; i < stack.size; i++) { - Suppression *s = GetSuppressionForAddr( - StackTrace::GetPreviousInstructionPc(stack.trace[i])); - if (s) return s; - } - return nullptr; -} - -///// LeakReport implementation. ///// - -// A hard limit on the number of distinct leaks, to avoid quadratic complexity -// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks -// in real-world applications. -// FIXME: Get rid of this limit by changing the implementation of LeakReport to -// use a hash table. -const uptr kMaxLeaksConsidered = 5000; - -void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id, - uptr leaked_size, ChunkTag tag) { - CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); - bool is_directly_leaked = (tag == kDirectlyLeaked); - uptr i; - for (i = 0; i < leaks_.size(); i++) { - if (leaks_[i].stack_trace_id == stack_trace_id && - leaks_[i].is_directly_leaked == is_directly_leaked) { - leaks_[i].hit_count++; - leaks_[i].total_size += leaked_size; - break; - } - } - if (i == leaks_.size()) { - if (leaks_.size() == kMaxLeaksConsidered) return; - Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id, - is_directly_leaked, /* is_suppressed */ false }; - leaks_.push_back(leak); - } - if (flags()->report_objects) { - LeakedObject obj = {leaks_[i].id, chunk, leaked_size}; - leaked_objects_.push_back(obj); - } -} - -static bool LeakComparator(const Leak &leak1, const Leak &leak2) { - if (leak1.is_directly_leaked == leak2.is_directly_leaked) - return leak1.total_size > leak2.total_size; - else - return leak1.is_directly_leaked; -} - -void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { - CHECK(leaks_.size() <= kMaxLeaksConsidered); - Printf("\n"); - if (leaks_.size() == kMaxLeaksConsidered) - Printf("Too many leaks! Only the first %zu leaks encountered will be " - "reported.\n", - kMaxLeaksConsidered); - - uptr unsuppressed_count = UnsuppressedLeakCount(); - if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) - Printf("The %zu top leak(s):\n", num_leaks_to_report); - Sort(leaks_.data(), leaks_.size(), &LeakComparator); - uptr leaks_reported = 0; - for (uptr i = 0; i < leaks_.size(); i++) { - if (leaks_[i].is_suppressed) continue; - PrintReportForLeak(i); - leaks_reported++; - if (leaks_reported == num_leaks_to_report) break; - } - if (leaks_reported < unsuppressed_count) { - uptr remaining = unsuppressed_count - leaks_reported; - Printf("Omitting %zu more leak(s).\n", remaining); - } -} - -void LeakReport::PrintReportForLeak(uptr index) { - Decorator d; - Printf("%s", d.Leak()); - Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n", - leaks_[index].is_directly_leaked ? 
"Direct" : "Indirect", - leaks_[index].total_size, leaks_[index].hit_count); - Printf("%s", d.Default()); - - PrintStackTraceById(leaks_[index].stack_trace_id); - - if (flags()->report_objects) { - Printf("Objects leaked above:\n"); - PrintLeakedObjectsForLeak(index); - Printf("\n"); - } -} - -void LeakReport::PrintLeakedObjectsForLeak(uptr index) { - u32 leak_id = leaks_[index].id; - for (uptr j = 0; j < leaked_objects_.size(); j++) { - if (leaked_objects_[j].leak_id == leak_id) - Printf("%p (%zu bytes)\n", leaked_objects_[j].addr, - leaked_objects_[j].size); - } -} - -void LeakReport::PrintSummary() { - CHECK(leaks_.size() <= kMaxLeaksConsidered); - uptr bytes = 0, allocations = 0; - for (uptr i = 0; i < leaks_.size(); i++) { - if (leaks_[i].is_suppressed) continue; - bytes += leaks_[i].total_size; - allocations += leaks_[i].hit_count; - } - InternalScopedString summary(kMaxSummaryLength); - summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, - allocations); - ReportErrorSummary(summary.data()); -} - -void LeakReport::ApplySuppressions() { - for (uptr i = 0; i < leaks_.size(); i++) { - Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); - if (s) { - s->weight += leaks_[i].total_size; - atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + - leaks_[i].hit_count); - leaks_[i].is_suppressed = true; - } - } -} - -uptr LeakReport::UnsuppressedLeakCount() { - uptr result = 0; - for (uptr i = 0; i < leaks_.size(); i++) - if (!leaks_[i].is_suppressed) result++; - return result; -} - -} // namespace __lsan -#else // CAN_SANITIZE_LEAKS -namespace __lsan { -void InitCommonLsan() { } -void DoLeakCheck() { } -void DoRecoverableLeakCheckVoid() { } -void DisableInThisThread() { } -void EnableInThisThread() { } -} -#endif // CAN_SANITIZE_LEAKS - -using namespace __lsan; // NOLINT - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -void __lsan_ignore_object(const void *p) { -#if CAN_SANITIZE_LEAKS - if (!common_flags()->detect_leaks) - return; - // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not - // locked. 
-  BlockingMutexLock l(&global_mutex);
-  IgnoreObjectResult res = IgnoreObjectLocked(p);
-  if (res == kIgnoreObjectInvalid)
-    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
-  if (res == kIgnoreObjectAlreadyIgnored)
-    VReport(1, "__lsan_ignore_object(): "
-               "heap object at %p is already being ignored\n", p);
-  if (res == kIgnoreObjectSuccess)
-    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
-#endif // CAN_SANITIZE_LEAKS
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __lsan_register_root_region(const void *begin, uptr size) {
-#if CAN_SANITIZE_LEAKS
-  BlockingMutexLock l(&global_mutex);
-  CHECK(root_regions);
-  RootRegion region = {reinterpret_cast<uptr>(begin), size};
-  root_regions->push_back(region);
-  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
-#endif // CAN_SANITIZE_LEAKS
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __lsan_unregister_root_region(const void *begin, uptr size) {
-#if CAN_SANITIZE_LEAKS
-  BlockingMutexLock l(&global_mutex);
-  CHECK(root_regions);
-  bool removed = false;
-  for (uptr i = 0; i < root_regions->size(); i++) {
-    RootRegion region = (*root_regions)[i];
-    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
-      removed = true;
-      uptr last_index = root_regions->size() - 1;
-      (*root_regions)[i] = (*root_regions)[last_index];
-      root_regions->pop_back();
-      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
-      break;
-    }
-  }
-  if (!removed) {
-    Report(
-        "__lsan_unregister_root_region(): region at %p of size %llu has not "
-        "been registered.\n",
-        begin, size);
-    Die();
-  }
-#endif // CAN_SANITIZE_LEAKS
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __lsan_disable() {
-#if CAN_SANITIZE_LEAKS
-  __lsan::DisableInThisThread();
-#endif
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __lsan_enable() {
-#if CAN_SANITIZE_LEAKS
-  __lsan::EnableInThisThread();
-#endif
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __lsan_do_leak_check() {
-#if CAN_SANITIZE_LEAKS
-  if (common_flags()->detect_leaks)
-    __lsan::DoLeakCheck();
-#endif // CAN_SANITIZE_LEAKS
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-int __lsan_do_recoverable_leak_check() {
-#if CAN_SANITIZE_LEAKS
-  if (common_flags()->detect_leaks)
-    return __lsan::DoRecoverableLeakCheck();
-#endif // CAN_SANITIZE_LEAKS
-  return 0;
-}
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char * __lsan_default_options() {
-  return "";
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-int __lsan_is_turned_off() {
-  return 0;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char *__lsan_default_suppressions() {
-  return "";
-}
-#endif
-} // extern "C"
diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
new file mode 100644
index 00000000000..c39fab97c64
--- /dev/null
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -0,0 +1,904 @@
+//=-- lsan_common.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+#if CAN_SANITIZE_LEAKS
+namespace __lsan {
+
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
+BlockingMutex global_mutex(LINKER_INITIALIZED);
+
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+  if (common_flags()->detect_leaks) {
+    Report("Unmatched call to __lsan_enable().\n");
+    Die();
+  }
+}
+
+void Flags::SetDefaults() {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
+
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
+
+#define LOG_POINTERS(...)                           \
+  do {                                              \
+    if (flags()->log_pointers) Report(__VA_ARGS__); \
+  } while (0)
+
+#define LOG_THREADS(...)                           \
+  do {                                             \
+    if (flags()->log_threads) Report(__VA_ARGS__); \
+  } while (0)
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char kStdSuppressions[] =
+#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+    // definition.
+    "leak:*pthread_exit*\n"
+#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+#if SANITIZER_MAC
+    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+    "leak:*_os_trace*\n"
+#endif
+    // TLS leak in some glibc versions, described in
+    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+    "leak:*tls_get_addr*\n";
+
+void InitializeSuppressions() {
+  CHECK_EQ(nullptr, suppression_ctx);
+  suppression_ctx = new (suppression_placeholder) // NOLINT
+      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+  suppression_ctx->ParseFromFile(flags()->suppressions);
+  if (&__lsan_default_suppressions)
+    suppression_ctx->Parse(__lsan_default_suppressions());
+  suppression_ctx->Parse(kStdSuppressions);
+}
+
+static SuppressionContext *GetSuppressionContext() {
+  CHECK(suppression_ctx);
+  return suppression_ctx;
+}
+
+static InternalMmapVector<RootRegion> *root_regions;
+
+InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
+
+void InitializeRootRegions() {
+  CHECK(!root_regions);
+  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
+  root_regions = new (placeholder) InternalMmapVector<RootRegion>();  // NOLINT
+}
+
+const char *MaybeCallLsanDefaultOptions() {
+  return (&__lsan_default_options) ?
__lsan_default_options() : "";
+}
+
+void InitCommonLsan() {
+  InitializeRootRegions();
+  if (common_flags()->detect_leaks) {
+    // Initialization which can fail or print warnings should only be done if
+    // LSan is actually enabled.
+    InitializeSuppressions();
+    InitializePlatformSpecificModules();
+  }
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+  Decorator() : SanitizerCommonDecorator() { }
+  const char *Error() { return Red(); }
+  const char *Leak() { return Blue(); }
+};
+
+static inline bool CanBeAHeapPointer(uptr p) {
+  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
+  // bound on heap addresses.
+  const uptr kMinAddress = 4 * 4096;
+  if (p < kMinAddress) return false;
+#if defined(__x86_64__)
+  // Accept only canonical form user-space addresses.
+  return ((p >> 47) == 0);
+#elif defined(__mips64)
+  return ((p >> 40) == 0);
+#elif defined(__aarch64__)
+  unsigned runtimeVMA =
+      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+  return ((p >> runtimeVMA) == 0);
+#else
+  return true;
+#endif
+}
+
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
+void ScanRangeForPointers(uptr begin, uptr end,
+                          Frontier *frontier,
+                          const char *region_type, ChunkTag tag) {
+  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
+  const uptr alignment = flags()->pointer_alignment();
+  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
+  uptr pp = begin;
+  if (pp % alignment)
+    pp = pp + alignment - pp % alignment;
+  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
+    void *p = *reinterpret_cast<void **>(pp);
+    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
+    uptr chunk = PointsIntoChunk(p);
+    if (!chunk) continue;
+    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
+    if (chunk == begin) continue;
+    LsanMetadata m(chunk);
+    if (m.tag() == kReachable || m.tag() == kIgnored) continue;
+
+    // Do this check relatively late so we can log only the interesting cases.
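+    // Note: WordIsPoisoned() is a hook into the embedding tool; in the
+    // standalone LSan runtime it is expected to be a cheap stub that always
+    // returns false, so the check below should only cost anything when LSan
+    // runs on top of another sanitizer (e.g. ASan).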
+    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+      LOG_POINTERS(
+          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+          "%zu.\n",
+          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+      continue;
+    }
+
+    m.set_tag(tag);
+    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
+                 chunk, chunk + m.requested_size(), m.requested_size());
+    if (frontier)
+      frontier->push_back(chunk);
+  }
+}
+
+// Scans a global range for pointers
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+  uptr allocator_begin = 0, allocator_end = 0;
+  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+  if (begin <= allocator_begin && allocator_begin < end) {
+    CHECK_LE(allocator_begin, allocator_end);
+    CHECK_LE(allocator_end, end);
+    if (begin < allocator_begin)
+      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+                           kReachable);
+    if (allocator_end < end)
+      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+  } else {
+    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+  }
+}
+
+void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
+  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
+  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
+}
+
+// Scans thread data (stacks and TLS) for heap pointers.
+static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
+                           Frontier *frontier) {
+  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
+  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
+  uptr registers_end =
+      reinterpret_cast<uptr>(registers.data() + registers.size());
+  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
+    LOG_THREADS("Processing thread %d.\n", os_id);
+    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
+    DTLS *dtls;
+    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
+                                              &tls_begin, &tls_end,
+                                              &cache_begin, &cache_end, &dtls);
+    if (!thread_found) {
+      // If a thread can't be found in the thread registry, it's probably in the
+      // process of destruction. Log this event and move on.
+      LOG_THREADS("Thread %d not found in registry.\n", os_id);
+      continue;
+    }
+    uptr sp;
+    PtraceRegistersStatus have_registers =
+        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+    if (have_registers != REGISTERS_AVAILABLE) {
+      Report("Unable to get registers from thread %d.\n", os_id);
+      // If unable to get SP, consider the entire stack to be reachable unless
+      // GetRegistersAndSP failed with ESRCH.
+      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
+      sp = stack_begin;
+    }
+
+    if (flags()->use_registers && have_registers)
+      ScanRangeForPointers(registers_begin, registers_end, frontier,
+                           "REGISTERS", kReachable);
+
+    if (flags()->use_stacks) {
+      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
+      if (sp < stack_begin || sp >= stack_end) {
+        // SP is outside the recorded stack range (e.g. the thread is running a
+        // signal handler on alternate stack, or swapcontext was used).
+        // Again, consider the entire stack range to be reachable.
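+        // With no usable SP, scanning starts at stack_begin; the loop below
+        // walks forward page by page to step over any inaccessible guard
+        // pages before the range is handed to ScanRangeForPointers().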
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n"); + uptr page_size = GetPageSizeCached(); + int skipped = 0; + while (stack_begin < stack_end && + !IsAccessibleMemoryRange(stack_begin, 1)) { + skipped++; + stack_begin += page_size; + } + LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", + skipped, stack_begin, stack_end); + } else { + // Shrink the stack range to ignore out-of-scope values. + stack_begin = sp; + } + ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", + kReachable); + ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier); + } + + if (flags()->use_tls) { + if (tls_begin) { + LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end); + // If the tls and cache ranges don't overlap, scan full tls range, + // otherwise, only scan the non-overlapping portions + if (cache_begin == cache_end || tls_end < cache_begin || + tls_begin > cache_end) { + ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); + } else { + if (tls_begin < cache_begin) + ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", + kReachable); + if (tls_end > cache_end) + ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", + kReachable); + } + } + if (dtls && !DTLSInDestruction(dtls)) { + for (uptr j = 0; j < dtls->dtv_size; ++j) { + uptr dtls_beg = dtls->dtv[j].beg; + uptr dtls_end = dtls_beg + dtls->dtv[j].size; + if (dtls_beg < dtls_end) { + LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end); + ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS", + kReachable); + } + } + } else { + // We are handling a thread with DTLS under destruction. Log about + // this and continue. + LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id); + } + } + } +} + +void ScanRootRegion(Frontier *frontier, const RootRegion &root_region, + uptr region_begin, uptr region_end, bool is_readable) { + uptr intersection_begin = Max(root_region.begin, region_begin); + uptr intersection_end = Min(region_end, root_region.begin + root_region.size); + if (intersection_begin >= intersection_end) return; + LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n", + root_region.begin, root_region.begin + root_region.size, + region_begin, region_end, + is_readable ? "readable" : "unreadable"); + if (is_readable) + ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT", + kReachable); +} + +static void ProcessRootRegion(Frontier *frontier, + const RootRegion &root_region) { + MemoryMappingLayout proc_maps(/*cache_enabled*/ true); + MemoryMappedSegment segment; + while (proc_maps.Next(&segment)) { + ScanRootRegion(frontier, root_region, segment.start, segment.end, + segment.IsReadable()); + } +} + +// Scans root regions for heap pointers. +static void ProcessRootRegions(Frontier *frontier) { + if (!flags()->use_root_regions) return; + CHECK(root_regions); + for (uptr i = 0; i < root_regions->size(); i++) { + ProcessRootRegion(frontier, (*root_regions)[i]); + } +} + +static void FloodFillTag(Frontier *frontier, ChunkTag tag) { + while (frontier->size()) { + uptr next_chunk = frontier->back(); + frontier->pop_back(); + LsanMetadata m(next_chunk); + ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, + "HEAP", tag); + } +} + +// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks +// which are reachable from it as indirectly leaked. 
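+// Put differently, only the roots of a leaked object graph are reported as
+// direct leaks; everything reachable from them is attributed to those roots
+// as indirect leaks.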
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kReachable) {
+    ScanRangeForPointers(chunk, chunk + m.requested_size(),
+                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
+  }
+}
+
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() == kIgnored) {
+    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
+                 chunk, chunk + m.requested_size(), m.requested_size());
+    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+  }
+}
+
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
+  CHECK(stack_id);
+  StackTrace stack = map->Get(stack_id);
+  // The top frame is our malloc/calloc/etc. The next frame is the caller.
+  if (stack.size >= 2)
+    return stack.trace[1];
+  return 0;
+}
+
+struct InvalidPCParam {
+  Frontier *frontier;
+  StackDepotReverseMap *stack_depot_reverse_map;
+  bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+    u32 stack_id = m.stack_trace_id();
+    uptr caller_pc = 0;
+    if (stack_id > 0)
+      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+    // it as reachable, as we can't properly report its allocation stack anyway.
+    if (caller_pc == 0 || (param->skip_linker_allocations &&
+                           GetLinker()->containsAddress(caller_pc))) {
+      m.set_tag(kReachable);
+      param->frontier->push_back(chunk);
+    }
+  }
+}
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
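+// A sketch of the chain this works around (glibc internals, as described
+// above):
+//   static TLS -> DTV (allocated before interception) -> dynamic TLS block
+// The middle link is invisible to LSan, so without this step the dynamic TLS
+// block would be flagged as a leak.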
+void ProcessPC(Frontier *frontier) {
+  StackDepotReverseMap stack_depot_reverse_map;
+  InvalidPCParam arg;
+  arg.frontier = frontier;
+  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+  arg.skip_linker_allocations =
+      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+  ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
+// Sets the appropriate tag on each chunk.
+static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
+  // Holds the flood fill frontier.
+  Frontier frontier;
+
+  ForEachChunk(CollectIgnoredCb, &frontier);
+  ProcessGlobalRegions(&frontier);
+  ProcessThreads(suspended_threads, &frontier);
+  ProcessRootRegions(&frontier);
+  FloodFillTag(&frontier, kReachable);
+
+  CHECK_EQ(0, frontier.size());
+  ProcessPC(&frontier);
+
+  // The check here is relatively expensive, so we do this in a separate flood
+  // fill. That way we can skip the check for chunks that are reachable
+  // otherwise.
+  LOG_POINTERS("Processing platform-specific allocations.\n");
+  ProcessPlatformSpecificAllocations(&frontier);
+  FloodFillTag(&frontier, kReachable);
+
+  // Iterate over leaked chunks and mark those that are reachable from other
+  // leaked chunks.
+  LOG_POINTERS("Scanning leaked chunks.\n");
+  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+  (void)arg;
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kIgnored)
+    m.set_tag(kDirectlyLeaked);
+}
+
+static void PrintStackTraceById(u32 stack_trace_id) {
+  CHECK(stack_trace_id);
+  StackDepotGet(stack_trace_id).Print();
+}
+
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (!m.allocated()) return;
+  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
+    u32 resolution = flags()->resolution;
+    u32 stack_trace_id = 0;
+    if (resolution > 0) {
+      StackTrace stack = StackDepotGet(m.stack_trace_id());
+      stack.size = Min(stack.size, resolution);
+      stack_trace_id = StackDepotPut(stack);
+    } else {
+      stack_trace_id = m.stack_trace_id();
+    }
+    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+                                m.tag());
+  }
+}
+
+static void PrintMatchedSuppressions() {
+  InternalMmapVector<Suppression *> matched;
+  GetSuppressionContext()->GetMatched(&matched);
+  if (!matched.size())
+    return;
+  const char *line = "-----------------------------------------------------";
+  Printf("%s\n", line);
+  Printf("Suppressions used:\n");
+  Printf("  count      bytes template\n");
+  for (uptr i = 0; i < matched.size(); i++)
+    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
+        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
+  Printf("%s\n\n", line);
+}
+
+struct CheckForLeaksParam {
+  bool success;
+  LeakReport leak_report;
+};
+
+static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
+  const InternalMmapVector<tid_t> &suspended_threads =
+      *(const InternalMmapVector<tid_t> *)arg;
+  if (tctx->status == ThreadStatusRunning) {
+    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
+                                tctx->os_id, CompareLess<int>());
+    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
+      Report("Running thread %d was not suspended. 
False leaks are possible.\n",
+           tctx->os_id);
+  };
+}
+
+static void ReportUnsuspendedThreads(
+    const SuspendedThreadsList &suspended_threads) {
+  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+    threads[i] = suspended_threads.GetThreadID(i);
+
+  Sort(threads.data(), threads.size());
+
+  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      &ReportIfNotSuspended, &threads);
+}
+
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+                                  void *arg) {
+  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
+  CHECK(param);
+  CHECK(!param->success);
+  ReportUnsuspendedThreads(suspended_threads);
+  ClassifyAllChunks(suspended_threads);
+  ForEachChunk(CollectLeaksCb, &param->leak_report);
+  // Clean up for subsequent leak checks. This assumes we did not overwrite any
+  // kIgnored tags.
+  ForEachChunk(ResetTagsCb, nullptr);
+  param->success = true;
+}
+
+static bool CheckForLeaks() {
+  if (&__lsan_is_turned_off && __lsan_is_turned_off())
+    return false;
+  EnsureMainThreadIDIsCorrect();
+  CheckForLeaksParam param;
+  param.success = false;
+  LockThreadRegistry();
+  LockAllocator();
+  DoStopTheWorld(CheckForLeaksCallback, &param);
+  UnlockAllocator();
+  UnlockThreadRegistry();
+
+  if (!param.success) {
+    Report("LeakSanitizer has encountered a fatal error.\n");
+    Report(
+        "HINT: For debugging, try setting environment variable "
+        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+    Report(
+        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
+    Die();
+  }
+  param.leak_report.ApplySuppressions();
+  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
+  if (unsuppressed_count > 0) {
+    Decorator d;
+    Printf("\n"
+           "================================================================="
+           "\n");
+    Printf("%s", d.Error());
+    Report("ERROR: LeakSanitizer: detected memory leaks\n");
+    Printf("%s", d.Default());
+    param.leak_report.ReportTopLeaks(flags()->max_leaks);
+  }
+  if (common_flags()->print_suppressions)
+    PrintMatchedSuppressions();
+  if (unsuppressed_count > 0) {
+    param.leak_report.PrintSummary();
+    return true;
+  }
+  return false;
+}
+
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
+void DoLeakCheck() {
+  BlockingMutexLock l(&global_mutex);
+  static bool already_done;
+  if (already_done) return;
+  already_done = true;
+  has_reported_leaks = CheckForLeaks();
+  if (has_reported_leaks) HandleLeaks();
+}
+
+static int DoRecoverableLeakCheck() {
+  BlockingMutexLock l(&global_mutex);
+  bool have_leaks = CheckForLeaks();
+  return have_leaks ? 1 : 0;
+}
+
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
+static Suppression *GetSuppressionForAddr(uptr addr) {
+  Suppression *s = nullptr;
+
+  // Suppress by module name.
+  SuppressionContext *suppressions = GetSuppressionContext();
+  if (const char *module_name =
+          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
+    if (suppressions->Match(module_name, kSuppressionLeak, &s))
+      return s;
+
+  // Suppress by file or function name.
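+  // For example (hypothetical entries), a suppressions file passed via
+  // LSAN_OPTIONS=suppressions=... might contain:
+  //   leak:libfontconfig.so    # match by module name
+  //   leak:MyPool::Grow        # match by function name
+  //   leak:generated_bindings  # match by file name
+  // A leak is suppressed if any frame of its allocation stack matches.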
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) || + suppressions->Match(cur->info.file, kSuppressionLeak, &s)) { + break; + } + } + frames->ClearAll(); + return s; +} + +static Suppression *GetSuppressionForStack(u32 stack_trace_id) { + StackTrace stack = StackDepotGet(stack_trace_id); + for (uptr i = 0; i < stack.size; i++) { + Suppression *s = GetSuppressionForAddr( + StackTrace::GetPreviousInstructionPc(stack.trace[i])); + if (s) return s; + } + return nullptr; +} + +///// LeakReport implementation. ///// + +// A hard limit on the number of distinct leaks, to avoid quadratic complexity +// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks +// in real-world applications. +// FIXME: Get rid of this limit by changing the implementation of LeakReport to +// use a hash table. +const uptr kMaxLeaksConsidered = 5000; + +void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id, + uptr leaked_size, ChunkTag tag) { + CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); + bool is_directly_leaked = (tag == kDirectlyLeaked); + uptr i; + for (i = 0; i < leaks_.size(); i++) { + if (leaks_[i].stack_trace_id == stack_trace_id && + leaks_[i].is_directly_leaked == is_directly_leaked) { + leaks_[i].hit_count++; + leaks_[i].total_size += leaked_size; + break; + } + } + if (i == leaks_.size()) { + if (leaks_.size() == kMaxLeaksConsidered) return; + Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id, + is_directly_leaked, /* is_suppressed */ false }; + leaks_.push_back(leak); + } + if (flags()->report_objects) { + LeakedObject obj = {leaks_[i].id, chunk, leaked_size}; + leaked_objects_.push_back(obj); + } +} + +static bool LeakComparator(const Leak &leak1, const Leak &leak2) { + if (leak1.is_directly_leaked == leak2.is_directly_leaked) + return leak1.total_size > leak2.total_size; + else + return leak1.is_directly_leaked; +} + +void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { + CHECK(leaks_.size() <= kMaxLeaksConsidered); + Printf("\n"); + if (leaks_.size() == kMaxLeaksConsidered) + Printf("Too many leaks! Only the first %zu leaks encountered will be " + "reported.\n", + kMaxLeaksConsidered); + + uptr unsuppressed_count = UnsuppressedLeakCount(); + if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) + Printf("The %zu top leak(s):\n", num_leaks_to_report); + Sort(leaks_.data(), leaks_.size(), &LeakComparator); + uptr leaks_reported = 0; + for (uptr i = 0; i < leaks_.size(); i++) { + if (leaks_[i].is_suppressed) continue; + PrintReportForLeak(i); + leaks_reported++; + if (leaks_reported == num_leaks_to_report) break; + } + if (leaks_reported < unsuppressed_count) { + uptr remaining = unsuppressed_count - leaks_reported; + Printf("Omitting %zu more leak(s).\n", remaining); + } +} + +void LeakReport::PrintReportForLeak(uptr index) { + Decorator d; + Printf("%s", d.Leak()); + Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n", + leaks_[index].is_directly_leaked ? 
"Direct" : "Indirect", + leaks_[index].total_size, leaks_[index].hit_count); + Printf("%s", d.Default()); + + PrintStackTraceById(leaks_[index].stack_trace_id); + + if (flags()->report_objects) { + Printf("Objects leaked above:\n"); + PrintLeakedObjectsForLeak(index); + Printf("\n"); + } +} + +void LeakReport::PrintLeakedObjectsForLeak(uptr index) { + u32 leak_id = leaks_[index].id; + for (uptr j = 0; j < leaked_objects_.size(); j++) { + if (leaked_objects_[j].leak_id == leak_id) + Printf("%p (%zu bytes)\n", leaked_objects_[j].addr, + leaked_objects_[j].size); + } +} + +void LeakReport::PrintSummary() { + CHECK(leaks_.size() <= kMaxLeaksConsidered); + uptr bytes = 0, allocations = 0; + for (uptr i = 0; i < leaks_.size(); i++) { + if (leaks_[i].is_suppressed) continue; + bytes += leaks_[i].total_size; + allocations += leaks_[i].hit_count; + } + InternalScopedString summary(kMaxSummaryLength); + summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, + allocations); + ReportErrorSummary(summary.data()); +} + +void LeakReport::ApplySuppressions() { + for (uptr i = 0; i < leaks_.size(); i++) { + Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); + if (s) { + s->weight += leaks_[i].total_size; + atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + + leaks_[i].hit_count); + leaks_[i].is_suppressed = true; + } + } +} + +uptr LeakReport::UnsuppressedLeakCount() { + uptr result = 0; + for (uptr i = 0; i < leaks_.size(); i++) + if (!leaks_[i].is_suppressed) result++; + return result; +} + +} // namespace __lsan +#else // CAN_SANITIZE_LEAKS +namespace __lsan { +void InitCommonLsan() { } +void DoLeakCheck() { } +void DoRecoverableLeakCheckVoid() { } +void DisableInThisThread() { } +void EnableInThisThread() { } +} +#endif // CAN_SANITIZE_LEAKS + +using namespace __lsan; // NOLINT + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __lsan_ignore_object(const void *p) { +#if CAN_SANITIZE_LEAKS + if (!common_flags()->detect_leaks) + return; + // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not + // locked. 
+  BlockingMutexLock l(&global_mutex);
+  IgnoreObjectResult res = IgnoreObjectLocked(p);
+  if (res == kIgnoreObjectInvalid)
+    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+  if (res == kIgnoreObjectAlreadyIgnored)
+    VReport(1, "__lsan_ignore_object(): "
+               "heap object at %p is already being ignored\n", p);
+  if (res == kIgnoreObjectSuccess)
+    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+  BlockingMutexLock l(&global_mutex);
+  CHECK(root_regions);
+  RootRegion region = {reinterpret_cast<uptr>(begin), size};
+  root_regions->push_back(region);
+  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+  BlockingMutexLock l(&global_mutex);
+  CHECK(root_regions);
+  bool removed = false;
+  for (uptr i = 0; i < root_regions->size(); i++) {
+    RootRegion region = (*root_regions)[i];
+    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
+      removed = true;
+      uptr last_index = root_regions->size() - 1;
+      (*root_regions)[i] = (*root_regions)[last_index];
+      root_regions->pop_back();
+      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
+      break;
+    }
+  }
+  if (!removed) {
+    Report(
+        "__lsan_unregister_root_region(): region at %p of size %llu has not "
+        "been registered.\n",
+        begin, size);
+    Die();
+  }
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_disable() {
+#if CAN_SANITIZE_LEAKS
+  __lsan::DisableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_enable() {
+#if CAN_SANITIZE_LEAKS
+  __lsan::EnableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_do_leak_check() {
+#if CAN_SANITIZE_LEAKS
+  if (common_flags()->detect_leaks)
+    __lsan::DoLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+  if (common_flags()->detect_leaks)
+    return __lsan::DoRecoverableLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+  return 0;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char * __lsan_default_options() {
+  return "";
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __lsan_is_turned_off() {
+  return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions() {
+  return "";
+}
+#endif
+} // extern "C"
diff --git a/compiler-rt/lib/lsan/lsan_common_linux.cc b/compiler-rt/lib/lsan/lsan_common_linux.cc
deleted file mode 100644
index ef4f591d88f..00000000000
--- a/compiler-rt/lib/lsan/lsan_common_linux.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-//=-- lsan_common_linux.cc ------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of LeakSanitizer.
-// Implementation of common leak checking functionality. Linux/NetBSD-specific
-// code.
-//
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_common/sanitizer_platform.h"
-#include "lsan_common.h"
-
-#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
-#include <link.h>
-
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_getauxval.h"
-#include "sanitizer_common/sanitizer_linux.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
-
-namespace __lsan {
-
-static const char kLinkerName[] = "ld";
-
-static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
-static LoadedModule *linker = nullptr;
-
-static bool IsLinker(const LoadedModule& module) {
-#if SANITIZER_USE_GETAUXVAL
-  return module.base_address() == getauxval(AT_BASE);
-#else
-  return LibraryNameIs(module.full_name(), kLinkerName);
-#endif  // SANITIZER_USE_GETAUXVAL
-}
-
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL int disable_counter;
-bool DisabledInThisThread() { return disable_counter > 0; }
-void DisableInThisThread() { disable_counter++; }
-void EnableInThisThread() {
-  if (disable_counter == 0) {
-    DisableCounterUnderflow();
-  }
-  disable_counter--;
-}
-
-void InitializePlatformSpecificModules() {
-  ListOfModules modules;
-  modules.init();
-  for (LoadedModule &module : modules) {
-    if (!IsLinker(module))
-      continue;
-    if (linker == nullptr) {
-      linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
-      *linker = module;
-      module = LoadedModule();
-    } else {
-      VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
-                 "TLS and other allocations originating from linker might be "
-                 "falsely reported as leaks.\n", kLinkerName);
-      linker->clear();
-      linker = nullptr;
-      return;
-    }
-  }
-  if (linker == nullptr) {
-    VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
-               "allocations originating from linker might be falsely reported "
-               "as leaks.\n");
-  }
-}
-
-static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
-                                        void *data) {
-  Frontier *frontier = reinterpret_cast<Frontier *>(data);
-  for (uptr j = 0; j < info->dlpi_phnum; j++) {
-    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
-    // We're looking for .data and .bss sections, which reside in writeable,
-    // loadable segments.
-    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
-        (phdr->p_memsz == 0))
-      continue;
-    uptr begin = info->dlpi_addr + phdr->p_vaddr;
-    uptr end = begin + phdr->p_memsz;
-    ScanGlobalRange(begin, end, frontier);
-  }
-  return 0;
-}
-
-// Scans global variables for heap pointers.
-void ProcessGlobalRegions(Frontier *frontier) {
-  if (!flags()->use_globals) return;
-  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
-}
-
-LoadedModule *GetLinker() { return linker; }
-
-void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
-
-struct DoStopTheWorldParam {
-  StopTheWorldCallback callback;
-  void *argument;
-};
-
-// While calling Die() here is undefined behavior and can potentially
-// cause race conditions, it isn't possible to intercept exit on linux,
-// so we have no choice but to call Die() from the atexit handler.
-void HandleLeaks() {
-  if (common_flags()->exitcode) Die();
-}
-
-static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
-                                  void *data) {
-  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
-  StopTheWorld(param->callback, param->argument);
-  return 1;
-}
-
-// LSan calls dl_iterate_phdr() from the tracer task.
This may deadlock: if one
-// of the threads is frozen while holding the libdl lock, the tracer will hang
-// in dl_iterate_phdr() forever.
-// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
-// tracer task and the thread that spawned it. Thus, if we run the tracer task
-// while holding the libdl lock in the parent thread, we can safely reenter it
-// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
-// callback in the parent thread.
-void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
-  DoStopTheWorldParam param = {callback, argument};
-  dl_iterate_phdr(DoStopTheWorldCallback, &param);
-}
-
-} // namespace __lsan
-
-#endif
diff --git a/compiler-rt/lib/lsan/lsan_common_linux.cpp b/compiler-rt/lib/lsan/lsan_common_linux.cpp
new file mode 100644
index 00000000000..9ce27a983b5
--- /dev/null
+++ b/compiler-rt/lib/lsan/lsan_common_linux.cpp
@@ -0,0 +1,140 @@
+//=-- lsan_common_linux.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Linux/NetBSD-specific
+// code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
+#include <link.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_getauxval.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __lsan {
+
+static const char kLinkerName[] = "ld";
+
+static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
+static LoadedModule *linker = nullptr;
+
+static bool IsLinker(const LoadedModule& module) {
+#if SANITIZER_USE_GETAUXVAL
+  return module.base_address() == getauxval(AT_BASE);
+#else
+  return LibraryNameIs(module.full_name(), kLinkerName);
+#endif  // SANITIZER_USE_GETAUXVAL
+}
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+  if (disable_counter == 0) {
+    DisableCounterUnderflow();
+  }
+  disable_counter--;
+}
+
+void InitializePlatformSpecificModules() {
+  ListOfModules modules;
+  modules.init();
+  for (LoadedModule &module : modules) {
+    if (!IsLinker(module))
+      continue;
+    if (linker == nullptr) {
+      linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
+      *linker = module;
+      module = LoadedModule();
+    } else {
+      VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+                 "TLS and other allocations originating from linker might be "
+                 "falsely reported as leaks.\n", kLinkerName);
+      linker->clear();
+      linker = nullptr;
+      return;
+    }
+  }
+  if (linker == nullptr) {
+    VReport(1, "LeakSanitizer: Dynamic linker not found. 
TLS and other "
+               "allocations originating from linker might be falsely reported "
+               "as leaks.\n");
+  }
+}
+
+static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
+                                        void *data) {
+  Frontier *frontier = reinterpret_cast<Frontier *>(data);
+  for (uptr j = 0; j < info->dlpi_phnum; j++) {
+    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
+    // We're looking for .data and .bss sections, which reside in writeable,
+    // loadable segments.
+    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
+        (phdr->p_memsz == 0))
+      continue;
+    uptr begin = info->dlpi_addr + phdr->p_vaddr;
+    uptr end = begin + phdr->p_memsz;
+    ScanGlobalRange(begin, end, frontier);
+  }
+  return 0;
+}
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+  if (!flags()->use_globals) return;
+  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
+}
+
+LoadedModule *GetLinker() { return linker; }
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+struct DoStopTheWorldParam {
+  StopTheWorldCallback callback;
+  void *argument;
+};
+
+// While calling Die() here is undefined behavior and can potentially
+// cause race conditions, it isn't possible to intercept exit on linux,
+// so we have no choice but to call Die() from the atexit handler.
+void HandleLeaks() {
+  if (common_flags()->exitcode) Die();
+}
+
+static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
+                                  void *data) {
+  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+  StopTheWorld(param->callback, param->argument);
+  return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+  DoStopTheWorldParam param = {callback, argument};
+  dl_iterate_phdr(DoStopTheWorldCallback, &param);
+}
+
+} // namespace __lsan
+
+#endif
diff --git a/compiler-rt/lib/lsan/lsan_common_mac.cc b/compiler-rt/lib/lsan/lsan_common_mac.cc
deleted file mode 100644
index 14c2b371199..00000000000
--- a/compiler-rt/lib/lsan/lsan_common_mac.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-//=-- lsan_common_mac.cc --------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of LeakSanitizer.
-// Implementation of common leak checking functionality. Darwin-specific code.
-//
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_common/sanitizer_platform.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "lsan_common.h"
-
-#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
-
-#include "sanitizer_common/sanitizer_allocator_internal.h"
-#include "lsan_allocator.h"
-
-#include <pthread.h>
-
-#include <mach/mach.h>
-
-// Only introduced in Mac OS X 10.9.
-#ifdef VM_MEMORY_OS_ALLOC_ONCE
-static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE;
-#else
-static const int kSanitizerVmMemoryOsAllocOnce = 73;
-#endif
-
-namespace __lsan {
-
-typedef struct {
-  int disable_counter;
-  u32 current_thread_id;
-  AllocatorCache cache;
-} thread_local_data_t;
-
-static pthread_key_t key;
-static pthread_once_t key_once = PTHREAD_ONCE_INIT;
-
-// The main thread destructor requires the current thread id,
-// so we can't destroy it until it's been used and reset to invalid tid
-void restore_tid_data(void *ptr) {
-  thread_local_data_t *data = (thread_local_data_t *)ptr;
-  if (data->current_thread_id != kInvalidTid)
-    pthread_setspecific(key, data);
-}
-
-static void make_tls_key() {
-  CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
-}
-
-static thread_local_data_t *get_tls_val(bool alloc) {
-  pthread_once(&key_once, make_tls_key);
-
-  thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
-  if (ptr == NULL && alloc) {
-    ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
-    ptr->disable_counter = 0;
-    ptr->current_thread_id = kInvalidTid;
-    ptr->cache = AllocatorCache();
-    pthread_setspecific(key, ptr);
-  }
-
-  return ptr;
-}
-
-bool DisabledInThisThread() {
-  thread_local_data_t *data = get_tls_val(false);
-  return data ? data->disable_counter > 0 : false;
-}
-
-void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
-
-void EnableInThisThread() {
-  int *disable_counter = &get_tls_val(true)->disable_counter;
-  if (*disable_counter == 0) {
-    DisableCounterUnderflow();
-  }
-  --*disable_counter;
-}
-
-u32 GetCurrentThread() {
-  thread_local_data_t *data = get_tls_val(false);
-  return data ? data->current_thread_id : kInvalidTid;
-}
-
-void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
-
-AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
-
-LoadedModule *GetLinker() { return nullptr; }
-
-// Required on Linux for initialization of TLS behavior, but should not be
-// required on Darwin.
-void InitializePlatformSpecificModules() {}
-
-// Sections which can't contain global pointers. This list errs on the
-// side of caution to avoid false positives, at the expense of performance.
-//
-// Other potentially safe sections include:
-// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
-//
-// Sections which definitely cannot be included here are:
-// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
-// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
-static const char *kSkippedSecNames[] = {
-    "__cfstring",       "__la_symbol_ptr",  "__mod_init_func",
-    "__mod_term_func",  "__nl_symbol_ptr",  "__objc_classlist",
-    "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
-    "__objc_protolist", "__objc_selrefs",   "__objc_superrefs"};
-
-// Scans global variables for heap pointers.
-void ProcessGlobalRegions(Frontier *frontier) {
-  for (auto name : kSkippedSecNames)
-    CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);
-
-  MemoryMappingLayout memory_mapping(false);
-  InternalMmapVector<LoadedModule> modules;
-  modules.reserve(128);
-  memory_mapping.DumpListOfModules(&modules);
-  for (uptr i = 0; i < modules.size(); ++i) {
-    // Even when global scanning is disabled, we still need to scan
-    // system libraries for stashed pointers
-    if (!flags()->use_globals && modules[i].instrumented()) continue;
-
-    for (const __sanitizer::LoadedModule::AddressRange &range :
-         modules[i].ranges()) {
-      // Sections storing global variables are writable and non-executable
-      if (range.executable || !range.writable) continue;
-
-      for (auto name : kSkippedSecNames) {
-        if (!internal_strcmp(range.name, name)) continue;
-      }
-
-      ScanGlobalRange(range.beg, range.end, frontier);
-    }
-  }
-}
-
-void ProcessPlatformSpecificAllocations(Frontier *frontier) {
-  unsigned depth = 1;
-  vm_size_t size = 0;
-  vm_address_t address = 0;
-  kern_return_t err = KERN_SUCCESS;
-  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
-
-  InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
-
-  while (err == KERN_SUCCESS) {
-    struct vm_region_submap_info_64 info;
-    err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
-                               (vm_region_info_t)&info, &count);
-
-    uptr end_address = address + size;
-
-    // libxpc stashes some pointers in the Kernel Alloc Once page,
-    // make sure not to report those as leaks.
-    if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) {
-      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
-                           kReachable);
-
-      // Recursing over the full memory map is very slow, break out
-      // early if we don't need the full iteration.
-      if (!flags()->use_root_regions || !root_regions->size())
-        break;
-    }
-
-    // This additional root region scan is required on Darwin in order to
-    // detect root regions contained within mmap'd memory regions, because
-    // the Darwin implementation of sanitizer_procmaps traverses images
-    // as loaded by dyld, and not the complete set of all memory regions.
-    //
-    // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
-    // behavior as sanitizer_procmaps_linux and traverses all memory regions
-    if (flags()->use_root_regions) {
-      for (uptr i = 0; i < root_regions->size(); i++) {
-        ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
-                       info.protection & kProtectionRead);
-      }
-    }
-
-    address = end_address;
-  }
-}
-
-// On darwin, we can intercept _exit gracefully, and return a failing exit code
-// if required at that point. Calling Die() here is undefined behavior and
-// causes rare race conditions.
-void HandleLeaks() {}
-
-void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
-  StopTheWorld(callback, argument);
-}
-
-} // namespace __lsan
-
-#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC
diff --git a/compiler-rt/lib/lsan/lsan_common_mac.cpp b/compiler-rt/lib/lsan/lsan_common_mac.cpp
new file mode 100644
index 00000000000..5204a6624ed
--- /dev/null
+++ b/compiler-rt/lib/lsan/lsan_common_mac.cpp
@@ -0,0 +1,202 @@
+//=-- lsan_common_mac.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "lsan_allocator.h"
+
+#include <pthread.h>
+
+#include <mach/mach.h>
+
+// Only introduced in Mac OS X 10.9.
+#ifdef VM_MEMORY_OS_ALLOC_ONCE
+static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE;
+#else
+static const int kSanitizerVmMemoryOsAllocOnce = 73;
+#endif
+
+namespace __lsan {
+
+typedef struct {
+  int disable_counter;
+  u32 current_thread_id;
+  AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread destructor requires the current thread id,
+// so we can't destroy it until it's been used and reset to invalid tid
+void restore_tid_data(void *ptr) {
+  thread_local_data_t *data = (thread_local_data_t *)ptr;
+  if (data->current_thread_id != kInvalidTid)
+    pthread_setspecific(key, data);
+}
+
+static void make_tls_key() {
+  CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+  pthread_once(&key_once, make_tls_key);
+
+  thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+  if (ptr == NULL && alloc) {
+    ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+    ptr->disable_counter = 0;
+    ptr->current_thread_id = kInvalidTid;
+    ptr->cache = AllocatorCache();
+    pthread_setspecific(key, ptr);
+  }
+
+  return ptr;
+}
+
+bool DisabledInThisThread() {
+  thread_local_data_t *data = get_tls_val(false);
+  return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+  int *disable_counter = &get_tls_val(true)->disable_counter;
+  if (*disable_counter == 0) {
+    DisableCounterUnderflow();
+  }
+  --*disable_counter;
+}
+
+u32 GetCurrentThread() {
+  thread_local_data_t *data = get_tls_val(false);
+  return data ? data->current_thread_id : kInvalidTid;
+}
+
+void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+LoadedModule *GetLinker() { return nullptr; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {}
+
+// Sections which can't contain global pointers. This list errs on the
+// side of caution to avoid false positives, at the expense of performance.
+//
+// Other potentially safe sections include:
+// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
+//
+// Sections which definitely cannot be included here are:
+// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
+// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
+static const char *kSkippedSecNames[] = {
+    "__cfstring",       "__la_symbol_ptr",  "__mod_init_func",
+    "__mod_term_func",  "__nl_symbol_ptr",  "__objc_classlist",
+    "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
+    "__objc_protolist", "__objc_selrefs",   "__objc_superrefs"};
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+  for (auto name : kSkippedSecNames)
+    CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);
+
+  MemoryMappingLayout memory_mapping(false);
+  InternalMmapVector<LoadedModule> modules;
+  modules.reserve(128);
+  memory_mapping.DumpListOfModules(&modules);
+  for (uptr i = 0; i < modules.size(); ++i) {
+    // Even when global scanning is disabled, we still need to scan
+    // system libraries for stashed pointers
+    if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+    for (const __sanitizer::LoadedModule::AddressRange &range :
+         modules[i].ranges()) {
+      // Sections storing global variables are writable and non-executable
+      if (range.executable || !range.writable) continue;
+
+      for (auto name : kSkippedSecNames) {
+        if (!internal_strcmp(range.name, name)) continue;
+      }
+
+      ScanGlobalRange(range.beg, range.end, frontier);
+    }
+  }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+  unsigned depth = 1;
+  vm_size_t size = 0;
+  vm_address_t address = 0;
+  kern_return_t err = KERN_SUCCESS;
+  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+  InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
+
+  while (err == KERN_SUCCESS) {
+    struct vm_region_submap_info_64 info;
+    err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+                               (vm_region_info_t)&info, &count);
+
+    uptr end_address = address + size;
+
+    // libxpc stashes some pointers in the Kernel Alloc Once page,
+    // make sure not to report those as leaks.
+    if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) {
+      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+                           kReachable);
+
+      // Recursing over the full memory map is very slow, break out
+      // early if we don't need the full iteration.
+      if (!flags()->use_root_regions || !root_regions->size())
+        break;
+    }
+
+    // This additional root region scan is required on Darwin in order to
+    // detect root regions contained within mmap'd memory regions, because
+    // the Darwin implementation of sanitizer_procmaps traverses images
+    // as loaded by dyld, and not the complete set of all memory regions.
+    //
+    // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
+    // behavior as sanitizer_procmaps_linux and traverses all memory regions
+    if (flags()->use_root_regions) {
+      for (uptr i = 0; i < root_regions->size(); i++) {
+        ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
+                       info.protection & kProtectionRead);
+      }
+    }
+
+    address = end_address;
+  }
+}
+
+// On darwin, we can intercept _exit gracefully, and return a failing exit code
+// if required at that point. Calling Die() here is undefined behavior and
+// causes rare race conditions.
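+// (Contrast with the Linux version above, which has to Die() from an atexit
+// handler because exit cannot be intercepted there.)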
+void HandleLeaks() {} + +void DoStopTheWorld(StopTheWorldCallback callback, void *argument) { + StopTheWorld(callback, argument); +} + +} // namespace __lsan + +#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC diff --git a/compiler-rt/lib/lsan/lsan_interceptors.cc b/compiler-rt/lib/lsan/lsan_interceptors.cc deleted file mode 100644 index 4a4c86a9dca..00000000000 --- a/compiler-rt/lib/lsan/lsan_interceptors.cc +++ /dev/null @@ -1,465 +0,0 @@ -//=-- lsan_interceptors.cc ------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer. -// Interceptors for standalone LSan. -// -//===----------------------------------------------------------------------===// - -#include "interception/interception.h" -#include "sanitizer_common/sanitizer_allocator.h" -#include "sanitizer_common/sanitizer_allocator_report.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_linux.h" -#include "sanitizer_common/sanitizer_platform_interceptors.h" -#include "sanitizer_common/sanitizer_platform_limits_netbsd.h" -#include "sanitizer_common/sanitizer_platform_limits_posix.h" -#include "sanitizer_common/sanitizer_posix.h" -#include "sanitizer_common/sanitizer_tls_get_addr.h" -#include "lsan.h" -#include "lsan_allocator.h" -#include "lsan_common.h" -#include "lsan_thread.h" - -#include - -using namespace __lsan; - -extern "C" { -int pthread_attr_init(void *attr); -int pthread_attr_destroy(void *attr); -int pthread_attr_getdetachstate(void *attr, int *v); -int pthread_key_create(unsigned *key, void (*destructor)(void* v)); -int pthread_setspecific(unsigned key, const void *v); -} - -///// Malloc/free interceptors. ///// - -namespace std { - struct nothrow_t; - enum class align_val_t: size_t; -} - -#if !SANITIZER_MAC -INTERCEPTOR(void*, malloc, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_malloc(size, stack); -} - -INTERCEPTOR(void, free, void *p) { - ENSURE_LSAN_INITED; - lsan_free(p); -} - -INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { - if (lsan_init_is_running) { - // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
- const uptr kCallocPoolSize = 1024; - static uptr calloc_memory_for_dlsym[kCallocPoolSize]; - static uptr allocated; - uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize; - void *mem = (void*)&calloc_memory_for_dlsym[allocated]; - allocated += size_in_words; - CHECK(allocated < kCallocPoolSize); - return mem; - } - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_calloc(nmemb, size, stack); -} - -INTERCEPTOR(void*, realloc, void *q, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_realloc(q, size, stack); -} - -INTERCEPTOR(void*, reallocarray, void *q, uptr nmemb, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_reallocarray(q, nmemb, size, stack); -} - -INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_posix_memalign(memptr, alignment, size, stack); -} - -INTERCEPTOR(void*, valloc, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_valloc(size, stack); -} -#endif - -#if SANITIZER_INTERCEPT_MEMALIGN -INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_memalign(alignment, size, stack); -} -#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign) - -INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - void *res = lsan_memalign(alignment, size, stack); - DTLS_on_libc_memalign(res, size); - return res; -} -#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign) -#else -#define LSAN_MAYBE_INTERCEPT_MEMALIGN -#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN -#endif // SANITIZER_INTERCEPT_MEMALIGN - -#if SANITIZER_INTERCEPT_ALIGNED_ALLOC -INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_aligned_alloc(alignment, size, stack); -} -#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc) -#else -#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC -#endif - -#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE -INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { - ENSURE_LSAN_INITED; - return GetMallocUsableSize(ptr); -} -#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \ - INTERCEPT_FUNCTION(malloc_usable_size) -#else -#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE -#endif - -#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO -struct fake_mallinfo { - int x[10]; -}; - -INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { - struct fake_mallinfo res; - internal_memset(&res, 0, sizeof(res)); - return res; -} -#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo) - -INTERCEPTOR(int, mallopt, int cmd, int value) { - return 0; -} -#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt) -#else -#define LSAN_MAYBE_INTERCEPT_MALLINFO -#define LSAN_MAYBE_INTERCEPT_MALLOPT -#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO - -#if SANITIZER_INTERCEPT_PVALLOC -INTERCEPTOR(void*, pvalloc, uptr size) { - ENSURE_LSAN_INITED; - GET_STACK_TRACE_MALLOC; - return lsan_pvalloc(size, stack); -} -#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc) -#else -#define LSAN_MAYBE_INTERCEPT_PVALLOC -#endif // SANITIZER_INTERCEPT_PVALLOC - -#if SANITIZER_INTERCEPT_CFREE -INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free)); -#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree) -#else -#define LSAN_MAYBE_INTERCEPT_CFREE -#endif // SANITIZER_INTERCEPT_CFREE - -#if 
SANITIZER_INTERCEPT_MCHECK_MPROBE -INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) { - return 0; -} - -INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) { - return 0; -} - -INTERCEPTOR(int, mprobe, void *ptr) { - return 0; -} -#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE - - -// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. -#define OPERATOR_NEW_BODY(nothrow)\ - ENSURE_LSAN_INITED;\ - GET_STACK_TRACE_MALLOC;\ - void *res = lsan_malloc(size, stack);\ - if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ - return res; -#define OPERATOR_NEW_BODY_ALIGN(nothrow)\ - ENSURE_LSAN_INITED;\ - GET_STACK_TRACE_MALLOC;\ - void *res = lsan_memalign((uptr)align, size, stack);\ - if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ - return res; - -#define OPERATOR_DELETE_BODY\ - ENSURE_LSAN_INITED;\ - lsan_free(ptr); - -// On OS X it's not enough to just provide our own 'operator new' and -// 'operator delete' implementations, because they're going to be in the runtime -// dylib, and the main executable will depend on both the runtime dylib and -// libstdc++, each of has its implementation of new and delete. -// To make sure that C++ allocation/deallocation operators are overridden on -// OS X we need to intercept them using their mangled names. -#if !SANITIZER_MAC - -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(true /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(true /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); } - -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr, std::nothrow_t const &) -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr, size_t size) NOEXCEPT -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size) NOEXCEPT -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr, std::align_val_t) NOEXCEPT -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr, std::align_val_t) NOEXCEPT -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT -{ OPERATOR_DELETE_BODY; } 
-INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT -{ OPERATOR_DELETE_BODY; } - -#else // SANITIZER_MAC - -INTERCEPTOR(void *, _Znwm, size_t size) -{ OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR(void *, _Znam, size_t size) -{ OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(true /*nothrow*/); } -INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(true /*nothrow*/); } - -INTERCEPTOR(void, _ZdlPv, void *ptr) -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR(void, _ZdaPv, void *ptr) -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY; } -INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY; } - -#endif // !SANITIZER_MAC - - -///// Thread initialization and finalization. ///// - -#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD -static unsigned g_thread_finalize_key; - -static void thread_finalize(void *v) { - uptr iter = (uptr)v; - if (iter > 1) { - if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) { - Report("LeakSanitizer: failed to set thread key.\n"); - Die(); - } - return; - } - ThreadFinish(); -} -#endif - -#if SANITIZER_NETBSD -INTERCEPTOR(void, _lwp_exit) { - ENSURE_LSAN_INITED; - ThreadFinish(); - REAL(_lwp_exit)(); -} -#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit) -#else -#define LSAN_MAYBE_INTERCEPT__LWP_EXIT -#endif - -#if SANITIZER_INTERCEPT_THR_EXIT -INTERCEPTOR(void, thr_exit, tid_t *state) { - ENSURE_LSAN_INITED; - ThreadFinish(); - REAL(thr_exit)(state); -} -#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit) -#else -#define LSAN_MAYBE_INTERCEPT_THR_EXIT -#endif - -struct ThreadParam { - void *(*callback)(void *arg); - void *param; - atomic_uintptr_t tid; -}; - -extern "C" void *__lsan_thread_start_func(void *arg) { - ThreadParam *p = (ThreadParam*)arg; - void* (*callback)(void *arg) = p->callback; - void *param = p->param; - // Wait until the last iteration to maximize the chance that we are the last - // destructor to run. -#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD - if (pthread_setspecific(g_thread_finalize_key, - (void*)GetPthreadDestructorIterations())) { - Report("LeakSanitizer: failed to set thread key.\n"); - Die(); - } -#endif - int tid = 0; - while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) - internal_sched_yield(); - SetCurrentThread(tid); - ThreadStart(tid, GetTid()); - atomic_store(&p->tid, 0, memory_order_release); - return callback(param); -} - -INTERCEPTOR(int, pthread_create, void *th, void *attr, - void *(*callback)(void *), void *param) { - ENSURE_LSAN_INITED; - EnsureMainThreadIDIsCorrect(); - __sanitizer_pthread_attr_t myattr; - if (!attr) { - pthread_attr_init(&myattr); - attr = &myattr; - } - AdjustStackSize(attr); - int detached = 0; - pthread_attr_getdetachstate(attr, &detached); - ThreadParam p; - p.callback = callback; - p.param = param; - atomic_store(&p.tid, 0, memory_order_relaxed); - int res; - { - // Ignore all allocations made by pthread_create: thread stack/TLS may be - // stored by pthread for future reuse even after thread destruction, and - // the linked list it's stored in doesn't even hold valid pointers to the - // objects, the latter are calculated by obscure pointer arithmetic. 
- ScopedInterceptorDisabler disabler; - res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p); - } - if (res == 0) { - int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, - IsStateDetached(detached)); - CHECK_NE(tid, 0); - atomic_store(&p.tid, tid, memory_order_release); - while (atomic_load(&p.tid, memory_order_acquire) != 0) - internal_sched_yield(); - } - if (attr == &myattr) - pthread_attr_destroy(&myattr); - return res; -} - -INTERCEPTOR(int, pthread_join, void *th, void **ret) { - ENSURE_LSAN_INITED; - int tid = ThreadTid((uptr)th); - int res = REAL(pthread_join)(th, ret); - if (res == 0) - ThreadJoin(tid); - return res; -} - -INTERCEPTOR(void, _exit, int status) { - if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode; - REAL(_exit)(status); -} - -#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) -#include "sanitizer_common/sanitizer_signal_interceptors.inc" - -namespace __lsan { - -void InitializeInterceptors() { - InitializeSignalInterceptors(); - - INTERCEPT_FUNCTION(malloc); - INTERCEPT_FUNCTION(free); - LSAN_MAYBE_INTERCEPT_CFREE; - INTERCEPT_FUNCTION(calloc); - INTERCEPT_FUNCTION(realloc); - LSAN_MAYBE_INTERCEPT_MEMALIGN; - LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN; - LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC; - INTERCEPT_FUNCTION(posix_memalign); - INTERCEPT_FUNCTION(valloc); - LSAN_MAYBE_INTERCEPT_PVALLOC; - LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE; - LSAN_MAYBE_INTERCEPT_MALLINFO; - LSAN_MAYBE_INTERCEPT_MALLOPT; - INTERCEPT_FUNCTION(pthread_create); - INTERCEPT_FUNCTION(pthread_join); - INTERCEPT_FUNCTION(_exit); - - LSAN_MAYBE_INTERCEPT__LWP_EXIT; - LSAN_MAYBE_INTERCEPT_THR_EXIT; - -#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD - if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { - Report("LeakSanitizer: failed to create thread key.\n"); - Die(); - } -#endif -} - -} // namespace __lsan diff --git a/compiler-rt/lib/lsan/lsan_interceptors.cpp b/compiler-rt/lib/lsan/lsan_interceptors.cpp new file mode 100644 index 00000000000..f06d5fff706 --- /dev/null +++ b/compiler-rt/lib/lsan/lsan_interceptors.cpp @@ -0,0 +1,465 @@ +//=-- lsan_interceptors.cpp -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Interceptors for standalone LSan. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+
+#include <stddef.h>
+
+using namespace __lsan;
+
+extern "C" {
+int pthread_attr_init(void *attr);
+int pthread_attr_destroy(void *attr);
+int pthread_attr_getdetachstate(void *attr, int *v);
+int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+int pthread_setspecific(unsigned key, const void *v);
+}
+
+///// Malloc/free interceptors. /////
+
+namespace std {
+  struct nothrow_t;
+  enum class align_val_t: size_t;
+}
+
+#if !SANITIZER_MAC
+INTERCEPTOR(void*, malloc, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_malloc(size, stack);
+}
+
+INTERCEPTOR(void, free, void *p) {
+  ENSURE_LSAN_INITED;
+  lsan_free(p);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+  if (lsan_init_is_running) {
+    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+    const uptr kCallocPoolSize = 1024;
+    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
+    static uptr allocated;
+    uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
+    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
+    allocated += size_in_words;
+    CHECK(allocated < kCallocPoolSize);
+    return mem;
+  }
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_calloc(nmemb, size, stack);
+}
+
+INTERCEPTOR(void*, realloc, void *q, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_realloc(q, size, stack);
+}
+
+INTERCEPTOR(void*, reallocarray, void *q, uptr nmemb, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_reallocarray(q, nmemb, size, stack);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_posix_memalign(memptr, alignment, size, stack);
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_valloc(size, stack);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_memalign(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+
+INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  void *res = lsan_memalign(alignment, size, stack);
+  DTLS_on_libc_memalign(res, size);
+  return res;
+}
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr 
size) { + ENSURE_LSAN_INITED; + GET_STACK_TRACE_MALLOC; + return lsan_aligned_alloc(alignment, size, stack); +} +#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc) +#else +#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC +#endif + +#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE +INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { + ENSURE_LSAN_INITED; + return GetMallocUsableSize(ptr); +} +#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \ + INTERCEPT_FUNCTION(malloc_usable_size) +#else +#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE +#endif + +#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO +struct fake_mallinfo { + int x[10]; +}; + +INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { + struct fake_mallinfo res; + internal_memset(&res, 0, sizeof(res)); + return res; +} +#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo) + +INTERCEPTOR(int, mallopt, int cmd, int value) { + return 0; +} +#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt) +#else +#define LSAN_MAYBE_INTERCEPT_MALLINFO +#define LSAN_MAYBE_INTERCEPT_MALLOPT +#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO + +#if SANITIZER_INTERCEPT_PVALLOC +INTERCEPTOR(void*, pvalloc, uptr size) { + ENSURE_LSAN_INITED; + GET_STACK_TRACE_MALLOC; + return lsan_pvalloc(size, stack); +} +#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc) +#else +#define LSAN_MAYBE_INTERCEPT_PVALLOC +#endif // SANITIZER_INTERCEPT_PVALLOC + +#if SANITIZER_INTERCEPT_CFREE +INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free)); +#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree) +#else +#define LSAN_MAYBE_INTERCEPT_CFREE +#endif // SANITIZER_INTERCEPT_CFREE + +#if SANITIZER_INTERCEPT_MCHECK_MPROBE +INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) { + return 0; +} + +INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) { + return 0; +} + +INTERCEPTOR(int, mprobe, void *ptr) { + return 0; +} +#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE + + +// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. +#define OPERATOR_NEW_BODY(nothrow)\ + ENSURE_LSAN_INITED;\ + GET_STACK_TRACE_MALLOC;\ + void *res = lsan_malloc(size, stack);\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ + return res; +#define OPERATOR_NEW_BODY_ALIGN(nothrow)\ + ENSURE_LSAN_INITED;\ + GET_STACK_TRACE_MALLOC;\ + void *res = lsan_memalign((uptr)align, size, stack);\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ + return res; + +#define OPERATOR_DELETE_BODY\ + ENSURE_LSAN_INITED;\ + lsan_free(ptr); + +// On OS X it's not enough to just provide our own 'operator new' and +// 'operator delete' implementations, because they're going to be in the runtime +// dylib, and the main executable will depend on both the runtime dylib and +// libstdc++, each of has its implementation of new and delete. +// To make sure that C++ allocation/deallocation operators are overridden on +// OS X we need to intercept them using their mangled names. 
+#if !SANITIZER_MAC + +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); } + +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const &) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr, std::align_val_t) NOEXCEPT +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr, std::align_val_t) NOEXCEPT +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT +{ OPERATOR_DELETE_BODY; } + +#else // SANITIZER_MAC + +INTERCEPTOR(void *, _Znwm, size_t size) +{ OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR(void *, _Znam, size_t size) +{ OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } +INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } + +INTERCEPTOR(void, _ZdlPv, void *ptr) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR(void, _ZdaPv, void *ptr) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY; } + +#endif // !SANITIZER_MAC + + +///// Thread initialization and finalization. 
///// + +#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD +static unsigned g_thread_finalize_key; + +static void thread_finalize(void *v) { + uptr iter = (uptr)v; + if (iter > 1) { + if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) { + Report("LeakSanitizer: failed to set thread key.\n"); + Die(); + } + return; + } + ThreadFinish(); +} +#endif + +#if SANITIZER_NETBSD +INTERCEPTOR(void, _lwp_exit) { + ENSURE_LSAN_INITED; + ThreadFinish(); + REAL(_lwp_exit)(); +} +#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit) +#else +#define LSAN_MAYBE_INTERCEPT__LWP_EXIT +#endif + +#if SANITIZER_INTERCEPT_THR_EXIT +INTERCEPTOR(void, thr_exit, tid_t *state) { + ENSURE_LSAN_INITED; + ThreadFinish(); + REAL(thr_exit)(state); +} +#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit) +#else +#define LSAN_MAYBE_INTERCEPT_THR_EXIT +#endif + +struct ThreadParam { + void *(*callback)(void *arg); + void *param; + atomic_uintptr_t tid; +}; + +extern "C" void *__lsan_thread_start_func(void *arg) { + ThreadParam *p = (ThreadParam*)arg; + void* (*callback)(void *arg) = p->callback; + void *param = p->param; + // Wait until the last iteration to maximize the chance that we are the last + // destructor to run. +#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD + if (pthread_setspecific(g_thread_finalize_key, + (void*)GetPthreadDestructorIterations())) { + Report("LeakSanitizer: failed to set thread key.\n"); + Die(); + } +#endif + int tid = 0; + while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) + internal_sched_yield(); + SetCurrentThread(tid); + ThreadStart(tid, GetTid()); + atomic_store(&p->tid, 0, memory_order_release); + return callback(param); +} + +INTERCEPTOR(int, pthread_create, void *th, void *attr, + void *(*callback)(void *), void *param) { + ENSURE_LSAN_INITED; + EnsureMainThreadIDIsCorrect(); + __sanitizer_pthread_attr_t myattr; + if (!attr) { + pthread_attr_init(&myattr); + attr = &myattr; + } + AdjustStackSize(attr); + int detached = 0; + pthread_attr_getdetachstate(attr, &detached); + ThreadParam p; + p.callback = callback; + p.param = param; + atomic_store(&p.tid, 0, memory_order_relaxed); + int res; + { + // Ignore all allocations made by pthread_create: thread stack/TLS may be + // stored by pthread for future reuse even after thread destruction, and + // the linked list it's stored in doesn't even hold valid pointers to the + // objects, the latter are calculated by obscure pointer arithmetic. 
+ ScopedInterceptorDisabler disabler; + res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p); + } + if (res == 0) { + int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, + IsStateDetached(detached)); + CHECK_NE(tid, 0); + atomic_store(&p.tid, tid, memory_order_release); + while (atomic_load(&p.tid, memory_order_acquire) != 0) + internal_sched_yield(); + } + if (attr == &myattr) + pthread_attr_destroy(&myattr); + return res; +} + +INTERCEPTOR(int, pthread_join, void *th, void **ret) { + ENSURE_LSAN_INITED; + int tid = ThreadTid((uptr)th); + int res = REAL(pthread_join)(th, ret); + if (res == 0) + ThreadJoin(tid); + return res; +} + +INTERCEPTOR(void, _exit, int status) { + if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode; + REAL(_exit)(status); +} + +#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) +#include "sanitizer_common/sanitizer_signal_interceptors.inc" + +namespace __lsan { + +void InitializeInterceptors() { + InitializeSignalInterceptors(); + + INTERCEPT_FUNCTION(malloc); + INTERCEPT_FUNCTION(free); + LSAN_MAYBE_INTERCEPT_CFREE; + INTERCEPT_FUNCTION(calloc); + INTERCEPT_FUNCTION(realloc); + LSAN_MAYBE_INTERCEPT_MEMALIGN; + LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN; + LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC; + INTERCEPT_FUNCTION(posix_memalign); + INTERCEPT_FUNCTION(valloc); + LSAN_MAYBE_INTERCEPT_PVALLOC; + LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE; + LSAN_MAYBE_INTERCEPT_MALLINFO; + LSAN_MAYBE_INTERCEPT_MALLOPT; + INTERCEPT_FUNCTION(pthread_create); + INTERCEPT_FUNCTION(pthread_join); + INTERCEPT_FUNCTION(_exit); + + LSAN_MAYBE_INTERCEPT__LWP_EXIT; + LSAN_MAYBE_INTERCEPT_THR_EXIT; + +#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD + if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { + Report("LeakSanitizer: failed to create thread key.\n"); + Die(); + } +#endif +} + +} // namespace __lsan diff --git a/compiler-rt/lib/lsan/lsan_linux.cc b/compiler-rt/lib/lsan/lsan_linux.cc deleted file mode 100644 index 22d034280d7..00000000000 --- a/compiler-rt/lib/lsan/lsan_linux.cc +++ /dev/null @@ -1,32 +0,0 @@ -//=-- lsan_linux.cc -------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer. Linux/NetBSD-specific code. 
-// -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" - -#if SANITIZER_LINUX || SANITIZER_NETBSD - -#include "lsan_allocator.h" - -namespace __lsan { - -static THREADLOCAL u32 current_thread_tid = kInvalidTid; -u32 GetCurrentThread() { return current_thread_tid; } -void SetCurrentThread(u32 tid) { current_thread_tid = tid; } - -static THREADLOCAL AllocatorCache allocator_cache; -AllocatorCache *GetAllocatorCache() { return &allocator_cache; } - -void ReplaceSystemMalloc() {} - -} // namespace __lsan - -#endif // SANITIZER_LINUX || SANITIZER_NETBSD diff --git a/compiler-rt/lib/lsan/lsan_linux.cpp b/compiler-rt/lib/lsan/lsan_linux.cpp new file mode 100644 index 00000000000..14a42b75d2a --- /dev/null +++ b/compiler-rt/lib/lsan/lsan_linux.cpp @@ -0,0 +1,32 @@ +//=-- lsan_linux.cpp ------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. Linux/NetBSD-specific code. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" + +#if SANITIZER_LINUX || SANITIZER_NETBSD + +#include "lsan_allocator.h" + +namespace __lsan { + +static THREADLOCAL u32 current_thread_tid = kInvalidTid; +u32 GetCurrentThread() { return current_thread_tid; } +void SetCurrentThread(u32 tid) { current_thread_tid = tid; } + +static THREADLOCAL AllocatorCache allocator_cache; +AllocatorCache *GetAllocatorCache() { return &allocator_cache; } + +void ReplaceSystemMalloc() {} + +} // namespace __lsan + +#endif // SANITIZER_LINUX || SANITIZER_NETBSD diff --git a/compiler-rt/lib/lsan/lsan_mac.cc b/compiler-rt/lib/lsan/lsan_mac.cc deleted file mode 100644 index 435f41b6f8b..00000000000 --- a/compiler-rt/lib/lsan/lsan_mac.cc +++ /dev/null @@ -1,191 +0,0 @@ -//===-- lsan_mac.cc -------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer, a memory leak checker. -// -// Mac-specific details. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_MAC - -#include "interception/interception.h" -#include "lsan.h" -#include "lsan_allocator.h" -#include "lsan_thread.h" - -#include - -namespace __lsan { -// Support for the following functions from libdispatch on Mac OS: -// dispatch_async_f() -// dispatch_async() -// dispatch_sync_f() -// dispatch_sync() -// dispatch_after_f() -// dispatch_after() -// dispatch_group_async_f() -// dispatch_group_async() -// TODO(glider): libdispatch API contains other functions that we don't support -// yet. -// -// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are -// they can cause jobs to run on a thread different from the current one. -// TODO(glider): if so, we need a test for this (otherwise we should remove -// them). 
-// -// The following functions use dispatch_barrier_async_f() (which isn't a library -// function but is exported) and are thus supported: -// dispatch_source_set_cancel_handler_f() -// dispatch_source_set_cancel_handler() -// dispatch_source_set_event_handler_f() -// dispatch_source_set_event_handler() -// -// The reference manual for Grand Central Dispatch is available at -// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html -// The implementation details are at -// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c - -typedef void *dispatch_group_t; -typedef void *dispatch_queue_t; -typedef void *dispatch_source_t; -typedef u64 dispatch_time_t; -typedef void (*dispatch_function_t)(void *block); -typedef void *(*worker_t)(void *block); - -// A wrapper for the ObjC blocks used to support libdispatch. -typedef struct { - void *block; - dispatch_function_t func; - u32 parent_tid; -} lsan_block_context_t; - -ALWAYS_INLINE -void lsan_register_worker_thread(int parent_tid) { - if (GetCurrentThread() == kInvalidTid) { - u32 tid = ThreadCreate(parent_tid, 0, true); - ThreadStart(tid, GetTid()); - SetCurrentThread(tid); - } -} - -// For use by only those functions that allocated the context via -// alloc_lsan_context(). -extern "C" void lsan_dispatch_call_block_and_release(void *block) { - lsan_block_context_t *context = (lsan_block_context_t *)block; - VReport(2, - "lsan_dispatch_call_block_and_release(): " - "context: %p, pthread_self: %p\n", - block, pthread_self()); - lsan_register_worker_thread(context->parent_tid); - // Call the original dispatcher for the block. - context->func(context->block); - lsan_free(context); -} - -} // namespace __lsan - -using namespace __lsan; // NOLINT - -// Wrap |ctxt| and |func| into an lsan_block_context_t. -// The caller retains control of the allocated context. -extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt, - dispatch_function_t func) { - GET_STACK_TRACE_THREAD; - lsan_block_context_t *lsan_ctxt = - (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack); - lsan_ctxt->block = ctxt; - lsan_ctxt->func = func; - lsan_ctxt->parent_tid = GetCurrentThread(); - return lsan_ctxt; -} - -// Define interceptor for dispatch_*_f function with the three most common -// parameters: dispatch_queue_t, context, dispatch_function_t. 
-#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \ - INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \ - dispatch_function_t func) { \ - lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \ - return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \ - lsan_dispatch_call_block_and_release); \ - } - -INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) -INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f) -INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f) - -INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq, - void *ctxt, dispatch_function_t func) { - lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); - return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt, - lsan_dispatch_call_block_and_release); -} - -INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, - dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); - REAL(dispatch_group_async_f) - (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release); -} - -#if !defined(MISSING_BLOCKS_SUPPORT) -extern "C" { -void dispatch_async(dispatch_queue_t dq, void (^work)(void)); -void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, - void (^work)(void)); -void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, - void (^work)(void)); -void dispatch_source_set_cancel_handler(dispatch_source_t ds, - void (^work)(void)); -void dispatch_source_set_event_handler(dispatch_source_t ds, - void (^work)(void)); -} - -#define GET_LSAN_BLOCK(work) \ - void (^lsan_block)(void); \ - int parent_tid = GetCurrentThread(); \ - lsan_block = ^(void) { \ - lsan_register_worker_thread(parent_tid); \ - work(); \ - } - -INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) { - GET_LSAN_BLOCK(work); - REAL(dispatch_async)(dq, lsan_block); -} - -INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg, - dispatch_queue_t dq, void (^work)(void)) { - GET_LSAN_BLOCK(work); - REAL(dispatch_group_async)(dg, dq, lsan_block); -} - -INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue, - void (^work)(void)) { - GET_LSAN_BLOCK(work); - REAL(dispatch_after)(when, queue, lsan_block); -} - -INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds, - void (^work)(void)) { - if (!work) { - REAL(dispatch_source_set_cancel_handler)(ds, work); - return; - } - GET_LSAN_BLOCK(work); - REAL(dispatch_source_set_cancel_handler)(ds, lsan_block); -} - -INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds, - void (^work)(void)) { - GET_LSAN_BLOCK(work); - REAL(dispatch_source_set_event_handler)(ds, lsan_block); -} -#endif - -#endif // SANITIZER_MAC diff --git a/compiler-rt/lib/lsan/lsan_mac.cpp b/compiler-rt/lib/lsan/lsan_mac.cpp new file mode 100644 index 00000000000..7bcd9c828ef --- /dev/null +++ b/compiler-rt/lib/lsan/lsan_mac.cpp @@ -0,0 +1,191 @@ +//===-- lsan_mac.cpp ------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer, a memory leak checker. +// +// Mac-specific details. 
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+// Support for the following functions from libdispatch on Mac OS:
+//   dispatch_async_f()
+//   dispatch_async()
+//   dispatch_sync_f()
+//   dispatch_sync()
+//   dispatch_after_f()
+//   dispatch_after()
+//   dispatch_group_async_f()
+//   dispatch_group_async()
+// TODO(glider): libdispatch API contains other functions that we don't support
+// yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
+// they can cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+//   dispatch_source_set_cancel_handler_f()
+//   dispatch_source_set_cancel_handler()
+//   dispatch_source_set_event_handler_f()
+//   dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+//   http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+//   http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void *dispatch_group_t;
+typedef void *dispatch_queue_t;
+typedef void *dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void *(*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+  void *block;
+  dispatch_function_t func;
+  u32 parent_tid;
+} lsan_block_context_t;
+
+ALWAYS_INLINE
+void lsan_register_worker_thread(int parent_tid) {
+  if (GetCurrentThread() == kInvalidTid) {
+    u32 tid = ThreadCreate(parent_tid, 0, true);
+    ThreadStart(tid, GetTid());
+    SetCurrentThread(tid);
+  }
+}
+
+// For use by only those functions that allocated the context via
+// alloc_lsan_context().
+extern "C" void lsan_dispatch_call_block_and_release(void *block) {
+  lsan_block_context_t *context = (lsan_block_context_t *)block;
+  VReport(2,
+          "lsan_dispatch_call_block_and_release(): "
+          "context: %p, pthread_self: %p\n",
+          block, pthread_self());
+  lsan_register_worker_thread(context->parent_tid);
+  // Call the original dispatcher for the block.
+  context->func(context->block);
+  lsan_free(context);
+}
+
+} // namespace __lsan
+
+using namespace __lsan; // NOLINT
+
+// Wrap |ctxt| and |func| into an lsan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
+                                                    dispatch_function_t func) {
+  GET_STACK_TRACE_THREAD;
+  lsan_block_context_t *lsan_ctxt =
+      (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
+  lsan_ctxt->block = ctxt;
+  lsan_ctxt->func = func;
+  lsan_ctxt->parent_tid = GetCurrentThread();
+  return lsan_ctxt;
+}
+
+// Define interceptor for dispatch_*_f function with the three most common
+// parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \ + INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \ + dispatch_function_t func) { \ + lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \ + return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \ + lsan_dispatch_call_block_and_release); \ + } + +INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f) + +INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq, + void *ctxt, dispatch_function_t func) { + lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); + return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt, + lsan_dispatch_call_block_and_release); +} + +INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, + dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { + lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); + REAL(dispatch_group_async_f) + (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release); +} + +#if !defined(MISSING_BLOCKS_SUPPORT) +extern "C" { +void dispatch_async(dispatch_queue_t dq, void (^work)(void)); +void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, + void (^work)(void)); +void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + void (^work)(void)); +void dispatch_source_set_cancel_handler(dispatch_source_t ds, + void (^work)(void)); +void dispatch_source_set_event_handler(dispatch_source_t ds, + void (^work)(void)); +} + +#define GET_LSAN_BLOCK(work) \ + void (^lsan_block)(void); \ + int parent_tid = GetCurrentThread(); \ + lsan_block = ^(void) { \ + lsan_register_worker_thread(parent_tid); \ + work(); \ + } + +INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) { + GET_LSAN_BLOCK(work); + REAL(dispatch_async)(dq, lsan_block); +} + +INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg, + dispatch_queue_t dq, void (^work)(void)) { + GET_LSAN_BLOCK(work); + REAL(dispatch_group_async)(dg, dq, lsan_block); +} + +INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue, + void (^work)(void)) { + GET_LSAN_BLOCK(work); + REAL(dispatch_after)(when, queue, lsan_block); +} + +INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds, + void (^work)(void)) { + if (!work) { + REAL(dispatch_source_set_cancel_handler)(ds, work); + return; + } + GET_LSAN_BLOCK(work); + REAL(dispatch_source_set_cancel_handler)(ds, lsan_block); +} + +INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds, + void (^work)(void)) { + GET_LSAN_BLOCK(work); + REAL(dispatch_source_set_event_handler)(ds, lsan_block); +} +#endif + +#endif // SANITIZER_MAC diff --git a/compiler-rt/lib/lsan/lsan_malloc_mac.cc b/compiler-rt/lib/lsan/lsan_malloc_mac.cc deleted file mode 100644 index 34447b4b39f..00000000000 --- a/compiler-rt/lib/lsan/lsan_malloc_mac.cc +++ /dev/null @@ -1,59 +0,0 @@ -//===-- lsan_malloc_mac.cc ------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer (LSan), a memory leak detector. -// -// Mac-specific malloc interception. 
-//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_MAC - -#include "lsan.h" -#include "lsan_allocator.h" -#include "lsan_thread.h" - -using namespace __lsan; -#define COMMON_MALLOC_ZONE_NAME "lsan" -#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED -#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited -#define COMMON_MALLOC_FORCE_LOCK() -#define COMMON_MALLOC_FORCE_UNLOCK() -#define COMMON_MALLOC_MEMALIGN(alignment, size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = lsan_memalign(alignment, size, stack) -#define COMMON_MALLOC_MALLOC(size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = lsan_malloc(size, stack) -#define COMMON_MALLOC_REALLOC(ptr, size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = lsan_realloc(ptr, size, stack) -#define COMMON_MALLOC_CALLOC(count, size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = lsan_calloc(count, size, stack) -#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ - GET_STACK_TRACE_MALLOC; \ - int res = lsan_posix_memalign(memptr, alignment, size, stack) -#define COMMON_MALLOC_VALLOC(size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = lsan_valloc(size, stack) -#define COMMON_MALLOC_FREE(ptr) \ - lsan_free(ptr) -#define COMMON_MALLOC_SIZE(ptr) \ - uptr size = lsan_mz_size(ptr) -#define COMMON_MALLOC_FILL_STATS(zone, stats) -#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ - (void)zone_name; \ - Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr); -#define COMMON_MALLOC_NAMESPACE __lsan -#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 -#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0 - -#include "sanitizer_common/sanitizer_malloc_mac.inc" - -#endif // SANITIZER_MAC diff --git a/compiler-rt/lib/lsan/lsan_malloc_mac.cpp b/compiler-rt/lib/lsan/lsan_malloc_mac.cpp new file mode 100644 index 00000000000..d03eb2e915c --- /dev/null +++ b/compiler-rt/lib/lsan/lsan_malloc_mac.cpp @@ -0,0 +1,59 @@ +//===-- lsan_malloc_mac.cpp -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer (LSan), a memory leak detector. +// +// Mac-specific malloc interception. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "lsan.h" +#include "lsan_allocator.h" +#include "lsan_thread.h" + +using namespace __lsan; +#define COMMON_MALLOC_ZONE_NAME "lsan" +#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED +#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited +#define COMMON_MALLOC_FORCE_LOCK() +#define COMMON_MALLOC_FORCE_UNLOCK() +#define COMMON_MALLOC_MEMALIGN(alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = lsan_memalign(alignment, size, stack) +#define COMMON_MALLOC_MALLOC(size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = lsan_malloc(size, stack) +#define COMMON_MALLOC_REALLOC(ptr, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = lsan_realloc(ptr, size, stack) +#define COMMON_MALLOC_CALLOC(count, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = lsan_calloc(count, size, stack) +#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + int res = lsan_posix_memalign(memptr, alignment, size, stack) +#define COMMON_MALLOC_VALLOC(size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = lsan_valloc(size, stack) +#define COMMON_MALLOC_FREE(ptr) \ + lsan_free(ptr) +#define COMMON_MALLOC_SIZE(ptr) \ + uptr size = lsan_mz_size(ptr) +#define COMMON_MALLOC_FILL_STATS(zone, stats) +#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ + (void)zone_name; \ + Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr); +#define COMMON_MALLOC_NAMESPACE __lsan +#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 +#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0 + +#include "sanitizer_common/sanitizer_malloc_mac.inc" + +#endif // SANITIZER_MAC diff --git a/compiler-rt/lib/lsan/lsan_preinit.cc b/compiler-rt/lib/lsan/lsan_preinit.cc deleted file mode 100644 index 5d0ad89a8b0..00000000000 --- a/compiler-rt/lib/lsan/lsan_preinit.cc +++ /dev/null @@ -1,21 +0,0 @@ -//===-- lsan_preinit.cc ---------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer. -// -// Call __lsan_init at the very early stage of process startup. -//===----------------------------------------------------------------------===// - -#include "lsan.h" - -#if SANITIZER_CAN_USE_PREINIT_ARRAY - // We force __lsan_init to be called before anyone else by placing it into - // .preinit_array section. - __attribute__((section(".preinit_array"), used)) - void (*__local_lsan_preinit)(void) = __lsan_init; -#endif diff --git a/compiler-rt/lib/lsan/lsan_preinit.cpp b/compiler-rt/lib/lsan/lsan_preinit.cpp new file mode 100644 index 00000000000..cd94e1e8718 --- /dev/null +++ b/compiler-rt/lib/lsan/lsan_preinit.cpp @@ -0,0 +1,21 @@ +//===-- lsan_preinit.cpp --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// +// Call __lsan_init at the very early stage of process startup. 
+//===----------------------------------------------------------------------===// + +#include "lsan.h" + +#if SANITIZER_CAN_USE_PREINIT_ARRAY + // We force __lsan_init to be called before anyone else by placing it into + // .preinit_array section. + __attribute__((section(".preinit_array"), used)) + void (*__local_lsan_preinit)(void) = __lsan_init; +#endif diff --git a/compiler-rt/lib/lsan/lsan_thread.cc b/compiler-rt/lib/lsan/lsan_thread.cc deleted file mode 100644 index 77f6a9236dd..00000000000 --- a/compiler-rt/lib/lsan/lsan_thread.cc +++ /dev/null @@ -1,162 +0,0 @@ -//=-- lsan_thread.cc ------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of LeakSanitizer. -// See lsan_thread.h for details. -// -//===----------------------------------------------------------------------===// - -#include "lsan_thread.h" - -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_placement_new.h" -#include "sanitizer_common/sanitizer_thread_registry.h" -#include "sanitizer_common/sanitizer_tls_get_addr.h" -#include "lsan_allocator.h" -#include "lsan_common.h" - -namespace __lsan { - -static ThreadRegistry *thread_registry; - -static ThreadContextBase *CreateThreadContext(u32 tid) { - void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext"); - return new(mem) ThreadContext(tid); -} - -static const uptr kMaxThreads = 1 << 13; -static const uptr kThreadQuarantineSize = 64; - -void InitializeThreadRegistry() { - static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)]; - thread_registry = new(thread_registry_placeholder) - ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize); -} - -ThreadContext::ThreadContext(int tid) - : ThreadContextBase(tid), - stack_begin_(0), - stack_end_(0), - cache_begin_(0), - cache_end_(0), - tls_begin_(0), - tls_end_(0), - dtls_(nullptr) {} - -struct OnStartedArgs { - uptr stack_begin, stack_end, - cache_begin, cache_end, - tls_begin, tls_end; - DTLS *dtls; -}; - -void ThreadContext::OnStarted(void *arg) { - OnStartedArgs *args = reinterpret_cast(arg); - stack_begin_ = args->stack_begin; - stack_end_ = args->stack_end; - tls_begin_ = args->tls_begin; - tls_end_ = args->tls_end; - cache_begin_ = args->cache_begin; - cache_end_ = args->cache_end; - dtls_ = args->dtls; -} - -void ThreadContext::OnFinished() { - AllocatorThreadFinish(); - DTLS_Destroy(); -} - -u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) { - return thread_registry->CreateThread(user_id, detached, parent_tid, - /* arg */ nullptr); -} - -void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) { - OnStartedArgs args; - uptr stack_size = 0; - uptr tls_size = 0; - GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size, - &args.tls_begin, &tls_size); - args.stack_end = args.stack_begin + stack_size; - args.tls_end = args.tls_begin + tls_size; - GetAllocatorCacheRange(&args.cache_begin, &args.cache_end); - args.dtls = DTLS_Get(); - thread_registry->StartThread(tid, os_id, thread_type, &args); -} - -void ThreadFinish() { - thread_registry->FinishThread(GetCurrentThread()); - SetCurrentThread(kInvalidTid); -} - -ThreadContext *CurrentThreadContext() { - if (!thread_registry) return nullptr; - if 
(GetCurrentThread() == kInvalidTid) - return nullptr; - // No lock needed when getting current thread. - return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread()); -} - -static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { - uptr uid = (uptr)arg; - if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { - return true; - } - return false; -} - -u32 ThreadTid(uptr uid) { - return thread_registry->FindThread(FindThreadByUid, (void*)uid); -} - -void ThreadJoin(u32 tid) { - CHECK_NE(tid, kInvalidTid); - thread_registry->JoinThread(tid, /* arg */nullptr); -} - -void EnsureMainThreadIDIsCorrect() { - if (GetCurrentThread() == 0) - CurrentThreadContext()->os_id = GetTid(); -} - -///// Interface to the common LSan module. ///// - -bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, - uptr *tls_begin, uptr *tls_end, uptr *cache_begin, - uptr *cache_end, DTLS **dtls) { - ThreadContext *context = static_cast( - thread_registry->FindThreadContextByOsIDLocked(os_id)); - if (!context) return false; - *stack_begin = context->stack_begin(); - *stack_end = context->stack_end(); - *tls_begin = context->tls_begin(); - *tls_end = context->tls_end(); - *cache_begin = context->cache_begin(); - *cache_end = context->cache_end(); - *dtls = context->dtls(); - return true; -} - -void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, - void *arg) { -} - -void LockThreadRegistry() { - thread_registry->Lock(); -} - -void UnlockThreadRegistry() { - thread_registry->Unlock(); -} - -ThreadRegistry *GetThreadRegistryLocked() { - thread_registry->CheckLocked(); - return thread_registry; -} - -} // namespace __lsan diff --git a/compiler-rt/lib/lsan/lsan_thread.cpp b/compiler-rt/lib/lsan/lsan_thread.cpp new file mode 100644 index 00000000000..84e7ce61b97 --- /dev/null +++ b/compiler-rt/lib/lsan/lsan_thread.cpp @@ -0,0 +1,162 @@ +//=-- lsan_thread.cpp -----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// See lsan_thread.h for details. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+
+namespace __lsan {
+
+static ThreadRegistry *thread_registry;
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+  void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
+  return new(mem) ThreadContext(tid);
+}
+
+static const uptr kMaxThreads = 1 << 13;
+static const uptr kThreadQuarantineSize = 64;
+
+void InitializeThreadRegistry() {
+  static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+  thread_registry = new(thread_registry_placeholder)
+      ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
+}
+
+ThreadContext::ThreadContext(int tid)
+    : ThreadContextBase(tid),
+      stack_begin_(0),
+      stack_end_(0),
+      cache_begin_(0),
+      cache_end_(0),
+      tls_begin_(0),
+      tls_end_(0),
+      dtls_(nullptr) {}
+
+struct OnStartedArgs {
+  uptr stack_begin, stack_end,
+       cache_begin, cache_end,
+       tls_begin, tls_end;
+  DTLS *dtls;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+  OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg);
+  stack_begin_ = args->stack_begin;
+  stack_end_ = args->stack_end;
+  tls_begin_ = args->tls_begin;
+  tls_end_ = args->tls_end;
+  cache_begin_ = args->cache_begin;
+  cache_end_ = args->cache_end;
+  dtls_ = args->dtls;
+}
+
+void ThreadContext::OnFinished() {
+  AllocatorThreadFinish();
+  DTLS_Destroy();
+}
+
+u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
+  return thread_registry->CreateThread(user_id, detached, parent_tid,
+                                       /* arg */ nullptr);
+}
+
+void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
+  OnStartedArgs args;
+  uptr stack_size = 0;
+  uptr tls_size = 0;
+  GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
+                       &args.tls_begin, &tls_size);
+  args.stack_end = args.stack_begin + stack_size;
+  args.tls_end = args.tls_begin + tls_size;
+  GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+  args.dtls = DTLS_Get();
+  thread_registry->StartThread(tid, os_id, thread_type, &args);
+}
+
+void ThreadFinish() {
+  thread_registry->FinishThread(GetCurrentThread());
+  SetCurrentThread(kInvalidTid);
+}
+
+ThreadContext *CurrentThreadContext() {
+  if (!thread_registry) return nullptr;
+  if (GetCurrentThread() == kInvalidTid)
+    return nullptr;
+  // No lock needed when getting current thread.
+  return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
+}
+
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+  uptr uid = (uptr)arg;
+  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+    return true;
+  }
+  return false;
+}
+
+u32 ThreadTid(uptr uid) {
+  return thread_registry->FindThread(FindThreadByUid, (void*)uid);
+}
+
+void ThreadJoin(u32 tid) {
+  CHECK_NE(tid, kInvalidTid);
+  thread_registry->JoinThread(tid, /* arg */nullptr);
+}
+
+void EnsureMainThreadIDIsCorrect() {
+  if (GetCurrentThread() == 0)
+    CurrentThreadContext()->os_id = GetTid();
+}
+
+///// Interface to the common LSan module. /////
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+                           uptr *cache_end, DTLS **dtls) {
+  ThreadContext *context = static_cast<ThreadContext *>(
+      thread_registry->FindThreadContextByOsIDLocked(os_id));
+  if (!context) return false;
+  *stack_begin = context->stack_begin();
+  *stack_end = context->stack_end();
+  *tls_begin = context->tls_begin();
+  *tls_end = context->tls_end();
+  *cache_begin = context->cache_begin();
+  *cache_end = context->cache_end();
+  *dtls = context->dtls();
+  return true;
+}
+
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
+                            void *arg) {
+}
+
+void LockThreadRegistry() {
+  thread_registry->Lock();
+}
+
+void UnlockThreadRegistry() {
+  thread_registry->Unlock();
+}
+
+ThreadRegistry *GetThreadRegistryLocked() {
+  thread_registry->CheckLocked();
+  return thread_registry;
+}
+
+} // namespace __lsan
diff --git a/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh b/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh
index db3758ee529..e616725ecf2 100755
--- a/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh
+++ b/compiler-rt/lib/sanitizer_common/scripts/check_lint.sh
@@ -103,7 +103,7 @@ run_lint ${MSAN_RTL_LINT_FILTER} ${MSAN_RTL}/*.cc \
                                  ${MSAN_RTL}/*.h &
 
 # LSan
 LSAN_RTL=${COMPILER_RT}/lib/lsan
-run_lint ${LSAN_RTL_LINT_FILTER} ${LSAN_RTL}/*.cc \
+run_lint ${LSAN_RTL_LINT_FILTER} ${LSAN_RTL}/*.cpp \
                                  ${LSAN_RTL}/*.h &
 run_lint ${LSAN_LIT_TEST_LINT_FILTER} ${LIT_TESTS}/lsan/*/*.cc &
--
cgit v1.2.3