author | Sergey Matveev <earthdok@google.com> | 2013-06-03 11:21:34 +0000
committer | Sergey Matveev <earthdok@google.com> | 2013-06-03 11:21:34 +0000
commit | 17ee1abfa7a093fea959037f668e8bb8c67c5f19 (patch)
tree | 9011a5ede00f79778f7fd7a7a6ac03015452055b
parent | 3786ae5c5453669f34aa10d444f5d7162cc6fd93 (diff)
[lsan] Add __lsan_disable() and __lsan_enable().
Objects allocated after a call to __lsan_disable() will be treated as
live memory. Also add a ScopedDisabler.
llvm-svn: 183099
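
A rough usage sketch of the new interface, based on the lsan_interface.h header and the disabler.cc test added in this patch; the allocation sizes are illustrative only, and the disable counter is per-thread, so only allocations made on the calling thread while it is non-zero are suppressed.

```cpp
#include <stdlib.h>

#include <sanitizer/lsan_interface.h>

int main() {
  {
    // RAII wrapper around __lsan_disable()/__lsan_enable(): allocations made
    // on this thread while the disabler is in scope are tagged kSuppressed
    // and treated as live memory by the leak check.
    __lsan::ScopedDisabler disabler;
    malloc(666);  // not reported
  }

  // Equivalent C interface; disable/enable pairs can be nested. An unmatched
  // __lsan_enable() dies with "Unmatched call to __lsan_enable()".
  __lsan_disable();
  malloc(666);  // not reported
  __lsan_enable();

  malloc(1337);  // still reported as leaked at process exit
  return 0;
}
```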
-rw-r--r-- | compiler-rt/include/sanitizer/lsan_interface.h | 39
-rw-r--r-- | compiler-rt/lib/asan/asan_allocator2.cc | 21
-rw-r--r-- | compiler-rt/lib/asan/asan_thread.cc | 1
-rw-r--r-- | compiler-rt/lib/asan/asan_thread.h | 10
-rw-r--r-- | compiler-rt/lib/lsan/lit_tests/TestCases/disabler.cc | 23
-rw-r--r-- | compiler-rt/lib/lsan/lsan_allocator.cc | 21
-rw-r--r-- | compiler-rt/lib/lsan/lsan_common.cc | 70
-rw-r--r-- | compiler-rt/lib/lsan/lsan_common.h | 17
8 files changed, 162 insertions, 40 deletions
diff --git a/compiler-rt/include/sanitizer/lsan_interface.h b/compiler-rt/include/sanitizer/lsan_interface.h
new file mode 100644
index 00000000000..cdb3b39dd41
--- /dev/null
+++ b/compiler-rt/include/sanitizer/lsan_interface.h
@@ -0,0 +1,39 @@
+//===-- sanitizer/lsan_interface.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LSAN_INTERFACE_H
+#define SANITIZER_LSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  // Allocations made between calls to __lsan_disable() and __lsan_enable()
+  // will be treated as non-leaks. Disable/enable pairs can be nested.
+  void __lsan_disable();
+  void __lsan_enable();
+
+#ifdef __cplusplus
+}  // extern "C"
+
+namespace __lsan {
+class ScopedDisabler {
+ public:
+  ScopedDisabler() { __lsan_disable(); }
+  ~ScopedDisabler() { __lsan_enable(); }
+};
+}  // namespace __lsan
+#endif
+
+#endif  // SANITIZER_LSAN_INTERFACE_H
diff --git a/compiler-rt/lib/asan/asan_allocator2.cc b/compiler-rt/lib/asan/asan_allocator2.cc
index 67e6ef62a0d..f1302f04193 100644
--- a/compiler-rt/lib/asan/asan_allocator2.cc
+++ b/compiler-rt/lib/asan/asan_allocator2.cc
@@ -422,6 +422,8 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
     uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
     REAL(memset)(res, fl.malloc_fill_byte, fill_size);
   }
+  m->lsan_tag =
+      t->lsan_disabled() ? __lsan::kSuppressed : __lsan::kDirectlyLeaked;
   // Must be the last mutation of metadata in this function.
   atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
 
   ASAN_MALLOC_HOOK(res, size);
@@ -787,10 +789,27 @@ template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
 template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
 template void ForEachChunk<MarkIndirectlyLeakedCb>(
     MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<ClearTagCb>(ClearTagCb const &callback);
+template void ForEachChunk<CollectSuppressedCb>(
+    CollectSuppressedCb const &callback);
 #endif  // CAN_SANITIZE_LEAKS
 }  // namespace __lsan
 
+extern "C" {
+void __lsan_disable() {
+  __asan_init();
+  __asan::AsanThread *t = __asan::GetCurrentThread();
+  CHECK(t);
+  t->disable_lsan();
+}
+
+void __lsan_enable() {
+  __asan_init();
+  __asan::AsanThread *t = __asan::GetCurrentThread();
+  CHECK(t);
+  t->enable_lsan();
+}
+}  // extern "C"
+
 // ---------------------- Interface ---------------- {{{1
 using namespace __asan;  // NOLINT
diff --git a/compiler-rt/lib/asan/asan_thread.cc b/compiler-rt/lib/asan/asan_thread.cc
index 0787e933789..adfbdac4349 100644
--- a/compiler-rt/lib/asan/asan_thread.cc
+++ b/compiler-rt/lib/asan/asan_thread.cc
@@ -108,6 +108,7 @@ void AsanThread::Destroy() {
 
 void AsanThread::Init() {
   SetThreadStackAndTls();
+  lsan_disabled_ = 0;
   CHECK(AddrIsInMem(stack_bottom_));
   CHECK(AddrIsInMem(stack_top_ - 1));
   ClearShadowForThreadStackAndTLS();
diff --git a/compiler-rt/lib/asan/asan_thread.h b/compiler-rt/lib/asan/asan_thread.h
index bf08818da04..346c0440255 100644
--- a/compiler-rt/lib/asan/asan_thread.h
+++ b/compiler-rt/lib/asan/asan_thread.h
@@ -65,6 +65,15 @@ class AsanThread {
   uptr stack_size() { return stack_top_ - stack_bottom_; }
   uptr tls_begin() { return tls_begin_; }
   uptr tls_end() { return tls_end_; }
+  uptr lsan_disabled() { return lsan_disabled_; }
+  void disable_lsan() { lsan_disabled_++; }
+  void enable_lsan() {
+    if (!lsan_disabled_) {
+      Report("Unmatched call to __lsan_enable().\n");
+      Die();
+    }
+    lsan_disabled_--;
+  }
   u32 tid() { return context_->tid; }
   AsanThreadContext *context() { return context_; }
   void set_context(AsanThreadContext *context) { context_ = context; }
@@ -90,6 +99,7 @@ class AsanThread {
   uptr stack_bottom_;
   uptr tls_begin_;
   uptr tls_end_;
+  uptr lsan_disabled_;
   FakeStack fake_stack_;
   AsanThreadLocalMallocStorage malloc_storage_;
diff --git a/compiler-rt/lib/lsan/lit_tests/TestCases/disabler.cc b/compiler-rt/lib/lsan/lit_tests/TestCases/disabler.cc
new file mode 100644
index 00000000000..66ed846490f
--- /dev/null
+++ b/compiler-rt/lib/lsan/lit_tests/TestCases/disabler.cc
@@ -0,0 +1,23 @@
+// Test for ScopedDisabler.
+// RUN: LSAN_BASE="report_blocks=1:use_registers=0:use_stacks=0:use_globals=0:use_tls=0"
+// RUN: %clangxx_lsan -I %p/../../../../include %s -o %t
+// RUN: LSAN_OPTIONS=$LSAN_BASE %t 2>&1 | FileCheck %s
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "sanitizer/lsan_interface.h"
+
+int main() {
+  void **p;
+  {
+    __lsan::ScopedDisabler d;
+    p = new void *;
+  }
+  *reinterpret_cast<void **>(p) = malloc(666);
+  void *q = malloc(1337);
+  // Break optimization.
+  fprintf(stderr, "Test alloc: %p.\n", q);
+  return 0;
+}
+// CHECK: SUMMARY: LeakSanitizer: 1337 byte(s) leaked in 1 allocation(s)
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cc b/compiler-rt/lib/lsan/lsan_allocator.cc
index 3ae773b21bd..eca7399433a 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cc
+++ b/compiler-rt/lib/lsan/lsan_allocator.cc
@@ -44,6 +44,8 @@ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
 
 static Allocator allocator;
 static THREADLOCAL AllocatorCache cache;
+// All allocations made while this is > 0 will be treated as non-leaks.
+static THREADLOCAL uptr lsan_disabled;
 
 void InitializeAllocator() {
   allocator.Init();
@@ -61,6 +63,7 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
   if (!p) return;
   ChunkMetadata *m = Metadata(p);
   CHECK(m);
+  m->tag = lsan_disabled ? kSuppressed : kDirectlyLeaked;
   m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
   m->requested_size = size;
   atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
@@ -185,5 +188,21 @@ template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
 template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
 template void ForEachChunk<MarkIndirectlyLeakedCb>(
     MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<ClearTagCb>(ClearTagCb const &callback);
+template void ForEachChunk<CollectSuppressedCb>(
+    CollectSuppressedCb const &callback);
 }  // namespace __lsan
+
+extern "C" {
+void __lsan_disable() {
+  __lsan::lsan_disabled++;
+}
+
+void __lsan_enable() {
+  if (!__lsan::lsan_disabled) {
+    Report("Unmatched call to __lsan_enable().\n");
+    Die();
+  }
+  __lsan::lsan_disabled--;
+}
+}  // extern "C"
+
diff --git a/compiler-rt/lib/lsan/lsan_common.cc b/compiler-rt/lib/lsan/lsan_common.cc
index f6e93ae09f1..ae1fca740d3 100644
--- a/compiler-rt/lib/lsan/lsan_common.cc
+++ b/compiler-rt/lib/lsan/lsan_common.cc
@@ -78,8 +78,8 @@ static inline bool CanBeAHeapPointer(uptr p) {
 
 // Scan the memory range, looking for byte patterns that point into allocator
 // chunks. Mark those chunks with tag and add them to the frontier.
-// There are two usage modes for this function: finding non-leaked chunks
-// (tag = kReachable) and finding indirectly leaked chunks
+// There are two usage modes for this function: finding reachable or suppressed
+// chunks (tag = kReachable or kSuppressed) and finding indirectly leaked chunks
 // (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
 // so frontier = 0.
 void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
@@ -93,12 +93,12 @@ void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
   for (; pp + sizeof(uptr) <= end; pp += alignment) {
     void *p = *reinterpret_cast<void**>(pp);
     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
-    // FIXME: PointsIntoChunk is SLOW because GetBlockBegin() in
-    // LargeMmapAllocator involves a lock and a linear search.
     void *chunk = PointsIntoChunk(p);
     if (!chunk) continue;
     LsanMetadata m(chunk);
+    // Reachable beats suppressed beats leaked.
     if (m.tag() == kReachable) continue;
+    if (m.tag() == kSuppressed && tag != kReachable) continue;
     m.set_tag(tag);
     if (flags()->log_pointers)
       Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
@@ -178,13 +178,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
   }
 }
 
-static void FloodFillReachable(InternalVector<uptr> *frontier) {
+static void FloodFillTag(InternalVector<uptr> *frontier, ChunkTag tag) {
   while (frontier->size()) {
     uptr next_chunk = frontier->back();
     frontier->pop_back();
     LsanMetadata m(reinterpret_cast<void *>(next_chunk));
     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
-                         "HEAP", kReachable);
+                         "HEAP", tag);
   }
 }
 
@@ -199,6 +199,13 @@ void MarkIndirectlyLeakedCb::operator()(void *p) const {
   }
 }
 
+void CollectSuppressedCb::operator()(void *p) const {
+  p = GetUserBegin(p);
+  LsanMetadata m(p);
+  if (m.allocated() && m.tag() == kSuppressed)
+    frontier_->push_back(reinterpret_cast<uptr>(p));
+}
+
 // Set the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
@@ -207,21 +214,24 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   if (flags()->use_globals)
     ProcessGlobalRegions(&frontier);
   ProcessThreads(suspended_threads, &frontier);
-  FloodFillReachable(&frontier);
+  FloodFillTag(&frontier, kReachable);
 
+  // The check here is relatively expensive, so we do this in a separate flood
+  // fill. That way we can skip the check for chunks that are reachable
+  // otherwise.
   ProcessPlatformSpecificAllocations(&frontier);
-  FloodFillReachable(&frontier);
+  FloodFillTag(&frontier, kReachable);
 
-  // Now all reachable chunks are marked. Iterate over leaked chunks and mark
-  // those that are reachable from other leaked chunks.
   if (flags()->log_pointers)
-    Report("Now scanning leaked blocks for pointers.\n");
-  ForEachChunk(MarkIndirectlyLeakedCb());
-}
+    Report("Scanning suppressed blocks.\n");
+  CHECK_EQ(0, frontier.size());
+  ForEachChunk(CollectSuppressedCb(&frontier));
+  FloodFillTag(&frontier, kSuppressed);
 
-void ClearTagCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
-  m.set_tag(kDirectlyLeaked);
+  // Iterate over leaked chunks and mark those that are reachable from other
+  // leaked chunks.
+  if (flags()->log_pointers)
+    Report("Scanning leaked blocks.\n");
+  ForEachChunk(MarkIndirectlyLeakedCb());
 }
 
 static void PrintStackTraceById(u32 stack_trace_id) {
@@ -232,21 +242,11 @@ static void PrintStackTraceById(u32 stack_trace_id) {
                          common_flags()->strip_path_prefix, 0);
 }
 
-static void LockAndSuspendThreads(StopTheWorldCallback callback, void *arg) {
-  LockThreadRegistry();
-  LockAllocator();
-  StopTheWorld(callback, arg);
-  UnlockAllocator();
-  UnlockThreadRegistry();
-}
-
-///// Normal leak checking. /////
-
 void CollectLeaksCb::operator()(void *p) const {
   p = GetUserBegin(p);
   LsanMetadata m(p);
   if (!m.allocated()) return;
-  if (m.tag() != kReachable) {
+  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     uptr resolution = flags()->resolution;
     if (resolution > 0) {
       uptr size = 0;
@@ -268,8 +268,7 @@ void PrintLeakedCb::operator()(void *p) const {
   p = GetUserBegin(p);
   LsanMetadata m(p);
   if (!m.allocated()) return;
-  if (m.tag() != kReachable) {
-    CHECK(m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked);
+  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     Printf("%s leaked %llu byte block at %p\n",
            m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
            m.requested_size(), p);
@@ -308,13 +307,19 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
     PrintLeaked();
   leak_report.PrintSummary();
   Printf("\n");
-  ForEachChunk(ClearTagCb());
   *result = kLeaksFound;
 }
 
 void DoLeakCheck() {
+  static bool already_done;
   LeakCheckResult result = kFatalError;
-  LockAndSuspendThreads(DoLeakCheckCallback, &result);
+  LockThreadRegistry();
+  LockAllocator();
+  CHECK(!already_done);
+  already_done = true;
+  StopTheWorld(DoLeakCheckCallback, &result);
+  UnlockAllocator();
+  UnlockThreadRegistry();
   if (result == kFatalError) {
     Report("LeakSanitizer has encountered a fatal error.\n");
     Die();
@@ -387,5 +392,6 @@ void LeakReport::PrintSummary() {
   Printf("SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n",
          bytes, allocations);
 }
+
 }  // namespace __lsan
 #endif  // CAN_SANITIZE_LEAKS
diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
index 8cb4b2753cd..18660ba68e3 100644
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -32,7 +32,8 @@ namespace __lsan {
 enum ChunkTag {
   kDirectlyLeaked = 0,  // default
   kIndirectlyLeaked = 1,
-  kReachable = 2
+  kReachable = 2,
+  kSuppressed = 3
 };
 
 struct Flags {
@@ -135,17 +136,21 @@ class CollectLeaksCb {
   LeakReport *leak_report_;
 };
 
-// Resets each chunk's tag to default (kDirectlyLeaked).
-class ClearTagCb {
+// Scans each leaked chunk for pointers to other leaked chunks, and marks each
+// of them as indirectly leaked.
+class MarkIndirectlyLeakedCb {
  public:
   void operator()(void *p) const;
 };
 
-// Scans each leaked chunk for pointers to other leaked chunks, and marks each
-// of them as indirectly leaked.
-class MarkIndirectlyLeakedCb {
+// Finds all chunks marked as kSuppressed and adds their addresses to frontier.
+class CollectSuppressedCb {
  public:
+  explicit CollectSuppressedCb(InternalVector<uptr> *frontier)
+      : frontier_(frontier) {}
   void operator()(void *p) const;
+ private:
+  InternalVector<uptr> *frontier_;
 };
 
 // The following must be implemented in the parent tool.