author     Kostya Serebryany <kcc@google.com>   2013-04-04 11:32:49 +0000
committer  Kostya Serebryany <kcc@google.com>   2013-04-04 11:32:49 +0000
commit     eef8bd4355e85668de5ca11ff10836cd631eeb2d (patch)
tree       061fb7f9afb68915cf9eb0f0ebaf4a554223beed
parent     13b07733b1b1a5ac4c51117f08f0b3ae31e87886 (diff)
[asan] nuke the old unused allocator code
llvm-svn: 178758
-rw-r--r--  compiler-rt/lib/asan/CMakeLists.txt            |   1
-rw-r--r--  compiler-rt/lib/asan/asan_allocator.cc         | 812
-rw-r--r--  compiler-rt/lib/asan/asan_allocator.h          |  18
-rw-r--r--  compiler-rt/lib/asan/asan_allocator2.cc        |   4
-rw-r--r--  compiler-rt/lib/asan/asan_flags.h              |   2
-rw-r--r--  compiler-rt/lib/asan/asan_rtl.cc               |   2
-rw-r--r--  compiler-rt/lib/asan/tests/asan_noinst_test.cc |  91
-rw-r--r--  compiler-rt/lib/asan/tests/asan_oob_test.cc    |   2
8 files changed, 3 insertions, 929 deletions
diff --git a/compiler-rt/lib/asan/CMakeLists.txt b/compiler-rt/lib/asan/CMakeLists.txt
index 030c7b3ec3f..b70a29cfd27 100644
--- a/compiler-rt/lib/asan/CMakeLists.txt
+++ b/compiler-rt/lib/asan/CMakeLists.txt
@@ -1,7 +1,6 @@
 # Build for the AddressSanitizer runtime support library.
 
 set(ASAN_SOURCES
-  asan_allocator.cc
   asan_allocator2.cc
   asan_fake_stack.cc
   asan_globals.cc
diff --git a/compiler-rt/lib/asan/asan_allocator.cc b/compiler-rt/lib/asan/asan_allocator.cc
deleted file mode 100644
index 63454b78e67..00000000000
--- a/compiler-rt/lib/asan/asan_allocator.cc
+++ /dev/null
@@ -1,812 +0,0 @@
-//===-- asan_allocator.cc -------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Implementation of ASan's memory allocator.
-// Evey piece of memory (AsanChunk) allocated by the allocator
-// has a left redzone of REDZONE bytes and
-// a right redzone such that the end of the chunk is aligned by REDZONE
-// (i.e. the right redzone is between 0 and REDZONE-1).
-// The left redzone is always poisoned.
-// The right redzone is poisoned on malloc, the body is poisoned on free.
-// Once freed, a chunk is moved to a quarantine (fifo list).
-// After quarantine, a chunk is returned to freelists.
-//
-// The left redzone contains ASan's internal data and the stack trace of
-// the malloc call.
-// Once freed, the body of the chunk contains the stack trace of the free call.
-//
-//===----------------------------------------------------------------------===//
-#include "asan_allocator.h"
-
-#if ASAN_ALLOCATOR_VERSION == 1
-#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_mapping.h"
-#include "asan_stats.h"
-#include "asan_report.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_mutex.h"
-
-namespace __asan {
-
-#define REDZONE ((uptr)(flags()->redzone))
-static const uptr kMinAllocSize = REDZONE * 2;
-static const u64 kMaxAvailableRam = 128ULL << 30;  // 128G
-static const uptr kMaxThreadLocalQuarantine = 1 << 20;  // 1M
-
-static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
-static const uptr kMaxSizeForThreadLocalFreeList =
-    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;
-
-// Size classes less than kMallocSizeClassStep are powers of two.
-// All other size classes are multiples of kMallocSizeClassStep.
-static const uptr kMallocSizeClassStepLog = 26;
-static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
-
-static const uptr kMaxAllowedMallocSize =
-    (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
-
-static inline uptr SizeClassToSize(u8 size_class) {
-  CHECK(size_class < kNumberOfSizeClasses);
-  if (size_class <= kMallocSizeClassStepLog) {
-    return 1UL << size_class;
-  } else {
-    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
-  }
-}
-
-static inline u8 SizeToSizeClass(uptr size) {
-  u8 res = 0;
-  if (size <= kMallocSizeClassStep) {
-    uptr rounded = RoundUpToPowerOfTwo(size);
-    res = Log2(rounded);
-  } else {
-    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
-        + kMallocSizeClassStepLog;
-  }
-  CHECK(res < kNumberOfSizeClasses);
-  CHECK(size <= SizeClassToSize(res));
-  return res;
-}
-
-// Given REDZONE bytes, we need to mark first size bytes
-// as addressable and the rest REDZONE-size bytes as unaddressable.
-static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
-  CHECK(size <= REDZONE);
-  CHECK(IsAligned(mem, REDZONE));
-  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
-  CHECK(IsPowerOfTwo(REDZONE));
-  CHECK(REDZONE >= SHADOW_GRANULARITY);
-  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
-                                  kAsanHeapRightRedzoneMagic);
-}
-
-static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
-  CHECK(IsAligned(size, GetPageSizeCached()));
-  u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
-  PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
-  if (flags()->debug) {
-    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
-  }
-  return res;
-}
-
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
-//
-// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
-// the beginning of a AsanChunk (in which the actual chunk resides at
-// this - this->used_size).
-//
-// The magic numbers for the enum values are taken randomly.
-enum {
-  CHUNK_AVAILABLE  = 0x57,
-  CHUNK_ALLOCATED  = 0x32,
-  CHUNK_QUARANTINE = 0x19,
-  CHUNK_MEMALIGN   = 0xDC
-};
-
-struct ChunkBase {
-  // First 8 bytes.
-  uptr  chunk_state : 8;
-  uptr  alloc_tid   : 24;
-  uptr  size_class  : 8;
-  uptr  free_tid    : 24;
-
-  // Second 8 bytes.
-  uptr alignment_log : 8;
-  uptr alloc_type    : 2;
-  uptr used_size : FIRST_32_SECOND_64(32, 54);  // Size requested by the user.
-
-  // This field may overlap with the user area and thus should not
-  // be used while the chunk is in CHUNK_ALLOCATED state.
-  AsanChunk *next;
-
-  // Typically the beginning of the user-accessible memory is 'this'+REDZONE
-  // and is also aligned by REDZONE. However, if the memory is allocated
-  // by memalign, the alignment might be higher and the user-accessible memory
-  // starts at the first properly aligned address after 'this'.
-  uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
-  uptr Size() { return SizeClassToSize(size_class); }
-  u8 SizeClass() { return size_class; }
-};
-
-struct AsanChunk: public ChunkBase {
-  u32 *compressed_alloc_stack() {
-    return (u32*)((uptr)this + sizeof(ChunkBase));
-  }
-  u32 *compressed_free_stack() {
-    return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
-  }
-
-  // The left redzone after the ChunkBase is given to the alloc stack trace.
-  uptr compressed_alloc_stack_size() {
-    if (REDZONE < sizeof(ChunkBase)) return 0;
-    return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
-  }
-  uptr compressed_free_stack_size() {
-    if (REDZONE < sizeof(ChunkBase)) return 0;
-    return (REDZONE) / sizeof(u32);
-  }
-};
-
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->used_size; }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-
-void AsanChunkView::GetAllocStack(StackTrace *stack) {
-  StackTrace::UncompressStack(stack, chunk_->compressed_alloc_stack(),
-                              chunk_->compressed_alloc_stack_size());
-}
-
-void AsanChunkView::GetFreeStack(StackTrace *stack) {
-  StackTrace::UncompressStack(stack, chunk_->compressed_free_stack(),
-                              chunk_->compressed_free_stack_size());
-}
-
-static AsanChunk *PtrToChunk(uptr ptr) {
-  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
-  if (m->chunk_state == CHUNK_MEMALIGN) {
-    m = (AsanChunk*)((uptr)m - m->used_size);
-  }
-  return m;
-}
-
-void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
-  CHECK(q->size() > 0);
-  size_ += q->size();
-  append_back(q);
-  q->clear();
-}
-
-void AsanChunkFifoList::Push(AsanChunk *n) {
-  push_back(n);
-  size_ += n->Size();
-}
-
-// Interesting performance observation: this function takes up to 15% of overal
-// allocator time. That's because *first_ has been evicted from cache long time
-// ago. Not sure if we can or want to do anything with this.
-AsanChunk *AsanChunkFifoList::Pop() {
-  CHECK(first_);
-  AsanChunk *res = front();
-  size_ -= res->Size();
-  pop_front();
-  return res;
-}
-
-// All pages we ever allocated.
-struct PageGroup {
-  uptr beg;
-  uptr end;
-  uptr size_of_chunk;
-  uptr last_chunk;
-  bool InRange(uptr addr) {
-    return addr >= beg && addr < end;
-  }
-};
-
-class MallocInfo {
- public:
-  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }
-
-  AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
-    AsanChunk *m = 0;
-    AsanChunk **fl = &free_lists_[size_class];
-    {
-      BlockingMutexLock lock(&mu_);
-      for (uptr i = 0; i < n_chunks; i++) {
-        if (!(*fl)) {
-          *fl = GetNewChunks(size_class);
-        }
-        AsanChunk *t = *fl;
-        *fl = t->next;
-        t->next = m;
-        CHECK(t->chunk_state == CHUNK_AVAILABLE);
-        m = t;
-      }
-    }
-    return m;
-  }
-
-  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
-                                       bool eat_free_lists) {
-    CHECK(flags()->quarantine_size > 0);
-    BlockingMutexLock lock(&mu_);
-    AsanChunkFifoList *q = &x->quarantine_;
-    if (q->size() > 0) {
-      quarantine_.PushList(q);
-      while (quarantine_.size() > (uptr)flags()->quarantine_size) {
-        QuarantinePop();
-      }
-    }
-    if (eat_free_lists) {
-      for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
-           size_class++) {
-        AsanChunk *m = x->free_lists_[size_class];
-        while (m) {
-          AsanChunk *t = m->next;
-          m->next = free_lists_[size_class];
-          free_lists_[size_class] = m;
-          m = t;
-        }
-        x->free_lists_[size_class] = 0;
-      }
-    }
-  }
-
-  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
-    BlockingMutexLock lock(&mu_);
-    quarantine_.Push(chunk);
-  }
-
-  AsanChunk *FindChunkByAddr(uptr addr) {
-    BlockingMutexLock lock(&mu_);
-    return FindChunkByAddrUnlocked(addr);
-  }
-
-  uptr AllocationSize(uptr ptr) {
-    if (!ptr) return 0;
-    BlockingMutexLock lock(&mu_);
-
-    // Make sure this is our chunk and |ptr| actually points to the beginning
-    // of the allocated memory.
-    AsanChunk *m = FindChunkByAddrUnlocked(ptr);
-    if (!m || m->Beg() != ptr) return 0;
-
-    if (m->chunk_state == CHUNK_ALLOCATED) {
-      return m->used_size;
-    } else {
-      return 0;
-    }
-  }
-
-  void ForceLock() {
-    mu_.Lock();
-  }
-
-  void ForceUnlock() {
-    mu_.Unlock();
-  }
-
-  void PrintStatus() {
-    BlockingMutexLock lock(&mu_);
-    uptr malloced = 0;
-
-    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
-           quarantine_.size() >> 20, malloced >> 20);
-    for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
-      AsanChunk *i = free_lists_[j];
-      if (!i) continue;
-      uptr t = 0;
-      for (; i; i = i->next) {
-        t += i->Size();
-      }
-      Printf("%zu:%zu ", j, t >> 20);
-    }
-    Printf("\n");
-  }
-
-  PageGroup *FindPageGroup(uptr addr) {
-    BlockingMutexLock lock(&mu_);
-    return FindPageGroupUnlocked(addr);
-  }
-
- private:
-  PageGroup *FindPageGroupUnlocked(uptr addr) {
-    int n = atomic_load(&n_page_groups_, memory_order_relaxed);
-    // If the page groups are not sorted yet, sort them.
-    if (n_sorted_page_groups_ < n) {
-      SortArray((uptr*)page_groups_, n);
-      n_sorted_page_groups_ = n;
-    }
-    // Binary search over the page groups.
-    int beg = 0, end = n;
-    while (beg < end) {
-      int med = (beg + end) / 2;
-      uptr g = (uptr)page_groups_[med];
-      if (addr > g) {
-        // 'g' points to the end of the group, so 'addr'
-        // may not belong to page_groups_[med] or any previous group.
-        beg = med + 1;
-      } else {
-        // 'addr' may belong to page_groups_[med] or a previous group.
-        end = med;
-      }
-    }
-    if (beg >= n)
-      return 0;
-    PageGroup *g = page_groups_[beg];
-    CHECK(g);
-    if (g->InRange(addr))
-      return g;
-    return 0;
-  }
-
-  // We have an address between two chunks, and we want to report just one.
-  AsanChunk *ChooseChunk(uptr addr,
-                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
-    // Prefer an allocated chunk or a chunk from quarantine.
-    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
-        right_chunk->chunk_state != CHUNK_AVAILABLE)
-      return right_chunk;
-    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
-        left_chunk->chunk_state != CHUNK_AVAILABLE)
-      return left_chunk;
-    // Choose based on offset.
-    sptr l_offset = 0, r_offset = 0;
-    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
-    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
-    if (l_offset < r_offset)
-      return left_chunk;
-    return right_chunk;
-  }
-
-  AsanChunk *FindChunkByAddrUnlocked(uptr addr) {
-    PageGroup *g = FindPageGroupUnlocked(addr);
-    if (!g) return 0;
-    CHECK(g->size_of_chunk);
-    uptr offset_from_beg = addr - g->beg;
-    uptr this_chunk_addr = g->beg +
-        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
-    CHECK(g->InRange(this_chunk_addr));
-    AsanChunk *m = (AsanChunk*)this_chunk_addr;
-    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
-          m->chunk_state == CHUNK_AVAILABLE ||
-          m->chunk_state == CHUNK_QUARANTINE);
-    sptr offset = 0;
-    AsanChunkView m_view(m);
-    if (m_view.AddrIsInside(addr, 1, &offset))
-      return m;
-
-    if (m_view.AddrIsAtRight(addr, 1, &offset)) {
-      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
-        return m;
-      uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
-      CHECK(g->InRange(right_chunk_addr));
-      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
-    } else {
-      CHECK(m_view.AddrIsAtLeft(addr, 1, &offset));
-      if (this_chunk_addr == g->beg)  // leftmost chunk
-        return m;
-      uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
-      CHECK(g->InRange(left_chunk_addr));
-      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
-    }
-  }
-
-  void QuarantinePop() {
-    CHECK(quarantine_.size() > 0);
-    AsanChunk *m = quarantine_.Pop();
-    CHECK(m);
-    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);
-
-    CHECK(m->chunk_state == CHUNK_QUARANTINE);
-    m->chunk_state = CHUNK_AVAILABLE;
-    PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
-    CHECK(m->alloc_tid >= 0);
-    CHECK(m->free_tid >= 0);
-
-    uptr size_class = m->SizeClass();
-    m->next = free_lists_[size_class];
-    free_lists_[size_class] = m;
-
-    // Statistics.
-    AsanStats &thread_stats = GetCurrentThreadStats();
-    thread_stats.real_frees++;
-    thread_stats.really_freed += m->used_size;
-    thread_stats.really_freed_redzones += m->Size() - m->used_size;
-    thread_stats.really_freed_by_size[m->SizeClass()]++;
-  }
-
-  // Get a list of newly allocated chunks.
-  AsanChunk *GetNewChunks(u8 size_class) {
-    uptr size = SizeClassToSize(size_class);
-    CHECK(IsPowerOfTwo(kMinMmapSize));
-    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
-    uptr mmap_size = Max(size, kMinMmapSize);
-    uptr n_chunks = mmap_size / size;
-    CHECK(n_chunks * size == mmap_size);
-    uptr PageSize = GetPageSizeCached();
-    if (size < PageSize) {
-      // Size is small, just poison the last chunk.
-      n_chunks--;
-    } else {
-      // Size is large, allocate an extra page at right and poison it.
-      mmap_size += PageSize;
-    }
-    CHECK(n_chunks > 0);
-    u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
-
-    // Statistics.
-    AsanStats &thread_stats = GetCurrentThreadStats();
-    thread_stats.mmaps++;
-    thread_stats.mmaped += mmap_size;
-    thread_stats.mmaped_by_size[size_class] += n_chunks;
-
-    AsanChunk *res = 0;
-    for (uptr i = 0; i < n_chunks; i++) {
-      AsanChunk *m = (AsanChunk*)(mem + i * size);
-      m->chunk_state = CHUNK_AVAILABLE;
-      m->size_class = size_class;
-      m->next = res;
-      res = m;
-    }
-    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
-    // This memory is already poisoned, no need to poison it again.
-    pg->beg = (uptr)mem;
-    pg->end = pg->beg + mmap_size;
-    pg->size_of_chunk = size;
-    pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
-    int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
-    CHECK(idx < (int)ARRAY_SIZE(page_groups_));
-    page_groups_[idx] = pg;
-    return res;
-  }
-
-  AsanChunk *free_lists_[kNumberOfSizeClasses];
-  AsanChunkFifoList quarantine_;
-  BlockingMutex mu_;
-
-  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
-  atomic_uint32_t n_page_groups_;
-  int n_sorted_page_groups_;
-};
-
-static MallocInfo malloc_info(LINKER_INITIALIZED);
-
-void AsanThreadLocalMallocStorage::CommitBack() {
-  malloc_info.SwallowThreadLocalMallocStorage(this, true);
-}
-
-AsanChunkView FindHeapChunkByAddress(uptr address) {
-  return AsanChunkView(malloc_info.FindChunkByAddr(address));
-}
-
-static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack,
-                    AllocType alloc_type) {
-  __asan_init();
-  CHECK(stack);
-  if (size == 0) {
-    size = 1;  // TODO(kcc): do something smarter
-  }
-  CHECK(IsPowerOfTwo(alignment));
-  uptr rounded_size = RoundUpTo(size, REDZONE);
-  uptr needed_size = rounded_size + REDZONE;
-  if (alignment > REDZONE) {
-    needed_size += alignment;
-  }
-  CHECK(IsAligned(needed_size, REDZONE));
-  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
-    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
-           (void*)size);
-    return 0;
-  }
-
-  u8 size_class = SizeToSizeClass(needed_size);
-  uptr size_to_allocate = SizeClassToSize(size_class);
-  CHECK(size_to_allocate >= kMinAllocSize);
-  CHECK(size_to_allocate >= needed_size);
-  CHECK(IsAligned(size_to_allocate, REDZONE));
-
-  if (flags()->verbosity >= 3) {
-    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
-           alignment, size, size_class, size_to_allocate);
-  }
-
-  AsanThread *t = GetCurrentThread();
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  // Statistics
-  thread_stats.mallocs++;
-  thread_stats.malloced += size;
-  thread_stats.malloced_redzones += size_to_allocate - size;
-  thread_stats.malloced_by_size[size_class]++;
-
-  AsanChunk *m = 0;
-  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
-    // get directly from global storage.
-    m = malloc_info.AllocateChunks(size_class, 1);
-    thread_stats.malloc_large++;
-  } else {
-    // get from the thread-local storage.
-    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
-    if (!*fl) {
-      uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
-      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
-      thread_stats.malloc_small_slow++;
-    }
-    m = *fl;
-    *fl = (*fl)->next;
-  }
-  CHECK(m);
-  CHECK(m->chunk_state == CHUNK_AVAILABLE);
-  m->chunk_state = CHUNK_ALLOCATED;
-  m->alloc_type = alloc_type;
-  m->next = 0;
-  CHECK(m->Size() == size_to_allocate);
-  uptr addr = (uptr)m + REDZONE;
-  CHECK(addr <= (uptr)m->compressed_free_stack());
-
-  if (alignment > REDZONE && (addr & (alignment - 1))) {
-    addr = RoundUpTo(addr, alignment);
-    CHECK((addr & (alignment - 1)) == 0);
-    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
-    p->chunk_state = CHUNK_MEMALIGN;
-    p->used_size = (uptr)p - (uptr)m;
-    m->alignment_log = Log2(alignment);
-    CHECK(m->Beg() == addr);
-  } else {
-    m->alignment_log = Log2(REDZONE);
-  }
-  CHECK(m == PtrToChunk(addr));
-  m->used_size = size;
-  CHECK(m->Beg() == addr);
-  m->alloc_tid = t ? t->tid() : 0;
-  m->free_tid = kInvalidTid;
-  StackTrace::CompressStack(stack, m->compressed_alloc_stack(),
-                            m->compressed_alloc_stack_size());
-  PoisonShadow(addr, rounded_size, 0);
-  if (size < rounded_size) {
-    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
-                                  size & (REDZONE - 1));
-  }
-  if (size <= (uptr)(flags()->max_malloc_fill_size)) {
-    REAL(memset)((void*)addr, 0, rounded_size);
-  }
-  return (u8*)addr;
-}
-
-static void Deallocate(u8 *ptr, StackTrace *stack, AllocType alloc_type) {
-  if (!ptr) return;
-  CHECK(stack);
-
-  if (flags()->debug) {
-    CHECK(malloc_info.FindPageGroup((uptr)ptr));
-  }
-
-  // Printf("Deallocate %p\n", ptr);
-  AsanChunk *m = PtrToChunk((uptr)ptr);
-
-  // Flip the chunk_state atomically to avoid race on double-free.
-  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
-                                       memory_order_acq_rel);
-
-  if (old_chunk_state == CHUNK_QUARANTINE) {
-    ReportDoubleFree((uptr)ptr, stack);
-  } else if (old_chunk_state != CHUNK_ALLOCATED) {
-    ReportFreeNotMalloced((uptr)ptr, stack);
-  }
-  CHECK(old_chunk_state == CHUNK_ALLOCATED);
-  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
-    ReportAllocTypeMismatch((uptr)ptr, stack,
-                            (AllocType)m->alloc_type, (AllocType)alloc_type);
-  // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
-  CHECK(REDZONE <= 16 || !m->next);
-  CHECK(m->free_tid == kInvalidTid);
-  CHECK(m->alloc_tid >= 0);
-  AsanThread *t = GetCurrentThread();
-  m->free_tid = t ? t->tid() : 0;
-  StackTrace::CompressStack(stack, m->compressed_free_stack(),
-                            m->compressed_free_stack_size());
-  uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
-  PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);
-
-  // Statistics.
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  thread_stats.frees++;
-  thread_stats.freed += m->used_size;
-  thread_stats.freed_by_size[m->SizeClass()]++;
-
-  CHECK(m->chunk_state == CHUNK_QUARANTINE);
-
-  if (t) {
-    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
-    ms->quarantine_.Push(m);
-
-    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
-      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
-    }
-  } else {
-    malloc_info.BypassThreadLocalQuarantine(m);
-  }
-}
-
-static u8 *Reallocate(u8 *old_ptr, uptr new_size,
-                      StackTrace *stack) {
-  CHECK(old_ptr && new_size);
-
-  // Statistics.
-  AsanStats &thread_stats = GetCurrentThreadStats();
-  thread_stats.reallocs++;
-  thread_stats.realloced += new_size;
-
-  AsanChunk *m = PtrToChunk((uptr)old_ptr);
-  CHECK(m->chunk_state == CHUNK_ALLOCATED);
-  uptr old_size = m->used_size;
-  uptr memcpy_size = Min(new_size, old_size);
-  u8 *new_ptr = Allocate(0, new_size, stack, FROM_MALLOC);
-  if (new_ptr) {
-    CHECK(REAL(memcpy) != 0);
-    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
-    Deallocate(old_ptr, stack, FROM_MALLOC);
-  }
-  return new_ptr;
-}
-
-}  // namespace __asan
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-extern "C" {
-SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
-void __asan_malloc_hook(void *ptr, uptr size) {
-  (void)ptr;
-  (void)size;
-}
-SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
-void __asan_free_hook(void *ptr) {
-  (void)ptr;
-}
-}  // extern "C"
-#endif
-
-namespace __asan {
-
-void InitializeAllocator() { }
-
-void PrintInternalAllocatorStats() {
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
-                    AllocType alloc_type) {
-  void *ptr = (void*)Allocate(alignment, size, stack, alloc_type);
-  ASAN_MALLOC_HOOK(ptr, size);
-  return ptr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
-  ASAN_FREE_HOOK(ptr);
-  Deallocate((u8*)ptr, stack, alloc_type);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void *asan_malloc(uptr size, StackTrace *stack) {
-  void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
-  ASAN_MALLOC_HOOK(ptr, size);
-  return ptr;
-}
-
-void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
-  if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
-  void *ptr = (void*)Allocate(0, nmemb * size, stack, FROM_MALLOC);
-  if (ptr)
-    REAL(memset)(ptr, 0, nmemb * size);
-  ASAN_MALLOC_HOOK(ptr, size);
-  return ptr;
-}
-
-void *asan_realloc(void *p, uptr size, StackTrace *stack) {
-  if (p == 0) {
-    void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
-    ASAN_MALLOC_HOOK(ptr, size);
-    return ptr;
-  } else if (size == 0) {
-    ASAN_FREE_HOOK(p);
-    Deallocate((u8*)p, stack, FROM_MALLOC);
-    return 0;
-  }
-  return Reallocate((u8*)p, size, stack);
-}
-
-void *asan_valloc(uptr size, StackTrace *stack) {
-  void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack, FROM_MALLOC);
-  ASAN_MALLOC_HOOK(ptr, size);
-  return ptr;
-}
-
-void *asan_pvalloc(uptr size, StackTrace *stack) {
-  uptr PageSize = GetPageSizeCached();
-  size = RoundUpTo(size, PageSize);
-  if (size == 0) {
-    // pvalloc(0) should allocate one page.
-    size = PageSize;
-  }
-  void *ptr = (void*)Allocate(PageSize, size, stack, FROM_MALLOC);
-  ASAN_MALLOC_HOOK(ptr, size);
-  return ptr;
-}
-
-int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
-                        StackTrace *stack) {
-  void *ptr = Allocate(alignment, size, stack, FROM_MALLOC);
-  CHECK(IsAligned((uptr)ptr, alignment));
-  ASAN_MALLOC_HOOK(ptr, size);
-  *memptr = ptr;
-  return 0;
-}
-
-uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
-  CHECK(stack);
-  if (ptr == 0) return 0;
-  uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
-  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
-    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
-  }
-  return usable_size;
-}
-
-uptr asan_mz_size(const void *ptr) {
-  return malloc_info.AllocationSize((uptr)ptr);
-}
-
-void asan_mz_force_lock() {
-  malloc_info.ForceLock();
-}
-
-void asan_mz_force_unlock() {
-  malloc_info.ForceUnlock();
-}
-
-}  // namespace __asan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan;  // NOLINT
-
-// ASan allocator doesn't reserve extra bytes, so normally we would
-// just return "size".
-uptr __asan_get_estimated_allocated_size(uptr size) {
-  if (size == 0) return 1;
-  return Min(size, kMaxAllowedMallocSize);
-}
-
-bool __asan_get_ownership(const void *p) {
-  return malloc_info.AllocationSize((uptr)p) > 0;
-}
-
-uptr __asan_get_allocated_size(const void *p) {
-  if (p == 0) return 0;
-  uptr allocated_size = malloc_info.AllocationSize((uptr)p);
-  // Die if p is not malloced or if it is already freed.
-  if (allocated_size == 0) {
-    GET_STACK_TRACE_FATAL_HERE;
-    ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
-  }
-  return allocated_size;
-}
-#endif  // ASAN_ALLOCATOR_VERSION
diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h
index 38477c01163..4025cfe7b9c 100644
--- a/compiler-rt/lib/asan/asan_allocator.h
+++ b/compiler-rt/lib/asan/asan_allocator.h
@@ -9,7 +9,7 @@
 //
 // This file is a part of AddressSanitizer, an address sanity checker.
 //
-// ASan-private header for asan_allocator.cc.
+// ASan-private header for asan_allocator2.cc.
 //===----------------------------------------------------------------------===//
 
 #ifndef ASAN_ALLOCATOR_H
@@ -19,14 +19,6 @@
 #include "asan_interceptors.h"
 #include "sanitizer_common/sanitizer_list.h"
 
-// We are in the process of transitioning from the old allocator (version 1)
-// to a new one (version 2). The change is quite intrusive so both allocators
-// will co-exist in the source base for a while. The actual allocator is chosen
-// at build time by redefining this macro.
-#ifndef ASAN_ALLOCATOR_VERSION
-#define ASAN_ALLOCATOR_VERSION 2
-#endif  // ASAN_ALLOCATOR_VERSION
-
 namespace __asan {
 
 enum AllocType {
@@ -99,22 +91,14 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 
 struct AsanThreadLocalMallocStorage {
   explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
-#if ASAN_ALLOCATOR_VERSION == 1
-      : quarantine_(x)
-#endif
       { }
   AsanThreadLocalMallocStorage() {
     CHECK(REAL(memset));
    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
  }
 
-#if ASAN_ALLOCATOR_VERSION == 1
-  AsanChunkFifoList quarantine_;
-  AsanChunk *free_lists_[kNumberOfSizeClasses];
-#else
   uptr quarantine_cache[16];
   uptr allocator2_cache[96 * (512 * 8 + 16)];  // Opaque.
-#endif
 
   void CommitBack();
 };
diff --git a/compiler-rt/lib/asan/asan_allocator2.cc b/compiler-rt/lib/asan/asan_allocator2.cc
index 46cc78d7ae6..3d569fff115 100644
--- a/compiler-rt/lib/asan/asan_allocator2.cc
+++ b/compiler-rt/lib/asan/asan_allocator2.cc
@@ -16,7 +16,6 @@
 // Status: under development, not enabled by default yet.
 //===----------------------------------------------------------------------===//
 #include "asan_allocator.h"
-#if ASAN_ALLOCATOR_VERSION == 2
 
 #include "asan_mapping.h"
 #include "asan_poisoning.h"
@@ -715,6 +714,3 @@ void __asan_free_hook(void *ptr) {
 }
 }  // extern "C"
 #endif
-
-
-#endif  // ASAN_ALLOCATOR_VERSION
diff --git a/compiler-rt/lib/asan/asan_flags.h b/compiler-rt/lib/asan/asan_flags.h
index 0969ae23767..cd1c7c17e75 100644
--- a/compiler-rt/lib/asan/asan_flags.h
+++ b/compiler-rt/lib/asan/asan_flags.h
@@ -56,7 +56,7 @@ struct Flags {
   bool replace_intrin;
   // Used on Mac only.
   bool mac_ignore_invalid_free;
-  // ASan allocator flag. See asan_allocator.cc.
+  // ASan allocator flag.
   bool use_fake_stack;
   // ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes
   // that will be filled with malloc_fill_byte on malloc.
diff --git a/compiler-rt/lib/asan/asan_rtl.cc b/compiler-rt/lib/asan/asan_rtl.cc
index 07c2551d672..43f917122c9 100644
--- a/compiler-rt/lib/asan/asan_rtl.cc
+++ b/compiler-rt/lib/asan/asan_rtl.cc
@@ -139,7 +139,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
   f->symbolize = (asan_external_symbolizer != 0);
   f->verbosity = 0;
-  f->redzone = ASAN_ALLOCATOR_VERSION == 2 ? 16 : (ASAN_LOW_MEMORY) ? 64 : 128;
+  f->redzone = 16;
   f->debug = false;
   f->report_globals = 1;
   f->check_initialization_order = false;
diff --git a/compiler-rt/lib/asan/tests/asan_noinst_test.cc b/compiler-rt/lib/asan/tests/asan_noinst_test.cc
index 16818420491..e0100933560 100644
--- a/compiler-rt/lib/asan/tests/asan_noinst_test.cc
+++ b/compiler-rt/lib/asan/tests/asan_noinst_test.cc
@@ -348,11 +348,7 @@ TEST(AddressSanitizer, MemsetWildAddressTest) {
 }
 
 TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
-#if ASAN_ALLOCATOR_VERSION == 1
-  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
-#elif ASAN_ALLOCATOR_VERSION == 2
   EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
-#endif
   const size_t sizes[] = { 1, 30, 1<<30 };
   for (size_t i = 0; i < 3; i++) {
     EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
@@ -426,39 +422,6 @@ static void DoDoubleFree() {
   delete Ident(x);
 }
 
-#if ASAN_ALLOCATOR_VERSION == 1
-// This test is run in a separate process, so that large malloced
-// chunk won't remain in the free lists after the test.
-// Note: use ASSERT_* instead of EXPECT_* here.
-static void RunGetHeapSizeTestAndDie() {
-  size_t old_heap_size, new_heap_size, heap_growth;
-  // We unlikely have have chunk of this size in free list.
-  static const size_t kLargeMallocSize = 1 << 29;  // 512M
-  old_heap_size = __asan_get_heap_size();
-  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
-  free(Ident(malloc(kLargeMallocSize)));
-  new_heap_size = __asan_get_heap_size();
-  heap_growth = new_heap_size - old_heap_size;
-  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
-  ASSERT_GE(heap_growth, kLargeMallocSize);
-  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);
-
-  // Now large chunk should fall into free list, and can be
-  // allocated without increasing heap size.
-  old_heap_size = new_heap_size;
-  free(Ident(malloc(kLargeMallocSize)));
-  heap_growth = __asan_get_heap_size() - old_heap_size;
-  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
-  ASSERT_LT(heap_growth, kLargeMallocSize);
-
-  // Test passed. Now die with expected double-free.
-  DoDoubleFree();
-}
-
-TEST(AddressSanitizerInterface, GetHeapSizeTest) {
-  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
-}
-#elif ASAN_ALLOCATOR_VERSION == 2
 TEST(AddressSanitizerInterface, GetHeapSizeTest) {
   // asan_allocator2 does not keep huge chunks in free list, but unmaps them.
   // The chunk should be greater than the quarantine size,
@@ -472,55 +435,6 @@
     EXPECT_EQ(old_heap_size, __asan_get_heap_size());
   }
 }
-#endif
-
-// Note: use ASSERT_* instead of EXPECT_* here.
-static void DoLargeMallocForGetFreeBytesTestAndDie() {
-#if ASAN_ALLOCATOR_VERSION == 1
-  // asan_allocator2 does not keep large chunks in free_lists, so this test
-  // will not work.
-  size_t old_free_bytes, new_free_bytes;
-  static const size_t kLargeMallocSize = 1 << 29;  // 512M
-  // If we malloc and free a large memory chunk, it will not fall
-  // into quarantine and will be available for future requests.
-  old_free_bytes = __asan_get_free_bytes();
-  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
-  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
-  free(Ident(malloc(kLargeMallocSize)));
-  new_free_bytes = __asan_get_free_bytes();
-  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
-  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
-#endif  // ASAN_ALLOCATOR_VERSION
-  // Test passed.
-  DoDoubleFree();
-}
-
-TEST(AddressSanitizerInterface, GetFreeBytesTest) {
-#if ASAN_ALLOCATOR_VERSION == 1
-  // Allocate a small chunk. Now allocator probably has a lot of these
-  // chunks to fulfill future requests. So, future requests will decrease
-  // the number of free bytes. Do this only on systems where there
-  // is enough memory for such assumptions.
-  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
-    static const size_t kNumOfChunks = 100;
-    static const size_t kChunkSize = 100;
-    char *chunks[kNumOfChunks];
-    size_t i;
-    size_t old_free_bytes, new_free_bytes;
-    chunks[0] = Ident((char*)malloc(kChunkSize));
-    old_free_bytes = __asan_get_free_bytes();
-    for (i = 1; i < kNumOfChunks; i++) {
-      chunks[i] = Ident((char*)malloc(kChunkSize));
-      new_free_bytes = __asan_get_free_bytes();
-      EXPECT_LT(new_free_bytes, old_free_bytes);
-      old_free_bytes = new_free_bytes;
-    }
-    for (i = 0; i < kNumOfChunks; i++)
-      free(chunks[i]);
-  }
-#endif
-  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
-}
 
 static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
 static const size_t kManyThreadsIterations = 250;
@@ -828,12 +742,7 @@ TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
 TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
   std::vector<char *> pointers;
   std::vector<size_t> sizes;
-#if ASAN_ALLOCATOR_VERSION == 1
-  const size_t kNumMallocs =
-      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
-#elif ASAN_ALLOCATOR_VERSION == 2  // too slow with asan_allocator2. :(
   const size_t kNumMallocs = 1 << 9;
-#endif
   for (size_t i = 0; i < kNumMallocs; i++) {
     size_t size = i * 100 + 1;
     pointers.push_back((char*)malloc(size));
diff --git a/compiler-rt/lib/asan/tests/asan_oob_test.cc b/compiler-rt/lib/asan/tests/asan_oob_test.cc
index dbe272c2c8b..f8343f19cfc 100644
--- a/compiler-rt/lib/asan/tests/asan_oob_test.cc
+++ b/compiler-rt/lib/asan/tests/asan_oob_test.cc
@@ -99,7 +99,6 @@ TEST(AddressSanitizer, OOBRightTest) {
   }
 }
 
-#if ASAN_ALLOCATOR_VERSION == 2  // Broken with the asan_allocator1
 TEST(AddressSanitizer, LargeOOBRightTest) {
   size_t large_power_of_two = 1 << 19;
   for (size_t i = 16; i <= 256; i *= 2) {
@@ -109,7 +108,6 @@ TEST(AddressSanitizer, LargeOOBRightTest) {
     delete [] p;
   }
 }
-#endif  // ASAN_ALLOCATOR_VERSION == 2
 
 TEST(AddressSanitizer, DISABLED_DemoOOBLeftLow) {
   oob_test<U1>(10, -1);
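Note on the deleted size-class scheme: the removed allocator rounds a request up to 2^26 bytes to the next power of two, and anything larger to the next multiple of kMallocSizeClassStep = 2^26. The standalone sketch below mirrors that mapping outside the runtime; all names and the driver are illustrative, not ASan's API.

#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

constexpr uint64_t kStepLog = 26;
constexpr uint64_t kStep = uint64_t{1} << kStepLog;  // 64M

// Class -> chunk size: 1 << c for small classes,
// (c - kStepLog) * kStep for the large ones.
uint64_t ClassToSize(uint8_t c) {
  if (c <= kStepLog) return uint64_t{1} << c;
  return (c - kStepLog) * kStep;
}

// Size -> smallest class whose chunk covers the request.
uint8_t SizeToClass(uint64_t size) {
  if (size <= kStep) {
    // Round up to a power of two, then take the log.
    uint64_t rounded = 1;
    uint8_t log = 0;
    while (rounded < size) { rounded <<= 1; ++log; }
    return log;
  }
  // Round up to a multiple of kStep.
  return static_cast<uint8_t>((size + kStep - 1) / kStep + kStepLog);
}

}  // namespace

int main() {
  const uint64_t sizes[] = {1, 100, 4096, kStep, kStep + 1, 3 * kStep};
  for (uint64_t s : sizes) {
    uint8_t c = SizeToClass(s);
    assert(s <= ClassToSize(c));  // The chosen class must cover the request.
    std::printf("size %llu -> class %u (chunk %llu)\n",
                (unsigned long long)s, c, (unsigned long long)ClassToSize(c));
  }
}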
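The deleted Deallocate() also shows the double-free detection idiom that survives in allocator2: free() flips the chunk-state byte at the start of the header to CHUNK_QUARANTINE with one atomic exchange, so of two racing frees exactly one observes CHUNK_ALLOCATED and the other is reported. A minimal self-contained sketch of that idiom, with assumed types standing in for AsanChunk:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

enum : uint8_t { kAllocated = 0x32, kQuarantined = 0x19 };

struct Chunk {
  std::atomic<uint8_t> state{kAllocated};  // First byte of the chunk header.
};

void Free(Chunk *c) {
  // Atomically mark the chunk quarantined and observe the previous state.
  uint8_t old_state =
      c->state.exchange(kQuarantined, std::memory_order_acq_rel);
  if (old_state == kQuarantined) {
    std::fprintf(stderr, "double-free detected\n");
    std::abort();
  }
  // old_state == kAllocated: proceed with the normal free path.
}

int main() {
  Chunk c;
  Free(&c);  // ok: flips ALLOCATED -> QUARANTINE
  Free(&c);  // aborts: the state was already QUARANTINE
}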
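Finally, the deleted file header describes the quarantine: freed chunks enter a FIFO and are recycled to the free lists only once the FIFO grows past flags()->quarantine_size, which delays address reuse long enough for use-after-free to hit poisoned memory. A rough sketch of that policy under illustrative types (not ASan's containers):

#include <cstddef>
#include <cstdio>
#include <deque>
#include <vector>

struct Block { size_t size; };

class Quarantine {
 public:
  explicit Quarantine(size_t max_bytes) : max_bytes_(max_bytes) {}

  // Quarantine a freed block; evict the oldest blocks once over the cap.
  std::vector<Block> Push(Block b) {
    fifo_.push_back(b);
    bytes_ += b.size;
    std::vector<Block> recycled;  // Blocks now safe to hand back out.
    while (bytes_ > max_bytes_) {
      recycled.push_back(fifo_.front());
      bytes_ -= fifo_.front().size;
      fifo_.pop_front();
    }
    return recycled;
  }

 private:
  std::deque<Block> fifo_;  // Oldest freed block at the front.
  size_t bytes_ = 0;
  size_t max_bytes_;
};

int main() {
  Quarantine q(/*max_bytes=*/256);
  for (int i = 0; i < 10; i++) {
    // Each freed block is 64 bytes; after four pushes the cap forces eviction.
    for (Block b : q.Push(Block{64}))
      std::printf("recycling block of %zu bytes\n", b.size);
  }
}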