Diffstat (limited to 'compiler-rt/lib/asan/asan_allocator.cc')
-rw-r--r--  compiler-rt/lib/asan/asan_allocator.cc | 278
1 file changed, 139 insertions, 139 deletions
diff --git a/compiler-rt/lib/asan/asan_allocator.cc b/compiler-rt/lib/asan/asan_allocator.cc
index 64288120dfb..900384febae 100644
--- a/compiler-rt/lib/asan/asan_allocator.cc
+++ b/compiler-rt/lib/asan/asan_allocator.cc
@@ -42,27 +42,27 @@ namespace __asan {
 #define REDZONE FLAG_redzone
-static const size_t kMinAllocSize = REDZONE * 2;
+static const uptr kMinAllocSize = REDZONE * 2;
 static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
-static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M
+static const uptr kMaxThreadLocalQuarantine = 1 << 20;  // 1M
-static const size_t kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
-static const size_t kMaxSizeForThreadLocalFreeList =
+static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
+static const uptr kMaxSizeForThreadLocalFreeList =
     (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;
 // Size classes less than kMallocSizeClassStep are powers of two.
 // All other size classes are multiples of kMallocSizeClassStep.
-static const size_t kMallocSizeClassStepLog = 26;
-static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
+static const uptr kMallocSizeClassStepLog = 26;
+static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
 static const uptr kMaxAllowedMallocSize =
     (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
-static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
+static inline bool IsAligned(uptr a, uptr alignment) {
   return (a & (alignment - 1)) == 0;
 }
-static inline size_t Log2(size_t x) {
+static inline uptr Log2(uptr x) {
   CHECK(IsPowerOfTwo(x));
 #if !defined(_WIN32) || defined(__clang__)
   return __builtin_ctzl(x);
@@ -77,7 +77,7 @@ static inline size_t Log2(size_t x) {
 #endif
 }
-static inline size_t RoundUpToPowerOfTwo(size_t size) {
+static inline uptr RoundUpToPowerOfTwo(uptr size) {
   CHECK(size);
   if (IsPowerOfTwo(size)) return size;
@@ -94,7 +94,7 @@ static inline size_t RoundUpToPowerOfTwo(size_t size) {
   return 1UL << (up + 1);
 }
-static inline size_t SizeClassToSize(uint8_t size_class) {
+static inline uptr SizeClassToSize(uint8_t size_class) {
   CHECK(size_class < kNumberOfSizeClasses);
   if (size_class <= kMallocSizeClassStepLog) {
     return 1UL << size_class;
@@ -103,10 +103,10 @@ static inline size_t SizeClassToSize(uint8_t size_class) {
   }
 }
-static inline uint8_t SizeToSizeClass(size_t size) {
+static inline uint8_t SizeToSizeClass(uptr size) {
   uint8_t res = 0;
   if (size <= kMallocSizeClassStep) {
-    size_t rounded = RoundUpToPowerOfTwo(size);
+    uptr rounded = RoundUpToPowerOfTwo(size);
     res = Log2(rounded);
   } else {
     res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
@@ -119,7 +119,7 @@ static inline uint8_t SizeToSizeClass(size_t size) {
 // Given REDZONE bytes, we need to mark first size bytes
 // as addressable and the rest REDZONE-size bytes as unaddressable.
-static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
+static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
   CHECK(size <= REDZONE);
   CHECK(IsAligned(mem, REDZONE));
   CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
@@ -129,10 +129,10 @@ static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
                                   kAsanHeapRightRedzoneMagic);
 }
-static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
+static uint8_t *MmapNewPagesAndPoisonShadow(uptr size) {
   CHECK(IsAligned(size, kPageSize));
   uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
-  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
+  PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
   if (FLAG_debug) {
     Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
   }
@@ -162,33 +162,33 @@ struct ChunkBase {
   uint32_t offset;  // User-visible memory starts at this+offset (beg()).
   int32_t alloc_tid;
   int32_t free_tid;
-  size_t used_size;  // Size requested by the user.
+  uptr used_size;  // Size requested by the user.
   AsanChunk *next;
-  uintptr_t beg() { return (uintptr_t)this + offset; }
-  size_t Size() { return SizeClassToSize(size_class); }
+  uptr beg() { return (uptr)this + offset; }
+  uptr Size() { return SizeClassToSize(size_class); }
   uint8_t SizeClass() { return size_class; }
 };
 struct AsanChunk: public ChunkBase {
   uint32_t *compressed_alloc_stack() {
     CHECK(REDZONE >= sizeof(ChunkBase));
-    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
+    return (uint32_t*)((uptr)this + sizeof(ChunkBase));
   }
   uint32_t *compressed_free_stack() {
     CHECK(REDZONE >= sizeof(ChunkBase));
-    return (uint32_t*)((uintptr_t)this + REDZONE);
+    return (uint32_t*)((uptr)this + REDZONE);
   }
   // The left redzone after the ChunkBase is given to the alloc stack trace.
-  size_t compressed_alloc_stack_size() {
+  uptr compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
   }
-  size_t compressed_free_stack_size() {
+  uptr compressed_free_stack_size() {
    return (REDZONE) / sizeof(uint32_t);
   }
-  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
+  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
     if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
       *offset = addr - beg();
       return true;
@@ -196,7 +196,7 @@ struct AsanChunk: public ChunkBase {
     return false;
   }
-  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
+  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
     if (addr < beg()) {
       *offset = beg() - addr;
       return true;
@@ -204,7 +204,7 @@ struct AsanChunk: public ChunkBase {
     return false;
   }
-  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
+  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
     if (addr + access_size >= beg() + used_size) {
       if (addr <= beg() + used_size)
         *offset = 0;
@@ -215,8 +215,8 @@ struct AsanChunk: public ChunkBase {
     return false;
   }
-  void DescribeAddress(uintptr_t addr, size_t access_size) {
-    size_t offset;
+  void DescribeAddress(uptr addr, uptr access_size) {
+    uptr offset;
     Printf("%p is located ", addr);
     if (AddrIsInside(addr, access_size, &offset)) {
       Printf("%zu bytes inside of", offset);
@@ -232,7 +232,7 @@ struct AsanChunk: public ChunkBase {
   }
 };
-static AsanChunk *PtrToChunk(uintptr_t ptr) {
+static AsanChunk *PtrToChunk(uptr ptr) {
   AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
   if (m->chunk_state == CHUNK_MEMALIGN) {
     m = m->next;
@@ -261,7 +261,7 @@ void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
 }
 void AsanChunkFifoList::Push(AsanChunk *n) {
-  CHECK(n->next == NULL);
+  CHECK(n->next == 0);
   if (last_) {
     CHECK(first_);
     CHECK(!last_->next);
@@ -281,8 +281,8 @@ AsanChunk *AsanChunkFifoList::Pop() {
   CHECK(first_);
   AsanChunk *res = first_;
   first_ = first_->next;
-  if (first_ == NULL)
-    last_ = NULL;
+  if (first_ == 0)
+    last_ = 0;
   CHECK(size_ >= res->Size());
   size_ -= res->Size();
   if (last_) {
@@ -293,11 +293,11 @@ AsanChunk *AsanChunkFifoList::Pop() {
 // All pages we ever allocated.
 struct PageGroup {
-  uintptr_t beg;
-  uintptr_t end;
-  size_t size_of_chunk;
-  uintptr_t last_chunk;
-  bool InRange(uintptr_t addr) {
+  uptr beg;
+  uptr end;
+  uptr size_of_chunk;
+  uptr last_chunk;
+  bool InRange(uptr addr) {
     return addr >= beg && addr < end;
   }
 };
@@ -307,12 +307,12 @@ class MallocInfo {
   explicit MallocInfo(LinkerInitialized x) : mu_(x) { }
-  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
-    AsanChunk *m = NULL;
+  AsanChunk *AllocateChunks(uint8_t size_class, uptr n_chunks) {
+    AsanChunk *m = 0;
     AsanChunk **fl = &free_lists_[size_class];
     {
       ScopedLock lock(&mu_);
-      for (size_t i = 0; i < n_chunks; i++) {
+      for (uptr i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
@@ -338,7 +338,7 @@ class MallocInfo {
       }
     }
     if (eat_free_lists) {
-      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
+      for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
         AsanChunk *m = x->free_lists_[size_class];
         while (m) {
@@ -357,12 +357,12 @@ class MallocInfo {
     quarantine_.Push(chunk);
   }
-  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
+  AsanChunk *FindMallocedOrFreed(uptr addr, uptr access_size) {
     ScopedLock lock(&mu_);
     return FindChunkByAddr(addr);
   }
-  size_t AllocationSize(uintptr_t ptr) {
+  uptr AllocationSize(uptr ptr) {
     if (!ptr) return 0;
     ScopedLock lock(&mu_);
@@ -387,14 +387,14 @@ class MallocInfo {
   void PrintStatus() {
     ScopedLock lock(&mu_);
-    size_t malloced = 0;
+    uptr malloced = 0;
     Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
-    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
+    for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
       AsanChunk *i = free_lists_[j];
       if (!i) continue;
-      size_t t = 0;
+      uptr t = 0;
       for (; i; i = i->next) {
         t += i->Size();
       }
@@ -403,24 +403,24 @@ class MallocInfo {
     Printf("\n");
   }
-  PageGroup *FindPageGroup(uintptr_t addr) {
+  PageGroup *FindPageGroup(uptr addr) {
     ScopedLock lock(&mu_);
     return FindPageGroupUnlocked(addr);
   }
  private:
-  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
+  PageGroup *FindPageGroupUnlocked(uptr addr) {
     int n = n_page_groups_;
     // If the page groups are not sorted yet, sort them.
     if (n_sorted_page_groups_ < n) {
-      SortArray((uintptr_t*)page_groups_, n);
+      SortArray((uptr*)page_groups_, n);
       n_sorted_page_groups_ = n;
     }
     // Binary search over the page groups.
     int beg = 0, end = n;
     while (beg < end) {
       int med = (beg + end) / 2;
-      uintptr_t g = (uintptr_t)page_groups_[med];
+      uptr g = (uptr)page_groups_[med];
       if (addr > g) {
         // 'g' points to the end of the group, so 'addr'
         // may not belong to page_groups_[med] or any previous group.
@@ -431,16 +431,16 @@ class MallocInfo {
       }
     }
     if (beg >= n)
-      return NULL;
+      return 0;
     PageGroup *g = page_groups_[beg];
     CHECK(g);
     if (g->InRange(addr)) return g;
-    return NULL;
+    return 0;
   }
   // We have an address between two chunks, and we want to report just one.
-  AsanChunk *ChooseChunk(uintptr_t addr,
+  AsanChunk *ChooseChunk(uptr addr,
                          AsanChunk *left_chunk, AsanChunk *right_chunk) {
     // Prefer an allocated chunk or a chunk from quarantine.
     if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
@@ -450,7 +450,7 @@ class MallocInfo {
         left_chunk->chunk_state != CHUNK_AVAILABLE)
       return left_chunk;
     // Choose based on offset.
-    size_t l_offset = 0, r_offset = 0;
+    uptr l_offset = 0, r_offset = 0;
     CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
     CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
     if (l_offset < r_offset)
@@ -458,33 +458,33 @@ class MallocInfo {
     return right_chunk;
   }
-  AsanChunk *FindChunkByAddr(uintptr_t addr) {
+  AsanChunk *FindChunkByAddr(uptr addr) {
     PageGroup *g = FindPageGroupUnlocked(addr);
     if (!g) return 0;
     CHECK(g->size_of_chunk);
-    uintptr_t offset_from_beg = addr - g->beg;
-    uintptr_t this_chunk_addr = g->beg +
+    uptr offset_from_beg = addr - g->beg;
+    uptr this_chunk_addr = g->beg +
         (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
     CHECK(g->InRange(this_chunk_addr));
     AsanChunk *m = (AsanChunk*)this_chunk_addr;
     CHECK(m->chunk_state == CHUNK_ALLOCATED ||
           m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
-    size_t offset = 0;
+    uptr offset = 0;
     if (m->AddrIsInside(addr, 1, &offset))
       return m;
     if (m->AddrIsAtRight(addr, 1, &offset)) {
       if (this_chunk_addr == g->last_chunk)  // rightmost chunk
         return m;
-      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
+      uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
       CHECK(g->InRange(right_chunk_addr));
       return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
     } else {
       CHECK(m->AddrIsAtLeft(addr, 1, &offset));
       if (this_chunk_addr == g->beg)  // leftmost chunk
         return m;
-      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
+      uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
       CHECK(g->InRange(left_chunk_addr));
       return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
     }
@@ -498,11 +498,11 @@ class MallocInfo {
     CHECK(m->chunk_state == CHUNK_QUARANTINE);
     m->chunk_state = CHUNK_AVAILABLE;
-    PoisonShadow((uintptr_t)m, m->Size(), kAsanHeapLeftRedzoneMagic);
+    PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
     CHECK(m->alloc_tid >= 0);
     CHECK(m->free_tid >= 0);
-    size_t size_class = m->SizeClass();
+    uptr size_class = m->SizeClass();
     m->next = free_lists_[size_class];
     free_lists_[size_class] = m;
@@ -516,11 +516,11 @@ class MallocInfo {
   // Get a list of newly allocated chunks.
   AsanChunk *GetNewChunks(uint8_t size_class) {
-    size_t size = SizeClassToSize(size_class);
+    uptr size = SizeClassToSize(size_class);
     CHECK(IsPowerOfTwo(kMinMmapSize));
     CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
-    size_t mmap_size = Max(size, kMinMmapSize);
-    size_t n_chunks = mmap_size / size;
+    uptr mmap_size = Max(size, kMinMmapSize);
+    uptr n_chunks = mmap_size / size;
     CHECK(n_chunks * size == mmap_size);
     if (size < kPageSize) {
       // Size is small, just poison the last chunk.
@@ -538,8 +538,8 @@ class MallocInfo {
     thread_stats.mmaped += mmap_size;
     thread_stats.mmaped_by_size[size_class] += n_chunks;
-    AsanChunk *res = NULL;
-    for (size_t i = 0; i < n_chunks; i++) {
+    AsanChunk *res = 0;
+    for (uptr i = 0; i < n_chunks; i++) {
       AsanChunk *m = (AsanChunk*)(mem + i * size);
       m->chunk_state = CHUNK_AVAILABLE;
       m->size_class = size_class;
@@ -548,10 +548,10 @@ class MallocInfo {
     }
     PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
     // This memory is already poisoned, no need to poison it again.
-    pg->beg = (uintptr_t)mem;
+    pg->beg = (uptr)mem;
     pg->end = pg->beg + mmap_size;
     pg->size_of_chunk = size;
-    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
+    pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
     int page_group_idx = AtomicInc(&n_page_groups_) - 1;
     CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
     page_groups_[page_group_idx] = pg;
@@ -573,7 +573,7 @@ void AsanThreadLocalMallocStorage::CommitBack() {
   malloc_info.SwallowThreadLocalMallocStorage(this, true);
 }
-static void Describe(uintptr_t addr, size_t access_size) {
+static void Describe(uptr addr, uptr access_size) {
   AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
   if (!m) return;
   m->DescribeAddress(addr, access_size);
@@ -608,15 +608,15 @@ static void Describe(uintptr_t addr, size_t access_size) {
   }
 }
-static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
+static uint8_t *Allocate(uptr alignment, uptr size, AsanStackTrace *stack) {
   __asan_init();
   CHECK(stack);
   if (size == 0) {
     size = 1;  // TODO(kcc): do something smarter
   }
   CHECK(IsPowerOfTwo(alignment));
-  size_t rounded_size = RoundUpTo(size, REDZONE);
-  size_t needed_size = rounded_size + REDZONE;
+  uptr rounded_size = RoundUpTo(size, REDZONE);
+  uptr needed_size = rounded_size + REDZONE;
   if (alignment > REDZONE) {
     needed_size += alignment;
   }
@@ -627,7 +627,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
   }
   uint8_t size_class = SizeToSizeClass(needed_size);
-  size_t size_to_allocate = SizeClassToSize(size_class);
+  uptr size_to_allocate = SizeClassToSize(size_class);
   CHECK(size_to_allocate >= kMinAllocSize);
   CHECK(size_to_allocate >= needed_size);
   CHECK(IsAligned(size_to_allocate, REDZONE));
@@ -645,7 +645,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
   thread_stats.malloced_redzones += size_to_allocate - size;
   thread_stats.malloced_by_size[size_class]++;
-  AsanChunk *m = NULL;
+  AsanChunk *m = 0;
   if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
     // get directly from global storage.
     m = malloc_info.AllocateChunks(size_class, 1);
@@ -654,7 +654,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
     // get from the thread-local storage.
     AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
     if (!*fl) {
-      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
+      uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
       *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
       thread_stats.malloc_small_slow++;
     }
@@ -664,10 +664,10 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
   CHECK(m);
   CHECK(m->chunk_state == CHUNK_AVAILABLE);
   m->chunk_state = CHUNK_ALLOCATED;
-  m->next = NULL;
+  m->next = 0;
   CHECK(m->Size() == size_to_allocate);
-  uintptr_t addr = (uintptr_t)m + REDZONE;
-  CHECK(addr == (uintptr_t)m->compressed_free_stack());
+  uptr addr = (uptr)m + REDZONE;
+  CHECK(addr == (uptr)m->compressed_free_stack());
   if (alignment > REDZONE && (addr & (alignment - 1))) {
     addr = RoundUpTo(addr, alignment);
@@ -678,7 +678,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
   }
   CHECK(m == PtrToChunk(addr));
   m->used_size = size;
-  m->offset = addr - (uintptr_t)m;
+  m->offset = addr - (uptr)m;
   CHECK(m->beg() == addr);
   m->alloc_tid = t ? t->tid() : 0;
   m->free_tid = AsanThread::kInvalidTid;
@@ -700,11 +700,11 @@ static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
   CHECK(stack);
   if (FLAG_debug) {
-    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
+    CHECK(malloc_info.FindPageGroup((uptr)ptr));
   }
   // Printf("Deallocate %p\n", ptr);
-  AsanChunk *m = PtrToChunk((uintptr_t)ptr);
+  AsanChunk *m = PtrToChunk((uptr)ptr);
   // Flip the state atomically to avoid race on double-free.
   uint16_t old_chunk_state = AtomicExchange(&m->chunk_state, CHUNK_QUARANTINE);
@@ -712,7 +712,7 @@ static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
   if (old_chunk_state == CHUNK_QUARANTINE) {
     Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
     stack->PrintStack();
-    Describe((uintptr_t)ptr, 1);
+    Describe((uptr)ptr, 1);
     ShowStatsAndAbort();
   } else if (old_chunk_state != CHUNK_ALLOCATED) {
     Report("ERROR: AddressSanitizer attempting free on address which was not"
@@ -727,8 +727,8 @@ static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
   m->free_tid = t ? t->tid() : 0;
   AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                 m->compressed_free_stack_size());
-  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
-  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);
+  uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
+  PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);
   // Statistics.
   AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
@@ -751,7 +751,7 @@ static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
   }
 }
-static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
+static uint8_t *Reallocate(uint8_t *old_ptr, uptr new_size,
                            AsanStackTrace *stack) {
   CHECK(old_ptr && new_size);
@@ -760,13 +760,13 @@ static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
   thread_stats.reallocs++;
   thread_stats.realloced += new_size;
-  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
+  AsanChunk *m = PtrToChunk((uptr)old_ptr);
   CHECK(m->chunk_state == CHUNK_ALLOCATED);
-  size_t old_size = m->used_size;
-  size_t memcpy_size = Min(new_size, old_size);
+  uptr old_size = m->used_size;
+  uptr memcpy_size = Min(new_size, old_size);
   uint8_t *new_ptr = Allocate(0, new_size, stack);
   if (new_ptr) {
-    CHECK(REAL(memcpy) != NULL);
+    CHECK(REAL(memcpy) != 0);
     REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
     Deallocate(old_ptr, stack);
   }
@@ -784,9 +784,9 @@ static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
 // program must provide implementation of this hook.
 // If macro is undefined, the hook is no-op.
 #ifdef ASAN_NEW_HOOK
-extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
+extern "C" void ASAN_NEW_HOOK(void *ptr, uptr size);
 #else
-static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
+static inline void ASAN_NEW_HOOK(void *ptr, uptr size) { }
 #endif
 #ifdef ASAN_DELETE_HOOK
@@ -797,7 +797,7 @@ static inline void ASAN_DELETE_HOOK(void *ptr) { }
 namespace __asan {
-void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
+void *asan_memalign(uptr alignment, uptr size, AsanStackTrace *stack) {
   void *ptr = (void*)Allocate(alignment, size, stack);
   ASAN_NEW_HOOK(ptr, size);
   return ptr;
@@ -808,13 +808,13 @@ void asan_free(void *ptr, AsanStackTrace *stack) {
   Deallocate((uint8_t*)ptr, stack);
 }
-void *asan_malloc(size_t size, AsanStackTrace *stack) {
+void *asan_malloc(uptr size, AsanStackTrace *stack) {
   void *ptr = (void*)Allocate(0, size, stack);
   ASAN_NEW_HOOK(ptr, size);
   return ptr;
 }
-void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
+void *asan_calloc(uptr nmemb, uptr size, AsanStackTrace *stack) {
   void *ptr = (void*)Allocate(0, nmemb * size, stack);
   if (ptr)
     REAL(memset)(ptr, 0, nmemb * size);
@@ -822,26 +822,26 @@ void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
   return ptr;
 }
-void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
-  if (p == NULL) {
+void *asan_realloc(void *p, uptr size, AsanStackTrace *stack) {
+  if (p == 0) {
     void *ptr = (void*)Allocate(0, size, stack);
     ASAN_NEW_HOOK(ptr, size);
     return ptr;
   } else if (size == 0) {
     ASAN_DELETE_HOOK(p);
     Deallocate((uint8_t*)p, stack);
-    return NULL;
+    return 0;
   }
   return Reallocate((uint8_t*)p, size, stack);
 }
-void *asan_valloc(size_t size, AsanStackTrace *stack) {
+void *asan_valloc(uptr size, AsanStackTrace *stack) {
   void *ptr = (void*)Allocate(kPageSize, size, stack);
   ASAN_NEW_HOOK(ptr, size);
   return ptr;
 }
-void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
+void *asan_pvalloc(uptr size, AsanStackTrace *stack) {
   size = RoundUpTo(size, kPageSize);
   if (size == 0) {
     // pvalloc(0) should allocate one page.
@@ -852,34 +852,34 @@ void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
   return ptr;
 }
-int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         AsanStackTrace *stack) {
   void *ptr = Allocate(alignment, size, stack);
-  CHECK(IsAligned((uintptr_t)ptr, alignment));
+  CHECK(IsAligned((uptr)ptr, alignment));
   ASAN_NEW_HOOK(ptr, size);
   *memptr = ptr;
   return 0;
 }
-size_t asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
+uptr asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
   CHECK(stack);
-  if (ptr == NULL) return 0;
-  size_t usable_size = malloc_info.AllocationSize((uintptr_t)ptr);
+  if (ptr == 0) return 0;
+  uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
   if (FLAG_check_malloc_usable_size && (usable_size == 0)) {
     Report("ERROR: AddressSanitizer attempting to call malloc_usable_size() "
            "for pointer which is not owned: %p\n", ptr);
     stack->PrintStack();
-    Describe((uintptr_t)ptr, 1);
+    Describe((uptr)ptr, 1);
     ShowStatsAndAbort();
   }
   return usable_size;
 }
-size_t asan_mz_size(const void *ptr) {
-  return malloc_info.AllocationSize((uintptr_t)ptr);
+uptr asan_mz_size(const void *ptr) {
+  return malloc_info.AllocationSize((uptr)ptr);
 }
-void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
+void DescribeHeapAddress(uptr addr, uptr access_size) {
   Describe(addr, access_size);
 }
@@ -893,34 +893,34 @@ void asan_mz_force_unlock() {
 // ---------------------- Fake stack-------------------- {{{1
 FakeStack::FakeStack() {
-  CHECK(REAL(memset) != NULL);
+  CHECK(REAL(memset) != 0);
   REAL(memset)(this, 0, sizeof(*this));
 }
-bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
-  uintptr_t mem = allocated_size_classes_[size_class];
-  uintptr_t size = ClassMmapSize(size_class);
+bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
+  uptr mem = allocated_size_classes_[size_class];
+  uptr size = ClassMmapSize(size_class);
   bool res = mem && addr >= mem && addr < mem + size;
   return res;
 }
-uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
-  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
+uptr FakeStack::AddrIsInFakeStack(uptr addr) {
+  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
     if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
   }
   return 0;
 }
 // We may want to compute this during compilation.
-inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
-  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
-  size_t log = Log2(rounded_size);
+inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
+  uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
+  uptr log = Log2(rounded_size);
   CHECK(alloc_size <= (1UL << log));
   if (!(alloc_size > (1UL << (log-1)))) {
     Printf("alloc_size %zu log %zu\n", alloc_size, log);
   }
   CHECK(alloc_size > (1UL << (log-1)));
-  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
+  uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
   CHECK(res < kNumberOfSizeClasses);
   CHECK(ClassSize(res) >= rounded_size);
   return res;
@@ -952,15 +952,15 @@ FakeFrame *FakeFrameFifo::FifoPop() {
   return res;
 }
-void FakeStack::Init(size_t stack_size) {
+void FakeStack::Init(uptr stack_size) {
   stack_size_ = stack_size;
   alive_ = true;
 }
 void FakeStack::Cleanup() {
   alive_ = false;
-  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
-    uintptr_t mem = allocated_size_classes_[i];
+  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+    uptr mem = allocated_size_classes_[i];
     if (mem) {
       PoisonShadow(mem, ClassMmapSize(i), 0);
       allocated_size_classes_[i] = 0;
@@ -969,19 +969,19 @@ void FakeStack::Cleanup() {
   }
 }
-size_t FakeStack::ClassMmapSize(size_t size_class) {
+uptr FakeStack::ClassMmapSize(uptr size_class) {
   return RoundUpToPowerOfTwo(stack_size_);
 }
-void FakeStack::AllocateOneSizeClass(size_t size_class) {
+void FakeStack::AllocateOneSizeClass(uptr size_class) {
   CHECK(ClassMmapSize(size_class) >= kPageSize);
-  uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
+  uptr new_mem = (uptr)AsanMmapSomewhereOrDie(
       ClassMmapSize(size_class), __FUNCTION__);
   // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
   //        asanThreadRegistry().GetCurrent()->tid(),
   //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
   //        ClassMmapSize(size_class));
-  size_t i;
+  uptr i;
   for (i = 0; i < ClassMmapSize(size_class);
        i += ClassSize(size_class)) {
     size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
@@ -990,10 +990,10 @@ void FakeStack::AllocateOneSizeClass(size_t size_class) {
   allocated_size_classes_[size_class] = new_mem;
 }
-uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
+uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
   if (!alive_) return real_stack;
   CHECK(size <= kMaxStackMallocSize && size > 1);
-  size_t size_class = ComputeSizeClass(size);
+  uptr size_class = ComputeSizeClass(size);
   if (!allocated_size_classes_[size_class]) {
     AllocateOneSizeClass(size_class);
   }
@@ -1007,23 +1007,23 @@ uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
     DeallocateFrame(top);
   }
   call_stack_.LifoPush(fake_frame);
-  uintptr_t ptr = (uintptr_t)fake_frame;
+  uptr ptr = (uptr)fake_frame;
   PoisonShadow(ptr, size, 0);
   return ptr;
 }
 void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
   CHECK(alive_);
-  size_t size = fake_frame->size_minus_one + 1;
-  size_t size_class = ComputeSizeClass(size);
+  uptr size = fake_frame->size_minus_one + 1;
+  uptr size_class = ComputeSizeClass(size);
   CHECK(allocated_size_classes_[size_class]);
-  uintptr_t ptr = (uintptr_t)fake_frame;
+  uptr ptr = (uptr)fake_frame;
   CHECK(AddrIsInSizeClass(ptr, size_class));
   CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
   size_classes_[size_class].FifoPush(fake_frame);
 }
-void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
+void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
   FakeFrame *fake_frame = (FakeFrame*)ptr;
   CHECK(fake_frame->magic = kRetiredStackFrameMagic);
   CHECK(fake_frame->descr != 0);
@@ -1036,19 +1036,19 @@ void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
 // ---------------------- Interface ---------------- {{{1
 using namespace __asan;  // NOLINT
-uptr __asan_stack_malloc(size_t size, size_t real_stack) {
+uptr __asan_stack_malloc(uptr size, uptr real_stack) {
   if (!FLAG_use_fake_stack) return real_stack;
   AsanThread *t = asanThreadRegistry().GetCurrent();
   if (!t) {
     // TSD is gone, use the real stack.
     return real_stack;
   }
-  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
+  uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
   // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
   return ptr;
 }
-void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
+void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
   if (!FLAG_use_fake_stack) return;
   if (ptr != real_stack) {
     FakeStack::OnFree(ptr, size, real_stack);
@@ -1063,19 +1063,19 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
 }
 bool __asan_get_ownership(const void *p) {
-  return malloc_info.AllocationSize((uintptr_t)p) > 0;
+  return malloc_info.AllocationSize((uptr)p) > 0;
 }
 uptr __asan_get_allocated_size(const void *p) {
-  if (p == NULL) return 0;
-  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
+  if (p == 0) return 0;
+  uptr allocated_size = malloc_info.AllocationSize((uptr)p);
   // Die if p is not malloced or if it is already freed.
   if (allocated_size == 0) {
     Report("ERROR: AddressSanitizer attempting to call "
           "__asan_get_allocated_size() for pointer which is "
           "not owned: %p\n", p);
    PRINT_CURRENT_STACK();
-    Describe((uintptr_t)p, 1);
+    Describe((uptr)p, 1);
     ShowStatsAndAbort();
   }
   return allocated_size;
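For reference, the size-class scheme that the retyped helpers above implement can be exercised in isolation: requests up to 2^26 bytes map to power-of-two classes, larger requests map to multiples of kMallocSizeClassStep (64 MB). The standalone sketch below mirrors SizeToSizeClass()/SizeClassToSize() outside the runtime; plain asserts stand in for CHECK(), the GCC/Clang bit builtins stand in for Log2()/RoundUpToPowerOfTwo(), and the local uptr typedef is only a stand-in for the sanitizer-internal type.

// Standalone sketch of the size-class mapping from the diff above (assumes GCC/Clang).
#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;  // stand-in for the sanitizer-internal uptr

static const uptr kMallocSizeClassStepLog = 26;
static const uptr kMallocSizeClassStep = (uptr)1 << kMallocSizeClassStepLog;

static uptr RoundUpToPowerOfTwo(uptr size) {
  assert(size);
  if ((size & (size - 1)) == 0) return size;
  uptr up = 63 - __builtin_clzll(size);  // index of the highest set bit
  return (uptr)1 << (up + 1);
}

static uptr SizeClassToSize(uint8_t size_class) {
  if (size_class <= kMallocSizeClassStepLog)
    return (uptr)1 << size_class;
  return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
}

static uint8_t SizeToSizeClass(uptr size) {
  if (size <= kMallocSizeClassStep) {
    uptr rounded = RoundUpToPowerOfTwo(size);
    return (uint8_t)__builtin_ctzll(rounded);  // Log2 of a power of two
  }
  return (uint8_t)((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep
                   + kMallocSizeClassStepLog);
}

int main() {
  // Every request must fit into the size returned for its class.
  for (uptr size = 1; size <= ((uptr)1 << 28); size = size * 3 + 7) {
    uint8_t c = SizeToSizeClass(size);
    assert(SizeClassToSize(c) >= size);
  }
  printf("1 KiB   -> class %u (%llu bytes)\n", (unsigned)SizeToSizeClass(1024),
         (unsigned long long)SizeClassToSize(SizeToSizeClass(1024)));
  printf("100 MiB -> class %u (%llu bytes)\n",
         (unsigned)SizeToSizeClass((uptr)100 << 20),
         (unsigned long long)SizeClassToSize(SizeToSizeClass((uptr)100 << 20)));
  return 0;
}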
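The ChunkBase/AsanChunk changes above keep the same memory layout: the chunk header and the compressed allocation stack live in the left redzone, the user pointer is the header address plus the offset field, and PtrToChunk() steps back REDZONE bytes from the user pointer. A minimal standalone sketch of that arithmetic follows; the fixed 128-byte redzone and the trimmed-down ChunkBase fields are assumptions made for illustration, not the runtime's actual layout or values.

// Sketch of the chunk layout implied by the diff:
// [ left redzone (header + alloc stack) | user memory | right redzone ]
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

typedef uintptr_t uptr;
static const uptr kRedzone = 128;  // the real runtime takes this from FLAG_redzone

struct ChunkBase {           // simplified; the real header has more fields
  uint16_t chunk_state;
  uint8_t  size_class;
  uint32_t offset;           // user memory starts at (char*)this + offset
  uptr     used_size;        // size requested by the user
};

// Mirrors PtrToChunk() for the common (non-memalign) case.
static ChunkBase *PtrToChunk(uptr user_ptr) {
  return (ChunkBase *)(user_ptr - kRedzone);
}

int main() {
  // Carve one chunk out of a plain malloc'ed buffer just to show the math.
  uptr requested = 40;
  char *raw = (char *)malloc(3 * kRedzone);  // left redzone + payload + slack
  ChunkBase *m = (ChunkBase *)raw;
  memset(m, 0, sizeof(*m));

  uptr user = (uptr)raw + kRedzone;          // beg(): first user-visible byte
  m->offset = (uint32_t)(user - (uptr)m);
  m->used_size = requested;

  assert(PtrToChunk(user) == m);             // free() can recover the header
  assert((uptr)m + m->offset == user);       // beg() round-trips
  // The left redzone after ChunkBase stores the compressed alloc stack:
  uptr alloc_stack_words = (kRedzone - sizeof(ChunkBase)) / sizeof(uint32_t);
  printf("header %zu bytes, %zu words left for the alloc stack trace\n",
         sizeof(ChunkBase), (size_t)alloc_stack_words);
  free(raw);
  return 0;
}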
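Deallocate() above flips chunk_state with a single AtomicExchange so that two racing frees cannot both observe CHUNK_ALLOCATED; the loser of the race sees CHUNK_QUARANTINE and reports a double-free, and the winner pushes the chunk onto the quarantine FIFO instead of reusing it. The sketch below shows that idea in compilable form, with std::atomic standing in for the runtime's AtomicExchange(), printf standing in for Report()/ShowStatsAndAbort(), and made-up state values.

// Sketch of the atomic state flip used for double-free detection.
#include <atomic>
#include <cstdint>
#include <cstdio>

enum ChunkState : uint16_t {  // names follow the diff; values are arbitrary here
  CHUNK_AVAILABLE = 1, CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3
};

struct Chunk {
  std::atomic<uint16_t> chunk_state{CHUNK_ALLOCATED};
};

static void Deallocate(Chunk *m) {
  // Exactly one caller can win the ALLOCATED -> QUARANTINE transition.
  uint16_t old_state = m->chunk_state.exchange(CHUNK_QUARANTINE);
  if (old_state == CHUNK_QUARANTINE) {
    printf("ERROR: attempting double-free\n");
    return;
  }
  if (old_state != CHUNK_ALLOCATED) {
    printf("ERROR: attempting free on address which was not malloc()-ed\n");
    return;
  }
  // Real runtime: poison the user memory, record the free stack, and push
  // the chunk onto the thread-local quarantine FIFO instead of reusing it.
  printf("freed, chunk moved to quarantine\n");
}

int main() {
  Chunk c;
  Deallocate(&c);  // first free: ALLOCATED -> QUARANTINE
  Deallocate(&c);  // second free: detected as a double-free
  return 0;
}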
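FakeStack::ComputeSizeClass() in the fake-stack portion of the diff maps a requested frame size to a class by rounding up to a power of two and subtracting the log of the smallest supported frame. The sketch below mirrors that computation in isolation; kMinStackFrameSizeLog = 6 (64-byte frames) is an assumed value for the example, and the GCC/Clang clzll/ctzll builtins again play the role of RoundUpToPowerOfTwo()/Log2().

// Standalone sketch of the fake-stack frame size-class computation.
#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;
static const uptr kMinStackFrameSizeLog = 6;  // assumed for this sketch

static uptr RoundUpToPowerOfTwo(uptr size) {
  assert(size);
  if ((size & (size - 1)) == 0) return size;
  return (uptr)1 << (64 - __builtin_clzll(size));
}

static uptr ComputeSizeClass(uptr alloc_size) {
  uptr rounded = RoundUpToPowerOfTwo(alloc_size);
  uptr log = (uptr)__builtin_ctzll(rounded);
  assert(alloc_size <= ((uptr)1 << log));
  assert(log == 0 || alloc_size > ((uptr)1 << (log - 1)));
  return log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
}

int main() {
  // Frames of 1..64 bytes share class 0; each doubling above that gets its own class.
  printf("frame   40 bytes -> class %llu\n", (unsigned long long)ComputeSizeClass(40));
  printf("frame  100 bytes -> class %llu\n", (unsigned long long)ComputeSizeClass(100));
  printf("frame 4096 bytes -> class %llu\n", (unsigned long long)ComputeSizeClass(4096));
  return 0;
}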

