author    Kostya Serebryany <kcc@google.com>	2012-12-20 08:53:41 +0000
committer Kostya Serebryany <kcc@google.com>	2012-12-20 08:53:41 +0000
commit    ab8d33184d06bc726058be393f81a25e42621168 (patch)
tree      4991e130d587a6126e01c3fab83e8f63769cb43a
parent    5a0761ed3cc47405b2c993a285e83f087175ecfa (diff)
[asan] asan_allocator2: make all remaining tests pass.
llvm-svn: 170680
-rw-r--r--  compiler-rt/lib/asan/asan_allocator2.cc        | 38
-rw-r--r--  compiler-rt/lib/asan/asan_thread_registry.cc   |  1
-rw-r--r--  compiler-rt/lib/asan/tests/asan_noinst_test.cc | 23
3 files changed, 51 insertions, 11 deletions
diff --git a/compiler-rt/lib/asan/asan_allocator2.cc b/compiler-rt/lib/asan/asan_allocator2.cc
index bc91ad7b916..4527e3bc510 100644
--- a/compiler-rt/lib/asan/asan_allocator2.cc
+++ b/compiler-rt/lib/asan/asan_allocator2.cc
@@ -196,9 +196,7 @@ class Quarantine: public AsanChunkFifoList {
PushList(q);
PopAndDeallocateLoop(ms);
}
- void SwallowThreadLocalCache(AllocatorCache *cache) {
- // FIXME.
- }
+
void BypassThreadLocalQuarantine(AsanChunk *m) {
SpinMutexLock l(&mutex_);
Push(m);
@@ -225,6 +223,12 @@ class Quarantine: public AsanChunkFifoList {
void *p = reinterpret_cast<void *>(alloc_beg);
if (m->from_memalign)
p = allocator.GetBlockBegin(p);
+
+ // Statistics.
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.real_frees++;
+ thread_stats.really_freed += m->UsedSize();
+
allocator.Deallocate(GetAllocatorCache(ms), p);
}
SpinMutex mutex_;
@@ -308,6 +312,10 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack) {
*shadow = size & (SHADOW_GRANULARITY - 1);
}
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.mallocs++;
+ thread_stats.malloced += size;
+
void *res = reinterpret_cast<void *>(user_beg);
ASAN_MALLOC_HOOK(res, size);
return res;
@@ -341,6 +349,10 @@ static void Deallocate(void *ptr, StackTrace *stack) {
RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
kAsanHeapFreeMagic);
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
// Push into quarantine.
if (t) {
AsanChunkFifoList &q = t->malloc_storage().quarantine_;
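The three stats hunks above share one pattern: every thread bumps counters in its own AsanStats, so the hot malloc/free paths never take a lock, and a registry sums the per-thread copies on demand. Note that a free is counted twice, once as frees/freed when the chunk enters the quarantine and again as real_frees/really_freed when the quarantine evicts it and the memory actually returns to the allocator. A minimal standalone sketch of that lifecycle (only the counter names match the tree; the surrounding scaffolding is hypothetical):

// Hypothetical sketch of the per-thread accounting added in this patch.
#include <cstddef>

struct Stats {
  size_t mallocs = 0, malloced = 0;         // bumped in Allocate()
  size_t frees = 0, freed = 0;              // Deallocate(): chunk enters quarantine
  size_t real_frees = 0, really_freed = 0;  // PopAndDeallocate(): chunk leaves it
};

thread_local Stats t_stats;  // one copy per thread, so no locking here

void OnMalloc(size_t size)   { t_stats.mallocs++;    t_stats.malloced += size; }
void OnFree(size_t size)     { t_stats.frees++;      t_stats.freed += size; }
void OnRealFree(size_t size) { t_stats.real_frees++; t_stats.really_freed += size; }
// Invariant: once the quarantine drains completely, freed == really_freed.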
@@ -432,7 +444,7 @@ AsanChunkView FindHeapChunkByAddress(uptr addr) {
void AsanThreadLocalMallocStorage::CommitBack() {
quarantine.SwallowThreadLocalQuarantine(this);
- quarantine.SwallowThreadLocalCache(GetAllocatorCache(this));
+ allocator.SwallowCache(GetAllocatorCache(this));
}
SANITIZER_INTERFACE_ATTRIBUTE
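The CommitBack change above supplies what the deleted SwallowThreadLocalCache stub left as a FIXME: at thread teardown, sanitizer_common's SwallowCache returns every block the thread cached locally to the shared allocator, so a dying thread strands no memory. A rough standalone sketch of that drain (DrainCache and SizeClass are hypothetical names; the real logic lives in the combined allocator):

// Hypothetical sketch: hand a per-thread cache of free blocks back to a
// shared, lock-protected free list, as allocator.SwallowCache() does.
#include <mutex>
#include <vector>

struct SizeClass {
  std::mutex mu;
  std::vector<void*> free_list;  // shared across threads
};

void DrainCache(std::vector<void*> *cache, SizeClass *sc) {
  std::lock_guard<std::mutex> lock(sc->mu);
  sc->free_list.insert(sc->free_list.end(), cache->begin(), cache->end());
  cache->clear();  // nothing is left behind when the thread exits
}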
@@ -517,20 +529,24 @@ void asan_mz_force_unlock() {
using namespace __asan; // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
-// just return "size".
+// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
- UNIMPLEMENTED();
- return 0;
+ return size;
}
bool __asan_get_ownership(const void *p) {
- UNIMPLEMENTED();
- return false;
+ return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
}
uptr __asan_get_allocated_size(const void *p) {
- UNIMPLEMENTED();
- return 0;
+ if (p == 0) return 0;
+ uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
+ // Die if p is not malloced or if it is already freed.
+ if (allocated_size == 0) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
+ }
+ return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
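With the three UNIMPLEMENTED stubs filled in, the allocation-introspection API becomes usable under allocator2. A hedged usage example (assuming the 2012-era asan_interface.h header path; later toolchains renamed these entry points to __sanitizer_get_ownership and friends):

// Build with: clang++ -fsanitize=address example.cc
#include <sanitizer/asan_interface.h>  // header path is an assumption
#include <cstdio>
#include <cstdlib>

int main() {
  void *p = malloc(100);
  printf("owned:     %d\n", (int)__asan_get_ownership(p));   // 1: live heap pointer
  printf("size:      %zu\n", __asan_get_allocated_size(p));  // 100: exact request
  // allocator2 echoes the request back; redzone sizes are not exposed.
  printf("estimated: %zu\n", __asan_get_estimated_allocated_size(100));  // 100
  free(p);
  // After free(), __asan_get_ownership(p) is false, and calling
  // __asan_get_allocated_size(p) dies with a "not owned" report (see above).
  return 0;
}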
diff --git a/compiler-rt/lib/asan/asan_thread_registry.cc b/compiler-rt/lib/asan/asan_thread_registry.cc
index 801b0b2df96..bcfe4f20c5a 100644
--- a/compiler-rt/lib/asan/asan_thread_registry.cc
+++ b/compiler-rt/lib/asan/asan_thread_registry.cc
@@ -130,6 +130,7 @@ uptr AsanThreadRegistry::GetFreeBytes() {
ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped
+                  - accumulated_stats_.munmaped
                   + accumulated_stats_.really_freed
                   + accumulated_stats_.really_freed_redzones;
uptr total_used = accumulated_stats_.malloced
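The one-line fix above matters because allocator2, unlike the old allocator, really unmaps huge chunks: without subtracting munmaped, GetFreeBytes would keep counting memory already returned to the OS. A worked example with made-up numbers (field names from AsanStats, values invented):

typedef unsigned long uptr;  // as in sanitizer_common

uptr mmaped   = 1UL << 30;   // 1 GB ever mapped by the allocator
uptr munmaped = 1UL << 28;   // a 256 MB huge chunk already unmapped
uptr really_freed          = 16UL << 20;  // 16 MB drained from quarantine
uptr really_freed_redzones =  1UL << 20;  //  1 MB of their redzones

uptr total_free = mmaped - munmaped + really_freed + really_freed_redzones;
// = 805306368 + 16777216 + 1048576 = 823132160 bytes actually reusable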
diff --git a/compiler-rt/lib/asan/tests/asan_noinst_test.cc b/compiler-rt/lib/asan/tests/asan_noinst_test.cc
index 7f9938acea2..1c233b19ae9 100644
--- a/compiler-rt/lib/asan/tests/asan_noinst_test.cc
+++ b/compiler-rt/lib/asan/tests/asan_noinst_test.cc
@@ -345,7 +345,11 @@ TEST(AddressSanitizer, MemsetWildAddressTest) {
}
TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
+#if ASAN_ALLOCATOR_VERSION == 1
EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
+#elif ASAN_ALLOCATOR_VERSION == 2
+ EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
+#endif
const size_t sizes[] = { 1, 30, 1<<30 };
for (size_t i = 0; i < 3; i++) {
EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
@@ -410,6 +414,7 @@ static void DoDoubleFree() {
delete Ident(x);
}
+#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that large malloced
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
@@ -441,9 +446,26 @@ static void RunGetHeapSizeTestAndDie() {
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
+#elif ASAN_ALLOCATOR_VERSION == 2
+TEST(AddressSanitizerInterface, GetHeapSizeTest) {
+ // asan_allocator2 does not keep huge chunks in the free list, but unmaps them.
+ // The chunk should be greater than the quarantine size,
+ // otherwise it will be stuck in quarantine instead of being unmapped.
+ static const size_t kLargeMallocSize = 1 << 28; // 256M
+ uptr old_heap_size = __asan_get_heap_size();
+ for (int i = 0; i < 3; i++) {
+ // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
+ free(Ident(malloc(kLargeMallocSize)));
+ EXPECT_EQ(old_heap_size, __asan_get_heap_size());
+ }
+}
+#endif
// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
+#if ASAN_ALLOCATOR_VERSION == 1
+ // asan_allocator2 does not keep large chunks in free_lists, so this test
+ // will not work.
size_t old_free_bytes, new_free_bytes;
static const size_t kLargeMallocSize = 1 << 29; // 512M
// If we malloc and free a large memory chunk, it will not fall
@@ -455,6 +477,7 @@ static void DoLargeMallocForGetFreeBytesTestAndDie() {
new_free_bytes = __asan_get_free_bytes();
fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
+#endif // ASAN_ALLOCATOR_VERSION
// Test passed.
DoDoubleFree();
}
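Both allocator2 test branches above lean on the quarantine's byte budget: a 256 MB chunk exceeds it, so pushing the freed chunk immediately triggers an eviction that unmaps it, which is why __asan_get_heap_size returns to its old value and why the free-bytes growth check only makes sense for allocator1. A sketch of that overflow behavior (names here are hypothetical; the real budget is the quarantine_size runtime flag):

// Hypothetical sketch of a byte-budgeted FIFO quarantine.
#include <cstddef>
#include <cstdlib>
#include <queue>

struct Chunk { size_t size; };

// Assumed hook standing in for the real unmap/deallocate path.
static void RealDeallocate(Chunk *m) { free(m); }

class Quarantine {
  std::queue<Chunk*> q_;
  size_t queued_bytes_ = 0;
  const size_t max_bytes_;
 public:
  explicit Quarantine(size_t max_bytes) : max_bytes_(max_bytes) {}
  void Push(Chunk *m) {
    q_.push(m);
    queued_bytes_ += m->size;
    // A chunk larger than the whole budget overflows it at once, so it is
    // deallocated immediately instead of lingering in the queue; that is
    // why the test's 256 MB malloc leaves the heap size unchanged.
    while (queued_bytes_ > max_bytes_) {
      Chunk *victim = q_.front();
      q_.pop();
      queued_bytes_ -= victim->size;
      RealDeallocate(victim);
    }
  }
};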