-rw-r--r--  compiler-rt/lib/asan/asan_malloc_local.h  25
-rw-r--r--  compiler-rt/lib/asan/asan_new_delete.cc   30
2 files changed, 29 insertions, 26 deletions
diff --git a/compiler-rt/lib/asan/asan_malloc_local.h b/compiler-rt/lib/asan/asan_malloc_local.h
index 65e3e5a97fa..3f784b90c73 100644
--- a/compiler-rt/lib/asan/asan_malloc_local.h
+++ b/compiler-rt/lib/asan/asan_malloc_local.h
@@ -17,25 +17,34 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "asan_internal.h"
-// On RTEMS, we use the local pool to handle memory allocation when the ASan
-// run-time is not up.
static INLINE bool EarlyMalloc() {
- return SANITIZER_RTEMS && (!__asan::asan_inited ||
- __asan::asan_init_is_running);
+ return SANITIZER_RTEMS &&
+ (!__asan::asan_inited || __asan::asan_init_is_running);
}
-void* MemalignFromLocalPool(uptr alignment, uptr size);
-
#if SANITIZER_RTEMS
bool IsFromLocalPool(const void *ptr);
+void *MemalignFromLocalPool(uptr alignment, uptr size);
+
+// On RTEMS, we use the local pool to handle memory allocation when the ASan
+// run-time is not up. This macro is expanded in the context of the operator new
+// implementation.
+#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow) \
+ do { \
+ if (UNLIKELY(EarlyMalloc())) { \
+ void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size); \
+ if (!nothrow) \
+ CHECK(res); \
+ return res; \
+ } \
+ } while (0)
-#define ALLOCATE_FROM_LOCAL_POOL UNLIKELY(EarlyMalloc())
#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr))
#else // SANITIZER_RTEMS
-#define ALLOCATE_FROM_LOCAL_POOL 0
+#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow)
#define IS_FROM_LOCAL_POOL(ptr) 0
#endif // SANITIZER_RTEMS
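
The new MAYBE_ALLOCATE_FROM_LOCAL_POOL macro is wrapped in do { ... } while (0) so that its multi-statement expansion behaves as a single statement at the call site. Below is a minimal standalone sketch of that idiom; it is not part of the patch, and every name in it (early_malloc, local_pool_alloc, MAYBE_ALLOCATE_EARLY, my_new) is a hypothetical stand-in for the corresponding ASan helper:

#include <cstdio>
#include <cstdlib>
#include <cstddef>

static bool early_malloc() { return true; }         // stand-in for EarlyMalloc()
static void *local_pool_alloc(std::size_t size) {   // stand-in for MemalignFromLocalPool()
  static char pool[4096];
  return size <= sizeof(pool) ? pool : nullptr;
}

// Like the real macro, this one reads `size` from the enclosing scope and may
// return from the enclosing function.
#define MAYBE_ALLOCATE_EARLY(nothrow)        \
  do {                                       \
    if (early_malloc()) {                    \
      void *res = local_pool_alloc(size);    \
      if (!nothrow && !res)                  \
        std::abort();                        \
      return res;                            \
    }                                        \
  } while (0)

static void *my_new(std::size_t size, bool use_early_path) {
  if (use_early_path)
    MAYBE_ALLOCATE_EARLY(false);  // expands to a single statement, so this
  else                            // else still binds to the outer if; a bare
    std::puts("regular path");    // { ... } expansion would not parse here
  return nullptr;
}

int main() {
  my_new(64, true);
  my_new(64, false);
  return 0;
}

The same property is what lets the non-RTEMS variant expand to nothing: the trailing semicolon at the use site is then simply an empty statement.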
diff --git a/compiler-rt/lib/asan/asan_new_delete.cc b/compiler-rt/lib/asan/asan_new_delete.cc
index 8b53ac96e06..5f51d12b1b5 100644
--- a/compiler-rt/lib/asan/asan_new_delete.cc
+++ b/compiler-rt/lib/asan/asan_new_delete.cc
@@ -71,25 +71,19 @@ enum class align_val_t: size_t {};
// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM.
// For local pool allocation, align to SHADOW_GRANULARITY to match asan
// allocator behavior.
-#define OPERATOR_NEW_BODY(type, nothrow) \
- if (ALLOCATE_FROM_LOCAL_POOL) {\
- void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size);\
- if (!nothrow) CHECK(res);\
- return res;\
- }\
- GET_STACK_TRACE_MALLOC;\
- void *res = asan_memalign(0, size, &stack, type);\
- if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+#define OPERATOR_NEW_BODY(type, nothrow) \
+ MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_memalign(0, size, &stack, type); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
return res;
-#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
- if (ALLOCATE_FROM_LOCAL_POOL) {\
- void *res = MemalignFromLocalPool((uptr)align, size);\
- if (!nothrow) CHECK(res);\
- return res;\
- }\
- GET_STACK_TRACE_MALLOC;\
- void *res = asan_memalign((uptr)align, size, &stack, type);\
- if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
+ MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_memalign((uptr)align, size, &stack, type); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
return res;
// On OS X it's not enough to just provide our own 'operator new' and
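
For context, OPERATOR_NEW_BODY and OPERATOR_NEW_BODY_ALIGN are expanded inside the operator new overloads defined later in asan_new_delete.cc, outside this hunk. A simplified sketch of that usage follows (attributes and the remaining overloads are omitted, and the exact signatures in the file may differ slightly):

// Simplified usage sketch, not part of the patch: the throwing overload passes
// nothrow=false, so a failed local-pool allocation aborts via CHECK and a
// failed asan_memalign reports OOM; the nothrow overload passes nothrow=true
// and lets a null pointer propagate to the caller.
void *operator new(size_t size)
{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); }

void *operator new(size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); }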