-rw-r--r--  compiler-rt/lib/asan/asan_allocator.cc                              18
-rw-r--r--  compiler-rt/lib/hwasan/hwasan_allocator.cc                           4
-rw-r--r--  compiler-rt/lib/lsan/lsan_allocator.cc                              10
-rw-r--r--  compiler-rt/lib/msan/msan_allocator.cc                              14
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator.cc             13
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h     12
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h      2
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h    15
-rw-r--r--  compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc  15
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_mman.cc                               14
10 files changed, 60 insertions(+), 57 deletions(-)
diff --git a/compiler-rt/lib/asan/asan_allocator.cc b/compiler-rt/lib/asan/asan_allocator.cc
index a437ae1cd3b..ee8af9758b4 100644
--- a/compiler-rt/lib/asan/asan_allocator.cc
+++ b/compiler-rt/lib/asan/asan_allocator.cc
@@ -398,7 +398,7 @@ struct Allocator {
     if (UNLIKELY(!asan_inited))
       AsanInitFromRtl();
     if (RssLimitExceeded())
-      return AsanAllocator::FailureHandler::OnOOM();
+      return ReturnNullOrDieOnFailure::OnOOM();
     Flags &fl = *flags();
     CHECK(stack);
     const uptr min_alignment = SHADOW_GRANULARITY;
@@ -433,7 +433,7 @@ struct Allocator {
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
              (void*)size);
-      return AsanAllocator::FailureHandler::OnBadRequest();
+      return ReturnNullOrDieOnFailure::OnBadRequest();
     }

     AsanThread *t = GetCurrentThread();
@@ -446,8 +446,8 @@ struct Allocator {
       AllocatorCache *cache = &fallback_allocator_cache;
       allocated = allocator.Allocate(cache, needed_size, 8);
     }
-    if (!allocated)
-      return nullptr;
+    if (UNLIKELY(!allocated))
+      return ReturnNullOrDieOnFailure::OnOOM();

     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
       // Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -660,8 +660,8 @@ struct Allocator {
   }

   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
-    if (CheckForCallocOverflow(size, nmemb))
-      return AsanAllocator::FailureHandler::OnBadRequest();
+    if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+      return ReturnNullOrDieOnFailure::OnBadRequest();
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
     // as it comes directly from mmap.
@@ -883,7 +883,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
     errno = errno_ENOMEM;
-    return AsanAllocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -895,7 +895,7 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return AsanAllocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(
       instance.Allocate(size, alignment, stack, alloc_type, true));
@@ -904,7 +904,7 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         BufferedStackTrace *stack) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
-    AsanAllocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cc b/compiler-rt/lib/hwasan/hwasan_allocator.cc
index 2acac818912..5bd46f8f579 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cc
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cc
@@ -128,7 +128,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: HWAddressSanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   HwasanThread *t = GetCurrentThread();
   void *allocated;
@@ -140,6 +140,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
     AllocatorCache *cache = &fallback_allocator_cache;
     allocated = allocator.Allocate(cache, size, alignment);
   }
+  if (UNLIKELY(!allocated))
+    return ReturnNullOrDieOnFailure::OnOOM();
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->state = CHUNK_ALLOCATED;
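
Note on the pattern above, which repeats in every tool below: the frontends now name a concrete handler instead of going through the allocator's FailureHandler typedef. What follows is a minimal sketch of the handler contract this relies on, with the flag plumbing simplified; the real classes live in sanitizer_common, the flag is set via SetAllocatorMayReturnNull(), and failures report through the sanitizer's own reporting machinery rather than fputs/abort.

#include <cstdio>
#include <cstdlib>

// Simplified stand-in for the common allocator_may_return_null flag.
static bool allocator_may_return_null = false;

// Handler named by the tool allocators (asan, hwasan, lsan, msan, tsan):
// honor the user's flag -- return null when allowed, die otherwise.
struct ReturnNullOrDieOnFailure {
  static void *OnBadRequest() {
    if (allocator_may_return_null)
      return nullptr;  // callers translate this into errno
    std::fputs("allocator is terminating the process instead of returning 0\n",
               stderr);
    std::abort();
  }
  static void *OnOOM() { return OnBadRequest(); }
};

// Handler used by the runtime's internal allocator: a failure there is never
// recoverable, so it always dies.
struct DieOnFailure {
  static void *OnBadRequest() { std::abort(); }
  static void *OnOOM() { std::abort(); }
};
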
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cc b/compiler-rt/lib/lsan/lsan_allocator.cc
index 2df58b44f6b..f5e4b58031e 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cc
+++ b/compiler-rt/lib/lsan/lsan_allocator.cc
@@ -76,9 +76,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
     size = 1;
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+  if (UNLIKELY(!p))
+    return ReturnNullOrDieOnFailure::OnOOM();
   // Do not rely on the allocator to clear the memory (it's slow).
   if (cleared && allocator.FromPrimary(p))
     memset(p, 0, size);
@@ -90,7 +92,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,

 static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
   if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   size *= nmemb;
   return Allocate(stack, size, 1, true);
 }
@@ -108,7 +110,7 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
   if (new_size > kMaxAllowedMallocSize) {
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
     allocator.Deallocate(GetAllocatorCache(), p);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
   RegisterAllocation(stack, p, new_size);
@@ -129,7 +131,7 @@ uptr GetMallocUsableSize(const void *p) {
 void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
 }
diff --git a/compiler-rt/lib/msan/msan_allocator.cc b/compiler-rt/lib/msan/msan_allocator.cc
index 0f994232493..8a23edf2c91 100644
--- a/compiler-rt/lib/msan/msan_allocator.cc
+++ b/compiler-rt/lib/msan/msan_allocator.cc
@@ -141,7 +141,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   MsanThread *t = GetCurrentThread();
   void *allocated;
@@ -153,6 +153,8 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
     AllocatorCache *cache = &fallback_allocator_cache;
     allocated = allocator.Allocate(cache, size, alignment);
   }
+  if (UNLIKELY(!allocated))
+    return ReturnNullOrDieOnFailure::OnOOM();
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->requested_size = size;
@@ -236,7 +238,7 @@ void *msan_malloc(uptr size, StackTrace *stack) {
 }

 void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
   if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
-    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+    return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
   return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
 }
@@ -258,7 +260,7 @@ void *msan_pvalloc(uptr size, StackTrace *stack) {
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
     errno = errno_ENOMEM;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -268,7 +270,7 @@ void *msan_pvalloc(uptr size, StackTrace *stack) {
 void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
@@ -276,7 +278,7 @@ void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
 void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
@@ -284,7 +286,7 @@ void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
 int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         StackTrace *stack) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
-    Allocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = MsanAllocate(stack, size, alignment, false);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cc b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cc
index fc4f7a75ae3..0642ee426da 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cc
@@ -140,8 +140,8 @@ void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
   if (size + sizeof(u64) < size)
     return nullptr;
   void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
-  if (!p)
-    return nullptr;
+  if (UNLIKELY(!p))
+    return DieOnFailure::OnOOM();
   ((u64*)p)[0] = kBlockMagic;
   return (char*)p + sizeof(u64);
 }
@@ -155,16 +155,17 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
   size = size + sizeof(u64);
   CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
   void *p = RawInternalRealloc(addr, size, cache);
-  if (!p)
-    return nullptr;
+  if (UNLIKELY(!p))
+    return DieOnFailure::OnOOM();
   return (char*)p + sizeof(u64);
 }

 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
   if (UNLIKELY(CheckForCallocOverflow(count, size)))
-    return InternalAllocator::FailureHandler::OnBadRequest();
+    return DieOnFailure::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
-  if (p) internal_memset(p, 0, count * size);
+  if (LIKELY(p))
+    internal_memset(p, 0, count * size);
   return p;
 }
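
CheckForCallocOverflow guards every calloc path in this patch before the nmemb * size multiplication is performed. Roughly what it does, as a self-contained sketch; the real helper lives in sanitizer_common, and the uptr typedef here is a stand-in for its pointer-sized unsigned integer:

// True if count * size would overflow a uptr, i.e. calloc must fail
// instead of allocating a silently truncated buffer.
typedef unsigned long uptr;

static bool CheckForCallocOverflow(uptr size, uptr count) {
  if (size == 0)
    return false;                    // 0 * anything cannot overflow
  return count > (uptr)-1 / size;    // equivalent to size * count > max uptr
}
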
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index 0d8a2a174bb..1f874d60b92 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,8 +24,6 @@ template <class PrimaryAllocator, class AllocatorCache,
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  typedef typename SecondaryAllocator::FailureHandler FailureHandler;
-
   void InitLinkerInitialized(s32 release_to_os_interval_ms) {
     primary_.Init(release_to_os_interval_ms);
     secondary_.InitLinkerInitialized();
@@ -42,8 +40,12 @@ class CombinedAllocator {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
-    if (size + alignment < size)
-      return FailureHandler::OnBadRequest();
+    if (size + alignment < size) {
+      Report("WARNING: %s: CombinedAllocator allocation overflow: "
+             "0x%zx bytes with 0x%zx alignment requested\n",
+             SanitizerToolName, size, alignment);
+      return nullptr;
+    }
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -62,8 +64,6 @@ class CombinedAllocator {
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
       res = secondary_.Allocate(&stats_, original_size, alignment);
-    if (!res)
-      return FailureHandler::OnOOM();
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
     return res;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
index a791d0d9489..10536a9659c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -47,7 +47,7 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
     InternalAllocatorCache;

 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+                          LargeMmapAllocator<NoOpMapUnmapCallback>
                           > InternalAllocator;

 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
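
The two headers above carry the core of the change: CombinedAllocator (and, below, LargeMmapAllocator) drop their FailureHandler template parameter and simply return nullptr on overflow or OOM, leaving policy to the tool frontends. A sketch of the resulting calling convention follows; CombinedAllocate and ToolAllocate are hypothetical stand-ins for the patched interfaces, and __builtin_expect stands in for the UNLIKELY macro:

#include <cstddef>
#include <cstdlib>

static bool allocator_may_return_null = false;

// After this patch, the combined/secondary layers return nullptr on both a
// bad request (size + alignment overflow) and OOM, instead of invoking a
// baked-in FailureHandler.
static void *CombinedAllocate(std::size_t size, std::size_t alignment) {
  if (size + alignment < size)
    return nullptr;              // the real code also Report()s a warning
  return std::malloc(size);      // placeholder for the primary/secondary path
}

// Frontend wrapper in the style of the asan/msan/hwasan hunks: the null
// check, and the decision to die, now live in the tool.
static void *ToolAllocate(std::size_t size, std::size_t alignment) {
  void *p = CombinedAllocate(size, alignment);
  if (__builtin_expect(!p, 0)) {   // UNLIKELY(!p)
    if (allocator_may_return_null)
      return nullptr;              // ReturnNullOrDieOnFailure: null allowed
    std::abort();                  // ...or fatal when the flag forbids null
  }
  return p;
}

This is also why the test below can assert that huge requests yield a plain null from the combined allocator, with no EXPECT_DEATH: dying is now the frontend's job.
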
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 261dfb5e1a2..fb3182c982d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,12 +17,9 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback,
-          class FailureHandlerT = ReturnNullOrDieOnFailure>
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
 class LargeMmapAllocator {
  public:
-  typedef FailureHandlerT FailureHandler;
-
   void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
   }
@@ -38,12 +35,16 @@ class LargeMmapAllocator {
     if (alignment > page_size_)
       map_size += alignment;
     // Overflow.
-    if (map_size < size)
-      return FailureHandler::OnBadRequest();
+    if (map_size < size) {
+      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
+             "0x%zx bytes with 0x%zx alignment requested\n",
+             SanitizerToolName, map_size, alignment);
+      return nullptr;
+    }
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return FailureHandler::OnOOM();
+      return nullptr;
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 7b5e3e21f1e..cc7892b639a 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -444,7 +444,7 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<TestMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -482,7 +482,7 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
 #endif

 TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -565,7 +565,6 @@ void TestCombinedAllocator() {
   typedef
       CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
-  SetAllocatorMayReturnNull(true);
   Allocator *a = new Allocator;
   a->Init(kReleaseToOSIntervalNever);
   std::mt19937 r;
@@ -579,11 +578,7 @@ void TestCombinedAllocator() {
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
-
-  // Set to false
-  SetAllocatorMayReturnNull(false);
-  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
-               "allocator is terminating the process");
+  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);

   const uptr kNumAllocs = 100000;
   const uptr kNumIter = 10;
@@ -893,7 +888,7 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
 }

 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -920,7 +915,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
 }

 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cc b/compiler-rt/lib/tsan/rtl/tsan_mman.cc
index 19680238bf7..39c0d860704 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cc
@@ -153,10 +153,10 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
 void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                           bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
   if (UNLIKELY(p == 0))
-    return 0;
+    return ReturnNullOrDieOnFailure::OnOOM();
   if (ctx && ctx->initialized)
     OnUserAlloc(thr, pc, (uptr)p, sz, true);
   if (signal)
@@ -179,7 +179,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {

 void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
   if (UNLIKELY(CheckForCallocOverflow(size, n)))
-    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+    return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
   void *p = user_alloc_internal(thr, pc, n * size);
   if (p)
     internal_memset(p, 0, n * size);
@@ -224,7 +224,7 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
 void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!IsPowerOfTwo(align))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -232,7 +232,7 @@ void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
 int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                         uptr sz) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
-    Allocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = user_alloc_internal(thr, pc, sz, align);
@@ -246,7 +246,7 @@ int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
 void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -259,7 +259,7 @@ void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
     errno = errno_ENOMEM;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
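
For reference, the pvalloc guard that recurs in the asan, msan, and tsan hunks: CheckForPvallocOverflow protects the RoundUpTo that follows it from wrapping. A sketch under the assumption that uptr is a pointer-sized unsigned integer; the real helpers live in sanitizer_common:

typedef unsigned long uptr;

// pvalloc rounds the request up to a whole page; if size sits within one
// page of the top of the address space, the rounding itself would wrap, so
// the callers above reject that case with ENOMEM before rounding.
static bool CheckForPvallocOverflow(uptr size, uptr page_size) {
  return size > (uptr)-1 - (page_size - 1);
}

// The rounding the guard protects; each caller then does:
//   size = size ? RoundUpTo(size, PageSize) : PageSize;
static uptr RoundUpTo(uptr size, uptr boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}
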