author    Maxim Ostapenko <chefmax7@gmail.com>  2017-05-31 07:28:09 +0000
committer Maxim Ostapenko <chefmax7@gmail.com>  2017-05-31 07:28:09 +0000
commit    62a0f55930d03a7db0e42724db180af517e8dae7 (patch)
tree      ecbf9126d18ac61c2016a2725b2c403e58e5380a /compiler-rt/lib/msan/msan_allocator.h
parent    043fa4b3d62190b45390347fc1a39e1c75e9ea0d (diff)
[sanitizer] Avoid possible deadlock in child process after fork
This patch addresses https://github.com/google/sanitizers/issues/774. When a multi-threaded process forks, the child can deadlock if some thread was holding the StackDepot lock or an allocator-internal lock at the moment of the fork: that lock is never released in the child process, so the next memory allocation or deallocation there blocks forever. While calling alloc/dealloc routines after a multi-threaded fork is formally not allowed, most modern allocators (glibc, tcmalloc, jemalloc) are fork-safe in practice. Let's do the same for the sanitizers, except TSan, which has complex locking rules.

Differential Revision: https://reviews.llvm.org/D33325

llvm-svn: 304285
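To make the remedy concrete: the usual way to keep an allocator usable in the child is to register fork handlers that acquire the allocator-internal locks before fork() and release them in both the parent and the child, so the child never inherits a lock held by a thread that no longer exists there. Below is a minimal, self-contained sketch of that pattern using pthread_atfork(); g_alloc_mutex is a hypothetical stand-in for an allocator-internal lock, not part of any sanitizer runtime.

// Sketch: fork-safe locking via pthread_atfork(). g_alloc_mutex is a
// hypothetical stand-in for an allocator-internal lock.
#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

static pthread_mutex_t g_alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

// Acquire the lock before fork() so no other thread can hold it across
// the fork; release it afterwards in both the parent and the child.
static void BeforeFork() { pthread_mutex_lock(&g_alloc_mutex); }
static void AfterForkParent() { pthread_mutex_unlock(&g_alloc_mutex); }
static void AfterForkChild() { pthread_mutex_unlock(&g_alloc_mutex); }

int main() {
  pthread_atfork(BeforeFork, AfterForkParent, AfterForkChild);
  pid_t pid = fork();
  if (pid == 0) {
    // Without the handlers, this could deadlock if another thread held
    // the lock at the moment of the fork.
    pthread_mutex_lock(&g_alloc_mutex);
    printf("child: allocator lock is free, no deadlock\n");
    pthread_mutex_unlock(&g_alloc_mutex);
    _exit(0);
  }
  waitpid(pid, nullptr, 0);
  return 0;
}

The sanitizers implement the equivalent discipline in their fork interceptors rather than through pthread_atfork(), but the idea is the same: lock everything before fork(), unlock everything on both sides afterwards.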
Diffstat (limited to 'compiler-rt/lib/msan/msan_allocator.h')
-rw-r--r--  compiler-rt/lib/msan/msan_allocator.h | 97
1 file changed, 97 insertions(+), 0 deletions(-)
diff --git a/compiler-rt/lib/msan/msan_allocator.h b/compiler-rt/lib/msan/msan_allocator.h
index 407942e54c1..abd4ea67852 100644
--- a/compiler-rt/lib/msan/msan_allocator.h
+++ b/compiler-rt/lib/msan/msan_allocator.h
@@ -15,9 +15,106 @@
#define MSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
namespace __msan {
+struct Metadata {
+ uptr requested_size;
+};
+
+struct MsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnUnmap(uptr p, uptr size) const {
+ __msan_unpoison((void *)p, size);
+
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ uptr shadow_p = MEM_TO_SHADOW(p);
+ ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
+ if (__msan_get_track_origins()) {
+ uptr origin_p = MEM_TO_ORIGIN(p);
+ ReleaseMemoryPagesToOS(origin_p, origin_p + size);
+ }
+ }
+};
+
+#if defined(__mips64)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30;
+ static const uptr kRegionSizeLog = 20;
+ static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+ typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+ struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
+ typedef __msan::ByteMap ByteMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+ typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#elif defined(__x86_64__)
+#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
+ static const uptr kAllocatorSpace = 0x700000000000ULL;
+#else
+ static const uptr kAllocatorSpace = 0x600000000000ULL;
+#endif
+ static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
+#elif defined(__powerpc64__)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x300000000000;
+ static const uptr kSpaceSize = 0x020000000000; // 2T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__aarch64__)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+ static const uptr kRegionSizeLog = 20;
+ static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+ typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+ struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
+ typedef __msan::ByteMap ByteMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+ typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+
+Allocator &get_allocator();
+
struct MsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
// Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
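For context on how the get_allocator() accessor declared above gets used: it gives the rest of the runtime a handle on the combined allocator, for example so a fork interceptor can quiesce it around fork(). The sketch below is illustrative only; INTERCEPTOR/REAL come from the sanitizer interception machinery, ForceLock()/ForceUnlock() from the sanitizer_common combined-allocator interface, and StackDepotLockAll()/StackDepotUnlockAll() from the StackDepot API, but the exact call sites and lock ordering in the real runtime may differ from what is shown.

// Illustrative sketch, not the literal patch: a fork interceptor that
// quiesces the MSan allocator and the StackDepot around fork().
#include "interception/interception.h"
#include "msan_allocator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

INTERCEPTOR(int, fork, void) {
  __sanitizer::StackDepotLockAll();     // stack depot first...
  __msan::get_allocator().ForceLock();  // ...then the allocator locks
  int pid = REAL(fork)();
  // Unlock on both sides of the fork so the child does not inherit a
  // lock owned by a thread that no longer exists there.
  __msan::get_allocator().ForceUnlock();
  __sanitizer::StackDepotUnlockAll();
  return pid;
}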