 compiler-rt/lib/scudo/scudo_allocator.cpp   |  8 +++++++-
 compiler-rt/lib/scudo/scudo_tls.h           |  2 +-
 compiler-rt/lib/scudo/scudo_tls_android.cpp |  2 +-
 compiler-rt/lib/scudo/scudo_tls_android.inc |  4 ++--
 compiler-rt/lib/scudo/scudo_tls_linux.cpp   |  4 +++-
 compiler-rt/lib/scudo/scudo_tls_linux.inc   |  6 +++---
 compiler-rt/test/scudo/tsd_destruction.cpp  | 42 ++++++++++++++++++++++++++++
 7 files changed, 59 insertions(+), 9 deletions(-)
diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
index 4f87d35232d..92155797ca4 100644
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -495,7 +495,13 @@ struct ScudoAllocator {
   // Deallocates a Chunk, which means adding it to the delayed free list (or
   // Quarantine).
   void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
-    initThreadMaybe();
+    // For a deallocation, we only ensure minimal initialization, meaning thread
+    // local data will be left uninitialized for now (when using ELF TLS). The
+    // fallback cache will be used instead. This is a workaround for a situation
+    // where the only heap operation performed in a thread would be a free past
+    // the TLS destructors, ending up in initialized thread specific data never
+    // being destroyed properly. Any other heap operation will do a full init.
+    initThreadMaybe(/*MinimalInit=*/true);
     // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
     if (UNLIKELY(!UserPtr))
       return;
diff --git a/compiler-rt/lib/scudo/scudo_tls.h b/compiler-rt/lib/scudo/scudo_tls.h
index 20c49204cf1..4784f6a305c 100644
--- a/compiler-rt/lib/scudo/scudo_tls.h
+++ b/compiler-rt/lib/scudo/scudo_tls.h
@@ -36,7 +36,7 @@ struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform {
   void commitBack();
 };
 
-void initThread();
+void initThread(bool MinimalInit);
 
 // Platform specific fastpath functions definitions.
 #include "scudo_tls_android.inc"
diff --git a/compiler-rt/lib/scudo/scudo_tls_android.cpp b/compiler-rt/lib/scudo/scudo_tls_android.cpp
index ec74e37c8db..c0ea417ab86 100644
--- a/compiler-rt/lib/scudo/scudo_tls_android.cpp
+++ b/compiler-rt/lib/scudo/scudo_tls_android.cpp
@@ -49,7 +49,7 @@ static void initOnce() {
     ThreadContexts[i].init();
 }
 
-void initThread() {
+void initThread(bool MinimalInit) {
   pthread_once(&GlobalInitialized, initOnce);
   // Initial context assignment is done in a plain round-robin fashion.
   u32 Index = atomic_fetch_add(&ThreadContextCurrentIndex, 1,
diff --git a/compiler-rt/lib/scudo/scudo_tls_android.inc b/compiler-rt/lib/scudo/scudo_tls_android.inc
index 8ecad7a30a6..6b82e49f55a 100644
--- a/compiler-rt/lib/scudo/scudo_tls_android.inc
+++ b/compiler-rt/lib/scudo/scudo_tls_android.inc
@@ -20,10 +20,10 @@
 
 #if SANITIZER_LINUX && SANITIZER_ANDROID
 
-ALWAYS_INLINE void initThreadMaybe() {
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
   if (LIKELY(*get_android_tls_ptr()))
     return;
-  initThread();
+  initThread(MinimalInit);
 }
 
 ScudoThreadContext *getThreadContextAndLockSlow();
diff --git a/compiler-rt/lib/scudo/scudo_tls_linux.cpp b/compiler-rt/lib/scudo/scudo_tls_linux.cpp
index 1e38233f339..f2592266d06 100644
--- a/compiler-rt/lib/scudo/scudo_tls_linux.cpp
+++ b/compiler-rt/lib/scudo/scudo_tls_linux.cpp
@@ -53,8 +53,10 @@ static void initOnce() {
   initScudo();
 }
 
-void initThread() {
+void initThread(bool MinimalInit) {
   CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
+  if (UNLIKELY(MinimalInit))
+    return;
   CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
       GetPthreadDestructorIterations())), 0);
   ThreadLocalContext.init();
diff --git a/compiler-rt/lib/scudo/scudo_tls_linux.inc b/compiler-rt/lib/scudo/scudo_tls_linux.inc
index 242ee3329ea..53b80448540 100644
--- a/compiler-rt/lib/scudo/scudo_tls_linux.inc
+++ b/compiler-rt/lib/scudo/scudo_tls_linux.inc
@@ -31,14 +31,14 @@ extern THREADLOCAL ThreadState ScudoThreadState;
 __attribute__((tls_model("initial-exec")))
 extern THREADLOCAL ScudoThreadContext ThreadLocalContext;
 
-ALWAYS_INLINE void initThreadMaybe() {
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
   if (LIKELY(ScudoThreadState != ThreadNotInitialized))
     return;
-  initThread();
+  initThread(MinimalInit);
 }
 
 ALWAYS_INLINE ScudoThreadContext *getThreadContextAndLock() {
-  if (UNLIKELY(ScudoThreadState == ThreadTornDown))
+  if (UNLIKELY(ScudoThreadState != ThreadInitialized))
     return nullptr;
   return &ThreadLocalContext;
 }
diff --git a/compiler-rt/test/scudo/tsd_destruction.cpp b/compiler-rt/test/scudo/tsd_destruction.cpp
new file mode 100644
index 00000000000..1b0d0eff9f1
--- /dev/null
+++ b/compiler-rt/test/scudo/tsd_destruction.cpp
@@ -0,0 +1,42 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: %run %t 2>&1
+
+#include <locale.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Some of glibc's own thread local data is destroyed after a user's thread
+// local destructors are called, via __libc_thread_freeres. This might involve
+// calling free, as is the case for strerror_thread_freeres.
+// If there is no prior heap operation in the thread, this free would end up
+// initializing some thread specific data that would never be destroyed
+// properly, while still being deallocated when the TLS goes away. As a result,
+// a program could SEGV, usually in
+// __sanitizer::AllocatorGlobalStats::Unregister, where one of the doubly
+// linked list links would refer to a now unmapped memory area.
+
+// This test reproduces those circumstances. Success means executing without
+// a segmentation fault.
+
+const int kNumThreads = 16;
+pthread_t tid[kNumThreads];
+
+void *thread_func(void *arg) {
+  uintptr_t i = (uintptr_t)arg;
+  if ((i & 1) == 0) free(malloc(16));
+  // Calling strerror_l allows for strerror_thread_freeres to be called.
+  strerror_l(0, LC_GLOBAL_LOCALE);
+  return 0;
+}
+
+int main(int argc, char** argv) {
+  for (uintptr_t j = 0; j < 8; j++) {
+    for (uintptr_t i = 0; i < kNumThreads; i++)
+      pthread_create(&tid[i], 0, thread_func, (void *)i);
+    for (uintptr_t i = 0; i < kNumThreads; i++)
+      pthread_join(tid[i], 0);
+  }
+  return 0;
+}
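
The core of the patch is the MinimalInit shortcut: a free() coming from a thread that never touched the heap only runs the one-time global initialization, leaves the ELF TLS context untouched, and is served by the fallback cache, so no thread specific data is created after the TLS destructors have already run. Below is a minimal, self-contained C++ sketch of that pattern; it is not the Scudo implementation, and the names my_deallocate, ThreadCache, FallbackCache and the std::mutex-based fallback locking are illustrative stand-ins for the allocator's real caches and locks.

// Minimal-init sketch (illustrative, not Scudo code): a deallocation from a
// thread with no prior heap activity only runs the one-time global init and
// then uses a locked fallback cache, so no per-thread TSD is ever created.
#include <cstdlib>
#include <mutex>
#include <pthread.h>

enum ThreadState { ThreadNotInitialized, ThreadInitialized, ThreadTornDown };

static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
static thread_local ThreadState State = ThreadNotInitialized;
static thread_local int ThreadCache;  // stand-in for the per-thread cache
static std::mutex FallbackMutex;      // stand-in for the fallback cache lock
static int FallbackCache;             // stand-in for the shared fallback cache

static void initOnce() { /* one-time global allocator setup */ }

static void initThread(bool MinimalInit) {
  pthread_once(&GlobalInitialized, initOnce);
  if (MinimalInit)
    return;           // leave the TSD alone: nothing to destroy at thread exit
  ThreadCache = 0;    // full per-thread initialization
  State = ThreadInitialized;  // ThreadTornDown would be set by the TSD destructor
}

static inline void initThreadMaybe(bool MinimalInit = false) {
  if (State != ThreadNotInitialized)
    return;
  initThread(MinimalInit);
}

// Hand out the per-thread cache only when it was fully initialized and not
// torn down; every other state routes to the fallback path.
static int *getThreadCacheOrNull() {
  return State == ThreadInitialized ? &ThreadCache : nullptr;
}

void my_deallocate(void *Ptr) {
  initThreadMaybe(/*MinimalInit=*/true);
  if (int *Cache = getThreadCacheOrNull()) {
    ++*Cache;         // fast path: thread-local, no lock
  } else {
    std::lock_guard<std::mutex> L(FallbackMutex);
    ++FallbackCache;  // slow path: shared fallback cache
  }
  std::free(Ptr);
}

This mirrors the change to getThreadContextAndLock() in scudo_tls_linux.inc above: by checking for ThreadInitialized rather than ThreadTornDown, a thread that only went through the minimal path keeps using the fallback cache instead of an uninitialized local context.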

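For context on why the late free is harmful: the full (non-minimal) init registers the TSD with a pthread key whose destructor typically re-arms itself so the real teardown runs on the last destructor iteration, while glibc's __libc_thread_freeres runs after those iterations are exhausted, so a TSD first registered at that point is never destroyed. The sketch below illustrates that re-arming pattern with a plain pthread key; it is not the Scudo teardown code (which is outside this diff), and teardown()/registerTeardown() are hypothetical names.

// Re-arming TSD destructor sketch (illustrative, not the Scudo source): the
// destructor keeps pushing itself to the next iteration so the real teardown
// runs as late as the TLS machinery allows. A value first registered from a
// free() issued during __libc_thread_freeres arrives after these iterations
// are over, so its destructor would never run; that is the scenario the test
// above triggers.
#include <limits.h>   // PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>
#include <stdint.h>

static pthread_key_t Key;

static void teardown(void *Ptr) {
  uintptr_t Iteration = (uintptr_t)Ptr;
  if (Iteration > 1) {
    // Re-arm: storing a non-null value makes the TLS machinery call this
    // destructor again on the next iteration.
    pthread_setspecific(Key, (void *)(Iteration - 1));
    return;
  }
  // Last iteration: commit the per-thread cache back to the global allocator
  // and mark the thread as torn down (ThreadTornDown in the patch above).
}

static void registerTeardown() {
  // In real code the key is created once per process (e.g. under
  // pthread_once) and the value is set once per fully initialized thread.
  pthread_key_create(&Key, teardown);
  pthread_setspecific(Key, (void *)(uintptr_t)PTHREAD_DESTRUCTOR_ITERATIONS);
}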
