//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

#include <pthread.h>

namespace scudo {

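// A thread's exclusive TSD moves through these states: NotInitialized until
// the thread first enters the allocator, Initialized once initThread() has
// set up ThreadTSD, and TornDown after the pthread destructor has committed
// the local cache back to the allocator.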
enum class ThreadState : u8 {
  NotInitialized = 0,
  Initialized,
  TornDown,
};

template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
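  // Assumes the registry lives in zero-initialized memory (a global), so its
  // members do not need to be cleared first; init() below does the memset for
  // instances that are not linker-initialized. The fallback TSD is carved out
  // of a raw internal mapping rather than allocated, since the allocator is
  // not usable yet at this point.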
  void initLinkerInitialized(Allocator *Instance) {
    Instance->initLinkerInitialized();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
        map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
    FallbackTSD->initLinkerInitialized(Instance);
    Initialized = true;
  }
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

  void unmapTestOnly() {
    unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
  }

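  // Cheap check meant to be inlined at the allocator's entry points; the
  // out-of-line slow path only runs the first time a given thread gets here.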
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

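  // Returns the thread's exclusive TSD when it is usable, in which case no
  // unlocking is required. Otherwise (thread not fully initialized, or the
  // registry disabled), the shared fallback TSD is locked and returned, and
  // the caller is responsible for unlocking it. A sketch of the expected
  // caller pattern (hypothetical code; the real callers live in the combined
  // allocator):
  //   bool UnlockRequired;
  //   TSD<Allocator> *TSD = Registry.getTSDAndLock(&UnlockRequired);
  //   TSD->Cache.deallocate(ClassId, Ptr);
  //   if (UnlockRequired)
  //     TSD->unlock();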
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    DCHECK(FallbackTSD);
    FallbackTSD->lock();
    *UnlockRequired = true;
    return FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback TSD
  // and force all threads to attempt to use it instead of their local one.
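  // The release store to Disabled pairs with the acquire load in
  // getTSDAndLock(): a thread that observes the flag set also sees the
  // fallback TSD locked, and will block on it until enable() is called.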
  void disable() {
    FallbackTSD->lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD->unlock();
  }

private:
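  // Lazily performs the global initialization under Mutex so that whichever
  // thread enters the allocator first sets up the shared state exactly once.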
  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched. The fallback structure
  // will be used instead.
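  // The pthread key's value is set to the Allocator instance itself, which is
  // how teardownThread() recovers it through its Ptr argument.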
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.initLinkerInitialized(Instance);
    State = ThreadState::Initialized;
  }

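  // PThreadKey registers teardownThread() as the per-thread destructor;
  // Initialized, Disabled, FallbackTSD and Mutex are shared by all threads.
  // State and ThreadTSD hold the per-thread exclusive data; they are static
  // THREADLOCAL members so that teardownThread() can reach them without a
  // registry pointer.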
  pthread_key_t PThreadKey;
  bool Initialized;
  atomic_u8 Disabled;
  TSD<Allocator> *FallbackTSD;
  HybridMutex Mutex;
  static THREADLOCAL ThreadState State;
  static THREADLOCAL TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

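// Thread-local statics are zero-initialized, so State starts out as
// NotInitialized (deliberately assigned the value 0 above) and ThreadTSD is
// blank until initThread() fills it in.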
template <class Allocator>
THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;

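// Thread-specific-data destructor, registered with pthread_key_create() in
// initLinkerInitialized(). Ptr is the value last passed to
// pthread_setspecific() for PThreadKey, i.e. the Allocator instance.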
template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of up to PTHREAD_DESTRUCTOR_ITERATIONS
  // passes. We want to run last, since other destructors might call free()
  // and the like, so we keep re-registering this destructor until the final
  // iteration, at which point we drain the quarantine and commit back the
  // local cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_