 compiler-rt/lib/scudo/standalone/atomic_helpers.h     |  8
 compiler-rt/lib/scudo/standalone/bytemap.h            |  6
 compiler-rt/lib/scudo/standalone/common.h             |  5
 compiler-rt/lib/scudo/standalone/fuchsia.cc           | 21
 compiler-rt/lib/scudo/standalone/linux.cc             | 53
 compiler-rt/lib/scudo/standalone/mutex.h              | 92
 compiler-rt/lib/scudo/standalone/primary32.h          | 14
 compiler-rt/lib/scudo/standalone/primary64.h          |  8
 compiler-rt/lib/scudo/standalone/quarantine.h         | 10
 compiler-rt/lib/scudo/standalone/secondary.cc         |  4
 compiler-rt/lib/scudo/standalone/secondary.h          |  2
 compiler-rt/lib/scudo/standalone/stats.h              |  8
 compiler-rt/lib/scudo/standalone/tests/map_test.cc    |  6
 compiler-rt/lib/scudo/standalone/tests/mutex_test.cc  | 55
 compiler-rt/lib/scudo/standalone/tsd.h                |  2
 compiler-rt/lib/scudo/standalone/tsd_exclusive.h      |  4
 compiler-rt/lib/scudo/standalone/tsd_shared.h         |  4
 17 files changed, 144 insertions(+), 158 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index 35d7369c107..47037d764e2 100644
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -126,6 +126,14 @@ INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
   atomic_store(A, V, memory_order_relaxed);
 }
 
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+                                                typename T::Type Cmp,
+                                                typename T::Type Xchg) {
+  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+  return Cmp;
+}
+
 } // namespace scudo
 
 #endif // SCUDO_ATOMIC_H_
diff --git a/compiler-rt/lib/scudo/standalone/bytemap.h b/compiler-rt/lib/scudo/standalone/bytemap.h
index ab009193616..caeeb2fac87 100644
--- a/compiler-rt/lib/scudo/standalone/bytemap.h
+++ b/compiler-rt/lib/scudo/standalone/bytemap.h
@@ -45,8 +45,8 @@ public:
         map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
   }
   void init() {
-    initLinkerInitialized();
     Mutex.init();
+    initLinkerInitialized();
   }
 
   void reset() {
@@ -92,7 +92,7 @@ private:
   u8 *getOrCreate(uptr Index) {
     u8 *Res = get(Index);
     if (!Res) {
-      SpinMutexLock L(&Mutex);
+      ScopedLock L(Mutex);
       if (!(Res = get(Index))) {
         Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
         atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
@@ -103,7 +103,7 @@ private:
   }
 
   atomic_uptr *Level1Map;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 };
 
 } // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
index 313f89c9cc6..c015d1ca566 100644
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -115,11 +115,12 @@ INLINE void yieldProcessor(u8 Count) {
 
 // Platform specific functions.
 
-void yieldPlatform();
-
 extern uptr PageSizeCached;
 uptr getPageSizeSlow();
 INLINE uptr getPageSizeCached() {
+  // Bionic uses a hardcoded value.
+  if (SCUDO_ANDROID)
+    return 4096U;
   if (LIKELY(PageSizeCached))
     return PageSizeCached;
   return getPageSizeSlow();
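Note on the new helper above: atomic_compare_exchange returns the value the CAS observed rather than a success flag, so callers test success by comparing the result against the expected value. A minimal sketch of the same contract (illustrative only, not part of the patch; std::atomic stands in for scudo's atomic types):

#include <atomic>
#include <cstdint>

// Returns the pre-CAS value: on failure, compare_exchange_strong writes the
// observed value back into Cmp; on success, Cmp already equals it.
static uint32_t compareExchange(std::atomic<uint32_t> &A, uint32_t Cmp,
                                uint32_t Xchg) {
  A.compare_exchange_strong(Cmp, Xchg, std::memory_order_acquire);
  return Cmp;
}

static std::atomic<uint32_t> Word{0};

// Mirrors how HybridMutex::tryLock uses it: succeed only if the word was 0.
bool tryLockSketch() { return compareExchange(Word, 0U, 1U) == 0U; }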
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.cc b/compiler-rt/lib/scudo/standalone/fuchsia.cc
index cf032381938..896d346e7e7 100644
--- a/compiler-rt/lib/scudo/standalone/fuchsia.cc
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.cc
@@ -23,11 +23,6 @@
 
 namespace scudo {
 
-void yieldPlatform() {
-  const zx_status_t Status = _zx_nanosleep(0);
-  CHECK_EQ(Status, ZX_OK);
-}
-
 uptr getPageSize() { return PAGE_SIZE; }
 
 void NORETURN die() { __builtin_trap(); }
@@ -155,18 +150,20 @@ const char *getEnv(const char *Name) { return getenv(Name); }
 
 // Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
 // because the Fuchsia implementation of sync_mutex_t has clang thread safety
 // annotations. Were we to apply proper capability annotations to the top level
-// BlockingMutex class itself, they would not be needed. As it stands, the
+// HybridMutex class itself, they would not be needed. As it stands, the
 // thread analysis thinks that we are locking the mutex and accidentally leaving
 // it locked on the way out.
-void BlockingMutex::lock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
   // Size and alignment must be compatible between both types.
-  COMPILER_CHECK(sizeof(sync_mutex_t) <= sizeof(OpaqueStorage));
-  COMPILER_CHECK(!(alignof(decltype(OpaqueStorage)) % alignof(sync_mutex_t)));
-  sync_mutex_lock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+  return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_lock(&M);
 }
 
-void BlockingMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
-  sync_mutex_unlock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_unlock(&M);
 }
 
 u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
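The comment above notes that capability annotations on the mutex class itself would make the __TA_NO_THREAD_SAFETY_ANALYSIS escape hatch unnecessary. A sketch of what that would look like with clang's thread safety attributes (hypothetical names, not scudo's code):

// Annotating the class as a capability lets -Wthread-safety track lock state
// across lock()/unlock() instead of flagging them as acquire-without-release.
class __attribute__((capability("mutex"))) AnnotatedMutex {
public:
  void lock() __attribute__((acquire_capability()));
  void unlock() __attribute__((release_capability()));
  bool tryLock() __attribute__((try_acquire_capability(true)));
};

AnnotatedMutex M;
int Counter __attribute__((guarded_by(M))) = 0;

void increment() {
  M.lock();
  Counter++; // OK: the analysis sees the capability held here.
  M.unlock();
}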
diff --git a/compiler-rt/lib/scudo/standalone/linux.cc b/compiler-rt/lib/scudo/standalone/linux.cc
index bfda03239f4..049477bba8b 100644
--- a/compiler-rt/lib/scudo/standalone/linux.cc
+++ b/compiler-rt/lib/scudo/standalone/linux.cc
@@ -37,8 +37,6 @@
 
 namespace scudo {
 
-void yieldPlatform() { sched_yield(); }
-
 uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
 
 void NORETURN die() { abort(); }
@@ -46,15 +44,18 @@ void NORETURN die() { abort(); }
 void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
           UNUSED MapPlatformData *Data) {
   int MmapFlags = MAP_PRIVATE | MAP_ANON;
-  if (Flags & MAP_NOACCESS)
+  int MmapProt;
+  if (Flags & MAP_NOACCESS) {
     MmapFlags |= MAP_NORESERVE;
+    MmapProt = PROT_NONE;
+  } else {
+    MmapProt = PROT_READ | PROT_WRITE;
+  }
   if (Addr) {
     // Currently no scenario for a noaccess mapping with a fixed address.
     DCHECK_EQ(Flags & MAP_NOACCESS, 0);
     MmapFlags |= MAP_FIXED;
   }
-  const int MmapProt =
-      (Flags & MAP_NOACCESS) ? PROT_NONE : PROT_READ | PROT_WRITE;
   void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
   if (P == MAP_FAILED) {
     if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
@@ -84,22 +85,34 @@ void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
 // Calling getenv should be fine (c)(tm) at any time.
 const char *getEnv(const char *Name) { return getenv(Name); }
 
-void BlockingMutex::lock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  if (atomic_exchange(M, MtxLocked, memory_order_acquire) == MtxUnlocked)
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+  return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+  u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+  if (V == Unlocked)
     return;
-  while (atomic_exchange(M, MtxSleeping, memory_order_acquire) != MtxUnlocked)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAIT_PRIVATE, MtxSleeping, nullptr, nullptr, 0);
+  if (V != Sleeping)
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  while (V != Unlocked) {
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+            nullptr, nullptr, 0);
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  }
 }
 
-void BlockingMutex::unlock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  const u32 V = atomic_exchange(M, MtxUnlocked, memory_order_release);
-  DCHECK_NE(V, MtxUnlocked);
-  if (V == MtxSleeping)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
+void HybridMutex::unlock() {
+  if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+    atomic_store(&M, Unlocked, memory_order_release);
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+            nullptr, nullptr, 0);
+  }
 }
 
 u64 getMonotonicTime() {
@@ -141,8 +154,8 @@ bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
 }
 
 void outputRaw(const char *Buffer) {
-  static StaticSpinMutex Mutex;
-  SpinMutexLock L(&Mutex);
+  static HybridMutex Mutex;
+  ScopedLock L(Mutex);
   write(2, Buffer, strlen(Buffer));
 }
 
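For readers unfamiliar with the futex scheme referenced above (Drepper's "Futexes Are Tricky"), here is a standalone, Linux-only sketch of the same three-state protocol using std::atomic and the raw futex syscall. It mirrors the patch's logic but is illustrative only:

#include <atomic>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

enum State : uint32_t { Unlocked = 0, Locked = 1, Sleeping = 2 };

static std::atomic<uint32_t> Word{Unlocked};

void lockSlowSketch() {
  // Try the uncontended path once more, then advertise a sleeping waiter.
  uint32_t V = Unlocked;
  if (Word.compare_exchange_strong(V, Locked, std::memory_order_acquire))
    return;
  if (V != Sleeping)
    V = Word.exchange(Sleeping, std::memory_order_acquire);
  while (V != Unlocked) {
    // Block only while the word still reads Sleeping; retake with exchange so
    // any state we displace is remembered as "contended".
    syscall(SYS_futex, &Word, FUTEX_WAIT_PRIVATE, Sleeping, nullptr, nullptr,
            0);
    V = Word.exchange(Sleeping, std::memory_order_acquire);
  }
}

void unlockSketch() {
  // Locked(1) - 1 == Unlocked(0): fast path, nobody to wake. If the old value
  // was Sleeping(2), fully release the word and wake one waiter.
  if (Word.fetch_sub(1U, std::memory_order_release) != Locked) {
    Word.store(Unlocked, std::memory_order_release);
    syscall(SYS_futex, &Word, FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
  }
}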
diff --git a/compiler-rt/lib/scudo/standalone/mutex.h b/compiler-rt/lib/scudo/standalone/mutex.h
index 58bc1589853..b6dc9188d34 100644
--- a/compiler-rt/lib/scudo/standalone/mutex.h
+++ b/compiler-rt/lib/scudo/standalone/mutex.h
@@ -12,82 +12,62 @@
 
 #include "atomic_helpers.h"
 #include "common.h"
 
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
 namespace scudo {
 
-class StaticSpinMutex {
+class HybridMutex {
 public:
-  void init() { atomic_store_relaxed(&State, 0); }
-
-  void lock() {
+  void init() { memset(this, 0, sizeof(*this)); }
+  bool tryLock();
+  NOINLINE void lock() {
     if (tryLock())
       return;
-    lockSlow();
-  }
-
-  bool tryLock() {
-    return atomic_exchange(&State, 1, memory_order_acquire) == 0;
-  }
-
-  void unlock() { atomic_store(&State, 0, memory_order_release); }
-
-  void checkLocked() { CHECK_EQ(atomic_load_relaxed(&State), 1); }
-
-private:
-  atomic_u8 State;
-
-  void NOINLINE lockSlow() {
-    for (u32 I = 0;; I++) {
-      if (I < 10)
-        yieldProcessor(10);
-      else
-        yieldPlatform();
-      if (atomic_load_relaxed(&State) == 0 &&
-          atomic_exchange(&State, 1, memory_order_acquire) == 0)
+      // The compiler may try to fully unroll the loop, ending up in a
+      // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+      // is large, ugly and unneeded, a compact loop is better for our purpose
+      // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+    for (u8 I = 0U; I < NumberOfTries; I++) {
+      yieldProcessor(NumberOfYields);
+      if (tryLock())
        return;
     }
+    lockSlow();
   }
-};
-
-class SpinMutex : public StaticSpinMutex {
-public:
-  SpinMutex() { init(); }
+  void unlock();
 
 private:
-  SpinMutex(const SpinMutex &) = delete;
-  void operator=(const SpinMutex &) = delete;
-};
+  static constexpr u8 NumberOfTries = 10U;
+  static constexpr u8 NumberOfYields = 10U;
 
-class BlockingMutex {
-public:
-  explicit constexpr BlockingMutex(LinkerInitialized) : OpaqueStorage{} {}
-  BlockingMutex() { memset(this, 0, sizeof(*this)); }
-  void lock();
-  void unlock();
-  void checkLocked() {
-    atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-    CHECK_NE(MtxUnlocked, atomic_load_relaxed(M));
-  }
+#if SCUDO_LINUX
+  atomic_u32 M;
+#elif SCUDO_FUCHSIA
+  sync_mutex_t M;
+#endif
 
-private:
-  enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-  uptr OpaqueStorage[1];
+  void lockSlow();
 };
 
-template <typename MutexType> class GenericScopedLock {
+class ScopedLock {
 public:
-  explicit GenericScopedLock(MutexType *M) : Mutex(M) { Mutex->lock(); }
-  ~GenericScopedLock() { Mutex->unlock(); }
+  explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+  ~ScopedLock() { Mutex.unlock(); }
 
 private:
-  MutexType *Mutex;
+  HybridMutex &Mutex;
 
-  GenericScopedLock(const GenericScopedLock &) = delete;
-  void operator=(const GenericScopedLock &) = delete;
+  ScopedLock(const ScopedLock &) = delete;
+  void operator=(const ScopedLock &) = delete;
 };
 
-typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-
 } // namespace scudo
 
 #endif // SCUDO_MUTEX_H_
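Usage-wise, the new interface collapses the old SpinMutexLock/BlockingMutexLock split into a single pattern. A hypothetical sketch (function names invented for illustration):

scudo::HybridMutex Mutex; // init() must run before first use.

void guardedWork() {
  // lock() spins up to NumberOfTries rounds of yieldProcessor pauses, then
  // falls back to the blocking lockSlow(); unlock() runs in the destructor.
  scudo::ScopedLock L(Mutex);
  // ... mutate shared state ...
}

void opportunisticWork() {
  if (!Mutex.tryLock()) // Single CAS, never blocks.
    return;
  // ... do work only if the lock happened to be free ...
  Mutex.unlock();
}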
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index e89409d10f4..eade88a4567 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -97,7 +97,7 @@ public:
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     TransferBatch *B = Sci->FreeList.front();
     if (B)
       Sci->FreeList.pop_front();
@@ -115,7 +115,7 @@ public:
     DCHECK_LT(ClassId, NumClasses);
     DCHECK_GT(B->getCount(), 0);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     Sci->FreeList.push_front(B);
     Sci->Stats.PushedBlocks += B->getCount();
     if (Sci->CanRelease)
@@ -164,7 +164,7 @@ public:
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       SizeClassInfo *Sci = getSizeClassInfo(I);
-      BlockingMutexLock L(&Sci->Mutex);
+      ScopedLock L(Sci->Mutex);
       releaseToOSMaybe(Sci, I, /*Force=*/true);
     }
   }
@@ -192,7 +192,7 @@ private:
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     SizeClassStats Stats;
     bool CanRelease;
@@ -217,7 +217,7 @@ private:
     const uptr MapEnd = MapBase + MapSize;
     uptr Region = MapBase;
     if (isAligned(Region, RegionSize)) {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
       if (NumberOfStashedRegions < MaxStashedRegions)
         RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
       else
@@ -237,7 +237,7 @@ private:
     DCHECK_LT(ClassId, NumClasses);
     uptr Region = 0;
     {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
       if (NumberOfStashedRegions > 0)
         Region = RegionsStash[--NumberOfStashedRegions];
     }
@@ -389,7 +389,7 @@ private:
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
-  StaticSpinMutex RegionsStashMutex;
+  HybridMutex RegionsStashMutex;
   uptr NumberOfStashedRegions;
   uptr RegionsStash[MaxStashedRegions];
 };
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 9c50e78d3f6..89a43cce3b1 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -100,7 +100,7 @@ public:
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     TransferBatch *B = Region->FreeList.front();
     if (B)
       Region->FreeList.pop_front();
@@ -117,7 +117,7 @@ public:
   void pushBatch(uptr ClassId, TransferBatch *B) {
     DCHECK_GT(B->getCount(), 0);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     Region->FreeList.push_front(B);
     Region->Stats.PushedBlocks += B->getCount();
     if (Region->CanRelease)
@@ -168,7 +168,7 @@ public:
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       RegionInfo *Region = getRegionInfo(I);
-      BlockingMutexLock L(&Region->Mutex);
+      ScopedLock L(Region->Mutex);
       releaseToOSMaybe(Region, I, /*Force=*/true);
     }
   }
@@ -194,7 +194,7 @@ private:
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     RegionStats Stats;
     bool CanRelease;
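The primary's region stash above illustrates the locking discipline the change preserves: hold RegionsStashMutex only around the array accesses, and keep the expensive map() call outside the critical section. A simplified sketch with stand-in declarations (not the actual allocator code):

#include <cstddef>
#include <cstdint>

constexpr size_t MaxStashedRegions = 4;
scudo::HybridMutex RegionsStashMutex;
size_t NumberOfStashedRegions = 0;
uintptr_t RegionsStash[MaxStashedRegions];

uintptr_t mapNewRegion(); // Stand-in for the slow mmap-based path.

uintptr_t takeRegion() {
  uintptr_t Region = 0;
  {
    scudo::ScopedLock L(RegionsStashMutex); // Critical section stays tiny.
    if (NumberOfStashedRegions > 0)
      Region = RegionsStash[--NumberOfStashedRegions];
  }
  // The mmap work happens unlocked, so contention on the stash stays low.
  return Region ? Region : mapNewRegion();
}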
diff --git a/compiler-rt/lib/scudo/standalone/quarantine.h b/compiler-rt/lib/scudo/standalone/quarantine.h
index ec985da7699..bac36e01c1d 100644
--- a/compiler-rt/lib/scudo/standalone/quarantine.h
+++ b/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -202,7 +202,7 @@ public:
 
   void NOINLINE drain(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     if (Cache.getSize() > getMaxSize() && RecyleMutex.tryLock())
@@ -211,7 +211,7 @@ public:
 
   void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     RecyleMutex.lock();
@@ -227,9 +227,9 @@ public:
 
 private:
   // Read-only data.
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex CacheMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
   CacheT Cache;
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex RecyleMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecyleMutex;
   atomic_uptr MinSize;
   atomic_uptr MaxSize;
   alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
@@ -238,7 +238,7 @@ private:
     CacheT Tmp;
     Tmp.init();
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       // Go over the batches and merge partially filled ones to
       // save some memory, otherwise batches themselves (since the memory used
       // by them is counted against quarantine limit) can overcome the actual
diff --git a/compiler-rt/lib/scudo/standalone/secondary.cc b/compiler-rt/lib/scudo/standalone/secondary.cc
index c0de268bef9..75f9171f161 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.cc
+++ b/compiler-rt/lib/scudo/standalone/secondary.cc
@@ -72,7 +72,7 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
   H->BlockEnd = CommitBase + CommitSize;
   H->Data = Data;
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (!Tail) {
       Tail = H;
     } else {
@@ -95,7 +95,7 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
 void MapAllocator::deallocate(void *Ptr) {
   LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     LargeBlock::Header *Prev = H->Prev;
     LargeBlock::Header *Next = H->Next;
     if (Prev) {
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 016928cc60d..9124e2a41c6 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -82,7 +82,7 @@ public:
   }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   LargeBlock::Header *Tail;
   uptr AllocatedBytes;
   uptr FreedBytes;
diff --git a/compiler-rt/lib/scudo/standalone/stats.h b/compiler-rt/lib/scudo/standalone/stats.h
index 7fb9c9ed6e1..12436756226 100644
--- a/compiler-rt/lib/scudo/standalone/stats.h
+++ b/compiler-rt/lib/scudo/standalone/stats.h
@@ -65,7 +65,7 @@ public:
   }
 
   void link(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Next = Next;
     S->Prev = this;
     Next->Prev = S;
@@ -73,7 +73,7 @@ public:
   }
 
   void unlink(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Prev->Next = S->Next;
     S->Next->Prev = S->Prev;
     for (uptr I = 0; I < StatCount; I++)
@@ -82,7 +82,7 @@ public:
 
   void get(uptr *S) const {
     memset(S, 0, StatCount * sizeof(uptr));
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     const LocalStats *Stats = this;
     for (;;) {
       for (uptr I = 0; I < StatCount; I++)
@@ -97,7 +97,7 @@ public:
   }
 
 private:
-  mutable StaticSpinMutex Mutex;
+  mutable HybridMutex Mutex;
 };
 
 } // namespace scudo
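A side note on the alignas(SCUDO_CACHE_LINE_SIZE) in the quarantine above: CacheMutex and RecyleMutex are hot on different paths, so each keeps its own cache line to avoid false sharing. A sketch of the layout effect, assuming a 64-byte line:

#include <cstddef>

// Byte arrays stand in for the mutexes; the point is the layout, not the type.
struct QuarantineLayoutSketch {
  alignas(64) unsigned char CacheMutex[4];  // Own line: drain/transfer path.
  alignas(64) unsigned char RecyleMutex[4]; // Own line: recycling path.
};
static_assert(offsetof(QuarantineLayoutSketch, RecyleMutex) % 64 == 0,
              "each mutex starts on its own cache line");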
diff --git a/compiler-rt/lib/scudo/standalone/tests/map_test.cc b/compiler-rt/lib/scudo/standalone/tests/map_test.cc
index 7c726e94735..a645e2365b2 100644
--- a/compiler-rt/lib/scudo/standalone/tests/map_test.cc
+++ b/compiler-rt/lib/scudo/standalone/tests/map_test.cc
@@ -11,9 +11,15 @@
 #include "gtest/gtest.h"
 
 #include <string.h>
+#include <unistd.h>
 
 static const char *MappingName = "scudo:test";
 
+TEST(ScudoMapTest, PageSize) {
+  EXPECT_EQ(scudo::getPageSizeCached(),
+            static_cast<scudo::uptr>(getpagesize()));
+}
+
 TEST(ScudoMapTest, MapNoAccessUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
   scudo::MapPlatformData Data = {};
diff --git a/compiler-rt/lib/scudo/standalone/tests/mutex_test.cc b/compiler-rt/lib/scudo/standalone/tests/mutex_test.cc
index ce33db58b45..930838c5e27 100644
--- a/compiler-rt/lib/scudo/standalone/tests/mutex_test.cc
+++ b/compiler-rt/lib/scudo/standalone/tests/mutex_test.cc
@@ -12,15 +12,15 @@
 
 #include <string.h>
 
-template <typename MutexType> class TestData {
+class TestData {
 public:
-  explicit TestData(MutexType *M) : Mutex(M) {
+  explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
     for (scudo::u32 I = 0; I < Size; I++)
       Data[I] = 0;
   }
 
   void write() {
-    Lock L(Mutex);
+    scudo::ScopedLock L(Mutex);
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
@@ -29,14 +29,14 @@ public:
   }
 
   void tryWrite() {
-    if (!Mutex->tryLock())
+    if (!Mutex.tryLock())
       return;
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
       Data[I]++;
     }
-    Mutex->unlock();
+    Mutex.unlock();
   }
 
   void backoff() {
@@ -48,10 +48,9 @@ public:
   }
 
 private:
-  typedef scudo::GenericScopedLock<MutexType> Lock;
   static const scudo::u32 Size = 64U;
   typedef scudo::u64 T;
-  MutexType *Mutex;
+  scudo::HybridMutex &Mutex;
   ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size];
 };
 
@@ -62,8 +61,8 @@ const scudo::u32 NumberOfIterations = 4 * 1024;
 const scudo::u32 NumberOfIterations = 16 * 1024;
 #endif
 
-template <typename MutexType> static void *lockThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *lockThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
   for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->write();
     Data->backoff();
@@ -71,8 +70,8 @@ template <typename MutexType> static void *lockThread(void *Param) {
   return 0;
 }
 
-template <typename MutexType> static void *tryThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *tryThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->tryWrite();
     Data->backoff();
@@ -80,42 +79,24 @@ template <typename MutexType> static void *tryThread(void *Param) {
   return 0;
 }
 
-template <typename MutexType> static void checkLocked(MutexType *M) {
-  scudo::GenericScopedLock<MutexType> L(M);
-  M->checkLocked();
-}
-
-TEST(ScudoMutexTest, SpinMutex) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, Mutex) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::SpinMutex>, &Data);
+    pthread_create(&Threads[I], 0, lockThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
 }
 
-TEST(ScudoMutexTest, SpinMutexTry) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, MutexTry) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
-  pthread_t Threads[NumberOfThreads];
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, tryThread<scudo::SpinMutex>, &Data);
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_join(Threads[I], 0);
-}
-
-TEST(ScudoMutexTest, BlockingMutex) {
-  scudo::u64 MutexMemory[1024] = {};
-  scudo::BlockingMutex *M =
-      new (MutexMemory) scudo::BlockingMutex(scudo::LINKER_INITIALIZED);
-  TestData<scudo::BlockingMutex> Data(M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::BlockingMutex>, &Data);
+    pthread_create(&Threads[I], 0, tryThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
-  checkLocked(M);
 }
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
index 10cb83f94fd..f24ff01960f 100644
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -57,7 +57,7 @@ template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
   INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   atomic_uptr Precedence;
 };
diff --git a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
index ce9affcb439..18cce1c56af 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -60,7 +60,7 @@ template <class Allocator> struct TSDRegistryExT {
 
 private:
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -82,7 +82,7 @@ private:
   pthread_key_t PThreadKey;
   bool Initialized;
   TSD<Allocator> *FallbackTSD;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   static THREADLOCAL ThreadState State;
   static THREADLOCAL TSD<Allocator> ThreadTSD;
diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h
index 48747f69f98..0f0a83a3eed 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -94,7 +94,7 @@ private:
   }
 
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -152,7 +152,7 @@ private:
   u32 NumberOfCoPrimes;
   u32 CoPrimes[MaxTSDCount];
   bool Initialized;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 #if SCUDO_LINUX && !SCUDO_ANDROID
   static THREADLOCAL TSD<Allocator> *ThreadTSD;
 #endif
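Both TSD registries guard their one-time setup the same way; a simplified sketch of that initOnceMaybe pattern with stand-in state (not the actual registry code):

scudo::HybridMutex Mutex;
bool Initialized = false;

void initOnceMaybe() {
  scudo::ScopedLock L(Mutex); // Serialize racing initializers.
  if (Initialized)            // Another thread already won the race.
    return;
  // ... one-time setup, e.g. pthread_key_create and fallback TSD init ...
  Initialized = true;
}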

