 libcxx/src/memory.cpp | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/libcxx/src/memory.cpp b/libcxx/src/memory.cpp
index 15e947fc0ff..b9a741b23f4 100644
--- a/libcxx/src/memory.cpp
+++ b/libcxx/src/memory.cpp
@@ -96,7 +96,35 @@ __shared_weak_count::__release_shared() _NOEXCEPT
 void
 __shared_weak_count::__release_weak() _NOEXCEPT
 {
-    if (decrement(__shared_weak_owners_) == -1)
+    // NOTE: The acquire load here is an optimization of the very
+    // common case where a shared pointer is being destructed while
+    // having no other contended references.
+    //
+    // BENEFIT: We avoid expensive atomic read-modify-write operations
+    // like XADD and STREX in the common case. Those instructions are
+    // slow and do nasty things to caches.
+    //
+    // IS THIS SAFE? Yes. During weak destruction, if we see that we
+    // are the last reference, we know that no one else is accessing
+    // us. If someone were accessing us, they would be doing so while
+    // the last shared_ptr / weak_ptr was being destructed, and that's
+    // undefined behavior anyway.
+    //
+    // If we see anything other than 0, there is possible contention
+    // and we need to use an atomicrmw primitive.
+    // The same argument doesn't apply to increment, where it is legal
+    // (though inadvisable) to share shared_ptr references between
+    // threads and have them all get copied at once. It also doesn't
+    // apply to __release_shared, because an outstanding
+    // weak_ptr::lock() could read / modify the shared count.
+    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
+    {
+        // No need to do this store, because we are about
+        // to destroy everything.
+        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
+        __on_zero_shared_weak();
+    }
+    else if (decrement(__shared_weak_owners_) == -1)
         __on_zero_shared_weak();
 }
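
For readers outside the libc++ tree, here is a minimal, self-contained sketch of the fast-path pattern the comment above describes: try a cheap acquire load of the counter first, and fall back to an atomic read-modify-write only when other references may still exist. The type `toy_counted` and its members are hypothetical names for illustration, not libc++ API; the counter follows the same "references minus one" convention as __shared_weak_owners_ (0 means exactly one reference).

// Sketch only: illustrates the acquire-load fast path, not libc++'s code.
#include <atomic>
#include <cstdio>

struct toy_counted {
    // Holds "number of references minus one": 0 means one reference.
    std::atomic<long> owners_{0};

    void add_ref() {
        // Increment always needs an RMW: other threads may legally be
        // copying their own references at the same time.
        owners_.fetch_add(1, std::memory_order_relaxed);
    }

    void release() {
        // Fast path: if we observe 0, we hold the only reference, so no
        // other thread can legally be touching the object; skip the RMW.
        if (owners_.load(std::memory_order_acquire) == 0) {
            destroy();
            return;
        }
        // Slow path: possible contention, use an atomic decrement.
        // fetch_sub returns the previous value, so a previous value of 0
        // means this call just dropped the last reference.
        if (owners_.fetch_sub(1, std::memory_order_acq_rel) == 0)
            destroy();
    }

    void destroy() { std::puts("last reference released"); }
};

int main() {
    toy_counted c;
    c.add_ref();   // two references now
    c.release();   // slow path: atomic decrement
    c.release();   // fast path: acquire load sees 0, destroy
}

As the committed comment notes, the shortcut is only valid on the release path: increment (and __release_shared, where a concurrent weak_ptr::lock() can still modify the shared count) must keep using a read-modify-write.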