author    Howard Hinnant <hhinnant@apple.com>  2010-10-18 16:40:13 +0000
committer Howard Hinnant <hhinnant@apple.com>  2010-10-18 16:40:13 +0000
commit    8166eca7b4fdea9c4c4d3e21ce1b1f6110f15253 (patch)
tree      f0545e9c9318cde698c0a94d27d96477fde4acf3 /libcxx
parent    e8a0eaafe6f0e61968bc300864d1efca3d6f7bca (diff)
Update atomic Design A spec with reference C++ implementations for the purpose of documenting the semantics of each atomic operation.
llvm-svn: 116713
Diffstat (limited to 'libcxx')
-rw-r--r--  libcxx/www/atomic_design_a.html | 146
1 file changed, 145 insertions(+), 1 deletion(-)
diff --git a/libcxx/www/atomic_design_a.html b/libcxx/www/atomic_design_a.html
index d8cc6c61241..f649b2b372f 100644
--- a/libcxx/www/atomic_design_a.html
+++ b/libcxx/www/atomic_design_a.html
@@ -71,7 +71,7 @@ type __atomic_load(const type* atomic_obj, int mem_ord);
<font color="#C80000">// type must be trivially copyable</font>
<font color="#C80000">// Behavior is defined for mem_ord = 0, 3, 5</font>
-type __atomic_store(type* atomic_obj, type desired, int mem_ord);
+void __atomic_store(type* atomic_obj, type desired, int mem_ord);
<font color="#C80000">// type must be trivially copyable</font>
<font color="#C80000">// Behavior is defined for mem_ord = [0 ... 5]</font>
@@ -160,6 +160,150 @@ translate_memory_order(int o)
return o;
}
</pre></blockquote>
+
+<p>
+Below are representative C++ implementations of all of the operations. Their
+purpose is to document the desired semantics of each operation, assuming
+<tt>memory_order_seq_cst</tt>. This is essentially the code that will be called
+if the front end calls out to compiler-rt.
+</p>
+
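+<p>
+The snippets assume a small amount of surrounding scaffolding, sketched
+below. Only <tt>some_mutex</tt> is named in the code that follows; the
+headers and declarations here are illustrative assumptions, not part of the
+proposed interface.
+</p>
+
+<blockquote><pre>
+#include &lt;mutex&gt;    <font color="#C80000">// mutex, unique_lock</font>
+#include &lt;cstring&gt;  <font color="#C80000">// memcmp, memcpy</font>
+#include &lt;cstddef&gt;  <font color="#C80000">// ptrdiff_t</font>
+
+using namespace std;
+
+<font color="#C80000">// A single global lock serializes every operation below.</font>
+static mutex some_mutex;
+</pre></blockquote>
+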
+<blockquote><pre>
+template &lt;class T&gt;
+T
+__atomic_load(T const volatile* obj)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ return *obj;
+}
+
+template &lt;class T&gt;
+void
+__atomic_store(T volatile* obj, T desr)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ *obj = desr;
+}
+
+template &lt;class T&gt;
+T
+__atomic_exchange(T volatile* obj, T desr)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ T r = *obj;
+ *obj = desr;
+ return r;
+}
+
+template &lt;class T&gt;
+bool
+__atomic_compare_exchange_strong(T volatile* obj, T* exp, T desr)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ if (std::memcmp(const_cast&lt;T*&gt;(obj), exp, sizeof(T)) == 0) <font color="#C80000">// if (*obj == *exp)</font>
+ {
+ std::memcpy(const_cast&lt;T*&gt;(obj), &amp;desr, sizeof(T)); <font color="#C80000">// *obj = desr;</font>
+ return true;
+ }
+ std::memcpy(exp, const_cast&lt;T*&gt;(obj), sizeof(T)); <font color="#C80000">// *exp = *obj;</font>
+ return false;
+}
+
+<font color="#C80000">// May spuriously return false (even if *obj == *exp)</font>
+template &lt;class T&gt;
+bool
+__atomic_compare_exchange_weak(T volatile* obj, T* exp, T desr)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ if (std::memcmp(const_cast&lt;T*&gt;(obj), exp, sizeof(T)) == 0) <font color="#C80000">// if (*obj == *exp)</font>
+ {
+ std::memcpy(const_cast&lt;T*&gt;(obj), &amp;desr, sizeof(T)); <font color="#C80000">// *obj = desr;</font>
+ return true;
+ }
+ std::memcpy(exp, const_cast&lt;T*&gt;(obj), sizeof(T)); <font color="#C80000">// *exp = *obj;</font>
+ return false;
+}
+
+template &lt;class T&gt;
+T
+__atomic_fetch_add(T volatile* obj, T operand)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ T r = *obj;
+ *obj += operand;
+ return r;
+}
+
+template &lt;class T&gt;
+T
+__atomic_fetch_sub(T volatile* obj, T operand)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ T r = *obj;
+ *obj -= operand;
+ return r;
+}
+
+template &lt;class T&gt;
+T
+__atomic_fetch_and(T volatile* obj, T operand)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ T r = *obj;
+ *obj &amp;= operand;
+ return r;
+}
+
+template &lt;class T&gt;
+T
+__atomic_fetch_or(T volatile* obj, T operand)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ T r = *obj;
+ *obj |= operand;
+ return r;
+}
+
+template &lt;class T&gt;
+T
+__atomic_fetch_xor(T volatile* obj, T operand)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ T r = *obj;
+ *obj ^= operand;
+ return r;
+}
+
+void*
+__atomic_fetch_add(void* volatile* obj, ptrdiff_t operand)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ void* r = *obj;
+ (char*&amp;)(*obj) += operand;
+ return r;
+}
+
+void*
+__atomic_fetch_sub(void* volatile* obj, ptrdiff_t operand)
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+ void* r = *obj;
+ (char*&amp;)(*obj) -= operand;
+ return r;
+}
+
+void __atomic_thread_fence()
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+}
+
+void __atomic_signal_fence()
+{
+ unique_lock&lt;mutex&gt; _(some_mutex);
+}
+</pre></blockquote>
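+
+<p>
+As a usage sketch (hypothetical, not part of the proposed interface), the
+weak compare-exchange is meant to be called in a retry loop; a spurious
+<tt>false</tt> return is harmless because the loop simply reloads the
+expected value and tries again. For example, a fetch-multiply could be built
+on the primitives above:
+</p>
+
+<blockquote><pre>
+<font color="#C80000">// Hypothetical example only; the name fetch_mult is illustrative.</font>
+template &lt;class T&gt;
+T
+fetch_mult(T volatile* obj, T operand)
+{
+    T expected = __atomic_load(obj);
+    <font color="#C80000">// On failure, expected is refreshed from *obj; retry with the new value.</font>
+    while (!__atomic_compare_exchange_weak(obj, &amp;expected, expected * operand))
+        ;
+    return expected;  <font color="#C80000">// previous value, matching the fetch_* convention</font>
+}
+</pre></blockquote>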
+
+
</div>
</body>
</html>