[cfe-commits] [libcxx] r116713 - /libcxx/trunk/www/atomic_design_a.html

Howard Hinnant hhinnant at apple.com
Mon Oct 18 09:40:13 PDT 2010


Author: hhinnant
Date: Mon Oct 18 11:40:13 2010
New Revision: 116713

URL: http://llvm.org/viewvc/llvm-project?rev=116713&view=rev
Log:
Update atomic Design A spec with reference C++ implementations for the purpose of documenting the semantics of each atomic operation.

Modified:
    libcxx/trunk/www/atomic_design_a.html

Modified: libcxx/trunk/www/atomic_design_a.html
URL: http://llvm.org/viewvc/llvm-project/libcxx/trunk/www/atomic_design_a.html?rev=116713&r1=116712&r2=116713&view=diff
==============================================================================
--- libcxx/trunk/www/atomic_design_a.html (original)
+++ libcxx/trunk/www/atomic_design_a.html Mon Oct 18 11:40:13 2010
@@ -71,7 +71,7 @@
 
 <font color="#C80000">// type must be trivially copyable</font>
 <font color="#C80000">// Behavior is defined for mem_ord = 0, 3, 5</font>
-type __atomic_store(type* atomic_obj, type desired, int mem_ord);
+void __atomic_store(type* atomic_obj, type desired, int mem_ord);
 
 <font color="#C80000">// type must be trivially copyable</font>
 <font color="#C80000">// Behavior is defined for mem_ord = [0 ... 5]</font>
@@ -160,6 +160,150 @@
     return o;
 }
 </pre></blockquote>
+
+<p>
+Below are representative C++ implementations of all of the operations.  Their
+purpose is to document the desired semantics of each operation, assuming
+<tt>memory_order_seq_cst</tt>.  This is essentially the code that will be called
+if the front end calls out to compiler-rt.
+</p>
+
+<blockquote><pre>
+template <class T>
+T
+__atomic_load(T const volatile* obj)
+{
+    unique_lock<mutex> _(some_mutex);
+    return *obj;
+}
+
+template <class T>
+void
+__atomic_store(T volatile* obj, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    *obj = desr;
+}
+
+template <class T>
+T
+__atomic_exchange(T volatile* obj, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj = desr;
+    return r;
+}
+
+template <class T>
+bool
+__atomic_compare_exchange_strong(T volatile* obj, T* exp, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) <font color="#C80000">// if (*obj == *exp)</font>
+    {
+        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); <font color="#C80000">// *obj = desr;</font>
+        return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); <font color="#C80000">// *exp = *obj;</font>
+    return false;
+}
+
+<font color="#C80000">// May spuriously return false (even if *obj == *exp)</font>
+template <class T>
+bool
+__atomic_compare_exchange_weak(T volatile* obj, T* exp, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) <font color="#C80000">// if (*obj == *exp)</font>
+    {
+        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); <font color="#C80000">// *obj = desr;</font>
+        return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); <font color="#C80000">// *exp = *obj;</font>
+    return false;
+}
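+
+<font color="#C80000">// Illustrative only (not one of the intrinsics): because the weak form</font>
+<font color="#C80000">// may fail spuriously, callers use it in a retry loop such as this</font>
+<font color="#C80000">// sketch, which atomically increments *obj.</font>
+template <class T>
+void
+__example_increment(T volatile* obj)
+{
+    T expected = __atomic_load(obj);
+    while (!__atomic_compare_exchange_weak(obj, &expected, T(expected + 1)))
+        ;  <font color="#C80000">// on failure, expected was reloaded from *obj</font>
+}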
+
+template <class T>
+T
+__atomic_fetch_add(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj += operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_sub(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj -= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_and(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj &= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_or(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj |= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_xor(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj ^= operand;
+    return r;
+}
+
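+<font color="#C80000">// Arithmetic on a void* is performed in units of bytes; the stored</font>
+<font color="#C80000">// pointer is viewed as a char* for the adjustment.</font>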
+void*
+__atomic_fetch_add(void* volatile* obj, ptrdiff_t operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) += operand;
+    return r;
+}
+
+void*
+__atomic_fetch_sub(void* volatile* obj, ptrdiff_t operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) -= operand;
+    return r;
+}
+
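+<font color="#C80000">// Every atomic operation above acquires some_mutex, so acquiring and</font>
+<font color="#C80000">// releasing that same mutex is enough to give these fences their</font>
+<font color="#C80000">// sequentially consistent ordering in this reference implementation.</font>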
+void __atomic_thread_fence()
+{
+    unique_lock<mutex> _(some_mutex);
+}
+
+void __atomic_signal_fence()
+{
+    unique_lock<mutex> _(some_mutex);
+}
+</pre></blockquote>
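+
+<p>
+For example (illustrative only), a front end lowering a sequentially
+consistent store through these interfaces would call the three-argument
+<tt>__atomic_store</tt> intrinsic shown near the top of this document,
+passing the integer value of <tt>memory_order_seq_cst</tt> (5) as the last
+argument; when the operation is not generated inline, the call is satisfied
+by the locked reference implementation above:
+</p>
+
+<blockquote><pre>
+int i = 0;
+<font color="#C80000">// seq_cst store of 1 into i; mem_ord 5 == memory_order_seq_cst</font>
+__atomic_store(&i, 1, 5);
+</pre></blockquote>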
+
+
 </div>
 </body>
 </html>




