[libc-commits] [libc] [libc][cpp] add `atomic_signal_fence` (PR #82138)

Schrodinger ZHU Yifan via libc-commits libc-commits at lists.llvm.org
Tue Feb 20 07:57:06 PST 2024


https://github.com/SchrodingerZhu updated https://github.com/llvm/llvm-project/pull/82138

From 5c995fd00a1c1fbdbdde99ec9db145b81e02e99f Mon Sep 17 00:00:00 2001
From: Schrodinger ZHU Yifan <yifanzhu at rochester.edu>
Date: Sat, 17 Feb 2024 18:30:57 -0500
Subject: [PATCH 1/2] [libc][cpp] add atomic_signal_fence

---
 libc/src/__support/CPP/atomic.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/libc/src/__support/CPP/atomic.h b/libc/src/__support/CPP/atomic.h
index 1c4478dfeab6de..61e2742661f21a 100644
--- a/libc/src/__support/CPP/atomic.h
+++ b/libc/src/__support/CPP/atomic.h
@@ -161,6 +161,21 @@ LIBC_INLINE void atomic_thread_fence(MemoryOrder mem_ord) {
 #endif
 }
 
+// Establishes memory synchronization ordering of non-atomic and relaxed atomic
+// accesses, as instructed by mem_ord, between a thread and a signal handler
+// executed on the same thread. This is equivalent to atomic_thread_fence,
+// except that no instructions for memory ordering are issued. Only reordering
+// of the instructions by the compiler is suppressed, as mem_ord instructs.
+LIBC_INLINE void atomic_signal_fence(MemoryOrder mem_ord) {
+#if __has_builtin(__atomic_signal_fence)
+  __atomic_signal_fence(int(mem_ord));
+#else
+  // If the builtin is unavailable, use inline asm as a full compiler barrier.
+  (void)mem_ord;
+  asm volatile("" ::: "memory");
+#endif
+}
+
 } // namespace cpp
 } // namespace LIBC_NAMESPACE
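
For reference, the thread/signal-handler synchronization described in the comment above looks like this with the standard <atomic> facilities. This is only a minimal sketch, not part of the patch; the signal choice and the names `payload`, `ready`, `sink`, and `handler` are illustrative:

    #include <atomic>
    #include <csignal>

    // Non-atomic payload written by the main flow of control and read from a
    // signal handler that runs on the same thread.
    static int payload = 0;
    static std::atomic<int> ready{0};
    static volatile int sink = 0;

    extern "C" void handler(int) {
      if (ready.load(std::memory_order_relaxed) == 1) {
        // Acquire signal fence: pairs with the release fence in main, so the
        // compiler may not hoist the read of `payload` above the flag load.
        std::atomic_signal_fence(std::memory_order_acquire);
        sink = payload; // guaranteed to observe 42 here
      }
    }

    int main() {
      std::signal(SIGINT, handler);
      payload = 42;
      // Release signal fence: the compiler may not sink the `payload` write
      // below the relaxed flag store; no hardware fence instruction is issued.
      std::atomic_signal_fence(std::memory_order_release);
      ready.store(1, std::memory_order_relaxed);
      std::raise(SIGINT); // deliver the signal on this same thread
    }
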
 

From 92296618a8572e1ce247bd57174e1479505626bb Mon Sep 17 00:00:00 2001
From: Schrodinger ZHU Yifan <yifanzhu at rochester.edu>
Date: Tue, 20 Feb 2024 10:56:52 -0500
Subject: [PATCH 2/2] address cr: remove c-style code

---
 libc/src/__support/CPP/atomic.h | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/libc/src/__support/CPP/atomic.h b/libc/src/__support/CPP/atomic.h
index 61e2742661f21a..c10d06c04bccad 100644
--- a/libc/src/__support/CPP/atomic.h
+++ b/libc/src/__support/CPP/atomic.h
@@ -150,14 +150,13 @@ template <typename T> struct Atomic {
 };
 
 // Issue a thread fence with the given memory ordering.
-LIBC_INLINE void atomic_thread_fence(MemoryOrder mem_ord) {
+LIBC_INLINE void atomic_thread_fence([[maybe_unused]] MemoryOrder mem_ord) {
 // The NVPTX backend currently does not support atomic thread fences, so we use
 // a full system fence instead.
 #ifdef LIBC_TARGET_ARCH_IS_NVPTX
-  (void)mem_ord;
   __nvvm_membar_sys();
 #else
-  __atomic_thread_fence(int(mem_ord));
+  __atomic_thread_fence(static_cast<int>(mem_ord));
 #endif
 }
 
@@ -166,12 +165,11 @@ LIBC_INLINE void atomic_thread_fence(MemoryOrder mem_ord) {
 // executed on the same thread. This is equivalent to atomic_thread_fence,
 // except that no instructions for memory ordering are issued. Only reordering
 // of the instructions by the compiler is suppressed, as mem_ord instructs.
-LIBC_INLINE void atomic_signal_fence(MemoryOrder mem_ord) {
+LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
 #if __has_builtin(__atomic_signal_fence)
-  __atomic_signal_fence(int(mem_ord));
+  __atomic_signal_fence(static_cast<int>(mem_ord));
 #else
   // If the builtin is unavailable, use inline asm as a full compiler barrier.
-  (void)mem_ord;
   asm volatile("" ::: "memory");
 #endif
 }
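
The second patch follows the usual C++ idiom for parameters that are only consumed on some preprocessor paths: mark them [[maybe_unused]] instead of casting to void, and convert the enum with static_cast. A minimal self-contained sketch of the same idiom, using an illustrative stand-in enum and function name rather than the real libc definitions:

    // Illustrative stand-in; the real MemoryOrder lives in
    // libc/src/__support/CPP/atomic.h.
    enum class MemoryOrder : int {
      RELAXED = __ATOMIC_RELAXED,
      SEQ_CST = __ATOMIC_SEQ_CST,
    };

    // [[maybe_unused]] documents that the parameter may be compiled out on one
    // preprocessor path, replacing a C-style `(void)mem_ord;` statement.
    inline void thread_fence([[maybe_unused]] MemoryOrder mem_ord) {
    #if __has_builtin(__atomic_thread_fence)
      // static_cast keeps the enum-to-int conversion explicit.
      __atomic_thread_fence(static_cast<int>(mem_ord));
    #else
      asm volatile("" ::: "memory"); // mem_ord intentionally unused here
    #endif
    }
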


