[compiler-rt] a4b32c2 - Revert "[compiler-rt][builtins] Switch libatomic locks to pthread_mutex_t (#94374)"

Martin Storsjö via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jun 5 12:51:11 PDT 2024


Author: Martin Storsjö
Date: 2024-06-05T22:50:10+03:00
New Revision: a4b32c25761e3de55d42a4799a303f36aa198fb7

URL: https://github.com/llvm/llvm-project/commit/a4b32c25761e3de55d42a4799a303f36aa198fb7
DIFF: https://github.com/llvm/llvm-project/commit/a4b32c25761e3de55d42a4799a303f36aa198fb7.diff

LOG: Revert "[compiler-rt][builtins] Switch libatomic locks to pthread_mutex_t (#94374)"

This reverts commit b62b7a42bbee4a3bbf9094808f460fdc9c119bd7 and
a5729b71d844c1444f7d348dc2d4ea5b98de5ec5.

The reverted commit broke compilation on systems that lack pthreads.
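For context, the reverted version unconditionally included <pthread.h>, so it
could only build where pthreads exist. A minimal sketch of how a reland could
gate the pthread path behind a feature check (the USE_PTHREAD_LOCKS macro
below is hypothetical; neither commit defines it):

    #include <stdint.h>

    /* Hypothetical guard: only take the pthread path when <pthread.h>
       is actually available on the target. */
    #if defined(__has_include)
    #if __has_include(<pthread.h>)
    #define USE_PTHREAD_LOCKS 1
    #endif
    #endif

    #ifdef USE_PTHREAD_LOCKS
    #include <pthread.h>
    typedef pthread_mutex_t Lock;
    static void lock(Lock *l) { pthread_mutex_lock(l); }
    static void unlock(Lock *l) { pthread_mutex_unlock(l); }
    #else
    /* Fall back to the atomic spinlock this revert restores. */
    typedef _Atomic(uintptr_t) Lock;
    #endif

With a guard along these lines, platforms without pthreads would keep the
spinlock fallback that this revert restores.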

Added: 
    

Modified: 
    compiler-rt/lib/builtins/atomic.c

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
index c3a36a9aaba607..852bb20f086723 100644
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -51,14 +51,6 @@
 #endif
 static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
 
-#ifndef CACHE_LINE_SIZE
-#define CACHE_LINE_SIZE 64
-#endif
-
-#ifdef __clang__
-#pragma clang diagnostic ignored "-Wgnu-designator"
-#endif
-
 ////////////////////////////////////////////////////////////////////////////////
 // Platform-specific lock implementation.  Falls back to spinlocks if none is
 // defined.  Each platform should define the Lock type, and corresponding
@@ -102,18 +94,21 @@ static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
 #else
 _Static_assert(__atomic_always_lock_free(sizeof(uintptr_t), 0),
                "Implementation assumes lock-free pointer-size cmpxchg");
-#include <pthread.h>
-#include <stdalign.h>
-typedef struct {
-  alignas(CACHE_LINE_SIZE) pthread_mutex_t m;
-} Lock;
+typedef _Atomic(uintptr_t) Lock;
 /// Unlock a lock.  This is a release operation.
-__inline static void unlock(Lock *l) { pthread_mutex_unlock(&l->m); }
-/// Locks a lock.
-__inline static void lock(Lock *l) { pthread_mutex_lock(&l->m); }
+__inline static void unlock(Lock *l) {
+  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
+}
+/// Locks a lock.  In the current implementation, this is potentially
+/// unbounded in the contended case.
+__inline static void lock(Lock *l) {
+  uintptr_t old = 0;
+  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
+                                             __ATOMIC_RELAXED))
+    old = 0;
+}
 /// locks for atomic operations
-static Lock locks[SPINLOCK_COUNT] = {
-    [0 ... SPINLOCK_COUNT - 1] = {PTHREAD_MUTEX_INITIALIZER}};
+static Lock locks[SPINLOCK_COUNT];
 #endif
 
 /// Returns a lock to use for a given pointer.

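For reference, the code restored above is a simple test-and-set spinlock over
a pointer-sized atomic, with one lock chosen per address range. A
self-contained sketch of that behavior, assuming clang (it uses the
__c11_atomic builtins); lock_for_pointer is simplified here, since the real
helper in atomic.c also folds higher address bits into the hash:

    #include <stdint.h>

    #define SPINLOCK_COUNT (1 << 10)
    static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

    typedef _Atomic(uintptr_t) Lock;
    static Lock locks[SPINLOCK_COUNT]; /* zero-initialized, i.e. all unlocked */

    /* Unlock: a plain release store of 0. */
    static void unlock(Lock *l) { __c11_atomic_store(l, 0, __ATOMIC_RELEASE); }

    /* Lock: spin on a weak CAS from 0 to 1.  Potentially unbounded under
       contention, which is what #94374 tried to address with pthread_mutex_t. */
    static void lock(Lock *l) {
      uintptr_t old = 0;
      while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                                 __ATOMIC_RELAXED))
        old = 0; /* a failed CAS writes the observed value into old; reset it */
    }

    /* Map an address to one of the locks; the low 4 bits are ignored so
       that accesses within the same 16-byte granule share a lock. */
    static Lock *lock_for_pointer(void *ptr) {
      intptr_t hash = (intptr_t)ptr >> 4;
      return locks + (hash & SPINLOCK_MASK);
    }

Note that the zero-initialized locks array is what keeps the spinlock variant
simple to initialize statically: unlike PTHREAD_MUTEX_INITIALIZER, it needs no
[0 ... SPINLOCK_COUNT - 1] range designator, which is why the reverted
-Wgnu-designator pragma disappears along with it.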


