[libc-commits] [libc] [libc][arc4random 3/n] implement global and local random state (PR #152617)

via libc-commits libc-commits at lists.llvm.org
Fri Aug 8 06:18:15 PDT 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-libc

Author: Schrodinger ZHU Yifan (SchrodingerZhu)

<details>
<summary>Changes</summary>



---

Patch is 24.94 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/152617.diff


9 Files Affected:

- (modified) libc/src/__support/CMakeLists.txt (+24) 
- (added) libc/src/__support/aba_ptr.h (+83) 
- (added) libc/src/__support/mpmc_stack.h (+107) 
- (modified) libc/src/stdlib/linux/CMakeLists.txt (+20) 
- (added) libc/src/stdlib/linux/vsdo_rng.h (+280) 
- (modified) libc/test/integration/src/__support/CMakeLists.txt (+15) 
- (added) libc/test/integration/src/__support/mpmc_stack_test.cpp (+119) 
- (added) libc/test/integration/src/stdlib/linux/CMakeLists.txt (+14) 
- (added) libc/test/integration/src/stdlib/linux/vsdo_rng_test.cpp (+68) 


``````````diff
diff --git a/libc/src/__support/CMakeLists.txt b/libc/src/__support/CMakeLists.txt
index 2196d9e23bba7..c9d89cf6fc286 100644
--- a/libc/src/__support/CMakeLists.txt
+++ b/libc/src/__support/CMakeLists.txt
@@ -398,6 +398,30 @@ add_header_library(
     libc.src.__support.macros.attributes
 )
 
+add_header_library(
+  aba_ptr
+  HDRS
+    aba_ptr.h
+  DEPENDS
+    libc.hdr.types.size_t
+    libc.src.__support.common
+    libc.src.__support.threads.sleep
+)
+
+add_header_library(
+  mpmc_stack
+  HDRS
+    mpmc_stack.h
+  DEPENDS
+    libc.src.__support.aba_ptr
+    libc.src.__support.common
+    libc.src.__support.CPP.atomic
+    libc.src.__support.CPP.new
+    libc.src.__support.CPP.optional
+    libc.src.__support.CPP.type_traits
+)
+
+
 add_subdirectory(FPUtil)
 add_subdirectory(OSUtil)
 add_subdirectory(StringUtil)
diff --git a/libc/src/__support/aba_ptr.h b/libc/src/__support/aba_ptr.h
new file mode 100644
index 0000000000000..632cc466c295b
--- /dev/null
+++ b/libc/src/__support/aba_ptr.h
@@ -0,0 +1,83 @@
+//===-- Transactional Ptr for ABA prevention --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_TAGGED_POINTER_H
+#define LLVM_LIBC_SRC___SUPPORT_TAGGED_POINTER_H
+
+#include "hdr/types/size_t.h"
+#include "src/__support/common.h"
+#include "src/__support/threads/sleep.h"
+
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+#define LIBC_ABA_PTR_IS_ATOMIC true
+#else
+#define LIBC_ABA_PTR_IS_ATOMIC false
+#endif
+
+namespace LIBC_NAMESPACE_DECL {
+
+template <class T, bool IsAtomic> struct AbaPtrImpl {
+  union Impl {
+    struct alignas(2 * alignof(void *)) Atomic {
+      T *ptr;
+      size_t tag;
+    } atomic;
+    struct Mutex {
+      T *ptr;
+      bool locked;
+    } mutex;
+  } impl;
+
+  LIBC_INLINE constexpr AbaPtrImpl(T *ptr)
+      : impl(IsAtomic ? Impl{.atomic{ptr, 0}} : Impl{.mutex{ptr, false}}) {}
+
+  /// The user must guarantee that the operation is redoable (it may be retried).
+  template <class Op> LIBC_INLINE void transaction(Op &&op) {
+    if constexpr (IsAtomic) {
+      for (;;) {
+        typename Impl::Atomic snapshot, next;
+        __atomic_load(&impl.atomic, &snapshot, __ATOMIC_RELAXED);
+        next.ptr = op(snapshot.ptr);
+        // Wrapping add for unsigned integers.
+        next.tag = snapshot.tag + 1;
+        if (__atomic_compare_exchange(&impl.atomic, &snapshot, &next, true,
+                                      __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
+          return;
+      }
+    } else {
+      // Acquire the lock.
+      while (__atomic_exchange_n(&impl.mutex.locked, true, __ATOMIC_ACQUIRE))
+        while (__atomic_load_n(&impl.mutex.locked, __ATOMIC_RELAXED))
+          LIBC_NAMESPACE::sleep_briefly();
+
+      impl.mutex.ptr = op(impl.mutex.ptr);
+      // Release the lock.
+      __atomic_store_n(&impl.mutex.locked, false, __ATOMIC_RELEASE);
+    }
+  }
+
+  LIBC_INLINE T *get() const {
+    if constexpr (IsAtomic) {
+      // Weak micro-architectures typically regard simultaneous partial-word
+      // and full-word loads as a race condition. While there are
+      // implementations that use racy reads anyway, we still load the whole
+      // word to avoid any complications.
+      typename Impl::Atomic snapshot;
+      __atomic_load(&impl.atomic, &snapshot, __ATOMIC_RELAXED);
+      return snapshot.ptr;
+    } else {
+      return impl.mutex.ptr;
+    }
+  }
+};
+
+template <class T> using AbaPtr = AbaPtrImpl<T, LIBC_ABA_PTR_IS_ATOMIC>;
+} // namespace LIBC_NAMESPACE_DECL
+
+#undef LIBC_ABA_PTR_IS_ATOMIC
+#endif
diff --git a/libc/src/__support/mpmc_stack.h b/libc/src/__support/mpmc_stack.h
new file mode 100644
index 0000000000000..df235c2c1dfac
--- /dev/null
+++ b/libc/src/__support/mpmc_stack.h
@@ -0,0 +1,107 @@
+//===-- Simple Lock-free MPMC Stack -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_MPMC_STACK_H
+#define LLVM_LIBC_SRC___SUPPORT_MPMC_STACK_H
+
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/aba_ptr.h"
+
+namespace LIBC_NAMESPACE_DECL {
+template <class T> class MPMCStack {
+  struct Node {
+    cpp::Atomic<size_t> visitor;
+    Node *next;
+    T value;
+
+    LIBC_INLINE Node(T val) : visitor(0), next(nullptr), value(val) {}
+  };
+  AbaPtr<Node> head;
+
+public:
+  static_assert(cpp::is_copy_constructible<T>::value,
+                "T must be copy constructible");
+  LIBC_INLINE constexpr MPMCStack() : head(nullptr) {}
+  LIBC_INLINE bool push(T value) {
+    AllocChecker ac;
+    Node *new_node = new (ac) Node(value);
+    if (!ac)
+      return false;
+    head.transaction([new_node](Node *old_head) {
+      new_node->next = old_head;
+      return new_node;
+    });
+    return true;
+  }
+  LIBC_INLINE bool push_all(T values[], size_t count) {
+    struct Guard {
+      size_t count;
+      Node **allocated;
+      LIBC_INLINE Guard(Node *allocated[]) : count(0), allocated(allocated) {}
+      LIBC_INLINE ~Guard() {
+        for (size_t i = 0; i < count; ++i)
+          delete allocated[i];
+      }
+      LIBC_INLINE void add(Node *node) { allocated[count++] = node; }
+      LIBC_INLINE void clear() { count = 0; }
+    };
+    // Variable sized array is a GNU extension.
+    __extension__ Node *allocated[count];
+    {
+      Guard guard(allocated);
+      for (size_t i = 0; i < count; ++i) {
+        AllocChecker ac;
+        Node *new_node = new (ac) Node(values[i]);
+        if (!ac)
+          return false;
+        guard.add(new_node);
+        if (i != 0)
+          new_node->next = allocated[i - 1];
+      }
+      guard.clear();
+    }
+    head.transaction([&allocated, count](Node *old_head) {
+      allocated[0]->next = old_head;
+      return allocated[count - 1];
+    });
+    return true;
+  }
+  LIBC_INLINE cpp::optional<T> pop() {
+    cpp::optional<T> res = cpp::nullopt;
+    Node *node = nullptr;
+    head.transaction([&](Node *current_head) -> Node * {
+      if (!current_head) {
+        res = cpp::nullopt;
+        return nullptr;
+      }
+      node = current_head;
+      node->visitor.fetch_add(1);
+      res = cpp::optional<T>{node->value};
+      Node *next = node->next;
+      node->visitor.fetch_sub(1);
+      return next;
+    });
+    // On a successful transaction, a node is popped by us, so we must delete
+    // it. At this point, no one else can acquire a
+    // new reference to the node, but we still need to wait for other threads
+    // inside the transaction that may potentially be holding a reference to
+    // the node.
+    if (res) {
+      // Spin until the node is no longer in use.
+      while (node->visitor.load() != 0)
+        LIBC_NAMESPACE::sleep_briefly();
+      delete node;
+    }
+    return res;
+  }
+};
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif
diff --git a/libc/src/stdlib/linux/CMakeLists.txt b/libc/src/stdlib/linux/CMakeLists.txt
index 1d3c00a5e0ddb..8bca9e726105b 100644
--- a/libc/src/stdlib/linux/CMakeLists.txt
+++ b/libc/src/stdlib/linux/CMakeLists.txt
@@ -9,3 +9,23 @@ add_entrypoint_object(
     libc.src.signal.raise
     libc.src.stdlib._Exit
 )
+
+add_header_library(
+  vsdo_rng
+  HDRS
+    vsdo_rng.h
+  DEPENDS
+    libc.src.__support.threads.thread # For __cxa_thread_atexit_impl
+    libc.src.__support.CPP.algorithm
+    libc.src.__support.CPP.bit
+    libc.src.__support.CPP.mutex
+    libc.src.__support.CPP.optional
+    libc.src.__support.OSUtil.linux.vdso
+    libc.src.__support.OSUtil.osutil
+    libc.src.__support.macros.config
+    libc.src.__support.mpmc_stack
+    libc.src.__support.threads.callonce
+    libc.src.__support.threads.linux.raw_mutex
+    libc.src.sys.auxv.getauxval # TODO: remove public entrypoint dependency
+    libc.include.sys_syscall
+)
diff --git a/libc/src/stdlib/linux/vsdo_rng.h b/libc/src/stdlib/linux/vsdo_rng.h
new file mode 100644
index 0000000000000..023a125652e07
--- /dev/null
+++ b/libc/src/stdlib/linux/vsdo_rng.h
@@ -0,0 +1,280 @@
+//===-- vDSO based RNG ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_SRC_STDLIB_LINUX_VSDO_RNG_H
+#define LIBC_SRC_STDLIB_LINUX_VSDO_RNG_H
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/CPP/mutex.h"
+#include "src/__support/OSUtil/linux/vdso.h"
+#include "src/__support/OSUtil/syscall.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/mpmc_stack.h"
+#include "src/__support/threads/callonce.h"
+#include "src/__support/threads/linux/raw_mutex.h"
+// TODO: this is a public entrypoint; we should remove it later on.
+#include "src/sys/auxv/getauxval.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace vsdo_rng {
+extern "C" {
+using Destructor = void(void *);
+[[gnu::weak]] extern void *__dso_handle;
+int __cxa_thread_atexit_impl(Destructor *, void *, void *);
+}
+class GlobalState {
+public:
+  struct VGetrandomOpaqueParams {
+    unsigned int size_of_opaque_states;
+    unsigned int mmap_prot;
+    unsigned int mmap_flags;
+    unsigned int reserved[13];
+  };
+
+  struct Config {
+    size_t page_size;
+    size_t pages_per_alloc;
+    size_t states_per_page;
+    vdso::VDSOSymType<vdso::VDSOSym::GetRandom> getrandom;
+    VGetrandomOpaqueParams params;
+  };
+
+private:
+  // A lock-free stack of free opaque states.
+  MPMCStack<void *> free_list{};
+  // A mutex protecting the allocation of new pages.
+  RawMutex allocation_mutex{};
+
+  // Shared global configuration.
+  static CallOnceFlag config_flag;
+  static Config config;
+
+  // We grow the states by the number of CPUs. This function uses
+  // SYS_sched_getaffinity to get the number of CPUs.
+  LIBC_INLINE static size_t cpu_count();
+
+  // Grow available states. This function can fail if the system is out of
+  // memory.
+  // - This routine assumes that the global config is valid.
+  // - On success, this routine returns one opaque state for direct use.
+  LIBC_INLINE void *grow();
+
+public:
+  LIBC_INLINE constexpr GlobalState() {}
+  LIBC_INLINE static const Config &get_config();
+  LIBC_INLINE static const Config &get_config_unchecked() { return config; }
+  LIBC_INLINE void *get();
+  LIBC_INLINE void recycle(void *state);
+};
+
+LIBC_INLINE_VAR GlobalState global_state{};
+
+class LocalState {
+  bool in_flight = false;
+  bool failed = false;
+  void *state = nullptr;
+
+public:
+  struct Guard {
+    LocalState *tls;
+    LIBC_INLINE Guard(LocalState *tls) : tls(tls) {
+      tls->in_flight = true;
+      cpp::atomic_thread_fence(cpp::MemoryOrder::SEQ_CST);
+    }
+    LIBC_INLINE Guard(Guard &&other) : tls(other.tls) { other.tls = nullptr; }
+    LIBC_INLINE ~Guard() {
+      cpp::atomic_thread_fence(cpp::MemoryOrder::SEQ_CST);
+      if (tls)
+        tls->in_flight = false;
+    }
+    LIBC_INLINE void fill(void *buf, size_t size) const;
+  };
+  LIBC_INLINE constexpr LocalState() {}
+  LIBC_INLINE cpp::optional<Guard> get() {
+    if (in_flight)
+      return cpp::nullopt;
+
+    Guard guard(this);
+
+    if (!failed && !state) {
+      int register_res = __cxa_thread_atexit_impl(
+          [](void *self) {
+            auto *tls = static_cast<LocalState *>(self);
+            // Reject all future attempts to get a state.
+            void *state = tls->state;
+            tls->in_flight = true;
+            tls->failed = true;
+            tls->state = nullptr;
+            cpp::atomic_thread_fence(cpp::MemoryOrder::SEQ_CST);
+            if (state)
+              LIBC_NAMESPACE::vsdo_rng::global_state.recycle(state);
+          },
+          this, __dso_handle);
+      if (register_res == 0)
+        state = LIBC_NAMESPACE::vsdo_rng::global_state.get();
+      if (!state)
+        failed = true;
+    }
+
+    if (!state)
+      return cpp::nullopt;
+
+    return cpp::move(guard);
+  }
+};
+
+LIBC_INLINE_VAR LIBC_THREAD_LOCAL LocalState local_state{};
+
+//===----------------------------------------------------------------------===//
+// Implementation
+//===----------------------------------------------------------------------===//
+
+LIBC_INLINE_VAR GlobalState::Config GlobalState::config{};
+LIBC_INLINE_VAR CallOnceFlag GlobalState::config_flag = 0;
+
+LIBC_INLINE size_t GlobalState::cpu_count() {
+  char cpu_set[128] = {0};
+  int res = LIBC_NAMESPACE::syscall_impl<int>(SYS_sched_getaffinity, 0,
+                                              sizeof(cpu_set), cpu_set);
+  if (res <= 0)
+    return 1;
+
+  size_t count = 0;
+  for (size_t i = 0; i < sizeof(cpu_set) / sizeof(unsigned long); ++i) {
+    unsigned long *mask_ptr = reinterpret_cast<unsigned long *>(cpu_set);
+    count += LIBC_NAMESPACE::cpp::popcount(mask_ptr[i]);
+  }
+
+  return count > 0 ? count : 1;
+}
+
+LIBC_INLINE const GlobalState::Config &GlobalState::get_config() {
+  callonce(&config_flag, []() {
+    config.getrandom =
+        LIBC_NAMESPACE::vdso::TypedSymbol<vdso::VDSOSym::GetRandom>{};
+    if (!config.getrandom)
+      return;
+
+    // Call with special flag to get the desired configuration.
+    int res = config.getrandom(
+        /*buf=*/nullptr, /*count=*/0, /*flags=*/0,
+        /*opaque_states=*/&config.params,
+        /*size_of_opaque_states=*/~0);
+    if (res != 0)
+      return;
+
+    config.page_size = LIBC_NAMESPACE::getauxval(AT_PAGESZ);
+    if (!config.page_size)
+      return;
+
+    size_t count = cpp::max(cpu_count(), size_t{4});
+
+    config.states_per_page =
+        config.page_size / config.params.size_of_opaque_states;
+
+    config.pages_per_alloc =
+        count / config.states_per_page + (count % config.states_per_page != 0);
+  });
+  return config;
+}
+
+LIBC_INLINE void *GlobalState::grow() {
+  cpp::lock_guard guard(allocation_mutex);
+
+  // It is possible that by the time we finally grab the lock, other threads
+  // have already finished the allocation. Hence, we first check whether we
+  // can pop anything from the free list.
+  if (cpp::optional<void *> state = free_list.pop())
+    return *state;
+
+  long mmap_res = LIBC_NAMESPACE::syscall_impl<long>(
+      SYS_mmap, /*addr=*/nullptr,
+      /*length=*/config.page_size * config.pages_per_alloc,
+      /*prot=*/config.params.mmap_prot,
+      /*flags=*/config.params.mmap_flags,
+      /*fd=*/-1, /*offset=*/0);
+  if (mmap_res == -1 /* MAP_FAILED */)
+    return nullptr;
+
+  char *pages = reinterpret_cast<char *>(mmap_res);
+
+  // Initialize the page.
+  size_t total_states = config.pages_per_alloc * config.states_per_page;
+  size_t free_states = total_states - 1; // reserve one for direct use.
+  __extension__ void *opaque_states[total_states];
+  size_t index = 0;
+  for (size_t p = 0; p < config.pages_per_alloc; ++p) {
+    char *page = &pages[p * config.page_size];
+    for (size_t s = 0; s < config.states_per_page; ++s) {
+      void *state = &page[s * config.params.size_of_opaque_states];
+      opaque_states[index++] = state;
+    }
+  }
+
+  constexpr size_t RETRY_COUNT = 64;
+  for (size_t i = 0; i < RETRY_COUNT; ++i) {
+    if (free_list.push_all(opaque_states, free_states))
+      break;
+    // Abort if we are still short in memory after all these retries.
+    if (i + 1 == RETRY_COUNT) {
+      LIBC_NAMESPACE::syscall_impl<long>(
+          SYS_munmap, pages, config.page_size * config.pages_per_alloc);
+      return nullptr;
+    }
+  }
+
+  return opaque_states[free_states];
+}
+
+LIBC_INLINE void *GlobalState::get() {
+  const Config &config = get_config();
+  // If page size is not set, the global config is invalid. Early return.
+  if (!config.page_size)
+    return nullptr;
+
+  if (cpp::optional<void *> state = free_list.pop())
+    return *state;
+
+  // At this stage, we know that the config is valid.
+  return grow();
+}
+
+LIBC_INLINE void GlobalState::recycle(void *state) {
+  LIBC_ASSERT(state != nullptr);
+  constexpr size_t RETRY_COUNT = 64;
+  for (size_t i = 0; i < RETRY_COUNT; ++i)
+    if (free_list.push(state))
+      return;
+  // Otherwise, we just let it leak. It won't be too bad not to reuse the state
+  // since the OS can free the page if memory is tight.
+}
+
+//===----------------------------------------------------------------------===//
+// LocalState
+//===----------------------------------------------------------------------===//
+
+LIBC_INLINE void LocalState::Guard::fill(void *buf, size_t size) const {
+  LIBC_ASSERT(tls->state != nullptr);
+  char *cursor = reinterpret_cast<char *>(buf);
+  size_t remaining = size;
+  const auto &config = GlobalState::get_config_unchecked();
+  while (remaining > 0) {
+    int res = config.getrandom(cursor, remaining, /* default random flag */ 0,
+                               tls->state, config.params.size_of_opaque_states);
+    if (res < 0)
+      continue;
+    remaining -= static_cast<size_t>(res);
+    cursor += res;
+  }
+}
+
+} // namespace vsdo_rng
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_SRC_STDLIB_LINUX_VSDO_RNG_H
diff --git a/libc/test/integration/src/__support/CMakeLists.txt b/libc/test/integration/src/__support/CMakeLists.txt
index b5b6557e8d689..93f54083f3c00 100644
--- a/libc/test/integration/src/__support/CMakeLists.txt
+++ b/libc/test/integration/src/__support/CMakeLists.txt
@@ -2,3 +2,18 @@ add_subdirectory(threads)
 if(LIBC_TARGET_OS_IS_GPU)
   add_subdirectory(GPU)
 endif()
+
+add_libc_integration_test_suite(libc-support-integration-tests)
+
+add_integration_test(
+  mpmc_stack_test
+  SUITE
+    libc-support-integration-tests
+  SRCS
+    mpmc_stack_test.cpp
+  DEPENDS
+    libc.src.__support.mpmc_stack
+    libc.src.__support.threads.thread
+    libc.src.pthread.pthread_create
+    libc.src.pthread.pthread_join
+)
diff --git a/libc/test/integration/src/__support/mpmc_stack_test.cpp b/libc/test/integration/src/__support/mpmc_stack_test.cpp
new file mode 100644
index 0000000000000..9166a816a74fe
--- /dev/null
+++ b/libc/test/integration/src/__support/mpmc_stack_test.cpp
@@ -0,0 +1,119 @@
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/mpmc_stack.h"
+#include "src/pthread/pthread_create.h"
+#include "src/pthread/pthread_join.h"
+#include "test/IntegrationTest/test.h"
+
+using namespace LIBC_NAMESPACE;
+
+void smoke_test() {
+  MPMCStack<int> stack;
+  for (int i = 0; i <= 100; ++i)
+    if (!stack.push(i))
+      __builtin_trap();
+  for (int i = 100; i >= 0; --i)
+    if (*stack.pop() != i)
+      __builtin_trap();
+  if (stack.pop())
+    __builtin_trap(); // Should be empty now.
+}
+
+void multithread_test() {
+  constexpr static size_t NUM_THREADS = 5;
+  constexpr static size_t NUM_PUSHES = 100;
+  struct State {
+    MPMCStack<size_t> stack;
+    cpp::Atomic<size_t> counter = 0;
+    cpp::Atomic<bool> flags[NUM_PUSHES];
+  } state;
+  pthread_t threads[NUM_THREADS];
+  for (size_t i = 0; i < NUM_THREADS; ++i) {
+    LIBC_NAMESPACE::pthread_create(
+        &threads[i], nullptr,
+        [](void *arg) -> void * {
+          State *state = static_cast<State *>(arg);
+          for (;;) {
+            size_t current = state->counter.fetch_add(1);
+            if (current >= NUM_PUSHES)
+              break;
+            if (!state->stack.push(current))
+              __builtin_trap();
+          }
+          while (auto res = state->stack.pop())
+            state->flags[res.value()].store(true);
+          return nullptr;
+        },
+        &state);
+  }
+  for (pthread_t thread : threads)
+    LIBC_NAMESPACE::pthread_join(thread, nullptr);
+  while (cpp::optional<size_t> res = state.stack.pop())
+    state.flags[res.value()].store(true);
+  for (size_t i = 0; i < NUM_PUSHES; ++i)
+    if (!state.flags[i].load...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/152617


More information about the libc-commits mailing list