[libc-commits] [libc] [libc] add rwlock (PR #94156)

Schrodinger ZHU Yifan via libc-commits libc-commits at lists.llvm.org
Sun Jun 2 21:11:40 PDT 2024


https://github.com/SchrodingerZhu updated https://github.com/llvm/llvm-project/pull/94156

>From 32190b039eec473a23c9bfb03fdc8a6cbb8fcaf3 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 03:48:26 -0700
Subject: [PATCH 01/13] [libc] add rwlock implementation

---
 libc/config/config.json                       |   4 +
 libc/docs/configure.rst                       |   1 +
 .../__support/threads/linux/CMakeLists.txt    |  21 +-
 libc/src/__support/threads/linux/rwlock.h     | 519 ++++++++++++++++++
 4 files changed, 540 insertions(+), 5 deletions(-)
 create mode 100644 libc/src/__support/threads/linux/rwlock.h

diff --git a/libc/config/config.json b/libc/config/config.json
index d3d1ff1e28716..8d6a84e732597 100644
--- a/libc/config/config.json
+++ b/libc/config/config.json
@@ -49,6 +49,10 @@
     "LIBC_CONF_RAW_MUTEX_DEFAULT_SPIN_COUNT": {
       "value": 100,
       "doc": "Default number of spins before blocking if a mutex is in contention (default to 100)."
+    },
+    "LIBC_CONF_RWLOCK_DEFAULT_SPIN_COUNT": {
+      "value": 100,
+      "doc": "Default number of spins before blocking if a rwlock is in contention (default to 100)."
     }
   }
 }
diff --git a/libc/docs/configure.rst b/libc/docs/configure.rst
index 77ade07714fdf..bdae6c54052f2 100644
--- a/libc/docs/configure.rst
+++ b/libc/docs/configure.rst
@@ -36,6 +36,7 @@ to learn about the defaults for your platform and target.
     - ``LIBC_CONF_PRINTF_FLOAT_TO_STR_USE_MEGA_LONG_DOUBLE_TABLE``: Use large table for better printf long double performance.
 * **"pthread" options**
     - ``LIBC_CONF_RAW_MUTEX_DEFAULT_SPIN_COUNT``: Default number of spins before blocking if a mutex is in contention (default to 100).
+    - ``LIBC_CONF_RWLOCK_DEFAULT_SPIN_COUNT``: Default number of spins before blocking if a rwlock is in contention (default to 100).
     - ``LIBC_CONF_TIMEOUT_ENSURE_MONOTONICITY``: Automatically adjust timeout to CLOCK_MONOTONIC (default to true). POSIX API may require CLOCK_REALTIME, which can be unstable and leading to unexpected behavior. This option will convert the real-time timestamp to monotonic timestamp relative to the time of call.
 * **"string" options**
     - ``LIBC_CONF_MEMSET_X86_USE_SOFTWARE_PREFETCHING``: Inserts prefetch for write instructions (PREFETCHW) for memset on x86 to recover performance when hardware prefetcher is disabled.
diff --git a/libc/src/__support/threads/linux/CMakeLists.txt b/libc/src/__support/threads/linux/CMakeLists.txt
index 9bf88ccc84557..bac8073a66049 100644
--- a/libc/src/__support/threads/linux/CMakeLists.txt
+++ b/libc/src/__support/threads/linux/CMakeLists.txt
@@ -22,11 +22,11 @@ add_header_library(
     libc.src.__support.time.linux.abs_timeout
 )
 
-set(raw_mutex_additional_flags)
+set(monotonicity_flags)
 if (LIBC_CONF_TIMEOUT_ENSURE_MONOTONICITY)
-  set(raw_mutex_additional_flags -DLIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY=1)
+  set(monotonicity_flags -DLIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY=1)
 else()
-  set(raw_mutex_additional_flags -DLIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY=0)
+  set(monotonicity_flags -DLIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY=0)
 endif()
 
 add_header_library(
@@ -42,8 +42,19 @@ add_header_library(
     libc.hdr.types.pid_t
   COMPILE_OPTIONS
     -DLIBC_COPT_RAW_MUTEX_DEFAULT_SPIN_COUNT=${LIBC_CONF_RAW_MUTEX_DEFAULT_SPIN_COUNT}
-    ${raw_mutex_additional_flags}
-  
+    ${monotonicity_flags}
+)
+
+add_header_library(
+  rwlock
+  HDRS
+    rwlock.h
+  DEPENDS
+    .futex_utils
+    .raw_mutex
+  COMPILE_OPTIONS
+    -DLIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT=${LIBC_CONF_RWLOCK_DEFAULT_SPIN_COUNT}
+    ${monotonicity_flags}
 )
 
 add_header_library(
diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
new file mode 100644
index 0000000000000..e8da17681ddfe
--- /dev/null
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -0,0 +1,519 @@
+//===--- Implementation of a Linux RwLock class ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC_SUPPORT_THREADS_LINUX_RWLOCK_H
+#define LLVM_LIBC_SRC_SUPPORT_THREADS_LINUX_RWLOCK_H
+
+#include "hdr/errno_macros.h"
+#include "hdr/types/pid_t.h"
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/CPP/expected.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/CPP/type_traits/make_signed.h"
+#include "src/__support/OSUtil/linux/x86_64/syscall.h"
+#include "src/__support/common.h"
+#include "src/__support/libc_assert.h"
+#include "src/__support/macros/attributes.h"
+#include "src/__support/macros/optimization.h"
+#include "src/__support/threads/linux/futex_utils.h"
+#include "src/__support/threads/linux/futex_word.h"
+#include "src/__support/threads/linux/raw_mutex.h"
+#include "src/__support/threads/sleep.h"
+
+#ifndef LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT
+#define LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT 100
+#endif
+
+#ifndef LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY
+#define LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY 1
+#warning "LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY is not defined, defaulting to 1"
+#endif
+
+#if LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY
+#include "src/__support/time/linux/monotonicity.h"
+#endif
+
+namespace LIBC_NAMESPACE {
+class RwLock {
+private:
+  class WaitingQueue final : private RawMutex {
+    FutexWordType pending_reader;
+    FutexWordType pending_writer;
+    Futex reader_serialization;
+    Futex writer_serialization;
+
+  public:
+    class Guard {
+      WaitingQueue &queue;
+
+      LIBC_INLINE constexpr Guard(WaitingQueue &queue) : queue(queue) {}
+
+    public:
+      LIBC_INLINE ~Guard() { queue.unlock(); }
+      LIBC_INLINE FutexWordType &pending_reader() {
+        return queue.pending_reader;
+      }
+      LIBC_INLINE FutexWordType &pending_writer() {
+        return queue.pending_writer;
+      }
+      LIBC_INLINE FutexWordType &reader_serialization() {
+        return queue.reader_serialization.val;
+      }
+      LIBC_INLINE FutexWordType &writer_serialization() {
+        return queue.writer_serialization.val;
+      }
+      friend RwLock;
+    };
+
+  public:
+    LIBC_INLINE constexpr WaitingQueue()
+        : RawMutex(), pending_reader(0), pending_writer(0),
+          reader_serialization(0), writer_serialization(0) {}
+    LIBC_INLINE Guard acquire() {
+      this->lock();
+      return Guard(*this);
+    }
+    LIBC_INLINE long reader_wait(FutexWordType expected,
+                                 cpp::optional<Futex::Timeout> timeout,
+                                 bool is_pshared) {
+      return reader_serialization.wait(expected, timeout, is_pshared);
+    }
+    LIBC_INLINE long reader_notify_all(bool is_pshared) {
+      return reader_serialization.notify_all(is_pshared);
+    }
+    LIBC_INLINE long writer_wait(FutexWordType expected,
+                                 cpp::optional<Futex::Timeout> timeout,
+                                 bool is_pshared) {
+      return writer_serialization.wait(expected, timeout, is_pshared);
+    }
+    LIBC_INLINE long writer_notify_one(bool is_pshared) {
+      return writer_serialization.notify_one(is_pshared);
+    }
+  };
+
+public:
+  enum class Preference : char { Reader, Writer };
+  enum class LockResult {
+    Success = 0,
+    Timeout = ETIMEDOUT,
+    Overflow = EAGAIN,
+    Busy = EBUSY,
+    Deadlock = EDEADLOCK,
+    PermissionDenied = EPERM,
+  };
+
+private:
+  // The State of the RwLock is stored in a 32-bit word, consisting of the
+  // following components:
+  // -----------------------------------------------
+  // | Range |           Description               |
+  // ===============================================
+  // | 0     | Pending Reader Bit                  |
+  // -----------------------------------------------
+  // | 1     | Pending Writer Bit                  |
+  // -----------------------------------------------
+  // | 2-30  | Active Reader Count                 |
+  // -----------------------------------------------
+  // | 31    | Active Writer Bit                   |
+  // -----------------------------------------------
+  class State {
+    // We use a signed integer as the state type. It is easier
+    // to handle state transitions and detection using signed integers.
+    using Type = int32_t;
+
+    // Shift amounts to access the components of the state.
+    LIBC_INLINE_VAR static constexpr Type PENDING_READER_SHIFT = 0;
+    LIBC_INLINE_VAR static constexpr Type PENDING_WRITER_SHIFT = 1;
+    LIBC_INLINE_VAR static constexpr Type ACTIVE_READER_SHIFT = 2;
+    LIBC_INLINE_VAR static constexpr Type ACTIVE_WRITER_SHIFT = 31;
+
+    // Bitmasks to access the components of the state.
+    LIBC_INLINE_VAR static constexpr Type PENDING_READER_BIT =
+        1 << PENDING_READER_SHIFT;
+    LIBC_INLINE_VAR static constexpr Type PENDING_WRITER_BIT =
+        1 << PENDING_WRITER_SHIFT;
+    LIBC_INLINE_VAR static constexpr Type ACTIVE_READER_COUNT_UNIT =
+        1 << ACTIVE_READER_SHIFT;
+    LIBC_INLINE_VAR static constexpr Type ACTIVE_WRITER_BIT =
+        1 << ACTIVE_WRITER_SHIFT;
+    LIBC_INLINE_VAR static constexpr Type PENDING_MASK =
+        PENDING_READER_BIT | PENDING_WRITER_BIT;
+
+  private:
+    Type state;
+
+  public:
+    // Construction and conversion functions.
+    LIBC_INLINE constexpr State(Type state = 0) : state(state) {}
+    LIBC_INLINE constexpr operator Type() const { return state; }
+
+    // Utilities to check the state of the RwLock.
+    LIBC_INLINE constexpr bool has_active_writer() const { return state < 0; }
+    LIBC_INLINE constexpr bool has_active_reader() const {
+      return state >= ACTIVE_READER_COUNT_UNIT;
+    }
+    LIBC_INLINE constexpr bool has_active_owner() const {
+      return has_active_reader() || has_active_writer();
+    }
+    LIBC_INLINE constexpr bool has_last_reader() const {
+      return (state >> ACTIVE_READER_SHIFT) == 1;
+    }
+    LIBC_INLINE constexpr bool has_pending_writer() const {
+      return state & PENDING_WRITER_BIT;
+    }
+    LIBC_INLINE constexpr bool has_pending() const {
+      return state & PENDING_MASK;
+    }
+    LIBC_INLINE constexpr State set_writer_bit() const {
+      return State(state | ACTIVE_WRITER_BIT);
+    }
+    // The preference parameter changes the behavior of the lock acquisition
+    // if there are both readers and writers waiting for the lock. If writers
+    // are preferred, reader acquisition will be blocked until all pending
+    // writers are served.
+    LIBC_INLINE bool can_acquire_reader(Preference preference) const {
+      switch (preference) {
+      case Preference::Reader:
+        return !has_active_writer();
+      case Preference::Writer:
+        return !has_active_writer() && !has_pending_writer();
+      }
+    }
+    LIBC_INLINE bool can_acquire_writer(Preference /*unused*/) const {
+      return !has_active_owner();
+    }
+    // This function checks whether it is possible to grow the reader count
+    // without overflowing the state.
+    LIBC_INLINE cpp::optional<State> try_increase_reader_count() const {
+      LIBC_ASSERT(!has_active_writer() &&
+                  "try_increase_reader_count shall only be called when there "
+                  "is no active writer.");
+      State res;
+      if (LIBC_UNLIKELY(__builtin_sadd_overflow(state, ACTIVE_READER_COUNT_UNIT,
+                                                &res.state)))
+        return cpp::nullopt;
+      return res;
+    }
+
+    // Utilities to do atomic operations on the state.
+    LIBC_INLINE static State
+    fetch_sub_reader_count(cpp::Atomic<Type> &target,
+                           cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.fetch_sub(ACTIVE_READER_COUNT_UNIT, order));
+    }
+    LIBC_INLINE static State
+    load(cpp::Atomic<Type> &target,
+         cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.load(order));
+    }
+    LIBC_INLINE static State fetch_set_pending_reader(
+        cpp::Atomic<Type> &target,
+        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.fetch_or(PENDING_READER_BIT, order));
+    }
+    LIBC_INLINE static State fetch_clear_pending_reader(
+        cpp::Atomic<Type> &target,
+        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.fetch_and(~PENDING_READER_BIT, order));
+    }
+    LIBC_INLINE static State fetch_set_pending_writer(
+        cpp::Atomic<Type> &target,
+        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.fetch_or(PENDING_WRITER_BIT, order));
+    }
+    LIBC_INLINE static State fetch_clear_pending_writer(
+        cpp::Atomic<Type> &target,
+        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.fetch_and(~PENDING_WRITER_BIT, order));
+    }
+    LIBC_INLINE static State fetch_set_active_writer(
+        cpp::Atomic<Type> &target,
+        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.fetch_or(ACTIVE_WRITER_BIT, order));
+    }
+    LIBC_INLINE static State fetch_clear_active_writer(
+        cpp::Atomic<Type> &target,
+        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      return State(target.fetch_and(~ACTIVE_WRITER_BIT, order));
+    }
+
+    LIBC_INLINE bool
+    compare_exchange_weak_with(cpp::Atomic<Type> &target, State desired,
+                               cpp::MemoryOrder success_order,
+                               cpp::MemoryOrder failure_order) {
+      return target.compare_exchange_weak(state, desired, success_order,
+                                          failure_order);
+    }
+
+    // Utilities to spin and reload the state.
+  private:
+    template <class F>
+    LIBC_INLINE static State spin_reload_until(cpp::Atomic<Type> &target,
+                                               F &&func, unsigned spin_count) {
+      for (;;) {
+        auto state = State::load(target);
+        if (func(state) || spin_count == 0)
+          return state;
+        sleep_briefly();
+        spin_count--;
+      }
+    }
+
+  public:
+    // Return the reader state if either the lock is available or there is any
+    // ongoing contention.
+    LIBC_INLINE static State spin_reload_for_reader(cpp::Atomic<Type> &target,
+                                                    Preference preference,
+                                                    unsigned spin_count) {
+      return spin_reload_until(
+          target,
+          [=](State state) {
+            return state.can_acquire_reader(preference) || state.has_pending();
+          },
+          spin_count);
+    }
+    // Return the writer state if either the lock is available or there is any
+    // contention *between writers*. Since writers are typically far fewer than
+    // readers, we allow them to spin more to improve fairness.
+    LIBC_INLINE static State spin_reload_for_writer(cpp::Atomic<Type> &target,
+                                                    Preference preference,
+                                                    unsigned spin_count) {
+      return spin_reload_until(
+          target,
+          [=](State state) {
+            return state.can_acquire_writer(preference) ||
+                   state.has_pending_writer();
+          },
+          spin_count);
+    }
+  };
+
+private:
+  // Whether the RwLock is shared between processes.
+  bool is_pshared;
+  // Reader/Writer preference.
+  Preference preference;
+  // State to keep track of the RwLock.
+  cpp::Atomic<int32_t> state;
+  // writer_tid is used to keep track of the thread id of the writer. Notice
+  // that a TLS address is not a good idea here since it may remain the same
+  // across forked processes.
+  cpp::Atomic<pid_t> writer_tid;
+  // Waiting queue to keep track of the pending readers and writers.
+  WaitingQueue queue;
+
+private:
+  // TODO: use cached thread id once implemented.
+  LIBC_INLINE static pid_t gettid() { return syscall_impl<pid_t>(SYS_gettid); }
+
+  LIBC_INLINE LockResult try_read_lock(State &old) {
+    while (LIBC_LIKELY(old.can_acquire_reader(preference))) {
+      cpp::optional<State> next = old.try_increase_reader_count();
+      if (!next)
+        return LockResult::Overflow;
+      if (LIBC_LIKELY(old.compare_exchange_weak_with(
+              state, *next, cpp::MemoryOrder::ACQUIRE,
+              cpp::MemoryOrder::RELAXED)))
+        return LockResult::Success;
+      // Notice that old is updated by the compare_exchange_weak_with function.
+    }
+    return LockResult::Busy;
+  }
+
+  LIBC_INLINE LockResult try_write_lock(State &old) {
+    // This while loop should terminate quickly
+    while (LIBC_LIKELY(old.can_acquire_writer(preference))) {
+      if (LIBC_LIKELY(old.compare_exchange_weak_with(
+              state, old.set_writer_bit(), cpp::MemoryOrder::ACQUIRE,
+              cpp::MemoryOrder::RELAXED))) {
+        writer_tid.store(gettid(), cpp::MemoryOrder::RELAXED);
+        return LockResult::Success;
+      }
+      // Notice that old is updated by the compare_exchange_weak_with function.
+    }
+    return LockResult::Busy;
+  }
+
+public:
+  LIBC_INLINE constexpr RwLock(Preference preference = Preference::Reader,
+                               bool is_pshared = false)
+      : is_pshared(is_pshared), preference(preference), state(0), writer_tid(0),
+        queue() {}
+
+  LIBC_INLINE LockResult try_read_lock() {
+    State old = State::load(state, cpp::MemoryOrder::RELAXED);
+    return try_read_lock(old);
+  }
+  LIBC_INLINE LockResult try_write_lock() {
+    State old = State::load(state, cpp::MemoryOrder::RELAXED);
+    return try_write_lock(old);
+  }
+
+private:
+  template <State (&SpinReload)(cpp::Atomic<int32_t> &, Preference, unsigned),
+            State (&SetPending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder),
+            State (&ClearPending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder),
+            FutexWordType &(WaitingQueue::Guard::*Serialization)(),
+            FutexWordType &(WaitingQueue::Guard::*PendingCount)(),
+            LockResult (RwLock::*TryLock)(State &),
+            long (WaitingQueue::*Wait)(FutexWordType,
+                                       cpp::optional<Futex::Timeout>, bool),
+            bool (State::*CanAcquire)(Preference) const>
+  LIBC_INLINE LockResult
+  lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
+       unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
+    // Phase 1: deadlock detection.
+    // A deadlock happens if this is a RAW/WAW lock in the same thread.
+    if (writer_tid.load(cpp::MemoryOrder::RELAXED) == gettid())
+      return LockResult::Deadlock;
+
+    // Phase 2: spin to get the initial state. We ignore the timing due to spin
+    // since it should end quickly.
+    State old = SpinReload(state, preference, spin_count);
+
+#if LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY
+    // Phase 3: convert the timeout if necessary.
+    if (timeout)
+      ensure_monotonicity(*timeout);
+#endif
+
+    // Enter the main acquisition loop.
+    for (;;) {
+      // Phase 4: if the lock can be acquired, try to acquire it.
+      LockResult result = (this->*TryLock)(old);
+      if (result != LockResult::Busy)
+        return result;
+
+      // Phase 5: register ourselves as a pending reader.
+      int serial_number;
+      {
+        // The queue needs to be protected by a mutex since the operations in
+        // this block must be executed as a whole transaction. It is possible
+        // that this lock will make the timeout imprecise, but this is the best
+        // we can do. The transaction is small and everyone should make
+        // progress rather quickly.
+        WaitingQueue::Guard guard = queue.acquire();
+        (guard.*PendingCount)()++;
+
+        // Use atomic operation to guarantee the total order of the operations
+        // on the state. The pending flag update should be visible to any
+        // succeeding unlock events. Or, if an unlock does happen before we sleep
+        // on the futex, we can avoid such waiting.
+        old = SetPending(state, cpp::MemoryOrder::RELAXED);
+        // no need to use atomic since it is already protected by the mutex.
+        serial_number = (guard.*Serialization)();
+      }
+
+      // Phase 6: do futex wait until the lock is available or timeout is
+      // reached.
+      bool timeout_flag = false;
+      if (!(old.*CanAcquire)(preference)) {
+        timeout_flag =
+            ((queue.*Wait)(serial_number, timeout, is_pshared) == -ETIMEDOUT);
+
+        // Phase 7: unregister ourselves as a pending reader.
+        {
+          // Similarly, the unregister operation should also be an atomic
+          // transaction.
+          WaitingQueue::Guard guard = queue.acquire();
+          (guard.*PendingCount)()--;
+          // Clear the flag if we are the last reader. The flag must be cleared
+          // otherwise operations like trylock may fail even though there are no
+          // competitors.
+          if ((guard.*PendingCount)() == 0)
+            ClearPending(state, cpp::MemoryOrder::RELAXED);
+        }
+
+        // Phase 8: exit the loop if the timeout is reached.
+        if (timeout_flag)
+          return LockResult::Timeout;
+
+        // Phase 9: reload the state and retry the acquisition.
+        old = SpinReload(state, preference, spin_count);
+      }
+    }
+  }
+
+public:
+  LIBC_INLINE LockResult
+  read_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
+            unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
+    return lock<State::spin_reload_for_reader, State::fetch_set_pending_reader,
+                State::fetch_clear_pending_reader,
+                &WaitingQueue::Guard::reader_serialization,
+                &WaitingQueue::Guard::pending_reader, &RwLock::try_read_lock,
+                &WaitingQueue::reader_wait, &State::can_acquire_reader>(
+        timeout, spin_count);
+  }
+  LIBC_INLINE LockResult
+  write_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
+             unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
+    return lock<State::spin_reload_for_writer, State::fetch_set_pending_writer,
+                State::fetch_clear_pending_writer,
+                &WaitingQueue::Guard::writer_serialization,
+                &WaitingQueue::Guard::pending_writer, &RwLock::try_write_lock,
+                &WaitingQueue::writer_wait, &State::can_acquire_writer>(
+        timeout, spin_count);
+  }
+  LIBC_INLINE LockResult unlock() {
+    State old = State::load(state, cpp::MemoryOrder::RELAXED);
+
+    if (old.has_active_writer()) {
+      // The lock is held by a writer.
+
+      // Check if we are the owner of the lock.
+      if (writer_tid.load(cpp::MemoryOrder::RELAXED) != gettid())
+        return LockResult::PermissionDenied;
+
+      // clear writer tid.
+      writer_tid.store(0, cpp::MemoryOrder::RELAXED);
+
+      // clear the writer bit.
+      old = State::fetch_clear_active_writer(state);
+
+      // If there is no pending readers or writers, we are done.
+      if (!old.has_pending())
+        return LockResult::Success;
+    } else if (old.has_active_reader()) {
+      // The lock is held by readers.
+
+      // Decrease the reader count.
+      old = State::fetch_sub_reader_count(state);
+
+      // If there is no pending readers or writers, we are done.
+      if (!old.has_last_reader() || !old.has_pending())
+        return LockResult::Success;
+    } else
+      return LockResult::PermissionDenied;
+
+    enum class WakeTarget { Readers, Writers, None };
+    WakeTarget status;
+
+    {
+      WaitingQueue::Guard guard = queue.acquire();
+      if (guard.pending_writer() != 0) {
+        guard.writer_serialization()++;
+        status = WakeTarget::Writers;
+      } else if (guard.pending_reader() != 0) {
+        guard.reader_serialization()++;
+        status = WakeTarget::Readers;
+      } else
+        status = WakeTarget::None;
+    }
+
+    if (status == WakeTarget::Readers)
+      queue.reader_notify_all(is_pshared);
+    else if (status == WakeTarget::Writers)
+      queue.writer_notify_one(is_pshared);
+
+    return LockResult::Success;
+  }
+};
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_SUPPORT_THREADS_LINUX_RWLOCK_H
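
A minimal usage sketch of the class added above. This is illustrative only and
not part of the patch; the header path, namespace, and member names come from
the diff, while the surrounding functions are made up:

  #include "src/__support/threads/linux/rwlock.h"

  using LIBC_NAMESPACE::RwLock;

  // Writer-preferring, process-private lock (both arguments are optional).
  static RwLock rwlock(RwLock::Preference::Writer, /*is_pshared=*/false);

  void reader_path() {
    // read_lock() blocks, with an optional timeout and spin count.
    if (rwlock.read_lock() == RwLock::LockResult::Success) {
      // ... read the shared data ...
      rwlock.unlock();
    }
  }

  void writer_path() {
    // try_write_lock() never blocks; Busy means another owner is active.
    if (rwlock.try_write_lock() == RwLock::LockResult::Success) {
      // ... mutate the shared data ...
      rwlock.unlock();
    }
  }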

>From 10cccb5ac80183bb25c204756a6784594ec6e15f Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 11:33:19 -0700
Subject: [PATCH 02/13] [libc] clean up headers

---
 libc/src/__support/threads/linux/rwlock.h | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index e8da17681ddfe..6a9b2bed16b0d 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -11,11 +11,8 @@
 #include "hdr/errno_macros.h"
 #include "hdr/types/pid_t.h"
 #include "src/__support/CPP/atomic.h"
-#include "src/__support/CPP/expected.h"
-#include "src/__support/CPP/new.h"
 #include "src/__support/CPP/optional.h"
-#include "src/__support/CPP/type_traits/make_signed.h"
-#include "src/__support/OSUtil/linux/x86_64/syscall.h"
+#include "src/__support/OSUtil/syscall.h"
 #include "src/__support/common.h"
 #include "src/__support/libc_assert.h"
 #include "src/__support/macros/attributes.h"
@@ -100,7 +97,7 @@ class RwLock {
   enum class Preference : char { Reader, Writer };
   enum class LockResult {
     Success = 0,
-    Timeout = ETIMEDOUT,
+    TimedOut = ETIMEDOUT,
     Overflow = EAGAIN,
     Busy = EBUSY,
     Deadlock = EDEADLOCK,
@@ -431,7 +428,7 @@ class RwLock {
 
         // Phase 8: exit the loop if the timeout is reached.
         if (timeout_flag)
-          return LockResult::Timeout;
+          return LockResult::TimedOut;
 
         // Phase 9: reload the state and retry the acquisition.
         old = SpinReload(state, preference, spin_count);
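
Since the LockResult enumerators are errno values (with Success == 0), a
pthread entrypoint can translate a result into a POSIX return code with a
plain cast. A hypothetical wrapper, not taken from this patch:

  // LockResult's underlying values are errno codes, so the conversion is a
  // cast: Success -> 0, TimedOut -> ETIMEDOUT, Busy -> EBUSY, and so on.
  int to_posix_error(LIBC_NAMESPACE::RwLock::LockResult result) {
    return static_cast<int>(result);
  }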

>From 41a8334288d40b657e9af30e98f9fc9cb247fa20 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 11:37:44 -0700
Subject: [PATCH 03/13] [libc] add another trylock before operating on the
 timestamp

---
 libc/src/__support/threads/linux/rwlock.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index 6a9b2bed16b0d..168d17e3c42ff 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -372,6 +372,11 @@ class RwLock {
     // Phase 2: spin to get the initial state. We ignore the timing due to spin
     // since it should end quickly.
     State old = SpinReload(state, preference, spin_count);
+    {
+      LockResult result = (this->*TryLock)(old);
+      if (result != LockResult::Busy)
+        return result;
+    }
 
 #if LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY
     // Phase 3: convert the timeout if necessary.

>From f5b778c342cf892e7351d3227df1b679fd8e63e8 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 11:43:18 -0700
Subject: [PATCH 04/13] [libc] more clean ups

---
 libc/src/__support/threads/linux/CMakeLists.txt | 2 ++
 libc/src/__support/threads/linux/rwlock.h       | 2 --
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/libc/src/__support/threads/linux/CMakeLists.txt b/libc/src/__support/threads/linux/CMakeLists.txt
index bac8073a66049..249aca0545e38 100644
--- a/libc/src/__support/threads/linux/CMakeLists.txt
+++ b/libc/src/__support/threads/linux/CMakeLists.txt
@@ -52,6 +52,8 @@ add_header_library(
   DEPENDS
     .futex_utils
     .raw_mutex
+    libc.src.__support.common
+    libc.src.__support.OSUtil.osutil
   COMPILE_OPTIONS
     -DLIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT=${LIBC_CONF_RWLOCK_DEFAULT_SPIN_COUNT}
     ${monotonicity_flags}
diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index 168d17e3c42ff..c5a3498054f4f 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -15,8 +15,6 @@
 #include "src/__support/OSUtil/syscall.h"
 #include "src/__support/common.h"
 #include "src/__support/libc_assert.h"
-#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/optimization.h"
 #include "src/__support/threads/linux/futex_utils.h"
 #include "src/__support/threads/linux/futex_word.h"
 #include "src/__support/threads/linux/raw_mutex.h"

>From e143ee63ea4f1f0b2586a6dd85a8fcd567331718 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 11:47:51 -0700
Subject: [PATCH 05/13] [libc] separate out the notification part

---
 libc/src/__support/threads/linux/rwlock.h | 53 +++++++++++------------
 1 file changed, 26 insertions(+), 27 deletions(-)

diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index c5a3498054f4f..7f3644adcc2bf 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -460,57 +460,56 @@ class RwLock {
                 &WaitingQueue::writer_wait, &State::can_acquire_writer>(
         timeout, spin_count);
   }
+
+private:
+  LIBC_INLINE void notify_pending_threads() {
+    enum class WakeTarget { Readers, Writers, None };
+    WakeTarget status;
+
+    {
+      WaitingQueue::Guard guard = queue.acquire();
+      if (guard.pending_writer() != 0) {
+        guard.writer_serialization()++;
+        status = WakeTarget::Writers;
+      } else if (guard.pending_reader() != 0) {
+        guard.reader_serialization()++;
+        status = WakeTarget::Readers;
+      } else
+        status = WakeTarget::None;
+    }
+
+    if (status == WakeTarget::Readers)
+      queue.reader_notify_all(is_pshared);
+    else if (status == WakeTarget::Writers)
+      queue.writer_notify_one(is_pshared);
+  }
+
+public:
   LIBC_INLINE LockResult unlock() {
     State old = State::load(state, cpp::MemoryOrder::RELAXED);
-
     if (old.has_active_writer()) {
       // The lock is held by a writer.
-
       // Check if we are the owner of the lock.
       if (writer_tid.load(cpp::MemoryOrder::RELAXED) != gettid())
         return LockResult::PermissionDenied;
-
       // clear writer tid.
       writer_tid.store(0, cpp::MemoryOrder::RELAXED);
-
       // clear the writer bit.
       old = State::fetch_clear_active_writer(state);
-
       // If there is no pending readers or writers, we are done.
       if (!old.has_pending())
         return LockResult::Success;
     } else if (old.has_active_reader()) {
       // The lock is held by readers.
-
       // Decrease the reader count.
       old = State::fetch_sub_reader_count(state);
-
       // If there is no pending readers or writers, we are done.
       if (!old.has_last_reader() || !old.has_pending())
         return LockResult::Success;
     } else
       return LockResult::PermissionDenied;
 
-    enum class WakeTarget { Readers, Writers, None };
-    WakeTarget status;
-
-    {
-      WaitingQueue::Guard guard = queue.acquire();
-      if (guard.pending_writer() != 0) {
-        guard.writer_serialization()++;
-        status = WakeTarget::Writers;
-      } else if (guard.pending_reader() != 0) {
-        guard.reader_serialization()++;
-        status = WakeTarget::Readers;
-      } else
-        status = WakeTarget::None;
-    }
-
-    if (status == WakeTarget::Readers)
-      queue.reader_notify_all(is_pshared);
-    else if (status == WakeTarget::Writers)
-      queue.writer_notify_one(is_pshared);
-
+    notify_pending_threads();
     return LockResult::Success;
   }
 };

>From a7e2f5041d461307a070d42fc0df8dad464233f3 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 11:50:28 -0700
Subject: [PATCH 06/13] [libc] correct permission of Guard

---
 libc/src/__support/threads/linux/rwlock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index 7f3644adcc2bf..5dc6e3459079f 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -62,7 +62,7 @@ class RwLock {
       LIBC_INLINE FutexWordType &writer_serialization() {
         return queue.writer_serialization.val;
       }
-      friend RwLock;
+      friend WaitingQueue;
     };
 
   public:

>From 5b04d51ce4e10d1cd6462830bda485f9f4c0b7b6 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 14:07:57 -0700
Subject: [PATCH 07/13] [libc] address CRs

---
 .../llvm-libc-types/pthread_rwlock_t.h        |  26 +++++
 libc/src/__support/threads/linux/rwlock.h     | 102 ++++++++++--------
 2 files changed, 84 insertions(+), 44 deletions(-)
 create mode 100644 libc/include/llvm-libc-types/pthread_rwlock_t.h

diff --git a/libc/include/llvm-libc-types/pthread_rwlock_t.h b/libc/include/llvm-libc-types/pthread_rwlock_t.h
new file mode 100644
index 0000000000000..b7ba2821a9994
--- /dev/null
+++ b/libc/include/llvm-libc-types/pthread_rwlock_t.h
@@ -0,0 +1,26 @@
+//===-- Definition of pthread_rwlock_t type -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+
+#include "llvm-libc-types/__futex_word.h"
+#include "llvm-libc-types/pid_t.h"
+typedef struct {
+  bool __is_pshared;
+  char __preference;
+  int __state;
+  pid_t __writer_tid;
+  __futex_word __wait_queue_lock;
+  __futex_word __pending_reader;
+  __futex_word __pending_writer;
+  __futex_word __reader_serialization;
+  __futex_word __writer_serialization;
+} pthread_rwlock_t;
+
+#endif // LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index 5dc6e3459079f..300aaaf031316 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -15,6 +15,7 @@
 #include "src/__support/OSUtil/syscall.h"
 #include "src/__support/common.h"
 #include "src/__support/libc_assert.h"
+#include "src/__support/macros/attributes.h"
 #include "src/__support/threads/linux/futex_utils.h"
 #include "src/__support/threads/linux/futex_word.h"
 #include "src/__support/threads/linux/raw_mutex.h"
@@ -350,28 +351,51 @@ class RwLock {
   }
 
 private:
-  template <State (&SpinReload)(cpp::Atomic<int32_t> &, Preference, unsigned),
-            State (&SetPending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder),
-            State (&ClearPending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder),
-            FutexWordType &(WaitingQueue::Guard::*Serialization)(),
-            FutexWordType &(WaitingQueue::Guard::*PendingCount)(),
-            LockResult (RwLock::*TryLock)(State &),
-            long (WaitingQueue::*Wait)(FutexWordType,
-                                       cpp::optional<Futex::Timeout>, bool),
-            bool (State::*CanAcquire)(Preference) const>
+  struct Proxy {
+    State (&spin_reload)(cpp::Atomic<int32_t> &, Preference, unsigned);
+    State (&set_pending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder);
+    State (&clear_pending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder);
+    FutexWordType &(WaitingQueue::Guard::*serialization)();
+    FutexWordType &(WaitingQueue::Guard::*pending_count)();
+    LockResult (RwLock::*try_lock)(State &);
+    long (WaitingQueue::*wait)(FutexWordType, cpp::optional<Futex::Timeout>,
+                               bool);
+    bool (State::*can_acquire)(Preference) const;
+  };
+
+  LIBC_INLINE_VAR static constexpr Proxy READER = {
+      State::spin_reload_for_reader,
+      State::fetch_set_pending_reader,
+      State::fetch_clear_pending_reader,
+      &WaitingQueue::Guard::reader_serialization,
+      &WaitingQueue::Guard::pending_reader,
+      &RwLock::try_read_lock,
+      &WaitingQueue::reader_wait,
+      &State::can_acquire_reader};
+
+  LIBC_INLINE_VAR static constexpr Proxy WRITER = {
+      State::spin_reload_for_writer,
+      State::fetch_set_pending_writer,
+      State::fetch_clear_pending_writer,
+      &WaitingQueue::Guard::writer_serialization,
+      &WaitingQueue::Guard::pending_writer,
+      &RwLock::try_write_lock,
+      &WaitingQueue::writer_wait,
+      &State::can_acquire_writer};
+
   LIBC_INLINE LockResult
-  lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
+  lock(const Proxy &proxy, cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
        unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
     // Phase 1: deadlock detection.
     // A deadlock happens if this is a RAW/WAW lock in the same thread.
     if (writer_tid.load(cpp::MemoryOrder::RELAXED) == gettid())
       return LockResult::Deadlock;
 
-    // Phase 2: spin to get the initial state. We ignore the timing due to spin
-    // since it should end quickly.
-    State old = SpinReload(state, preference, spin_count);
+    // Phase 2: spin to get the initial state. We ignore the timing due to
+    // spin since it should end quickly.
+    State old = proxy.spin_reload(state, preference, spin_count);
     {
-      LockResult result = (this->*TryLock)(old);
+      LockResult result = (this->*proxy.try_lock)(old);
       if (result != LockResult::Busy)
         return result;
     }
@@ -385,7 +409,7 @@ class RwLock {
     // Enter the main acquisition loop.
     for (;;) {
       // Phase 4: if the lock can be acquired, try to acquire it.
-      LockResult result = (this->*TryLock)(old);
+      LockResult result = (this->*proxy.try_lock)(old);
       if (result != LockResult::Busy)
         return result;
 
@@ -394,39 +418,39 @@ class RwLock {
       {
         // The queue needs to be protected by a mutex since the operations in
         // this block must be executed as a whole transaction. It is possible
-        // that this lock will make the timeout imprecise, but this is the best
-        // we can do. The transaction is small and everyone should make
+        // that this lock will make the timeout imprecise, but this is the
+        // best we can do. The transaction is small and everyone should make
         // progress rather quickly.
         WaitingQueue::Guard guard = queue.acquire();
-        (guard.*PendingCount)()++;
+        (guard.*proxy.pending_count)()++;
 
         // Use atomic operation to guarantee the total order of the operations
         // on the state. The pending flag update should be visible to any
-        // succeeding unlock events. Or, if an unlock does happen before we sleep
-        // on the futex, we can avoid such waiting.
-        old = SetPending(state, cpp::MemoryOrder::RELAXED);
+        // succeeding unlock events. Or, if an unlock does happen before we
+        // sleep on the futex, we can avoid such waiting.
+        old = proxy.set_pending(state, cpp::MemoryOrder::RELAXED);
         // no need to use atomic since it is already protected by the mutex.
-        serial_number = (guard.*Serialization)();
+        serial_number = (guard.*proxy.serialization)();
       }
 
       // Phase 6: do futex wait until the lock is available or timeout is
       // reached.
       bool timeout_flag = false;
-      if (!(old.*CanAcquire)(preference)) {
-        timeout_flag =
-            ((queue.*Wait)(serial_number, timeout, is_pshared) == -ETIMEDOUT);
+      if (!(old.*proxy.can_acquire)(preference)) {
+        timeout_flag = ((queue.*proxy.wait)(serial_number, timeout,
+                                            is_pshared) == -ETIMEDOUT);
 
         // Phase 7: unregister ourselves as a pending reader.
         {
           // Similarly, the unregister operation should also be an atomic
           // transaction.
           WaitingQueue::Guard guard = queue.acquire();
-          (guard.*PendingCount)()--;
-          // Clear the flag if we are the last reader. The flag must be cleared
-          // otherwise operations like trylock may fail even though there are no
-          // competitors.
-          if ((guard.*PendingCount)() == 0)
-            ClearPending(state, cpp::MemoryOrder::RELAXED);
+          (guard.*proxy.pending_count)()--;
+          // Clear the flag if we are the last reader. The flag must be
+          // cleared otherwise operations like trylock may fail even though
+          // there are no competitors.
+          if ((guard.*proxy.pending_count)() == 0)
+            proxy.clear_pending(state, cpp::MemoryOrder::RELAXED);
         }
 
         // Phase 8: exit the loop if the timeout is reached.
@@ -434,7 +458,7 @@ class RwLock {
           return LockResult::TimedOut;
 
         // Phase 9: reload the state and retry the acquisition.
-        old = SpinReload(state, preference, spin_count);
+        old = proxy.spin_reload(state, preference, spin_count);
       }
     }
   }
@@ -443,22 +467,12 @@ class RwLock {
   LIBC_INLINE LockResult
   read_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
             unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
-    return lock<State::spin_reload_for_reader, State::fetch_set_pending_reader,
-                State::fetch_clear_pending_reader,
-                &WaitingQueue::Guard::reader_serialization,
-                &WaitingQueue::Guard::pending_reader, &RwLock::try_read_lock,
-                &WaitingQueue::reader_wait, &State::can_acquire_reader>(
-        timeout, spin_count);
+    return lock(READER, timeout, spin_count);
   }
   LIBC_INLINE LockResult
   write_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
              unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
-    return lock<State::spin_reload_for_writer, State::fetch_set_pending_writer,
-                State::fetch_clear_pending_writer,
-                &WaitingQueue::Guard::writer_serialization,
-                &WaitingQueue::Guard::pending_writer, &RwLock::try_write_lock,
-                &WaitingQueue::writer_wait, &State::can_acquire_writer>(
-        timeout, spin_count);
+    return lock(WRITER, timeout, spin_count);
   }
 
 private:
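
The Proxy table above drives everything through pointers to members. For
readers less familiar with that syntax, here is a small self-contained example
of the same dispatch pattern (generic names, unrelated to the patch):

  #include <cstdio>

  struct Widget {
    int value = 0;
    int get() const { return value; }
  };

  struct Ops {
    int (Widget::*getter)() const; // pointer to a member function
    int Widget::*field;            // pointer to a data member
  };

  int main() {
    Widget w{42};
    Ops ops{&Widget::get, &Widget::value};
    // Member pointers are applied to an object with .* (or ->* for pointers).
    std::printf("%d %d\n", (w.*ops.getter)(), w.*ops.field);
    return 0;
  }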

>From 847772cf38baf7835f18f1edf3bb647309924be5 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 14:09:59 -0700
Subject: [PATCH 08/13] [libc] address CRs

---
 libc/src/__support/threads/linux/rwlock.h | 32 +++++++++++------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index 300aaaf031316..6b13c5270be4c 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -364,24 +364,24 @@ class RwLock {
   };
 
   LIBC_INLINE_VAR static constexpr Proxy READER = {
-      State::spin_reload_for_reader,
-      State::fetch_set_pending_reader,
-      State::fetch_clear_pending_reader,
-      &WaitingQueue::Guard::reader_serialization,
-      &WaitingQueue::Guard::pending_reader,
-      &RwLock::try_read_lock,
-      &WaitingQueue::reader_wait,
-      &State::can_acquire_reader};
+      /*spin_reload=*/State::spin_reload_for_reader,
+      /*set_pending=*/State::fetch_set_pending_reader,
+      /*clear_pending=*/State::fetch_clear_pending_reader,
+      /*serialization=*/&WaitingQueue::Guard::reader_serialization,
+      /*pending_count=*/&WaitingQueue::Guard::pending_reader,
+      /*try_lock=*/&RwLock::try_read_lock,
+      /*wait=*/&WaitingQueue::reader_wait,
+      /*can_acquire=*/&State::can_acquire_reader};
 
   LIBC_INLINE_VAR static constexpr Proxy WRITER = {
-      State::spin_reload_for_writer,
-      State::fetch_set_pending_writer,
-      State::fetch_clear_pending_writer,
-      &WaitingQueue::Guard::writer_serialization,
-      &WaitingQueue::Guard::pending_writer,
-      &RwLock::try_write_lock,
-      &WaitingQueue::writer_wait,
-      &State::can_acquire_writer};
+      /*spin_reload=*/State::spin_reload_for_writer,
+      /*set_pending=*/State::fetch_set_pending_writer,
+      /*clear_pending=*/State::fetch_clear_pending_writer,
+      /*serialization=*/&WaitingQueue::Guard::writer_serialization,
+      /*pending_count=*/&WaitingQueue::Guard::pending_writer,
+      /*try_lock=*/&RwLock::try_write_lock,
+      /*wait=*/&WaitingQueue::writer_wait,
+      /*can_acquire=*/&State::can_acquire_writer};
 
   LIBC_INLINE LockResult
   lock(const Proxy &proxy, cpp::optional<Futex::Timeout> timeout = cpp::nullopt,

>From ff183ca67f49c8893206a855f067ba29e7d9d613 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 15:05:26 -0700
Subject: [PATCH 09/13] [libc] add pthread_rwlock_init

---
 libc/config/linux/api.td                      |  2 +
 libc/config/linux/x86_64/entrypoints.txt      |  1 +
 libc/include/CMakeLists.txt                   |  1 +
 libc/include/llvm-libc-types/CMakeLists.txt   |  1 +
 .../llvm-libc-types/pthread_rwlock_t.h        |  8 +--
 libc/include/pthread.h.def                    |  1 +
 libc/spec/posix.td                            | 11 +++
 libc/src/__support/threads/linux/rwlock.h     |  7 ++
 libc/src/pthread/CMakeLists.txt               | 11 +++
 libc/src/pthread/pthread_rwlock_init.cpp      | 69 +++++++++++++++++++
 libc/src/pthread/pthread_rwlock_init.h        | 21 ++++++
 11 files changed, 129 insertions(+), 4 deletions(-)
 create mode 100644 libc/src/pthread/pthread_rwlock_init.cpp
 create mode 100644 libc/src/pthread/pthread_rwlock_init.h

diff --git a/libc/config/linux/api.td b/libc/config/linux/api.td
index 902839b3e5b8f..eb0090c80b0da 100644
--- a/libc/config/linux/api.td
+++ b/libc/config/linux/api.td
@@ -181,6 +181,7 @@ def PThreadAPI : PublicAPI<"pthread.h"> {
       "pthread_mutexattr_t",
       "pthread_once_t",
       "pthread_rwlockattr_t",
+      "pthread_rwlock_t",
       "pthread_t",
   ];
 }
@@ -270,6 +271,7 @@ def SysTypesAPI : PublicAPI<"sys/types.h"> {
     "pthread_mutexattr_t",
     "pthread_once_t",
     "pthread_rwlockattr_t",
+    "pthread_rwlock_t",
     "pthread_t",
     "size_t",
     "ssize_t",
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 367db7d384d23..1cac4ea2d28b7 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -680,6 +680,7 @@ if(LLVM_LIBC_FULL_BUILD)
     libc.src.pthread.pthread_mutexattr_setrobust
     libc.src.pthread.pthread_mutexattr_settype
     libc.src.pthread.pthread_once
+    libc.src.pthread.pthread_rwlock_init
     libc.src.pthread.pthread_rwlockattr_destroy
     libc.src.pthread.pthread_rwlockattr_getkind_np
     libc.src.pthread.pthread_rwlockattr_getpshared
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index 2a41ec46abdab..bb10fd4c94703 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -332,6 +332,7 @@ add_gen_header(
     .llvm-libc-types.pthread_mutex_t
     .llvm-libc-types.pthread_mutexattr_t
     .llvm-libc-types.pthread_once_t
+    .llvm-libc-types.pthread_rwlock_t
     .llvm-libc-types.pthread_rwlockattr_t
     .llvm-libc-types.pthread_t
 )
diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt
index ee2c910b85b00..c9646253aad54 100644
--- a/libc/include/llvm-libc-types/CMakeLists.txt
+++ b/libc/include/llvm-libc-types/CMakeLists.txt
@@ -54,6 +54,7 @@ add_header(pthread_key_t HDR pthread_key_t.h)
 add_header(pthread_mutex_t HDR pthread_mutex_t.h DEPENDS .__futex_word .__mutex_type)
 add_header(pthread_mutexattr_t HDR pthread_mutexattr_t.h)
 add_header(pthread_once_t HDR pthread_once_t.h DEPENDS .__futex_word)
+add_header(pthread_rwlock_t HDR pthread_rwlock_t.h DEPENDS .__futex_word .pid_t)
 add_header(pthread_rwlockattr_t HDR pthread_rwlockattr_t.h)
 add_header(pthread_t HDR pthread_t.h DEPENDS .__thread_type)
 add_header(rlim_t HDR rlim_t.h)
diff --git a/libc/include/llvm-libc-types/pthread_rwlock_t.h b/libc/include/llvm-libc-types/pthread_rwlock_t.h
index b7ba2821a9994..4950547004632 100644
--- a/libc/include/llvm-libc-types/pthread_rwlock_t.h
+++ b/libc/include/llvm-libc-types/pthread_rwlock_t.h
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
-#define LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#ifndef LLVM_LIBC_TYPES_PTHREAD_RWLOCK_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_RWLOCK_T_H
 
 #include "llvm-libc-types/__futex_word.h"
 #include "llvm-libc-types/pid_t.h"
@@ -16,11 +16,11 @@ typedef struct {
   char __preference;
   int __state;
   pid_t __writer_tid;
-  __futex_word __wait_queue_lock;
+  __futex_word __wait_queue_mutex;
   __futex_word __pending_reader;
   __futex_word __pending_writer;
   __futex_word __reader_serialization;
   __futex_word __writer_serialization;
 } pthread_rwlock_t;
 
-#endif // LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#endif // LLVM_LIBC_TYPES_PTHREAD_RWLOCK_T_H
diff --git a/libc/include/pthread.h.def b/libc/include/pthread.h.def
index d41273b5590ea..33bd0060a5b4d 100644
--- a/libc/include/pthread.h.def
+++ b/libc/include/pthread.h.def
@@ -17,6 +17,7 @@
 #define PTHREAD_STACK_MIN (1 << 14) // 16KB
 
 #define PTHREAD_MUTEX_INITIALIZER {0}
+#define PTHREAD_RWLOCK_INITIALIZER {0}
 #define PTHREAD_ONCE_INIT {0}
 
 enum {
diff --git a/libc/spec/posix.td b/libc/spec/posix.td
index e16353b8142de..ce772a6e43482 100644
--- a/libc/spec/posix.td
+++ b/libc/spec/posix.td
@@ -113,6 +113,7 @@ def POSIX : StandardSpec<"POSIX"> {
   NamedType PThreadRWLockAttrTType = NamedType<"pthread_rwlockattr_t">;
   PtrType PThreadRWLockAttrTPtr = PtrType<PThreadRWLockAttrTType>;
   ConstType ConstPThreadRWLockAttrTPtr = ConstType<PThreadRWLockAttrTPtr>;
+  ConstType ConstRestrictedPThreadRWLockAttrTPtr = ConstType<RestrictedPtrType<PThreadRWLockAttrTType>>;
 
   NamedType PThreadMutexAttrTType = NamedType<"pthread_mutexattr_t">;
   PtrType PThreadMutexAttrTPtr = PtrType<PThreadMutexAttrTType>;
@@ -126,6 +127,9 @@ def POSIX : StandardSpec<"POSIX"> {
   ConstType ConstPThreadMutexTPtr = ConstType<PThreadMutexTPtr>;
   ConstType ConstRestrictedPThreadMutexTPtr = ConstType<RestrictedPThreadMutexTPtr>;
 
+  NamedType PThreadRWLockTType = NamedType<"pthread_rwlock_t">;
+  PtrType PThreadRWLockTPtr = PtrType<PThreadRWLockTType>;
+
   PtrType PThreadTPtr = PtrType<PThreadTType>;
   RestrictedPtrType RestrictedPThreadTPtr = RestrictedPtrType<PThreadTType>;
 
@@ -1003,6 +1007,7 @@ def POSIX : StandardSpec<"POSIX"> {
         PThreadOnceCallback,
         PThreadOnceT,
         PThreadRWLockAttrTType,
+        PThreadRWLockTType,
         PThreadStartT,
         PThreadTSSDtorT,
         PThreadTType,
@@ -1259,6 +1264,11 @@ def POSIX : StandardSpec<"POSIX"> {
           RetValSpec<IntType>,
           [ArgSpec<PThreadRWLockAttrTPtr>, ArgSpec<IntType>]
       >,
+      FunctionSpec<
+        "pthread_rwlock_init",
+        RetValSpec<IntType>,
+        [ArgSpec<PThreadRWLockTPtr>, ArgSpec<ConstRestrictedPThreadRWLockAttrTPtr>]
+      >
     ]
   >;
 
@@ -1616,6 +1626,7 @@ def POSIX : StandardSpec<"POSIX"> {
       PThreadMutexTType,
       PThreadOnceT,
       PThreadRWLockAttrTType,
+      PThreadRWLockTType,
       PThreadTType,
       PidT,
       SSizeTType,
diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index 6b13c5270be4c..f3eaada748f41 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -526,6 +526,13 @@ class RwLock {
     notify_pending_threads();
     return LockResult::Success;
   }
+
+  LIBC_INLINE LockResult check_for_destroy() {
+    State old = State::load(state, cpp::MemoryOrder::RELAXED);
+    if (old.has_active_owner())
+      return LockResult::Busy;
+    return LockResult::Success;
+  }
 };
 } // namespace LIBC_NAMESPACE
 
diff --git a/libc/src/pthread/CMakeLists.txt b/libc/src/pthread/CMakeLists.txt
index e5bebb63c6401..8225bc30a566b 100644
--- a/libc/src/pthread/CMakeLists.txt
+++ b/libc/src/pthread/CMakeLists.txt
@@ -522,6 +522,17 @@ add_entrypoint_object(
     libc.include.errno
 )
 
+add_entrypoint_object(
+  pthread_rwlock_init
+  SRCS
+    pthread_rwlock_init.cpp
+  HDRS
+    pthread_rwlock_init.h
+  DEPENDS
+    libc.include.pthread
+    libc.src.__support.threads.linux.rwlock
+)
+
 add_entrypoint_object(
   pthread_once
   SRCS
diff --git a/libc/src/pthread/pthread_rwlock_init.cpp b/libc/src/pthread/pthread_rwlock_init.cpp
new file mode 100644
index 0000000000000..cc71df9b38165
--- /dev/null
+++ b/libc/src/pthread/pthread_rwlock_init.cpp
@@ -0,0 +1,69 @@
+//===-- Linux implementation of the pthread_rwlock_init function ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_rwlock_init.h"
+
+#include "src/__support/common.h"
+#include "src/__support/threads/linux/rwlock.h"
+
+#include <errno.h>
+#include <pthread.h>
+
+LIBC_INLINE void *operator new(size_t, pthread_rwlock_t *addr) noexcept {
+  return addr;
+}
+
+namespace LIBC_NAMESPACE {
+
+static_assert(
+    sizeof(RwLock) == sizeof(pthread_rwlock_t) &&
+        alignof(RwLock) == alignof(pthread_rwlock_t),
+    "The public pthread_rwlock_t type must be of the same size and alignment "
+    "as the internal rwlock type.");
+
+LLVM_LIBC_FUNCTION(int, pthread_rwlock_init,
+                   (pthread_rwlock_t * rwlock,
+                    const pthread_rwlockattr_t *__restrict attr)) {
+  pthread_rwlockattr_t rwlockattr{
+      /*pshared=*/PTHREAD_PROCESS_PRIVATE,
+      /*pref*/ PTHREAD_RWLOCK_PREFER_READER_NP,
+  };
+  if (attr)
+    rwlockattr = *attr;
+
+  ::new (rwlock) RwLock();
+
+  // PTHREAD_RWLOCK_PREFER_WRITER_NP is not supported.
+  RwLock::Preference preference;
+  switch (rwlockattr.pref) {
+  case PTHREAD_RWLOCK_PREFER_READER_NP:
+    preference = RwLock::Preference::Reader;
+    break;
+  case PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
+    preference = RwLock::Preference::Writer;
+    break;
+  default:
+    return EINVAL;
+  }
+  bool is_pshared;
+  switch (rwlockattr.pshared) {
+  case PTHREAD_PROCESS_PRIVATE:
+    is_pshared = false;
+    break;
+  case PTHREAD_PROCESS_SHARED:
+    is_pshared = true;
+    break;
+  default:
+    return EINVAL;
+  }
+
+  new (rwlock) RwLock(preference, is_pshared);
+  return 0;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/pthread/pthread_rwlock_init.h b/libc/src/pthread/pthread_rwlock_init.h
new file mode 100644
index 0000000000000..59a4abe1abbb7
--- /dev/null
+++ b/libc/src/pthread/pthread_rwlock_init.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for pthread_rwlock_init function ---*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_INIT_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_INIT_H
+
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE {
+
+int pthread_rwlock_init(pthread_rwlock_t *mutex,
+                        const pthread_rwlockattr_t *__restrict attr);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_INIT_H
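
A short usage sketch for the new entrypoint. It is illustrative only and
assumes the usual POSIX/GNU rwlockattr functions (pthread_rwlockattr_init,
pthread_rwlockattr_setkind_np, pthread_rwlockattr_setpshared,
pthread_rwlockattr_destroy) are available alongside it:

  #include <pthread.h>

  static pthread_rwlock_t lock;

  int init_shared_writer_preferring_lock(void) {
    pthread_rwlockattr_t attr;
    pthread_rwlockattr_init(&attr);
    // Maps to RwLock::Preference::Writer; PTHREAD_RWLOCK_PREFER_WRITER_NP
    // would be rejected with EINVAL since it is not supported.
    pthread_rwlockattr_setkind_np(&attr,
                                  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    // Maps to is_pshared == true.
    pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
    int ret = pthread_rwlock_init(&lock, &attr);
    pthread_rwlockattr_destroy(&attr);
    return ret; // 0 on success, EINVAL for unsupported attribute values.
  }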

>From 2557a400d0214df39bfef8257071aa96f8152806 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 15:21:43 -0700
Subject: [PATCH 10/13] [libc] add pthread_rwlock_tryrdlock

---
 libc/config/linux/x86_64/entrypoints.txt      |  1 +
 libc/include/pthread.h.def                    |  2 +-
 libc/spec/posix.td                            |  5 ++++
 libc/src/__support/threads/linux/rwlock.h     |  2 +-
 libc/src/pthread/CMakeLists.txt               | 11 +++++++
 libc/src/pthread/pthread_rwlock_init.h        |  2 +-
 libc/src/pthread/pthread_rwlock_tryrdlock.cpp | 30 +++++++++++++++++++
 libc/src/pthread/pthread_rwlock_tryrdlock.h   | 20 +++++++++++++
 8 files changed, 70 insertions(+), 3 deletions(-)
 create mode 100644 libc/src/pthread/pthread_rwlock_tryrdlock.cpp
 create mode 100644 libc/src/pthread/pthread_rwlock_tryrdlock.h

diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 1cac4ea2d28b7..606a67edc9889 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -681,6 +681,7 @@ if(LLVM_LIBC_FULL_BUILD)
     libc.src.pthread.pthread_mutexattr_settype
     libc.src.pthread.pthread_once
     libc.src.pthread.pthread_rwlock_init
+    libc.src.pthread.pthread_rwlock_tryrdlock
     libc.src.pthread.pthread_rwlockattr_destroy
     libc.src.pthread.pthread_rwlockattr_getkind_np
     libc.src.pthread.pthread_rwlockattr_getpshared
diff --git a/libc/include/pthread.h.def b/libc/include/pthread.h.def
index 33bd0060a5b4d..4dbeed6b5f321 100644
--- a/libc/include/pthread.h.def
+++ b/libc/include/pthread.h.def
@@ -17,7 +17,7 @@
 #define PTHREAD_STACK_MIN (1 << 14) // 16KB
 
 #define PTHREAD_MUTEX_INITIALIZER {0}
-#define PTHREAD_RWLOCK_INITIALIZER {0}
+#define PTHREAD_RWLOCK_INITIALIZER {}
 #define PTHREAD_ONCE_INIT {0}
 
 enum {
diff --git a/libc/spec/posix.td b/libc/spec/posix.td
index ce772a6e43482..d909c0d1981ae 100644
--- a/libc/spec/posix.td
+++ b/libc/spec/posix.td
@@ -1268,6 +1268,11 @@ def POSIX : StandardSpec<"POSIX"> {
         "pthread_rwlock_init",
         RetValSpec<IntType>,
         [ArgSpec<PThreadRWLockTPtr>, ArgSpec<ConstRestrictedPThreadRWLockAttrTPtr>]
+      >,
+      FunctionSpec<
+        "pthread_rwlock_tryrdlock",
+        RetValSpec<IntType>,
+        [ArgSpec<PThreadRWLockTPtr>]
       >
     ]
   >;
diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index f3eaada748f41..b454a794cda87 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -94,7 +94,7 @@ class RwLock {
 
 public:
   enum class Preference : char { Reader, Writer };
-  enum class LockResult {
+  enum class LockResult : int {
     Success = 0,
     TimedOut = ETIMEDOUT,
     Overflow = EAGAIN,
diff --git a/libc/src/pthread/CMakeLists.txt b/libc/src/pthread/CMakeLists.txt
index 8225bc30a566b..30c27d4336fbe 100644
--- a/libc/src/pthread/CMakeLists.txt
+++ b/libc/src/pthread/CMakeLists.txt
@@ -533,6 +533,17 @@ add_entrypoint_object(
     libc.src.__support.threads.linux.rwlock
 )
 
+add_entrypoint_object(
+  pthread_rwlock_tryrdlock
+  SRCS
+    pthread_rwlock_tryrdlock.cpp
+  HDRS
+    pthread_rwlock_tryrdlock.h
+  DEPENDS
+    libc.include.pthread
+    libc.src.__support.threads.linux.rwlock
+)
+
 add_entrypoint_object(
   pthread_once
   SRCS
diff --git a/libc/src/pthread/pthread_rwlock_init.h b/libc/src/pthread/pthread_rwlock_init.h
index 59a4abe1abbb7..78d2934882c1d 100644
--- a/libc/src/pthread/pthread_rwlock_init.h
+++ b/libc/src/pthread/pthread_rwlock_init.h
@@ -13,7 +13,7 @@
 
 namespace LIBC_NAMESPACE {
 
-int pthread_rwlock_init(pthread_rwlock_t *mutex,
+int pthread_rwlock_init(pthread_rwlock_t *rwlock,
                         const pthread_rwlockattr_t *__restrict attr);
 
 } // namespace LIBC_NAMESPACE
diff --git a/libc/src/pthread/pthread_rwlock_tryrdlock.cpp b/libc/src/pthread/pthread_rwlock_tryrdlock.cpp
new file mode 100644
index 0000000000000..a2101d2f4714f
--- /dev/null
+++ b/libc/src/pthread/pthread_rwlock_tryrdlock.cpp
@@ -0,0 +1,30 @@
+//===-- Implementation of the Rwlock's tryrdlock function -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_rwlock_tryrdlock.h"
+
+#include "src/__support/common.h"
+#include "src/__support/threads/linux/rwlock.h"
+
+#include <errno.h>
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE {
+
+static_assert(
+    sizeof(RwLock) == sizeof(pthread_rwlock_t) &&
+        alignof(RwLock) == alignof(pthread_rwlock_t),
+    "The public pthread_rwlock_t type must be of the same size and alignment "
+    "as the internal rwlock type.");
+
+LLVM_LIBC_FUNCTION(int, pthread_rwlock_tryrdlock, (pthread_rwlock_t * rwlock)) {
+  RwLock *rw = reinterpret_cast<RwLock *>(rwlock);
+  return static_cast<int>(rw->try_read_lock());
+}
+
+} // namespace LIBC_NAMESPACE
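
A minimal usage sketch for the entrypoint above (illustrative only, not part
of the patch). It relies on the LockResult enum being encoded as errno values,
so the Busy case is assumed to map to EBUSY; pthread_rwlock_unlock is not part
of this patch series yet:

  #include <errno.h>
  #include <pthread.h>

  static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

  bool try_read_something() {
    int ret = pthread_rwlock_tryrdlock(&lock);
    if (ret == EBUSY)
      return false; // a writer is active, or preferred writers are pending
    if (ret != 0)
      return false; // e.g. EAGAIN if the reader count would overflow
    // ... read shared data under the read lock ...
    // pthread_rwlock_unlock(&lock); // unlock lands in a later patch
    return true;
  }
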
diff --git a/libc/src/pthread/pthread_rwlock_tryrdlock.h b/libc/src/pthread/pthread_rwlock_tryrdlock.h
new file mode 100644
index 0000000000000..b07ab5b152b1a
--- /dev/null
+++ b/libc/src/pthread/pthread_rwlock_tryrdlock.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for Rwlock's tryrdlock function ----*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_TRYRDLOCK_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_TRYRDLOCK_H
+
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE {
+
+int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_TRYRDLOCK_H

>From 4222eac1d879d4adcc20ec8d3105ea798d54f319 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 18:03:36 -0700
Subject: [PATCH 11/13] [libc] clean up again

---
 libc/src/__support/threads/linux/rwlock.h | 296 ++++++++++------------
 libc/src/pthread/pthread_rwlock_init.cpp  |   6 +-
 2 files changed, 141 insertions(+), 161 deletions(-)

diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index b454a794cda87..2ec2c5977bf85 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -36,6 +36,9 @@
 
 namespace LIBC_NAMESPACE {
 class RwLock {
+public:
+  enum class Role : char { Reader, Writer };
+
 private:
   class WaitingQueue final : private RawMutex {
     FutexWordType pending_reader;
@@ -51,17 +54,17 @@ class RwLock {
 
     public:
       LIBC_INLINE ~Guard() { queue.unlock(); }
-      LIBC_INLINE FutexWordType &pending_reader() {
-        return queue.pending_reader;
-      }
-      LIBC_INLINE FutexWordType &pending_writer() {
-        return queue.pending_writer;
+      template <Role role> LIBC_INLINE FutexWordType &pending_count() {
+        if constexpr (role == Role::Reader)
+          return queue.pending_reader;
+        else
+          return queue.pending_writer;
       }
-      LIBC_INLINE FutexWordType &reader_serialization() {
-        return queue.reader_serialization.val;
-      }
-      LIBC_INLINE FutexWordType &writer_serialization() {
-        return queue.writer_serialization.val;
+      template <Role role> LIBC_INLINE FutexWordType &serialization() {
+        if constexpr (role == Role::Reader)
+          return queue.reader_serialization.val;
+        else
+          return queue.writer_serialization.val;
       }
       friend WaitingQueue;
     };
@@ -74,26 +77,26 @@ class RwLock {
       this->lock();
       return Guard(*this);
     }
-    LIBC_INLINE long reader_wait(FutexWordType expected,
-                                 cpp::optional<Futex::Timeout> timeout,
-                                 bool is_pshared) {
-      return reader_serialization.wait(expected, timeout, is_pshared);
-    }
-    LIBC_INLINE long reader_notify_all(bool is_pshared) {
-      return reader_serialization.notify_all(is_pshared);
-    }
-    LIBC_INLINE long writer_wait(FutexWordType expected,
-                                 cpp::optional<Futex::Timeout> timeout,
-                                 bool is_pshared) {
-      return writer_serialization.wait(expected, timeout, is_pshared);
+
+    template <Role role>
+    LIBC_INLINE long wait(FutexWordType expected,
+                          cpp::optional<Futex::Timeout> timeout,
+                          bool is_pshared) {
+      if constexpr (role == Role::Reader)
+        return reader_serialization.wait(expected, timeout, is_pshared);
+      else
+        return writer_serialization.wait(expected, timeout, is_pshared);
     }
-    LIBC_INLINE long writer_notify_one(bool is_pshared) {
-      return writer_serialization.notify_one(is_pshared);
+
+    template <Role role> LIBC_INLINE long notify(bool is_pshared) {
+      if constexpr (role == Role::Reader)
+        return reader_serialization.notify_all(is_pshared);
+      else
+        return writer_serialization.notify_one(is_pshared);
     }
   };
 
 public:
-  enum class Preference : char { Reader, Writer };
   enum class LockResult : int {
     Success = 0,
     TimedOut = ETIMEDOUT,
@@ -168,21 +171,23 @@ class RwLock {
     LIBC_INLINE constexpr State set_writer_bit() const {
       return State(state | ACTIVE_WRITER_BIT);
     }
+
     // The preference parameter changes the behavior of the lock acquisition
     // if there are both readers and writers waiting for the lock. If writers
     // are preferred, reader acquisition will be blocked until all pending
     // writers are served.
-    LIBC_INLINE bool can_acquire_reader(Preference preference) const {
-      switch (preference) {
-      case Preference::Reader:
-        return !has_active_writer();
-      case Preference::Writer:
-        return !has_active_writer() && !has_pending_writer();
-      }
-    }
-    LIBC_INLINE bool can_acquire_writer(Preference /*unused*/) const {
-      return !has_acitve_owner();
+    template <Role role> LIBC_INLINE bool can_acquire(Role preference) const {
+      if constexpr (role == Role::Reader) {
+        switch (preference) {
+        case Role::Reader:
+          return !has_active_writer();
+        case Role::Writer:
+          return !has_active_writer() && !has_pending_writer();
+        }
+      } else
+        return !has_acitve_owner();
     }
+
+    // This function checks if it is possible to grow the reader count without
     // overflowing the state.
     LIBC_INLINE cpp::optional<State> try_increase_reader_count() const {
@@ -202,30 +207,30 @@ class RwLock {
                            cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
       return State(target.fetch_sub(ACTIVE_READER_COUNT_UNIT, order));
     }
+
     LIBC_INLINE static State
     load(cpp::Atomic<Type> &target,
          cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
       return State(target.load(order));
     }
-    LIBC_INLINE static State fetch_set_pending_reader(
-        cpp::Atomic<Type> &target,
-        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
-      return State(target.fetch_or(PENDING_READER_BIT, order));
-    }
-    LIBC_INLINE static State fetch_clear_pending_reader(
-        cpp::Atomic<Type> &target,
-        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
-      return State(target.fetch_and(~PENDING_READER_BIT, order));
-    }
-    LIBC_INLINE static State fetch_set_pending_writer(
-        cpp::Atomic<Type> &target,
-        cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
-      return State(target.fetch_or(PENDING_WRITER_BIT, order));
+
+    template <Role role>
+    LIBC_INLINE static State
+    fetch_set_pending_bit(cpp::Atomic<Type> &target,
+                          cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
+      if constexpr (role == Role::Reader)
+        return State(target.fetch_or(PENDING_READER_BIT, order));
+      else
+        return State(target.fetch_or(PENDING_WRITER_BIT, order));
     }
-    LIBC_INLINE static State fetch_clear_pending_writer(
+    template <Role role>
+    LIBC_INLINE static State fetch_clear_pending_bit(
         cpp::Atomic<Type> &target,
         cpp::MemoryOrder order = cpp::MemoryOrder::SEQ_CST) {
-      return State(target.fetch_and(~PENDING_WRITER_BIT, order));
+      if constexpr (role == Role::Reader)
+        return State(target.fetch_and(~PENDING_READER_BIT, order));
+      else
+        return State(target.fetch_and(~PENDING_WRITER_BIT, order));
     }
     LIBC_INLINE static State fetch_set_active_writer(
         cpp::Atomic<Type> &target,
@@ -261,31 +266,33 @@ class RwLock {
     }
 
   public:
-    // Return the reader state if either the lock is available or there is any
-    // ongoing contention.
-    LIBC_INLINE static State spin_reload_for_reader(cpp::Atomic<Type> &target,
-                                                    Preference preference,
-                                                    unsigned spin_count) {
-      return spin_reload_until(
-          target,
-          [=](State state) {
-            return state.can_acquire_reader(preference) || state.has_pending();
-          },
-          spin_count);
-    }
-    // Return the writer state if either the lock is available or there is any
-    // contention *between writers*. Since writers can be way less than readers,
-    // we allow them to spin more to improve the fairness.
-    LIBC_INLINE static State spin_reload_for_writer(cpp::Atomic<Type> &target,
-                                                    Preference preference,
-                                                    unsigned spin_count) {
-      return spin_reload_until(
-          target,
-          [=](State state) {
-            return state.can_acquire_writer(preference) ||
-                   state.has_pending_writer();
-          },
-          spin_count);
+    template <Role role>
+    LIBC_INLINE static State spin_reload(cpp::Atomic<Type> &target,
+                                         Role preference, unsigned spin_count) {
+      if constexpr (role == Role::Reader) {
+        // Return the reader state if either the lock is available or
+        // there is any ongoing contention, in which case spinning stops
+        // early.
+        return spin_reload_until(
+            target,
+            [=](State state) {
+              return state.can_acquire<Role::Reader>(preference) ||
+                     state.has_pending();
+            },
+            spin_count);
+      } else {
+        // Return the writer state if either the lock is available or there is
+        // any
+        // contention *between writers*. Since writers can be way less than
+        // readers, we allow them to spin more to improve the fairness.
+        return spin_reload_until(
+            target,
+            [=](State state) {
+              return state.can_acquire<Role::Writer>(preference) ||
+                     state.has_pending_writer();
+            },
+            spin_count);
+      }
     }
   };
 
@@ -293,7 +300,7 @@ class RwLock {
   // Whether the RwLock is shared between processes.
   bool is_pshared;
   // Reader/Writer preference.
-  Preference preference;
+  Role preference;
   // State to keep track of the RwLock.
   cpp::Atomic<int32_t> state;
   // writer_tid is used to keep track of the thread id of the writer. Notice
@@ -307,84 +314,55 @@ class RwLock {
   // TODO: use cached thread id once implemented.
   LIBC_INLINE static pid_t gettid() { return syscall_impl<pid_t>(SYS_gettid); }
 
-  LIBC_INLINE LockResult try_read_lock(State &old) {
-    while (LIBC_LIKELY(old.can_acquire_reader(preference))) {
-      cpp::optional<State> next = old.try_increase_reader_count();
-      if (!next)
-        return LockResult::Overflow;
-      if (LIBC_LIKELY(old.compare_exchange_weak_with(
-              state, *next, cpp::MemoryOrder::ACQUIRE,
-              cpp::MemoryOrder::RELAXED)))
-        return LockResult::Success;
-      // Notice that old is updated by the compare_exchange_weak_with function.
-    }
-    return LockResult::Busy;
-  }
-
-  LIBC_INLINE LockResult try_write_lock(State &old) {
-    // This while loop should terminate quickly
-    while (LIBC_LIKELY(old.can_acquire_writer(preference))) {
-      if (LIBC_LIKELY(old.compare_exchange_weak_with(
-              state, old.set_writer_bit(), cpp::MemoryOrder::ACQUIRE,
-              cpp::MemoryOrder::RELAXED))) {
-        writer_tid.store(gettid(), cpp::MemoryOrder::RELAXED);
-        return LockResult::Success;
+  template <Role role> LIBC_INLINE LockResult try_lock(State &old) {
+    if constexpr (role == Role::Reader) {
+      while (LIBC_LIKELY(old.can_acquire<Role::Reader>(preference))) {
+        cpp::optional<State> next = old.try_increase_reader_count();
+        if (!next)
+          return LockResult::Overflow;
+        if (LIBC_LIKELY(old.compare_exchange_weak_with(
+                state, *next, cpp::MemoryOrder::ACQUIRE,
+                cpp::MemoryOrder::RELAXED)))
+          return LockResult::Success;
+        // Notice that old is updated by the compare_exchange_weak_with
+        // function.
+      }
+      return LockResult::Busy;
+    } else {
+      // This while loop should terminate quickly
+      while (LIBC_LIKELY(old.can_acquire<Role::Writer>(preference))) {
+        if (LIBC_LIKELY(old.compare_exchange_weak_with(
+                state, old.set_writer_bit(), cpp::MemoryOrder::ACQUIRE,
+                cpp::MemoryOrder::RELAXED))) {
+          writer_tid.store(gettid(), cpp::MemoryOrder::RELAXED);
+          return LockResult::Success;
+        }
+        // Notice that old is updated by the compare_exchange_weak_with
+        // function.
       }
-      // Notice that old is updated by the compare_exchange_weak_with function.
+      return LockResult::Busy;
     }
-    return LockResult::Busy;
   }
 
 public:
-  LIBC_INLINE constexpr RwLock(Preference preference = Preference::Reader,
+  LIBC_INLINE constexpr RwLock(Role preference = Role::Reader,
                                bool is_pshared = false)
       : is_pshared(is_pshared), preference(preference), state(0), writer_tid(0),
         queue() {}
 
   LIBC_INLINE LockResult try_read_lock() {
     State old = State::load(state, cpp::MemoryOrder::RELAXED);
-    return try_read_lock(old);
+    return try_lock<Role::Reader>(old);
   }
   LIBC_INLINE LockResult try_write_lock() {
     State old = State::load(state, cpp::MemoryOrder::RELAXED);
-    return try_write_lock(old);
+    return try_lock<Role::Writer>(old);
   }
 
 private:
-  struct Proxy {
-    State (&spin_reload)(cpp::Atomic<int32_t> &, Preference, unsigned);
-    State (&set_pending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder);
-    State (&clear_pending)(cpp::Atomic<int32_t> &, cpp::MemoryOrder);
-    FutexWordType &(WaitingQueue::Guard::*serialization)();
-    FutexWordType &(WaitingQueue::Guard::*pending_count)();
-    LockResult (RwLock::*try_lock)(State &);
-    long (WaitingQueue::*wait)(FutexWordType, cpp::optional<Futex::Timeout>,
-                               bool);
-    bool (State::*can_acquire)(Preference) const;
-  };
-
-  LIBC_INLINE_VAR static constexpr Proxy READER = {
-      /*spin_reload=*/State::spin_reload_for_reader,
-      /*set_pending=*/State::fetch_set_pending_reader,
-      /*clear_pending=*/State::fetch_clear_pending_reader,
-      /*serialization=*/&WaitingQueue::Guard::reader_serialization,
-      /*pending_count=*/&WaitingQueue::Guard::pending_reader,
-      /*try_lock=*/&RwLock::try_read_lock,
-      /*wait=*/&WaitingQueue::reader_wait,
-      /*can_acquire=*/&State::can_acquire_reader};
-
-  LIBC_INLINE_VAR static constexpr Proxy WRITER = {
-      /*spin_reload=*/State::spin_reload_for_writer,
-      /*set_pending=*/State::fetch_set_pending_writer,
-      /*clear_pending=*/State::fetch_clear_pending_writer,
-      /*serialization=*/&WaitingQueue::Guard::writer_serialization,
-      /*pending_count=*/&WaitingQueue::Guard::pending_writer,
-      /*try_lock=*/&RwLock::try_write_lock,
-      /*wait=*/&WaitingQueue::writer_wait,
-      /*can_acquire=*/&State::can_acquire_writer};
-
+  template <Role role>
   LIBC_INLINE LockResult
-  lock(const Proxy &proxy, cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
+  lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
        unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
     // Phase 1: deadlock detection.
     // A deadlock happens if this is a RAW/WAW lock in the same thread.
@@ -393,9 +371,9 @@ class RwLock {
 
     // Phase 2: spin to get the initial state. We ignore the timing due to
     // spin since it should end quickly.
-    State old = proxy.spin_reload(state, preference, spin_count);
+    State old = State::spin_reload<role>(state, preference, spin_count);
     {
-      LockResult result = (this->*proxy.try_lock)(old);
+      LockResult result = try_lock<role>(old);
       if (result != LockResult::Busy)
         return result;
     }
@@ -409,7 +387,7 @@ class RwLock {
     // Enter the main acquisition loop.
     for (;;) {
       // Phase 4: if the lock can be acquired, try to acquire it.
-      LockResult result = (this->*proxy.try_lock)(old);
+      LockResult result = try_lock<role>(old);
       if (result != LockResult::Busy)
         return result;
 
@@ -422,35 +400,37 @@ class RwLock {
         // best we can do. The transaction is small and everyone should make
         // progress rather quickly.
         WaitingQueue::Guard guard = queue.acquire();
-        (guard.*proxy.pending_count)()++;
+        guard.template pending_count<role>()++;
 
         // Use atomic operation to guarantee the total order of the operations
         // on the state. The pending flag update should be visible to any
         // succeeding unlock events. Or, if an unlock does happen before we
         // sleep on the futex, we can avoid such waiting.
-        old = proxy.set_pending(state, cpp::MemoryOrder::RELAXED);
+        old = State::fetch_set_pending_bit<role>(state,
+                                                 cpp::MemoryOrder::RELAXED);
         // No need for an atomic here; this is already protected by the mutex.
-        serial_number = (guard.*proxy.serialization)();
+        serial_number = guard.serialization<role>();
       }
 
       // Phase 6: do futex wait until the lock is available or timeout is
       // reached.
       bool timeout_flag = false;
-      if (!(old.*proxy.can_acquire)(preference)) {
-        timeout_flag = ((queue.*proxy.wait)(serial_number, timeout,
-                                            is_pshared) == -ETIMEDOUT);
+      if (!old.can_acquire<role>(preference)) {
+        timeout_flag = (queue.wait<role>(serial_number, timeout, is_pshared) ==
+                        -ETIMEDOUT);
 
         // Phase 7: unregister ourselves as a pending reader.
         {
           // Similarly, the unregister operation should also be an atomic
           // transaction.
           WaitingQueue::Guard guard = queue.acquire();
-          (guard.*proxy.pending_count)()--;
+          guard.pending_count<role>()--;
           // Clear the flag if we are the last reader. The flag must be
           // cleared; otherwise, operations like trylock may fail even though
           // there are no competitors.
-          if ((guard.*proxy.pending_count)() == 0)
-            proxy.clear_pending(state, cpp::MemoryOrder::RELAXED);
+          if (guard.pending_count<role>() == 0)
+            State::fetch_clear_pending_bit<role>(state,
+                                                 cpp::MemoryOrder::RELAXED);
         }
 
         // Phase 8: exit the loop if the timeout is reached.
@@ -458,7 +438,7 @@ class RwLock {
           return LockResult::TimedOut;
 
         // Phase 9: reload the state and retry the acquisition.
-        old = proxy.spin_reload(state, preference, spin_count);
+        old = State::spin_reload<role>(state, preference, spin_count);
       }
     }
   }
@@ -467,12 +447,12 @@ class RwLock {
   LIBC_INLINE LockResult
   read_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
             unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
-    return lock(READER, timeout, spin_count);
+    return lock<Role::Reader>(timeout, spin_count);
   }
   LIBC_INLINE LockResult
   write_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
              unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
-    return lock(WRITER, timeout, spin_count);
+    return lock<Role::Writer>(timeout, spin_count);
   }
 
 private:
@@ -482,20 +462,20 @@ class RwLock {
 
     {
       WaitingQueue::Guard guard = queue.acquire();
-      if (guard.pending_writer() != 0) {
-        guard.writer_serialization()++;
+      if (guard.pending_count<Role::Writer>() != 0) {
+        guard.serialization<Role::Writer>()++;
         status = WakeTarget::Writers;
-      } else if (guard.pending_reader() != 0) {
-        guard.reader_serialization()++;
+      } else if (guard.pending_count<Role::Reader>() != 0) {
+        guard.serialization<Role::Reader>()++;
         status = WakeTarget::Readers;
       } else
         status = WakeTarget::None;
     }
 
     if (status == WakeTarget::Readers)
-      queue.reader_notify_all(is_pshared);
+      queue.notify<Role::Reader>(is_pshared);
     else if (status == WakeTarget::Writers)
-      queue.writer_notify_one(is_pshared);
+      queue.notify<Role::Writer>(is_pshared);
   }
 
 public:
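
The clean-up above replaces the Proxy table of member/function pointers with a
Role template parameter and `if constexpr`, so the reader/writer variant is
selected at compile time with no indirection. A standalone sketch of the
pattern (illustrative only; the names are not from the patch):

  #include <cstdio>

  enum class Role : char { Reader, Writer };

  struct Counters {
    unsigned pending_reader = 0;
    unsigned pending_writer = 0;

    // Statically dispatch to one of the two fields; the untaken branch is
    // discarded at instantiation time.
    template <Role role> unsigned &pending_count() {
      if constexpr (role == Role::Reader)
        return pending_reader;
      else
        return pending_writer;
    }
  };

  int main() {
    Counters c;
    c.pending_count<Role::Reader>()++;    // touches pending_reader only
    c.pending_count<Role::Writer>() += 2; // touches pending_writer only
    std::printf("%u %u\n", c.pending_reader, c.pending_writer); // "1 2"
    return 0;
  }
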
diff --git a/libc/src/pthread/pthread_rwlock_init.cpp b/libc/src/pthread/pthread_rwlock_init.cpp
index cc71df9b38165..dddc7ec655745 100644
--- a/libc/src/pthread/pthread_rwlock_init.cpp
+++ b/libc/src/pthread/pthread_rwlock_init.cpp
@@ -39,13 +39,13 @@ LLVM_LIBC_FUNCTION(int, pthread_rwlock_init,
   ::new (rwlock) RwLock();
 
   // PTHREAD_RWLOCK_PREFER_WRITER_NP is not supported.
-  RwLock::Preference preference;
+  RwLock::Role preference;
   switch (rwlockattr.pref) {
   case PTHREAD_RWLOCK_PREFER_READER_NP:
-    preference = RwLock::Preference::Reader;
+    preference = RwLock::Role::Reader;
     break;
   case PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
-    preference = RwLock::Preference::Writer;
+    preference = RwLock::Role::Writer;
     break;
   default:
     return EINVAL;
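
For reference, the waiting protocol in phases 5-7 above follows the usual
futex serialization-counter handshake: a waiter registers itself as pending
and snapshots the serialization counter under the queue mutex, then sleeps
only if the counter has not moved, so a wakeup that races ahead of the sleep
is never lost. A compact sketch of that handshake (illustrative only, recast
with C++20 std::atomic wait/notify instead of raw futex syscalls):

  #include <atomic>
  #include <cstdint>

  struct Serialization {
    std::atomic<std::uint32_t> counter{0};

    // Waiter side: 'expected' was snapshotted while registered as pending.
    // Returns immediately if the counter has already been bumped.
    void wait(std::uint32_t expected) {
      counter.wait(expected, std::memory_order_relaxed);
    }

    // Unlock side: bump the counter (under the queue mutex in the real
    // code), then wake every thread parked on it.
    void wake_all() {
      counter.fetch_add(1, std::memory_order_relaxed);
      counter.notify_all();
    }
  };
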

>From 11bfdd08a8b9a68dd20a23ff53277b97c2e1224e Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 20:42:20 -0700
Subject: [PATCH 12/13] [libc] add pthread_rwlock_rdlock

---
 libc/config/linux/x86_64/entrypoints.txt   |  1 +
 libc/spec/posix.td                         |  5 ++++
 libc/src/__support/threads/linux/rwlock.h  |  4 +--
 libc/src/pthread/CMakeLists.txt            | 11 ++++++++
 libc/src/pthread/pthread_rwlock_rdlock.cpp | 30 ++++++++++++++++++++++
 libc/src/pthread/pthread_rwlock_rdlock.h   | 20 +++++++++++++++
 6 files changed, 69 insertions(+), 2 deletions(-)
 create mode 100644 libc/src/pthread/pthread_rwlock_rdlock.cpp
 create mode 100644 libc/src/pthread/pthread_rwlock_rdlock.h

diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 606a67edc9889..bc7c9e836b8e6 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -682,6 +682,7 @@ if(LLVM_LIBC_FULL_BUILD)
     libc.src.pthread.pthread_once
     libc.src.pthread.pthread_rwlock_init
     libc.src.pthread.pthread_rwlock_tryrdlock
+    libc.src.pthread.pthread_rwlock_rdlock
     libc.src.pthread.pthread_rwlockattr_destroy
     libc.src.pthread.pthread_rwlockattr_getkind_np
     libc.src.pthread.pthread_rwlockattr_getpshared
diff --git a/libc/spec/posix.td b/libc/spec/posix.td
index d909c0d1981ae..de0eec24683f5 100644
--- a/libc/spec/posix.td
+++ b/libc/spec/posix.td
@@ -1273,6 +1273,11 @@ def POSIX : StandardSpec<"POSIX"> {
         "pthread_rwlock_tryrdlock",
         RetValSpec<IntType>,
         [ArgSpec<PThreadRWLockTPtr>]
+      >,
+      FunctionSpec<
+        "pthread_rwlock_rdlock",
+        RetValSpec<IntType>,
+        [ArgSpec<PThreadRWLockTPtr>]
       >
     ]
   >;
diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index 2ec2c5977bf85..ccf9378286098 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -361,7 +361,7 @@ class RwLock {
 
 private:
   template <Role role>
-  LIBC_INLINE LockResult
+  [[gnu::always_inline]] LIBC_INLINE LockResult
   lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
        unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
     // Phase 1: deadlock detection.
@@ -444,7 +444,7 @@ class RwLock {
   }
 
 public:
-  LIBC_INLINE LockResult
+  [[gnu::always_inline]] LIBC_INLINE LockResult
   read_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
             unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
     return lock<Role::Reader>(timeout, spin_count);
diff --git a/libc/src/pthread/CMakeLists.txt b/libc/src/pthread/CMakeLists.txt
index 30c27d4336fbe..e09e3e2e42359 100644
--- a/libc/src/pthread/CMakeLists.txt
+++ b/libc/src/pthread/CMakeLists.txt
@@ -544,6 +544,17 @@ add_entrypoint_object(
     libc.src.__support.threads.linux.rwlock
 )
 
+add_entrypoint_object(
+  pthread_rwlock_rdlock
+  SRCS
+    pthread_rwlock_rdlock.cpp
+  HDRS
+    pthread_rwlock_rdlock.h
+  DEPENDS
+    libc.include.pthread
+    libc.src.__support.threads.linux.rwlock
+)
+
 add_entrypoint_object(
   pthread_once
   SRCS
diff --git a/libc/src/pthread/pthread_rwlock_rdlock.cpp b/libc/src/pthread/pthread_rwlock_rdlock.cpp
new file mode 100644
index 0000000000000..cb7bc439c1b1e
--- /dev/null
+++ b/libc/src/pthread/pthread_rwlock_rdlock.cpp
@@ -0,0 +1,30 @@
+//===-- Implementation of the Rwlock's rdlock function --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_rwlock_rdlock.h"
+
+#include "src/__support/common.h"
+#include "src/__support/threads/linux/rwlock.h"
+
+#include <errno.h>
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE {
+
+static_assert(
+    sizeof(RwLock) == sizeof(pthread_rwlock_t) &&
+        alignof(RwLock) == alignof(pthread_rwlock_t),
+    "The public pthread_rwlock_t type must be of the same size and alignment "
+    "as the internal rwlock type.");
+
+LLVM_LIBC_FUNCTION(int, pthread_rwlock_rdlock, (pthread_rwlock_t * rwlock)) {
+  RwLock *rw = reinterpret_cast<RwLock *>(rwlock);
+  return static_cast<int>(rw->read_lock());
+}
+
+} // namespace LIBC_NAMESPACE
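
A small caller-side sketch for the blocking read lock above (illustrative
only, not part of the patch; it assumes pthread_rwlock_unlock, which lands in
a follow-up patch):

  #include <pthread.h>

  static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  static int shared_value = 42;

  static void *reader(void *) {
    pthread_rwlock_rdlock(&lock); // several readers may hold this at once
    (void)shared_value;           // read-only access under the read lock
    pthread_rwlock_unlock(&lock);
    return nullptr;
  }

  int main() {
    pthread_t t1, t2;
    pthread_create(&t1, nullptr, reader, nullptr);
    pthread_create(&t2, nullptr, reader, nullptr);
    pthread_join(t1, nullptr);
    pthread_join(t2, nullptr);
    return 0;
  }
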
diff --git a/libc/src/pthread/pthread_rwlock_rdlock.h b/libc/src/pthread/pthread_rwlock_rdlock.h
new file mode 100644
index 0000000000000..79027739f4b7c
--- /dev/null
+++ b/libc/src/pthread/pthread_rwlock_rdlock.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for Rwlock's rdlock function -------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_RDLOCK_H
+#define LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_RDLOCK_H
+
+#include <pthread.h>
+
+namespace LIBC_NAMESPACE {
+
+int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_PTHREAD_PTHREAD_RWLOCK_RDLOCK_H

>From 88fcb9dfb0f04d9f91e1049f0f6cb8701e0f4143 Mon Sep 17 00:00:00 2001
From: Yifan Zhu <yifzhu at nvidia.com>
Date: Sun, 2 Jun 2024 20:47:41 -0700
Subject: [PATCH 13/13] [libc] adjust phase order

---
 libc/src/__support/threads/linux/rwlock.h | 33 ++++++++++++-----------
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/libc/src/__support/threads/linux/rwlock.h b/libc/src/__support/threads/linux/rwlock.h
index ccf9378286098..72db6ed47adbb 100644
--- a/libc/src/__support/threads/linux/rwlock.h
+++ b/libc/src/__support/threads/linux/rwlock.h
@@ -361,29 +361,24 @@ class RwLock {
 
 private:
   template <Role role>
-  [[gnu::always_inline]] LIBC_INLINE LockResult
-  lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
-       unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
+  LIBC_INLINE LockResult
+  lock_slow(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
+            unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
     // Phase 1: deadlock detection.
     // A deadlock happens if this is a RAW/WAW lock in the same thread.
     if (writer_tid.load(cpp::MemoryOrder::RELAXED) == gettid())
       return LockResult::Deadlock;
 
-    // Phase 2: spin to get the initial state. We ignore the timing due to
-    // spin since it should end quickly.
-    State old = State::spin_reload<role>(state, preference, spin_count);
-    {
-      LockResult result = try_lock<role>(old);
-      if (result != LockResult::Busy)
-        return result;
-    }
-
 #if LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY
-    // Phase 3: convert the timeout if necessary.
+    // Phase 2: convert the timeout if necessary.
     if (timeout)
       ensure_monotonicity(*timeout);
 #endif
 
+    // Phase 3: spin to get the initial state. We do not count the spinning
+    // time against the timeout since it should end quickly.
+    State old = State::spin_reload<role>(state, preference, spin_count);
+
     // Enter the main acquisition loop.
     for (;;) {
       // Phase 4: if the lock can be acquired, try to acquire it.
@@ -444,15 +439,21 @@ class RwLock {
   }
 
 public:
-  [[gnu::always_inline]] LIBC_INLINE LockResult
+  LIBC_INLINE LockResult
   read_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
             unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
-    return lock<Role::Reader>(timeout, spin_count);
+    LockResult result = try_read_lock();
+    if (LIBC_LIKELY(result != LockResult::Busy))
+      return result;
+    return lock_slow<Role::Reader>(timeout, spin_count);
   }
   LIBC_INLINE LockResult
   write_lock(cpp::optional<Futex::Timeout> timeout = cpp::nullopt,
              unsigned spin_count = LIBC_COPT_RWLOCK_DEFAULT_SPIN_COUNT) {
-    return lock<Role::Writer>(timeout, spin_count);
+    LockResult result = try_write_lock();
+    if (LIBC_LIKELY(result != LockResult::Busy))
+      return result;
+    return lock_slow<Role::Writer>(timeout, spin_count);
   }
 
 private:
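
The reordering above also splits acquisition into an inlined fast path
(try_read_lock/try_write_lock) and an out-of-line lock_slow that does the
deadlock check, timeout conversion, spinning and futex waits. A generic sketch
of that fast/slow split (illustrative only; the slow path here is a plain
busy-wait standing in for the real futex-based one):

  #include <atomic>

  struct ToyLock {
    std::atomic<int> state{0};

    bool try_lock() { // fast path: a single CAS, cheap to inline
      int expected = 0;
      return state.compare_exchange_strong(expected, 1,
                                           std::memory_order_acquire);
    }

    void lock_slow() { // slow path: keep retrying until the CAS succeeds
      int expected = 0;
      while (!state.compare_exchange_weak(expected, 1,
                                          std::memory_order_acquire))
        expected = 0;
    }

    void lock() {
      if (try_lock()) // uncontended acquisitions return immediately
        return;
      lock_slow();    // contended acquisitions take the slow path
    }

    void unlock() { state.store(0, std::memory_order_release); }
  };
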


