[libc-commits] [libc] ba01a2c - [libc] Add memory fences to device-local locking calls
Jon Chesterfield via libc-commits
libc-commits at lists.llvm.org
Thu Jun 22 09:46:17 PDT 2023
Author: Jon Chesterfield
Date: 2023-06-22T17:46:09+01:00
New Revision: ba01a2c608cb6272934c41596cb3a8c3915cc514
URL: https://github.com/llvm/llvm-project/commit/ba01a2c608cb6272934c41596cb3a8c3915cc514
DIFF: https://github.com/llvm/llvm-project/commit/ba01a2c608cb6272934c41596cb3a8c3915cc514.diff
LOG: [libc] Add memory fences to device-local locking calls
This makes the interface less error-prone. The acquire fence was previously
forgotten. A release fence is still missing when recv() is the last operation
made before close.
Reviewed By: jhuber6
Differential Revision: https://reviews.llvm.org/D153571
Added:
Modified:
libc/src/__support/RPC/rpc.h
Removed:
################################################################################
diff --git a/libc/src/__support/RPC/rpc.h b/libc/src/__support/RPC/rpc.h
index c8427a7835e22..98555da93176b 100644
--- a/libc/src/__support/RPC/rpc.h
+++ b/libc/src/__support/RPC/rpc.h
@@ -184,13 +184,24 @@ template <bool Invert, uint32_t lane_size> struct Process {
//
// mask != packed implies at least one of the threads got the lock
// atomic semantics of fetch_or mean at most one of the threads for the lock
- return lane_mask != packed;
+
+ // If holding the lock then the caller can load values knowing said loads
+ // won't move past the lock. No such guarantee is needed if the lock acquire
+ // failed. This conditional branch is expected to fold in the caller after
+ // inlining the current function.
+ bool holding_lock = lane_mask != packed;
+ if (holding_lock)
+ atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
+ return holding_lock;
}
/// Unlock the lock at index. We need a lane sync to keep this function
/// convergent, otherwise the compiler will sink the store and deadlock.
[[clang::convergent]] LIBC_INLINE void unlock(uint64_t lane_mask,
uint64_t index) {
+ // Do not move any writes past the unlock
+ atomic_thread_fence(cpp::MemoryOrder::RELEASE);
+
// Wait for other threads in the warp to finish using the lock
gpu::sync_lane(lane_mask);
@@ -479,9 +490,6 @@ Client::try_open() {
if (!this->try_lock(lane_mask, index))
continue;
- // The mailbox state must be read with the lock held.
- atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
-
uint32_t in = this->load_inbox(index);
uint32_t out = this->load_outbox(index);
@@ -528,13 +536,9 @@ template <uint32_t lane_size>
// Attempt to acquire the lock on this index.
uint64_t lane_mask = gpu::get_lane_mask();
- // Attempt to acquire the lock on this index.
if (!this->try_lock(lane_mask, index))
continue;
- // The mailbox state must be read with the lock held.
- atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
-
in = this->load_inbox(index);
out = this->load_outbox(index);
More information about the libc-commits
mailing list