[libc-commits] [libc] 9cb68b4 - [libc] Make the RPC headers work when included from CUDA or HIP (#120016)

via libc-commits libc-commits at lists.llvm.org
Mon Dec 16 07:09:06 PST 2024


Author: Joseph Huber
Date: 2024-12-16T09:09:03-06:00
New Revision: 9cb68b4ddad84f8c1f3a96ca3304d9916f3ee7da

URL: https://github.com/llvm/llvm-project/commit/9cb68b4ddad84f8c1f3a96ca3304d9916f3ee7da
DIFF: https://github.com/llvm/llvm-project/commit/9cb68b4ddad84f8c1f3a96ca3304d9916f3ee7da.diff

LOG: [libc] Make the RPC headers work when included from CUDA or HIP (#120016)

Summary:
In order for this to work with CUDA, we need to declare the functions as
both __host__ and __device__, while also making sure we only call the
GPU functions during the CUDA / HIP device compilation stage.
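Concretely, the patch replaces the old RPC_INLINE macro with an RPC_ATTRS
macro that expands to the CUDA / HIP function attributes whenever one of
those language modes is active (from the new rpc_util.h below):

    #ifndef RPC_ATTRS
    #if defined(__CUDA__) || defined(__HIP__)
    #define RPC_ATTRS __attribute__((host, device)) inline
    #else
    #define RPC_ATTRS inline
    #endif
    #endif

The GPU intrinsics are likewise only pulled in during the device pass, by
extending the target check so it is false for the host half of a CUDA or
HIP compilation:

    #if (defined(__NVPTX__) || defined(__AMDGPU__)) &&                         \
        !((defined(__CUDA__) && !defined(__CUDA_ARCH__)) ||                    \
          (defined(__HIP__) && !defined(__HIP_DEVICE_COMPILE__)))
    #include <gpuintrin.h>
    #define RPC_TARGET_IS_GPU
    #endif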

Added: 
    

Modified: 
    libc/shared/rpc.h
    libc/shared/rpc_util.h

Removed: 
    


################################################################################
diff --git a/libc/shared/rpc.h b/libc/shared/rpc.h
index 91dd8f9ad6aaf4..dd46d5dcb3dc0d 100644
--- a/libc/shared/rpc.h
+++ b/libc/shared/rpc.h
@@ -20,12 +20,6 @@
 
 #include "rpc_util.h"
 
-#include <stdint.h>
-
-#ifndef RPC_INLINE
-#define RPC_INLINE inline
-#endif
-
 namespace rpc {
 
 /// Use scoped atomic variants if they are available for the target.
@@ -78,12 +72,12 @@ constexpr static uint64_t MAX_PORT_COUNT = 4096;
 ///   - The server will always start with a 'recv' operation.
 ///   - Every 'send' or 'recv' call is mirrored by the other process.
 template <bool Invert> struct Process {
-  RPC_INLINE Process() = default;
-  RPC_INLINE Process(const Process &) = delete;
-  RPC_INLINE Process &operator=(const Process &) = delete;
-  RPC_INLINE Process(Process &&) = default;
-  RPC_INLINE Process &operator=(Process &&) = default;
-  RPC_INLINE ~Process() = default;
+  RPC_ATTRS Process() = default;
+  RPC_ATTRS Process(const Process &) = delete;
+  RPC_ATTRS Process &operator=(const Process &) = delete;
+  RPC_ATTRS Process(Process &&) = default;
+  RPC_ATTRS Process &operator=(Process &&) = default;
+  RPC_ATTRS ~Process() = default;
 
   const uint32_t port_count = 0;
   const uint32_t *const inbox = nullptr;
@@ -94,7 +88,7 @@ template <bool Invert> struct Process {
   static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
   uint32_t lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};
 
-  RPC_INLINE Process(uint32_t port_count, void *buffer)
+  RPC_ATTRS Process(uint32_t port_count, void *buffer)
       : port_count(port_count), inbox(reinterpret_cast<uint32_t *>(
                                     advance(buffer, inbox_offset(port_count)))),
         outbox(reinterpret_cast<uint32_t *>(
@@ -113,20 +107,20 @@ template <bool Invert> struct Process {
   ///   Header header[port_count];
   ///   Buffer packet[port_count][lane_size];
   /// };
-  RPC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count,
-                                                       uint32_t lane_size) {
+  RPC_ATTRS static constexpr uint64_t allocation_size(uint32_t port_count,
+                                                      uint32_t lane_size) {
     return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
   }
 
   /// Retrieve the inbox state from memory shared between processes.
-  RPC_INLINE uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
+  RPC_ATTRS uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
     return rpc::broadcast_value(
         lane_mask, __scoped_atomic_load_n(&inbox[index], __ATOMIC_RELAXED,
                                           __MEMORY_SCOPE_SYSTEM));
   }
 
   /// Retrieve the outbox state from memory shared between processes.
-  RPC_INLINE uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
+  RPC_ATTRS uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
     return rpc::broadcast_value(
         lane_mask, __scoped_atomic_load_n(&outbox[index], __ATOMIC_RELAXED,
                                           __MEMORY_SCOPE_SYSTEM));
@@ -136,7 +130,7 @@ template <bool Invert> struct Process {
   /// Equivalent to loading outbox followed by store of the inverted value
   /// The outbox is write only by this warp and tracking the value locally is
   /// cheaper than calling load_outbox to get the value to store.
-  RPC_INLINE uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
+  RPC_ATTRS uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
     uint32_t inverted_outbox = !current_outbox;
     __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM);
     __scoped_atomic_store_n(&outbox[index], inverted_outbox, __ATOMIC_RELAXED,
@@ -146,8 +140,8 @@ template <bool Invert> struct Process {
 
   // Given the current outbox and inbox values, wait until the inbox changes
   // to indicate that this thread owns the buffer element.
-  RPC_INLINE void wait_for_ownership(uint64_t lane_mask, uint32_t index,
-                                     uint32_t outbox, uint32_t in) {
+  RPC_ATTRS void wait_for_ownership(uint64_t lane_mask, uint32_t index,
+                                    uint32_t outbox, uint32_t in) {
     while (buffer_unavailable(in, outbox)) {
       sleep_briefly();
       in = load_inbox(lane_mask, index);
@@ -158,14 +152,14 @@ template <bool Invert> struct Process {
   /// The packet is a linearly allocated array of buffers used to communicate
   /// with the other process. This function returns the appropriate slot in this
   /// array such that the process can operate on an entire warp or wavefront.
-  RPC_INLINE Buffer *get_packet(uint32_t index, uint32_t lane_size) {
+  RPC_ATTRS Buffer *get_packet(uint32_t index, uint32_t lane_size) {
     return &packet[index * lane_size];
   }
 
   /// Determines if this process needs to wait for ownership of the buffer. We
   /// invert the condition on one of the processes to indicate that if one
   /// process owns the buffer then the other does not.
-  RPC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
+  RPC_ATTRS static bool buffer_unavailable(uint32_t in, uint32_t out) {
     bool cond = in != out;
     return Invert ? !cond : cond;
   }
@@ -174,7 +168,7 @@ template <bool Invert> struct Process {
   /// lane_mask is a bitmap of the threads in the warp that would hold the
   /// single lock on success, e.g. the result of rpc::get_lane_mask()
   /// The lock is held when the n-th bit of the lock bitfield is set.
-  RPC_INLINE bool try_lock(uint64_t lane_mask, uint32_t index) {
+  RPC_ATTRS bool try_lock(uint64_t lane_mask, uint32_t index) {
     // On amdgpu, test and set to the nth lock bit and a sync_lane would suffice
     // On volta, need to handle differences between the threads running and
     // the threads that were detected in the previous call to get_lane_mask()
@@ -214,7 +208,7 @@ template <bool Invert> struct Process {
 
   /// Unlock the lock at index. We need a lane sync to keep this function
   /// convergent, otherwise the compiler will sink the store and deadlock.
-  RPC_INLINE void unlock(uint64_t lane_mask, uint32_t index) {
+  RPC_ATTRS void unlock(uint64_t lane_mask, uint32_t index) {
     // Do not move any writes past the unlock.
     __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE);
 
@@ -227,40 +221,40 @@ template <bool Invert> struct Process {
   }
 
   /// Number of bytes to allocate for an inbox or outbox.
-  RPC_INLINE static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
+  RPC_ATTRS static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
     return port_count * sizeof(uint32_t);
   }
 
   /// Number of bytes to allocate for the buffer containing the packets.
-  RPC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count,
-                                                    uint32_t lane_size) {
+  RPC_ATTRS static constexpr uint64_t buffer_bytes(uint32_t port_count,
+                                                   uint32_t lane_size) {
     return port_count * lane_size * sizeof(Buffer);
   }
 
   /// Offset of the inbox in memory. This is the same as the outbox if inverted.
-  RPC_INLINE static constexpr uint64_t inbox_offset(uint32_t port_count) {
+  RPC_ATTRS static constexpr uint64_t inbox_offset(uint32_t port_count) {
     return Invert ? mailbox_bytes(port_count) : 0;
   }
 
   /// Offset of the outbox in memory. This is the same as the inbox if inverted.
-  RPC_INLINE static constexpr uint64_t outbox_offset(uint32_t port_count) {
+  RPC_ATTRS static constexpr uint64_t outbox_offset(uint32_t port_count) {
     return Invert ? 0 : mailbox_bytes(port_count);
   }
 
   /// Offset of the buffer containing the packets after the inbox and outbox.
-  RPC_INLINE static constexpr uint64_t header_offset(uint32_t port_count) {
+  RPC_ATTRS static constexpr uint64_t header_offset(uint32_t port_count) {
     return align_up(2 * mailbox_bytes(port_count), alignof(Header));
   }
 
   /// Offset of the buffer containing the packets after the inbox and outbox.
-  RPC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
+  RPC_ATTRS static constexpr uint64_t buffer_offset(uint32_t port_count) {
     return align_up(header_offset(port_count) + port_count * sizeof(Header),
                     alignof(Buffer));
   }
 
   /// Conditionally set the n-th bit in the atomic bitfield.
-  RPC_INLINE static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
-                                               bool cond) {
+  RPC_ATTRS static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
+                                              bool cond) {
     uint32_t slot = index / NUM_BITS_IN_WORD;
     uint32_t bit = index % NUM_BITS_IN_WORD;
     return __scoped_atomic_fetch_or(&bits[slot],
@@ -270,8 +264,8 @@ template <bool Invert> struct Process {
   }
 
   /// Conditionally clear the n-th bit in the atomic bitfield.
-  RPC_INLINE static constexpr uint32_t clear_nth(uint32_t *bits, uint32_t index,
-                                                 bool cond) {
+  RPC_ATTRS static constexpr uint32_t clear_nth(uint32_t *bits, uint32_t index,
+                                                bool cond) {
     uint32_t slot = index / NUM_BITS_IN_WORD;
     uint32_t bit = index % NUM_BITS_IN_WORD;
     return __scoped_atomic_fetch_and(&bits[slot],
@@ -283,8 +277,8 @@ template <bool Invert> struct Process {
 
 /// Invokes a function across every active buffer across the total lane size.
 template <typename F>
-RPC_INLINE static void invoke_rpc(F &&fn, uint32_t lane_size,
-                                  uint64_t lane_mask, Buffer *slot) {
+RPC_ATTRS static void invoke_rpc(F &&fn, uint32_t lane_size, uint64_t lane_mask,
+                                 Buffer *slot) {
   if constexpr (is_process_gpu()) {
     fn(&slot[rpc::get_lane_id()], rpc::get_lane_id());
   } else {
@@ -298,40 +292,37 @@ RPC_INLINE static void invoke_rpc(F &&fn, uint32_t lane_size,
 /// processes. A port is conceptually an index into the memory provided by the
 /// underlying process that is guarded by a lock bit.
 template <bool T> struct Port {
-  RPC_INLINE Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
-                  uint32_t index, uint32_t out)
+  RPC_ATTRS Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
+                 uint32_t index, uint32_t out)
       : process(process), lane_mask(lane_mask), lane_size(lane_size),
         index(index), out(out), receive(false), owns_buffer(true) {}
-  RPC_INLINE ~Port() = default;
+  RPC_ATTRS ~Port() = default;
 
 private:
-  RPC_INLINE Port(const Port &) = delete;
-  RPC_INLINE Port &operator=(const Port &) = delete;
-  RPC_INLINE Port(Port &&) = default;
-  RPC_INLINE Port &operator=(Port &&) = default;
+  RPC_ATTRS Port(const Port &) = delete;
+  RPC_ATTRS Port &operator=(const Port &) = delete;
+  RPC_ATTRS Port(Port &&) = default;
+  RPC_ATTRS Port &operator=(Port &&) = default;
 
   friend struct Client;
   friend struct Server;
   friend class rpc::optional<Port<T>>;
 
 public:
-  template <typename U> RPC_INLINE void recv(U use);
-  template <typename F> RPC_INLINE void send(F fill);
-  template <typename F, typename U>
-  RPC_INLINE void send_and_recv(F fill, U use);
-  template <typename W> RPC_INLINE void recv_and_send(W work);
-  RPC_INLINE void send_n(const void *const *src, uint64_t *size);
-  RPC_INLINE void send_n(const void *src, uint64_t size);
+  template <typename U> RPC_ATTRS void recv(U use);
+  template <typename F> RPC_ATTRS void send(F fill);
+  template <typename F, typename U> RPC_ATTRS void send_and_recv(F fill, U use);
+  template <typename W> RPC_ATTRS void recv_and_send(W work);
+  RPC_ATTRS void send_n(const void *const *src, uint64_t *size);
+  RPC_ATTRS void send_n(const void *src, uint64_t size);
   template <typename A>
-  RPC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);
+  RPC_ATTRS void recv_n(void **dst, uint64_t *size, A &&alloc);
 
-  RPC_INLINE uint32_t get_opcode() const {
-    return process.header[index].opcode;
-  }
+  RPC_ATTRS uint32_t get_opcode() const { return process.header[index].opcode; }
 
-  RPC_INLINE uint32_t get_index() const { return index; }
+  RPC_ATTRS uint32_t get_index() const { return index; }
 
-  RPC_INLINE void close() {
+  RPC_ATTRS void close() {
     // Wait for all lanes to finish using the port.
     rpc::sync_lane(lane_mask);
 
@@ -354,16 +345,16 @@ template <bool T> struct Port {
 
 /// The RPC client used to make requests to the server.
 struct Client {
-  RPC_INLINE Client() = default;
-  RPC_INLINE Client(const Client &) = delete;
-  RPC_INLINE Client &operator=(const Client &) = delete;
-  RPC_INLINE ~Client() = default;
+  RPC_ATTRS Client() = default;
+  RPC_ATTRS Client(const Client &) = delete;
+  RPC_ATTRS Client &operator=(const Client &) = delete;
+  RPC_ATTRS ~Client() = default;
 
-  RPC_INLINE Client(uint32_t port_count, void *buffer)
+  RPC_ATTRS Client(uint32_t port_count, void *buffer)
       : process(port_count, buffer) {}
 
   using Port = rpc::Port<false>;
-  template <uint32_t opcode> RPC_INLINE Port open();
+  template <uint32_t opcode> RPC_ATTRS Port open();
 
 private:
   Process<false> process;
@@ -371,21 +362,21 @@ struct Client {
 
 /// The RPC server used to respond to the client.
 struct Server {
-  RPC_INLINE Server() = default;
-  RPC_INLINE Server(const Server &) = delete;
-  RPC_INLINE Server &operator=(const Server &) = delete;
-  RPC_INLINE ~Server() = default;
+  RPC_ATTRS Server() = default;
+  RPC_ATTRS Server(const Server &) = delete;
+  RPC_ATTRS Server &operator=(const Server &) = delete;
+  RPC_ATTRS ~Server() = default;
 
-  RPC_INLINE Server(uint32_t port_count, void *buffer)
+  RPC_ATTRS Server(uint32_t port_count, void *buffer)
       : process(port_count, buffer) {}
 
   using Port = rpc::Port<true>;
-  RPC_INLINE rpc::optional<Port> try_open(uint32_t lane_size,
-                                          uint32_t start = 0);
-  RPC_INLINE Port open(uint32_t lane_size);
+  RPC_ATTRS rpc::optional<Port> try_open(uint32_t lane_size,
+                                         uint32_t start = 0);
+  RPC_ATTRS Port open(uint32_t lane_size);
 
-  RPC_INLINE static uint64_t allocation_size(uint32_t lane_size,
-                                             uint32_t port_count) {
+  RPC_ATTRS static uint64_t allocation_size(uint32_t lane_size,
+                                            uint32_t port_count) {
     return Process<true>::allocation_size(port_count, lane_size);
   }
 
@@ -394,7 +385,7 @@ struct Server {
 };
 
 /// Applies \p fill to the shared buffer and initiates a send operation.
-template <bool T> template <typename F> RPC_INLINE void Port<T>::send(F fill) {
+template <bool T> template <typename F> RPC_ATTRS void Port<T>::send(F fill) {
   uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);
 
   // We need to wait until we own the buffer before sending.
@@ -409,7 +400,7 @@ template <bool T> template <typename F> RPC_INLINE void Port<T>::send(F fill) {
 }
 
 /// Applies \p use to the shared buffer and acknowledges the send.
-template <bool T> template <typename U> RPC_INLINE void Port<T>::recv(U use) {
+template <bool T> template <typename U> RPC_ATTRS void Port<T>::recv(U use) {
   // We only exchange ownership of the buffer during a receive if we are waiting
   // for a previous receive to finish.
   if (receive) {
@@ -432,7 +423,7 @@ template <bool T> template <typename U> RPC_INLINE void Port<T>::recv(U use) {
 /// Combines a send and receive into a single function.
 template <bool T>
 template <typename F, typename U>
-RPC_INLINE void Port<T>::send_and_recv(F fill, U use) {
+RPC_ATTRS void Port<T>::send_and_recv(F fill, U use) {
   send(fill);
   recv(use);
 }
@@ -442,7 +433,7 @@ RPC_INLINE void Port<T>::send_and_recv(F fill, U use) {
 /// the copy back.
 template <bool T>
 template <typename W>
-RPC_INLINE void Port<T>::recv_and_send(W work) {
+RPC_ATTRS void Port<T>::recv_and_send(W work) {
   recv(work);
   send([](Buffer *, uint32_t) { /* no-op */ });
 }
@@ -450,7 +441,7 @@ RPC_INLINE void Port<T>::recv_and_send(W work) {
 /// Helper routine to simplify the interface when sending from the GPU using
 /// thread private pointers to the underlying value.
 template <bool T>
-RPC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
+RPC_ATTRS void Port<T>::send_n(const void *src, uint64_t size) {
   const void **src_ptr = &src;
   uint64_t *size_ptr = &size;
   send_n(src_ptr, size_ptr);
@@ -459,7 +450,7 @@ RPC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
 /// Sends an arbitrarily sized data buffer \p src across the shared channel in
 /// multiples of the packet length.
 template <bool T>
-RPC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
+RPC_ATTRS void Port<T>::send_n(const void *const *src, uint64_t *size) {
   uint64_t num_sends = 0;
   send([&](Buffer *buffer, uint32_t id) {
     reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
@@ -490,7 +481,7 @@ RPC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
 /// size of the data so that we can initialize the size of the \p dst buffer.
 template <bool T>
 template <typename A>
-RPC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
+RPC_ATTRS void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
   uint64_t num_recvs = 0;
   recv([&](Buffer *buffer, uint32_t id) {
     lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
@@ -524,7 +515,7 @@ RPC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
 /// port. Each port instance uses an associated \p opcode to tell the server
 /// what to do. The Client interface provides the appropriate lane size to the
 /// port using the platform's returned value.
-template <uint32_t opcode> RPC_INLINE Client::Port Client::open() {
+template <uint32_t opcode> RPC_ATTRS Client::Port Client::open() {
   // Repeatedly perform a naive linear scan for a port that can be opened to
   // send data.
   for (uint32_t index = 0;; ++index) {
@@ -558,7 +549,7 @@ template <uint32_t opcode> RPC_INLINE Client::Port Client::open() {
 
 /// Attempts to open a port to use as the server. The server can only open a
 /// port if it has a pending receive operation
-RPC_INLINE rpc::optional<typename Server::Port>
+RPC_ATTRS rpc::optional<typename Server::Port>
 Server::try_open(uint32_t lane_size, uint32_t start) {
   // Perform a naive linear scan for a port that has a pending request.
   for (uint32_t index = start; index < process.port_count; ++index) {
@@ -588,7 +579,7 @@ Server::try_open(uint32_t lane_size, uint32_t start) {
   return rpc::nullopt;
 }
 
-RPC_INLINE Server::Port Server::open(uint32_t lane_size) {
+RPC_ATTRS Server::Port Server::open(uint32_t lane_size) {
   for (;;) {
     if (rpc::optional<Server::Port> p = try_open(lane_size))
       return rpc::move(p.value());
@@ -596,6 +587,7 @@ RPC_INLINE Server::Port Server::open(uint32_t lane_size) {
   }
 }
 
+#undef RPC_ATTRS
 #if !__has_builtin(__scoped_atomic_load_n)
 #undef __scoped_atomic_load_n
 #undef __scoped_atomic_store_n

diff --git a/libc/shared/rpc_util.h b/libc/shared/rpc_util.h
index bb0177c01b85ea..9406de59f63b71 100644
--- a/libc/shared/rpc_util.h
+++ b/libc/shared/rpc_util.h
@@ -12,7 +12,9 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#if defined(__NVPTX__) || defined(__AMDGPU__)
+#if (defined(__NVPTX__) || defined(__AMDGPU__)) &&                             \
+    !((defined(__CUDA__) && !defined(__CUDA_ARCH__)) ||                        \
+      (defined(__HIP__) && !defined(__HIP_DEVICE_COMPILE__)))
 #include <gpuintrin.h>
 #define RPC_TARGET_IS_GPU
 #endif
@@ -22,8 +24,12 @@
 #define __has_builtin(x) 0
 #endif
 
-#ifndef RPC_INLINE
-#define RPC_INLINE inline
+#ifndef RPC_ATTRS
+#if defined(__CUDA__) || defined(__HIP__)
+#define RPC_ATTRS __attribute__((host, device)) inline
+#else
+#define RPC_ATTRS inline
+#endif
 #endif
 
 namespace rpc {
@@ -45,26 +51,26 @@ template <class T> struct is_const<const T> : type_constant<bool, true> {};
 
 /// Freestanding implementation of std::move.
 template <class T>
-RPC_INLINE constexpr typename remove_reference<T>::type &&move(T &&t) {
+RPC_ATTRS constexpr typename remove_reference<T>::type &&move(T &&t) {
   return static_cast<typename remove_reference<T>::type &&>(t);
 }
 
 /// Freestanding implementation of std::forward.
 template <typename T>
-RPC_INLINE constexpr T &&forward(typename remove_reference<T>::type &value) {
+RPC_ATTRS constexpr T &&forward(typename remove_reference<T>::type &value) {
   return static_cast<T &&>(value);
 }
 template <typename T>
-RPC_INLINE constexpr T &&forward(typename remove_reference<T>::type &&value) {
+RPC_ATTRS constexpr T &&forward(typename remove_reference<T>::type &&value) {
   return static_cast<T &&>(value);
 }
 
 struct in_place_t {
-  RPC_INLINE explicit in_place_t() = default;
+  RPC_ATTRS explicit in_place_t() = default;
 };
 
 struct nullopt_t {
-  RPC_INLINE constexpr explicit nullopt_t() = default;
+  RPC_ATTRS constexpr explicit nullopt_t() = default;
 };
 
 constexpr inline in_place_t in_place{};
@@ -80,15 +86,15 @@ template <typename T> class optional {
 
     bool in_use = false;
 
-    RPC_INLINE ~OptionalStorage() { reset(); }
+    RPC_ATTRS ~OptionalStorage() { reset(); }
 
-    RPC_INLINE constexpr OptionalStorage() : empty() {}
+    RPC_ATTRS constexpr OptionalStorage() : empty() {}
 
     template <typename... Args>
-    RPC_INLINE constexpr explicit OptionalStorage(in_place_t, Args &&...args)
+    RPC_ATTRS constexpr explicit OptionalStorage(in_place_t, Args &&...args)
         : stored_value(forward<Args>(args)...) {}
 
-    RPC_INLINE constexpr void reset() {
+    RPC_ATTRS constexpr void reset() {
       if (in_use)
         stored_value.~U();
       in_use = false;
@@ -98,58 +104,58 @@ template <typename T> class optional {
   OptionalStorage<T> storage;
 
 public:
-  RPC_INLINE constexpr optional() = default;
-  RPC_INLINE constexpr optional(nullopt_t) {}
+  RPC_ATTRS constexpr optional() = default;
+  RPC_ATTRS constexpr optional(nullopt_t) {}
 
-  RPC_INLINE constexpr optional(const T &t) : storage(in_place, t) {
+  RPC_ATTRS constexpr optional(const T &t) : storage(in_place, t) {
     storage.in_use = true;
   }
-  RPC_INLINE constexpr optional(const optional &) = default;
+  RPC_ATTRS constexpr optional(const optional &) = default;
 
-  RPC_INLINE constexpr optional(T &&t) : storage(in_place, move(t)) {
+  RPC_ATTRS constexpr optional(T &&t) : storage(in_place, move(t)) {
     storage.in_use = true;
   }
-  RPC_INLINE constexpr optional(optional &&O) = default;
+  RPC_ATTRS constexpr optional(optional &&O) = default;
 
-  RPC_INLINE constexpr optional &operator=(T &&t) {
+  RPC_ATTRS constexpr optional &operator=(T &&t) {
     storage = move(t);
     return *this;
   }
-  RPC_INLINE constexpr optional &operator=(optional &&) = default;
+  RPC_ATTRS constexpr optional &operator=(optional &&) = default;
 
-  RPC_INLINE constexpr optional &operator=(const T &t) {
+  RPC_ATTRS constexpr optional &operator=(const T &t) {
     storage = t;
     return *this;
   }
-  RPC_INLINE constexpr optional &operator=(const optional &) = default;
+  RPC_ATTRS constexpr optional &operator=(const optional &) = default;
 
-  RPC_INLINE constexpr void reset() { storage.reset(); }
+  RPC_ATTRS constexpr void reset() { storage.reset(); }
 
-  RPC_INLINE constexpr const T &value() const & { return storage.stored_value; }
+  RPC_ATTRS constexpr const T &value() const & { return storage.stored_value; }
 
-  RPC_INLINE constexpr T &value() & { return storage.stored_value; }
+  RPC_ATTRS constexpr T &value() & { return storage.stored_value; }
 
-  RPC_INLINE constexpr explicit operator bool() const { return storage.in_use; }
-  RPC_INLINE constexpr bool has_value() const { return storage.in_use; }
-  RPC_INLINE constexpr const T *operator->() const {
+  RPC_ATTRS constexpr explicit operator bool() const { return storage.in_use; }
+  RPC_ATTRS constexpr bool has_value() const { return storage.in_use; }
+  RPC_ATTRS constexpr const T *operator->() const {
     return &storage.stored_value;
   }
-  RPC_INLINE constexpr T *operator->() { return &storage.stored_value; }
-  RPC_INLINE constexpr const T &operator*() const & {
+  RPC_ATTRS constexpr T *operator->() { return &storage.stored_value; }
+  RPC_ATTRS constexpr const T &operator*() const & {
     return storage.stored_value;
   }
-  RPC_INLINE constexpr T &operator*() & { return storage.stored_value; }
+  RPC_ATTRS constexpr T &operator*() & { return storage.stored_value; }
 
-  RPC_INLINE constexpr T &&value() && { return move(storage.stored_value); }
-  RPC_INLINE constexpr T &&operator*() && { return move(storage.stored_value); }
+  RPC_ATTRS constexpr T &&value() && { return move(storage.stored_value); }
+  RPC_ATTRS constexpr T &&operator*() && { return move(storage.stored_value); }
 };
 
 /// Suspend the thread briefly to assist the thread scheduler during busy loops.
-RPC_INLINE void sleep_briefly() {
-#if defined(__NVPTX__)
+RPC_ATTRS void sleep_briefly() {
+#if defined(__NVPTX__) && defined(RPC_TARGET_IS_GPU)
   if (__nvvm_reflect("__CUDA_ARCH") >= 700)
     asm("nanosleep.u32 64;" ::: "memory");
-#elif defined(__AMDGPU__)
+#elif defined(__AMDGPU__) && defined(RPC_TARGET_IS_GPU)
   __builtin_amdgcn_s_sleep(2);
 #elif __has_builtin(__builtin_ia32_pause)
   __builtin_ia32_pause();
@@ -161,7 +167,7 @@ RPC_INLINE void sleep_briefly() {
 }
 
 /// Conditional to indicate if this process is running on the GPU.
-RPC_INLINE constexpr bool is_process_gpu() {
+RPC_ATTRS constexpr bool is_process_gpu() {
 #ifdef RPC_TARGET_IS_GPU
   return true;
 #else
@@ -170,14 +176,15 @@ RPC_INLINE constexpr bool is_process_gpu() {
 }
 
 /// Wait for all lanes in the group to complete.
-RPC_INLINE void sync_lane(uint64_t lane_mask) {
+RPC_ATTRS void sync_lane([[maybe_unused]] uint64_t lane_mask) {
 #ifdef RPC_TARGET_IS_GPU
   return __gpu_sync_lane(lane_mask);
 #endif
 }
 
 /// Copies the value from the first active thread to the rest.
-RPC_INLINE uint32_t broadcast_value(uint64_t lane_mask, uint32_t x) {
+RPC_ATTRS uint32_t broadcast_value([[maybe_unused]] uint64_t lane_mask,
+                                   uint32_t x) {
 #ifdef RPC_TARGET_IS_GPU
   return __gpu_read_first_lane_u32(lane_mask, x);
 #else
@@ -186,7 +193,7 @@ RPC_INLINE uint32_t broadcast_value(uint64_t lane_mask, uint32_t x) {
 }
 
 /// Returns the number of lanes that participate in the RPC interface.
-RPC_INLINE uint32_t get_num_lanes() {
+RPC_ATTRS uint32_t get_num_lanes() {
 #ifdef RPC_TARGET_IS_GPU
   return __gpu_num_lanes();
 #else
@@ -195,7 +202,7 @@ RPC_INLINE uint32_t get_num_lanes() {
 }
 
 /// Returns the id of the thread inside of an AMD wavefront executing together.
-RPC_INLINE uint64_t get_lane_mask() {
+RPC_ATTRS uint64_t get_lane_mask() {
 #ifdef RPC_TARGET_IS_GPU
   return __gpu_lane_mask();
 #else
@@ -204,7 +211,7 @@ RPC_INLINE uint64_t get_lane_mask() {
 }
 
 /// Returns the id of the thread inside of an AMD wavefront executing together.
-RPC_INLINE uint32_t get_lane_id() {
+RPC_ATTRS uint32_t get_lane_id() {
 #ifdef RPC_TARGET_IS_GPU
   return __gpu_lane_id();
 #else
@@ -213,7 +220,7 @@ RPC_INLINE uint32_t get_lane_id() {
 }
 
 /// Conditional that is only true for a single thread in a lane.
-RPC_INLINE bool is_first_lane(uint64_t lane_mask) {
+RPC_ATTRS bool is_first_lane([[maybe_unused]] uint64_t lane_mask) {
 #ifdef RPC_TARGET_IS_GPU
   return __gpu_is_first_in_lane(lane_mask);
 #else
@@ -222,7 +229,7 @@ RPC_INLINE bool is_first_lane(uint64_t lane_mask) {
 }
 
 /// Returns a bitmask of threads in the current lane for which \p x is true.
-RPC_INLINE uint64_t ballot(uint64_t lane_mask, bool x) {
+RPC_ATTRS uint64_t ballot([[maybe_unused]] uint64_t lane_mask, bool x) {
 #ifdef RPC_TARGET_IS_GPU
   return __gpu_ballot(lane_mask, x);
 #else
@@ -232,7 +239,7 @@ RPC_INLINE uint64_t ballot(uint64_t lane_mask, bool x) {
 
 /// Return \p val aligned "upwards" according to \p align.
 template <typename V, typename A>
-RPC_INLINE constexpr V align_up(V val, A align) {
+RPC_ATTRS constexpr V align_up(V val, A align) {
   return ((val + V(align) - 1) / V(align)) * V(align);
 }
 
@@ -240,14 +247,14 @@ RPC_INLINE constexpr V align_up(V val, A align) {
 /// model. On the GPU stack variables are always private to a lane so we can
 /// simply use the variable passed in. On the CPU we need to allocate enough
 /// space for the whole lane and index into it.
-template <typename V> RPC_INLINE V &lane_value(V *val, uint32_t id) {
+template <typename V> RPC_ATTRS V &lane_value(V *val, uint32_t id) {
   if constexpr (is_process_gpu())
     return *val;
   return val[id];
 }
 
 /// Advance the \p p by \p bytes.
-template <typename T, typename U> RPC_INLINE T *advance(T *ptr, U bytes) {
+template <typename T, typename U> RPC_ATTRS T *advance(T *ptr, U bytes) {
   if constexpr (is_const<T>::value)
     return reinterpret_cast<T *>(reinterpret_cast<const uint8_t *>(ptr) +
                                  bytes);
@@ -256,11 +263,11 @@ template <typename T, typename U> RPC_INLINE T *advance(T *ptr, U bytes) {
 }
 
 /// Wrapper around the optimal memory copy implementation for the target.
-RPC_INLINE void rpc_memcpy(void *dst, const void *src, size_t count) {
+RPC_ATTRS void rpc_memcpy(void *dst, const void *src, size_t count) {
   __builtin_memcpy(dst, src, count);
 }
 
-template <class T> RPC_INLINE constexpr const T &max(const T &a, const T &b) {
+template <class T> RPC_ATTRS constexpr const T &max(const T &a, const T &b) {
   return (a < b) ? b : a;
 }
 

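For reference, a minimal sketch of what this change enables: including the
headers from a single CUDA translation unit and using them from both the
host and device compile stages. The buffer setup, opcode value, lane size,
and kernel shown here are illustrative assumptions, not part of the commit:

    #include "shared/rpc.h"

    // Device half of the compilation: RPC_TARGET_IS_GPU is defined here,
    // so these calls lower to the GPU intrinsics from <gpuintrin.h>.
    __global__ void client_kernel(void *shared_buffer) {
      rpc::Client client(/*port_count=*/1, shared_buffer);
      rpc::Client::Port port = client.open</*opcode=*/0>();  // hypothetical opcode
      port.send([](rpc::Buffer *buffer, uint32_t) {
        reinterpret_cast<uint64_t *>(buffer->data)[0] = 42;
      });
      port.close();
    }

    // Host half of the same TU: the declarations remain visible because
    // RPC_ATTRS marks everything __host__ __device__, but no GPU
    // intrinsics are referenced during this stage.
    void host_server(void *shared_buffer, uint32_t lane_size) {
      rpc::Server server(/*port_count=*/1, shared_buffer);
      rpc::Server::Port port = server.open(lane_size);
      port.recv([](rpc::Buffer *buffer, uint32_t) { /* consume request */ });
      port.close();
    }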

More information about the libc-commits mailing list