[libc-commits] [libc] [libc] Move RPC interface to `libc/shared` to export it (PR #117034)
Joseph Huber via libc-commits
libc-commits at lists.llvm.org
Wed Nov 20 11:31:26 PST 2024
https://github.com/jhuber6 created https://github.com/llvm/llvm-project/pull/117034
Summary:
Previous patches have made the `rpc.h` header independent of the `libc`
internals. This allows us to include it directly rather than providing
an indirect C API. This patch only does the work to move the header. A
future patch will pull out the `rpc_server` interface and simply replace
it with a single function that handles the opcodes.
From da3dfac7987e28a6cb7f1dff16fa1939a015bcbf Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Wed, 20 Nov 2024 13:24:04 -0600
Subject: [PATCH] [libc] Move RPC interface to `libc/shared` to export it
Summary:
Previous patches have made the `rpc.h` header independent of the `libc`
internals. This allows us to include it directly rather than providing
an indirect C API. This patch only does the work to move the header. A
future patch will pull out the `rpc_server` interface and simply replace
it with a single function that handles the opcodes.
---
libc/{src/__support/RPC => shared}/rpc.h | 162 +++++++++---------
libc/{src/__support/RPC => shared}/rpc_util.h | 109 ++++++------
libc/src/__support/RPC/CMakeLists.txt | 15 --
libc/src/__support/RPC/rpc_client.cpp | 2 +-
libc/src/__support/RPC/rpc_client.h | 8 +-
.../startup/gpu/rpc_interface_test.cpp | 50 ++++--
.../startup/gpu/rpc_stream_test.cpp | 6 +-
.../test/integration/startup/gpu/rpc_test.cpp | 14 +-
libc/utils/gpu/server/rpc_server.cpp | 52 ++++--
9 files changed, 222 insertions(+), 196 deletions(-)
rename libc/{src/__support/RPC => shared}/rpc.h (80%)
rename libc/{src/__support/RPC => shared}/rpc_util.h (61%)
diff --git a/libc/src/__support/RPC/rpc.h b/libc/shared/rpc.h
similarity index 80%
rename from libc/src/__support/RPC/rpc.h
rename to libc/shared/rpc.h
index 30dd2c1a8125d7..489a8cebfb807c 100644
--- a/libc/src/__support/RPC/rpc.h
+++ b/libc/shared/rpc.h
@@ -15,16 +15,17 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_H
-#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_H
+#ifndef LLVM_LIBC_SHARED_RPC_H
+#define LLVM_LIBC_SHARED_RPC_H
#include "rpc_util.h"
-#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
#include <stdint.h>
-namespace LIBC_NAMESPACE_DECL {
+#ifndef RPC_INLINE
+#define RPC_INLINE inline
+#endif
+
namespace rpc {
/// Use scoped atomic variants if they are available for the target.
@@ -70,12 +71,12 @@ constexpr static uint64_t MAX_PORT_COUNT = 4096;
/// - The server will always start with a 'recv' operation.
/// - Every 'send' or 'recv' call is mirrored by the other process.
template <bool Invert> struct Process {
- LIBC_INLINE Process() = default;
- LIBC_INLINE Process(const Process &) = delete;
- LIBC_INLINE Process &operator=(const Process &) = delete;
- LIBC_INLINE Process(Process &&) = default;
- LIBC_INLINE Process &operator=(Process &&) = default;
- LIBC_INLINE ~Process() = default;
+ RPC_INLINE Process() = default;
+ RPC_INLINE Process(const Process &) = delete;
+ RPC_INLINE Process &operator=(const Process &) = delete;
+ RPC_INLINE Process(Process &&) = default;
+ RPC_INLINE Process &operator=(Process &&) = default;
+ RPC_INLINE ~Process() = default;
uint32_t port_count = 0;
uint32_t *inbox = nullptr;
@@ -86,7 +87,7 @@ template <bool Invert> struct Process {
static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
uint32_t lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};
- LIBC_INLINE Process(uint32_t port_count, void *buffer)
+ RPC_INLINE Process(uint32_t port_count, void *buffer)
: port_count(port_count), inbox(reinterpret_cast<uint32_t *>(
advance(buffer, inbox_offset(port_count)))),
outbox(reinterpret_cast<uint32_t *>(
@@ -105,20 +106,20 @@ template <bool Invert> struct Process {
/// Header header[port_count];
/// Buffer packet[port_count][lane_size];
/// };
- LIBC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count,
- uint32_t lane_size) {
+ RPC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count,
+ uint32_t lane_size) {
return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
}
/// Retrieve the inbox state from memory shared between processes.
- LIBC_INLINE uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
+ RPC_INLINE uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
return rpc::broadcast_value(
lane_mask, __scoped_atomic_load_n(&inbox[index], __ATOMIC_RELAXED,
__MEMORY_SCOPE_SYSTEM));
}
/// Retrieve the outbox state from memory shared between processes.
- LIBC_INLINE uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
+ RPC_INLINE uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
return rpc::broadcast_value(
lane_mask, __scoped_atomic_load_n(&outbox[index], __ATOMIC_RELAXED,
__MEMORY_SCOPE_SYSTEM));
@@ -128,7 +129,7 @@ template <bool Invert> struct Process {
/// Equivalent to loading outbox followed by store of the inverted value
/// The outbox is write only by this warp and tracking the value locally is
/// cheaper than calling load_outbox to get the value to store.
- LIBC_INLINE uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
+ RPC_INLINE uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
uint32_t inverted_outbox = !current_outbox;
__scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM);
__scoped_atomic_store_n(&outbox[index], inverted_outbox, __ATOMIC_RELAXED,
@@ -138,8 +139,8 @@ template <bool Invert> struct Process {
// Given the current outbox and inbox values, wait until the inbox changes
// to indicate that this thread owns the buffer element.
- LIBC_INLINE void wait_for_ownership(uint64_t lane_mask, uint32_t index,
- uint32_t outbox, uint32_t in) {
+ RPC_INLINE void wait_for_ownership(uint64_t lane_mask, uint32_t index,
+ uint32_t outbox, uint32_t in) {
while (buffer_unavailable(in, outbox)) {
sleep_briefly();
in = load_inbox(lane_mask, index);
@@ -150,14 +151,14 @@ template <bool Invert> struct Process {
/// The packet is a linearly allocated array of buffers used to communicate
/// with the other process. This function returns the appropriate slot in this
/// array such that the process can operate on an entire warp or wavefront.
- LIBC_INLINE Buffer *get_packet(uint32_t index, uint32_t lane_size) {
+ RPC_INLINE Buffer *get_packet(uint32_t index, uint32_t lane_size) {
return &packet[index * lane_size];
}
/// Determines if this process needs to wait for ownership of the buffer. We
/// invert the condition on one of the processes to indicate that if one
/// process owns the buffer then the other does not.
- LIBC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
+ RPC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
bool cond = in != out;
return Invert ? !cond : cond;
}
@@ -166,7 +167,7 @@ template <bool Invert> struct Process {
/// lane_mask is a bitmap of the threads in the warp that would hold the
/// single lock on success, e.g. the result of rpc::get_lane_mask()
/// The lock is held when the n-th bit of the lock bitfield is set.
- LIBC_INLINE bool try_lock(uint64_t lane_mask, uint32_t index) {
+ RPC_INLINE bool try_lock(uint64_t lane_mask, uint32_t index) {
// On amdgpu, test and set to the nth lock bit and a sync_lane would suffice
// On volta, need to handle differences between the threads running and
// the threads that were detected in the previous call to get_lane_mask()
@@ -206,7 +207,7 @@ template <bool Invert> struct Process {
/// Unlock the lock at index. We need a lane sync to keep this function
/// convergent, otherwise the compiler will sink the store and deadlock.
- LIBC_INLINE void unlock(uint64_t lane_mask, uint32_t index) {
+ RPC_INLINE void unlock(uint64_t lane_mask, uint32_t index) {
// Do not move any writes past the unlock.
__scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE);
@@ -219,40 +220,40 @@ template <bool Invert> struct Process {
}
/// Number of bytes to allocate for an inbox or outbox.
- LIBC_INLINE static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
+ RPC_INLINE static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
return port_count * sizeof(uint32_t);
}
/// Number of bytes to allocate for the buffer containing the packets.
- LIBC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count,
- uint32_t lane_size) {
+ RPC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count,
+ uint32_t lane_size) {
return port_count * lane_size * sizeof(Buffer);
}
/// Offset of the inbox in memory. This is the same as the outbox if inverted.
- LIBC_INLINE static constexpr uint64_t inbox_offset(uint32_t port_count) {
+ RPC_INLINE static constexpr uint64_t inbox_offset(uint32_t port_count) {
return Invert ? mailbox_bytes(port_count) : 0;
}
/// Offset of the outbox in memory. This is the same as the inbox if inverted.
- LIBC_INLINE static constexpr uint64_t outbox_offset(uint32_t port_count) {
+ RPC_INLINE static constexpr uint64_t outbox_offset(uint32_t port_count) {
return Invert ? 0 : mailbox_bytes(port_count);
}
/// Offset of the buffer containing the packets after the inbox and outbox.
- LIBC_INLINE static constexpr uint64_t header_offset(uint32_t port_count) {
+ RPC_INLINE static constexpr uint64_t header_offset(uint32_t port_count) {
return align_up(2 * mailbox_bytes(port_count), alignof(Header));
}
/// Offset of the buffer containing the packets after the inbox and outbox.
- LIBC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
+ RPC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
return align_up(header_offset(port_count) + port_count * sizeof(Header),
alignof(Buffer));
}
/// Conditionally set the n-th bit in the atomic bitfield.
- LIBC_INLINE static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
- bool cond) {
+ RPC_INLINE static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
+ bool cond) {
uint32_t slot = index / NUM_BITS_IN_WORD;
uint32_t bit = index % NUM_BITS_IN_WORD;
return __scoped_atomic_fetch_or(&bits[slot],
@@ -262,8 +263,8 @@ template <bool Invert> struct Process {
}
/// Conditionally clear the n-th bit in the atomic bitfield.
- LIBC_INLINE static constexpr uint32_t clear_nth(uint32_t *bits,
- uint32_t index, bool cond) {
+ RPC_INLINE static constexpr uint32_t clear_nth(uint32_t *bits, uint32_t index,
+ bool cond) {
uint32_t slot = index / NUM_BITS_IN_WORD;
uint32_t bit = index % NUM_BITS_IN_WORD;
return __scoped_atomic_fetch_and(&bits[slot],
@@ -275,8 +276,8 @@ template <bool Invert> struct Process {
/// Invokes a function accross every active buffer across the total lane size.
template <typename F>
-LIBC_INLINE static void invoke_rpc(F &&fn, uint32_t lane_size,
- uint64_t lane_mask, Buffer *slot) {
+RPC_INLINE static void invoke_rpc(F &&fn, uint32_t lane_size,
+ uint64_t lane_mask, Buffer *slot) {
if constexpr (is_process_gpu()) {
fn(&slot[rpc::get_lane_id()], rpc::get_lane_id());
} else {
@@ -290,40 +291,40 @@ LIBC_INLINE static void invoke_rpc(F &&fn, uint32_t lane_size,
/// processes. A port is conceptually an index into the memory provided by the
/// underlying process that is guarded by a lock bit.
template <bool T> struct Port {
- LIBC_INLINE Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
- uint32_t index, uint32_t out)
+ RPC_INLINE Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
+ uint32_t index, uint32_t out)
: process(process), lane_mask(lane_mask), lane_size(lane_size),
index(index), out(out), receive(false), owns_buffer(true) {}
- LIBC_INLINE ~Port() = default;
+ RPC_INLINE ~Port() = default;
private:
- LIBC_INLINE Port(const Port &) = delete;
- LIBC_INLINE Port &operator=(const Port &) = delete;
- LIBC_INLINE Port(Port &&) = default;
- LIBC_INLINE Port &operator=(Port &&) = default;
+ RPC_INLINE Port(const Port &) = delete;
+ RPC_INLINE Port &operator=(const Port &) = delete;
+ RPC_INLINE Port(Port &&) = default;
+ RPC_INLINE Port &operator=(Port &&) = default;
friend struct Client;
friend struct Server;
friend class rpc::optional<Port<T>>;
public:
- template <typename U> LIBC_INLINE void recv(U use);
- template <typename F> LIBC_INLINE void send(F fill);
+ template <typename U> RPC_INLINE void recv(U use);
+ template <typename F> RPC_INLINE void send(F fill);
template <typename F, typename U>
- LIBC_INLINE void send_and_recv(F fill, U use);
- template <typename W> LIBC_INLINE void recv_and_send(W work);
- LIBC_INLINE void send_n(const void *const *src, uint64_t *size);
- LIBC_INLINE void send_n(const void *src, uint64_t size);
+ RPC_INLINE void send_and_recv(F fill, U use);
+ template <typename W> RPC_INLINE void recv_and_send(W work);
+ RPC_INLINE void send_n(const void *const *src, uint64_t *size);
+ RPC_INLINE void send_n(const void *src, uint64_t size);
template <typename A>
- LIBC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);
+ RPC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);
- LIBC_INLINE uint32_t get_opcode() const {
+ RPC_INLINE uint32_t get_opcode() const {
return process.header[index].opcode;
}
- LIBC_INLINE uint32_t get_index() const { return index; }
+ RPC_INLINE uint32_t get_index() const { return index; }
- LIBC_INLINE void close() {
+ RPC_INLINE void close() {
// Wait for all lanes to finish using the port.
rpc::sync_lane(lane_mask);
@@ -346,16 +347,16 @@ template <bool T> struct Port {
/// The RPC client used to make requests to the server.
struct Client {
- LIBC_INLINE Client() = default;
- LIBC_INLINE Client(const Client &) = delete;
- LIBC_INLINE Client &operator=(const Client &) = delete;
- LIBC_INLINE ~Client() = default;
+ RPC_INLINE Client() = default;
+ RPC_INLINE Client(const Client &) = delete;
+ RPC_INLINE Client &operator=(const Client &) = delete;
+ RPC_INLINE ~Client() = default;
- LIBC_INLINE Client(uint32_t port_count, void *buffer)
+ RPC_INLINE Client(uint32_t port_count, void *buffer)
: process(port_count, buffer) {}
using Port = rpc::Port<false>;
- template <uint32_t opcode> LIBC_INLINE Port open();
+ template <uint32_t opcode> RPC_INLINE Port open();
private:
Process<false> process;
@@ -363,21 +364,21 @@ struct Client {
/// The RPC server used to respond to the client.
struct Server {
- LIBC_INLINE Server() = default;
- LIBC_INLINE Server(const Server &) = delete;
- LIBC_INLINE Server &operator=(const Server &) = delete;
- LIBC_INLINE ~Server() = default;
+ RPC_INLINE Server() = default;
+ RPC_INLINE Server(const Server &) = delete;
+ RPC_INLINE Server &operator=(const Server &) = delete;
+ RPC_INLINE ~Server() = default;
- LIBC_INLINE Server(uint32_t port_count, void *buffer)
+ RPC_INLINE Server(uint32_t port_count, void *buffer)
: process(port_count, buffer) {}
using Port = rpc::Port<true>;
- LIBC_INLINE rpc::optional<Port> try_open(uint32_t lane_size,
- uint32_t start = 0);
- LIBC_INLINE Port open(uint32_t lane_size);
+ RPC_INLINE rpc::optional<Port> try_open(uint32_t lane_size,
+ uint32_t start = 0);
+ RPC_INLINE Port open(uint32_t lane_size);
- LIBC_INLINE static uint64_t allocation_size(uint32_t lane_size,
- uint32_t port_count) {
+ RPC_INLINE static uint64_t allocation_size(uint32_t lane_size,
+ uint32_t port_count) {
return Process<true>::allocation_size(port_count, lane_size);
}
@@ -386,7 +387,7 @@ struct Server {
};
/// Applies \p fill to the shared buffer and initiates a send operation.
-template <bool T> template <typename F> LIBC_INLINE void Port<T>::send(F fill) {
+template <bool T> template <typename F> RPC_INLINE void Port<T>::send(F fill) {
uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);
// We need to wait until we own the buffer before sending.
@@ -401,7 +402,7 @@ template <bool T> template <typename F> LIBC_INLINE void Port<T>::send(F fill) {
}
/// Applies \p use to the shared buffer and acknowledges the send.
-template <bool T> template <typename U> LIBC_INLINE void Port<T>::recv(U use) {
+template <bool T> template <typename U> RPC_INLINE void Port<T>::recv(U use) {
// We only exchange ownership of the buffer during a receive if we are waiting
// for a previous receive to finish.
if (receive) {
@@ -424,7 +425,7 @@ template <bool T> template <typename U> LIBC_INLINE void Port<T>::recv(U use) {
/// Combines a send and receive into a single function.
template <bool T>
template <typename F, typename U>
-LIBC_INLINE void Port<T>::send_and_recv(F fill, U use) {
+RPC_INLINE void Port<T>::send_and_recv(F fill, U use) {
send(fill);
recv(use);
}
@@ -434,7 +435,7 @@ LIBC_INLINE void Port<T>::send_and_recv(F fill, U use) {
/// the copy back.
template <bool T>
template <typename W>
-LIBC_INLINE void Port<T>::recv_and_send(W work) {
+RPC_INLINE void Port<T>::recv_and_send(W work) {
recv(work);
send([](Buffer *, uint32_t) { /* no-op */ });
}
@@ -442,7 +443,7 @@ LIBC_INLINE void Port<T>::recv_and_send(W work) {
/// Helper routine to simplify the interface when sending from the GPU using
/// thread private pointers to the underlying value.
template <bool T>
-LIBC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
+RPC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
const void **src_ptr = &src;
uint64_t *size_ptr = &size;
send_n(src_ptr, size_ptr);
@@ -451,7 +452,7 @@ LIBC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
/// Sends an arbitrarily sized data buffer \p src across the shared channel in
/// multiples of the packet length.
template <bool T>
-LIBC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
+RPC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
uint64_t num_sends = 0;
send([&](Buffer *buffer, uint32_t id) {
reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
@@ -482,7 +483,7 @@ LIBC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
/// size of the data so that we can initialize the size of the \p dst buffer.
template <bool T>
template <typename A>
-LIBC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
+RPC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
uint64_t num_recvs = 0;
recv([&](Buffer *buffer, uint32_t id) {
lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
@@ -516,7 +517,7 @@ LIBC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
/// port. Each port instance uses an associated \p opcode to tell the server
/// what to do. The Client interface provides the appropriate lane size to the
/// port using the platform's returned value.
-template <uint32_t opcode> LIBC_INLINE Client::Port Client::open() {
+template <uint32_t opcode> RPC_INLINE Client::Port Client::open() {
// Repeatedly perform a naive linear scan for a port that can be opened to
// send data.
for (uint32_t index = 0;; ++index) {
@@ -550,7 +551,7 @@ template <uint32_t opcode> LIBC_INLINE Client::Port Client::open() {
/// Attempts to open a port to use as the server. The server can only open a
/// port if it has a pending receive operation
-LIBC_INLINE rpc::optional<typename Server::Port>
+RPC_INLINE rpc::optional<typename Server::Port>
Server::try_open(uint32_t lane_size, uint32_t start) {
// Perform a naive linear scan for a port that has a pending request.
for (uint32_t index = start; index < process.port_count; ++index) {
@@ -580,7 +581,7 @@ Server::try_open(uint32_t lane_size, uint32_t start) {
return rpc::nullopt;
}
-LIBC_INLINE Server::Port Server::open(uint32_t lane_size) {
+RPC_INLINE Server::Port Server::open(uint32_t lane_size) {
for (;;) {
if (rpc::optional<Server::Port> p = try_open(lane_size))
return rpc::move(p.value());
@@ -599,6 +600,5 @@ LIBC_INLINE Server::Port Server::open(uint32_t lane_size) {
#endif
} // namespace rpc
-} // namespace LIBC_NAMESPACE_DECL
-#endif
+#endif // LLVM_LIBC_SHARED_RPC_H
diff --git a/libc/src/__support/RPC/rpc_util.h b/libc/shared/rpc_util.h
similarity index 61%
rename from libc/src/__support/RPC/rpc_util.h
rename to libc/shared/rpc_util.h
index 7067dfc974eb31..502014d839ae94 100644
--- a/libc/src/__support/RPC/rpc_util.h
+++ b/libc/shared/rpc_util.h
@@ -6,11 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
-#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
-
-#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
+#ifndef LLVM_LIBC_SHARED_RPC_UTIL_H
+#define LLVM_LIBC_SHARED_RPC_UTIL_H
#include <stddef.h>
#include <stdint.h>
@@ -20,7 +17,10 @@
#define RPC_TARGET_IS_GPU
#endif
-namespace LIBC_NAMESPACE_DECL {
+#ifndef RPC_INLINE
+#define RPC_INLINE inline
+#endif
+
namespace rpc {
template <typename T> struct type_identity {
@@ -40,26 +40,26 @@ template <class T> struct is_const<const T> : type_constant<bool, true> {};
/// Freestanding implementation of std::move.
template <class T>
-LIBC_INLINE constexpr typename remove_reference<T>::type &&move(T &&t) {
+RPC_INLINE constexpr typename remove_reference<T>::type &&move(T &&t) {
return static_cast<typename remove_reference<T>::type &&>(t);
}
/// Freestanding implementation of std::forward.
template <typename T>
-LIBC_INLINE constexpr T &&forward(typename remove_reference<T>::type &value) {
+RPC_INLINE constexpr T &&forward(typename remove_reference<T>::type &value) {
return static_cast<T &&>(value);
}
template <typename T>
-LIBC_INLINE constexpr T &&forward(typename remove_reference<T>::type &&value) {
+RPC_INLINE constexpr T &&forward(typename remove_reference<T>::type &&value) {
return static_cast<T &&>(value);
}
struct in_place_t {
- LIBC_INLINE explicit in_place_t() = default;
+ RPC_INLINE explicit in_place_t() = default;
};
struct nullopt_t {
- LIBC_INLINE constexpr explicit nullopt_t() = default;
+ RPC_INLINE constexpr explicit nullopt_t() = default;
};
constexpr inline in_place_t in_place{};
@@ -75,15 +75,15 @@ template <typename T> class optional {
bool in_use = false;
- LIBC_INLINE ~OptionalStorage() { reset(); }
+ RPC_INLINE ~OptionalStorage() { reset(); }
- LIBC_INLINE constexpr OptionalStorage() : empty() {}
+ RPC_INLINE constexpr OptionalStorage() : empty() {}
template <typename... Args>
- LIBC_INLINE constexpr explicit OptionalStorage(in_place_t, Args &&...args)
+ RPC_INLINE constexpr explicit OptionalStorage(in_place_t, Args &&...args)
: stored_value(forward<Args>(args)...) {}
- LIBC_INLINE constexpr void reset() {
+ RPC_INLINE constexpr void reset() {
if (in_use)
stored_value.~U();
in_use = false;
@@ -93,60 +93,54 @@ template <typename T> class optional {
OptionalStorage<T> storage;
public:
- LIBC_INLINE constexpr optional() = default;
- LIBC_INLINE constexpr optional(nullopt_t) {}
+ RPC_INLINE constexpr optional() = default;
+ RPC_INLINE constexpr optional(nullopt_t) {}
- LIBC_INLINE constexpr optional(const T &t) : storage(in_place, t) {
+ RPC_INLINE constexpr optional(const T &t) : storage(in_place, t) {
storage.in_use = true;
}
- LIBC_INLINE constexpr optional(const optional &) = default;
+ RPC_INLINE constexpr optional(const optional &) = default;
- LIBC_INLINE constexpr optional(T &&t) : storage(in_place, move(t)) {
+ RPC_INLINE constexpr optional(T &&t) : storage(in_place, move(t)) {
storage.in_use = true;
}
- LIBC_INLINE constexpr optional(optional &&O) = default;
+ RPC_INLINE constexpr optional(optional &&O) = default;
- LIBC_INLINE constexpr optional &operator=(T &&t) {
+ RPC_INLINE constexpr optional &operator=(T &&t) {
storage = move(t);
return *this;
}
- LIBC_INLINE constexpr optional &operator=(optional &&) = default;
+ RPC_INLINE constexpr optional &operator=(optional &&) = default;
- LIBC_INLINE constexpr optional &operator=(const T &t) {
+ RPC_INLINE constexpr optional &operator=(const T &t) {
storage = t;
return *this;
}
- LIBC_INLINE constexpr optional &operator=(const optional &) = default;
+ RPC_INLINE constexpr optional &operator=(const optional &) = default;
- LIBC_INLINE constexpr void reset() { storage.reset(); }
+ RPC_INLINE constexpr void reset() { storage.reset(); }
- LIBC_INLINE constexpr const T &value() const & {
- return storage.stored_value;
- }
+ RPC_INLINE constexpr const T &value() const & { return storage.stored_value; }
- LIBC_INLINE constexpr T &value() & { return storage.stored_value; }
+ RPC_INLINE constexpr T &value() & { return storage.stored_value; }
- LIBC_INLINE constexpr explicit operator bool() const {
- return storage.in_use;
- }
- LIBC_INLINE constexpr bool has_value() const { return storage.in_use; }
- LIBC_INLINE constexpr const T *operator->() const {
+ RPC_INLINE constexpr explicit operator bool() const { return storage.in_use; }
+ RPC_INLINE constexpr bool has_value() const { return storage.in_use; }
+ RPC_INLINE constexpr const T *operator->() const {
return &storage.stored_value;
}
- LIBC_INLINE constexpr T *operator->() { return &storage.stored_value; }
- LIBC_INLINE constexpr const T &operator*() const & {
+ RPC_INLINE constexpr T *operator->() { return &storage.stored_value; }
+ RPC_INLINE constexpr const T &operator*() const & {
return storage.stored_value;
}
- LIBC_INLINE constexpr T &operator*() & { return storage.stored_value; }
+ RPC_INLINE constexpr T &operator*() & { return storage.stored_value; }
- LIBC_INLINE constexpr T &&value() && { return move(storage.stored_value); }
- LIBC_INLINE constexpr T &&operator*() && {
- return move(storage.stored_value);
- }
+ RPC_INLINE constexpr T &&value() && { return move(storage.stored_value); }
+ RPC_INLINE constexpr T &&operator*() && { return move(storage.stored_value); }
};
/// Suspend the thread briefly to assist the thread scheduler during busy loops.
-LIBC_INLINE void sleep_briefly() {
+RPC_INLINE void sleep_briefly() {
#if defined(LIBC_TARGET_ARCH_IS_NVPTX)
if (__nvvm_reflect("__CUDA_ARCH") >= 700)
asm("nanosleep.u32 64;" ::: "memory");
@@ -164,7 +158,7 @@ LIBC_INLINE void sleep_briefly() {
}
/// Conditional to indicate if this process is running on the GPU.
-LIBC_INLINE constexpr bool is_process_gpu() {
+RPC_INLINE constexpr bool is_process_gpu() {
#ifdef RPC_TARGET_IS_GPU
return true;
#else
@@ -173,14 +167,14 @@ LIBC_INLINE constexpr bool is_process_gpu() {
}
/// Wait for all lanes in the group to complete.
-LIBC_INLINE void sync_lane(uint64_t lane_mask) {
+RPC_INLINE void sync_lane(uint64_t lane_mask) {
#ifdef RPC_TARGET_IS_GPU
return __gpu_sync_lane(lane_mask);
#endif
}
/// Copies the value from the first active thread to the rest.
-LIBC_INLINE uint32_t broadcast_value(uint64_t lane_mask, uint32_t x) {
+RPC_INLINE uint32_t broadcast_value(uint64_t lane_mask, uint32_t x) {
#ifdef RPC_TARGET_IS_GPU
return __gpu_read_first_lane_u32(lane_mask, x);
#else
@@ -189,7 +183,7 @@ LIBC_INLINE uint32_t broadcast_value(uint64_t lane_mask, uint32_t x) {
}
/// Returns the number lanes that participate in the RPC interface.
-LIBC_INLINE uint32_t get_num_lanes() {
+RPC_INLINE uint32_t get_num_lanes() {
#ifdef RPC_TARGET_IS_GPU
return __gpu_num_lanes();
#else
@@ -198,7 +192,7 @@ LIBC_INLINE uint32_t get_num_lanes() {
}
/// Returns the id of the thread inside of an AMD wavefront executing together.
-LIBC_INLINE uint64_t get_lane_mask() {
+RPC_INLINE uint64_t get_lane_mask() {
#ifdef RPC_TARGET_IS_GPU
return __gpu_lane_mask();
#else
@@ -207,7 +201,7 @@ LIBC_INLINE uint64_t get_lane_mask() {
}
/// Returns the id of the thread inside of an AMD wavefront executing together.
-LIBC_INLINE uint32_t get_lane_id() {
+RPC_INLINE uint32_t get_lane_id() {
#ifdef RPC_TARGET_IS_GPU
return __gpu_lane_id();
#else
@@ -216,7 +210,7 @@ LIBC_INLINE uint32_t get_lane_id() {
}
/// Conditional that is only true for a single thread in a lane.
-LIBC_INLINE bool is_first_lane(uint64_t lane_mask) {
+RPC_INLINE bool is_first_lane(uint64_t lane_mask) {
#ifdef RPC_TARGET_IS_GPU
return __gpu_is_first_in_lane(lane_mask);
#else
@@ -225,7 +219,7 @@ LIBC_INLINE bool is_first_lane(uint64_t lane_mask) {
}
/// Returns a bitmask of threads in the current lane for which \p x is true.
-LIBC_INLINE uint64_t ballot(uint64_t lane_mask, bool x) {
+RPC_INLINE uint64_t ballot(uint64_t lane_mask, bool x) {
#ifdef RPC_TARGET_IS_GPU
return __gpu_ballot(lane_mask, x);
#else
@@ -235,7 +229,7 @@ LIBC_INLINE uint64_t ballot(uint64_t lane_mask, bool x) {
/// Return \p val aligned "upwards" according to \p align.
template <typename V, typename A>
-LIBC_INLINE constexpr V align_up(V val, A align) {
+RPC_INLINE constexpr V align_up(V val, A align) {
return ((val + V(align) - 1) / V(align)) * V(align);
}
@@ -243,14 +237,14 @@ LIBC_INLINE constexpr V align_up(V val, A align) {
/// model. On the GPU stack variables are always private to a lane so we can
/// simply use the variable passed in. On the CPU we need to allocate enough
/// space for the whole lane and index into it.
-template <typename V> LIBC_INLINE V &lane_value(V *val, uint32_t id) {
+template <typename V> RPC_INLINE V &lane_value(V *val, uint32_t id) {
if constexpr (is_process_gpu())
return *val;
return val[id];
}
/// Advance the \p p by \p bytes.
-template <typename T, typename U> LIBC_INLINE T *advance(T *ptr, U bytes) {
+template <typename T, typename U> RPC_INLINE T *advance(T *ptr, U bytes) {
if constexpr (is_const<T>::value)
return reinterpret_cast<T *>(reinterpret_cast<const uint8_t *>(ptr) +
bytes);
@@ -259,15 +253,14 @@ template <typename T, typename U> LIBC_INLINE T *advance(T *ptr, U bytes) {
}
/// Wrapper around the optimal memory copy implementation for the target.
-LIBC_INLINE void rpc_memcpy(void *dst, const void *src, size_t count) {
+RPC_INLINE void rpc_memcpy(void *dst, const void *src, size_t count) {
__builtin_memcpy(dst, src, count);
}
-template <class T> LIBC_INLINE constexpr const T &max(const T &a, const T &b) {
+template <class T> RPC_INLINE constexpr const T &max(const T &a, const T &b) {
return (a < b) ? b : a;
}
} // namespace rpc
-} // namespace LIBC_NAMESPACE_DECL
-#endif // LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
+#endif // LLVM_LIBC_SHARED_RPC_UTIL_H
diff --git a/libc/src/__support/RPC/CMakeLists.txt b/libc/src/__support/RPC/CMakeLists.txt
index 183fc6f8683e06..0a7141fb60bf03 100644
--- a/libc/src/__support/RPC/CMakeLists.txt
+++ b/libc/src/__support/RPC/CMakeLists.txt
@@ -2,20 +2,6 @@ if(NOT LIBC_TARGET_OS_IS_GPU)
return()
endif()
-add_header_library(
- rpc
- HDRS
- rpc.h
- rpc_util.h
- DEPENDS
- libc.src.__support.common
- libc.src.__support.CPP.algorithm
- libc.src.__support.CPP.atomic
- libc.src.__support.CPP.functional
- libc.src.__support.CPP.optional
- libc.src.__support.GPU.utils
-)
-
add_object_library(
rpc_client
SRCS
@@ -25,5 +11,4 @@ add_object_library(
DEPENDS
libc.include.gpu_rpc
libc.src.__support.GPU.utils
- .rpc
)
diff --git a/libc/src/__support/RPC/rpc_client.cpp b/libc/src/__support/RPC/rpc_client.cpp
index 232b20d008d1d5..c26cf9ca2ddbe6 100644
--- a/libc/src/__support/RPC/rpc_client.cpp
+++ b/libc/src/__support/RPC/rpc_client.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "rpc_client.h"
-#include "rpc.h"
+
#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/__support/RPC/rpc_client.h b/libc/src/__support/RPC/rpc_client.h
index 7bd6d0b5e00b47..8923e62e0e22a0 100644
--- a/libc/src/__support/RPC/rpc_client.h
+++ b/libc/src/__support/RPC/rpc_client.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_CLIENT_H
#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_CLIENT_H
-#include "rpc.h"
+#include "shared/rpc.h"
#include "include/llvm-libc-types/rpc_opcodes_t.h"
#include "src/__support/CPP/type_traits.h"
@@ -18,6 +18,12 @@
namespace LIBC_NAMESPACE_DECL {
namespace rpc {
+using ::rpc::Buffer;
+using ::rpc::Client;
+using ::rpc::Port;
+using ::rpc::Process;
+using ::rpc::Server;
+
static_assert(cpp::is_trivially_copyable<Client>::value &&
sizeof(Process<true>) == sizeof(Process<false>),
"The client is not trivially copyable from the server");
diff --git a/libc/test/integration/startup/gpu/rpc_interface_test.cpp b/libc/test/integration/startup/gpu/rpc_interface_test.cpp
index 2dafa911783ffc..b05ffb92699bf7 100644
--- a/libc/test/integration/startup/gpu/rpc_interface_test.cpp
+++ b/libc/test/integration/startup/gpu/rpc_interface_test.cpp
@@ -17,27 +17,43 @@ using namespace LIBC_NAMESPACE;
// as long as they are mirrored.
static void test_interface(bool end_with_send) {
uint64_t cnt = 0;
- rpc::Client::Port port = rpc::client.open<RPC_TEST_INTERFACE>();
- port.send(
- [&](rpc::Buffer *buffer, uint32_t) { buffer->data[0] = end_with_send; });
- port.send(
- [&](rpc::Buffer *buffer, uint32_t) { buffer->data[0] = cnt = cnt + 1; });
- port.recv([&](rpc::Buffer *buffer, uint32_t) { cnt = buffer->data[0]; });
- port.send(
- [&](rpc::Buffer *buffer, uint32_t) { buffer->data[0] = cnt = cnt + 1; });
- port.recv([&](rpc::Buffer *buffer, uint32_t) { cnt = buffer->data[0]; });
- port.send(
- [&](rpc::Buffer *buffer, uint32_t) { buffer->data[0] = cnt = cnt + 1; });
- port.send(
- [&](rpc::Buffer *buffer, uint32_t) { buffer->data[0] = cnt = cnt + 1; });
- port.recv([&](rpc::Buffer *buffer, uint32_t) { cnt = buffer->data[0]; });
- port.recv([&](rpc::Buffer *buffer, uint32_t) { cnt = buffer->data[0]; });
+ LIBC_NAMESPACE::rpc::Client::Port port =
+ LIBC_NAMESPACE::rpc::client.open<RPC_TEST_INTERFACE>();
+ port.send([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ buffer->data[0] = end_with_send;
+ });
+ port.send([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ buffer->data[0] = cnt = cnt + 1;
+ });
+ port.recv([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ cnt = buffer->data[0];
+ });
+ port.send([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ buffer->data[0] = cnt = cnt + 1;
+ });
+ port.recv([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ cnt = buffer->data[0];
+ });
+ port.send([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ buffer->data[0] = cnt = cnt + 1;
+ });
+ port.send([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ buffer->data[0] = cnt = cnt + 1;
+ });
+ port.recv([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ cnt = buffer->data[0];
+ });
+ port.recv([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ cnt = buffer->data[0];
+ });
if (end_with_send)
- port.send([&](rpc::Buffer *buffer, uint32_t) {
+ port.send([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
buffer->data[0] = cnt = cnt + 1;
});
else
- port.recv([&](rpc::Buffer *buffer, uint32_t) { cnt = buffer->data[0]; });
+ port.recv([&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ cnt = buffer->data[0];
+ });
port.close();
ASSERT_TRUE(cnt == 9 && "Invalid number of increments");
diff --git a/libc/test/integration/startup/gpu/rpc_stream_test.cpp b/libc/test/integration/startup/gpu/rpc_stream_test.cpp
index 09a4ae67256e3a..208130bcfd9a96 100644
--- a/libc/test/integration/startup/gpu/rpc_stream_test.cpp
+++ b/libc/test/integration/startup/gpu/rpc_stream_test.cpp
@@ -34,7 +34,8 @@ static void test_stream() {
inline_memcpy(send_ptr, str, send_size);
ASSERT_TRUE(inline_memcmp(send_ptr, str, send_size) == 0 && "Data mismatch");
- rpc::Client::Port port = rpc::client.open<RPC_TEST_STREAM>();
+ LIBC_NAMESPACE::rpc::Client::Port port =
+ LIBC_NAMESPACE::rpc::client.open<RPC_TEST_STREAM>();
port.send_n(send_ptr, send_size);
port.recv_n(&recv_ptr, &recv_size,
[](uint64_t size) { return malloc(size); });
@@ -77,7 +78,8 @@ static void test_divergent() {
inline_memcpy(buffer, &data[offset], offset);
ASSERT_TRUE(inline_memcmp(buffer, &data[offset], offset) == 0 &&
"Data mismatch");
- rpc::Client::Port port = rpc::client.open<RPC_TEST_STREAM>();
+ LIBC_NAMESPACE::rpc::Client::Port port =
+ LIBC_NAMESPACE::rpc::client.open<RPC_TEST_STREAM>();
port.send_n(buffer, offset);
inline_memset(buffer, offset, 0);
port.recv_n(&recv_ptr, &recv_size, [&](uint64_t) { return buffer; });
diff --git a/libc/test/integration/startup/gpu/rpc_test.cpp b/libc/test/integration/startup/gpu/rpc_test.cpp
index bec8171180a055..3deb72b9f85dab 100644
--- a/libc/test/integration/startup/gpu/rpc_test.cpp
+++ b/libc/test/integration/startup/gpu/rpc_test.cpp
@@ -18,12 +18,13 @@ static void test_add_simple() {
10 + 10 * gpu::get_thread_id() + 10 * gpu::get_block_id();
uint64_t cnt = 0;
for (uint32_t i = 0; i < num_additions; ++i) {
- rpc::Client::Port port = rpc::client.open<RPC_TEST_INCREMENT>();
+ LIBC_NAMESPACE::rpc::Client::Port port =
+ LIBC_NAMESPACE::rpc::client.open<RPC_TEST_INCREMENT>();
port.send_and_recv(
- [=](rpc::Buffer *buffer, uint32_t) {
+ [=](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
reinterpret_cast<uint64_t *>(buffer->data)[0] = cnt;
},
- [&](rpc::Buffer *buffer, uint32_t) {
+ [&](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
cnt = reinterpret_cast<uint64_t *>(buffer->data)[0];
});
port.close();
@@ -33,8 +34,11 @@ static void test_add_simple() {
// Test to ensure that the RPC mechanism doesn't hang on divergence.
static void test_noop(uint8_t data) {
- rpc::Client::Port port = rpc::client.open<RPC_NOOP>();
- port.send([=](rpc::Buffer *buffer, uint32_t) { buffer->data[0] = data; });
+ LIBC_NAMESPACE::rpc::Client::Port port =
+ LIBC_NAMESPACE::rpc::client.open<RPC_NOOP>();
+ port.send([=](LIBC_NAMESPACE::rpc::Buffer *buffer, uint32_t) {
+ buffer->data[0] = data;
+ });
port.close();
}
diff --git a/libc/utils/gpu/server/rpc_server.cpp b/libc/utils/gpu/server/rpc_server.cpp
index 972601aaf1d5e0..d877cbc25a13d0 100644
--- a/libc/utils/gpu/server/rpc_server.cpp
+++ b/libc/utils/gpu/server/rpc_server.cpp
@@ -14,15 +14,16 @@
// Make sure these are included first so they don't conflict with the system.
#include <limits.h>
+#include "shared/rpc.h"
+
#include "llvmlibc_rpc_server.h"
-#include "src/__support/RPC/rpc.h"
+#include "include/llvm-libc-types/rpc_opcodes_t.h"
#include "src/__support/arg_list.h"
#include "src/stdio/printf_core/converter.h"
#include "src/stdio/printf_core/parser.h"
#include "src/stdio/printf_core/writer.h"
-#include "src/stdio/gpu/file.h"
#include <algorithm>
#include <atomic>
#include <cstdio>
@@ -53,6 +54,26 @@ struct TempStorage {
};
} // namespace
+enum Stream {
+ File = 0,
+ Stdin = 1,
+ Stdout = 2,
+ Stderr = 3,
+};
+
+// Get the associated stream out of an encoded number.
+LIBC_INLINE ::FILE *to_stream(uintptr_t f) {
+ ::FILE *stream = reinterpret_cast<FILE *>(f & ~0x3ull);
+ Stream type = static_cast<Stream>(f & 0x3ull);
+ if (type == Stdin)
+ return stdin;
+ if (type == Stdout)
+ return stdout;
+ if (type == Stderr)
+ return stderr;
+ return stream;
+}
+
template <bool packed, uint32_t lane_size>
static void handle_printf(rpc::Server::Port &port, TempStorage &temp_storage) {
FILE *files[lane_size] = {nullptr};
@@ -260,7 +281,7 @@ rpc_status_t handle_server_impl(
port->recv([&](rpc::Buffer *buffer, uint32_t id) {
data[id] = temp_storage.alloc(buffer->data[0]);
sizes[id] =
- fread(data[id], 1, buffer->data[0], file::to_stream(buffer->data[1]));
+ fread(data[id], 1, buffer->data[0], to_stream(buffer->data[1]));
});
port->send_n(data, sizes);
port->send([&](rpc::Buffer *buffer, uint32_t id) {
@@ -273,9 +294,8 @@ rpc_status_t handle_server_impl(
void *data[lane_size] = {nullptr};
port->recv([&](rpc::Buffer *buffer, uint32_t id) {
data[id] = temp_storage.alloc(buffer->data[0]);
- const char *str =
- fgets(reinterpret_cast<char *>(data[id]), buffer->data[0],
- file::to_stream(buffer->data[1]));
+ const char *str = fgets(reinterpret_cast<char *>(data[id]),
+ buffer->data[0], to_stream(buffer->data[1]));
sizes[id] = !str ? 0 : std::strlen(str) + 1;
});
port->send_n(data, sizes);
@@ -335,46 +355,46 @@ rpc_status_t handle_server_impl(
}
case RPC_FEOF: {
port->recv_and_send([](rpc::Buffer *buffer, uint32_t) {
- buffer->data[0] = feof(file::to_stream(buffer->data[0]));
+ buffer->data[0] = feof(to_stream(buffer->data[0]));
});
break;
}
case RPC_FERROR: {
port->recv_and_send([](rpc::Buffer *buffer, uint32_t) {
- buffer->data[0] = ferror(file::to_stream(buffer->data[0]));
+ buffer->data[0] = ferror(to_stream(buffer->data[0]));
});
break;
}
case RPC_CLEARERR: {
port->recv_and_send([](rpc::Buffer *buffer, uint32_t) {
- clearerr(file::to_stream(buffer->data[0]));
+ clearerr(to_stream(buffer->data[0]));
});
break;
}
case RPC_FSEEK: {
port->recv_and_send([](rpc::Buffer *buffer, uint32_t) {
- buffer->data[0] = fseek(file::to_stream(buffer->data[0]),
- static_cast<long>(buffer->data[1]),
- static_cast<int>(buffer->data[2]));
+ buffer->data[0] =
+ fseek(to_stream(buffer->data[0]), static_cast<long>(buffer->data[1]),
+ static_cast<int>(buffer->data[2]));
});
break;
}
case RPC_FTELL: {
port->recv_and_send([](rpc::Buffer *buffer, uint32_t) {
- buffer->data[0] = ftell(file::to_stream(buffer->data[0]));
+ buffer->data[0] = ftell(to_stream(buffer->data[0]));
});
break;
}
case RPC_FFLUSH: {
port->recv_and_send([](rpc::Buffer *buffer, uint32_t) {
- buffer->data[0] = fflush(file::to_stream(buffer->data[0]));
+ buffer->data[0] = fflush(to_stream(buffer->data[0]));
});
break;
}
case RPC_UNGETC: {
port->recv_and_send([](rpc::Buffer *buffer, uint32_t) {
- buffer->data[0] = ungetc(static_cast<int>(buffer->data[0]),
- file::to_stream(buffer->data[1]));
+ buffer->data[0] =
+ ungetc(static_cast<int>(buffer->data[0]), to_stream(buffer->data[1]));
});
break;
}
More information about the libc-commits
mailing list