[llvm] [Offload] Move RPC server handling to a dedicated thread (PR #112988)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 18 14:52:20 PDT 2024
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-offload
@llvm/pr-subscribers-backend-amdgpu
Author: Joseph Huber (jhuber6)
<details>
<summary>Changes</summary>
Summary:
Handling the RPC server requires running through list of jobs that the
device has requested to be done. Currently this is handled by the thread
that does the waiting for the kernel to finish. However, this is not
sound on NVIDIA architectures and only works for async launches in the
OpenMP model that uses helper threads.
However, we also don't want to have this thread doing work
unnecessarily. For this reason we track the execution of kernels and
cause the thread to sleep via a condition variable (usually backed by
some kind of futex or other intelligent sleeping mechanism) so that the
thread will be idle while no kernels are running.
---
Patch is 20.67 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/112988.diff
7 Files Affected:
- (modified) offload/plugins-nextgen/amdgpu/src/rtl.cpp (+75-52)
- (modified) offload/plugins-nextgen/common/include/RPC.h (+54-5)
- (modified) offload/plugins-nextgen/common/src/PluginInterface.cpp (+4-1)
- (modified) offload/plugins-nextgen/common/src/RPC.cpp (+53-12)
- (modified) offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp (+1)
- (modified) offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h (+3)
- (modified) offload/plugins-nextgen/cuda/src/rtl.cpp (+17-20)
``````````diff
diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
index f0cc0c2e4d08e5..bd1d60075bb36a 100644
--- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -626,9 +626,9 @@ struct AMDGPUSignalTy {
}
/// Wait until the signal gets a zero value.
- Error wait(const uint64_t ActiveTimeout = 0, RPCServerTy *RPCServer = nullptr,
+ Error wait(const uint64_t ActiveTimeout = 0,
GenericDeviceTy *Device = nullptr) const {
- if (ActiveTimeout && !RPCServer) {
+ if (ActiveTimeout) {
hsa_signal_value_t Got = 1;
Got = hsa_signal_wait_scacquire(HSASignal, HSA_SIGNAL_CONDITION_EQ, 0,
ActiveTimeout, HSA_WAIT_STATE_ACTIVE);
@@ -637,14 +637,11 @@ struct AMDGPUSignalTy {
}
// If there is an RPC device attached to this stream we run it as a server.
- uint64_t Timeout = RPCServer ? 8192 : UINT64_MAX;
- auto WaitState = RPCServer ? HSA_WAIT_STATE_ACTIVE : HSA_WAIT_STATE_BLOCKED;
+ uint64_t Timeout = UINT64_MAX;
+ auto WaitState = HSA_WAIT_STATE_BLOCKED;
while (hsa_signal_wait_scacquire(HSASignal, HSA_SIGNAL_CONDITION_EQ, 0,
- Timeout, WaitState) != 0) {
- if (RPCServer && Device)
- if (auto Err = RPCServer->runServer(*Device))
- return Err;
- }
+ Timeout, WaitState) != 0)
+ ;
return Plugin::success();
}
@@ -927,6 +924,8 @@ struct AMDGPUStreamTy {
AMDGPUSignalManagerTy *SignalManager;
};
+ using AMDGPUStreamCallbackTy = Error(void *Data);
+
/// The stream is composed of N stream's slots. The struct below represents
/// the fields of each slot. Each slot has a signal and an optional action
/// function. When appending an HSA asynchronous operation to the stream, one
@@ -942,65 +941,81 @@ struct AMDGPUStreamTy {
/// operation as input signal.
AMDGPUSignalTy *Signal;
- /// The action that must be performed after the operation's completion. Set
+ /// The actions that must be performed after the operation's completion. Set
/// to nullptr when there is no action to perform.
- Error (*ActionFunction)(void *);
+ llvm::SmallVector<AMDGPUStreamCallbackTy *> Callbacks;
/// Space for the action's arguments. A pointer to these arguments is passed
/// to the action function. Notice the space of arguments is limited.
- union {
+ union ActionArgsTy {
MemcpyArgsTy MemcpyArgs;
ReleaseBufferArgsTy ReleaseBufferArgs;
ReleaseSignalArgsTy ReleaseSignalArgs;
- } ActionArgs;
+ void *CallbackArgs;
+ };
+
+ llvm::SmallVector<ActionArgsTy> ActionArgs;
/// Create an empty slot.
- StreamSlotTy() : Signal(nullptr), ActionFunction(nullptr) {}
+ StreamSlotTy() : Signal(nullptr), Callbacks({}), ActionArgs({}) {}
/// Schedule a host memory copy action on the slot.
Error schedHostMemoryCopy(void *Dst, const void *Src, size_t Size) {
- ActionFunction = memcpyAction;
- ActionArgs.MemcpyArgs = MemcpyArgsTy{Dst, Src, Size};
+ Callbacks.emplace_back(memcpyAction);
+ ActionArgs.emplace_back().MemcpyArgs = MemcpyArgsTy{Dst, Src, Size};
return Plugin::success();
}
/// Schedule a release buffer action on the slot.
Error schedReleaseBuffer(void *Buffer, AMDGPUMemoryManagerTy &Manager) {
- ActionFunction = releaseBufferAction;
- ActionArgs.ReleaseBufferArgs = ReleaseBufferArgsTy{Buffer, &Manager};
+ Callbacks.emplace_back(releaseBufferAction);
+ ActionArgs.emplace_back().ReleaseBufferArgs =
+ ReleaseBufferArgsTy{Buffer, &Manager};
return Plugin::success();
}
/// Schedule a signal release action on the slot.
Error schedReleaseSignal(AMDGPUSignalTy *SignalToRelease,
AMDGPUSignalManagerTy *SignalManager) {
- ActionFunction = releaseSignalAction;
- ActionArgs.ReleaseSignalArgs =
+ Callbacks.emplace_back(releaseSignalAction);
+ ActionArgs.emplace_back().ReleaseSignalArgs =
ReleaseSignalArgsTy{SignalToRelease, SignalManager};
return Plugin::success();
}
+ /// Register a callback to be called on completion
+ Error schedCallback(AMDGPUStreamCallbackTy *Func, void *Data) {
+ Callbacks.emplace_back(Func);
+ ActionArgs.emplace_back().CallbackArgs = Data;
+
+ return Plugin::success();
+ }
+
// Perform the action if needed.
Error performAction() {
- if (!ActionFunction)
+ if (Callbacks.empty())
return Plugin::success();
- // Perform the action.
- if (ActionFunction == memcpyAction) {
- if (auto Err = memcpyAction(&ActionArgs))
- return Err;
- } else if (ActionFunction == releaseBufferAction) {
- if (auto Err = releaseBufferAction(&ActionArgs))
- return Err;
- } else if (ActionFunction == releaseSignalAction) {
- if (auto Err = releaseSignalAction(&ActionArgs))
- return Err;
- } else {
- return Plugin::error("Unknown action function!");
+ for (auto [Callback, ActionArg] : llvm::zip(Callbacks, ActionArgs)) {
+ // Perform the action.
+ if (Callback == memcpyAction) {
+ if (auto Err = memcpyAction(&ActionArg))
+ return Err;
+ } else if (Callback == releaseBufferAction) {
+ if (auto Err = releaseBufferAction(&ActionArg))
+ return Err;
+ } else if (Callback == releaseSignalAction) {
+ if (auto Err = releaseSignalAction(&ActionArg))
+ return Err;
+ } else {
+ if (auto Err = Callback(ActionArg.CallbackArgs))
+ return Err;
+ }
}
// Invalidate the action.
- ActionFunction = nullptr;
+ Callbacks.clear();
+ ActionArgs.clear();
return Plugin::success();
}
@@ -1034,11 +1049,6 @@ struct AMDGPUStreamTy {
/// operation that was already finalized in a previous stream synchronize.
uint32_t SyncCycle;
- /// A pointer associated with an RPC server running on the given device. If
- /// RPC is not being used this will be a null pointer. Otherwise, this
- /// indicates that an RPC server is expected to be run on this stream.
- RPCServerTy *RPCServer;
-
/// Mutex to protect stream's management.
mutable std::mutex Mutex;
@@ -1218,9 +1228,6 @@ struct AMDGPUStreamTy {
/// Deinitialize the stream's signals.
Error deinit() { return Plugin::success(); }
- /// Attach an RPC server to this stream.
- void setRPCServer(RPCServerTy *Server) { RPCServer = Server; }
-
/// Push a asynchronous kernel to the stream. The kernel arguments must be
/// placed in a special allocation for kernel args and must keep alive until
/// the kernel finalizes. Once the kernel is finished, the stream will release
@@ -1248,10 +1255,30 @@ struct AMDGPUStreamTy {
if (auto Err = Slots[Curr].schedReleaseBuffer(KernelArgs, MemoryManager))
return Err;
+ // If we are running an RPC server we want to wake up the server thread
+ // whenever there is a kernel running and let it sleep otherwise.
+ if (Device.getRPCServer())
+ Device.Plugin.getRPCServer().Thread->notify();
+
// Push the kernel with the output signal and an input signal (optional)
- return Queue->pushKernelLaunch(Kernel, KernelArgs, NumThreads, NumBlocks,
- GroupSize, StackSize, OutputSignal,
- InputSignal);
+ if (auto Err = Queue->pushKernelLaunch(Kernel, KernelArgs, NumThreads,
+ NumBlocks, GroupSize, StackSize,
+ OutputSignal, InputSignal))
+ return Err;
+
+ // Register a callback to indicate when the kernel is complete.
+ if (Device.getRPCServer()) {
+ if (auto Err = Slots[Curr].schedCallback(
+ [](void *Data) -> llvm::Error {
+ GenericPluginTy &Plugin =
+ *reinterpret_cast<GenericPluginTy *>(Data);
+ Plugin.getRPCServer().Thread->finish();
+ return Error::success();
+ },
+ &Device.Plugin))
+ return Err;
+ }
+ return Plugin::success();
}
/// Push an asynchronous memory copy between pinned memory buffers.
@@ -1461,8 +1488,8 @@ struct AMDGPUStreamTy {
return Plugin::success();
// Wait until all previous operations on the stream have completed.
- if (auto Err = Slots[last()].Signal->wait(StreamBusyWaitMicroseconds,
- RPCServer, &Device))
+ if (auto Err =
+ Slots[last()].Signal->wait(StreamBusyWaitMicroseconds, &Device))
return Err;
// Reset the stream and perform all pending post actions.
@@ -3006,7 +3033,7 @@ AMDGPUStreamTy::AMDGPUStreamTy(AMDGPUDeviceTy &Device)
: Agent(Device.getAgent()), Queue(nullptr),
SignalManager(Device.getSignalManager()), Device(Device),
// Initialize the std::deque with some empty positions.
- Slots(32), NextSlot(0), SyncCycle(0), RPCServer(nullptr),
+ Slots(32), NextSlot(0), SyncCycle(0),
StreamBusyWaitMicroseconds(Device.getStreamBusyWaitMicroseconds()),
UseMultipleSdmaEngines(Device.useMultipleSdmaEngines()) {}
@@ -3359,10 +3386,6 @@ Error AMDGPUKernelTy::launchImpl(GenericDeviceTy &GenericDevice,
if (auto Err = AMDGPUDevice.getStream(AsyncInfoWrapper, Stream))
return Err;
- // If this kernel requires an RPC server we attach its pointer to the stream.
- if (GenericDevice.getRPCServer())
- Stream->setRPCServer(GenericDevice.getRPCServer());
-
// Only COV5 implicitargs needs to be set. COV4 implicitargs are not used.
if (ImplArgs &&
getImplicitArgsSize() == sizeof(hsa_utils::AMDGPUImplicitArgsTy)) {
diff --git a/offload/plugins-nextgen/common/include/RPC.h b/offload/plugins-nextgen/common/include/RPC.h
index 01bf539bcb3f32..9b7ebee4bdb785 100644
--- a/offload/plugins-nextgen/common/include/RPC.h
+++ b/offload/plugins-nextgen/common/include/RPC.h
@@ -19,7 +19,11 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Error.h"
+#include <atomic>
+#include <condition_variable>
#include <cstdint>
+#include <mutex>
+#include <thread>
namespace llvm::omp::target {
namespace plugin {
@@ -37,6 +41,9 @@ struct RPCServerTy {
/// Initializes the handles to the number of devices we may need to service.
RPCServerTy(plugin::GenericPluginTy &Plugin);
+ /// Deinitialize the associated memory and resources.
+ llvm::Error shutDown();
+
/// Check if this device image is using an RPC server. This checks for the
/// presence of an externally visible symbol in the device image that will
/// be present whenever RPC code is called.
@@ -51,17 +58,59 @@ struct RPCServerTy {
plugin::GenericGlobalHandlerTy &Handler,
plugin::DeviceImageTy &Image);
- /// Runs the RPC server associated with the \p Device until the pending work
- /// is cleared.
- llvm::Error runServer(plugin::GenericDeviceTy &Device);
-
/// Deinitialize the RPC server for the given device. This will free the
/// memory associated with the k
llvm::Error deinitDevice(plugin::GenericDeviceTy &Device);
private:
/// Array from this device's identifier to its attached devices.
- llvm::SmallVector<uintptr_t> Handles;
+ std::unique_ptr<std::atomic<uintptr_t>[]> Handles;
+
+ /// A helper class for running the user thread that handles
+ struct ServerThread {
+ std::thread Worker;
+
+ /// A boolean indicating whether or not the worker thread should continue.
+ std::atomic<bool> Running;
+
+ /// The number of currently executing kernels across all devices that need
+ /// the server thread to be running.
+ std::atomic<uint32_t> NumUsers;
+
+ /// The condition variable used to suspend the thread if no work is needed.
+ std::condition_variable CV;
+ std::mutex Mutex;
+
+ /// A reference to all the RPC interfaces that the server is handling.
+ llvm::ArrayRef<std::atomic<uintptr_t>> Handles;
+
+ /// Initialize the worker thread to run in the background.
+ ServerThread(std::atomic<uintptr_t> Handles[], size_t Length);
+ ~ServerThread() { assert(!Running && "Thread not shut down explicitly\n"); }
+
+ /// Notify the worker thread that there is a user that needs it.
+ void notify() {
+ NumUsers.fetch_add(1, std::memory_order_seq_cst);
+ CV.notify_all();
+ }
+
+ /// Indicate that one of the dependent users has finished.
+ void finish() {
+ NumUsers.fetch_sub(1, std::memory_order_seq_cst);
+ CV.notify_all();
+ }
+
+ /// Destroy the worker thread and wait.
+ void shutDown();
+
+ /// Run the server thread to continuously check the RPC interface for work
+ /// to be done for the device.
+ void run();
+ };
+
+public:
+ /// Pointer to the server thread instance.
+ std::unique_ptr<ServerThread> Thread;
};
} // namespace llvm::omp::target
diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp
index 25b815b7f96694..2be0fc0a713da5 100644
--- a/offload/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp
@@ -1624,8 +1624,11 @@ Error GenericPluginTy::deinit() {
if (GlobalHandler)
delete GlobalHandler;
- if (RPCServer)
+ if (RPCServer) {
+ if (Error Err = RPCServer->shutDown())
+ return Err;
delete RPCServer;
+ }
if (RecordReplay)
delete RecordReplay;
diff --git a/offload/plugins-nextgen/common/src/RPC.cpp b/offload/plugins-nextgen/common/src/RPC.cpp
index faa2cbd4f02fe1..d47a869331a80d 100644
--- a/offload/plugins-nextgen/common/src/RPC.cpp
+++ b/offload/plugins-nextgen/common/src/RPC.cpp
@@ -21,8 +21,60 @@ using namespace llvm;
using namespace omp;
using namespace target;
+void RPCServerTy::ServerThread::shutDown() {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+ Running.store(false, std::memory_order_release);
+ CV.notify_all();
+ if (Worker.joinable())
+ Worker.join();
+#endif
+}
+
+void RPCServerTy::ServerThread::run() {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+ while (Running.load(std::memory_order_acquire)) {
+ std::unique_lock<decltype(Mutex)> Lock(Mutex);
+ CV.wait(Lock, [&]() {
+ return NumUsers.load(std::memory_order_relaxed) ||
+ !Running.load(std::memory_order_relaxed);
+ });
+
+ while (NumUsers.load(std::memory_order_relaxed) &&
+ Running.load(std::memory_order_relaxed)) {
+ for (const auto &Handle : Handles) {
+ rpc_device_t RPCDevice{Handle};
+ [[maybe_unused]] rpc_status_t Err = rpc_handle_server(RPCDevice);
+ assert(Err == RPC_STATUS_SUCCESS &&
+ "Checking the RPC server should not fail");
+ }
+ }
+ }
+#endif
+}
+
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+RPCServerTy::ServerThread::ServerThread(std::atomic<uintptr_t> Handles[],
+ size_t Length)
+ : Worker(std::thread([this]() { run(); })), Running(true), NumUsers(0),
+ CV(), Mutex(), Handles(Handles, Length) {}
+#else
+RPCServerTy::ServerThread::ServerThread(std::atomic<uintptr_t> Handles[],
+ size_t Length)
+ : Worker(), Running(true), NumUsers(0), CV(), Mutex(),
+ Handles(Handles, Length) {}
+#endif
+
RPCServerTy::RPCServerTy(plugin::GenericPluginTy &Plugin)
- : Handles(Plugin.getNumDevices()) {}
+ : Handles(
+ std::make_unique<std::atomic<uintptr_t>[]>(Plugin.getNumDevices())),
+ Thread(new ServerThread(Handles.get(), Plugin.getNumDevices())) {}
+
+llvm::Error RPCServerTy::shutDown() {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+ Thread->shutDown();
+ return Error::success();
+#endif
+}
llvm::Expected<bool>
RPCServerTy::isDeviceUsingRPC(plugin::GenericDeviceTy &Device,
@@ -109,17 +161,6 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device,
return Error::success();
}
-Error RPCServerTy::runServer(plugin::GenericDeviceTy &Device) {
-#ifdef LIBOMPTARGET_RPC_SUPPORT
- rpc_device_t RPCDevice{Handles[Device.getDeviceId()]};
- if (rpc_status_t Err = rpc_handle_server(RPCDevice))
- return plugin::Plugin::error(
- "Error while running RPC server on device %d: %d", Device.getDeviceId(),
- Err);
-#endif
- return Error::success();
-}
-
Error RPCServerTy::deinitDevice(plugin::GenericDeviceTy &Device) {
#ifdef LIBOMPTARGET_RPC_SUPPORT
rpc_device_t RPCDevice{Handles[Device.getDeviceId()]};
diff --git a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp
index 5ec3adb9e4e3a1..7878499dbfcb7e 100644
--- a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp
+++ b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp
@@ -63,6 +63,7 @@ DLWRAP(cuStreamCreate, 2)
DLWRAP(cuStreamDestroy, 1)
DLWRAP(cuStreamSynchronize, 1)
DLWRAP(cuStreamQuery, 1)
+DLWRAP(cuStreamAddCallback, 4)
DLWRAP(cuCtxSetCurrent, 1)
DLWRAP(cuDevicePrimaryCtxRelease, 1)
DLWRAP(cuDevicePrimaryCtxGetState, 3)
diff --git a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
index 16c8f7ad46c445..ad874735a25ed9 100644
--- a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
+++ b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
@@ -286,6 +286,8 @@ static inline void *CU_LAUNCH_PARAM_END = (void *)0x00;
static inline void *CU_LAUNCH_PARAM_BUFFER_POINTER = (void *)0x01;
static inline void *CU_LAUNCH_PARAM_BUFFER_SIZE = (void *)0x02;
+typedef void (*CUstreamCallback)(CUstream, CUresult, void *);
+
CUresult cuCtxGetDevice(CUdevice *);
CUresult cuDeviceGet(CUdevice *, int);
CUresult cuDeviceGetAttribute(int *, CUdevice_attribute, CUdevice);
@@ -326,6 +328,7 @@ CUresult cuStreamCreate(CUstream *, unsigned);
CUresult cuStreamDestroy(CUstream);
CUresult cuStreamSynchronize(CUstream);
CUresult cuStreamQuery(CUstream);
+CUresult cuStreamAddCallback(CUstream, CUstreamCallback, void *, unsigned int);
CUresult cuCtxSetCurrent(CUcontext);
CUresult cuDevicePrimaryCtxRelease(CUdevice);
CUresult cuDevicePrimaryCtxGetState(CUdevice, unsigned *, int *);
diff --git a/offload/plugins-nextgen/cuda/src/rtl.cpp b/offload/plugins-nextgen/cuda/src/rtl.cpp
index 015c7775ba3513..7c876c603aa46c 100644
--- a/offload/plugins-nextgen/cuda/src/rtl.cpp
+++ b/offload/plugins-nextgen/cuda/src/rtl.cpp
@@ -632,15 +632,7 @@ struct CUDADeviceTy : public GenericDeviceTy {
CUresult Res;
// If we have an RPC server running on this device we will continuously
// query it for work rather than blocking.
- if (!getRPCServer()) {
- Res = cuStreamSynchronize(Stream);
- } else {
- do {
- Res = cuStreamQuery(Stream);
- if (auto Err = getRPCServer()->runServer(*this))
- return Err;
- } while (Res == CUDA_ERROR_NOT_READY);
- }
+ Res = cuStreamSynchronize(Stream);
// Once the stream is synchronized, return it to stream pool and reset
// AsyncInfo. This is to make sure the synchronization only works for its
@@ -825,17 +817,6 @@ struct CUDADeviceTy : public GenericDeviceTy {
if (auto Err = getStream(AsyncInfoWrapper, Stream))
return Err;
- // If there is already pending work on the stream it could be waiting for
- // someone to check the RPC server.
- if (auto *RPCServer = getRPCServer()) {
- CUresult Res = cuStreamQuery(Stream);
- while (Res == CUDA_ERROR_NOT_READY) {
- if (auto Err = RPCServer->runServer(*this))
- return Err;
- Res = cuStreamQuery(Stream);
- }
- }
-
CUresult Res = cuMemcpyDtoHAsync(HstPtr, (CUdeviceptr)TgtPtr, Size, Stream);
return Plugin::check(Res, "Error in cuMemcpyDtoHAsync: %s");
}
@@ -1294,10 +1275,26 @@ Error CUDAKernelTy::launchImpl(GenericDeviceTy &GenericDevice,
reinterpret_cast<void *>(&LaunchParams.Size),
CU_LAUNCH_PARAM_END};
+ // If we are running an RPC server we want to wake up the server thread
+ // whenever there is a kernel running and let it sleep otherwise.
+ if (GenericDevice.getRPCServer())
+ GenericDevice.Plugin.getRPCServer().Thread->notify();
+
CUresult Res = cuLaunchKernel(Func, NumBlocks, /*gridDimY=*/1,
...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/112988
More information about the llvm-commits
mailing list