[llvm] [Offload] Move RPC server handling to a dedicated thread (PR #112988)
Ivan Butygin via llvm-commits
llvm-commits at lists.llvm.org
Sat Oct 19 06:57:23 PDT 2024
================
@@ -21,8 +21,64 @@ using namespace llvm;
using namespace omp;
using namespace target;
+// Stop the dedicated server thread: clear Running under the mutex so the
+// condition-variable wait in run() observes the change, wake the worker, and
+// join it. No-op when RPC support is compiled out.
+void RPCServerTy::ServerThread::shutDown() {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+ {
+ // Notify while holding the mutex to avoid a lost wakeup if run() is
+ // between evaluating its wait predicate and blocking on the CV.
+ std::lock_guard<decltype(Mutex)> Lock(Mutex);
+ Running.store(false, std::memory_order_release);
+ CV.notify_all();
+ }
+ // joinable() guards against double-join if shutDown() is called twice.
+ if (Worker.joinable())
+ Worker.join();
+#endif
+}
+
+// Worker loop for the dedicated RPC thread: block on the condition variable
+// until either a device registers interest (NumUsers > 0) or shutDown()
+// clears Running; while users exist, poll every RPC server handle with the
+// lock dropped so other threads are not stalled during servicing.
+void RPCServerTy::ServerThread::run() {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+  for (;;) {
+    std::unique_lock<decltype(Mutex)> Lock(Mutex);
+    CV.wait(Lock, [&]() {
+      return NumUsers.load(std::memory_order_acquire) > 0 ||
+             !Running.load(std::memory_order_acquire);
+    });
+
+    // FIX: memory_order_acq_rel is not a valid ordering for an atomic load
+    // (loads permit only relaxed/consume/acquire/seq_cst); use acquire to
+    // pair with the release store in shutDown().
+    if (!Running.load(std::memory_order_acquire))
+      return;
+
+    while (NumUsers.load(std::memory_order_relaxed) > 0 &&
+           Running.load(std::memory_order_relaxed)) {
+      // Service handles outside the lock so threads bumping NumUsers or
+      // shutting down are not blocked for the duration of the poll.
+      Lock.unlock();
+      for (const auto &Handle : Handles) {
+        rpc_device_t RPCDevice{Handle};
+        [[maybe_unused]] rpc_status_t Err = rpc_handle_server(RPCDevice);
+        assert(Err == RPC_STATUS_SUCCESS &&
+               "Checking the RPC server should not fail");
+      }
+      Lock.lock();
+    }
+  }
+#endif
+}
+
+// Construct the server thread over the plugin's handle array (non-owning
+// view of Length slots). Members are initialized unconditionally so
+// shutDown() remains safe to call even when RPC support is compiled out;
+// the worker thread itself is only launched under LIBOMPTARGET_RPC_SUPPORT.
+RPCServerTy::ServerThread::ServerThread(std::atomic<uintptr_t> Handles[],
+ size_t Length)
+ : Running(true), NumUsers(0), CV(), Mutex(), Handles(Handles, Length) {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+ Worker = std::thread([this]() { run(); });
+#endif
+}
+
RPCServerTy::RPCServerTy(plugin::GenericPluginTy &Plugin)
-  : Handles(Plugin.getNumDevices()) {}
+ // Allocate one handle slot per device, then hand the raw array to the
+ // server thread. NOTE(review): Thread's initializer reads Handles.get(),
+ // which relies on Handles being declared before Thread in the class —
+ // confirm the member declaration order. Also, if Thread is a unique_ptr,
+ // prefer std::make_unique<ServerThread>(...) over a raw `new`.
+ : Handles(
+ std::make_unique<std::atomic<uintptr_t>[]>(Plugin.getNumDevices())),
+ Thread(new ServerThread(Handles.get(), Plugin.getNumDevices())) {}
+
+llvm::Error RPCServerTy::shutDown() {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+ Thread->shutDown();
+ return Error::success();
----------------
Hardcode84 wrote:
The `return Error::success();` should be placed after the `#endif`; otherwise, when `LIBOMPTARGET_RPC_SUPPORT` is not defined, control falls off the end of a value-returning function.
https://github.com/llvm/llvm-project/pull/112988
More information about the llvm-commits
mailing list