[llvm] [Offload] Add olKernelMaxGroupSize (PR #142950)
Ross Brunton via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 5 05:05:12 PDT 2025
https://github.com/RossBrunton created https://github.com/llvm/llvm-project/pull/142950
This is equivalent to `cuOccupancyMaxPotentialBlockSize`. It is currently
only implemented on CUDA; AMDGPU and Host return the legal-but-suboptimal
value of `1`.
Co-Authored-By: Callum Fare <callum at codeplay.com>
>From b3937926bb1cf3047487d0d9630adfee9c36384b Mon Sep 17 00:00:00 2001
From: Ross Brunton <ross at codeplay.com>
Date: Thu, 5 Jun 2025 12:21:52 +0100
Subject: [PATCH] [Offload] Add olKernelMaxGroupSize
This is equivalent to `cuOccupancyMaxPotentialBlockSize`. It is currently
only implemented on CUDA; AMDGPU and Host return the legal-but-suboptimal
value of `1`.
Co-Authored-By: Callum Fare <callum at codeplay.com>
---
offload/liboffload/API/Kernel.td | 13 +++++++
offload/liboffload/src/OffloadImpl.cpp | 20 +++++++++-
offload/plugins-nextgen/amdgpu/src/rtl.cpp | 8 ++++
.../common/include/PluginInterface.h | 3 ++
.../cuda/dynamic_cuda/cuda.cpp | 1 +
.../plugins-nextgen/cuda/dynamic_cuda/cuda.h | 3 ++
offload/plugins-nextgen/cuda/src/rtl.cpp | 14 +++++++
offload/plugins-nextgen/host/src/rtl.cpp | 7 ++++
offload/unittests/OffloadAPI/CMakeLists.txt | 1 +
.../kernel/olKernelMaxGroupSize.cpp | 37 +++++++++++++++++++
10 files changed, 106 insertions(+), 1 deletion(-)
create mode 100644 offload/unittests/OffloadAPI/kernel/olKernelMaxGroupSize.cpp
diff --git a/offload/liboffload/API/Kernel.td b/offload/liboffload/API/Kernel.td
index 247f9c1bf5b6a..71869cf4a68bb 100644
--- a/offload/liboffload/API/Kernel.td
+++ b/offload/liboffload/API/Kernel.td
@@ -24,6 +24,19 @@ def : Function {
let returns = [];
}
+def : Function {
+ let name = "olKernelMaxGroupSize";
+ let desc = "Get the maximum block size needed to achieve maximum occupancy.";
+ let details = [];
+ let params = [
+ Param<"ol_kernel_handle_t", "Kernel", "handle of the kernel", PARAM_IN>,
+ Param<"ol_device_handle_t", "Device", "device intended to run the kernel", PARAM_IN>,
+ Param<"size_t", "SharedMemory", "dynamic shared memory required", PARAM_IN>,
+ Param<"size_t*", "GroupSize", "maximum block size", PARAM_OUT>
+ ];
+ let returns = [];
+}
+
def : Struct {
let name = "ol_kernel_launch_size_args_t";
let desc = "Size-related arguments for a kernel launch.";
diff --git a/offload/liboffload/src/OffloadImpl.cpp b/offload/liboffload/src/OffloadImpl.cpp
index 7b67cbba43e68..a3f8d4ba52d1e 100644
--- a/offload/liboffload/src/OffloadImpl.cpp
+++ b/offload/liboffload/src/OffloadImpl.cpp
@@ -468,6 +468,10 @@ Error olDestroyProgram_impl(ol_program_handle_t Program) {
return olDestroy(Program);
}
+inline GenericKernelTy *getOmpKernel(ol_kernel_handle_t OlKernel) {
+ return reinterpret_cast<GenericKernelTy *>(OlKernel);
+}
+
Error olGetKernel_impl(ol_program_handle_t Program, const char *KernelName,
ol_kernel_handle_t *Kernel) {
@@ -484,6 +488,20 @@ Error olGetKernel_impl(ol_program_handle_t Program, const char *KernelName,
return Error::success();
}
+Error olKernelMaxGroupSize_impl(ol_kernel_handle_t Kernel,
+ ol_device_handle_t Device,
+ size_t DynamicMemSize, size_t *GroupSize) {
+ auto *KernelImpl = getOmpKernel(Kernel);
+
+ auto Res = KernelImpl->maxGroupSize(*Device->Device, DynamicMemSize);
+ if (auto Err = Res.takeError()) {
+ return Err;
+ }
+ *GroupSize = *Res;
+
+ return Error::success();
+}
+
Error olLaunchKernel_impl(ol_queue_handle_t Queue, ol_device_handle_t Device,
ol_kernel_handle_t Kernel, const void *ArgumentsData,
size_t ArgumentsSize,
@@ -514,7 +532,7 @@ Error olLaunchKernel_impl(ol_queue_handle_t Queue, ol_device_handle_t Device,
// Don't do anything with pointer indirection; use arg data as-is
LaunchArgs.Flags.IsCUDA = true;
- auto *KernelImpl = reinterpret_cast<GenericKernelTy *>(Kernel);
+ auto *KernelImpl = getOmpKernel(Kernel);
auto Err = KernelImpl->launch(*DeviceImpl, LaunchArgs.ArgPtrs, nullptr,
LaunchArgs, AsyncInfoWrapper);
diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
index e4c32713e2c15..bed9764bddf55 100644
--- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -570,6 +570,14 @@ struct AMDGPUKernelTy : public GenericKernelTy {
KernelLaunchParamsTy LaunchParams,
AsyncInfoWrapperTy &AsyncInfoWrapper) const override;
+ /// Return maximum block size for maximum occupancy
+ ///
+ /// TODO: This needs to be implemented for amdgpu
+ Expected<size_t> maxGroupSize(GenericDeviceTy &GenericDevice,
+ size_t DynamicMemSize) const override {
+ return 1;
+ }
+
/// Print more elaborate kernel launch info for AMDGPU
Error printLaunchInfoDetails(GenericDeviceTy &GenericDevice,
KernelArgsTy &KernelArgs, uint32_t NumThreads[3],
diff --git a/offload/plugins-nextgen/common/include/PluginInterface.h b/offload/plugins-nextgen/common/include/PluginInterface.h
index d2437908a0a6f..5d4e9fa212f52 100644
--- a/offload/plugins-nextgen/common/include/PluginInterface.h
+++ b/offload/plugins-nextgen/common/include/PluginInterface.h
@@ -276,6 +276,9 @@ struct GenericKernelTy {
KernelLaunchParamsTy LaunchParams,
AsyncInfoWrapperTy &AsyncInfoWrapper) const = 0;
+ virtual Expected<size_t> maxGroupSize(GenericDeviceTy &GenericDevice,
+ size_t DynamicMemSize) const = 0;
+
/// Get the kernel name.
const char *getName() const { return Name; }
diff --git a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp
index e5332686fcffb..e6699ee78596d 100644
--- a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp
+++ b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.cpp
@@ -71,6 +71,7 @@ DLWRAP(cuDevicePrimaryCtxGetState, 3)
DLWRAP(cuDevicePrimaryCtxSetFlags, 2)
DLWRAP(cuDevicePrimaryCtxRetain, 2)
DLWRAP(cuModuleLoadDataEx, 5)
+DLWRAP(cuOccupancyMaxPotentialBlockSize, 6)
DLWRAP(cuDeviceCanAccessPeer, 3)
DLWRAP(cuCtxEnablePeerAccess, 2)
diff --git a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
index 1c5b421768894..2c856c68a9368 100644
--- a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
+++ b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
@@ -289,6 +289,7 @@ static inline void *CU_LAUNCH_PARAM_BUFFER_POINTER = (void *)0x01;
static inline void *CU_LAUNCH_PARAM_BUFFER_SIZE = (void *)0x02;
typedef void (*CUstreamCallback)(CUstream, CUresult, void *);
+typedef size_t (*CUoccupancyB2DSize)(int);
CUresult cuCtxGetDevice(CUdevice *);
CUresult cuDeviceGet(CUdevice *, int);
@@ -370,5 +371,7 @@ CUresult cuMemSetAccess(CUdeviceptr ptr, size_t size,
CUresult cuMemGetAllocationGranularity(size_t *granularity,
const CUmemAllocationProp *prop,
CUmemAllocationGranularity_flags option);
+CUresult cuOccupancyMaxPotentialBlockSize(int *, int *, CUfunction,
+ CUoccupancyB2DSize, size_t, int);
#endif
diff --git a/offload/plugins-nextgen/cuda/src/rtl.cpp b/offload/plugins-nextgen/cuda/src/rtl.cpp
index 44ccfc47a21c9..45d9647da9e53 100644
--- a/offload/plugins-nextgen/cuda/src/rtl.cpp
+++ b/offload/plugins-nextgen/cuda/src/rtl.cpp
@@ -157,6 +157,20 @@ struct CUDAKernelTy : public GenericKernelTy {
KernelLaunchParamsTy LaunchParams,
AsyncInfoWrapperTy &AsyncInfoWrapper) const override;
+ /// Return maximum block size for maximum occupancy
+ Expected<size_t> maxGroupSize(GenericDeviceTy &,
+ size_t DynamicMemSize) const override {
+ int minGridSize;
+ int maxBlockSize;
+ auto Res = cuOccupancyMaxPotentialBlockSize(
+ &minGridSize, &maxBlockSize, Func, NULL, DynamicMemSize, INT_MAX);
+ if (auto Err = Plugin::check(
+ Res, "error in cuOccupancyMaxPotentialBlockSize: %s")) {
+ return Err;
+ }
+ return maxBlockSize;
+ }
+
private:
/// The CUDA kernel function to execute.
CUfunction Func;
diff --git a/offload/plugins-nextgen/host/src/rtl.cpp b/offload/plugins-nextgen/host/src/rtl.cpp
index 9916f4d0ab250..a96aa346d33e5 100644
--- a/offload/plugins-nextgen/host/src/rtl.cpp
+++ b/offload/plugins-nextgen/host/src/rtl.cpp
@@ -114,6 +114,13 @@ struct GenELF64KernelTy : public GenericKernelTy {
return Plugin::success();
}
+ /// Return maximum block size for maximum occupancy
+ Expected<size_t> maxGroupSize(GenericDeviceTy &Device,
+ size_t DynamicMemSize) const override {
+ // TODO
+ return 1;
+ }
+
private:
/// The kernel function to execute.
void (*Func)(void);
diff --git a/offload/unittests/OffloadAPI/CMakeLists.txt b/offload/unittests/OffloadAPI/CMakeLists.txt
index 2844b675e5de1..ac302d502c30c 100644
--- a/offload/unittests/OffloadAPI/CMakeLists.txt
+++ b/offload/unittests/OffloadAPI/CMakeLists.txt
@@ -14,6 +14,7 @@ add_offload_unittest("event"
add_offload_unittest("kernel"
kernel/olGetKernel.cpp
+ kernel/olKernelMaxGroupSize.cpp
kernel/olLaunchKernel.cpp)
add_offload_unittest("memory"
diff --git a/offload/unittests/OffloadAPI/kernel/olKernelMaxGroupSize.cpp b/offload/unittests/OffloadAPI/kernel/olKernelMaxGroupSize.cpp
new file mode 100644
index 0000000000000..e83775ae0d896
--- /dev/null
+++ b/offload/unittests/OffloadAPI/kernel/olKernelMaxGroupSize.cpp
@@ -0,0 +1,37 @@
+//===------- Offload API tests - olKernelMaxGroupSize ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../common/Fixtures.hpp"
+#include <OffloadAPI.h>
+#include <gtest/gtest.h>
+
+using olKernelMaxGroupSizeTest = OffloadKernelTest;
+OFFLOAD_TESTS_INSTANTIATE_DEVICE_FIXTURE(olKernelMaxGroupSizeTest);
+
+TEST_P(olKernelMaxGroupSizeTest, Success) {
+ size_t Size{0};
+ ASSERT_SUCCESS(olKernelMaxGroupSize(Kernel, Device, 0, &Size));
+ ASSERT_GT(Size, 0);
+}
+
+TEST_P(olKernelMaxGroupSizeTest, NullKernel) {
+ size_t Size;
+ ASSERT_ERROR(OL_ERRC_INVALID_NULL_HANDLE,
+ olKernelMaxGroupSize(nullptr, Device, 0, &Size));
+}
+
+TEST_P(olKernelMaxGroupSizeTest, NullDevice) {
+ size_t Size;
+ ASSERT_ERROR(OL_ERRC_INVALID_NULL_HANDLE,
+ olKernelMaxGroupSize(Kernel, nullptr, 0, &Size));
+}
+
+TEST_P(olKernelMaxGroupSizeTest, NullOutput) {
+ ASSERT_ERROR(OL_ERRC_INVALID_NULL_POINTER,
+ olKernelMaxGroupSize(Kernel, Device, 0, nullptr));
+}
More information about the llvm-commits
mailing list