[clang] [llvm] [Offload] Introduce the concept of "default streams" (PR #95371)
Johannes Doerfert via cfe-commits
cfe-commits at lists.llvm.org
Thu Jun 13 01:37:33 PDT 2024
https://github.com/jdoerfert updated https://github.com/llvm/llvm-project/pull/95371
>From d06585044bd6d2dd76d6110bce933e01fd4b333e Mon Sep 17 00:00:00 2001
From: Johannes Doerfert <johannes at jdoerfert.de>
Date: Mon, 3 Jun 2024 19:52:12 -0700
Subject: [PATCH 1/3] [Offload][CUDA] Allow CUDA kernels to use LLVM/Offload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Through the new `-foffload-via-llvm` flag, CUDA kernels can now be
lowered to the LLVM/Offload API. On the Clang side, this is simply done
by using the OpenMP offload toolchain and emitting calls to `llvm*`
functions to orchestrate the kernel launch rather than `cuda*`
functions. These `llvm*` functions are implemented on top of the
existing LLVM/Offload API.
As we are about to redefine the Offload API, having a second offload
language will help us in the design process.
We do not support any CUDA APIs yet; however, we could:
https://www.osti.gov/servlets/purl/1892137
For proper host execution we need to resurrect/rebase
https://tianshilei.me/wp-content/uploads/2021/12/llpp-2021.pdf
(which was designed for debugging).
```
❯❯❯ cat test.cu
extern "C" {
void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
}
__global__ void square(int *A) { *A = 42; }
int main(int argc, char **argv) {
int DevNo = 0;
int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
*Ptr = 7;
printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr);
square<<<1, 1>>>(Ptr);
printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr);
llvm_omp_target_free_shared(Ptr, DevNo);
}
❯❯❯ clang++ test.cu -O3 -o test123 -foffload-via-llvm --offload-arch=native
❯❯❯ llvm-objdump --offloading test123
test123: file format elf64-x86-64
OFFLOADING IMAGE [0]:
kind elf
arch gfx90a
triple amdgcn-amd-amdhsa
producer openmp
❯❯❯ LIBOMPTARGET_INFO=16 ./test123
Ptr 0x155448ac8000, *Ptr 7
Ptr 0x155448ac8000, *Ptr 42
```
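For illustration, the lowering of a `<<<...>>>` launch site conceptually
expands to the sketch below (simplified pseudo-C++, not the exact emitted
IR; the stub name is illustrative, see the CGCUDANV.cpp changes and
offload/src/KernelLanguage/API.cpp):
```cpp
// Hypothetical expansion of: square<<<1, 1>>>(Ptr);
// The call site pushes the launch configuration; the device stub pops it,
// packs the arguments, and calls llvmLaunchKernel.
if (__llvmPushCallConfiguration(/*gridDim=*/1, /*blockDim=*/1,
                                /*sharedMem=*/0, /*stream=*/nullptr) == 0)
  __device_stub__square(Ptr); // pops the config, calls llvmLaunchKernel
```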
---
clang/include/clang/Basic/LangOptions.def | 1 +
clang/include/clang/Driver/Options.td | 6 ++
clang/lib/CodeGen/CGCUDANV.cpp | 97 ++++++++++++++++---
clang/lib/Driver/Driver.cpp | 19 ++--
clang/lib/Driver/ToolChains/Clang.cpp | 27 +++++-
clang/lib/Driver/ToolChains/CommonArgs.cpp | 7 +-
clang/lib/Driver/ToolChains/Cuda.cpp | 27 +++---
clang/lib/Headers/CMakeLists.txt | 18 +++-
.../llvm_offload_wrappers/__llvm_offload.h | 31 ++++++
.../__llvm_offload_device.h | 10 ++
.../__llvm_offload_host.h | 15 +++
.../__clang_openmp_device_functions.h | 9 +-
clang/lib/Sema/SemaCUDA.cpp | 3 +
clang/test/CodeGenCUDA/offload_via_llvm.cu | 97 +++++++++++++++++++
clang/test/Driver/cuda-via-liboffload.cu | 23 +++++
offload/include/Shared/APITypes.h | 5 +-
offload/include/omptarget.h | 2 +-
.../common/src/PluginInterface.cpp | 13 ++-
offload/src/CMakeLists.txt | 1 +
offload/src/KernelLanguage/API.cpp | 76 +++++++++++++++
offload/src/exports | 3 +
offload/test/lit.cfg | 2 +-
offload/test/offloading/CUDA/basic_launch.cu | 31 ++++++
.../CUDA/basic_launch_blocks_and_threads.cu | 32 ++++++
.../offloading/CUDA/basic_launch_multi_arg.cu | 41 ++++++++
offload/test/offloading/CUDA/kernel_tu.cu.inc | 1 +
offload/test/offloading/CUDA/launch_tu.cu | 32 ++++++
27 files changed, 576 insertions(+), 53 deletions(-)
create mode 100644 clang/lib/Headers/llvm_offload_wrappers/__llvm_offload.h
create mode 100644 clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_device.h
create mode 100644 clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_host.h
create mode 100644 clang/test/CodeGenCUDA/offload_via_llvm.cu
create mode 100644 clang/test/Driver/cuda-via-liboffload.cu
create mode 100644 offload/src/KernelLanguage/API.cpp
create mode 100644 offload/test/offloading/CUDA/basic_launch.cu
create mode 100644 offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu
create mode 100644 offload/test/offloading/CUDA/basic_launch_multi_arg.cu
create mode 100644 offload/test/offloading/CUDA/kernel_tu.cu.inc
create mode 100644 offload/test/offloading/CUDA/launch_tu.cu
diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index 2dea3cd4d795b..e8d3be7e89dbb 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -288,6 +288,7 @@ LANGOPT(GPUMaxThreadsPerBlock, 32, 1024, "default max threads per block for kern
LANGOPT(GPUDeferDiag, 1, 0, "defer host/device related diagnostic messages for CUDA/HIP")
LANGOPT(GPUExcludeWrongSideOverloads, 1, 0, "always exclude wrong side overloads in overloading resolution for CUDA/HIP")
LANGOPT(OffloadingNewDriver, 1, 0, "use the new driver for generating offloading code.")
+LANGOPT(OffloadViaLLVM, 1, 0, "target LLVM/Offload as portable offloading runtime.")
LANGOPT(SYCLIsDevice , 1, 0, "Generate code for SYCL device")
LANGOPT(SYCLIsHost , 1, 0, "SYCL host compilation")
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index d44faa55c456f..b77fd063f5519 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -1293,6 +1293,12 @@ def no_offload_compress : Flag<["--"], "no-offload-compress">;
def offload_compression_level_EQ : Joined<["--"], "offload-compression-level=">,
Flags<[HelpHidden]>,
HelpText<"Compression level for offload device binaries (HIP only)">;
+
+defm offload_via_llvm : BoolFOption<"offload-via-llvm",
+ LangOpts<"OffloadViaLLVM">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't use">,
+ BothFlags<[], [ClangOption], " LLVM/Offload as portable offloading runtime.">>;
}
// CUDA options
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 43dfbbb90dd52..2ebe0bf802dfa 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -15,10 +15,12 @@
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/Cuda.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/Offloading/Utility.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
@@ -36,6 +38,11 @@ constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"
class CGNVCUDARuntime : public CGCUDARuntime {
+ /// The prefix used for function calls and section names (CUDA, HIP, LLVM)
+ StringRef Prefix;
+ /// TODO: We should transition the OpenMP section to LLVM/Offload
+ StringRef SectionPrefix;
+
private:
llvm::IntegerType *IntTy, *SizeTy;
llvm::Type *VoidTy;
@@ -132,6 +139,9 @@ class CGNVCUDARuntime : public CGCUDARuntime {
return DummyFunc;
}
+ Address prepareKernelArgs(CodeGenFunction &CGF, FunctionArgList &Args);
+ Address prepareKernelArgsLLVMOffload(CodeGenFunction &CGF,
+ FunctionArgList &Args);
void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
std::string getDeviceSideName(const NamedDecl *ND) override;
@@ -191,15 +201,11 @@ class CGNVCUDARuntime : public CGCUDARuntime {
} // end anonymous namespace
std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
- if (CGM.getLangOpts().HIP)
- return ((Twine("hip") + Twine(FuncName)).str());
- return ((Twine("cuda") + Twine(FuncName)).str());
+ return (Prefix + FuncName).str();
}
std::string
CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
- if (CGM.getLangOpts().HIP)
- return ((Twine("__hip") + Twine(FuncName)).str());
- return ((Twine("__cuda") + Twine(FuncName)).str());
+ return ("__" + Prefix + FuncName).str();
}
static std::unique_ptr<MangleContext> InitDeviceMC(CodeGenModule &CGM) {
@@ -227,6 +233,14 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
PtrTy = CGM.UnqualPtrTy;
+
+ if (CGM.getLangOpts().OffloadViaLLVM) {
+ Prefix = "llvm";
+ SectionPrefix = "omp";
+ } else if (CGM.getLangOpts().HIP)
+ SectionPrefix = Prefix = "hip";
+ else
+ SectionPrefix = Prefix = "cuda";
}
llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
@@ -305,18 +319,58 @@ void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
}
if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
CudaFeature::CUDA_USES_NEW_LAUNCH) ||
- (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI))
+ (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI) ||
+ (CGF.getLangOpts().OffloadViaLLVM))
emitDeviceStubBodyNew(CGF, Args);
else
emitDeviceStubBodyLegacy(CGF, Args);
}
-// CUDA 9.0+ uses new way to launch kernels. Parameters are packed in a local
-// array and kernels are launched using cudaLaunchKernel().
-void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
- FunctionArgList &Args) {
- // Build the shadow stack entry at the very start of the function.
+/// CUDA passes the arguments with a level of indirection. For example, a
+/// (void*, short, void*) is passed as {void **, short *, void **} to the launch
+/// function. For the LLVM/offload launch we flatten the arguments into the
+/// struct directly. In addition, we include the size of the arguments, thus
+/// pass {sizeof({void *, short, void *}), ptr to {void *, short, void *},
+/// nullptr}. The last nullptr needs to be initialized to an array of pointers
+/// pointing to the arguments if we want to offload to the host.
+Address CGNVCUDARuntime::prepareKernelArgsLLVMOffload(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ SmallVector<llvm::Type *> ArgTypes, KernelLaunchParamsTypes;
+ for (auto &Arg : Args)
+ ArgTypes.push_back(CGF.ConvertTypeForMem(Arg->getType()));
+ llvm::StructType *KernelArgsTy = llvm::StructType::create(ArgTypes);
+
+ auto *Int64Ty = CGF.Builder.getInt64Ty();
+ KernelLaunchParamsTypes.push_back(Int64Ty);
+ KernelLaunchParamsTypes.push_back(PtrTy);
+ KernelLaunchParamsTypes.push_back(PtrTy);
+
+ llvm::StructType *KernelLaunchParamsTy =
+ llvm::StructType::create(KernelLaunchParamsTypes);
+ Address KernelArgs = CGF.CreateTempAllocaWithoutCast(
+ KernelArgsTy, CharUnits::fromQuantity(16), "kernel_args");
+ Address KernelLaunchParams = CGF.CreateTempAllocaWithoutCast(
+ KernelLaunchParamsTy, CharUnits::fromQuantity(16),
+ "kernel_launch_params");
+
+ auto KernelArgsSize = CGM.getDataLayout().getTypeAllocSize(KernelArgsTy);
+ CGF.Builder.CreateStore(llvm::ConstantInt::get(Int64Ty, KernelArgsSize),
+ CGF.Builder.CreateStructGEP(KernelLaunchParams, 0));
+ CGF.Builder.CreateStore(KernelArgs.emitRawPointer(CGF),
+ CGF.Builder.CreateStructGEP(KernelLaunchParams, 1));
+ CGF.Builder.CreateStore(llvm::Constant::getNullValue(PtrTy),
+ CGF.Builder.CreateStructGEP(KernelLaunchParams, 2));
+
+ for (unsigned i = 0; i < Args.size(); ++i) {
+ auto *ArgVal = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[i]));
+ CGF.Builder.CreateStore(ArgVal, CGF.Builder.CreateStructGEP(KernelArgs, i));
+ }
+ return KernelLaunchParams;
+}
+
+Address CGNVCUDARuntime::prepareKernelArgs(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
// Calculate amount of space we will need for all arguments. If we have no
// args, allocate a single pointer so we still have a valid pointer to the
// argument array that we can pass to runtime, even if it will be unused.
@@ -331,6 +385,17 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
VoidVarPtr, CGF.Builder.CreateConstGEP1_32(
PtrTy, KernelArgs.emitRawPointer(CGF), i));
}
+ return KernelArgs;
+}
+
+// CUDA 9.0+ uses a new way to launch kernels. Parameters are packed in a local
+// array and kernels are launched using cudaLaunchKernel().
+void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ // Build the shadow stack entry at the very start of the function.
+ Address KernelArgs = CGF.getLangOpts().OffloadViaLLVM
+ ? prepareKernelArgsLLVMOffload(CGF, Args)
+ : prepareKernelArgs(CGF, Args);
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
@@ -1129,8 +1194,9 @@ void CGNVCUDARuntime::transformManagedVars() {
// registered. The linker will provide a pointer to this section so we can
// register the symbols with the linked device image.
void CGNVCUDARuntime::createOffloadingEntries() {
- StringRef Section = CGM.getLangOpts().HIP ? "hip_offloading_entries"
- : "cuda_offloading_entries";
+ SmallVector<char, 32> Out;
+ StringRef Section = (SectionPrefix + "_offloading_entries").toStringRef(Out);
+
llvm::Module &M = CGM.getModule();
for (KernelInfo &I : EmittedKernels)
llvm::offloading::emitOffloadingEntry(
@@ -1199,7 +1265,8 @@ llvm::Function *CGNVCUDARuntime::finalizeModule() {
}
return nullptr;
}
- if (CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode)
+ if (CGM.getLangOpts().OffloadViaLLVM ||
+ (CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode))
createOffloadingEntries();
else
return makeModuleCtorFunction();
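For reference, the flattened argument layout built by
`prepareKernelArgsLLVMOffload` above corresponds roughly to the following
structures (a sketch for a kernel taking `(void *, short, void *)`, not the
actual emitted types):
```cpp
#include <cstdint>
// Arguments are stored by value, without the extra level of indirection
// used by the CUDA launch API.
struct KernelArgs {
  void *A;
  short Q; // padded to 8 bytes
  void *P;
};
struct KernelLaunchParams {
  uint64_t Size;  // sizeof(KernelArgs) == 24
  void *Args;     // pointer to the KernelArgs instance
  void **ArgPtrs; // nullptr; only filled with per-argument pointers if we
                  // want to offload to the host
};
```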
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 67bf0604acd6e..0f55b429512c2 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -792,11 +792,13 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
}) ||
C.getInputArgs().hasArg(options::OPT_hip_link) ||
C.getInputArgs().hasArg(options::OPT_hipstdpar);
+ bool UseLLVMOffload = C.getInputArgs().hasArg(
+ options::OPT_foffload_via_llvm, options::OPT_fno_offload_via_llvm, false);
if (IsCuda && IsHIP) {
Diag(clang::diag::err_drv_mix_cuda_hip);
return;
}
- if (IsCuda) {
+ if (IsCuda && !UseLLVMOffload) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
const llvm::Triple &HostTriple = HostTC->getTriple();
auto OFK = Action::OFK_Cuda;
@@ -818,7 +820,7 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
CudaInstallation.WarnIfUnsupportedVersion();
}
C.addOffloadDeviceToolChain(CudaTC.get(), OFK);
- } else if (IsHIP) {
+ } else if (IsHIP && !UseLLVMOffload) {
if (auto *OMPTargetArg =
C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
Diag(clang::diag::err_drv_unsupported_opt_for_language_mode)
@@ -842,10 +844,11 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
// We need to generate an OpenMP toolchain if the user specified targets with
// the -fopenmp-targets option or used --offload-arch with OpenMP enabled.
bool IsOpenMPOffloading =
- C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
- options::OPT_fno_openmp, false) &&
- (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ) ||
- C.getInputArgs().hasArg(options::OPT_offload_arch_EQ));
+ ((IsCuda || IsHIP) && UseLLVMOffload) ||
+ (C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false) &&
+ (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ) ||
+ C.getInputArgs().hasArg(options::OPT_offload_arch_EQ)));
if (IsOpenMPOffloading) {
// We expect that -fopenmp-targets is always used in conjunction with the
// option -fopenmp specifying a valid runtime with offloading support, i.e.
@@ -873,7 +876,7 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
for (StringRef T : OpenMPTargets->getValues())
OpenMPTriples.insert(T);
} else if (C.getInputArgs().hasArg(options::OPT_offload_arch_EQ) &&
- !IsHIP && !IsCuda) {
+ ((!IsHIP && !IsCuda) || UseLLVMOffload)) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
auto AMDTriple = getHIPOffloadTargetTriple(*this, C.getInputArgs());
auto NVPTXTriple = getNVIDIAOffloadTargetTriple(*this, C.getInputArgs(),
@@ -4146,6 +4149,8 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
bool UseNewOffloadingDriver =
C.isOffloadingHostKind(Action::OFK_OpenMP) ||
+ Args.hasFlag(options::OPT_foffload_via_llvm,
+ options::OPT_fno_offload_via_llvm, false) ||
Args.hasFlag(options::OPT_offload_new_driver,
options::OPT_no_offload_new_driver, false);
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index b8d8ff3db5d1f..77d365bad229f 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1125,6 +1125,18 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
CmdArgs.push_back("__clang_openmp_device_functions.h");
}
+ if (Args.hasArg(options::OPT_foffload_via_llvm)) {
+ // Add llvm_offload_wrappers/* to our system include path. This lets us wrap
+ // standard library headers and other headers.
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include", "llvm_offload_wrappers");
+ CmdArgs.append({"-internal-isystem", Args.MakeArgString(P), "-include"});
+ if (JA.isDeviceOffloading(Action::OFK_OpenMP))
+ CmdArgs.push_back("__llvm_offload_device.h");
+ else
+ CmdArgs.push_back("__llvm_offload_host.h");
+ }
+
// Add -i* options, and automatically translate to
// -include-pch/-include-pth for transparent PCH support. It's
// wonky, but we include looking for .gch so we can support seamless
@@ -6598,6 +6610,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// device offloading action other than OpenMP.
if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
options::OPT_fno_openmp, false) &&
+ !Args.hasFlag(options::OPT_foffload_via_llvm,
+ options::OPT_fno_offload_via_llvm, false) &&
(JA.isDeviceOffloading(Action::OFK_None) ||
JA.isDeviceOffloading(Action::OFK_OpenMP))) {
switch (D.getOpenMPRuntime(Args)) {
@@ -6675,11 +6689,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptOutFlag(CmdArgs, options::OPT_fopenmp_extensions,
options::OPT_fno_openmp_extensions);
}
-
- // Forward the new driver to change offloading code generation.
- if (Args.hasFlag(options::OPT_offload_new_driver,
- options::OPT_no_offload_new_driver, false))
+ // Forward the offload runtime change to code generation; liboffload implies
+ // the new driver. Otherwise, check whether we should forward the new driver
+ // to change offloading code generation.
+ if (Args.hasFlag(options::OPT_foffload_via_llvm,
+ options::OPT_fno_offload_via_llvm, false)) {
+ CmdArgs.append({"--offload-new-driver", "-foffload-via-llvm"});
+ } else if (Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false)) {
CmdArgs.push_back("--offload-new-driver");
+ }
SanitizeArgs.addArgs(TC, Args, CmdArgs, InputType);
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 2a4c1369f5a73..d142cc791925d 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -1205,8 +1205,13 @@ bool tools::addOpenMPRuntime(const Compilation &C, ArgStringList &CmdArgs,
bool ForceStaticHostRuntime, bool IsOffloadingHost,
bool GompNeedsRT) {
if (!Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
- options::OPT_fno_openmp, false))
+ options::OPT_fno_openmp, false)) {
+ // We need libomptarget (liboffload) if it's the chosen offloading runtime.
+ if (Args.hasFlag(options::OPT_foffload_via_llvm,
+ options::OPT_fno_offload_via_llvm, false))
+ CmdArgs.push_back("-lomptarget");
return false;
+ }
Driver::OpenMPRuntimeKind RTKind = TC.getDriver().getOpenMPRuntime(Args);
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index 2dfc7457b0ac7..7d154865ce3f2 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -861,17 +861,15 @@ void CudaToolChain::addClangTargetOptions(
DeviceOffloadingKind == Action::OFK_Cuda) &&
"Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");
- if (DeviceOffloadingKind == Action::OFK_Cuda) {
- CC1Args.append(
- {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});
-
- // Unsized function arguments used for variadics were introduced in CUDA-9.0
- // We still do not support generating code that actually uses variadic
- // arguments yet, but we do need to allow parsing them as recent CUDA
- // headers rely on that. https://github.com/llvm/llvm-project/issues/58410
- if (CudaInstallation.version() >= CudaVersion::CUDA_90)
- CC1Args.push_back("-fcuda-allow-variadic-functions");
- }
+ CC1Args.append(
+ {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});
+
+ // Unsized function arguments used for variadics were introduced in CUDA-9.0
+ // We still do not support generating code that actually uses variadic
+ // arguments yet, but we do need to allow parsing them as recent CUDA
+ // headers rely on that. https://github.com/llvm/llvm-project/issues/58410
+ if (CudaInstallation.version() >= CudaVersion::CUDA_90)
+ CC1Args.push_back("-fcuda-allow-variadic-functions");
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
@@ -889,6 +887,13 @@ void CudaToolChain::addClangTargetOptions(
CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+ // For now, we don't use any Offload/OpenMP device runtime when we offload
+ // CUDA via LLVM/Offload. We should split the Offload/OpenMP device runtime
+ // and include the "generic" (or CUDA-specific) parts.
+ if (DriverArgs.hasFlag(options::OPT_foffload_via_llvm,
+ options::OPT_fno_offload_via_llvm, false))
+ return;
+
clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();
if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index d3090e488306f..9e0eb0f4cde89 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -325,6 +325,12 @@ set(openmp_wrapper_files
openmp_wrappers/new
)
+set(llvm_offload_wrapper_files
+ llvm_offload_wrappers/__llvm_offload.h
+ llvm_offload_wrappers/__llvm_offload_host.h
+ llvm_offload_wrappers/__llvm_offload_device.h
+)
+
set(llvm_libc_wrapper_files
llvm_libc_wrappers/assert.h
llvm_libc_wrappers/stdio.h
@@ -375,7 +381,7 @@ endfunction(clang_generate_header)
# Copy header files from the source directory to the build directory
foreach( f ${files} ${cuda_wrapper_files} ${cuda_wrapper_bits_files}
${ppc_wrapper_files} ${openmp_wrapper_files} ${zos_wrapper_files} ${hlsl_files}
- ${llvm_libc_wrapper_files})
+ ${llvm_libc_wrapper_files} ${llvm_offload_wrapper_files})
copy_header_to_output_dir(${CMAKE_CURRENT_SOURCE_DIR} ${f})
endforeach( f )
@@ -501,6 +507,7 @@ add_header_target("hlsl-resource-headers" ${hlsl_files})
add_header_target("opencl-resource-headers" ${opencl_files})
add_header_target("llvm-libc-resource-headers" ${llvm_libc_wrapper_files})
add_header_target("openmp-resource-headers" ${openmp_wrapper_files})
+add_header_target("llvm-offload-resource-headers" ${llvm_libc_wrapper_files})
add_header_target("windows-resource-headers" ${windows_only_files})
add_header_target("utility-resource-headers" ${utility_files})
@@ -542,6 +549,11 @@ install(
DESTINATION ${header_install_dir}/openmp_wrappers
COMPONENT clang-resource-headers)
+install(
+ FILES ${llvm_offload_wrapper_files}
+ DESTINATION ${header_install_dir}/llvm_offload_wrappers
+ COMPONENT clang-resource-headers)
+
install(
FILES ${zos_wrapper_files}
DESTINATION ${header_install_dir}/zos_wrappers
@@ -704,8 +716,8 @@ install(
COMPONENT openmp-resource-headers)
install(
- FILES ${openmp_wrapper_files}
- DESTINATION ${header_install_dir}/openmp_wrappers
+ FILES ${llvm_offload_wrapper_files}
+ DESTINATION ${header_install_dir}/llvm_offload_wrappers
EXCLUDE_FROM_ALL
COMPONENT openmp-resource-headers)
diff --git a/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload.h b/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload.h
new file mode 100644
index 0000000000000..2898898904e29
--- /dev/null
+++ b/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload.h
@@ -0,0 +1,31 @@
+/*===------ LLVM/Offload helpers for kernel languages (CUDA/HIP) -*- c++ -*-===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#include <stddef.h>
+
+#define __host__ __attribute__((host))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+#define __managed__ __attribute__((managed))
+
+extern "C" {
+
+typedef struct dim3 {
+ dim3() {}
+ dim3(unsigned x) : x(x) {}
+ unsigned x = 0, y = 0, z = 0;
+} dim3;
+
+// TODO: For some reason the CUDA device compilation requires this declaration
+// to be present on the device while it is only used on the host.
+unsigned __llvmPushCallConfiguration(dim3 gridDim, dim3 blockDim,
+ size_t sharedMem = 0, void *stream = 0);
+}
diff --git a/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_device.h b/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_device.h
new file mode 100644
index 0000000000000..1a813b331515b
--- /dev/null
+++ b/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_device.h
@@ -0,0 +1,10 @@
+/*===------ LLVM/Offload helpers for kernel languages (CUDA/HIP) -*- c++ -*-===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#include "__llvm_offload.h"
diff --git a/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_host.h b/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_host.h
new file mode 100644
index 0000000000000..160289d169b55
--- /dev/null
+++ b/clang/lib/Headers/llvm_offload_wrappers/__llvm_offload_host.h
@@ -0,0 +1,15 @@
+/*===------ LLVM/Offload helpers for kernel languages (CUDA/HIP) -*- c++ -*-===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#include "__llvm_offload.h"
+
+extern "C" {
+unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
+ void **args, size_t sharedMem = 0, void *stream = 0);
+}
diff --git a/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h b/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
index d5b6846b03488..3e354c63efc66 100644
--- a/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
+++ b/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
@@ -10,17 +10,15 @@
#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
-#ifndef _OPENMP
-#error "This file is for OpenMP compilation only."
-#endif
-
#ifdef __cplusplus
extern "C" {
#endif
+#ifdef __NVPTX__
#pragma omp begin declare variant match( \
device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+#pragma push_macro("__CUDA__")
#define __CUDA__
#define __OPENMP_NVPTX__
@@ -31,9 +29,10 @@ extern "C" {
#include <__clang_cuda_device_functions.h>
#undef __OPENMP_NVPTX__
-#undef __CUDA__
+#pragma pop_macro("__CUDA__")
#pragma omp end declare variant
+#endif
#ifdef __AMDGCN__
#pragma omp begin declare variant match(device = {arch(amdgcn)})
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index 580b9872c6a1d..ec37c0df56c67 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -1068,6 +1068,9 @@ void SemaCUDA::inheritTargetAttrs(FunctionDecl *FD,
}
std::string SemaCUDA::getConfigureFuncName() const {
+ if (getLangOpts().OffloadViaLLVM)
+ return "__llvmPushCallConfiguration";
+
if (getLangOpts().HIP)
return getLangOpts().HIPUseNewLaunchAPI ? "__hipPushCallConfiguration"
: "hipConfigureCall";
diff --git a/clang/test/CodeGenCUDA/offload_via_llvm.cu b/clang/test/CodeGenCUDA/offload_via_llvm.cu
new file mode 100644
index 0000000000000..3eb580850fc48
--- /dev/null
+++ b/clang/test/CodeGenCUDA/offload_via_llvm.cu
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang -Xclang -triple -Xclang "x86_64-unknown-linux-gnu" -S -c -foffload-via-llvm -emit-llvm -o - %s | FileCheck %s
+
+// Check that we generate LLVM/Offload calls, including the KERNEL_LAUNCH_PARAMS argument.
+
+// CHECK-LABEL: define dso_local void @_Z18__device_stub__fooisPvS_(
+// CHECK-SAME: i32 noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], ptr noundef [[TMP2:%.*]], ptr noundef [[TMP3:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
+// CHECK-NEXT: [[DOTADDR2:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTADDR3:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[TMP0]], align 16
+// CHECK-NEXT: [[KERNEL_LAUNCH_PARAMS:%.*]] = alloca [[TMP1]], align 16
+// CHECK-NEXT: [[GRID_DIM:%.*]] = alloca [[STRUCT_DIM3:%.*]], align 8
+// CHECK-NEXT: [[BLOCK_DIM:%.*]] = alloca [[STRUCT_DIM3]], align 8
+// CHECK-NEXT: [[SHMEM_SIZE:%.*]] = alloca i64, align 8
+// CHECK-NEXT: [[STREAM:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[GRID_DIM_COERCE:%.*]] = alloca { i64, i32 }, align 8
+// CHECK-NEXT: [[BLOCK_DIM_COERCE:%.*]] = alloca { i64, i32 }, align 8
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4
+// CHECK-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
+// CHECK-NEXT: store ptr [[TMP2]], ptr [[DOTADDR2]], align 8
+// CHECK-NEXT: store ptr [[TMP3]], ptr [[DOTADDR3]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[TMP1]], ptr [[KERNEL_LAUNCH_PARAMS]], i32 0, i32 0
+// CHECK-NEXT: store i64 24, ptr [[TMP4]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[TMP1]], ptr [[KERNEL_LAUNCH_PARAMS]], i32 0, i32 1
+// CHECK-NEXT: store ptr [[KERNEL_ARGS]], ptr [[TMP5]], align 8
+// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[TMP1]], ptr [[KERNEL_LAUNCH_PARAMS]], i32 0, i32 2
+// CHECK-NEXT: store ptr null, ptr [[TMP6]], align 16
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTADDR]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[TMP0]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP8]], align 16
+// CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[DOTADDR1]], align 2
+// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[TMP0]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NEXT: store i16 [[TMP9]], ptr [[TMP10]], align 4
+// CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTADDR2]], align 8
+// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[TMP0]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NEXT: store ptr [[TMP11]], ptr [[TMP12]], align 8
+// CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTADDR3]], align 8
+// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[TMP0]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NEXT: store ptr [[TMP13]], ptr [[TMP14]], align 16
+// CHECK-NEXT: [[TMP15:%.*]] = call i32 @__llvmPopCallConfiguration(ptr [[GRID_DIM]], ptr [[BLOCK_DIM]], ptr [[SHMEM_SIZE]], ptr [[STREAM]])
+// CHECK-NEXT: [[TMP16:%.*]] = load i64, ptr [[SHMEM_SIZE]], align 8
+// CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[STREAM]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[GRID_DIM_COERCE]], ptr align 8 [[GRID_DIM]], i64 12, i1 false)
+// CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[GRID_DIM_COERCE]], i32 0, i32 0
+// CHECK-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP18]], align 8
+// CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[GRID_DIM_COERCE]], i32 0, i32 1
+// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[BLOCK_DIM_COERCE]], ptr align 8 [[BLOCK_DIM]], i64 12, i1 false)
+// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[BLOCK_DIM_COERCE]], i32 0, i32 0
+// CHECK-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP22]], align 8
+// CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[BLOCK_DIM_COERCE]], i32 0, i32 1
+// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 8
+// CHECK-NEXT: [[CALL:%.*]] = call noundef i32 @llvmLaunchKernel(ptr noundef @_Z18__device_stub__fooisPvS_, i64 [[TMP19]], i32 [[TMP21]], i64 [[TMP23]], i32 [[TMP25]], ptr noundef [[KERNEL_LAUNCH_PARAMS]], i64 noundef [[TMP16]], ptr noundef [[TMP17]])
+// CHECK-NEXT: br label %[[SETUP_END:.*]]
+// CHECK: [[SETUP_END]]:
+// CHECK-NEXT: ret void
+//
+__global__ void foo(int, short, void *, void *) {}
+
+// CHECK-LABEL: define dso_local void @_Z5test1Pv(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_DIM3:%.*]], align 4
+// CHECK-NEXT: [[AGG_TMP1:%.*]] = alloca [[STRUCT_DIM3]], align 4
+// CHECK-NEXT: [[AGG_TMP_COERCE:%.*]] = alloca { i64, i32 }, align 4
+// CHECK-NEXT: [[AGG_TMP1_COERCE:%.*]] = alloca { i64, i32 }, align 4
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: call void @_ZN4dim3C2Ej(ptr noundef nonnull align 4 dereferenceable(12) [[AGG_TMP]], i32 noundef 3)
+// CHECK-NEXT: call void @_ZN4dim3C2Ej(ptr noundef nonnull align 4 dereferenceable(12) [[AGG_TMP1]], i32 noundef 7)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[AGG_TMP_COERCE]], ptr align 4 [[AGG_TMP]], i64 12, i1 false)
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[AGG_TMP_COERCE]], i32 0, i32 0
+// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[AGG_TMP_COERCE]], i32 0, i32 1
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[AGG_TMP1_COERCE]], ptr align 4 [[AGG_TMP1]], i64 12, i1 false)
+// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[AGG_TMP1_COERCE]], i32 0, i32 0
+// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds { i64, i32 }, ptr [[AGG_TMP1_COERCE]], i32 0, i32 1
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
+// CHECK-NEXT: [[CALL:%.*]] = call i32 @__llvmPushCallConfiguration(i64 [[TMP1]], i32 [[TMP3]], i64 [[TMP5]], i32 [[TMP7]], i64 noundef 0, ptr noundef null)
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL]], 0
+// CHECK-NEXT: br i1 [[TOBOOL]], label %[[KCALL_END:.*]], label %[[KCALL_CONFIGOK:.*]]
+// CHECK: [[KCALL_CONFIGOK]]:
+// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: call void @_Z18__device_stub__fooisPvS_(i32 noundef 13, i16 noundef signext 1, ptr noundef [[TMP8]], ptr noundef [[TMP9]]) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT: br label %[[KCALL_END]]
+// CHECK: [[KCALL_END]]:
+// CHECK-NEXT: ret void
+//
+void test1(void *Ptr) {
+ foo<<<3, 7>>>(13, 1, Ptr, Ptr);
+}
diff --git a/clang/test/Driver/cuda-via-liboffload.cu b/clang/test/Driver/cuda-via-liboffload.cu
new file mode 100644
index 0000000000000..68dc963e906b2
--- /dev/null
+++ b/clang/test/Driver/cuda-via-liboffload.cu
@@ -0,0 +1,23 @@
+// RUN: %clang -### -target x86_64-linux-gnu -foffload-via-llvm -ccc-print-bindings \
+// RUN: --offload-arch=sm_35 --offload-arch=sm_70 %s 2>&1 \
+// RUN: | FileCheck -check-prefix BINDINGS %s
+
+// BINDINGS: "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HOST_BC:.+]]"
+// BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "clang", inputs: ["[[INPUT]]", "[[HOST_BC]]"], output: "[[PTX_SM_35:.+]]"
+// BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "NVPTX::Assembler", inputs: ["[[PTX_SM_35]]"], output: "[[CUBIN_SM_35:.+]]"
+// BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "clang", inputs: ["[[INPUT]]", "[[HOST_BC]]"], output: "[[PTX_SM_70:.+]]"
+// BINDINGS-NEXT: "nvptx64-nvidia-cuda" - "NVPTX::Assembler", inputs: ["[[PTX_SM_70:.+]]"], output: "[[CUBIN_SM_70:.+]]"
+// BINDINGS-NEXT: "x86_64-unknown-linux-gnu" - "Offload::Packager", inputs: ["[[CUBIN_SM_35]]", "[[CUBIN_SM_70]]"], output: "[[BINARY:.+]]"
+// BINDINGS-NEXT: "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[HOST_BC]]", "[[BINARY]]"], output: "[[HOST_OBJ:.+]]"
+// BINDINGS-NEXT: "x86_64-unknown-linux-gnu" - "Offload::Linker", inputs: ["[[HOST_OBJ]]"], output: "a.out"
+
+// RUN: %clang -### -target x86_64-linux-gnu -foffload-via-llvm -ccc-print-bindings \
+// RUN: --offload-arch=sm_35 --offload-arch=sm_70 %s 2>&1 \
+// RUN: | FileCheck -check-prefix BINDINGS-DEVICE %s
+
+// BINDINGS-DEVICE: # "nvptx64-nvidia-cuda" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[PTX:.+]]"
+// BINDINGS-DEVICE: # "nvptx64-nvidia-cuda" - "NVPTX::Assembler", inputs: ["[[PTX]]"], output: "[[CUBIN:.+]]"
+
+// RUN: %clang -### -target x86_64-linux-gnu -ccc-print-bindings --offload-link -foffload-via-llvm %s 2>&1 | FileCheck -check-prefix DEVICE-LINK %s
+
+// DEVICE-LINK: "x86_64-unknown-linux-gnu" - "Offload::Linker", inputs: ["[[INPUT:.+]]"], output: "a.out"
diff --git a/offload/include/Shared/APITypes.h b/offload/include/Shared/APITypes.h
index 5b22bbaac144f..4c1f7712249a3 100644
--- a/offload/include/Shared/APITypes.h
+++ b/offload/include/Shared/APITypes.h
@@ -102,8 +102,9 @@ struct KernelArgsTy {
0; // Tripcount for the teams / distribute loop, 0 otherwise.
struct {
uint64_t NoWait : 1; // Was this kernel spawned with a `nowait` clause.
- uint64_t Unused : 63;
- } Flags = {0, 0};
+ uint64_t IsCUDA : 1; // Was this kernel spawned via CUDA.
+ uint64_t Unused : 62;
+ } Flags = {0, 0, 0};
// The number of teams (for x,y,z dimension).
uint32_t NumTeams[3] = {0, 0, 0};
// The number of threads (for x,y,z dimension).
diff --git a/offload/include/omptarget.h b/offload/include/omptarget.h
index 323dee41630f2..2b6445e9fbe55 100644
--- a/offload/include/omptarget.h
+++ b/offload/include/omptarget.h
@@ -107,7 +107,7 @@ enum TargetAllocTy : int32_t {
inline KernelArgsTy CTorDTorKernelArgs = {1, 0, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr,
- 0, {0,0}, {1, 0, 0}, {1, 0, 0}, 0};
+ 0, {0,0,0}, {1, 0, 0}, {1, 0, 0}, 0};
struct DeviceTy;
diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp
index 94f9d4670b672..2608af016284b 100644
--- a/offload/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp
@@ -549,9 +549,16 @@ Error GenericKernelTy::launch(GenericDeviceTy &GenericDevice, void **ArgPtrs,
if (!KernelLaunchEnvOrErr)
return KernelLaunchEnvOrErr.takeError();
- KernelLaunchParamsTy LaunchParams =
- prepareArgs(GenericDevice, ArgPtrs, ArgOffsets, KernelArgs.NumArgs, Args,
- Ptrs, *KernelLaunchEnvOrErr);
+ KernelLaunchParamsTy LaunchParams;
+
+ // Kernel languages don't use indirection.
+ if (KernelArgs.Flags.IsCUDA) {
+ LaunchParams = *reinterpret_cast<KernelLaunchParamsTy *>(KernelArgs.ArgPtrs);
+ } else {
+ LaunchParams =
+ prepareArgs(GenericDevice, ArgPtrs, ArgOffsets, KernelArgs.NumArgs,
+ Args, Ptrs, *KernelLaunchEnvOrErr);
+ }
uint32_t NumThreads = getNumThreads(GenericDevice, KernelArgs.ThreadLimit);
uint64_t NumBlocks =
diff --git a/offload/src/CMakeLists.txt b/offload/src/CMakeLists.txt
index efa5cdab33ec9..b442df45deaa5 100644
--- a/offload/src/CMakeLists.txt
+++ b/offload/src/CMakeLists.txt
@@ -22,6 +22,7 @@ add_llvm_library(omptarget
OpenMP/InteropAPI.cpp
OpenMP/OMPT/Callback.cpp
+ KernelLanguage/API.cpp
ADDITIONAL_HEADER_DIRS
${LIBOMPTARGET_INCLUDE_DIR}
diff --git a/offload/src/KernelLanguage/API.cpp b/offload/src/KernelLanguage/API.cpp
new file mode 100644
index 0000000000000..9ffc199b5da7d
--- /dev/null
+++ b/offload/src/KernelLanguage/API.cpp
@@ -0,0 +1,76 @@
+//===------ API.cpp - Kernel Language (CUDA/HIP) entry points ----- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "Shared/APITypes.h"
+
+#include <cstdio>
+
+struct dim3 {
+ unsigned x = 0, y = 0, z = 0;
+};
+
+struct __omp_kernel_t {
+ dim3 __grid_size;
+ dim3 __block_size;
+ size_t __shared_memory;
+
+ void *__stream;
+};
+
+static __omp_kernel_t __current_kernel = {};
+#pragma omp threadprivate(__current_kernel);
+
+extern "C" {
+
+// TODO: There is little reason we need to keep these names or the way calls
+// are issued. For now we do so to avoid modifying Clang's CUDA codegen. It is
+// unclear when we actually need to push/pop configurations.
+unsigned __llvmPushCallConfiguration(dim3 __grid_size, dim3 __block_size,
+ size_t __shared_memory, void *__stream) {
+ __omp_kernel_t &__kernel = __current_kernel;
+ __kernel.__grid_size = __grid_size;
+ __kernel.__block_size = __block_size;
+ __kernel.__shared_memory = __shared_memory;
+ __kernel.__stream = __stream;
+ return 0;
+}
+
+unsigned __llvmPopCallConfiguration(dim3 *__grid_size, dim3 *__block_size,
+ size_t *__shared_memory, void *__stream) {
+ __omp_kernel_t &__kernel = __current_kernel;
+ *__grid_size = __kernel.__grid_size;
+ *__block_size = __kernel.__block_size;
+ *__shared_memory = __kernel.__shared_memory;
+ *((void **)__stream) = __kernel.__stream;
+ return 0;
+}
+
+int __tgt_target_kernel(void *Loc, int64_t DeviceId, int32_t NumTeams,
+ int32_t ThreadLimit, const void *HostPtr,
+ KernelArgsTy *Args);
+
+unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
+ void *args, size_t sharedMem, void *stream) {
+ KernelArgsTy Args = {};
+ Args.DynCGroupMem = sharedMem;
+ Args.NumTeams[0] = gridDim.x;
+ Args.NumTeams[1] = gridDim.y;
+ Args.NumTeams[2] = gridDim.z;
+ Args.ThreadLimit[0] = blockDim.x;
+ Args.ThreadLimit[1] = blockDim.y;
+ Args.ThreadLimit[2] = blockDim.z;
+ Args.ArgPtrs = reinterpret_cast<void **>(args);
+ Args.Flags.IsCUDA = true;
+ int rv = __tgt_target_kernel(nullptr, 0, gridDim.x,
+ blockDim.x, func, &Args);
+ return rv;
+}
+}
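A minimal sketch of the round trip through the thread-local configuration
defined above (the defaults for `sharedMem` and `stream` come from the
declaration in `__llvm_offload.h`):
```cpp
dim3 Grid, Block;
size_t SharedMem;
void *Stream;
// The launch site pushes the configuration ...
__llvmPushCallConfiguration(dim3{3}, dim3{7});
// ... and the device stub pops it before calling llvmLaunchKernel.
__llvmPopCallConfiguration(&Grid, &Block, &SharedMem, &Stream);
// Now Grid.x == 3, Block.x == 7, SharedMem == 0, Stream == nullptr.
```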
diff --git a/offload/src/exports b/offload/src/exports
index f95544ec8329c..7bdc7d2a531bb 100644
--- a/offload/src/exports
+++ b/offload/src/exports
@@ -71,6 +71,9 @@ VERS1.0 {
__tgt_interop_use;
__tgt_interop_destroy;
ompt_libomptarget_connect;
+ __llvmPushCallConfiguration;
+ __llvmPopCallConfiguration;
+ llvmLaunchKernel;
local:
*;
};
diff --git a/offload/test/lit.cfg b/offload/test/lit.cfg
index 6c590603079c4..9053151e44a78 100644
--- a/offload/test/lit.cfg
+++ b/offload/test/lit.cfg
@@ -66,7 +66,7 @@ def evaluate_bool_env(env):
config.name = 'libomptarget :: ' + config.libomptarget_current_target
# suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cpp', '.cc', '.f90']
+config.suffixes = ['.c', '.cpp', '.cc', '.f90', '.cu']
# excludes: A list of directories to exclude from the testuites.
config.excludes = ['Inputs']
diff --git a/offload/test/offloading/CUDA/basic_launch.cu b/offload/test/offloading/CUDA/basic_launch.cu
new file mode 100644
index 0000000000000..2915a7c216ab5
--- /dev/null
+++ b/offload/test/offloading/CUDA/basic_launch.cu
@@ -0,0 +1,31 @@
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t
+// RUN: %t | %fcheck-generic
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t -fopenmp
+// RUN: %t | %fcheck-generic
+
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+
+#include <stdio.h>
+
+extern "C" {
+void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
+void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
+}
+
+__global__ void square(int *A) { *A = 42; }
+
+int main(int argc, char **argv) {
+ int DevNo = 0;
+ int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
+ *Ptr = 7;
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr:0x.*]], *Ptr: 7
+ square<<<1, 1>>>(Ptr);
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr]], *Ptr: 42
+ llvm_omp_target_free_shared(Ptr, DevNo);
+}
diff --git a/offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu b/offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu
new file mode 100644
index 0000000000000..615cae6f7b233
--- /dev/null
+++ b/offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu
@@ -0,0 +1,32 @@
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t
+// RUN: %t | %fcheck-generic
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t -fopenmp
+// RUN: %t | %fcheck-generic
+
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+#include <stdio.h>
+
+extern "C" {
+void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
+void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
+}
+
+__global__ void square(int *A) {
+ __scoped_atomic_fetch_add(A, 1, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_DEVICE);
+}
+
+int main(int argc, char **argv) {
+ int DevNo = 0;
+ int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
+ *Ptr = 0;
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr:0x.*]], *Ptr: 0
+ square<<<7, 6>>>(Ptr);
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr]], *Ptr: 42
+ llvm_omp_target_free_shared(Ptr, DevNo);
+}
diff --git a/offload/test/offloading/CUDA/basic_launch_multi_arg.cu b/offload/test/offloading/CUDA/basic_launch_multi_arg.cu
new file mode 100644
index 0000000000000..f95f1dbacc79c
--- /dev/null
+++ b/offload/test/offloading/CUDA/basic_launch_multi_arg.cu
@@ -0,0 +1,41 @@
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t
+// RUN: %t | %fcheck-generic
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t -fopenmp
+// RUN: %t | %fcheck-generic
+
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+#include <stdio.h>
+
+extern "C" {
+void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
+void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
+}
+
+__global__ void square(int *Dst, short Q, int *Src, short P) {
+ *Dst = (Src[0] + Src[1]) * (Q + P);
+ Src[0] = Q;
+ Src[1] = P;
+}
+
+int main(int argc, char **argv) {
+ int DevNo = 0;
+ int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
+ int *Src = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(8, DevNo));
+ *Ptr = 7;
+ Src[0] = -2;
+ Src[1] = 8;
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr:0x.*]], *Ptr: 7
+ printf("Src: %i : %i\n", Src[0], Src[1]);
+ // CHECK: Src: -2 : 8
+ square<<<1, 1>>>(Ptr, 3, Src, 4);
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr]], *Ptr: 42
+ printf("Src: %i : %i\n", Src[0], Src[1]);
+ // CHECK: Src: 3 : 4
+ llvm_omp_target_free_shared(Ptr, DevNo);
+}
diff --git a/offload/test/offloading/CUDA/kernel_tu.cu.inc b/offload/test/offloading/CUDA/kernel_tu.cu.inc
new file mode 100644
index 0000000000000..d7d28a109dfc5
--- /dev/null
+++ b/offload/test/offloading/CUDA/kernel_tu.cu.inc
@@ -0,0 +1 @@
+__global__ void square(int *A) { *A = 42; }
diff --git a/offload/test/offloading/CUDA/launch_tu.cu b/offload/test/offloading/CUDA/launch_tu.cu
new file mode 100644
index 0000000000000..c45f40b115b56
--- /dev/null
+++ b/offload/test/offloading/CUDA/launch_tu.cu
@@ -0,0 +1,32 @@
+// clang-format off
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t.launch_tu.o -c
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native -x cuda %S/kernel_tu.cu.inc -o %t.kernel_tu.o -c
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %t.launch_tu.o %t.kernel_tu.o -o %t
+// RUN: %t | %fcheck-generic
+// clang-format on
+
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+#include <stdio.h>
+
+extern "C" {
+void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
+void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
+}
+
+extern __global__ void square(int *A);
+
+int main(int argc, char **argv) {
+ int DevNo = 0;
+ int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
+ *Ptr = 7;
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr:0x.*]], *Ptr: 7
+ square<<<1, 1>>>(Ptr);
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr]], *Ptr: 42
+ llvm_omp_target_free_shared(Ptr, DevNo);
+}
>From 710445500e47cc1ac6b611d7583690e881135da9 Mon Sep 17 00:00:00 2001
From: Johannes Doerfert <johannes at jdoerfert.de>
Date: Fri, 7 Jun 2024 17:06:02 -0700
Subject: [PATCH 2/3] [Offload][CUDA] Add initial cuda_runtime.h overlay
This provides the header overlay for cuda_runtime.h, which is found
before any CUDA installation (none is necessary). Some basic APIs are
defined in terms of the omp_target_* ones; with the upcoming API
redesign, the requirements of CUDA should be taken into account.
Based on: https://github.com/llvm/llvm-project/pull/94549
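With the overlay, a plain CUDA runtime API program builds and runs without
any CUDA installation, e.g. (a sketch mirroring the new tests; the file name
is made up):
```
❯❯❯ cat test_api.cu
#include <cuda_runtime.h>

__global__ void kernel(int *DevPtr, int N) {
  for (int i = 0; i < N; ++i)
    DevPtr[i] = 1;
}

int main() {
  int *DevPtr;
  if (cudaMalloc(&DevPtr, 42 * sizeof(int)) != cudaSuccess)
    return -1;
  kernel<<<1, 1>>>(DevPtr, 42);
  if (cudaFree(DevPtr) != cudaSuccess)
    return -1;
}
❯❯❯ clang++ test_api.cu -foffload-via-llvm --offload-arch=native -o test_api
```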
---
clang/lib/Headers/CMakeLists.txt | 1 +
.../llvm_offload_wrappers/cuda_runtime.h | 131 ++++++++++++++++++
.../offloading/CUDA/basic_api_malloc_free.cu | 41 ++++++
.../test/offloading/CUDA/basic_api_memcpy.cu | 46 ++++++
.../test/offloading/CUDA/basic_api_memset.cu | 43 ++++++
5 files changed, 262 insertions(+)
create mode 100644 clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h
create mode 100644 offload/test/offloading/CUDA/basic_api_malloc_free.cu
create mode 100644 offload/test/offloading/CUDA/basic_api_memcpy.cu
create mode 100644 offload/test/offloading/CUDA/basic_api_memset.cu
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index 9e0eb0f4cde89..251e5b0ba2381 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -329,6 +329,7 @@ set(llvm_offload_wrapper_files
llvm_offload_wrappers/__llvm_offload.h
llvm_offload_wrappers/__llvm_offload_host.h
llvm_offload_wrappers/__llvm_offload_device.h
+ llvm_offload_wrappers/cuda_runtime.h
)
set(llvm_libc_wrapper_files
diff --git a/clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h b/clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h
new file mode 100644
index 0000000000000..8718e462a82d3
--- /dev/null
+++ b/clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h
@@ -0,0 +1,131 @@
+/*===- cuda_runtime.h - LLVM/Offload wrappers for CUDA runtime API ---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CUDA_RUNTIME_API__
+#define __CUDA_RUNTIME_API__
+
+#include <cstddef>
+#include <optional>
+
+extern "C" {
+int omp_get_initial_device(void);
+void omp_target_free(void *Ptr, int Device);
+void *omp_target_alloc(size_t Size, int Device);
+int omp_target_memcpy(void *Dst, const void *Src, size_t Length,
+ size_t DstOffset, size_t SrcOffset, int DstDevice,
+ int SrcDevice);
+void *omp_target_memset(void *Ptr, int C, size_t N, int DeviceNum);
+}
+
+// TODO: There are many fields missing in this enumeration.
+typedef enum cudaError {
+ cudaSuccess = 0,
+ cudaErrorInvalidValue = 1,
+ cudaErrorMemoryAllocation = 2,
+ cudaErrorNoDevice = 100,
+ cudaErrorInvalidDevice = 101,
+ cudaErrorOTHER = -1,
+} cudaError_t;
+
+enum cudaMemcpyKind {
+ cudaMemcpyHostToHost = 0,
+ cudaMemcpyHostToDevice = 1,
+ cudaMemcpyDeviceToHost = 2,
+ cudaMemcpyDeviceToDevice = 3,
+ cudaMemcpyDefault = 4
+};
+
+typedef void *cudaStream_t;
+
+static thread_local cudaError_t __cudaomp_last_error = cudaSuccess;
+
+// Returns the last error that has been produced and resets it to cudaSuccess.
+inline cudaError_t cudaGetLastError() {
+ cudaError_t TempError = __cudaomp_last_error;
+ __cudaomp_last_error = cudaSuccess;
+ return TempError;
+}
+
+// Returns the last error that has been produced without resetting it.
+inline cudaError_t cudaPeekAtLastError() { return __cudaomp_last_error; }
+
+inline cudaError_t __cudaMalloc(void **devPtr, size_t size) {
+ int DeviceNum = 0;
+ *devPtr = omp_target_alloc(size, DeviceNum);
+ if (*devPtr == NULL)
+ return __cudaomp_last_error = cudaErrorMemoryAllocation;
+
+ return __cudaomp_last_error = cudaSuccess;
+}
+
+template <class T> cudaError_t cudaMalloc(T **devPtr, size_t size) {
+ return __cudaMalloc((void **)devPtr, size);
+}
+
+inline cudaError_t __cudaFree(void *devPtr) {
+ int DeviceNum = 0;
+ omp_target_free(devPtr, DeviceNum);
+ return __cudaomp_last_error = cudaSuccess;
+}
+
+template <class T> inline cudaError_t cudaFree(T *ptr) {
+ return __cudaFree((void *)ptr);
+}
+
+inline cudaError_t __cudaMemcpy(void *dst, const void *src, size_t count,
+ cudaMemcpyKind kind) {
+ // get the host device number (which is the initial device)
+ int HostDeviceNum = omp_get_initial_device();
+
+ // use the default device for gpu
+ int GPUDeviceNum = 0;
+
+ // default to copy from host to device
+ int DstDeviceNum = GPUDeviceNum;
+ int SrcDeviceNum = HostDeviceNum;
+
+ if (kind == cudaMemcpyDeviceToHost)
+ std::swap(DstDeviceNum, SrcDeviceNum);
+
+ // omp_target_memcpy returns 0 on success and non-zero on failure
+ if (omp_target_memcpy(dst, src, count, 0, 0, DstDeviceNum, SrcDeviceNum))
+ return __cudaomp_last_error = cudaErrorInvalidValue;
+ return __cudaomp_last_error = cudaSuccess;
+}
+
+template <class T>
+inline cudaError_t cudaMemcpy(T *dst, const T *src, size_t count,
+ cudaMemcpyKind kind) {
+ return __cudaMemcpy((void *)dst, (const void *)src, count, kind);
+}
+
+inline cudaError_t __cudaMemset(void *devPtr, int value, size_t count,
+ cudaStream_t stream = 0) {
+ int DeviceNum = 0;
+ if (!omp_target_memset(devPtr, value, count, DeviceNum))
+ return __cudaomp_last_error = cudaErrorInvalidValue;
+ return __cudaomp_last_error = cudaSuccess;
+}
+
+template <class T>
+inline cudaError_t cudaMemset(T *devPtr, int value, size_t count) {
+ return __cudaMemset((void *)devPtr, value, count);
+}
+
+inline cudaError_t cudaDeviceSynchronize() {
+ // TODO: not implemented, not async yet.
+ return __cudaomp_last_error = cudaSuccess;
+}
+
+inline cudaError_t cudaDeviceReset(void) {
+ // TODO: not implemented.
+ return __cudaomp_last_error = cudaSuccess;
+}
+
+#endif
diff --git a/offload/test/offloading/CUDA/basic_api_malloc_free.cu b/offload/test/offloading/CUDA/basic_api_malloc_free.cu
new file mode 100644
index 0000000000000..86dc5f8fef68e
--- /dev/null
+++ b/offload/test/offloading/CUDA/basic_api_malloc_free.cu
@@ -0,0 +1,41 @@
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t
+// RUN: %t | %fcheck-generic
+
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+#include <cuda_runtime.h>
+#include <stdio.h>
+
+extern "C" {
+void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
+void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
+}
+
+__global__ void kernel(int *A, int *DevPtr, int N) {
+ for (int i = 0; i < N; ++i)
+ DevPtr[i] = 1;
+ for (int i = 0; i < N; ++i)
+ *A += DevPtr[i];
+}
+
+int main(int argc, char **argv) {
+ int DevNo = 0;
+ int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
+ int *DevPtr;
+ auto Err = cudaMalloc(&DevPtr, 42 * sizeof(int));
+ if (Err != cudaSuccess)
+ return -1;
+ *Ptr = 0;
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr:0x.*]], *Ptr: 0
+ kernel<<<1, 1>>>(Ptr, DevPtr, 42);
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr]], *Ptr: 42
+ Err = cudaFree(DevPtr);
+ if (Err != cudaSuccess)
+ return -1;
+ llvm_omp_target_free_shared(Ptr, DevNo);
+}
diff --git a/offload/test/offloading/CUDA/basic_api_memcpy.cu b/offload/test/offloading/CUDA/basic_api_memcpy.cu
new file mode 100644
index 0000000000000..d5c0929abe43c
--- /dev/null
+++ b/offload/test/offloading/CUDA/basic_api_memcpy.cu
@@ -0,0 +1,46 @@
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t
+// RUN: %t | %fcheck-generic
+
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+#include <cuda_runtime.h>
+#include <stdio.h>
+
+__global__ void kernel(int *DevPtr, int N) {
+ for (int i = 0; i < N; ++i)
+ DevPtr[i]--;
+}
+
+int main(int argc, char **argv) {
+ int DevNo = 0;
+ int Res = 0;
+ int *DevPtr;
+ auto Err = cudaMalloc(&DevPtr, 42 * sizeof(int));
+ if (Err != cudaSuccess)
+ return -1;
+ int HstPtr[42];
+ for (int i = 0; i < 42; ++i) {
+ HstPtr[i] = 2;
+ }
+ Err = cudaMemcpy(DevPtr, HstPtr, 42 * sizeof(int), cudaMemcpyHostToDevice);
+ if (Err != cudaSuccess)
+ return -1;
+ printf("Res: %i\n", Res);
+ // CHECK: Res: 0
+ kernel<<<1, 1>>>(DevPtr, 42);
+ Err = cudaMemcpy(HstPtr, DevPtr, 42 * sizeof(int), cudaMemcpyDeviceToHost);
+ if (Err != cudaSuccess)
+ return -1;
+ for (int i = 0; i < 42; ++i) {
+ printf("%i : %i\n", i, HstPtr[i]);
+ Res += HstPtr[i];
+ }
+ printf("Res: %i\n", Res);
+ // CHECK: Res: 42
+ Err = cudaFree(DevPtr);
+ if (Err != cudaSuccess)
+ return -1;
+}
diff --git a/offload/test/offloading/CUDA/basic_api_memset.cu b/offload/test/offloading/CUDA/basic_api_memset.cu
new file mode 100644
index 0000000000000..36dcc729a3a9e
--- /dev/null
+++ b/offload/test/offloading/CUDA/basic_api_memset.cu
@@ -0,0 +1,43 @@
+// RUN: %clang++ -foffload-via-llvm --offload-arch=native %s -o %t
+// RUN: %t | %fcheck-generic
+
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+#include <cuda_runtime.h>
+#include <stdio.h>
+
+extern "C" {
+void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
+void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
+}
+
+__global__ void kernel(int *A, int *DevPtr, int N) {
+ for (int i = 0; i < N; ++i)
+ *A += DevPtr[i];
+ *A *= -1;
+}
+
+int main(int argc, char **argv) {
+ int DevNo = 0;
+ int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
+ int *DevPtr;
+ auto Err = cudaMalloc(&DevPtr, 42 * sizeof(int));
+ if (Err != cudaSuccess)
+ return -1;
+ Err = cudaMemset(DevPtr, -1, 42 * sizeof(int));
+ if (Err != cudaSuccess)
+ return -1;
+ *Ptr = 0;
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr:0x.*]], *Ptr: 0
+ kernel<<<1, 1>>>(Ptr, DevPtr, 42);
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
+ // CHECK: Ptr [[Ptr]], *Ptr: 42
+ Err = cudaFree(DevPtr);
+ if (Err != cudaSuccess)
+ return -1;
+ llvm_omp_target_free_shared(Ptr, DevNo);
+}
From 705de43498aec79565d6469a00a54e65e988faf8 Mon Sep 17 00:00:00 2001
From: Johannes Doerfert <johannes at jdoerfert.de>
Date: Wed, 12 Jun 2024 06:37:09 -0700
Subject: [PATCH 3/3] [Offload] Introduce the concept of "default streams"
The offload APIs, and the CUDA wrappers in clang, now support "default
streams" per thread (and per device). It should be per context, but we
do not expose that concept yet. The KernelArguments allow an
LLVM/Offload user to provide an "AsyncInfoQueue", which is
plugin-dependent and can later also be created outside the runtime or
queried from it. User-managed "queues" are kept persistent, i.e., they
are not returned to the pool once synchronized.
The CUDA tests will synchronize via `cudaDeviceSynchronize` before
checking the results.
Based on: https://github.com/llvm/llvm-project/pull/94821
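To illustrate the intended semantics (a minimal sketch, not part of the
patch itself): launches without an explicit stream are enqueued, in
order, on the calling thread's default queue for the device, and the
host only blocks at an explicit synchronization point (or an implicitly
synchronizing call such as `cudaMalloc`/`cudaFree`):
```
#include <cuda_runtime.h>
#include <stdio.h>

extern "C" {
void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
}

__global__ void inc(int *A) { ++*A; }

int main() {
  int DevNo = 0;
  int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
  *Ptr = 0;
  // Both launches land on the same per-thread default queue and execute in
  // order; the host does not wait until the explicit synchronization below.
  inc<<<1, 1>>>(Ptr);
  inc<<<1, 1>>>(Ptr);
  cudaDeviceSynchronize();
  printf("*Ptr: %i\n", *Ptr); // Expected: 2
  llvm_omp_target_free_shared(Ptr, DevNo);
}
```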
---
.../llvm_offload_wrappers/cuda_runtime.h | 18 +++++--
.../llvm/Frontend/OpenMP/OMPConstants.h | 2 +-
.../include/llvm/Frontend/OpenMP/OMPKinds.def | 2 +-
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 4 +-
offload/include/Shared/APITypes.h | 9 +++-
offload/include/omptarget.h | 18 ++++++-
offload/plugins-nextgen/amdgpu/src/rtl.cpp | 21 +++++---
.../common/src/PluginInterface.cpp | 2 +
offload/plugins-nextgen/cuda/src/rtl.cpp | 16 ++++--
offload/src/KernelLanguage/API.cpp | 14 +++++-
offload/src/exports | 2 +
offload/src/interface.cpp | 49 ++++++++++++++++++-
offload/src/omptarget.cpp | 2 +-
.../offloading/CUDA/basic_api_malloc_free.cu | 3 +-
.../test/offloading/CUDA/basic_api_memcpy.cu | 1 +
.../test/offloading/CUDA/basic_api_memset.cu | 1 +
offload/test/offloading/CUDA/basic_launch.cu | 3 +-
.../CUDA/basic_launch_blocks_and_threads.cu | 2 +
.../offloading/CUDA/basic_launch_multi_arg.cu | 2 +
19 files changed, 143 insertions(+), 28 deletions(-)
diff --git a/clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h b/clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h
index 8718e462a82d3..ed1b07b49afee 100644
--- a/clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h
+++ b/clang/lib/Headers/llvm_offload_wrappers/cuda_runtime.h
@@ -11,6 +11,7 @@
#define __CUDA_RUNTIME_API__
#include <cstddef>
+#include <cstdint>
#include <optional>
extern "C" {
@@ -21,6 +22,8 @@ int omp_target_memcpy(void *Dst, const void *Src, size_t Length,
size_t DstOffset, size_t SrcOffset, int DstDevice,
int SrcDevice);
void *omp_target_memset(void *Ptr, int C, size_t N, int DeviceNum);
+int __tgt_target_synchronize_async_info_queue(void *Loc, int64_t DeviceNum,
+ void *AsyncInfoQueue);
}
// TODO: There are many fields missing in this enumeration.
@@ -55,7 +58,15 @@ inline cudaError_t cudaGetLastError() {
// Returns the last error that has been produced without resetting it.
inline cudaError_t cudaPeekAtLastError() { return __cudaomp_last_error; }
+inline cudaError_t cudaDeviceSynchronize() {
+ int DeviceNum = 0;
+ return __cudaomp_last_error =
+ (cudaError_t)__tgt_target_synchronize_async_info_queue(
+ /*Loc=*/nullptr, DeviceNum, /*AsyncInfoQueue=*/nullptr);
+}
+
inline cudaError_t __cudaMalloc(void **devPtr, size_t size) {
+ cudaDeviceSynchronize();
int DeviceNum = 0;
*devPtr = omp_target_alloc(size, DeviceNum);
if (*devPtr == NULL)
@@ -69,6 +80,7 @@ template <class T> cudaError_t cudaMalloc(T **devPtr, size_t size) {
}
inline cudaError_t __cudaFree(void *devPtr) {
+ cudaDeviceSynchronize();
int DeviceNum = 0;
omp_target_free(devPtr, DeviceNum);
return __cudaomp_last_error = cudaSuccess;
@@ -118,12 +130,8 @@ inline cudaError_t cudaMemset(T *devPtr, int value, size_t count) {
return __cudaMemset((void *)devPtr, value, count);
}
-inline cudaError_t cudaDeviceSynchronize() {
- // TODO: not implemented, not async yet.
- return __cudaomp_last_error = cudaSuccess;
-}
-
inline cudaError_t cudaDeviceReset(void) {
+ cudaDeviceSynchronize();
// TODO: not implemented.
return __cudaomp_last_error = cudaSuccess;
}
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
index 338b56226f204..a7be3f51fac7d 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
@@ -72,7 +72,7 @@ enum class IdentFlag {
#include "llvm/Frontend/OpenMP/OMPKinds.def"
// Version of the kernel argument format used by the omp runtime.
-#define OMP_KERNEL_ARG_VERSION 3
+#define OMP_KERNEL_ARG_VERSION 4
// Minimum version of the compiler that generates a kernel dynamic pointer.
#define OMP_KERNEL_ARG_MIN_VERSION_WITH_DYN_PTR 3
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
index fe09bb8177c28..0be3827185e2e 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -90,7 +90,7 @@ __OMP_ARRAY_TYPE(Int32Arr3, Int32, 3)
__OMP_STRUCT_TYPE(Ident, ident_t, false, Int32, Int32, Int32, Int32, Int8Ptr)
__OMP_STRUCT_TYPE(KernelArgs, __tgt_kernel_arguments, false, Int32, Int32, VoidPtrPtr,
VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr,
- Int64, Int64, Int32Arr3Ty, Int32Arr3Ty, Int32)
+ Int64, Int64, Int32Arr3Ty, Int32Arr3Ty, Int32, VoidPtr)
__OMP_STRUCT_TYPE(AsyncInfo, __tgt_async_info, false, Int8Ptr)
__OMP_STRUCT_TYPE(DependInfo, kmp_dep_info, false, SizeTy, SizeTy, Int8)
__OMP_STRUCT_TYPE(Task, kmp_task_ompbuilder_t, false, VoidPtr, VoidPtr, Int32, VoidPtr, VoidPtr)
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 92213e19c9d9d..ba73d48bc9a8b 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -496,6 +496,7 @@ void OpenMPIRBuilder::getKernelArgsVector(TargetKernelArgs &KernelArgs,
auto Int32Ty = Type::getInt32Ty(Builder.getContext());
Value *ZeroArray = Constant::getNullValue(ArrayType::get(Int32Ty, 3));
Value *Flags = Builder.getInt64(KernelArgs.HasNoWait);
+ Value *AsyncInfoQueue = Constant::getNullValue(Builder.getPtrTy());
Value *NumTeams3D =
Builder.CreateInsertValue(ZeroArray, KernelArgs.NumTeams, {0});
@@ -514,7 +515,8 @@ void OpenMPIRBuilder::getKernelArgsVector(TargetKernelArgs &KernelArgs,
Flags,
NumTeams3D,
NumThreads3D,
- KernelArgs.DynCGGroupMem};
+ KernelArgs.DynCGGroupMem,
+ AsyncInfoQueue};
}
void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
diff --git a/offload/include/Shared/APITypes.h b/offload/include/Shared/APITypes.h
index 4c1f7712249a3..f96b2f9ca259d 100644
--- a/offload/include/Shared/APITypes.h
+++ b/offload/include/Shared/APITypes.h
@@ -85,6 +85,9 @@ struct __tgt_async_info {
/// ensure it is a valid location while the transfer to the device is
/// happening.
KernelLaunchEnvironmentTy KernelLaunchEnvironment;
+
+ /// Flag to indicate the Queue should be persistent.
+ bool PersistentQueue = false;
};
/// This struct contains all of the arguments to a target kernel region launch.
@@ -110,12 +113,16 @@ struct KernelArgsTy {
// The number of threads (for x,y,z dimension).
uint32_t ThreadLimit[3] = {0, 0, 0};
uint32_t DynCGroupMem = 0; // Amount of dynamic cgroup memory requested.
+ // A __tgt_async_info queue pointer to be used for the kernel and all
+ // associated device interactions. The operations are implicitly made
+ // non-blocking.
+ void *AsyncInfoQueue = nullptr;
};
static_assert(sizeof(KernelArgsTy().Flags) == sizeof(uint64_t),
"Invalid struct size");
static_assert(sizeof(KernelArgsTy) ==
(8 * sizeof(int32_t) + 3 * sizeof(int64_t) +
- 4 * sizeof(void **) + 2 * sizeof(int64_t *)),
+ 5 * sizeof(void **) + 2 * sizeof(int64_t *)),
"Invalid struct size");
/// Flat array of kernel launch parameters and their total size.
diff --git a/offload/include/omptarget.h b/offload/include/omptarget.h
index 2b6445e9fbe55..8730879905984 100644
--- a/offload/include/omptarget.h
+++ b/offload/include/omptarget.h
@@ -136,8 +136,19 @@ class AsyncInfoTy {
/// Synchronization method to be used.
SyncTy SyncType;
- AsyncInfoTy(DeviceTy &Device, SyncTy SyncType = SyncTy::BLOCKING)
+ AsyncInfoTy(DeviceTy &Device, SyncTy SyncType = SyncTy::BLOCKING)
: Device(Device), SyncType(SyncType) {}
+ AsyncInfoTy(DeviceTy &Device, void *AsyncInfoQueue)
+ : Device(Device), SyncType(AsyncInfoQueue ? SyncTy::NON_BLOCKING : SyncTy::BLOCKING) {
+ AsyncInfo.Queue = AsyncInfoQueue;
+ AsyncInfo.PersistentQueue = !!AsyncInfoQueue;
+ }
+ AsyncInfoTy(DeviceTy &Device, void *AsyncInfoQueue, SyncTy SyncType)
+ : Device(Device), SyncType(SyncType) {
+ AsyncInfo.Queue = AsyncInfoQueue;
+ AsyncInfo.PersistentQueue = !!AsyncInfoQueue;
+ }
+
~AsyncInfoTy() { synchronize(); }
/// Implicit conversion to the __tgt_async_info which is used in the
@@ -207,8 +218,9 @@ class TaskAsyncInfoWrapperTy {
void **TaskAsyncInfoPtr = nullptr;
public:
- TaskAsyncInfoWrapperTy(DeviceTy &Device)
+ TaskAsyncInfoWrapperTy(DeviceTy &Device, void *AsyncInfoQueue = nullptr)
: ExecThreadID(__kmpc_global_thread_num(NULL)), LocalAsyncInfo(Device) {
+ assert(!AsyncInfoQueue && "Async tasks do not support predefined async queue pointers!");
// If we failed to acquired the current global thread id, we cannot
// re-enqueue the current task. Thus we should use the local blocking async
// info.
@@ -425,6 +437,8 @@ int __tgt_activate_record_replay(int64_t DeviceId, uint64_t MemorySize,
void *VAddr, bool IsRecord, bool SaveOutput,
uint64_t &ReqPtrArgOffset);
+void *__tgt_target_get_default_async_info_queue(void *Loc, int64_t DeviceId);
+
#ifdef __cplusplus
}
#endif
diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
index e678213df18ce..b37c3e23d44ab 100644
--- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -2208,8 +2208,11 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
return Err;
// Once the stream is synchronized, return it to stream pool and reset
- // AsyncInfo. This is to make sure the synchronization only works for its
- // own tasks.
+ // AsyncInfo if the queue is not persistent. This is to make sure the
+ // synchronization only works for its own tasks.
+ if (AsyncInfo.PersistentQueue)
+ return Plugin::success();
+
AsyncInfo.Queue = nullptr;
return AMDGPUStreamManager.returnResource(Stream);
}
@@ -2228,9 +2231,12 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
if (!(*CompletedOrErr))
return Plugin::success();
- // Once the stream is completed, return it to stream pool and reset
- // AsyncInfo. This is to make sure the synchronization only works for its
- // own tasks.
+ // Once the stream's operations have completed, return it to the stream
+ // pool and reset AsyncInfo if the queue is not persistent. This is to
+ // make sure the synchronization only works for its own tasks.
+ if (AsyncInfo.PersistentQueue)
+ return Plugin::success();
+
AsyncInfo.Queue = nullptr;
return AMDGPUStreamManager.returnResource(Stream);
}
@@ -2443,7 +2449,10 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
/// Initialize the async info for interoperability purposes.
Error initAsyncInfoImpl(AsyncInfoWrapperTy &AsyncInfoWrapper) override {
- // TODO: Implement this function.
+ AMDGPUStreamTy *Stream;
+ if (auto Err = getStream(AsyncInfoWrapper, Stream))
+ return Err;
+
return Plugin::success();
}
diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp
index 2608af016284b..ff0f6edfcd693 100644
--- a/offload/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp
@@ -1435,8 +1435,10 @@ Error GenericDeviceTy::launchKernel(void *EntryPtr, void **ArgPtrs,
Error GenericDeviceTy::initAsyncInfo(__tgt_async_info **AsyncInfoPtr) {
assert(AsyncInfoPtr && "Invalid async info");
+ assert(!(*AsyncInfoPtr) && "Already initialized async info");
*AsyncInfoPtr = new __tgt_async_info();
+ (*AsyncInfoPtr)->PersistentQueue = true;
AsyncInfoWrapperTy AsyncInfoWrapper(*this, *AsyncInfoPtr);
diff --git a/offload/plugins-nextgen/cuda/src/rtl.cpp b/offload/plugins-nextgen/cuda/src/rtl.cpp
index b6465d61bd033..bfbc101529e18 100644
--- a/offload/plugins-nextgen/cuda/src/rtl.cpp
+++ b/offload/plugins-nextgen/cuda/src/rtl.cpp
@@ -643,8 +643,11 @@ struct CUDADeviceTy : public GenericDeviceTy {
}
// Once the stream is synchronized, return it to stream pool and reset
- // AsyncInfo. This is to make sure the synchronization only works for its
- // own tasks.
+ // AsyncInfo if the queue is not persistent. This is to make sure the
+ // synchronization only works for its own tasks.
+ if (AsyncInfo.PersistentQueue)
+ return Plugin::success();
+
AsyncInfo.Queue = nullptr;
if (auto Err = CUDAStreamManager.returnResource(Stream))
return Err;
@@ -777,9 +780,12 @@ struct CUDADeviceTy : public GenericDeviceTy {
if (Res == CUDA_ERROR_NOT_READY)
return Plugin::success();
- // Once the stream is synchronized and the operations completed (or an error
- // occurs), return it to stream pool and reset AsyncInfo. This is to make
- // sure the synchronization only works for its own tasks.
+ // Once the stream's operations have completed, return it to the stream
+ // pool and reset AsyncInfo if the queue is not persistent. This is to
+ // make sure the synchronization only works for its own tasks.
+ if (AsyncInfo.PersistentQueue)
+ return Plugin::success();
+
AsyncInfo.Queue = nullptr;
if (auto Err = CUDAStreamManager.returnResource(Stream))
return Err;
diff --git a/offload/src/KernelLanguage/API.cpp b/offload/src/KernelLanguage/API.cpp
index 9ffc199b5da7d..779751deed661 100644
--- a/offload/src/KernelLanguage/API.cpp
+++ b/offload/src/KernelLanguage/API.cpp
@@ -8,9 +8,11 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "Shared/APITypes.h"
+#include <cstdint>
#include <cstdio>
struct dim3 {
@@ -56,10 +58,13 @@ unsigned __llvmPopCallConfiguration(dim3 *__grid_size, dim3 *__block_size,
int __tgt_target_kernel(void *Loc, int64_t DeviceId, int32_t NumTeams,
int32_t ThreadLimit, const void *HostPtr,
KernelArgsTy *Args);
+void *__tgt_target_get_default_async_info_queue(void *Loc, int64_t DeviceId);
unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
void *args, size_t sharedMem, void *stream) {
+ int64_t DeviceNo = 0;
KernelArgsTy Args = {};
+ Args.Version = OMP_KERNEL_ARG_VERSION;
Args.DynCGroupMem = sharedMem;
Args.NumTeams[0] = gridDim.x;
Args.NumTeams[1] = gridDim.y;
@@ -69,8 +74,13 @@ unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
Args.ThreadLimit[2] = blockDim.z;
Args.ArgPtrs = reinterpret_cast<void **>(args);
Args.Flags.IsCUDA = true;
- int rv = __tgt_target_kernel(nullptr, 0, gridDim.x,
- blockDim.x, func, &Args);
+ if (stream)
+ Args.AsyncInfoQueue = stream;
+ else
+ Args.AsyncInfoQueue =
+ __tgt_target_get_default_async_info_queue(nullptr, DeviceNo);
+ int rv = __tgt_target_kernel(nullptr, DeviceNo, gridDim.x, blockDim.x, func,
+ &Args);
return rv;
}
}
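For reference, a hand-written call to the launch entry point above (a
sketch only; `KernelPtr` and `DevPtr` are hypothetical placeholders, and
clang normally emits this call when lowering a `<<<...>>>` launch):
```
// Launch KernelPtr with 1 team of 32 threads. Passing stream == nullptr
// routes the launch to the calling thread's default queue for device 0
// via __tgt_target_get_default_async_info_queue, as shown above.
dim3 Grid, Block;
Grid.x = Grid.y = Grid.z = 1;
Block.x = 32;
Block.y = Block.z = 1;
void *Params[] = {&DevPtr};
llvmLaunchKernel(KernelPtr, Grid, Block, Params, /*sharedMem=*/0,
                 /*stream=*/nullptr);
```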
diff --git a/offload/src/exports b/offload/src/exports
index 7bdc7d2a531bb..11830f62af388 100644
--- a/offload/src/exports
+++ b/offload/src/exports
@@ -29,6 +29,8 @@ VERS1.0 {
__tgt_target_kernel;
__tgt_target_kernel_nowait;
__tgt_target_nowait_query;
+ __tgt_target_get_default_async_info_queue;
+ __tgt_target_synchronize_async_info_queue;
__tgt_target_kernel_replay;
__tgt_activate_record_replay;
__tgt_mapper_num_components;
diff --git a/offload/src/interface.cpp b/offload/src/interface.cpp
index 763b051cc6d77..759fb54a11262 100644
--- a/offload/src/interface.cpp
+++ b/offload/src/interface.cpp
@@ -14,6 +14,8 @@
#include "OpenMP/OMPT/Interface.h"
#include "OpenMP/OMPT/Callback.h"
#include "PluginManager.h"
+#include "Shared/APITypes.h"
+#include "omptarget.h"
#include "private.h"
#include "Shared/EnvironmentVar.h"
@@ -312,7 +314,7 @@ static inline int targetKernel(ident_t *Loc, int64_t DeviceId, int32_t NumTeams,
if (!DeviceOrErr)
FATAL_MESSAGE(DeviceId, "%s", toString(DeviceOrErr.takeError()).c_str());
- TargetAsyncInfoTy TargetAsyncInfo(*DeviceOrErr);
+ TargetAsyncInfoTy TargetAsyncInfo(*DeviceOrErr, KernelArgs->AsyncInfoQueue);
AsyncInfoTy &AsyncInfo = TargetAsyncInfo;
/// RAII to establish tool anchors before and after target region
OMPT_IF_BUILT(InterfaceRAII TargetRAII(
@@ -510,3 +512,48 @@ EXTERN void __tgt_target_nowait_query(void **AsyncHandle) {
delete AsyncInfo;
*AsyncHandle = nullptr;
}
+
+EXTERN void *__tgt_target_get_default_async_info_queue(void *Loc,
+ int64_t DeviceId) {
+ assert(PM && "Runtime not initialized");
+
+ static thread_local void **AsyncInfoQueue = nullptr;
+
+ if (!AsyncInfoQueue)
+ AsyncInfoQueue = reinterpret_cast<void **>(
+ calloc(PM->getNumDevices(), sizeof(AsyncInfoQueue[0])));
+
+ if (!AsyncInfoQueue[DeviceId]) {
+ auto DeviceOrErr = PM->getDevice(DeviceId);
+ if (!DeviceOrErr)
+ FATAL_MESSAGE(DeviceId, "%s", toString(DeviceOrErr.takeError()).c_str());
+
+ __tgt_async_info *AsyncInfo = nullptr;
+ DeviceOrErr->RTL->init_async_info(DeviceId, &AsyncInfo);
+ AsyncInfoQueue[DeviceId] = AsyncInfo->Queue;
+ }
+
+ return AsyncInfoQueue[DeviceId];
+}
+
+EXTERN int __tgt_target_synchronize_async_info_queue(void *Loc,
+ int64_t DeviceId,
+ void *AsyncInfoQueue) {
+ assert(PM && "Runtime not initialized");
+
+ auto DeviceOrErr = PM->getDevice(DeviceId);
+ if (!DeviceOrErr)
+ FATAL_MESSAGE(DeviceId, "%s", toString(DeviceOrErr.takeError()).c_str());
+ if (!AsyncInfoQueue)
+ AsyncInfoQueue = __tgt_target_get_default_async_info_queue(Loc, DeviceId);
+ AsyncInfoTy AsyncInfo(*DeviceOrErr, AsyncInfoQueue,
+ AsyncInfoTy::SyncTy::BLOCKING);
+
+ if (AsyncInfo.synchronize())
+ FATAL_MESSAGE0(1, "Error while synchronizing the async info queue.\n");
+ [[maybe_unused]] __tgt_async_info *ASI = AsyncInfo;
+ assert(ASI->Queue && ASI->PersistentQueue &&
+ "Expected the persistent queue to remain after synchronization!");
+
+ return 0;
+}
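Taken together, a minimal host-side sketch of how these two entry points
are meant to be used (declarations copied from above; the
`cudaDeviceSynchronize` wrapper takes the nullptr shortcut and lets the
runtime look up the default queue itself):
```
#include <cstdint>

extern "C" {
void *__tgt_target_get_default_async_info_queue(void *Loc, int64_t DeviceId);
int __tgt_target_synchronize_async_info_queue(void *Loc, int64_t DeviceId,
                                              void *AsyncInfoQueue);
}

// Block until all work enqueued on the calling thread's default queue for
// the given device has completed. The queue is persistent, so it survives
// the synchronization and can be reused for subsequent launches.
void waitForDefaultQueue(int64_t DeviceId) {
  void *Queue =
      __tgt_target_get_default_async_info_queue(/*Loc=*/nullptr, DeviceId);
  __tgt_target_synchronize_async_info_queue(/*Loc=*/nullptr, DeviceId, Queue);
}
```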
diff --git a/offload/src/omptarget.cpp b/offload/src/omptarget.cpp
index 9bca8529c5ee3..73e26c0def6da 100644
--- a/offload/src/omptarget.cpp
+++ b/offload/src/omptarget.cpp
@@ -49,7 +49,7 @@ int AsyncInfoTy::synchronize() {
case SyncTy::BLOCKING:
// If we have a queue we need to synchronize it now.
Result = Device.synchronize(*this);
- assert(AsyncInfo.Queue == nullptr &&
+ assert((AsyncInfo.PersistentQueue || !AsyncInfo.Queue) &&
"The device plugin should have nulled the queue to indicate there "
"are no outstanding actions!");
break;
diff --git a/offload/test/offloading/CUDA/basic_api_malloc_free.cu b/offload/test/offloading/CUDA/basic_api_malloc_free.cu
index 86dc5f8fef68e..1ee5b4efa4fcc 100644
--- a/offload/test/offloading/CUDA/basic_api_malloc_free.cu
+++ b/offload/test/offloading/CUDA/basic_api_malloc_free.cu
@@ -32,9 +32,10 @@ int main(int argc, char **argv) {
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr:0x.*]], *Ptr: 0
kernel<<<1, 1>>>(Ptr, DevPtr, 42);
- printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr]], *Ptr: 42
+ // Implicit sync via cudaFree.
Err = cudaFree(DevPtr);
+ printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
if (Err != cudaSuccess)
return -1;
llvm_omp_target_free_shared(Ptr, DevNo);
diff --git a/offload/test/offloading/CUDA/basic_api_memcpy.cu b/offload/test/offloading/CUDA/basic_api_memcpy.cu
index d5c0929abe43c..088e20ffa9e2b 100644
--- a/offload/test/offloading/CUDA/basic_api_memcpy.cu
+++ b/offload/test/offloading/CUDA/basic_api_memcpy.cu
@@ -31,6 +31,7 @@ int main(int argc, char **argv) {
printf("Res: %i\n", Res);
// CHECK: Res: 0
kernel<<<1, 1>>>(DevPtr, 42);
+ cudaDeviceSynchronize();
Err = cudaMemcpy(HstPtr, DevPtr, 42 * sizeof(int), cudaMemcpyDeviceToHost);
if (Err != cudaSuccess)
return -1;
diff --git a/offload/test/offloading/CUDA/basic_api_memset.cu b/offload/test/offloading/CUDA/basic_api_memset.cu
index 36dcc729a3a9e..474eb2a46f0a2 100644
--- a/offload/test/offloading/CUDA/basic_api_memset.cu
+++ b/offload/test/offloading/CUDA/basic_api_memset.cu
@@ -34,6 +34,7 @@ int main(int argc, char **argv) {
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr:0x.*]], *Ptr: 0
kernel<<<1, 1>>>(Ptr, DevPtr, 42);
+ cudaDeviceSynchronize();
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr]], *Ptr: 42
Err = cudaFree(DevPtr);
diff --git a/offload/test/offloading/CUDA/basic_launch.cu b/offload/test/offloading/CUDA/basic_launch.cu
index 2915a7c216ab5..298aa7db83bad 100644
--- a/offload/test/offloading/CUDA/basic_launch.cu
+++ b/offload/test/offloading/CUDA/basic_launch.cu
@@ -8,7 +8,7 @@
// UNSUPPORTED: x86_64-pc-linux-gnu
// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
-
+#include <cuda_runtime.h>
#include <stdio.h>
extern "C" {
@@ -25,6 +25,7 @@ int main(int argc, char **argv) {
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr:0x.*]], *Ptr: 7
square<<<1, 1>>>(Ptr);
+ cudaDeviceSynchronize();
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr]], *Ptr: 42
llvm_omp_target_free_shared(Ptr, DevNo);
diff --git a/offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu b/offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu
index 615cae6f7b233..c47b1a1b83bde 100644
--- a/offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu
+++ b/offload/test/offloading/CUDA/basic_launch_blocks_and_threads.cu
@@ -8,6 +8,7 @@
// UNSUPPORTED: x86_64-pc-linux-gnu
// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+#include <cuda_runtime.h>
#include <stdio.h>
extern "C" {
@@ -26,6 +27,7 @@ int main(int argc, char **argv) {
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr:0x.*]], *Ptr: 0
square<<<7, 6>>>(Ptr);
+ cudaDeviceSynchronize();
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr]], *Ptr: 42
llvm_omp_target_free_shared(Ptr, DevNo);
diff --git a/offload/test/offloading/CUDA/basic_launch_multi_arg.cu b/offload/test/offloading/CUDA/basic_launch_multi_arg.cu
index f95f1dbacc79c..58ff89dcd4aac 100644
--- a/offload/test/offloading/CUDA/basic_launch_multi_arg.cu
+++ b/offload/test/offloading/CUDA/basic_launch_multi_arg.cu
@@ -8,6 +8,7 @@
// UNSUPPORTED: x86_64-pc-linux-gnu
// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+#include <cuda_runtime.h>
#include <stdio.h>
extern "C" {
@@ -33,6 +34,7 @@ int main(int argc, char **argv) {
printf("Src: %i : %i\n", Src[0], Src[1]);
// CHECK: Src: -2 : 8
square<<<1, 1>>>(Ptr, 3, Src, 4);
+ cudaDeviceSynchronize();
printf("Ptr %p, *Ptr: %i\n", Ptr, *Ptr);
// CHECK: Ptr [[Ptr]], *Ptr: 42
printf("Src: %i : %i\n", Src[0], Src[1]);