[clang] [CUDA] Add device-side kernel launch support (PR #165519)
via cfe-commits
cfe-commits at lists.llvm.org
Thu Oct 30 04:33:06 PDT 2025
https://github.com/darkbuck updated https://github.com/llvm/llvm-project/pull/165519
From a5320cb7a4c88e6b97a7b6f3d728874a159a1677 Mon Sep 17 00:00:00 2001
From: Michael Liao <michael.hliao at gmail.com>
Date: Sat, 18 Oct 2025 19:46:39 -0400
Subject: [PATCH] [CUDA] Add device-side kernel launch support
- CUDA's dynamic parallelism extension allows device-side kernel
  launches, which share the same syntax as host-side launches, e.g.,

    kernel<<<Dg, Db, Ns, S>>>(arguments);

  but differ in code generation. A device-side kernel launch is
  eventually translated into the following sequence:

    config = cudaGetParameterBuffer(alignment, size);
    // Set up arguments by copying them into `config`.
    cudaLaunchDevice(func, config, Dg, Db, Ns, S);
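  As a concrete illustration (a minimal sketch; the kernel names are
  made up for this example):

    __global__ void child(int x) { /* ... */ }

    __global__ void parent() {
      // Same <<<>>> syntax as a host-side launch, but executed on the
      // device.
      child<<<1, 32>>>(42);
    }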
- To support device-side kernel launches, 'CUDAKernelCallExpr' is
  reused, but its config expression is set to a call to
  'cudaLaunchDevice'. During code generation, 'CUDAKernelCallExpr' is
  expanded into the aforementioned sequence, sketched below.
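  For the 'child' launch sketched above, the expansion is roughly the
  following pseudo-code (not the literal generated IR; the 64-byte
  buffer alignment matches the codegen in this patch):

    void *config = cudaGetParameterBuffer(/*alignment=*/64, /*size=*/4);
    if (config != nullptr) {
      *reinterpret_cast<int *>(config) = 42; // copy kernel arguments
      cudaLaunchDevice((void *)child, config, /*gridDim=*/1,
                       /*blockDim=*/32, /*sharedMem=*/0, /*stream=*/0);
    }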
- A device-side kernel launch requires the source to be compiled as
  relocatable device code and linked against '-lcudadevrt'. The linker
  wrappers are changed to pass the relevant link options to 'nvlink'.
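  For example, such a program could be built along these lines (a
  hypothetical invocation; exact driver flags may vary):

    clang++ -x cuda --offload-arch=sm_70 -fgpu-rdc a.cu -o a -lcudadevrt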
---
clang/include/clang/AST/ASTContext.h | 16 +++
.../clang/Basic/DiagnosticSemaKinds.td | 8 ++
clang/include/clang/Sema/SemaCUDA.h | 5 +
clang/include/clang/Serialization/ASTReader.h | 2 +-
clang/lib/CodeGen/CGCUDARuntime.cpp | 110 ++++++++++++++++++
clang/lib/CodeGen/CGCUDARuntime.h | 4 +
clang/lib/CodeGen/CGExprCXX.cpp | 6 +
clang/lib/Sema/SemaCUDA.cpp | 99 +++++++++++++++-
clang/lib/Sema/SemaDecl.cpp | 32 +++--
clang/lib/Serialization/ASTReader.cpp | 8 +-
clang/lib/Serialization/ASTWriter.cpp | 37 +++---
clang/test/CodeGenCUDA/Inputs/cuda.h | 7 +-
clang/test/CodeGenCUDA/device-kernel-call.cu | 17 +++
clang/test/SemaCUDA/Inputs/cuda.h | 6 +
.../test/SemaCUDA/call-kernel-from-kernel.cu | 5 +-
clang/test/SemaCUDA/device-kernel-call.cu | 23 ++++
clang/test/SemaCUDA/function-overload.cu | 26 ++---
clang/test/SemaCUDA/function-target.cu | 4 +-
clang/test/SemaCUDA/reference-to-kernel-fn.cu | 4 +-
.../ClangLinkerWrapper.cpp | 10 ++
.../ClangNVLinkWrapper.cpp | 8 +-
21 files changed, 383 insertions(+), 54 deletions(-)
create mode 100644 clang/test/CodeGenCUDA/device-kernel-call.cu
create mode 100644 clang/test/SemaCUDA/device-kernel-call.cu
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index 33aa2d343aa7a..f64e29be3205f 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -488,6 +488,10 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// Declaration for the CUDA cudaConfigureCall function.
FunctionDecl *cudaConfigureCallDecl = nullptr;
+ /// Declaration for the CUDA cudaGetParameterBuffer function.
+ FunctionDecl *cudaGetParameterBufferDecl = nullptr;
+ /// Declaration for the CUDA cudaLaunchDevice function.
+ FunctionDecl *cudaLaunchDeviceDecl = nullptr;
/// Keeps track of all declaration attributes.
///
@@ -1641,6 +1645,18 @@ class ASTContext : public RefCountedBase<ASTContext> {
return cudaConfigureCallDecl;
}
+ void setcudaGetParameterBufferDecl(FunctionDecl *FD) {
+ cudaGetParameterBufferDecl = FD;
+ }
+
+ FunctionDecl *getcudaGetParameterBufferDecl() {
+ return cudaGetParameterBufferDecl;
+ }
+
+ void setcudaLaunchDeviceDecl(FunctionDecl *FD) { cudaLaunchDeviceDecl = FD; }
+
+ FunctionDecl *getcudaLaunchDeviceDecl() { return cudaLaunchDeviceDecl; }
+
/// Returns true iff we need copy/dispose helpers for the given type.
bool BlockRequiresCopying(QualType Ty, const VarDecl *D);
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 4e369be0bbb92..01752fbc169ab 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -9499,6 +9499,8 @@ def err_kern_is_nonstatic_method : Error<
"kernel function %0 must be a free function or static member function">;
def err_config_scalar_return : Error<
"CUDA special function '%0' must have scalar return type">;
+def err_config_pointer_return
+ : Error<"CUDA special function '%0' must have pointer return type">;
def err_kern_call_not_global_function : Error<
"kernel call to non-global function %0">;
def err_global_call_not_config : Error<
@@ -13690,4 +13692,10 @@ def err_amdgcn_load_lds_size_invalid_value : Error<"invalid size value">;
def note_amdgcn_load_lds_size_valid_value : Note<"size must be %select{1, 2, or 4|1, 2, 4, 12 or 16}0">;
def err_amdgcn_coop_atomic_invalid_as : Error<"cooperative atomic requires a global or generic pointer">;
+
+def err_cuda_device_kernel_launch_not_supported
+ : Error<"device-side kernel call/launch is not supported">;
+def err_cuda_device_kernel_launch_require_rdc
+ : Error<"kernel launch from __device__ or __global__ function requires "
+ "relocatable device code, also known as separate compilation mode">;
} // end of sema component.
diff --git a/clang/include/clang/Sema/SemaCUDA.h b/clang/include/clang/Sema/SemaCUDA.h
index dbc1432860d89..dbb4290f5d149 100644
--- a/clang/include/clang/Sema/SemaCUDA.h
+++ b/clang/include/clang/Sema/SemaCUDA.h
@@ -273,6 +273,11 @@ class SemaCUDA : public SemaBase {
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getConfigureFuncName() const;
+ /// Return the name of the parameter buffer allocation function for the
+ /// device kernel launch.
+ std::string getGetParameterBufferFuncName() const;
+ /// Return the name of the device kernel launch function.
+ std::string getLaunchDeviceFuncName() const;
/// Record variables that are potentially ODR-used in CUDA/HIP.
void recordPotentialODRUsedVariable(MultiExprArg Args,
diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h
index af856a8097ab1..a65f7fd2d1d43 100644
--- a/clang/include/clang/Serialization/ASTReader.h
+++ b/clang/include/clang/Serialization/ASTReader.h
@@ -1013,7 +1013,7 @@ class ASTReader
///
/// The AST context tracks a few important decls, currently cudaConfigureCall,
/// directly.
- SmallVector<GlobalDeclID, 2> CUDASpecialDeclRefs;
+ SmallVector<GlobalDeclID, 4> CUDASpecialDeclRefs;
/// The floating point pragma option settings.
SmallVector<uint64_t, 1> FPPragmaOptions;
diff --git a/clang/lib/CodeGen/CGCUDARuntime.cpp b/clang/lib/CodeGen/CGCUDARuntime.cpp
index 121a481213396..cd1476ebd6754 100644
--- a/clang/lib/CodeGen/CGCUDARuntime.cpp
+++ b/clang/lib/CodeGen/CGCUDARuntime.cpp
@@ -22,6 +22,116 @@ using namespace CodeGen;
CGCUDARuntime::~CGCUDARuntime() {}
+static llvm::Value *emitGetParamBuf(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E) {
+ auto *GetParamBuf = CGF.getContext().getcudaGetParameterBufferDecl();
+ const FunctionProtoType *GetParamBufProto =
+ GetParamBuf->getType()->getAs<FunctionProtoType>();
+
+ DeclRefExpr *DRE = DeclRefExpr::Create(
+ CGF.getContext(), {}, {}, GetParamBuf,
+ /*RefersToEnclosingVariableOrCapture=*/false, GetParamBuf->getNameInfo(),
+ GetParamBuf->getType(), VK_PRValue);
+ auto *ImpCast = ImplicitCastExpr::Create(
+ CGF.getContext(), CGF.getContext().getPointerType(GetParamBuf->getType()),
+ CK_FunctionToPointerDecay, DRE, nullptr, VK_PRValue, FPOptionsOverride());
+
+ CGCallee Callee = CGF.EmitCallee(ImpCast);
+ CallArgList Args;
+ // Use 64B alignment.
+ Args.add(RValue::get(CGF.CGM.getSize(CharUnits::fromQuantity(64))),
+ CGF.getContext().getSizeType());
+ // Calculate parameter sizes.
+ const PointerType *PT = E->getCallee()->getType()->getAs<PointerType>();
+ const FunctionProtoType *FTP =
+ PT->getPointeeType()->getAs<FunctionProtoType>();
+ CharUnits Offset = CharUnits::Zero();
+ for (auto ArgTy : FTP->getParamTypes()) {
+ auto TInfo = CGF.CGM.getContext().getTypeInfoInChars(ArgTy);
+ Offset = Offset.alignTo(TInfo.Align);
+ Offset += TInfo.Width;
+ }
+ Args.add(RValue::get(CGF.CGM.getSize(Offset)),
+ CGF.getContext().getSizeType());
+ const CGFunctionInfo &CallInfo = CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Args, GetParamBufProto, /*ChainCall=*/false);
+ auto Ret = CGF.EmitCall(CallInfo, Callee, /*ReturnValue=*/{}, Args);
+
+ return Ret.getScalarVal();
+}
+
+RValue CGCUDARuntime::EmitCUDADeviceKernelCallExpr(
+ CodeGenFunction &CGF, const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke) {
+ ASTContext &Ctx = CGM.getContext();
+ assert(Ctx.getcudaLaunchDeviceDecl() == E->getConfig()->getDirectCallee());
+
+ llvm::BasicBlock *ConfigOKBlock = CGF.createBasicBlock("dkcall.configok");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("dkcall.end");
+
+ llvm::Value *Config = emitGetParamBuf(CGF, E);
+ CGF.Builder.CreateCondBr(
+ CGF.Builder.CreateICmpNE(Config,
+ llvm::Constant::getNullValue(Config->getType())),
+ ConfigOKBlock, ContBlock);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(ConfigOKBlock);
+
+ QualType KernelCalleeFuncTy =
+ E->getCallee()->getType()->getAs<PointerType>()->getPointeeType();
+ CGCallee KernelCallee = CGF.EmitCallee(E->getCallee());
+ // Emit kernel arguments.
+ CallArgList KernelCallArgs;
+ CGF.EmitCallArgs(
+ KernelCallArgs,
+ dyn_cast<FunctionProtoType>(KernelCalleeFuncTy->castAs<FunctionType>()),
+ E->arguments(), E->getDirectCallee());
+ // Copy emitted kernel arguments into that parameter buffer.
+ RawAddress CfgBase(Config, CGM.Int8Ty,
+ /*Alignment=*/CharUnits::fromQuantity(64));
+ CharUnits Offset = CharUnits::Zero();
+ for (auto &Arg : KernelCallArgs) {
+ auto TInfo = CGM.getContext().getTypeInfoInChars(Arg.getType());
+ Offset = Offset.alignTo(TInfo.Align);
+ Address Addr =
+ CGF.Builder.CreateConstInBoundsGEP(CfgBase, Offset.getQuantity());
+ Arg.copyInto(CGF, Addr);
+ Offset += TInfo.Width;
+ }
+ // Make the `cudaLaunchDevice` call, i.e., E->getConfig().
+ const CallExpr *LaunchCall = E->getConfig();
+ QualType LaunchCalleeFuncTy = LaunchCall->getCallee()
+ ->getType()
+ ->getAs<PointerType>()
+ ->getPointeeType();
+ CGCallee LaunchCallee = CGF.EmitCallee(LaunchCall->getCallee());
+ CallArgList LaunchCallArgs;
+ CGF.EmitCallArgs(
+ LaunchCallArgs,
+ dyn_cast<FunctionProtoType>(LaunchCalleeFuncTy->castAs<FunctionType>()),
+ LaunchCall->arguments(), LaunchCall->getDirectCallee());
+ // Replace the func and parameter buffer arguments.
+ LaunchCallArgs[0] = CallArg(RValue::get(KernelCallee.getFunctionPointer()),
+ CGM.getContext().VoidPtrTy);
+ LaunchCallArgs[1] = CallArg(RValue::get(Config), CGM.getContext().VoidPtrTy);
+ const CGFunctionInfo &LaunchCallInfo = CGM.getTypes().arrangeFreeFunctionCall(
+ LaunchCallArgs,
+ dyn_cast<FunctionProtoType>(LaunchCalleeFuncTy->castAs<FunctionType>()),
+ /*ChainCall=*/false);
+ CGF.EmitCall(LaunchCallInfo, LaunchCallee, ReturnValue, LaunchCallArgs,
+ CallOrInvoke,
+ /*IsMustTail=*/false, E->getExprLoc());
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ return RValue::get(nullptr);
+}
+
RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue,
diff --git a/clang/lib/CodeGen/CGCUDARuntime.h b/clang/lib/CodeGen/CGCUDARuntime.h
index 86f776004ee7c..64fb9a31422e0 100644
--- a/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/clang/lib/CodeGen/CGCUDARuntime.h
@@ -88,6 +88,10 @@ class CGCUDARuntime {
ReturnValueSlot ReturnValue,
llvm::CallBase **CallOrInvoke = nullptr);
+ virtual RValue EmitCUDADeviceKernelCallExpr(
+ CodeGenFunction &CGF, const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke = nullptr);
+
/// Emits a kernel launch stub.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 14d8db32bafc6..0c01933790100 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -503,6 +503,12 @@ RValue CodeGenFunction::EmitCXXOperatorMemberCallExpr(
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue,
llvm::CallBase **CallOrInvoke) {
+ auto *FD = E->getConfig()->getDirectCallee();
+ // Emit as a device-side kernel call if the config expression is a call
+ // to 'cudaLaunchDevice'.
+ if (FD && CGM.getContext().getcudaLaunchDeviceDecl() == FD)
+ return CGM.getCUDARuntime().EmitCUDADeviceKernelCallExpr(
+ *this, E, ReturnValue, CallOrInvoke);
return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue,
CallOrInvoke);
}
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index 31735a0f5feb3..6ff8179841c26 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -52,16 +52,94 @@ bool SemaCUDA::PopForceHostDevice() {
ExprResult SemaCUDA::ActOnExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc) {
- FunctionDecl *ConfigDecl = getASTContext().getcudaConfigureCallDecl();
+ bool IsDeviceKernelCall = false;
+ switch (CurrentTarget()) {
+ case CUDAFunctionTarget::Global:
+ case CUDAFunctionTarget::Device:
+ IsDeviceKernelCall = true;
+ break;
+ case CUDAFunctionTarget::HostDevice:
+ if (getLangOpts().CUDAIsDevice) {
+ IsDeviceKernelCall = true;
+ if (FunctionDecl *Caller =
+ SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
+ Caller && isImplicitHostDeviceFunction(Caller)) {
+ // During device compilation, a config call in an HD function should
+ // be treated as a device kernel call. But for implicit HD functions
+ // (such as lambdas), we need to check whether RDC is enabled.
+ if (!getLangOpts().GPURelocatableDeviceCode)
+ IsDeviceKernelCall = false;
+ // HIP doesn't support device-side kernel calls yet. Still treat it as
+ // a host-side kernel call.
+ if (getLangOpts().HIP)
+ IsDeviceKernelCall = false;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (IsDeviceKernelCall && getLangOpts().HIP)
+ return ExprError(
+ Diag(LLLLoc, diag::err_cuda_device_kernel_launch_not_supported));
+
+ if (IsDeviceKernelCall && !getLangOpts().GPURelocatableDeviceCode)
+ return ExprError(
+ Diag(LLLLoc, diag::err_cuda_device_kernel_launch_require_rdc));
+
+ FunctionDecl *ConfigDecl = IsDeviceKernelCall
+ ? getASTContext().getcudaLaunchDeviceDecl()
+ : getASTContext().getcudaConfigureCallDecl();
if (!ConfigDecl)
return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
- << getConfigureFuncName());
+ << (IsDeviceKernelCall ? getLaunchDeviceFuncName()
+ : getConfigureFuncName()));
+ // For a device kernel call, also check that the parameter buffer allocation function is declared.
+ if (IsDeviceKernelCall) {
+ auto *GetParamBuf = getASTContext().getcudaGetParameterBufferDecl();
+ if (!GetParamBuf)
+ return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
+ << getGetParameterBufferFuncName());
+ }
+
QualType ConfigQTy = ConfigDecl->getType();
DeclRefExpr *ConfigDR = new (getASTContext()) DeclRefExpr(
getASTContext(), ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
SemaRef.MarkFunctionReferenced(LLLLoc, ConfigDecl);
+ if (IsDeviceKernelCall) {
+ SmallVector<Expr *> Args;
+ // Use a null pointer as the kernel function, which may not be resolvable
+ // here; for example, resolving an overloaded kernel may require the
+ // kernel arguments.
+ llvm::APInt Zero(SemaRef.Context.getTypeSize(SemaRef.Context.IntTy), 0);
+ Args.push_back(IntegerLiteral::Create(SemaRef.Context, Zero,
+ SemaRef.Context.IntTy, LLLLoc));
+ // Use a null pointer as the parameter buffer, which will be allocated
+ // during codegen.
+ Args.push_back(IntegerLiteral::Create(SemaRef.Context, Zero,
+ SemaRef.Context.IntTy, LLLLoc));
+ // Add the original config arguments.
+ llvm::append_range(Args, ExecConfig);
+ // Add the default blockDim if it's missing.
+ if (Args.size() < 4) {
+ llvm::APInt One(SemaRef.Context.getTypeSize(SemaRef.Context.IntTy), 1);
+ Args.push_back(IntegerLiteral::Create(SemaRef.Context, One,
+ SemaRef.Context.IntTy, LLLLoc));
+ }
+ // Add the default sharedMemSize if it's missing.
+ if (Args.size() < 5)
+ Args.push_back(IntegerLiteral::Create(SemaRef.Context, Zero,
+ SemaRef.Context.IntTy, LLLLoc));
+ // Add the default stream if it's missing.
+ if (Args.size() < 6)
+ Args.push_back(IntegerLiteral::Create(SemaRef.Context, Zero,
+ SemaRef.Context.IntTy, LLLLoc));
+ return SemaRef.BuildCallExpr(S, ConfigDR, LLLLoc, Args, GGGLoc, nullptr,
+ /*IsExecConfig=*/true);
+ }
return SemaRef.BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
/*IsExecConfig=*/true);
}
@@ -251,7 +329,7 @@ SemaCUDA::IdentifyPreference(const FunctionDecl *Caller,
if (CalleeTarget == CUDAFunctionTarget::Global &&
(CallerTarget == CUDAFunctionTarget::Global ||
CallerTarget == CUDAFunctionTarget::Device))
- return CFP_Never;
+ return CFP_Native;
// (b) Calling HostDevice is OK for everyone.
if (CalleeTarget == CUDAFunctionTarget::HostDevice)
@@ -279,7 +357,8 @@ SemaCUDA::IdentifyPreference(const FunctionDecl *Caller,
if (CallerTarget == CUDAFunctionTarget::HostDevice) {
// It's OK to call a compilation-mode matching function from an HD one.
if ((getLangOpts().CUDAIsDevice &&
- CalleeTarget == CUDAFunctionTarget::Device) ||
+ (CalleeTarget == CUDAFunctionTarget::Device ||
+ CalleeTarget == CUDAFunctionTarget::Global)) ||
(!getLangOpts().CUDAIsDevice &&
(CalleeTarget == CUDAFunctionTarget::Host ||
CalleeTarget == CUDAFunctionTarget::Global)))
@@ -1103,6 +1182,18 @@ std::string SemaCUDA::getConfigureFuncName() const {
return "cudaConfigureCall";
}
+std::string SemaCUDA::getGetParameterBufferFuncName() const {
+ // FIXME: Use the API from CUDA programming guide. Add V2 support when
+ // necessary.
+ return "cudaGetParameterBuffer";
+}
+
+std::string SemaCUDA::getLaunchDeviceFuncName() const {
+ // FIXME: Use the API from CUDA programming guide. Add V2 support when
+ // necessary.
+ return "cudaLaunchDevice";
+}
+
// Record any local constexpr variables that are passed one way on the host
// and another on the device.
void SemaCUDA::recordPotentialODRUsedVariable(
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index fc3aabf5741ca..1e39bfb5e42cd 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -11050,14 +11050,30 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
if (getLangOpts().CUDA) {
- IdentifierInfo *II = NewFD->getIdentifier();
- if (II && II->isStr(CUDA().getConfigureFuncName()) &&
- !NewFD->isInvalidDecl() &&
- NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
- if (!R->castAs<FunctionType>()->getReturnType()->isScalarType())
- Diag(NewFD->getLocation(), diag::err_config_scalar_return)
- << CUDA().getConfigureFuncName();
- Context.setcudaConfigureCallDecl(NewFD);
+ if (IdentifierInfo *II = NewFD->getIdentifier()) {
+ if (II->isStr(CUDA().getConfigureFuncName()) && !NewFD->isInvalidDecl() &&
+ NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (!R->castAs<FunctionType>()->getReturnType()->isScalarType())
+ Diag(NewFD->getLocation(), diag::err_config_scalar_return)
+ << CUDA().getConfigureFuncName();
+ Context.setcudaConfigureCallDecl(NewFD);
+ }
+ if (II->isStr(CUDA().getGetParameterBufferFuncName()) &&
+ !NewFD->isInvalidDecl() &&
+ NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (!R->castAs<FunctionType>()->getReturnType()->isPointerType())
+ Diag(NewFD->getLocation(), diag::err_config_pointer_return)
+ << CUDA().getGetParameterBufferFuncName();
+ Context.setcudaGetParameterBufferDecl(NewFD);
+ }
+ if (II->isStr(CUDA().getLaunchDeviceFuncName()) &&
+ !NewFD->isInvalidDecl() &&
+ NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (!R->castAs<FunctionType>()->getReturnType()->isScalarType())
+ Diag(NewFD->getLocation(), diag::err_config_scalar_return)
+ << CUDA().getLaunchDeviceFuncName();
+ Context.setcudaLaunchDeviceDecl(NewFD);
+ }
}
}
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index e3106f8d8e13c..43fa4e51f4f91 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -5588,9 +5588,13 @@ void ASTReader::InitializeContext() {
// If there were any CUDA special declarations, deserialize them.
if (!CUDASpecialDeclRefs.empty()) {
- assert(CUDASpecialDeclRefs.size() == 1 && "More decl refs than expected!");
+ assert(CUDASpecialDeclRefs.size() == 3 && "More decl refs than expected!");
Context.setcudaConfigureCallDecl(
- cast<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
+ cast_or_null<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
+ Context.setcudaGetParameterBufferDecl(
+ cast_or_null<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[1])));
+ Context.setcudaLaunchDeviceDecl(
+ cast_or_null<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[2])));
}
// Re-export any modules that were imported by a non-module AST file.
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 3ac338e013deb..53985c2f92890 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -5714,8 +5714,13 @@ void ASTWriter::PrepareWritingSpecialDecls(Sema &SemaRef) {
GetDeclRef(SemaRef.getStdAlignValT());
}
- if (Context.getcudaConfigureCallDecl())
+ if (Context.getcudaConfigureCallDecl() ||
+ Context.getcudaGetParameterBufferDecl() ||
+ Context.getcudaLaunchDeviceDecl()) {
GetDeclRef(Context.getcudaConfigureCallDecl());
+ GetDeclRef(Context.getcudaGetParameterBufferDecl());
+ GetDeclRef(Context.getcudaLaunchDeviceDecl());
+ }
// Writing all of the known namespaces.
for (const auto &I : SemaRef.KnownNamespaces)
@@ -5840,19 +5845,19 @@ void ASTWriter::WriteSpecialDeclRecords(Sema &SemaRef) {
if (!PendingInstantiations.empty())
Stream.EmitRecord(PENDING_IMPLICIT_INSTANTIATIONS, PendingInstantiations);
+ auto AddEmittedDeclRefOrZero = [this](RecordData &Refs, Decl *D) {
+ if (!D || !wasDeclEmitted(D))
+ Refs.push_back(0);
+ else
+ AddDeclRef(D, Refs);
+ };
+
// Write the record containing declaration references of Sema.
RecordData SemaDeclRefs;
if (SemaRef.StdNamespace || SemaRef.StdBadAlloc || SemaRef.StdAlignValT) {
- auto AddEmittedDeclRefOrZero = [this, &SemaDeclRefs](Decl *D) {
- if (!D || !wasDeclEmitted(D))
- SemaDeclRefs.push_back(0);
- else
- AddDeclRef(D, SemaDeclRefs);
- };
-
- AddEmittedDeclRefOrZero(SemaRef.getStdNamespace());
- AddEmittedDeclRefOrZero(SemaRef.getStdBadAlloc());
- AddEmittedDeclRefOrZero(SemaRef.getStdAlignValT());
+ AddEmittedDeclRefOrZero(SemaDeclRefs, SemaRef.getStdNamespace());
+ AddEmittedDeclRefOrZero(SemaDeclRefs, SemaRef.getStdBadAlloc());
+ AddEmittedDeclRefOrZero(SemaDeclRefs, SemaRef.getStdAlignValT());
}
if (!SemaDeclRefs.empty())
Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
@@ -5868,9 +5873,13 @@ void ASTWriter::WriteSpecialDeclRecords(Sema &SemaRef) {
// Write the record containing CUDA-specific declaration references.
RecordData CUDASpecialDeclRefs;
- if (auto *CudaCallDecl = Context.getcudaConfigureCallDecl();
- CudaCallDecl && wasDeclEmitted(CudaCallDecl)) {
- AddDeclRef(CudaCallDecl, CUDASpecialDeclRefs);
+ if (auto *CudaCallDecl = Context.getcudaConfigureCallDecl(),
+ *CudaGetParamDecl = Context.getcudaGetParameterBufferDecl(),
+ *CudaLaunchDecl = Context.getcudaLaunchDeviceDecl();
+ CudaCallDecl || CudaGetParamDecl || CudaLaunchDecl) {
+ AddEmittedDeclRefOrZero(CUDASpecialDeclRefs, CudaCallDecl);
+ AddEmittedDeclRefOrZero(CUDASpecialDeclRefs, CudaGetParamDecl);
+ AddEmittedDeclRefOrZero(CUDASpecialDeclRefs, CudaLaunchDecl);
Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
}
diff --git a/clang/test/CodeGenCUDA/Inputs/cuda.h b/clang/test/CodeGenCUDA/Inputs/cuda.h
index e7ad784335027..8733567bbd7e7 100644
--- a/clang/test/CodeGenCUDA/Inputs/cuda.h
+++ b/clang/test/CodeGenCUDA/Inputs/cuda.h
@@ -72,7 +72,12 @@ extern "C" cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim,
extern "C" cudaError_t cudaLaunchKernel_ptsz(const void *func, dim3 gridDim,
dim3 blockDim, void **args,
size_t sharedMem, cudaStream_t stream);
-
+extern __device__ cudaError_t cudaLaunchDevice(void *func,
+ void *parameterBuffer,
+ dim3 gridDim, dim3 blockDim,
+ unsigned int sharedMem,
+ cudaStream_t stream);
+extern __device__ void *cudaGetParameterBuffer(size_t alignment, size_t size);
#endif
extern "C" __device__ int printf(const char*, ...);
diff --git a/clang/test/CodeGenCUDA/device-kernel-call.cu b/clang/test/CodeGenCUDA/device-kernel-call.cu
new file mode 100644
index 0000000000000..a83c888134574
--- /dev/null
+++ b/clang/test/CodeGenCUDA/device-kernel-call.cu
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -fgpu-rdc -emit-llvm %s -o - | FileCheck %s
+
+#include "Inputs/cuda.h"
+
+__global__ void g2(int x) {}
+
+// CHECK-LABEL: define{{.*}}g1
+__global__ void g1(void) {
+ // CHECK: [[CONFIG:%.*]] = call{{.*}}_Z22cudaGetParameterBuffermm(i64{{.*}}64, i64{{.*}}4)
+ // CHECK-NEXT: [[FLAG:%.*]] = icmp ne ptr [[CONFIG]], null
+ // CHECK-NEXT: br i1 [[FLAG]], label %[[THEN:.*]], label %[[ENDIF:.*]]
+ // CHECK: [[THEN]]:
+ // CHECK-NEXT: [[PPTR:%.*]] = getelementptr{{.*}}i8, ptr [[CONFIG]], i64 0
+ // CHECK-NEXT: store i32 42, ptr [[PPTR]]
+ // CHECK: = call{{.*}} i32 @_Z16cudaLaunchDevicePvS_4dim3S0_jP10cudaStream(ptr{{.*}} @_Z2g2i, ptr{{.*}} [[CONFIG]],
+ g2<<<1, 1>>>(42);
+}
diff --git a/clang/test/SemaCUDA/Inputs/cuda.h b/clang/test/SemaCUDA/Inputs/cuda.h
index 2bf45e03d91c7..e4662e6a4a588 100644
--- a/clang/test/SemaCUDA/Inputs/cuda.h
+++ b/clang/test/SemaCUDA/Inputs/cuda.h
@@ -46,6 +46,12 @@ extern "C" int __cudaPushCallConfiguration(dim3 gridSize, dim3 blockSize,
extern "C" cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim,
dim3 blockDim, void **args,
size_t sharedMem, cudaStream_t stream);
+extern __device__ cudaError_t cudaLaunchDevice(void *func,
+ void *parameterBuffer,
+ dim3 gridDim, dim3 blockDim,
+ unsigned int sharedMem,
+ cudaStream_t stream);
+extern __device__ void *cudaGetParameterBuffer(size_t alignment, size_t size);
#endif
// Host- and device-side placement new overloads.
diff --git a/clang/test/SemaCUDA/call-kernel-from-kernel.cu b/clang/test/SemaCUDA/call-kernel-from-kernel.cu
index 5f8832f3cd070..d9f6f59a70aa7 100644
--- a/clang/test/SemaCUDA/call-kernel-from-kernel.cu
+++ b/clang/test/SemaCUDA/call-kernel-from-kernel.cu
@@ -1,9 +1,12 @@
// RUN: %clang_cc1 %s --std=c++11 -triple nvptx -o - \
// RUN: -verify -fcuda-is-device -fsyntax-only -verify-ignore-unexpected=note
+// RUN: %clang_cc1 %s --std=c++11 -fgpu-rdc -triple nvptx -o - \
+// RUN: -verify=rdc -fcuda-is-device -fsyntax-only -verify-ignore-unexpected=note
+// rdc-no-diagnostics
#include "Inputs/cuda.h"
__global__ void kernel1();
__global__ void kernel2() {
- kernel1<<<1,1>>>(); // expected-error {{reference to __global__ function 'kernel1' in __global__ function}}
+ kernel1<<<1,1>>>(); // expected-error {{kernel launch from __device__ or __global__ function requires relocatable device code, also known as separate compilation mode}}
}
diff --git a/clang/test/SemaCUDA/device-kernel-call.cu b/clang/test/SemaCUDA/device-kernel-call.cu
new file mode 100644
index 0000000000000..8920e4b667e35
--- /dev/null
+++ b/clang/test/SemaCUDA/device-kernel-call.cu
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -fcuda-is-device -verify=nordc %s
+// RUN: %clang_cc1 -fcuda-is-device -fgpu-rdc -verify=rdc %s
+// RUN: %clang_cc1 -x hip -fcuda-is-device -verify=hip %s
+
+// rdc-no-diagnostics
+
+#include "Inputs/cuda.h"
+
+__global__ void g2(int x) {}
+
+// CHECK-LABEL: define{{.*}}g1
+__global__ void g1(void) {
+ // CHECK: [[CONFIG:%.*]] = call{{.*}}_Z22cudaGetParameterBuffermm(i64{{.*}}64, i64{{.*}}4)
+ // CHECK-NEXT: [[FLAG:%.*]] = icmp ne ptr [[CONFIG]], null
+ // CHECK-NEXT: br i1 [[FLAG]], label %[[THEN:.*]], label %[[ENDIF:.*]]
+ // CHECK: [[THEN]]:
+ // CHECK-NEXT: [[PPTR:%.*]] = getelementptr{{.*}}i8, ptr [[CONFIG]], i64 0
+ // CHECK-NEXT: store i32 42, ptr [[PPTR]]
+ // CHECK: = call{{.*}} i32 @_Z16cudaLaunchDevicePvS_4dim3S0_jP10cudaStream(ptr{{.*}} @_Z2g2i, ptr{{.*}} [[CONFIG]],
+ g2<<<1, 1>>>(42);
+ // nordc-error at -1 {{kernel launch from __device__ or __global__ function requires relocatable device code, also known as separate compilation mode}}
+ // hip-error at -2 {{device-side kernel call/launch is not supported}}
+}
diff --git a/clang/test/SemaCUDA/function-overload.cu b/clang/test/SemaCUDA/function-overload.cu
index 3d05839af7528..82191d5aad223 100644
--- a/clang/test/SemaCUDA/function-overload.cu
+++ b/clang/test/SemaCUDA/function-overload.cu
@@ -91,10 +91,7 @@ __host__ HostReturnTy h() { return HostReturnTy(); }
// devdefer-note at -4 1+ {{candidate function not viable: call to __host__ function from __global__ function}}
__global__ void g() {}
-// dev-note at -1 1+ {{'g' declared here}}
-// devdefer-note at -2 1+ {{candidate function not viable: call to __global__ function from __device__ function}}
// expected-note at -3 0+ {{candidate function not viable: call to __global__ function from __host__ __device__ function}}
-// devdefer-note at -4 1+ {{candidate function not viable: call to __global__ function from __global__ function}}
extern "C" __device__ DeviceReturnTy cd() { return DeviceReturnTy(); }
// host-note at -1 1+ {{'cd' declared here}}
@@ -144,9 +141,9 @@ __device__ void devicef() {
DeviceFnPtr fp_cdh = cdh;
DeviceReturnTy ret_cdh = cdh();
- GlobalFnPtr fp_g = g; // dev-error {{reference to __global__ function 'g' in __device__ function}}
- g(); // devdefer-error {{no matching function for call to 'g'}}
- g<<<0,0>>>(); // dev-error {{reference to __global__ function 'g' in __device__ function}}
+ GlobalFnPtr fp_g = g;
+ g(); // expected-error {{call to global function 'g' not configured}}
+ g<<<0,0>>>(); // expected-error {{kernel launch from __device__ or __global__ function requires relocatable device code, also known as separate compilation mode}}
}
__global__ void globalf() {
@@ -165,9 +162,9 @@ __global__ void globalf() {
DeviceFnPtr fp_cdh = cdh;
DeviceReturnTy ret_cdh = cdh();
- GlobalFnPtr fp_g = g; // dev-error {{reference to __global__ function 'g' in __global__ function}}
- g(); // devdefer-error {{no matching function for call to 'g'}}
- g<<<0,0>>>(); // dev-error {{reference to __global__ function 'g' in __global__ function}}
+ GlobalFnPtr fp_g = g;
+ g(); // expected-error {{call to global function 'g' not configured}}
+ g<<<0,0>>>(); // expected-error {{kernel launch from __device__ or __global__ function requires relocatable device code, also known as separate compilation mode}}
}
__host__ __device__ void hostdevicef() {
@@ -199,20 +196,13 @@ __host__ __device__ void hostdevicef() {
CurrentReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g;
-#if defined(__CUDA_ARCH__)
- // expected-error at -2 {{reference to __global__ function 'g' in __host__ __device__ function}}
-#endif
g();
-#if defined (__CUDA_ARCH__)
- // expected-error at -2 {{reference to __global__ function 'g' in __host__ __device__ function}}
-#else
- // expected-error at -4 {{call to global function 'g' not configured}}
-#endif
+ // expected-error at -1 {{call to global function 'g' not configured}}
g<<<0,0>>>();
#if defined(__CUDA_ARCH__)
- // expected-error at -2 {{reference to __global__ function 'g' in __host__ __device__ function}}
+ // expected-error at -2 {{kernel launch from __device__ or __global__ function requires relocatable device code, also known as separate compilation mode}}
#endif
}
diff --git a/clang/test/SemaCUDA/function-target.cu b/clang/test/SemaCUDA/function-target.cu
index 64444b6676248..200910156cdfc 100644
--- a/clang/test/SemaCUDA/function-target.cu
+++ b/clang/test/SemaCUDA/function-target.cu
@@ -24,11 +24,11 @@ __host__ void h1(void) {
__host__ void d1h(void); // expected-note {{candidate function not viable: call to __host__ function from __device__ function}}
__device__ void d1d(void);
__host__ __device__ void d1hd(void);
-__global__ void d1g(void); // dev-note {{'d1g' declared here}}
+__global__ void d1g(void);
__device__ void d1(void) {
d1h(); // expected-error {{no matching function}}
d1d();
d1hd();
- d1g<<<1, 1>>>(); // dev-error {{reference to __global__ function 'd1g' in __device__ function}}
+ d1g<<<1, 1>>>(); // expected-error {{kernel launch from __device__ or __global__ function requires relocatable device code, also known as separate compilation mode}}
}
diff --git a/clang/test/SemaCUDA/reference-to-kernel-fn.cu b/clang/test/SemaCUDA/reference-to-kernel-fn.cu
index 70a1cda6ab0c8..bdb70fc8b55d1 100644
--- a/clang/test/SemaCUDA/reference-to-kernel-fn.cu
+++ b/clang/test/SemaCUDA/reference-to-kernel-fn.cu
@@ -8,6 +8,7 @@
// device-side kernel launches.)
// host-no-diagnostics
+// dev-no-diagnostics
#include "Inputs/cuda.h"
@@ -19,11 +20,10 @@ typedef void (*fn_ptr_t)();
__host__ __device__ fn_ptr_t get_ptr_hd() {
return kernel;
- // dev-error at -1 {{reference to __global__ function}}
}
__host__ fn_ptr_t get_ptr_h() {
return kernel;
}
__device__ fn_ptr_t get_ptr_d() {
- return kernel; // dev-error {{reference to __global__ function}}
+ return kernel;
}
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
index bfeca17d2147e..efcd211104230 100644
--- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
+++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
@@ -545,6 +545,16 @@ Expected<StringRef> clang(ArrayRef<StringRef> InputFiles, const ArgList &Args,
Arg->render(Args, LinkerArgs);
}
llvm::append_range(CmdArgs, LinkerArgs);
+ } else if (Triple.isNVPTX()) {
+ // Forward '-lcudadevrt' (and any library search paths) to the device link.
+ ArgStringList LinkerArgs;
+ for (const opt::Arg *Arg : Args.filtered(OPT_library, OPT_library_path)) {
+ if (Arg->getOption().matches(OPT_library) &&
+ StringRef(Arg->getValue()) != "cudadevrt")
+ continue;
+ Arg->render(Args, LinkerArgs);
+ }
+ llvm::append_range(CmdArgs, LinkerArgs);
}
// Pass on -mllvm options to the linker invocation.
diff --git a/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp b/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp
index 58eb671c61989..40a618a6b125a 100644
--- a/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp
+++ b/clang/tools/clang-nvlink-wrapper/ClangNVLinkWrapper.cpp
@@ -497,6 +497,11 @@ Expected<SmallVector<StringRef>> getInput(const ArgList &Args) {
continue;
}
+ // Skip '-lcudadevrt'.
+ if (Arg->getOption().matches(OPT_library) &&
+ StringRef(Arg->getValue()) == "cudadevrt")
+ continue;
+
std::optional<std::string> Filename =
Arg->getOption().matches(OPT_library)
? searchLibrary(Arg->getValue(), /*Root=*/"", LibraryPaths)
@@ -715,7 +720,8 @@ Error runNVLink(ArrayRef<StringRef> Files, const ArgList &Args) {
// Do not forward any inputs that we have processed.
if (Arg->getOption().matches(OPT_INPUT) ||
- Arg->getOption().matches(OPT_library))
+ (Arg->getOption().matches(OPT_library) &&
+ StringRef(Arg->getValue()) != "cudadevrt"))
continue;
Arg->render(Args, NewLinkerArgs);