[clang] 31d8dbd - [CUDA/SPIR-V] Force passing aggregate type byval
Shangwu Yao via cfe-commits
cfe-commits at lists.llvm.org
Fri Jul 22 13:30:57 PDT 2022
Author: Shangwu Yao
Date: 2022-07-22T20:30:15Z
New Revision: 31d8dbd1e5b4ee0fd04bfeb3a64d8f9f33260905
URL: https://github.com/llvm/llvm-project/commit/31d8dbd1e5b4ee0fd04bfeb3a64d8f9f33260905
DIFF: https://github.com/llvm/llvm-project/commit/31d8dbd1e5b4ee0fd04bfeb3a64d8f9f33260905.diff
LOG: [CUDA/SPIR-V] Force passing aggregate type byval
This patch forces aggregate kernel arguments to be passed by value when
compiling CUDA targeting SPIR-V. Previously, an aggregate was not passed by
value if it had a user-defined destructor, copy constructor, or move
constructor. With this change, the SPIR-V generated from CUDA follows the
CUDA spec
(https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing)
and matches the NVPTX implementation
(https://github.com/llvm/llvm-project/blob/41958f76d8a2c47484fa176cba1de565cfe84de7/clang/lib/CodeGen/TargetInfo.cpp#L7241).
Differential Revision: https://reviews.llvm.org/D130387
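
For illustration, here is a minimal CUDA sketch (not part of this commit;
the class and function names are hypothetical, and it assumes a standard
CUDA toolchain) of the situation the patch addresses: a kernel parameter
type with a user-defined copy constructor and destructor is still copied by
value onto the device per the CUDA spec, so any pointers it carries must
refer to device memory.

#include <cuda_runtime.h>

// Hypothetical aggregate: the user-defined copy constructor and destructor
// make the type non-trivial for ABI purposes.
class GpuBuffer {
 public:
  __host__ __device__ explicit GpuBuffer(int* p) : ptr(p) {}
  __host__ __device__ GpuBuffer(const GpuBuffer& other) : ptr(other.ptr) {}
  __host__ __device__ ~GpuBuffer() {}
  int* ptr;
};

// The kernel receives the aggregate by value; with this patch the SPIR-V
// kernel parameter is passed byval, so the copy is valid on the device.
__global__ void fill(GpuBuffer buf, int value) { *buf.ptr = value; }

int main() {
  int* dev = nullptr;
  cudaMalloc(reinterpret_cast<void**>(&dev), sizeof(int));
  // The GpuBuffer temporary is copied by value into device memory at launch.
  fill<<<1, 1>>>(GpuBuffer(dev), 42);
  cudaDeviceSynchronize();
  cudaFree(dev);
  return 0;
}

Compiling a sketch like this for a SPIR-V target should now produce a kernel
whose aggregate parameter is passed byval, which is what the new test below
checks.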
Added:
clang/test/CodeGenCUDASPIRV/copy-aggregate-byval.cu
Modified:
clang/lib/CodeGen/TargetInfo.cpp
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index e8ee5533104ca..fc0952e68a667 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -10449,6 +10449,15 @@ ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
LTy = llvm::PointerType::getWithSamePointeeType(PtrTy, GlobalAS);
return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
+
+ // Force copying aggregate type in kernel arguments by value when
+ // compiling CUDA targeting SPIR-V. This is required for the object
+ // copied to be valid on the device.
+ // This behavior follows the CUDA spec
+ // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
+ // and matches the NVPTX implementation.
+ if (isAggregateTypeForABI(Ty))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
}
return classifyArgumentType(Ty);
}
diff --git a/clang/test/CodeGenCUDASPIRV/copy-aggregate-byval.cu b/clang/test/CodeGenCUDASPIRV/copy-aggregate-byval.cu
new file mode 100644
index 0000000000000..bceca4d4ee5d6
--- /dev/null
+++ b/clang/test/CodeGenCUDASPIRV/copy-aggregate-byval.cu
@@ -0,0 +1,25 @@
+// Tests CUDA kernel arguments get copied by value when targeting SPIR-V, even with
+// destructor, copy constructor or move constructor defined by user.
+
+// RUN: %clang -Xclang -no-opaque-pointers -emit-llvm --cuda-device-only --offload=spirv32 \
+// RUN: -nocudalib -nocudainc %s -o %t.bc -c 2>&1
+// RUN: llvm-dis %t.bc -o %t.ll
+// RUN: FileCheck %s --input-file=%t.ll
+
+// RUN: %clang -Xclang -no-opaque-pointers -emit-llvm --cuda-device-only --offload=spirv64 \
+// RUN: -nocudalib -nocudainc %s -o %t.bc -c 2>&1
+// RUN: llvm-dis %t.bc -o %t.ll
+// RUN: FileCheck %s --input-file=%t.ll
+
+class GpuData {
+ public:
+ __attribute__((host)) __attribute__((device)) GpuData(int* src) {}
+ __attribute__((host)) __attribute__((device)) ~GpuData() {}
+ __attribute__((host)) __attribute__((device)) GpuData(const GpuData& other) {}
+ __attribute__((host)) __attribute__((device)) GpuData(GpuData&& other) {}
+};
+
+// CHECK: define
+// CHECK-SAME: spir_kernel void @_Z6kernel7GpuData(%class.GpuData* noundef byval(%class.GpuData) align
+
+__attribute__((global)) void kernel(GpuData output) {}