[clang] 5c082e7 - [CUDA] Add CUDA wrappers over clang builtins for sm_90.

Artem Belevich via cfe-commits <cfe-commits at lists.llvm.org>
Thu May 25 11:58:48 PDT 2023


Author: Artem Belevich
Date: 2023-05-25T11:57:58-07:00
New Revision: 5c082e7e15e38a2eea1f506725efe636a5b1bf8a

URL: https://github.com/llvm/llvm-project/commit/5c082e7e15e38a2eea1f506725efe636a5b1bf8a
DIFF: https://github.com/llvm/llvm-project/commit/5c082e7e15e38a2eea1f506725efe636a5b1bf8a.diff

LOG: [CUDA] Add CUDA wrappers over clang builtins for sm_90.

Differential Revision: https://reviews.llvm.org/D151362

Added: 
    

Modified: 
    clang/lib/Headers/__clang_cuda_intrinsics.h

Removed: 
    


################################################################################
diff --git a/clang/lib/Headers/__clang_cuda_intrinsics.h b/clang/lib/Headers/__clang_cuda_intrinsics.h
index 43ed3d77a101..0e0dd4674997 100644
--- a/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -577,6 +577,133 @@ __nv_associate_access_property(const void *__ptr, unsigned long long __prop) {
 }
 #endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
 
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
+__device__ inline unsigned __isCtaShared(const void *__ptr) {
+  return __isShared(__ptr);
+}
+
+__device__ inline unsigned __isClusterShared(const void *__ptr) {
+  return __nvvm_isspacep_shared_cluster(__ptr);
+}
+
+__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
+                                                  unsigned __rank) {
+  return __nvvm_mapa((void *)__ptr, __rank);
+}
+
+__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
+  return __nvvm_getctarank((void *)__ptr);
+}
+
+__device__ inline uint2
+__cluster_map_shared_multicast(const void *__ptr,
+                               unsigned int __cluster_cta_mask) {
+  return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
+                    __cluster_cta_mask);
+}
+
+__device__ inline unsigned __clusterDimIsSpecified() {
+  return __nvvm_is_explicit_cluster();
+}
+
+__device__ inline dim3 __clusterDim() {
+  return {__nvvm_read_ptx_sreg_cluster_nctaid_x(),
+          __nvvm_read_ptx_sreg_cluster_nctaid_y(),
+          __nvvm_read_ptx_sreg_cluster_nctaid_z()};
+}
+
+__device__ inline dim3 __clusterRelativeBlockIdx() {
+  return {__nvvm_read_ptx_sreg_cluster_ctaid_x(),
+          __nvvm_read_ptx_sreg_cluster_ctaid_y(),
+          __nvvm_read_ptx_sreg_cluster_ctaid_z()};
+}
+
+__device__ inline dim3 __clusterGridDimInClusters() {
+  return {__nvvm_read_ptx_sreg_nclusterid_x(),
+          __nvvm_read_ptx_sreg_nclusterid_y(),
+          __nvvm_read_ptx_sreg_nclusterid_z()};
+}
+
+__device__ inline dim3 __clusterIdx() {
+  return {__nvvm_read_ptx_sreg_clusterid_x(),
+          __nvvm_read_ptx_sreg_clusterid_y(),
+          __nvvm_read_ptx_sreg_clusterid_z()};
+}
+
+__device__ inline unsigned __clusterRelativeBlockRank() {
+  return __nvvm_read_ptx_sreg_cluster_ctarank();
+}
+
+__device__ inline unsigned __clusterSizeInBlocks() {
+  return __nvvm_read_ptx_sreg_cluster_nctarank();
+}
+
+__device__ inline void __cluster_barrier_arrive() {
+  __nvvm_barrier_cluster_arrive();
+}
+
+__device__ inline void __cluster_barrier_arrive_relaxed() {
+  __nvvm_barrier_cluster_arrive_relaxed();
+}
+
+__device__ inline void __cluster_barrier_wait() {
+  __nvvm_barrier_cluster_wait();
+}
+
+__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
+
+__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
+  float2 __ret;
+  __asm__("atom.add.v2.f32         {%0, %1}, [%2], {%3, %4};"
+          : "=f"(__ret.x), "=f"(__ret.y)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+  return __ret;
+}
+
+__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
+  float2 __ret;
+  __asm__("atom.cta.add.v2.f32         {%0, %1}, [%2], {%3, %4};"
+          : "=f"(__ret.x), "=f"(__ret.y)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+  return __ret;
+}
+
+__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
+  float2 __ret;
+  __asm__("atom.sys.add.v2.f32         {%0, %1}, [%2], {%3, %4};"
+          : "=f"(__ret.x), "=f"(__ret.y)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+  return __ret;
+}
+
+__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
+  float4 __ret;
+  __asm__("atom.add.v4.f32         {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+  return __ret;
+}
+
+__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
+  float4 __ret;
+  __asm__(
+      "atom.cta.add.v4.f32         {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+      : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+      : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+  return __ret;
+}
+
+__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
+  float4 __ret;
+  __asm__(
+      "atom.sys.add.v4.f32         {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+      : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+      : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+  return __ret;
+}
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
 #endif // CUDA_VERSION >= 11000
 
 #endif // defined(__CLANG_CUDA_INTRINSICS_H__)
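
For illustration, here is a minimal sketch (not part of the commit) of how the
new cluster wrappers might be used from device code. The kernel name, buffer,
and output pointer are hypothetical, and the kernel is assumed to be launched
with a cluster configuration (e.g. via cudaLaunchKernelEx with
cudaLaunchAttributeClusterDimension) on an sm_90 device; only the cluster
wrappers themselves come from this patch.

    __global__ void cluster_kernel(int *out) {
      __shared__ int buf[32];

      if (__clusterDimIsSpecified()) {
        // Rank of this block within its cluster and the cluster's size.
        unsigned rank = __clusterRelativeBlockRank();
        unsigned nblocks = __clusterSizeInBlocks();

        if (threadIdx.x < 32)
          buf[threadIdx.x] = (int)rank;

        // Map our shared buffer into the shared-memory window of block 0
        // of this cluster.
        int *peer = (int *)__cluster_map_shared_rank(buf, 0);

        // Split arrive/wait cluster barrier: every block publishes its
        // shared-memory writes before any block reads a peer's buffer.
        __cluster_barrier_arrive();
        __cluster_barrier_wait();

        if (threadIdx.x == 0 && rank < nblocks)
          out[rank] = peer[0];
      }
    }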
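Likewise, a minimal sketch (again not from the patch) of the new vector
atomicAdd overloads; the kernel and pointers are hypothetical:

    __global__ void accumulate(float4 *acc, const float4 *vals, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) {
        // Element-wise atomic add of the four components of vals[i] into
        // *acc; the return value is the prior contents.
        float4 old = atomicAdd(acc, vals[i]);
        (void)old;
      }
    }

The _block and _system variants have the same shape but emit the .cta and .sys
scope qualifiers, narrowing or widening the scope of the atomic accordingly.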

