[clang] a350089 - [HIP] Allow target addr space in target builtins

Anshil Gandhi via cfe-commits <cfe-commits at lists.llvm.org>
Mon Aug 9 15:38:43 PDT 2021


Author: Anshil Gandhi
Date: 2021-08-09T16:38:04-06:00
New Revision: a35008955fa606487f79a050f5cc80fc7ee84dda

URL: https://github.com/llvm/llvm-project/commit/a35008955fa606487f79a050f5cc80fc7ee84dda
DIFF: https://github.com/llvm/llvm-project/commit/a35008955fa606487f79a050f5cc80fc7ee84dda.diff

LOG: [HIP] Allow target addr space in target builtins

This patch allows target-specific address spaces in target builtins for HIP.
In general, it inserts an implicit address space cast from a non-generic
pointer to a generic pointer; for target builtin arguments only, it also
inserts an implicit address space cast from a generic pointer to a
non-generic pointer.

It is NFC for non-HIP languages.
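
(Illustration, not part of the commit message: a minimal HIP sketch of the
two directions, mirroring the tests updated below.)

    __global__ void example(float src, float *p) {
      // Generic -> non-generic, builtin arguments only: 'p' is a generic
      // pointer, but __builtin_amdgcn_ds_fminf expects an LDS (addrspace(3))
      // pointer; the address space cast is now inserted implicitly.
      volatile float x = __builtin_amdgcn_ds_fminf(p, src, 0, 0, false);

      // Non-generic -> generic, in general: the builtin returns a pointer in
      // the constant address space, which implicitly converts to 'void *'.
      void *q = __builtin_amdgcn_dispatch_ptr();
    }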

Differential Revision: https://reviews.llvm.org/D102405

Added: 
    

Modified: 
    clang/include/clang/AST/Type.h
    clang/lib/Basic/Targets/AMDGPU.h
    clang/lib/Sema/SemaExpr.cpp
    clang/test/CodeGenCUDA/builtins-amdgcn.cu

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index 9f46d53378976..4238667b8b076 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -495,7 +495,12 @@ class Qualifiers {
            (A == LangAS::Default &&
             (B == LangAS::sycl_private || B == LangAS::sycl_local ||
              B == LangAS::sycl_global || B == LangAS::sycl_global_device ||
-             B == LangAS::sycl_global_host));
+             B == LangAS::sycl_global_host)) ||
+           // In HIP device compilation, any cuda address space is allowed
+           // to implicitly cast into the default address space.
+           (A == LangAS::Default &&
+            (B == LangAS::cuda_constant || B == LangAS::cuda_device ||
+             B == LangAS::cuda_shared));
   }
 
   /// Returns true if the address space in these qualifiers is equal to or
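
(Commentary, not part of the diff: judging from the surrounding context, the
predicate extended here is Qualifiers::isAddressSpaceSupersetOf(A, B). After
this change it additionally returns true for:

    isAddressSpaceSupersetOf(LangAS::Default, LangAS::cuda_device)
    isAddressSpaceSupersetOf(LangAS::Default, LangAS::cuda_shared)
    isAddressSpaceSupersetOf(LangAS::Default, LangAS::cuda_constant)

so a pointer whose pointee is in a CUDA address space may implicitly convert
to a generic pointer, but never the reverse.)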

diff  --git a/clang/lib/Basic/Targets/AMDGPU.h b/clang/lib/Basic/Targets/AMDGPU.h
index 2e580ecf24259..f8772cbe244f0 100644
--- a/clang/lib/Basic/Targets/AMDGPU.h
+++ b/clang/lib/Basic/Targets/AMDGPU.h
@@ -352,7 +352,16 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
   }
 
   LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
-    return LangAS::Default;
+    switch (AS) {
+    case 1:
+      return LangAS::cuda_device;
+    case 3:
+      return LangAS::cuda_shared;
+    case 4:
+      return LangAS::cuda_constant;
+    default:
+      return getLangASFromTargetAS(AS);
+    }
   }
 
   llvm::Optional<LangAS> getConstantAddressSpace() const override {
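
(Commentary, not part of the diff: the numeric cases are AMDGPU target
address spaces; assuming the usual AMDGPU numbering, 1 is global, 3 is local
(LDS) and 4 is constant. With this override, a builtin prototype parameter
declared in target address space 3 is typed in the AST with a cuda_shared
pointee instead of being collapsed to the Default address space. A sketch of
the consequence in HIP source, mirroring the pre-existing test_ds_fmax test:

    __global__ void k(float src) {
      __shared__ float s;   // an LDS object
      // The builtin's pointer parameter is now typed cuda_shared, so '&s'
      // matches it directly, with no intermediate cast through generic:
      volatile float x = __builtin_amdgcn_ds_fmaxf(&s, src, 0, 0, false);
    })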

diff  --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 8ef4a9d96320b..5bde87d02877e 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -6572,6 +6572,53 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
       return ExprError();
 
     checkDirectCallValidity(*this, Fn, FD, ArgExprs);
+
+    // If this expression is a call to a builtin function in HIP device
+    // compilation, allow a pointer-type argument to default address space to be
+    // passed as a pointer-type parameter to a non-default address space.
+    // If Arg is declared in the default address space and Param is declared
+    // in a non-default address space, perform an implicit address space cast to
+    // the parameter type.
+    if (getLangOpts().HIP && getLangOpts().CUDAIsDevice && FD &&
+        FD->getBuiltinID()) {
+      for (unsigned Idx = 0; Idx < FD->param_size(); ++Idx) {
+        ParmVarDecl *Param = FD->getParamDecl(Idx);
+        if (!ArgExprs[Idx] || !Param || !Param->getType()->isPointerType() ||
+            !ArgExprs[Idx]->getType()->isPointerType())
+          continue;
+
+        auto ParamAS = Param->getType()->getPointeeType().getAddressSpace();
+        auto ArgTy = ArgExprs[Idx]->getType();
+        auto ArgPtTy = ArgTy->getPointeeType();
+        auto ArgAS = ArgPtTy.getAddressSpace();
+
+        // Only allow this implicit cast from a default address space pointee
+        // type to a non-default address space pointee type
+        if (ArgAS != LangAS::Default || ParamAS == LangAS::Default)
+          continue;
+
+        // First, ensure that the Arg is an RValue.
+        if (ArgExprs[Idx]->isGLValue()) {
+          ArgExprs[Idx] = ImplicitCastExpr::Create(
+              Context, ArgExprs[Idx]->getType(), CK_NoOp, ArgExprs[Idx],
+              nullptr, VK_PRValue, FPOptionsOverride());
+        }
+
+        // Construct a new arg type with address space of Param
+        Qualifiers ArgPtQuals = ArgPtTy.getQualifiers();
+        ArgPtQuals.setAddressSpace(ParamAS);
+        auto NewArgPtTy =
+            Context.getQualifiedType(ArgPtTy.getUnqualifiedType(), ArgPtQuals);
+        auto NewArgTy =
+            Context.getQualifiedType(Context.getPointerType(NewArgPtTy),
+                                     ArgTy.getQualifiers());
+
+        // Finally perform an implicit address space cast
+        ArgExprs[Idx] = ImpCastExprToType(ArgExprs[Idx], NewArgTy,
+                                          CK_AddressSpaceConversion)
+                            .get();
+      }
+    }
   }
 
   if (Context.isDependenceAllowed() &&
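
(For a single argument, the new loop above amounts to the following; a
sketch using the names from the diff, with ParamAS/ArgAS values showing the
LDS-builtin case.)

    // ParamAS = LangAS::cuda_shared   // builtin expects an LDS pointer
    // ArgAS   = LangAS::Default       // caller passed a generic pointer
    // 1. If the argument is a glvalue, wrap it in a CK_NoOp cast so it
    //    becomes a prvalue.
    // 2. Rebuild the argument's pointer type so its pointee carries ParamAS
    //    (NewArgTy), preserving all other qualifiers.
    // 3. ImpCastExprToType(Arg, NewArgTy, CK_AddressSpaceConversion), which
    //    CodeGen lowers to an 'addrspacecast' (see the updated test below).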

diff  --git a/clang/test/CodeGenCUDA/builtins-amdgcn.cu b/clang/test/CodeGenCUDA/builtins-amdgcn.cu
index 1283bf57db80c..6b0dc7538b412 100644
--- a/clang/test/CodeGenCUDA/builtins-amdgcn.cu
+++ b/clang/test/CodeGenCUDA/builtins-amdgcn.cu
@@ -1,8 +1,8 @@
-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
 // RUN:  -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
 // RUN:  -o - | FileCheck %s
 
-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
 // RUN:  -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
 // RUN:  -o - | FileCheck %s
 
@@ -10,7 +10,7 @@
 
 // CHECK-LABEL: @_Z16use_dispatch_ptrPi(
 // CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i8*
+// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
 __global__ void use_dispatch_ptr(int* out) {
   const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr();
   *out = *dispatch_ptr;
@@ -24,6 +24,39 @@ void test_ds_fmax(float src) {
   volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
 }
 
+// CHECK-LABEL: @_Z12test_ds_faddf(
+// CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
+__global__ void test_ds_fadd(float src) {
+  __shared__ float shared;
+  volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
+}
+
+// CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
+// CHECK: %shared = alloca float*, align 8, addrspace(5)
+// CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
+// CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
+// CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
+// CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
+// CHECK: store float* %[[S0]], float** %shared.ascast, align 8
+// CHECK: %shared1 = load float*, float** %shared.ascast, align 8
+// CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
+// CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
+// CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
+// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
+__global__ void test_ds_fmin(float src, float *shared) {
+  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
+}
+
+// CHECK: @_Z33test_ret_builtin_nondef_addrspace
+// CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
+// CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
+// CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+// CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
+// CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
+__device__ void test_ret_builtin_nondef_addrspace() {
+  void *x = __builtin_amdgcn_dispatch_ptr();
+}
+
 // CHECK-LABEL: @_Z6endpgmv(
 // CHECK: call void @llvm.amdgcn.endpgm()
 __global__ void endpgm() {
@@ -33,12 +66,12 @@ __global__ void endpgm() {
 // Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.
 
 // CHECK-LABEL: @_Z14test_uicmp_i64
-// CHECK:  store i64* %out, i64** %out.addr.ascast
+// CHECK:  store i64* %out1, i64** %out.addr.ascast
 // CHECK-NEXT:  store i64 %a, i64* %a.addr.ascast
 // CHECK-NEXT:  store i64 %b, i64* %b.addr.ascast
 // CHECK-NEXT:  %[[V0:.*]] = load i64, i64* %a.addr.ascast
 // CHECK-NEXT:  %[[V1:.*]] = load i64, i64* %b.addr.ascast
-// CHECK-NEXT:  %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %0, i64 %1, i32 35)
+// CHECK-NEXT:  %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
 // CHECK-NEXT:  %[[V3:.*]] = load i64*, i64** %out.addr.ascast
 // CHECK-NEXT:  store i64 %[[V2]], i64* %[[V3]]
 // CHECK-NEXT:  ret void
@@ -58,3 +91,28 @@ __global__ void test_s_memtime(unsigned long long* out)
 {
   *out = __builtin_amdgcn_s_memtime();
 }
+
+// Check a generic pointer can be passed as a shared pointer and a generic pointer.
+__device__ void func(float *x);
+
+// CHECK: @_Z17test_ds_fmin_funcfPf
+// CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
+// CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
+// CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
+// CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
+// CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
+// CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
+// CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
+// CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
+// CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
+// CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
+// CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
+// CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
+// CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
+// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
+// CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
+// CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
+__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
+  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
+  func(shared);
+}


        

