[Mlir-commits] [mlir] [OpenMP][mlir] Add Groupprivate op in omp dialect. (PR #162704)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Sun Nov 30 23:14:40 PST 2025


https://github.com/skc7 updated https://github.com/llvm/llvm-project/pull/162704

From 699220c26dd5c8c366621a0235c2d24e48c3c24d Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Fri, 26 Sep 2025 10:06:26 +0530
Subject: [PATCH 1/4] [OpenMP][mlir] Add Groupprivate op in omp dialect.

---
 mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td |  30 ++++++
 .../OpenMP/OpenMPToLLVMIRTranslation.cpp      |  73 ++++++++++++-
 mlir/test/Dialect/OpenMP/ops.mlir             |  36 +++++++
 .../Target/LLVMIR/omptarget-groupprivate.mlir |  41 +++++++
 mlir/test/Target/LLVMIR/openmp-llvm.mlir      | 101 ++++++++++++++++++
 5 files changed, 280 insertions(+), 1 deletion(-)
 create mode 100644 mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir

diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 377f1febf6b8f..6922b29115078 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -2224,4 +2224,34 @@ def WorkdistributeOp : OpenMP_Op<"workdistribute"> {
   let assemblyFormat = "$region attr-dict";
 }
 
+//===----------------------------------------------------------------------===//
+// [6.0] groupprivate Directive
+//===----------------------------------------------------------------------===//
+
+def GroupprivateOp : OpenMP_Op<"groupprivate",
+                      [AllTypesMatch<["sym_addr", "gp_addr"]>]> {
+  let summary = "groupprivate directive";
+  let description = [{
+    The groupprivate directive specifies that variables are replicated, with
+    each group having its own copy.
+
+    This operation takes the address of a symbol that represents the original
+    variable and an optional DeviceTypeAttr, and returns the address of its groupprivate copy.
+    All occurrences of groupprivate variables in a parallel region should
+    use the groupprivate copy returned by this operation.
+
+    The `sym_addr` refers to the address of the symbol, which is a pointer to
+    the original variable.
+  }];
+
+  let arguments = (ins
+    OpenMP_PointerLikeType:$sym_addr,
+    OptionalAttr<DeclareTargetDeviceTypeAttr>:$device_type
+  );
+  let results = (outs OpenMP_PointerLikeType:$gp_addr);
+  let assemblyFormat = [{
+    $sym_addr `:` type($sym_addr) ( `,` `device_type` $device_type^ )? `->` type($gp_addr) attr-dict
+  }];
+}
+
 #endif // OPENMP_OPS
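For reference, the assembly format above yields IR of the following shape (mirroring
the ops.mlir test added later in this patch; the symbol name @gp is illustrative):

    %addr = llvm.mlir.addressof @gp : !llvm.ptr
    %gp0 = omp.groupprivate %addr : !llvm.ptr -> !llvm.ptr
    %gp1 = omp.groupprivate %addr : !llvm.ptr, device_type(nohost) -> !llvm.ptr

The optional device_type clause restricts whether the group-private copy is
materialized during host, nohost (device), or any compilation.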
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 8edec990eaaba..784a7a4649278 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -6013,7 +6013,7 @@ static bool isTargetDeviceOp(Operation *op) {
   // by taking it in as an operand, so we must always lower these in
   // some manner or result in an ICE (whether they end up in a no-op
   // or otherwise).
-  if (mlir::isa<omp::ThreadprivateOp>(op))
+  if (mlir::isa<omp::ThreadprivateOp, omp::GroupprivateOp>(op))
     return true;
 
   if (mlir::isa<omp::TargetAllocMemOp>(op) ||
@@ -6111,6 +6111,74 @@ convertTargetFreeMemOp(Operation &opInst, llvm::IRBuilderBase &builder,
   return success();
 }
 
+/// Converts an OpenMP Groupprivate operation into LLVM IR.
+static LogicalResult
+convertOmpGroupprivate(Operation &opInst, llvm::IRBuilderBase &builder,
+                       LLVM::ModuleTranslation &moduleTranslation) {
+  llvm::OpenMPIRBuilder *ompBuilder = moduleTranslation.getOpenMPBuilder();
+  auto groupprivateOp = cast<omp::GroupprivateOp>(opInst);
+
+  if (failed(checkImplementationStatus(opInst)))
+    return failure();
+
+  bool isTargetDevice = ompBuilder->Config.isTargetDevice();
+  auto deviceType = groupprivateOp.getDeviceType();
+
+  // Skip allocation based on device_type.
+  bool shouldAllocate = true;
+  if (deviceType.has_value()) {
+    switch (*deviceType) {
+    case mlir::omp::DeclareTargetDeviceType::host:
+      // Only allocate on host
+      shouldAllocate = !isTargetDevice;
+      break;
+    case mlir::omp::DeclareTargetDeviceType::nohost:
+      // Only allocate on device
+      shouldAllocate = isTargetDevice;
+      break;
+    case mlir::omp::DeclareTargetDeviceType::any:
+      // Allocate on both
+      shouldAllocate = true;
+      break;
+    }
+  }
+
+  Value symAddr = groupprivateOp.getSymAddr();
+  auto *symOp = symAddr.getDefiningOp();
+
+  if (auto asCast = dyn_cast<LLVM::AddrSpaceCastOp>(symOp))
+    symOp = asCast.getOperand().getDefiningOp();
+
+  if (!isa<LLVM::AddressOfOp>(symOp))
+    return opInst.emitError("Addressing symbol not found");
+  LLVM::AddressOfOp addressOfOp = dyn_cast<LLVM::AddressOfOp>(symOp);
+
+  LLVM::GlobalOp global =
+      addressOfOp.getGlobal(moduleTranslation.symbolTable());
+  llvm::GlobalValue *globalValue = moduleTranslation.lookupGlobal(global);
+  llvm::Value *resultPtr;
+
+  if (shouldAllocate) {
+    // Get the size of the variable
+    llvm::Type *varType = globalValue->getValueType();
+    llvm::Module *llvmModule = moduleTranslation.getLLVMModule();
+    llvm::DataLayout DL = llvmModule->getDataLayout();
+    uint64_t typeSize = DL.getTypeAllocSize(varType);
+    // Call __kmpc_alloc_shared to allocate memory for the groupprivate variable.
+    llvm::FunctionCallee allocSharedFn = ompBuilder->getOrCreateRuntimeFunction(
+        *llvmModule, llvm::omp::OMPRTL___kmpc_alloc_shared);
+    // Call runtime to allocate shared memory for this group
+    resultPtr = builder.CreateCall(allocSharedFn, {builder.getInt64(typeSize)});
+  } else {
+    // Use original global address when not allocating group-private storage
+    resultPtr = moduleTranslation.lookupValue(symAddr);
+    if (!resultPtr)
+      resultPtr = globalValue;
+  }
+  moduleTranslation.mapValue(opInst.getResult(0), resultPtr);
+  return success();
+}
+
 /// Given an OpenMP MLIR operation, create the corresponding LLVM IR (including
 /// OpenMP runtime calls).
 static LogicalResult
@@ -6294,6 +6362,9 @@ convertHostOrTargetOperation(Operation *op, llvm::IRBuilderBase &builder,
           .Case([&](omp::TargetFreeMemOp) {
             return convertTargetFreeMemOp(*op, builder, moduleTranslation);
           })
+          .Case([&](omp::GroupprivateOp) {
+            return convertOmpGroupprivate(*op, builder, moduleTranslation);
+          })
           .Default([&](Operation *inst) {
             return inst->emitError()
                    << "not yet implemented: " << inst->getName();
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index ac29e20907b55..1cdcec3395aa5 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -3367,3 +3367,39 @@ func.func @omp_target_map_clause_type_test(%arg0 : memref<?xi32>) -> () {
 
     return
 }
+
+// CHECK-LABEL: func.func @omp_groupprivate_device_type
+func.func @omp_groupprivate_device_type() {
+  %0 = arith.constant 1 : i32
+  %1 = arith.constant 2 : i32
+  // CHECK: [[ARG0:%.*]] = llvm.mlir.addressof @gp : !llvm.ptr
+  %gp_addr = llvm.mlir.addressof @gp : !llvm.ptr
+  // CHECK: [[ARG1:%.*]] = llvm.mlir.addressof @any : !llvm.ptr
+  %any_addr = llvm.mlir.addressof @any : !llvm.ptr
+  // CHECK: [[ARG2:%.*]] = llvm.mlir.addressof @host : !llvm.ptr
+  %host_addr = llvm.mlir.addressof @host : !llvm.ptr
+  // CHECK: [[ARG3:%.*]] = llvm.mlir.addressof @nohost : !llvm.ptr
+  %nohost_addr = llvm.mlir.addressof @nohost : !llvm.ptr
+
+  // CHECK: {{.*}} = omp.groupprivate [[ARG0]] : !llvm.ptr -> !llvm.ptr
+  %group_private_addr = omp.groupprivate %gp_addr : !llvm.ptr -> !llvm.ptr
+
+  // CHECK: {{.*}} = omp.groupprivate [[ARG1]] : !llvm.ptr, device_type (any) -> !llvm.ptr
+  %group_private_any = omp.groupprivate %any_addr : !llvm.ptr, device_type(any) -> !llvm.ptr
+  llvm.store %1, %group_private_any : i32, !llvm.ptr
+
+  // CHECK: {{.*}} = omp.groupprivate [[ARG2]] : !llvm.ptr, device_type (host) -> !llvm.ptr
+  %group_private_host = omp.groupprivate %host_addr : !llvm.ptr, device_type(host) -> !llvm.ptr
+  llvm.store %1, %group_private_host : i32, !llvm.ptr
+
+  // CHECK: {{.*}} = omp.groupprivate [[ARG3]] : !llvm.ptr, device_type (nohost) -> !llvm.ptr
+  %group_private_nohost = omp.groupprivate %nohost_addr : !llvm.ptr, device_type(nohost) -> !llvm.ptr
+  llvm.store %1, %group_private_nohost : i32, !llvm.ptr
+
+  return
+}
+
+llvm.mlir.global internal @gp() : i32
+llvm.mlir.global internal @any() : i32
+llvm.mlir.global internal @host() : i32
+llvm.mlir.global internal @nohost() : i32
diff --git a/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir b/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
new file mode 100644
index 0000000000000..46e9639adcc06
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
@@ -0,0 +1,41 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+module attributes {omp.is_target_device = true, llvm.target_triple = "amdgcn-amd-amdhsa",
+                    dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memory_space", 5 : ui32>>} {
+  llvm.func @_QQmain() attributes {fir.bindc_name = "main"} {
+
+    %ga = llvm.mlir.addressof @global_a : !llvm.ptr
+    %map_a = omp.map.info var_ptr(%ga : !llvm.ptr, i32) map_clauses(tofrom) capture(ByCopy) -> !llvm.ptr {name = "i"}
+    omp.target map_entries(%map_a -> %arg1 : !llvm.ptr) {
+      %loaded = llvm.load %arg1 : !llvm.ptr -> i32
+
+      %any_addr = llvm.mlir.addressof @global_any : !llvm.ptr
+      %any_gp = omp.groupprivate %any_addr : !llvm.ptr, device_type(any) -> !llvm.ptr
+      llvm.store %loaded, %any_gp : i32, !llvm.ptr
+
+      %host_addr = llvm.mlir.addressof @global_host : !llvm.ptr
+      %host_gp = omp.groupprivate %host_addr : !llvm.ptr, device_type(host) -> !llvm.ptr
+      llvm.store %loaded, %host_gp : i32, !llvm.ptr
+
+      %nohost_addr = llvm.mlir.addressof @global_nohost : !llvm.ptr
+      %nohost_gp = omp.groupprivate %nohost_addr : !llvm.ptr, device_type(nohost) -> !llvm.ptr
+      llvm.store %loaded, %nohost_gp : i32, !llvm.ptr
+
+      omp.terminator
+    }
+    llvm.return
+  }
+  llvm.mlir.global internal @global_a() : i32
+  llvm.mlir.global internal @global_any() : i32
+  llvm.mlir.global internal @global_host() : i32
+  llvm.mlir.global internal @global_nohost() : i32
+}
+
+// CHECK: define {{.*}} amdgpu_kernel void @__omp_offloading_{{.*}}_{{.*}}__QQmain_{{.*}}(ptr %{{.*}}, ptr %{{.*}}) #{{[0-9]+}} {
+// CHECK-LABEL:  omp.target:
+// CHECK-NEXT :    %[[LOAD:.*]] = load i32, ptr %3, align 4
+// CHECK-NEXT :    %[[ALLOC_any:.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK-NEXT :    store i32 %[[LOAD]], ptr %[[ALLOC_any]], align 4
+// CHECK-NEXT :    store i32 %[[LOAD]], ptr @global_host, align 4
+// CHECK-NEXT :    %[[ALLOC_NOHOST:.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK-NEXT :    store i32 %[[LOAD]], ptr %[[ALLOC_NOHOST]], align 4
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 8bd33a382197e..339b1d18942c2 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -3449,3 +3449,104 @@ llvm.func @nested_task_with_deps() {
 
 // CHECK:         ret void
 // CHECK:       }
+
+// -----
+
+module attributes {omp.is_target_device = false} {
+llvm.mlir.global internal @any() : i32
+llvm.mlir.global internal @host() : i32
+llvm.mlir.global internal @nohost() : i32
+llvm.func @omp_groupprivate_host() {
+  %0 = llvm.mlir.constant(1 : i32) : i32
+  %1 = llvm.mlir.addressof @any : !llvm.ptr
+  %2 = omp.groupprivate %1 : !llvm.ptr, device_type(any) -> !llvm.ptr
+  llvm.store %0, %2 : i32, !llvm.ptr
+
+  %3 = llvm.mlir.addressof @host : !llvm.ptr
+  %4 = omp.groupprivate %3 : !llvm.ptr, device_type(host) -> !llvm.ptr
+  llvm.store %0, %4 : i32, !llvm.ptr
+
+  %5 = llvm.mlir.addressof @nohost : !llvm.ptr
+  %6 = omp.groupprivate %5 : !llvm.ptr, device_type(nohost) -> !llvm.ptr
+  llvm.store %0, %6 : i32, !llvm.ptr
+  llvm.return
+}
+}
+
+// CHECK: @any = internal global i32 undef
+// CHECK: @host = internal global i32 undef
+// CHECK: @nohost = internal global i32 undef
+// CHECK-LABEL: @omp_groupprivate_host
+// CHECK:  [[TMP1:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK:  store i32 1, ptr [[TMP1]], align 4
+// CHECK:  [[TMP2:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK:  store i32 1, ptr [[TMP2]], align 4
+// CHECK:  store i32 1, ptr @nohost, align 4
+
+// -----
+
+module attributes {omp.is_target_device = true} {
+llvm.mlir.global internal @any() : i32
+llvm.mlir.global internal @host() : i32
+llvm.mlir.global internal @nohost() : i32
+llvm.func @omp_groupprivate_device() {
+  %0 = llvm.mlir.constant(1 : i32) : i32
+  %1 = llvm.mlir.addressof @any : !llvm.ptr
+  %2 = omp.groupprivate %1 : !llvm.ptr, device_type(any) -> !llvm.ptr
+  llvm.store %0, %2 : i32, !llvm.ptr
+
+  %3 = llvm.mlir.addressof @host : !llvm.ptr
+  %4 = omp.groupprivate %3 : !llvm.ptr, device_type(host) -> !llvm.ptr
+  llvm.store %0, %4 : i32, !llvm.ptr
+
+  %5 = llvm.mlir.addressof @nohost : !llvm.ptr
+  %6 = omp.groupprivate %5 : !llvm.ptr, device_type(nohost) -> !llvm.ptr
+  llvm.store %0, %6 : i32, !llvm.ptr
+  llvm.return
+}
+}
+
+// CHECK: @any = internal global i32 undef
+// CHECK: @host = internal global i32 undef
+// CHECK: @nohost = internal global i32 undef
+// CHECK-LABEL: @omp_groupprivate_device
+// CHECK:  [[TMP1:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK:  store i32 1, ptr [[TMP1]], align 4
+// CHECK:  store i32 1, ptr @host, align 4
+// CHECK:  [[TMP2:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK:  store i32 1, ptr [[TMP2]], align 4
+
+// -----
+
+module attributes {omp.is_target_device = false} {
+llvm.mlir.global internal @any1() : i32
+llvm.mlir.global internal @host1() : i32
+llvm.mlir.global internal @nohost1() : i32
+llvm.func @omp_groupprivate_host() {
+  %0 = llvm.mlir.constant(1 : i32) : i32
+  %1 = llvm.mlir.addressof @any1 : !llvm.ptr
+  %2 = omp.groupprivate %1 : !llvm.ptr, device_type(any) -> !llvm.ptr
+  llvm.store %0, %2 : i32, !llvm.ptr
+
+  %3 = llvm.mlir.addressof @host1 : !llvm.ptr
+  %4 = omp.groupprivate %3 : !llvm.ptr, device_type(host) -> !llvm.ptr
+  llvm.store %0, %4 : i32, !llvm.ptr
+
+  %5 = llvm.mlir.addressof @nohost1 : !llvm.ptr
+  %6 = omp.groupprivate %5 : !llvm.ptr, device_type(nohost) -> !llvm.ptr
+  llvm.store %0, %6 : i32, !llvm.ptr
+  llvm.return
+}
+}
+
+// CHECK: @any1 = internal global i32 undef
+// CHECK: @host1 = internal global i32 undef
+// CHECK: @nohost1 = internal global i32 undef
+// CHECK-LABEL: @omp_groupprivate_host
+// CHECK:  [[TMP1:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK:  store i32 1, ptr [[TMP1]], align 4
+// CHECK:  [[TMP2:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
+// CHECK:  store i32 1, ptr [[TMP2]], align 4
+// CHECK:  store i32 1, ptr @nohost1, align 4
+
+// -----

From 6f16f7daf13566d5716be6f74a3f748eafcfda0f Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Sun, 9 Nov 2025 09:14:30 +0530
Subject: [PATCH 2/4] Use getGlobalFromSymbol for threadprivate and
 groupprivate

---
 .../OpenMP/OpenMPToLLVMIRTranslation.cpp      | 50 ++++++++++++-------
 .../Target/LLVMIR/omptarget-groupprivate.mlir |  2 +-
 2 files changed, 34 insertions(+), 18 deletions(-)

diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 784a7a4649278..4aefc81b047bb 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -3564,6 +3564,26 @@ convertOmpCancellationPoint(omp::CancellationPointOp op,
   return success();
 }
 
+static LLVM::GlobalOp
+getGlobalFromSymbol(Operation *symOp,
+                    LLVM::ModuleTranslation &moduleTranslation,
+                    Operation *opInst) {
+
+  // Handle potential address space cast
+  if (auto asCast = dyn_cast<LLVM::AddrSpaceCastOp>(symOp))
+    symOp = asCast.getOperand().getDefiningOp();
+
+  // Check if we have an AddressOfOp
+  if (!isa<LLVM::AddressOfOp>(symOp)) {
+    if (opInst)
+      opInst->emitError("Addressing symbol not found");
+    return nullptr;
+  }
+
+  LLVM::AddressOfOp addressOfOp = cast<LLVM::AddressOfOp>(symOp);
+  return addressOfOp.getGlobal(moduleTranslation.symbolTable());
+}
+
 /// Converts an OpenMP Threadprivate operation into LLVM IR using
 /// OpenMPIRBuilder.
 static LogicalResult
@@ -3579,15 +3599,10 @@ convertOmpThreadprivate(Operation &opInst, llvm::IRBuilderBase &builder,
   Value symAddr = threadprivateOp.getSymAddr();
   auto *symOp = symAddr.getDefiningOp();
 
-  if (auto asCast = dyn_cast<LLVM::AddrSpaceCastOp>(symOp))
-    symOp = asCast.getOperand().getDefiningOp();
-
-  if (!isa<LLVM::AddressOfOp>(symOp))
-    return opInst.emitError("Addressing symbol not found");
-  LLVM::AddressOfOp addressOfOp = dyn_cast<LLVM::AddressOfOp>(symOp);
-
   LLVM::GlobalOp global =
-      addressOfOp.getGlobal(moduleTranslation.symbolTable());
+      getGlobalFromSymbol(symOp, moduleTranslation, &opInst);
+  if (!global)
+    return failure();
   llvm::GlobalValue *globalValue = moduleTranslation.lookupGlobal(global);
 
   if (!ompBuilder->Config.isTargetDevice()) {
@@ -6144,17 +6159,13 @@ convertOmpGroupprivate(Operation &opInst, llvm::IRBuilderBase &builder,
   }
 
   Value symAddr = groupprivateOp.getSymAddr();
-  auto *symOp = symAddr.getDefiningOp();
-
-  if (auto asCast = dyn_cast<LLVM::AddrSpaceCastOp>(symOp))
-    symOp = asCast.getOperand().getDefiningOp();
-
-  if (!isa<LLVM::AddressOfOp>(symOp))
-    return opInst.emitError("Addressing symbol not found");
-  LLVM::AddressOfOp addressOfOp = dyn_cast<LLVM::AddressOfOp>(symOp);
+  Operation *symOp = symAddr.getDefiningOp();
 
   LLVM::GlobalOp global =
-      addressOfOp.getGlobal(moduleTranslation.symbolTable());
+      getGlobalFromSymbol(symOp, moduleTranslation, &opInst);
+  if (!global)
+    return failure();
+
   llvm::GlobalValue *globalValue = moduleTranslation.lookupGlobal(global);
   llvm::Value *resultPtr;
 
@@ -6175,6 +6186,11 @@ convertOmpGroupprivate(Operation &opInst, llvm::IRBuilderBase &builder,
     if (!resultPtr)
       resultPtr = globalValue;
   }
+
+  llvm::Type *ptrTy = builder.getPtrTy();
+  if (resultPtr->getType() != ptrTy)
+    resultPtr = builder.CreateBitCast(resultPtr, ptrTy);
+
   moduleTranslation.mapValue(opInst.getResult(0), resultPtr);
   return success();
 }
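The refactored helper accepts either a direct llvm.mlir.addressof or one wrapped in
an address space cast. A hypothetical input shape it resolves (the global @var and
its address space are illustrative assumptions, not part of this patch):

    %0 = llvm.mlir.addressof @var : !llvm.ptr<1>
    %1 = llvm.addrspacecast %0 : !llvm.ptr<1> to !llvm.ptr
    %2 = omp.groupprivate %1 : !llvm.ptr -> !llvm.ptr

In both cases getGlobalFromSymbol walks back to the AddressOfOp and returns the
referenced GlobalOp, emitting an error when no such op is found.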
diff --git a/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir b/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
index 46e9639adcc06..f6b37e6446fe7 100644
--- a/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
@@ -33,7 +33,7 @@ module attributes {omp.is_target_device = true, llvm.target_triple = "amdgcn-amd
 
 // CHECK: define {{.*}} amdgpu_kernel void @__omp_offloading_{{.*}}_{{.*}}__QQmain_{{.*}}(ptr %{{.*}}, ptr %{{.*}}) #{{[0-9]+}} {
 // CHECK-LABEL:  omp.target:
-// CHECK-NEXT :    %[[LOAD:.*]] = load i32, ptr %3, align 4
+// CHECK-NEXT :    %[[LOAD:.*]] = load i32, ptr %{{.*}}, align 4
 // CHECK-NEXT :    %[[ALLOC_any:.*]] = call ptr @__kmpc_alloc_shared(i64 4)
 // CHECK-NEXT :    store i32 %[[LOAD]], ptr %[[ALLOC_any]], align 4
 // CHECK-NEXT :    store i32 %[[LOAD]], ptr @global_host, align 4

From 6e2d77aef1e41bf7f3a7c26fe65dded21bcd27fe Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Fri, 28 Nov 2025 21:28:56 +0530
Subject: [PATCH 3/4] Remove bitcast for resultPtr

---
 .../LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp       | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 4aefc81b047bb..aebe368d8cd1a 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -6187,10 +6187,6 @@ convertOmpGroupprivate(Operation &opInst, llvm::IRBuilderBase &builder,
       resultPtr = globalValue;
   }
 
-  llvm::Type *ptrTy = builder.getPtrTy();
-  if (resultPtr->getType() != ptrTy)
-    resultPtr = builder.CreateBitCast(resultPtr, ptrTy);
-
   moduleTranslation.mapValue(opInst.getResult(0), resultPtr);
   return success();
 }

From 409b57e0ced455f263fb67a9b00fd993db9255c5 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Mon, 1 Dec 2025 12:41:50 +0530
Subject: [PATCH 4/4] Use llvm addrspace(3) globals for amdgpu and nvptx
 groupprivate lowering

---
 .../OpenMP/OpenMPToLLVMIRTranslation.cpp      | 40 +++++++----
 .../Target/LLVMIR/omptarget-groupprivate.mlir | 12 ++--
 mlir/test/Target/LLVMIR/openmp-llvm.mlir      | 71 +++++--------------
 3 files changed, 53 insertions(+), 70 deletions(-)

diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index aebe368d8cd1a..434370593769d 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -6170,21 +6170,33 @@ convertOmpGroupprivate(Operation &opInst, llvm::IRBuilderBase &builder,
   llvm::Value *resultPtr;
 
   if (shouldAllocate) {
-    // Get the size of the variable
-    llvm::Type *varType = globalValue->getValueType();
-    llvm::Module *llvmModule = moduleTranslation.getLLVMModule();
-    llvm::DataLayout DL = llvmModule->getDataLayout();
-    uint64_t typeSize = DL.getTypeAllocSize(varType);
-    // Call __kmpc_alloc_shared to allocate memory for the groupprivate variable.
-    llvm::FunctionCallee allocSharedFn = ompBuilder->getOrCreateRuntimeFunction(
-        *llvmModule, llvm::omp::OMPRTL___kmpc_alloc_shared);
-    // Call runtime to allocate shared memory for this group
-    resultPtr = builder.CreateCall(allocSharedFn, {builder.getInt64(typeSize)});
-  } else {
-    // Use original global address when not allocating group-private storage
-    resultPtr = moduleTranslation.lookupValue(symAddr);
-    if (!resultPtr)
+    if (isTargetDevice) {
+      // Get the size of the variable
+      llvm::Type *varType = globalValue->getValueType();
+      llvm::Module *llvmModule = moduleTranslation.getLLVMModule();
+      // Create an LLVM global variable in shared memory.
+      llvm::Triple targetTriple = llvm::Triple(llvmModule->getTargetTriple());
+      if (targetTriple.isAMDGCN() || targetTriple.isNVPTX()) {
+        // Shared address space is 3 for amdgpu and nvptx targets.
+        unsigned sharedAddressSpace = 3;
+        llvm::GlobalVariable *sharedVar = new llvm::GlobalVariable(
+            *llvmModule, varType, false, llvm::GlobalValue::InternalLinkage,
+            llvm::PoisonValue::get(varType), globalValue->getName(), nullptr,
+            llvm::GlobalValue::NotThreadLocal, sharedAddressSpace, false);
+        resultPtr = sharedVar;
+      } else {
+        return opInst.emitError()
+               << "Groupprivate operation is not supported for this target: "
+               << targetTriple.str();
+      }
+    } else {
+      // Use the original global address when lowering for the host.
+      // TODO: Add support for allocating group-private storage on host device.
       resultPtr = globalValue;
+    }
+  } else {
+    // Use original global address when not allocating group-private storage.
+    resultPtr = globalValue;
   }
 
   moduleTranslation.mapValue(opInst.getResult(0), resultPtr);
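With this change, on AMDGPU and NVPTX the group-private copy is emitted as an
internal addrspace(3) global initialized to poison, and stores address it directly,
roughly as follows (the symbol name is illustrative; LLVM uniquifies it against the
existing global):

    @any = internal addrspace(3) global i32 poison
    ...
    store i32 1, ptr addrspace(3) @any, align 4

Other target devices are rejected with an error, and host compilation falls back to
the original global for now, as noted in the TODO above.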
diff --git a/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir b/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
index f6b37e6446fe7..bdbe6d11d9957 100644
--- a/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-groupprivate.mlir
@@ -31,11 +31,15 @@ module attributes {omp.is_target_device = true, llvm.target_triple = "amdgcn-amd
   llvm.mlir.global internal @global_nohost() : i32
 }
 
+// CHECK-DAG: @global_a = internal global i32 undef
+// CHECK-DAG: @global_any = internal global i32 undef
+// CHECK-DAG: @global_host = internal global i32 undef
+// CHECK-DAG: @global_nohost = internal global i32 undef
+// CHECK-DAG: {{.*}} = internal addrspace(3) global i32 poison
+// CHECK-DAG: {{.*}} = internal addrspace(3) global i32 poison
 // CHECK: define {{.*}} amdgpu_kernel void @__omp_offloading_{{.*}}_{{.*}}__QQmain_{{.*}}(ptr %{{.*}}, ptr %{{.*}}) #{{[0-9]+}} {
 // CHECK-LABEL:  omp.target:
 // CHECK-NEXT :    %[[LOAD:.*]] = load i32, ptr %{{.*}}, align 4
-// CHECK-NEXT :    %[[ALLOC_any:.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK-NEXT :    store i32 %[[LOAD]], ptr %[[ALLOC_any]], align 4
+// CHECK-NEXT :    store i32 %[[LOAD]], ptr addrspace(3) {{.*}}, align 4
 // CHECK-NEXT :    store i32 %[[LOAD]], ptr @global_host, align 4
-// CHECK-NEXT :    %[[ALLOC_NOHOST:.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK-NEXT :    store i32 %[[LOAD]], ptr %[[ALLOC_NOHOST]], align 4
+// CHECK-NEXT :    store i32 %[[LOAD]], ptr addrspace(3) {{.*}}, align 4
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 339b1d18942c2..edb43676228c4 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -3452,40 +3452,7 @@ llvm.func @nested_task_with_deps() {
 
 // -----
 
-module attributes {omp.is_target_device = false} {
-llvm.mlir.global internal @any() : i32
-llvm.mlir.global internal @host() : i32
-llvm.mlir.global internal @nohost() : i32
-llvm.func @omp_groupprivate_host() {
-  %0 = llvm.mlir.constant(1 : i32) : i32
-  %1 = llvm.mlir.addressof @any : !llvm.ptr
-  %2 = omp.groupprivate %1 : !llvm.ptr, device_type(any) -> !llvm.ptr
-  llvm.store %0, %2 : i32, !llvm.ptr
-
-  %3 = llvm.mlir.addressof @host : !llvm.ptr
-  %4 = omp.groupprivate %3 : !llvm.ptr, device_type(host) -> !llvm.ptr
-  llvm.store %0, %4 : i32, !llvm.ptr
-
-  %5 = llvm.mlir.addressof @nohost : !llvm.ptr
-  %6 = omp.groupprivate %5 : !llvm.ptr, device_type(nohost) -> !llvm.ptr
-  llvm.store %0, %6 : i32, !llvm.ptr
-  llvm.return
-}
-}
-
-// CHECK: @any = internal global i32 undef
-// CHECK: @host = internal global i32 undef
-// CHECK: @nohost = internal global i32 undef
-// CHECK-LABEL: @omp_groupprivate_host
-// CHECK:  [[TMP1:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK:  store i32 1, ptr [[TMP1]], align 4
-// CHECK:  [[TMP2:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK:  store i32 1, ptr [[TMP2]], align 4
-// CHECK:  store i32 1, ptr @nohost, align 4
-
-// -----
-
-module attributes {omp.is_target_device = true} {
+module attributes {omp.is_target_device = true, llvm.target_triple = "nvptx64-nvidia-cuda"} {
 llvm.mlir.global internal @any() : i32
 llvm.mlir.global internal @host() : i32
 llvm.mlir.global internal @nohost() : i32
@@ -3506,15 +3473,16 @@ llvm.func @omp_groupprivate_device() {
 }
 }
 
-// CHECK: @any = internal global i32 undef
-// CHECK: @host = internal global i32 undef
-// CHECK: @nohost = internal global i32 undef
-// CHECK-LABEL: @omp_groupprivate_device
-// CHECK:  [[TMP1:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK:  store i32 1, ptr [[TMP1]], align 4
-// CHECK:  store i32 1, ptr @host, align 4
-// CHECK:  [[TMP2:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK:  store i32 1, ptr [[TMP2]], align 4
+// CHECK-DAG: @any = internal global i32 undef
+// CHECK-DAG: @host = internal global i32 undef
+// CHECK-DAG: @nohost = internal global i32 undef
+// CHECK-DAG: {{.*}} = internal addrspace(3) global i32 poison
+// CHECK-DAG: {{.*}} = internal addrspace(3) global i32 poison
+// CHECK-LABEL: define void @omp_groupprivate_device()
+// CHECK: store i32 1, ptr addrspace(3) {{.*}}, align 4
+// CHECK: store i32 1, ptr @host, align 4
+// CHECK: store i32 1, ptr addrspace(3) {{.*}}, align 4
+// CHECK: ret void
 
 // -----
 
@@ -3539,14 +3507,13 @@ llvm.func @omp_groupprivate_host() {
 }
 }
 
-// CHECK: @any1 = internal global i32 undef
-// CHECK: @host1 = internal global i32 undef
-// CHECK: @nohost1 = internal global i32 undef
-// CHECK-LABEL: @omp_groupprivate_host
-// CHECK:  [[TMP1:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK:  store i32 1, ptr [[TMP1]], align 4
-// CHECK:  [[TMP2:%.*]] = call ptr @__kmpc_alloc_shared(i64 4)
-// CHECK:  store i32 1, ptr [[TMP2]], align 4
-// CHECK:  store i32 1, ptr @nohost1, align 4
+// CHECK-DAG: @any1 = internal global i32 undef
+// CHECK-DAG: @host1 = internal global i32 undef
+// CHECK-DAG: @nohost1 = internal global i32 undef
+// CHECK-LABEL: define void @omp_groupprivate_host()
+// CHECK: store i32 1, ptr @any1, align 4
+// CHECK: store i32 1, ptr @host1, align 4
+// CHECK: store i32 1, ptr @nohost1, align 4
+// CHECK: ret void
 
 // -----


