[flang-commits] [flang] [flang][cuda] cuf.alloc in device context should be converted to fir.alloca (PR #116110)

Valentin Clement バレンタイン クレメン via flang-commits flang-commits at lists.llvm.org
Wed Nov 13 13:29:54 PST 2024


https://github.com/clementval created https://github.com/llvm/llvm-project/pull/116110

Update `inDeviceContext` to account for the gpu.func operation.

From 4d99de04fd5ff553c54ddf6e2f9716f5ba7967f6 Mon Sep 17 00:00:00 2001
From: Valentin Clement <clementval at gmail.com>
Date: Wed, 13 Nov 2024 12:59:03 -0800
Subject: [PATCH] [flang][cuda] cuf.alloc in device context should be converted
 to fir.alloca

Update `inDeviceContext` to account for the gpu.func operation.
---
 flang/lib/Optimizer/Transforms/CUFOpConversion.cpp |  2 ++
 flang/test/Fir/CUDA/cuda-alloc-free.fir            | 10 ++++++++++
 2 files changed, 12 insertions(+)

diff --git a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
index 58a3cdc905d36e..cf1f2be5a9ec4e 100644
--- a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
@@ -251,6 +251,8 @@ struct CUFDeallocateOpConversion
 static bool inDeviceContext(mlir::Operation *op) {
   if (op->getParentOfType<cuf::KernelOp>())
     return true;
+  if (auto funcOp = op->getParentOfType<mlir::gpu::GPUFuncOp>())
+    return true;
   if (auto funcOp = op->getParentOfType<mlir::func::FuncOp>()) {
     if (auto cudaProcAttr =
             funcOp.getOperation()->getAttrOfType<cuf::ProcAttributeAttr>(
diff --git a/flang/test/Fir/CUDA/cuda-alloc-free.fir b/flang/test/Fir/CUDA/cuda-alloc-free.fir
index 88b1a00e4a5b25..25545d1f72f52d 100644
--- a/flang/test/Fir/CUDA/cuda-alloc-free.fir
+++ b/flang/test/Fir/CUDA/cuda-alloc-free.fir
@@ -73,4 +73,14 @@ func.func @_QPtest_type() {
 // CHECK: %[[CONV_BYTES:.*]] = fir.convert %[[BYTES]] : (index) -> i64
 // CHECK: fir.call @_FortranACUFMemAlloc(%[[CONV_BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (i64, i32, !fir.ref<i8>, i32) -> !fir.llvm_ptr<i8>
 
+gpu.module @cuda_device_mod [#nvvm.target] {
+  gpu.func @_QMalloc() kernel {
+    %0 = cuf.alloc !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", data_attr = #cuf.cuda<device>, uniq_name = "_QMallocEa"} -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+    gpu.return 
+  }
+}
+
+// CHECK-LABEL: gpu.func @_QMalloc() kernel
+// CHECK: fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", uniq_name = "_QMallocEa"}
+
 } // end module



More information about the flang-commits mailing list