[flang-commits] [flang] ec066d3 - [flang][cuda] cuf.alloc in device context should be converted to fir.alloc (#116110)

via flang-commits flang-commits at lists.llvm.org
Wed Nov 13 14:57:47 PST 2024


Author: Valentin Clement (バレンタイン クレメン)
Date: 2024-11-13T14:57:42-08:00
New Revision: ec066d30e29fce388b1722971970d73ec65f14fb

URL: https://github.com/llvm/llvm-project/commit/ec066d30e29fce388b1722971970d73ec65f14fb
DIFF: https://github.com/llvm/llvm-project/commit/ec066d30e29fce388b1722971970d73ec65f14fb.diff

LOG: [flang][cuda] cuf.alloc in device context should be converted to fir.alloc (#116110)

Update `inDeviceContext` to account for the gpu.func operation.

Added: 
    

Modified: 
    flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
    flang/test/Fir/CUDA/cuda-alloc-free.fir

Removed: 
    


################################################################################
diff --git a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
index 58a3cdc905d36e..cf1f2be5a9ec4e 100644
--- a/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFOpConversion.cpp
@@ -251,6 +251,8 @@ struct CUFDeallocateOpConversion
 static bool inDeviceContext(mlir::Operation *op) {
   if (op->getParentOfType<cuf::KernelOp>())
     return true;
+  if (auto funcOp = op->getParentOfType<mlir::gpu::GPUFuncOp>())
+    return true;
   if (auto funcOp = op->getParentOfType<mlir::func::FuncOp>()) {
     if (auto cudaProcAttr =
             funcOp.getOperation()->getAttrOfType<cuf::ProcAttributeAttr>(

diff --git a/flang/test/Fir/CUDA/cuda-alloc-free.fir b/flang/test/Fir/CUDA/cuda-alloc-free.fir
index 88b1a00e4a5b25..25545d1f72f52d 100644
--- a/flang/test/Fir/CUDA/cuda-alloc-free.fir
+++ b/flang/test/Fir/CUDA/cuda-alloc-free.fir
@@ -73,4 +73,14 @@ func.func @_QPtest_type() {
 // CHECK: %[[CONV_BYTES:.*]] = fir.convert %[[BYTES]] : (index) -> i64
 // CHECK: fir.call @_FortranACUFMemAlloc(%[[CONV_BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (i64, i32, !fir.ref<i8>, i32) -> !fir.llvm_ptr<i8>
 
+gpu.module @cuda_device_mod [#nvvm.target] {
+  gpu.func @_QMalloc() kernel {
+    %0 = cuf.alloc !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", data_attr = #cuf.cuda<device>, uniq_name = "_QMallocEa"} -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+    gpu.return 
+  }
+}
+
+// CHECK-LABEL: gpu.func @_QMalloc() kernel
+// CHECK: fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", uniq_name = "_QMallocEa"}
+
 } // end module


        


More information about the flang-commits mailing list