[flang-commits] [flang] e6dda9c - [flang][cuda] Only create shared memory global when needed (#132999)

via flang-commits flang-commits at lists.llvm.org
Wed Mar 26 09:26:55 PDT 2025


Author: Valentin Clement (バレンタイン クレメン)
Date: 2025-03-26T09:26:50-07:00
New Revision: e6dda9c23a1a91d1da39f43b0de735ca85961220

URL: https://github.com/llvm/llvm-project/commit/e6dda9c23a1a91d1da39f43b0de735ca85961220
DIFF: https://github.com/llvm/llvm-project/commit/e6dda9c23a1a91d1da39f43b0de735ca85961220.diff

LOG: [flang][cuda] Only create shared memory global when needed (#132999)

Added: 
    

Modified: 
    flang/include/flang/Optimizer/Transforms/Passes.td
    flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
    flang/test/Fir/CUDA/cuda-shared-offset.mlir

Removed: 
    


################################################################################
diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td
index fbab435887b8a..c59416fa2c024 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.td
+++ b/flang/include/flang/Optimizer/Transforms/Passes.td
@@ -463,7 +463,7 @@ def CUFComputeSharedMemoryOffsetsAndSize
     the global and set it.
   }];
 
-  let dependentDialects = ["fir::FIROpsDialect"];
+  let dependentDialects = ["cuf::CUFDialect", "fir::FIROpsDialect"];
 }
 
 def SetRuntimeCallAttributes

diff --git a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
index dcb5f42902ee6..8009522a82e27 100644
--- a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
@@ -111,6 +111,10 @@ struct CUFComputeSharedMemoryOffsetsAndSize
             llvm::alignTo(sharedMemSize, align) + llvm::alignTo(size, align);
         alignment = std::max(alignment, align);
       }
+
+      if (nbDynamicSharedVariables == 0 && nbStaticSharedVariables == 0)
+        continue;
+
       if (nbDynamicSharedVariables > 0 && nbStaticSharedVariables > 0)
         mlir::emitError(
             funcOp.getLoc(),

diff --git a/flang/test/Fir/CUDA/cuda-shared-offset.mlir b/flang/test/Fir/CUDA/cuda-shared-offset.mlir
index 5e9aac4e71438..8c377dbadd835 100644
--- a/flang/test/Fir/CUDA/cuda-shared-offset.mlir
+++ b/flang/test/Fir/CUDA/cuda-shared-offset.mlir
@@ -107,3 +107,18 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<
 // CHECK: cuf.shared_memory[%c0{{.*}} : i32] !fir.array<?x?xi32>, %9, %15 : index, index {bindc_name = "s1", uniq_name = "_QMmFss1Es1"} -> !fir.ref<!fir.array<?x?xi32>>
 // CHECK: %[[CONV_DYNSIZE:.*]] = fir.convert %[[DYNSIZE]] : (index) -> i32
 // CHECK: cuf.shared_memory[%[[CONV_DYNSIZE]] : i32] !fir.array<?x?xi32>, %26, %31 : index, index {bindc_name = "s2", uniq_name = "_QMmFss1Es2"} -> !fir.ref<!fir.array<?x?xi32>>
+
+// -----
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} {
+  gpu.module @cuda_device_mod {
+    gpu.func @_QPnoshared() kernel {
+      gpu.return
+    }
+  }
+}
+
+// CHECK-LABEL: gpu.func @_QPnoshared()
+// CHECK-NOT: fir.global internal @_QPnoshared__shared_mem
+
+

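For context, a minimal before/after sketch based only on the new test above
(the exact shape of the previously emitted global is an assumption; only the
symbol name @_QPnoshared__shared_mem comes from the CHECK-NOT line):

    // Input: a kernel that declares no shared variables.
    gpu.module @cuda_device_mod {
      gpu.func @_QPnoshared() kernel {
        gpu.return
      }
    }

    // Before this change the pass still created a per-kernel shared-memory
    // global for such kernels, e.g. something like:
    //   fir.global internal @_QPnoshared__shared_mem : !fir.array<0xi8>
    // With the early `continue` added in the pass, no global is emitted when
    // a kernel uses neither static nor dynamic shared memory.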