[flang-commits] [flang] [flang][cuda] Only create shared memory global when needed (PR #132999)
Valentin Clement バレンタイン クレメン via flang-commits
flang-commits at lists.llvm.org
Tue Mar 25 14:36:02 PDT 2025
https://github.com/clementval created https://github.com/llvm/llvm-project/pull/132999
The shared memory global was created even when the kernel had no shared memory variables. This patch avoids creating it when it is not needed.
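In other words, the per-kernel loop in the pass now bails out before any global is emitted. A condensed sketch of the resulting control flow, using the counter names visible in the patch below (the loop structure and the elided "..." bodies are paraphrased, not verbatim):

// Sketch of the per-kernel logic in CUFComputeSharedMemoryOffsetsAndSize
// after this patch; only the zero-variable guard is new.
for (auto funcOp : gpuModule.getOps<mlir::gpu::GPUFuncOp>()) {
  unsigned nbDynamicSharedVariables = 0;
  unsigned nbStaticSharedVariables = 0;
  // ... walk the kernel's cuf.shared_memory ops, bumping the counters
  //     and accumulating sharedMemSize/alignment ...

  // New: a kernel with no shared memory variables is skipped entirely,
  // so no __shared_mem global is created for it.
  if (nbDynamicSharedVariables == 0 && nbStaticSharedVariables == 0)
    continue;

  // Existing behavior: mixing static and dynamic shared memory in one
  // kernel is diagnosed with mlir::emitError; otherwise the shared
  // memory global is created with the computed size and alignment.
  // ...
}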
From 395dcb25ad7f46e0af36cc5694ca995aaccb9a8a Mon Sep 17 00:00:00 2001
From: Valentin Clement <clementval at gmail.com>
Date: Tue, 25 Mar 2025 14:34:33 -0700
Subject: [PATCH] [flang][cuda] Only create shared memory global when needed
---
.../include/flang/Optimizer/Transforms/Passes.td | 2 +-
.../CUFComputeSharedMemoryOffsetsAndSize.cpp | 4 ++++
flang/test/Fir/CUDA/cuda-shared-offset.mlir | 15 +++++++++++++++
3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td
index fbab435887b8a..c59416fa2c024 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.td
+++ b/flang/include/flang/Optimizer/Transforms/Passes.td
@@ -463,7 +463,7 @@ def CUFComputeSharedMemoryOffsetsAndSize
the global and set it.
}];
- let dependentDialects = ["fir::FIROpsDialect"];
+ let dependentDialects = ["cuf::CUFDialect", "fir::FIROpsDialect"];
}
def SetRuntimeCallAttributes
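(Aside: the pass operates on cuf operations such as cuf.shared_memory, so cuf::CUFDialect is now declared as a dependent dialect to guarantee it is loaded before the pass runs. The TableGen entry expands to roughly this override on the generated pass base class:

// Roughly what the `dependentDialects` list above generates:
void getDependentDialects(mlir::DialectRegistry &registry) const override {
  registry.insert<cuf::CUFDialect, fir::FIROpsDialect>();
}
)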
diff --git a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
index dcb5f42902ee6..8009522a82e27 100644
--- a/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
+++ b/flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp
@@ -111,6 +111,10 @@ struct CUFComputeSharedMemoryOffsetsAndSize
llvm::alignTo(sharedMemSize, align) + llvm::alignTo(size, align);
alignment = std::max(alignment, align);
}
+
+ if (nbDynamicSharedVariables == 0 && nbStaticSharedVariables == 0)
+ continue;
+
if (nbDynamicSharedVariables > 0 && nbStaticSharedVariables > 0)
mlir::emitError(
funcOp.getLoc(),
diff --git a/flang/test/Fir/CUDA/cuda-shared-offset.mlir b/flang/test/Fir/CUDA/cuda-shared-offset.mlir
index 5e9aac4e71438..8c377dbadd835 100644
--- a/flang/test/Fir/CUDA/cuda-shared-offset.mlir
+++ b/flang/test/Fir/CUDA/cuda-shared-offset.mlir
@@ -107,3 +107,18 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<
// CHECK: cuf.shared_memory[%c0{{.*}} : i32] !fir.array<?x?xi32>, %9, %15 : index, index {bindc_name = "s1", uniq_name = "_QMmFss1Es1"} -> !fir.ref<!fir.array<?x?xi32>>
// CHECK: %[[CONV_DYNSIZE:.*]] = fir.convert %[[DYNSIZE]] : (index) -> i32
// CHECK: cuf.shared_memory[%[[CONV_DYNSIZE]] : i32] !fir.array<?x?xi32>, %26, %31 : index, index {bindc_name = "s2", uniq_name = "_QMmFss1Es2"} -> !fir.ref<!fir.array<?x?xi32>>
+
+// -----
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} {
+ gpu.module @cuda_device_mod {
+ gpu.func @_QPnoshared() kernel {
+ gpu.return
+ }
+ }
+}
+
+// CHECK-LABEL: gpu.func @_QPnoshared()
+// CHECK-NOT: fir.global internal @_QPnoshared__shared_mem
+
+