[flang-commits] [flang] [flang][cuda] Carry over the CUDA attribute in target rewrite (PR #136811)
via flang-commits
flang-commits at lists.llvm.org
Tue Apr 22 21:36:53 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-flang-codegen
Author: Valentin Clement (バレンタイン クレメン) (clementval)
Changes
---
Full diff: https://github.com/llvm/llvm-project/pull/136811.diff
2 Files Affected:
- (modified) flang/lib/Optimizer/CodeGen/TargetRewrite.cpp (+5)
- (modified) flang/test/Fir/CUDA/cuda-target-rewrite.mlir (+2-2)
``````````diff
diff --git a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
index 6f5e01612fc97..05ebc46cab1b2 100644
--- a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
+++ b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
@@ -531,6 +531,11 @@ class TargetRewrite : public fir::impl::TargetRewritePassBase<TargetRewrite> {
if (callOp.getClusterSizeZ())
newCall.getClusterSizeZMutable().assign(callOp.getClusterSizeZ());
newCallResults.append(newCall.result_begin(), newCall.result_end());
+ if (auto cudaProcAttr =
+ callOp->template getAttrOfType<cuf::ProcAttributeAttr>(
+ cuf::getProcAttrName())) {
+ newCall->setAttr(cuf::getProcAttrName(), cudaProcAttr);
+ }
} else if constexpr (std::is_same_v<std::decay_t<A>, fir::CallOp>) {
fir::CallOp newCall;
if (callOp.getCallee()) {
diff --git a/flang/test/Fir/CUDA/cuda-target-rewrite.mlir b/flang/test/Fir/CUDA/cuda-target-rewrite.mlir
index 10e8b8902aa63..a334934f31723 100644
--- a/flang/test/Fir/CUDA/cuda-target-rewrite.mlir
+++ b/flang/test/Fir/CUDA/cuda-target-rewrite.mlir
@@ -45,7 +45,7 @@ gpu.module @testmod {
func.func @main(%arg0: complex<f64>) {
%0 = llvm.mlir.constant(0 : i64) : i64
%1 = llvm.mlir.constant(0 : i32) : i32
- gpu.launch_func @testmod::@_QPtest blocks in (%0, %0, %0) threads in (%0, %0, %0) : i64 dynamic_shared_memory_size %1 args(%arg0 : complex<f64>)
+ gpu.launch_func @testmod::@_QPtest blocks in (%0, %0, %0) threads in (%0, %0, %0) : i64 dynamic_shared_memory_size %1 args(%arg0 : complex<f64>) {cuf.proc_attr = #cuf.cuda_proc<global>}
return
}
@@ -54,4 +54,4 @@ func.func @main(%arg0: complex<f64>) {
// CHECK-LABEL: gpu.func @_QPtest
// CHECK-SAME: (%arg0: f64, %arg1: f64) kernel {
// CHECK: gpu.return
-// CHECK: gpu.launch_func @testmod::@_QPtest blocks in (%{{.*}}, %{{.*}}, %{{.*}}) threads in (%{{.*}}, %{{.*}}, %{{.*}}) : i64 dynamic_shared_memory_size %{{.*}} args(%{{.*}} : f64, %{{.*}} : f64)
+// CHECK: gpu.launch_func @testmod::@_QPtest blocks in (%{{.*}}, %{{.*}}, %{{.*}}) threads in (%{{.*}}, %{{.*}}, %{{.*}}) : i64 dynamic_shared_memory_size %{{.*}} args(%{{.*}} : f64, %{{.*}} : f64) {cuf.proc_attr = #cuf.cuda_proc<global>}
``````````
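For context, the C++ hunk above follows the common MLIR idiom of forwarding a discardable attribute from the op being rewritten onto its replacement. A minimal sketch of that idiom is below; the helper name `copyAttrIfPresent` is hypothetical and not part of the patch, which inlines the equivalent logic for `cuf::getProcAttrName()` when rebuilding the `gpu.launch_func` op.

```cpp
// Sketch only: generic forwarding of a discardable attribute during a rewrite.
// The helper name is hypothetical; the patch writes out the equivalent check
// and setAttr call directly in TargetRewrite.cpp.
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Operation.h"

// Copy the named attribute from `from` to `to` if it is present on `from`.
static void copyAttrIfPresent(mlir::Operation *from, mlir::Operation *to,
                              llvm::StringRef name) {
  if (mlir::Attribute attr = from->getAttr(name))
    to->setAttr(name, attr);
}
```

With such a helper, the new lines would amount to copying `cuf.proc_attr` from `callOp` to `newCall`, which is what the updated test verifies: `cuf.proc_attr = #cuf.cuda_proc<global>` now survives the target rewrite on the rebuilt `gpu.launch_func`.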
https://github.com/llvm/llvm-project/pull/136811