[Mlir-commits] [mlir] [mlir][gpu] Correctly retrieve kernel function name for nested references (PR #152106)
Longsheng Mou
llvmlistbot at llvm.org
Wed Aug 6 06:07:23 PDT 2025
https://github.com/CoTinker updated https://github.com/llvm/llvm-project/pull/152106
>From 5abc0fc2ccaf7cf07e39e4656670f4ee0905daba Mon Sep 17 00:00:00 2001
From: Longsheng Mou <longshengmou at gmail.com>
Date: Tue, 5 Aug 2025 17:10:27 +0800
Subject: [PATCH] [mlir][gpu] Update attribute definitions in `gpu::LaunchOp`
This PR makes two updates to `gpu.launch`:
- Change the attribute type for kernel function and module from
`SymbolRefAttr` to `FlatSymbolRefAttr` to avoid nested symbol references.
- Rename variables from camel case (kernelFunc, kernelModule) to lower case
(function, module).
---
mlir/include/mlir/Dialect/GPU/IR/GPUOps.td | 6 +++---
.../Dialect/GPU/Transforms/KernelOutlining.cpp | 9 ++++-----
mlir/test/Dialect/GPU/ops.mlir | 12 ++++++++++++
mlir/test/Dialect/GPU/outlining.mlir | 18 +++++++++---------
4 files changed, 28 insertions(+), 17 deletions(-)
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index 2ed7d3810b918..872ba5a39c736 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -804,8 +804,8 @@ def GPU_LaunchOp : GPU_Op<"launch", [
Optional<Index>:$clusterSizeY,
Optional<Index>:$clusterSizeZ,
Optional<I32>:$dynamicSharedMemorySize,
- OptionalAttr<SymbolRefAttr>:$kernelFunc,
- OptionalAttr<SymbolRefAttr>:$kernelModule)>,
+ OptionalAttr<FlatSymbolRefAttr>:$function,
+ OptionalAttr<FlatSymbolRefAttr>:$module)>,
Results<(outs Optional<GPU_AsyncToken>:$asyncToken)> {
let summary = "GPU kernel launch operation";
@@ -839,7 +839,7 @@ def GPU_LaunchOp : GPU_Op<"launch", [
- a variadic number of Workgroup memory attributions.
- a variadic number of Private memory attributions.
- The `kernelFunc` and `kernelModule` attributes are optional and specifies
+ The `function` and `module` attributes are optional and specify
the kernel name and a module in which the kernel should be outlined.
Syntax:
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 99f5c5b0cf139..d4978ca768747 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -356,8 +356,8 @@ class GpuKernelOutliningPass
auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
SetVector<Value> operands;
std::string kernelFnName;
- if (op.getKernelFunc()) {
- kernelFnName = op.getKernelFunc()->getRootReference().str();
+ if (op.getFunction()) {
+ kernelFnName = op.getFunction()->str();
} else {
kernelFnName =
Twine(op->getParentOfType<SymbolOpInterface>().getName(),
@@ -403,9 +403,8 @@ class GpuKernelOutliningPass
OpBuilder builder(context);
std::string kernelModuleName;
gpu::GPUModuleOp kernelModule;
- if (gpuLaunchOp.getKernelModule()) {
- kernelModuleName =
- gpuLaunchOp.getKernelModule()->getRootReference().str();
+ if (gpuLaunchOp.getModule()) {
+ kernelModuleName = gpuLaunchOp.getModule()->str();
kernelModule =
parentSymbolTable.lookup<gpu::GPUModuleOp>(kernelModuleName);
} else {
diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir
index ee1fdfa4d02f0..2bdad6db3815e 100644
--- a/mlir/test/Dialect/GPU/ops.mlir
+++ b/mlir/test/Dialect/GPU/ops.mlir
@@ -17,6 +17,18 @@ module attributes {gpu.container_module} {
return
}
+ // CHECK-LABEL:func @launch_with_module_func_attr(%{{.*}}: index)
+ func.func @launch_with_module_func_attr(%sz : index) {
+ // CHECK: gpu.launch blocks(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}) threads(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}})
+ gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %sz, %grid_y = %sz, %grid_z = %sz)
+ threads(%tx, %ty, %tz) in (%block_x = %sz, %block_y = %sz, %block_z = %sz) {
+ // CHECK: gpu.terminator
+ gpu.terminator
+ // CHECK: {function = @test_kernel_func, module = @existing_module}
+ } {function = @test_kernel_func, module = @existing_module}
+ return
+ }
+
// CHECK-LABEL:func @args(%{{.*}}: index, %{{.*}}: index, %{{.*}}: f32, %{{.*}}: memref<?xf32, 1>) {
func.func @args(%blk : index, %thrd : index, %float : f32, %data : memref<?xf32,1>) {
// CHECK: gpu.launch blocks(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}) threads(%{{.*}}, %{{.*}}, %{{.*}}) in (%{{.*}} = %{{.*}}, %{{.*}} = %{{.*}}, %{{.*}} = %{{.*}})
diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir
index d48fa054432d1..0c1921fe1b643 100644
--- a/mlir/test/Dialect/GPU/outlining.mlir
+++ b/mlir/test/Dialect/GPU/outlining.mlir
@@ -509,7 +509,7 @@ func.func @launch_cluster() {
// CHECK-NEXT: = memref.load %[[KERNEL_ARG1]][%[[TID]]] : memref<?xf32, 1>
// -----
-// This test tests the two optional attributes kernelModule and kernelFunc for gpu.launch
+// This test tests the two optional attributes `module` and `function` for gpu.launch
// CHECK-LABEL: func.func @testKernelAttributes()
// CHECK: gpu.launch_func @test_module::@test_kernel_func blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
// CHECK: gpu.module @test_module
@@ -526,12 +526,12 @@ func.func @testKernelAttributes() {
threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelModule = @test_module, kernelFunc = @test_kernel_func}
+ } {module = @test_module, function = @test_kernel_func}
return
}
// -----
-// This test tests the two optional attributes kernelModule and kernelFunc for gpu.launch, when kernelModule already exists.
+// This test tests the two optional attributes `module` and `function` for gpu.launch, when the `module` already exists.
// CHECK-LABEL: gpu.module @existing_module
// CHECK: gpu.func @test_kernel_func()
@@ -559,12 +559,12 @@ func.func @testExistingModule() {
threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelModule = @existing_module, kernelFunc = @test_kernel_func}
+ } {module = @existing_module, function = @test_kernel_func}
return
}
// -----
-// This test tests the optional attribute kernelModule for gpu.launch.
+// This test tests the optional attribute `module` for gpu.launch.
// CHECK-LABEL: func.func @testKernelModuleOnly()
// CHECK: gpu.launch_func @test_module::@testKernelModuleOnly_kernel blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
// CHECK: gpu.module @test_module
@@ -581,12 +581,12 @@ func.func @testKernelModuleOnly() {
threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelModule = @test_module}
+ } {module = @test_module}
return
}
// -----
-// This test tests the optional attribute kernelFunc for gpu.launch.
+// This test tests the optional attribute `function` for gpu.launch.
// CHECK-LABEL: func.func @testKernelFuncOnly()
// CHECK: gpu.launch_func @test_kernel_func::@test_kernel_func blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
@@ -604,12 +604,12 @@ func.func @testKernelFuncOnly() {
threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY, %block_z = %bDimZ) {
"some_op"(%bx, %tx) : (index, index) -> ()
gpu.terminator
- } {kernelFunc = @test_kernel_func}
+ } {function = @test_kernel_func}
return
}
// -----
-// This test tests gpu.launch when optional attributes kernelModule and kernelFunc are not specified.
+// This test tests gpu.launch when optional attributes `module` and `function` are not specified.
// CHECK-LABEL: func.func @testNoAttributes()
// CHECK: gpu.launch_func @testNoAttributes_kernel::@testNoAttributes_kernel blocks in (%[[GRID_X:.*]], %[[GRID_Y:.*]], %[[GRID_Z:.*]]) threads in (%[[BLOCK_X:.*]], %[[BLOCK_Y:.*]], %[[BLOCK_Z:.*]])
More information about the Mlir-commits
mailing list