[Mlir-commits] [mlir] Add optional attributes of kernelModule and kernelFunc for outlining kernels. (PR #118861)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Thu Dec 5 11:32:27 PST 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-mlir

Author: Zhen Wang (wangzpgi)

<details>
<summary>Changes</summary>

Adding optional attributes so we can specify the names of the generated kernel function and the kernel module it is outlined into.
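
For illustration, a minimal sketch (not part of this patch) of how a client could tag an existing `gpu.launch` op with the new attributes before the outlining pass runs. The attribute names `kernelFunc` and `kernelModule` match the ones queried in the pass below; the helper and the chosen symbol names are hypothetical.

```c++
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/IR/BuiltinAttributes.h"

// Request that the launched kernel be outlined into a function named
// @my_kernel inside a gpu.module named @my_kernel_module.
static void tagLaunchForOutlining(mlir::gpu::LaunchOp launchOp) {
  mlir::MLIRContext *ctx = launchOp.getContext();
  launchOp->setAttr("kernelFunc", mlir::SymbolRefAttr::get(ctx, "my_kernel"));
  launchOp->setAttr("kernelModule",
                    mlir::SymbolRefAttr::get(ctx, "my_kernel_module"));
}
```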

---
Full diff: https://github.com/llvm/llvm-project/pull/118861.diff


2 Files Affected:

- (modified) mlir/include/mlir/Dialect/GPU/IR/GPUOps.td (+17-1) 
- (modified) mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp (+34-7) 


``````````diff
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index d08e7ceb9e6c69..71d14f5f7774b9 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -803,7 +803,9 @@ def GPU_LaunchOp : GPU_Op<"launch", [
                Optional<Index>:$clusterSizeX,
                Optional<Index>:$clusterSizeY,
                Optional<Index>:$clusterSizeZ,
-               Optional<I32>:$dynamicSharedMemorySize)>,
+               Optional<I32>:$dynamicSharedMemorySize,
+               OptionalAttr<SymbolRefAttr>:$kernelFunc,
+               OptionalAttr<SymbolRefAttr>:$kernelModule)>,
     Results<(outs Optional<GPU_AsyncToken>:$asyncToken)> {
   let summary = "GPU kernel launch operation";
 
@@ -837,6 +839,10 @@ def GPU_LaunchOp : GPU_Op<"launch", [
     -   a variadic number of Workgroup memory attributions.
     -   a variadic number of Private memory attributions.
 
+    The `kernelFunc` and `kernelModule` attributes are optional and specify,
+    respectively, the kernel name and the module into which the kernel should be outlined.
+
+
     Syntax:
 
     ```
@@ -1030,6 +1036,16 @@ def GPU_LaunchOp : GPU_Op<"launch", [
     static StringRef getNumWorkgroupAttributionsAttrName() {
       return "workgroup_attributions";
     }
+
+    /// Checks if the kernel func name attribute is present.
+    bool hasKernelFuncName() {
+      return getKernelFunc().has_value();
+    }
+
+    /// Checks if the kernel module name attribute is present.
+    bool hasKernelModuleName() {
+      return getKernelModule().has_value();
+    }
   }];
 
   let hasCanonicalizer = 1;
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 5f6556d915f41c..872200566bb315 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -364,9 +364,17 @@ class GpuKernelOutliningPass
       Block::iterator insertPt(func->getNextNode());
       auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
         SetVector<Value> operands;
-        std::string kernelFnName =
-            Twine(op->getParentOfType<SymbolOpInterface>().getName(), "_kernel")
-                .str();
+        std::string kernelFnName;
+        if (op.hasKernelFuncName()) {
+          kernelFnName = op->getAttrOfType<mlir::SymbolRefAttr>("kernelFunc")
+                             .getRootReference()
+                             .str();
+        } else {
+          kernelFnName =
+              Twine(op->getParentOfType<SymbolOpInterface>().getName(),
+                    "_kernel")
+                  .str();
+        }
 
         gpu::GPUFuncOp outlinedFunc =
             outlineKernelFuncImpl(op, kernelFnName, operands);
@@ -374,7 +382,7 @@ class GpuKernelOutliningPass
         // Create nested module and insert outlinedFunc. The module will
         // originally get the same name as the function, but may be renamed on
         // insertion into the parent module.
-        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
+        auto kernelModule = createKernelModule(op, outlinedFunc, symbolTable);
         symbolTable.insert(kernelModule, insertPt);
 
         // Potentially changes signature, pulling in constants.
@@ -395,7 +403,8 @@ class GpuKernelOutliningPass
 
 private:
   /// Returns a gpu.module containing kernelFunc and all callees (recursive).
-  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
+  gpu::GPUModuleOp createKernelModule(gpu::LaunchOp gpuLaunchOp,
+                                      gpu::GPUFuncOp kernelFunc,
                                       const SymbolTable &parentSymbolTable) {
     // TODO: This code cannot use an OpBuilder because it must be inserted into
     // a SymbolTable by the caller. SymbolTable needs to be refactored to
@@ -403,8 +412,26 @@ class GpuKernelOutliningPass
     // and then this needs to use the OpBuilder.
     auto *context = getOperation().getContext();
     OpBuilder builder(context);
-    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
-                                                         kernelFunc.getName());
+    std::string kernelModuleName;
+    if (gpuLaunchOp.hasKernelModuleName()) {
+      kernelModuleName =
+          gpuLaunchOp->getAttrOfType<mlir::SymbolRefAttr>("kernelModule")
+              .getRootReference()
+              .str();
+    } else {
+      kernelModuleName = kernelFunc.getName();
+    }
+
+    gpu::GPUModuleOp kernelModule;
+    // Check if the module already exists in the symbol table
+    if (auto existingModule =
+            parentSymbolTable.lookup<gpu::GPUModuleOp>(kernelModuleName)) {
+      kernelModule = existingModule;
+    } else {
+      // If not found, create a new GPU module
+      kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
+                                                      kernelModuleName);
+    }
 
     // If a valid data layout spec was provided, attach it to the kernel module.
     // Otherwise, the default data layout will be used.

``````````
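
A couple of usage notes on the diff above, with a hedged sketch (the helper and module handle below are illustrative, not from the patch): downstream code can query the new optional attributes through the generated `getKernelFuncAttr()`/`getKernelModuleAttr()` accessors together with the `hasKernelFuncName()`/`hasKernelModuleName()` helpers added here, and because `createKernelModule` now reuses an existing `gpu.module` with the requested name, launches tagged with the same `kernelModule` should end up outlined into a single module.

```c++
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "llvm/Support/raw_ostream.h"

// Walk a module and report where each gpu.launch asks to be outlined.
static void reportOutliningTargets(mlir::ModuleOp module) {
  module.walk([](mlir::gpu::LaunchOp op) {
    if (op.hasKernelFuncName())
      llvm::outs() << "kernel func:   "
                   << op.getKernelFuncAttr().getRootReference().getValue()
                   << "\n";
    if (op.hasKernelModuleName())
      llvm::outs() << "kernel module: "
                   << op.getKernelModuleAttr().getRootReference().getValue()
                   << "\n";
  });
}
```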

</details>


https://github.com/llvm/llvm-project/pull/118861


More information about the Mlir-commits mailing list