[Mlir-commits] [mlir] [Func][GPU] Create func::ConstantOp using parents with SymbolTable trait (PR #107748)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Sun Sep 8 01:56:41 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-mlir-gpu

@llvm/pr-subscribers-mlir-func

Author: Artem Kroviakov (akroviakov)

<details>
<summary>Changes</summary>

This PR enables `func::ConstantOp` creation and usage for device functions inside GPU modules. 
On current main, the `func::ConstantOp` verifier cannot resolve references to device functions, because it only looks up symbols in the enclosing `ModuleOp`, which does not contain device functions defined inside a `GPUModuleOp`. This PR proposes a more general solution: resolve the referenced symbol in the nearest parent op that carries the `SymbolTable` trait.
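
For illustration, here is a minimal sketch of the structure this enables (trimmed down from the integration test added below; the names `@callee` and `@caller` are illustrative): a `func.func` defined inside a `gpu.module`, referenced by a `func.constant` in a sibling kernel. The symbol must be resolved against the enclosing `gpu.module`, the nearest symbol table, rather than the top-level module.

```mlir
module attributes {gpu.container_module} {
  gpu.module @kernels {
    func.func @callee(%arg0 : f32) {
      return
    }
    gpu.func @caller() kernel {
      %cst = arith.constant 3.0 : f32
      // The verifier must look up @callee in @kernels (the nearest SymbolTable),
      // not in the outer builtin.module, which has no such symbol.
      %ref = func.constant @callee : (f32) -> ()
      func.call_indirect %ref(%cst) : (f32) -> ()
      gpu.return
    }
  }
}
```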

---
Full diff: https://github.com/llvm/llvm-project/pull/107748.diff


2 Files Affected:

- (modified) mlir/lib/Dialect/Func/IR/FuncOps.cpp (+3-1) 
- (added) mlir/test/Integration/GPU/CUDA/indirect-call.mlir (+34) 


``````````diff
diff --git a/mlir/lib/Dialect/Func/IR/FuncOps.cpp b/mlir/lib/Dialect/Func/IR/FuncOps.cpp
index c719981769b9e1..f756c64d793fed 100644
--- a/mlir/lib/Dialect/Func/IR/FuncOps.cpp
+++ b/mlir/lib/Dialect/Func/IR/FuncOps.cpp
@@ -128,7 +128,9 @@ LogicalResult ConstantOp::verify() {
   Type type = getType();
 
   // Try to find the referenced function.
-  auto fn = (*this)->getParentOfType<ModuleOp>().lookupSymbol<FuncOp>(fnName);
+  SymbolTable symbolTable(
+      (*this)->getParentWithTrait<mlir::OpTrait::SymbolTable>());
+  auto fn = symbolTable.lookup<FuncOp>(fnName);
   if (!fn)
     return emitOpError() << "reference to undefined function '" << fnName
                          << "'";
diff --git a/mlir/test/Integration/GPU/CUDA/indirect-call.mlir b/mlir/test/Integration/GPU/CUDA/indirect-call.mlir
new file mode 100644
index 00000000000000..f53a1694daa483
--- /dev/null
+++ b/mlir/test/Integration/GPU/CUDA/indirect-call.mlir
@@ -0,0 +1,34 @@
+// RUN: mlir-opt %s \
+// RUN: | mlir-opt -gpu-lower-to-nvvm-pipeline="cubin-format=%gpu_compilation_format" \
+// RUN: | mlir-cpu-runner \
+// RUN:   --shared-libs=%mlir_cuda_runtime \
+// RUN:   --shared-libs=%mlir_runner_utils \
+// RUN:   --entry-point-result=void \
+// RUN: | FileCheck %s
+
+// CHECK: Hello from 0, 1, 3.000000
+module attributes {gpu.container_module} {
+    gpu.module @kernels {
+        func.func @hello(%arg0 : f32) {
+            %0 = gpu.thread_id x
+            %csti8 = arith.constant 2 : i8
+            gpu.printf "Hello from %lld, %d, %f\n" %0, %csti8, %arg0  : index, i8, f32
+            return
+        }
+    
+        gpu.func @hello_indirect() kernel {
+            %cstf32 = arith.constant 3.0 : f32
+            %func_ref = func.constant @hello : (f32) -> ()
+            func.call_indirect %func_ref(%cstf32) : (f32) -> ()
+            gpu.return
+        }
+    }
+
+    func.func @main() {
+        %c1 = arith.constant 1 : index
+        gpu.launch_func @kernels::@hello_indirect
+            blocks in (%c1, %c1, %c1)
+            threads in (%c1, %c1, %c1)
+        return
+    }
+}

``````````

</details>


https://github.com/llvm/llvm-project/pull/107748

