[Mlir-commits] [mlir] 7e4b139 - [MLIR] Ensure `gpu.func` must be inside a `gpu.module`.
Frederik Gossen
llvmlistbot at llvm.org
Fri Apr 24 00:21:05 PDT 2020
Author: Frederik Gossen
Date: 2020-04-24T07:17:48Z
New Revision: 7e4b139a04d727bbb721dac1832a0bea863eeda8
URL: https://github.com/llvm/llvm-project/commit/7e4b139a04d727bbb721dac1832a0bea863eeda8
DIFF: https://github.com/llvm/llvm-project/commit/7e4b139a04d727bbb721dac1832a0bea863eeda8.diff
LOG: [MLIR] Ensure `gpu.func` must be inside a `gpu.module`.
Ensure that `gpu.func` is only used within the dedicated `gpu.module`.
Implement the constraint in the GPU dialect and adapt the test cases accordingly.
Differential Revision: https://reviews.llvm.org/D78541
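As a quick illustration (a minimal sketch, not taken from this commit), the
structure the verifier now enforces is that a `gpu.func` sits immediately
inside a `gpu.module`, rather than inside a plain `module` carrying the
now-removed `gpu.kernel_module` attribute:

    // Hypothetical minimal example of the required nesting.
    module attributes {gpu.container_module} {
      gpu.module @kernels {
        gpu.func @kernel(%arg0: f32) kernel {
          gpu.return
        }
      }
    }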
Added:
Modified:
mlir/include/mlir/Dialect/GPU/GPUBase.td
mlir/include/mlir/Dialect/GPU/GPUOps.td
mlir/test/Dialect/GPU/all-reduce-max.mlir
mlir/test/Dialect/GPU/all-reduce.mlir
mlir/test/Dialect/GPU/invalid.mlir
mlir/test/Dialect/GPU/ops.mlir
mlir/test/Dialect/GPU/promotion.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/GPU/GPUBase.td b/mlir/include/mlir/Dialect/GPU/GPUBase.td
index 39e2f1a940d9..16ce93fb1ed9 100644
--- a/mlir/include/mlir/Dialect/GPU/GPUBase.td
+++ b/mlir/include/mlir/Dialect/GPU/GPUBase.td
@@ -33,9 +33,6 @@ def GPU_Dialect : Dialect {
/// functions.
static StringRef getKernelFuncAttrName() { return "gpu.kernel"; }
- /// Get the name of the attribute used to annotate kernel modules.
- static StringRef getKernelModuleAttrName() { return "gpu.kernel_module"; }
-
/// Returns whether the given function is a kernel function, i.e., has the
/// 'gpu.kernel' attribute.
static bool isKernel(Operation *op);
diff --git a/mlir/include/mlir/Dialect/GPU/GPUOps.td b/mlir/include/mlir/Dialect/GPU/GPUOps.td
index 342b36badd30..7254f0fc425d 100644
--- a/mlir/include/mlir/Dialect/GPU/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/GPUOps.td
@@ -85,7 +85,8 @@ def GPU_ThreadIdOp : GPU_IndexOp<"thread_id"> {
}];
}
-def GPU_GPUFuncOp : GPU_Op<"func", [AutomaticAllocationScope, FunctionLike,
+def GPU_GPUFuncOp : GPU_Op<"func", [HasParent<"GPUModuleOp">,
+ AutomaticAllocationScope, FunctionLike,
IsolatedFromAbove, Symbol]> {
let summary = "Function executable on a GPU";
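The `HasParent<"GPUModuleOp">` trait added above attaches a check on the op's
immediate parent, so a `gpu.func` placed anywhere other than directly inside a
`gpu.module` is now rejected at verification time. A minimal, hypothetical
example of IR that fails to verify (the diagnostic matches the new
invalid.mlir case below):

    // Hypothetical: rejected with "'gpu.func' op expects parent op 'gpu.module'".
    module {
      gpu.func @misplaced_kernel() {
        gpu.return
      }
    }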
diff --git a/mlir/test/Dialect/GPU/all-reduce-max.mlir b/mlir/test/Dialect/GPU/all-reduce-max.mlir
index 9c227a8abfe6..5c94bd4a67da 100644
--- a/mlir/test/Dialect/GPU/all-reduce-max.mlir
+++ b/mlir/test/Dialect/GPU/all-reduce-max.mlir
@@ -1,8 +1,8 @@
// RUN: mlir-opt -test-all-reduce-lowering %s | FileCheck %s
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-// CHECK: module @kernels attributes {gpu.kernel_module} {
-module @kernels attributes {gpu.kernel_module} {
+// CHECK: gpu.module @kernels {
+gpu.module @kernels {
// CHECK-LABEL: gpu.func @kernel(
// CHECK-SAME: [[VAL_0:%.*]]: f32) workgroup([[VAL_1:%.*]] : memref<32xf32, 3>) kernel {
diff --git a/mlir/test/Dialect/GPU/all-reduce.mlir b/mlir/test/Dialect/GPU/all-reduce.mlir
index 94ddf8ceea5a..ff7986340ac4 100644
--- a/mlir/test/Dialect/GPU/all-reduce.mlir
+++ b/mlir/test/Dialect/GPU/all-reduce.mlir
@@ -1,8 +1,8 @@
// RUN: mlir-opt -test-all-reduce-lowering %s | FileCheck %s
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-// CHECK: module @kernels attributes {gpu.kernel_module} {
-module @kernels attributes {gpu.kernel_module} {
+// CHECK: gpu.module @kernels {
+gpu.module @kernels {
// CHECK-LABEL: gpu.func @kernel(
// CHECK-SAME: [[VAL_0:%.*]]: f32) workgroup([[VAL_1:%.*]] : memref<32xf32, 3>) kernel {
diff --git a/mlir/test/Dialect/GPU/invalid.mlir b/mlir/test/Dialect/GPU/invalid.mlir
index be02dec83913..b0cc4dd7a6eb 100644
--- a/mlir/test/Dialect/GPU/invalid.mlir
+++ b/mlir/test/Dialect/GPU/invalid.mlir
@@ -86,6 +86,17 @@ module attributes {gpu.container_module} {
// -----
+module attributes {gpu.container_module} {
+ module @kernels {
+ // expected-error @+1 {{'gpu.func' op expects parent op 'gpu.module'}}
+ gpu.func @kernel_1(%arg1 : !llvm<"float*">) {
+ gpu.return
+ }
+ }
+}
+
+// -----
+
module attributes {gpu.container_module} {
module @kernels {
}
@@ -336,7 +347,7 @@ module {
// -----
module {
- module @gpu_funcs attributes {gpu.kernel_module} {
+ gpu.module @gpu_funcs {
// expected-error @+1 {{requires 'type' attribute of function type}}
"gpu.func"() ({
gpu.return
@@ -347,7 +358,7 @@ module {
// -----
module {
- module @gpu_funcs attributes {gpu.kernel_module} {
+ gpu.module @gpu_funcs {
// expected-error @+1 {{expected memref type in attribution}}
gpu.func @kernel() workgroup(%0: i32) {
gpu.return
@@ -358,7 +369,7 @@ module {
// -----
module {
- module @gpu_funcs attributes {gpu.kernel_module} {
+ gpu.module @gpu_funcs {
// expected-error @+1 {{expected memory space 3 in attribution}}
gpu.func @kernel() workgroup(%0: memref<4xf32>) {
gpu.return
@@ -369,7 +380,7 @@ module {
// -----
module {
- module @gpu_funcs attributes {gpu.kernel_module} {
+ gpu.module @gpu_funcs {
// expected-error @+1 {{expected memory space 5 in attribution}}
gpu.func @kernel() private(%0: memref<4xf32>) {
gpu.return
@@ -380,7 +391,7 @@ module {
// -----
module {
- module @gpu_funcs attributes {gpu.kernel_module} {
+ gpu.module @gpu_funcs {
// expected-error @+1 {{expected memory space 5 in attribution}}
gpu.func @kernel() private(%0: memref<4xf32>) {
gpu.return
diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir
index f500d7173f71..0a996a5e4eaa 100644
--- a/mlir/test/Dialect/GPU/ops.mlir
+++ b/mlir/test/Dialect/GPU/ops.mlir
@@ -83,7 +83,7 @@ module attributes {gpu.container_module} {
return
}
- module @gpu_funcs attributes {gpu.kernel_module} {
+ gpu.module @gpu_funcs {
// CHECK-LABEL: gpu.func @kernel_1({{.*}}: f32)
// CHECK: workgroup
// CHECK: private
diff --git a/mlir/test/Dialect/GPU/promotion.mlir b/mlir/test/Dialect/GPU/promotion.mlir
index bb5a93c420f5..7a51048c858f 100644
--- a/mlir/test/Dialect/GPU/promotion.mlir
+++ b/mlir/test/Dialect/GPU/promotion.mlir
@@ -1,6 +1,7 @@
-// RUN: mlir-opt -allow-unregistered-dialect -test-gpu-memory-promotion -split-input-file %s | FileCheck %s
+// RUN: mlir-opt -allow-unregistered-dialect -test-gpu-memory-promotion -pass-pipeline='gpu.module(gpu.func(test-gpu-memory-promotion))' -split-input-file %s | FileCheck %s
+
+gpu.module @foo {
-module @foo attributes {gpu.kernel_module} {
// Verify that the attribution was indeed introduced
// CHECK-LABEL: @memref3d
// CHECK-SAME: (%[[arg:.*]]: memref<5x4xf32>
@@ -49,7 +50,8 @@ module @foo attributes {gpu.kernel_module} {
// -----
-module @foo attributes {gpu.kernel_module} {
+gpu.module @foo {
+
// Verify that the attribution was indeed introduced
// CHECK-LABEL: @memref5d
// CHECK-SAME: (%[[arg:.*]]: memref<8x7x6x5x4xf32>
@@ -101,7 +103,8 @@ module @foo attributes {gpu.kernel_module} {
// -----
-module @foo attributes {gpu.kernel_module} {
+gpu.module @foo {
+
// Check that attribution insertion works fine.
// CHECK-LABEL: @insert
// CHECK-SAME: (%{{.*}}: memref<4xf32>