[Mlir-commits] [mlir] 5f4f922 - [mlir][transform][gpu][python] Add MapForallToBlocks mix-in.

Ingo Müller llvmlistbot at llvm.org
Thu Jul 20 07:20:55 PDT 2023


Author: Ingo Müller
Date: 2023-07-20T14:20:40Z
New Revision: 5f4f9220f9d2f0163b330e4a2e2a60ea8a917378

URL: https://github.com/llvm/llvm-project/commit/5f4f9220f9d2f0163b330e4a2e2a60ea8a917378
DIFF: https://github.com/llvm/llvm-project/commit/5f4f9220f9d2f0163b330e4a2e2a60ea8a917378.diff

LOG: [mlir][transform][gpu][python] Add MapForallToBlocks mix-in.

This patch adds a mix-in class for MapForallToBlocks with overloaded
constructors. This makes it optional to provide the return type of the
op, which defaults to `AnyOpType`.

Reviewed By: ftynse

Differential Revision: https://reviews.llvm.org/D155717

Added: 
    mlir/python/mlir/dialects/_gpu_transform_ops_ext.py
    mlir/test/python/dialects/transform_gpu_ext.py

Modified: 
    mlir/python/CMakeLists.txt

Removed: 
    


################################################################################
diff  --git a/mlir/python/CMakeLists.txt b/mlir/python/CMakeLists.txt
index e5d37b2289862d..50fbca38a08fb4 100644
--- a/mlir/python/CMakeLists.txt
+++ b/mlir/python/CMakeLists.txt
@@ -148,6 +148,7 @@ declare_mlir_dialect_extension_python_bindings(
   ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/mlir"
   TD_FILE dialects/GPUTransformOps.td
   SOURCES
+    dialects/_gpu_transform_ops_ext.py
     dialects/transform/gpu.py
   DIALECT_NAME transform
   EXTENSION_NAME gpu_transform)

diff  --git a/mlir/python/mlir/dialects/_gpu_transform_ops_ext.py b/mlir/python/mlir/dialects/_gpu_transform_ops_ext.py
new file mode 100644
index 00000000000000..087606e3d229ec
--- /dev/null
+++ b/mlir/python/mlir/dialects/_gpu_transform_ops_ext.py
@@ -0,0 +1,69 @@
+#  Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+#  See https://llvm.org/LICENSE.txt for license information.
+#  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+try:
+    from ..ir import *
+    from ..dialects import transform
+except ImportError as e:
+    raise RuntimeError("Error loading imports from extension module") from e
+
+from typing import Optional, Sequence, Union, overload
+
+
+class MapForallToBlocks:
+    """Specialization for MapForallToBlocks class."""
+
+    @overload
+    def __init__(
+        self,
+        result_type: Type,
+        target: Union[Operation, OpView, Value],
+        *,
+        grid_dims: Optional[Sequence[int]] = None,
+        generate_gpu_launch: Optional[bool] = None,
+        loc=None,
+        ip=None
+    ):
+        ...
+
+    @overload
+    def __init__(
+        self,
+        target: Union[Operation, OpView, Value],
+        *,
+        grid_dims: Optional[Sequence[int]] = None,
+        generate_gpu_launch: Optional[bool] = None,
+        loc=None,
+        ip=None
+    ):
+        ...
+
+    def __init__(
+        self,
+        result_type_or_target: Union[Operation, OpView, Type, Value],
+        target_or_none: Optional[Union[Operation, OpView, Value]] = None,
+        *,
+        grid_dims: Optional[Sequence[int]] = None,
+        generate_gpu_launch: Optional[bool] = None,
+        loc=None,
+        ip=None
+    ):
+        if isinstance(result_type_or_target, Type):
+            result_type = result_type_or_target
+            target = target_or_none
+        else:
+            result_type = transform.AnyOpType.get()
+            target = result_type_or_target
+
+        if grid_dims is not None and not isinstance(grid_dims, ArrayAttr):
+            grid_dims = DenseI64ArrayAttr.get(grid_dims)
+
+        super().__init__(
+            result_type,
+            target,
+            grid_dims=grid_dims,
+            generate_gpu_launch=generate_gpu_launch,
+            loc=loc,
+            ip=ip,
+        )

diff  --git a/mlir/test/python/dialects/transform_gpu_ext.py b/mlir/test/python/dialects/transform_gpu_ext.py
new file mode 100644
index 00000000000000..8b14b7e48f5ffb
--- /dev/null
+++ b/mlir/test/python/dialects/transform_gpu_ext.py
@@ -0,0 +1,59 @@
+# RUN: %PYTHON %s | FileCheck %s
+
+from mlir.ir import *
+from mlir.dialects import transform
+from mlir.dialects.transform import gpu
+
+
+def run(f):
+    with Context(), Location.unknown():
+        module = Module.create()
+        with InsertionPoint(module.body):
+            print("\nTEST:", f.__name__)
+            f()
+        print(module)
+    return f
+
+
+@run
+def testMapForallToBlocksCompact():
+    sequence = transform.SequenceOp(
+        transform.FailurePropagationMode.PROPAGATE, [], transform.AnyOpType.get()
+    )
+    with InsertionPoint(sequence.body):
+        gpu.MapForallToBlocks(sequence.bodyTarget)
+        transform.YieldOp()
+    # CHECK-LABEL: TEST: testMapForallToBlocksCompact
+    # CHECK: = transform.gpu.map_forall_to_blocks
+    # CHECK-NOT: grid_dims
+    # CHECK-SAME: (!transform.any_op) -> !transform.any_op
+    # CHECK-NOT: grid_dims
+
+
+@run
+def testMapForallToBlocksTyped():
+    sequence = transform.SequenceOp(
+        transform.FailurePropagationMode.PROPAGATE, [], transform.AnyOpType.get()
+    )
+    with InsertionPoint(sequence.body):
+        gpu.MapForallToBlocks(
+            transform.OperationType.get("test.dummy"), sequence.bodyTarget
+        )
+        transform.YieldOp()
+    # CHECK-LABEL: TEST: testMapForallToBlocksTyped
+    # CHECK: = transform.gpu.map_forall_to_blocks
+    # CHECK-SAME: (!transform.any_op) -> !transform.op<"test.dummy">
+
+
+@run
+def testMapForallToBlocksGridDims():
+    sequence = transform.SequenceOp(
+        transform.FailurePropagationMode.PROPAGATE, [], transform.AnyOpType.get()
+    )
+    with InsertionPoint(sequence.body):
+        gpu.MapForallToBlocks(sequence.bodyTarget, grid_dims=[4, 2])
+        transform.YieldOp()
+    # CHECK-LABEL: TEST: testMapForallToBlocksGridDims
+    # CHECK: = transform.gpu.map_forall_to_blocks
+    # CHECK-SAME: grid_dims = [4, 2]
+    # CHECK-SAME: (!transform.any_op) -> !transform.any_op


        


More information about the Mlir-commits mailing list