[Mlir-commits] [mlir] [mlir][transform] NFC - extract a minimal DomainAndOperandsAffineMapT… (PR #145034)

Nicolas Vasilache llvmlistbot at llvm.org
Fri Jun 20 06:19:08 PDT 2025


https://github.com/nicolasvasilache updated https://github.com/llvm/llvm-project/pull/145034

From e1cde0dc3583152ab0e9d5f923cbc4f707241984 Mon Sep 17 00:00:00 2001
From: Nicolas Vasilache <nico.vasilache at amd.com>
Date: Fri, 20 Jun 2025 14:42:21 +0200
Subject: [PATCH 1/2] [mlir][transform] NFC - extract a minimal
 DomainAndOperandsAffineMapTransferInterface out of LinalgStructuredInterface
 and use that for PadTilingInterface

---
 .../Dialect/Linalg/IR/LinalgInterfaces.td     | 89 +++++++++++--------
 .../Dialect/Linalg/Transforms/Transforms.h    | 18 ++--
 .../TransformOps/LinalgTransformOps.cpp       | 12 ++-
 .../Linalg/Transforms/PadTilingInterface.cpp  | 10 ++-
 4 files changed, 75 insertions(+), 54 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index dbc1ac60e0973..53a9dac389287 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -222,9 +222,60 @@ def LinalgFillOpInterface : OpInterface<"FillOpInterface"> {
   ];
 }
 
+def DomainAndOperandsAffineMapTransferInterface 
+    : OpInterface<"DomainAndOperandsAffineMapTransferInterface"> {
+  let description = [{
+    Interface for operations that connect an iteration domain to operands via
+    affine maps. Provides methods to access indexing maps between iteration
+    domain and operand index spaces.
+  }];
+  let cppNamespace = "::mlir::linalg";
+  let methods = [
+    InterfaceMethod<
+      /*desc=*/[{
+        Return the indexing maps attribute within the current operation.
+      }],
+      /*retTy=*/"ArrayAttr",
+      /*methodName=*/"getIndexingMaps"
+    >,
+    InterfaceMethod<
+      /*desc=*/[{
+        Return the indexing maps within the current operation.
+      }],
+      /*retTy=*/"SmallVector<AffineMap>",
+      /*methodName=*/"getIndexingMapsArray",
+      /*args=*/(ins),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        auto range = $_op.getIndexingMaps()
+          .template getAsValueRange<AffineMapAttr>();
+        return {range.begin(), range.end()};
+      }]
+    >,
+    InterfaceMethod<
+      /*desc=*/[{
+        Return the input or output indexing map for `opOperand`.
+      }],
+      /*retTy=*/"AffineMap",
+      /*methodName=*/"getMatchingIndexingMap",
+      /*args=*/(ins "OpOperand*":$opOperand),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        assert(opOperand->getOwner() == this->getOperation());
+        auto indexingMaps =
+          $_op.getIndexingMaps().template getAsValueRange<AffineMapAttr>();
+        return *(indexingMaps.begin() + opOperand->getOperandNumber());
+      }]
+    >,
+  ];
+}
+
 // The 'LinalgStructuredInterface' provides access to the 'LinalgOp' interface.
 def LinalgStructuredInterface
-    : OpInterface<"LinalgOp", [DestinationStyleOpInterface]> {
+    : OpInterface<"LinalgOp", [
+      DestinationStyleOpInterface,
+      DomainAndOperandsAffineMapTransferInterface
+  ]> {
   let cppNamespace = "::mlir::linalg";
   let methods = [
     //===------------------------------------------------------------------===//
@@ -465,21 +516,6 @@ def LinalgStructuredInterface
             blockArgument.getArgNumber());
       }]
     >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return the input or output indexing map for `opOperand`.
-      }],
-      /*retTy=*/"AffineMap",
-      /*methodName=*/"getMatchingIndexingMap",
-      /*args=*/(ins "OpOperand*":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(opOperand->getOwner() == this->getOperation());
-        auto indexingMaps =
-          $_op.getIndexingMaps().template getAsValueRange<AffineMapAttr>();
-        return *(indexingMaps.begin() + opOperand->getOperandNumber());
-      }]
-    >,
     InterfaceMethod<
       /*desc=*/[{
         Return the indexing map for a `result`.
@@ -576,27 +612,6 @@ def LinalgStructuredInterface
       /*methodBody=*/"",
       /*defaultImplementation=*/[{ return success(); }]
     >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return the indexing maps attribute within the current operation.
-      }],
-      /*retTy=*/"ArrayAttr",
-      /*methodName=*/"getIndexingMaps"
-    >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return the indexing maps within the current operation.
-      }],
-      /*retTy=*/"SmallVector<AffineMap>",
-      /*methodName=*/"getIndexingMapsArray",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto range = $_op.getIndexingMaps()
-          .template getAsValueRange<AffineMapAttr>();
-        return {range.begin(), range.end()};
-      }]
-    >,
     InterfaceMethod<
       /*desc=*/[{
         Return true if any of the operands has a dynamic shape.
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 59b7fdeef10b3..b59727f7cbfaa 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -613,9 +613,9 @@ using PadSizeComputationFunction =
 
 /// Specific helper for Linalg ops.
 FailureOr<SmallVector<OpFoldResult>>
-computeLinalgPaddedShape(RewriterBase &rewriter, OpOperand &operandToPad,
-                         ArrayRef<Range> iterationDomain,
-                         const PadTilingInterfaceOptions &options);
+computeDomainAndOperandsAffineMapTransferInterfacePaddedShape(
+    RewriterBase &rewriter, OpOperand &operandToPad,
+    ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options);
 
 /// Pad the iterator dimensions `options.paddingDimensions` of `opToPad`.
 ///
@@ -627,12 +627,12 @@ computeLinalgPaddedShape(RewriterBase &rewriter, OpOperand &operandToPad,
 //    tensor::PadOp.
 /// * The tensor::PadOp is returned on success.
 
-FailureOr<TilingInterface>
-rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
-                  const PadTilingInterfaceOptions &constOptions,
-                  SmallVector<tensor::PadOp> &padOps,
-                  PadSizeComputationFunction computePaddingSizeFun =
-                      &computeLinalgPaddedShape);
+FailureOr<TilingInterface> rewriteAsPaddedOp(
+    RewriterBase &rewriter, TilingInterface opToPad,
+    const PadTilingInterfaceOptions &constOptions,
+    SmallVector<tensor::PadOp> &padOps,
+    PadSizeComputationFunction computePaddingSizeFun =
+        &computeDomainAndOperandsAffineMapTransferInterfacePaddedShape);
 
 namespace detail {
 
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index e627fc83f2ba7..5cbf0bf8500c7 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2229,10 +2229,14 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
       return diag;
     }
 
-    // Only Linalg ops for now, until TilingInterface exposes a loopsToOperand
-    // map / C++ APIs to compute the effect of padding on operands.
-    if (!isa<LinalgOp>(targetOp.getOperation())) {
-      auto diag = emitSilenceableError() << "only LinalgOp supported atm";
+    // Only DomainAndOperandsAffineMapTransferInterface ops for now, until
+    // TilingInterface exposes a loopsToOperand map / C++ APIs to compute the
+    // effect of padding on operands.
+    if (!isa<DomainAndOperandsAffineMapTransferInterface>(
+            targetOp.getOperation())) {
+      auto diag = emitSilenceableError()
+                  << "only DomainAndOperandsAffineMapTransferInterface ops "
+                     "supported atm";
       diag.attachNote(target->getLoc()) << "target op";
       return diag;
     }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
index a9d7bc64f2a6b..679f3b8322a1f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -155,11 +155,13 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
   return paddedShape;
 }
 
-FailureOr<SmallVector<OpFoldResult>> linalg::computeLinalgPaddedShape(
+FailureOr<SmallVector<OpFoldResult>>
+linalg::computeDomainAndOperandsAffineMapTransferInterfacePaddedShape(
     RewriterBase &rewriter, OpOperand &operandToPad,
     ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options) {
-  auto linalgOp = llvm::dyn_cast<LinalgOp>(operandToPad.getOwner());
-  if (!linalgOp)
+  auto transferOp = llvm::dyn_cast<DomainAndOperandsAffineMapTransferInterface>(
+      operandToPad.getOwner());
+  if (!transferOp)
     return failure();
 
   // clang-format off
@@ -173,7 +175,7 @@ FailureOr<SmallVector<OpFoldResult>> linalg::computeLinalgPaddedShape(
   for (const Range &range : iterationDomain)
     loopUpperBounds.push_back(range.size);
 
-  AffineMap indexingMap = linalgOp.getMatchingIndexingMap(&operandToPad);
+  AffineMap indexingMap = transferOp.getMatchingIndexingMap(&operandToPad);
   return computePaddedShape(
       rewriter, cast<TypedValue<RankedTensorType>>(operandToPad.get()),
       indexingMap, loopUpperBounds, options);

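[Editorial note, not part of the patch] For readers skimming the diff: the point of PATCH 1/2 is that the padding helper can now query any op implementing the extracted interface instead of requiring a full LinalgOp. A minimal C++ sketch of that call pattern follows; the helper name, the include, and the surrounding boilerplate are assumptions (and PATCH 2/2 below renames the interface to IndexingMapOpInterface).

// Sketch only: mirrors the dyn_cast + getMatchingIndexingMap pattern used in
// PadTilingInterface.cpp after this patch. Include path and helper name are
// hypothetical; the interface lives in the ::mlir::linalg namespace.
#include "mlir/Dialect/Linalg/IR/Linalg.h"

using namespace mlir;

static FailureOr<AffineMap> getOperandIndexingMap(OpOperand &operand) {
  // Any op implementing the interface exposes the map from its iteration
  // domain to this operand's index space.
  auto transferOp =
      llvm::dyn_cast<linalg::DomainAndOperandsAffineMapTransferInterface>(
          operand.getOwner());
  if (!transferOp)
    return failure();
  return transferOp.getMatchingIndexingMap(&operand);
}
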
From 1742a95cfb47e1354ee1bab0cbb3649dd60dcfdc Mon Sep 17 00:00:00 2001
From: Nicolas Vasilache <nico.vasilache at amd.com>
Date: Fri, 20 Jun 2025 15:18:50 +0200
Subject: [PATCH 2/2] Rename and add a test

---
 .../Dialect/Linalg/IR/LinalgInterfaces.td     |  5 ++--
 .../Dialect/Linalg/Transforms/Transforms.h    | 15 +++++-----
 .../TransformOps/LinalgTransformOps.cpp       | 14 ++++------
 .../Linalg/Transforms/PadTilingInterface.cpp  | 27 +++++++++++++-----
 .../transform-op-pad-tiling-interface.mlir    | 28 +++++++++++++++++++
 5 files changed, 63 insertions(+), 26 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index 53a9dac389287..74c4c0a8835f2 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -222,8 +222,7 @@ def LinalgFillOpInterface : OpInterface<"FillOpInterface"> {
   ];
 }
 
-def DomainAndOperandsAffineMapTransferInterface 
-    : OpInterface<"DomainAndOperandsAffineMapTransferInterface"> {
+def IndexingMapOpInterface : OpInterface<"IndexingMapOpInterface"> {
   let description = [{
     Interface for operations that connect an iteration domain to operands via
     affine maps. Provides methods to access indexing maps between iteration
@@ -274,7 +273,7 @@ def DomainAndOperandsAffineMapTransferInterface
 def LinalgStructuredInterface
     : OpInterface<"LinalgOp", [
       DestinationStyleOpInterface,
-      DomainAndOperandsAffineMapTransferInterface
+      IndexingMapOpInterface
   ]> {
   let cppNamespace = "::mlir::linalg";
   let methods = [
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index b59727f7cbfaa..a6dab03d6473f 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -612,8 +612,7 @@ using PadSizeComputationFunction =
         const PadTilingInterfaceOptions &)>;
 
 /// Specific helper for Linalg ops.
-FailureOr<SmallVector<OpFoldResult>>
-computeDomainAndOperandsAffineMapTransferInterfacePaddedShape(
+FailureOr<SmallVector<OpFoldResult>> computeIndexingMapOpInterfacePaddedShape(
     RewriterBase &rewriter, OpOperand &operandToPad,
     ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options);
 
@@ -627,12 +626,12 @@ computeDomainAndOperandsAffineMapTransferInterfacePaddedShape(
 //    tensor::PadOp.
 /// * The tensor::PadOp is returned on success.
 
-FailureOr<TilingInterface> rewriteAsPaddedOp(
-    RewriterBase &rewriter, TilingInterface opToPad,
-    const PadTilingInterfaceOptions &constOptions,
-    SmallVector<tensor::PadOp> &padOps,
-    PadSizeComputationFunction computePaddingSizeFun =
-        &computeDomainAndOperandsAffineMapTransferInterfacePaddedShape);
+FailureOr<TilingInterface>
+rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
+                  const PadTilingInterfaceOptions &constOptions,
+                  SmallVector<tensor::PadOp> &padOps,
+                  PadSizeComputationFunction computePaddingSizeFun =
+                      &computeIndexingMapOpInterfacePaddedShape);
 
 namespace detail {
 
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 5cbf0bf8500c7..5d55adbf46f36 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2229,14 +2229,12 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
       return diag;
     }
 
-    // Only DomainAndOperandsAffineMapTransferInterface ops for now, until
-    // TilingInterface exposes a loopsToOperand map / C++ APIs to compute the
-    // effect of padding on operands.
-    if (!isa<DomainAndOperandsAffineMapTransferInterface>(
-            targetOp.getOperation())) {
-      auto diag = emitSilenceableError()
-                  << "only DomainAndOperandsAffineMapTransferInterface ops "
-                     "supported atm";
+    // Only IndexingMapOpInterface ops for now, until TilingInterface exposes a
+    // loopsToOperand map / C++ APIs to compute the effect of padding on
+    // operands.
+    if (!isa<IndexingMapOpInterface>(targetOp.getOperation())) {
+      auto diag = emitSilenceableError() << "only IndexingMapOpInterface ops "
+                                            "supported atm";
       diag.attachNote(target->getLoc()) << "target op";
       return diag;
     }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
index 679f3b8322a1f..5383ae48aeb3a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -156,11 +156,11 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
 }
 
 FailureOr<SmallVector<OpFoldResult>>
-linalg::computeDomainAndOperandsAffineMapTransferInterfacePaddedShape(
+linalg::computeIndexingMapOpInterfacePaddedShape(
     RewriterBase &rewriter, OpOperand &operandToPad,
     ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options) {
-  auto transferOp = llvm::dyn_cast<DomainAndOperandsAffineMapTransferInterface>(
-      operandToPad.getOwner());
+  auto transferOp =
+      llvm::dyn_cast<IndexingMapOpInterface>(operandToPad.getOwner());
   if (!transferOp)
     return failure();
 
@@ -257,7 +257,18 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
   SmallVector<Value> newOperands;
   newOperands.reserve(opToPad->getNumOperands());
   for (OpOperand &opOperand : opToPad->getOpOperands()) {
-    LLVM_DEBUG(DBGS() << "--start padding oprd: " << opOperand.get() << "\n");
+    Value operand = opOperand.get();
+    LLVM_DEBUG(DBGS() << "--start padding oprd: " << operand << "\n");
+
+    // 2.a. Skip scalar-like operands.
+    Type operandType = operand.getType();
+    if (!isa<RankedTensorType>(operandType)) {
+      assert(!isa<ShapedType>(operandType) ||
+             isa<VectorType>(operandType) &&
+                 "Unexpected non-vector ShapedType");
+      newOperands.push_back(operand);
+      continue;
+    }
     // 2.a. Compute padded shape.
     FailureOr<SmallVector<OpFoldResult>> maybePaddedShape =
         computePaddingSizeFun(rewriter, opOperand, iterationDomain, options);
@@ -268,14 +279,16 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
     // 2.b. Expect proper `paddingValues`.
     // TODO: we may want to allow garbage padding in the future, in which case
     // we would just not assert.
-    assert(opOperand.getOperandNumber() < options.paddingValues.size() &&
-           "--no padding value specified");
+    if (opOperand.getOperandNumber() >= options.paddingValues.size()) {
+      return rewriter.notifyMatchFailure(opToPad,
+                                         "--no padding value specified");
+    }
     Attribute paddingValueAttr =
         options.paddingValues[opOperand.getOperandNumber()];
 
     // 2.c. Perform actual padding.
     Value paddedOperand = padOperand(
-        rewriter, opToPad, cast<TypedValue<RankedTensorType>>(opOperand.get()),
+        rewriter, opToPad, cast<TypedValue<RankedTensorType>>(operand),
         *maybePaddedShape, paddingValueAttr);
     LLVM_DEBUG(DBGS() << "--done padding operand: " << paddedOperand << "\n");
 
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
index c361885693cbc..3df513771bbaf 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
@@ -1,5 +1,33 @@
 // RUN: mlir-opt --transform-interpreter -canonicalize -split-input-file --verify-diagnostics %s | FileCheck %s
 
+//     CHECK-LABEL: pad_lhs
+//           CHECK:   linalg.fill ins(%{{.*}} : f32) outs(%{{.*}} : tensor<8x25xf32>) -> tensor<8x25xf32>
+func.func @pad_fill(%value: f32, %output: tensor<24x25xf32>) -> tensor<24x25xf32>
+{
+  %0 = linalg.fill ins(%value : f32) outs(%output : tensor<24x25xf32>) -> tensor<24x25xf32>
+  func.return %0 : tensor<24x25xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %fill = transform.structured.match ops{["linalg.fill"]} in %arg1
+      : (!transform.any_op) -> !transform.any_op
+
+    // Tile to 5 then pad to 8
+    %fill_l1, %loops_l1 = transform.structured.tile_using_for %fill tile_sizes [5] 
+      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+
+    %fill_padded, %_ = transform.structured.pad_tiling_interface %fill_l1 to padding_sizes [8] {
+      padding_values=[0.0 : f32, 0.0 : f32],
+      padding_dimensions=[0]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+
+    transform.yield
+  }
+}
+
+// -----
+
 //     CHECK-LABEL: pad_lhs
 func.func @pad_lhs(
   %arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>)

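[Editorial note, not part of the patch] After both patches, the C++ entry point keeps its shape but defaults to the renamed size-computation callback. A hedged sketch of a caller is below; `rewriter`, `opToPad` and `options` are assumed to be set up elsewhere.

// Sketch only: invoking rewriteAsPaddedOp with the renamed default callback
// computeIndexingMapOpInterfacePaddedShape, as declared in Transforms.h above.
SmallVector<tensor::PadOp> padOps;
FailureOr<TilingInterface> maybePadded = linalg::rewriteAsPaddedOp(
    rewriter, opToPad, options, padOps,
    /*computePaddingSizeFun=*/&linalg::computeIndexingMapOpInterfacePaddedShape);
if (failed(maybePadded)) {
  // Padding did not apply, e.g. when no padding value was provided for an
  // operand (the assert was turned into a notifyMatchFailure in PATCH 2/2).
}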

