[Mlir-commits] [mlir] 8faf35c - [mlir][linalg] Add scf.for loop peeling to codegen strategy

Matthias Springer llvmlistbot at llvm.org
Mon Sep 13 18:35:24 PDT 2021


Author: Matthias Springer
Date: 2021-09-14T10:35:01+09:00
New Revision: 8faf35c0a5aa8069b02badc7d8c11147b9bfd720

URL: https://github.com/llvm/llvm-project/commit/8faf35c0a5aa8069b02badc7d8c11147b9bfd720
DIFF: https://github.com/llvm/llvm-project/commit/8faf35c0a5aa8069b02badc7d8c11147b9bfd720.diff

LOG: [mlir][linalg] Add scf.for loop peeling to codegen strategy

Only scf.for loops are supported at the moment. linalg.tiled_loop support will be added in a subsequent commit.

Only static tensor sizes are supported. Loops for dynamic tensor sizes can be peeled, but the generated code is not optimal due to a missing canonicalization pattern.
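
The new option is exercised through the usual tiling pattern. A minimal usage sketch (this mirrors the test driver updated below rather than introducing any new API; the helper name is illustrative):

static void tileAndPeelMatmul(FuncOp funcOp) {
  MLIRContext *ctx = funcOp.getContext();
  // Tile a matmul with 256x128x512 tiles and peel the outermost tiled loop.
  // The "tile" marker matches the filter used in the tests of this commit.
  auto tilingOptions = linalg::LinalgTilingOptions()
                           .setTileSizes({256, 128, 512})
                           .setPeeledLoops({0});
  RewritePatternSet patterns(ctx);
  patterns.add<linalg::LinalgTilingPattern<linalg::MatmulOp>>(
      ctx, tilingOptions,
      linalg::LinalgTransformationFilter(Identifier::get("tile", ctx)));
  (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

Peeling loop 0 splits the outermost scf.for produced by tiling into a main loop that executes only full tiles plus a separate partial iteration for the remainder, so the linalg.matmul inside the main loop operates on statically sized tiles (see the CHECK-PEEL-0 test cases below).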

Differential Revision: https://reviews.llvm.org/D109043

Added: 
    mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir

Modified: 
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
    mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 9448a2d809675..fae2c28b0deec 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -524,6 +524,15 @@ struct LinalgTilingOptions {
     paddingValueComputationFunction = std::move(fun);
     return *this;
   }
+
+  /// Peel the specified loops.
+  SmallVector<int64_t> peeledLoops;
+
+  LinalgTilingOptions &setPeeledLoops(ArrayRef<int64_t> loops) {
+    peeledLoops.clear();
+    peeledLoops.append(loops.begin(), loops.end());
+    return *this;
+  }
 };
 
 /// Canonicalization patterns relevant to apply after tiling patterns. These are

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 8541b2aec6a47..d705dace83cd6 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -16,6 +16,7 @@
 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
+#include "mlir/Dialect/SCF/Transforms.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Utils/StaticValueUtils.h"
 #include "mlir/Dialect/Utils/StructuredOpsUtils.h"
@@ -26,6 +27,7 @@
 #include "mlir/Support/LLVM.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 #include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/TypeSwitch.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include <type_traits>
@@ -230,6 +232,21 @@ mlir::linalg::LinalgBaseTilingPattern::LinalgBaseTilingPattern(
     : RewritePattern(MatchAnyOpTypeTag(), benefit, context), filter(filter),
       options(options) {}
 
+/// Try to peel a loop `op` and return the new result.
+// TODO: Only scf.for loops are supported at the moment.
+static SmallVector<Value, 4> peelLoop(RewriterBase &rewriter, Operation *op) {
+  return llvm::TypeSwitch<Operation *, SmallVector<Value, 4>>(op)
+      .Case<scf::ForOp>([&](scf::ForOp forOp) {
+        scf::ForOp partialIteration;
+        if (succeeded(scf::peelAndCanonicalizeForLoop(rewriter, forOp,
+                                                      partialIteration)))
+          return partialIteration->getResults();
+        assert(!partialIteration && "expected that loop was not peeled");
+        return forOp->getResults();
+      })
+      .Default([&](Operation *op) { return op->getResults(); });
+}
+
 LogicalResult mlir::linalg::LinalgBaseTilingPattern::matchAndRewriteBase(
     Operation *op, PatternRewriter &rewriter, TiledLinalgOp &result) const {
   LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
@@ -242,31 +259,38 @@ LogicalResult mlir::linalg::LinalgBaseTilingPattern::matchAndRewriteBase(
 
   if (!res)
     return failure();
-
-  // Setup RAII guard to return properly.
-  LinalgOp paddedOp;
-  LinalgOp tiledOp = res->op;
-  auto guard = llvm::make_scope_exit([&]() {
-    // Return relevant information to derived pattern.
-    result = *res;
-    // Update filter.
-    if (paddedOp)
-      filter.replaceLinalgTransformationFilter(rewriter, paddedOp);
-    else
-      filter.replaceLinalgTransformationFilter(rewriter, tiledOp);
-  });
+  // Clear filter to stop recursive pattern application.
+  filter.replaceLinalgTransformationFilter(rewriter, res->op);
+
+  // Peel loops.
+  for (int64_t loop : options.peeledLoops) {
+    assert(loop < static_cast<int64_t>(res->loops.size()) &&
+           "requested peeling of non-existing loop");
+    Operation *loopOp = res->loops[loop];
+    SmallVector<Value, 4> loopResults = peelLoop(rewriter, loopOp);
+    // The result of the loop nest may change with peeling.
+    if (res->tensorResults.size() == loopOp->getNumResults() &&
+        std::equal(res->tensorResults.begin(), res->tensorResults.end(),
+                   loopOp->getResults().begin()))
+      res->tensorResults = loopResults;
+  }
 
   // Consider padding on the fly only if the op has tensor semantics.
   if (!options.paddingValueComputationFunction ||
-      !linalgOp.hasTensorSemantics())
+      !linalgOp.hasTensorSemantics()) {
+    result = *res;
     return success();
+  }
 
   // Try to pad on the fly by rewriting res->op as a padded op. If successful,
   // `res.op` is rewritten in static form with padded operands.
+  LinalgOp paddedOp;
   if (succeeded(rewriteAsPaddedOp(rewriter, res->op,
                                   options.paddingValueComputationFunction,
                                   paddedOp))) {
+    filter.replaceLinalgTransformationFilter(rewriter, paddedOp);
     res->op = paddedOp;
+    result = *res;
     // Do not perform replacement of `linalgOp`, let the derived patterns
     // do this as they see fit, from the resulting TiledLinalgOp.
     return success();

diff --git a/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
index 9698ca62e0656..4e5ae78c466ec 100644
--- a/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-and-pad-pattern tile-sizes-for-padding=2,3,4" -canonicalize | FileCheck %s
-// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-and-pad-pattern tile-sizes-for-padding=2,3" -canonicalize | FileCheck %s -check-prefix=CHECK-1DIM-TILE
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-pattern pad-tiles tile-sizes=2,3,4" -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-pattern pad-tiles tile-sizes=2,3" -canonicalize | FileCheck %s -check-prefix=CHECK-1DIM-TILE
 
 // CHECK-LABEL: func @matmul_tensors(
 // CHECK-SAME:    %[[TA:[0-9a-z]+]]: tensor<?x?xi8>
@@ -33,7 +33,7 @@ func @matmul_tensors(
 //      CHECK:       scf.yield %[[TD]] : tensor<?x?xi32>
 //      CHECK:     scf.yield %[[TD2]] : tensor<?x?xi32>
 //      CHECK:   scf.yield %[[TD1]] : tensor<?x?xi32>
-  %0 = linalg.matmul_i8_i8_i32 {__internal_linalg_transform__ = "tile-and-pad"}
+  %0 = linalg.matmul_i8_i8_i32 {__internal_linalg_transform__ = "tile"}
       ins(%arg0, %arg1: tensor<?x?xi8>, tensor<?x?xi8>)
      outs(%arg2: tensor<?x?xi32>)
     -> tensor<?x?xi32>
@@ -68,7 +68,7 @@ func @generic_scalar_and_tensor(
       indexing_maps =  [ affine_map<(d0, d1, d2) -> ()>,
                         affine_map<(d0, d1, d2) -> (d0, d1, d2)> ],
       iterator_types = ["parallel", "parallel", "parallel"]}
-      {__internal_linalg_transform__ = "tile-and-pad"}
+      {__internal_linalg_transform__ = "tile"}
      ins(%arg1 : f32)
     outs(%arg0: tensor<?x?x?xf32>) {
       ^bb(%0: f32, %1: f32) :
@@ -87,7 +87,7 @@ func @generic_scalar_and_tensor(
 func @matmul_partially_padded_tensors(
   %arg0: tensor<?x8xi8>, %arg1: tensor<8x?xi8>, %arg2: tensor<?x?xi32>)
     -> tensor<?x?xi32> {
-  %0 = linalg.matmul_i8_i8_i32 {__internal_linalg_transform__ = "tile-and-pad"}
+  %0 = linalg.matmul_i8_i8_i32 {__internal_linalg_transform__ = "tile"}
       ins(%arg0, %arg1: tensor<?x8xi8>, tensor<8x?xi8>)
      outs(%arg2: tensor<?x?xi32>)
     -> tensor<?x?xi32>

diff --git a/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
new file mode 100644
index 0000000000000..e661fbe20471c
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
@@ -0,0 +1,110 @@
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-pattern tile-sizes=256,128,512 peeled-loops=0" -canonicalize | \
+// RUN:     FileCheck %s -check-prefix=CHECK-PEEL-0
+
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-pattern tile-sizes=256,128,512 peeled-loops=1,2" -canonicalize | \
+// RUN:     FileCheck %s -check-prefix=CHECK-PEEL-12
+
+//     CHECK-PEEL-0: func @matmul_static_tensor
+// CHECK-PEEL-0-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-PEEL-0-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-PEEL-0-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-PEEL-0-DAG:   %[[c512:.*]] = constant 512 : index
+// CHECK-PEEL-0-DAG:   %[[c1280:.*]] = constant 1280 : index
+// CHECK-PEEL-0-DAG:   %[[c1600:.*]] = constant 1600 : index
+// CHECK-PEEL-0-DAG:   %[[c1700:.*]] = constant 1700 : index
+//     CHECK-PEEL-0:   scf.for %{{.*}} = %[[c0]] to %[[c1280]] step %[[c256]] {{.*}} {
+//     CHECK-PEEL-0:     scf.for %{{.*}} = %[[c0]] to %[[c1700]] step %[[c128]] {{.*}} {
+//     CHECK-PEEL-0:       scf.for %{{.*}} = %[[c0]] to %[[c1600]] step %[[c512]] {{.*}} {
+//     CHECK-PEEL-0:         linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<256x?xf32>)
+//     CHECK-PEEL-0:       }
+//     CHECK-PEEL-0:     }
+//     CHECK-PEEL-0:   }
+//     CHECK-PEEL-0:   scf.for %{{.*}} = %[[c0]] to %[[c1700]] step %[[c128]] {{.*}} {
+//     CHECK-PEEL-0:     scf.for %{{.*}} = %[[c0]] to %[[c1600]] step %[[c512]] {{.*}} {
+//     CHECK-PEEL-0:       linalg.matmul ins({{.*}} : tensor<220x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<220x?xf32>)
+//     CHECK-PEEL-0:     }
+//     CHECK-PEEL-0:   }
+
+//     CHECK-PEEL-12: func @matmul_static_tensor
+// CHECK-PEEL-12-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-PEEL-12-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-PEEL-12-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-PEEL-12-DAG:   %[[c512:.*]] = constant 512 : index
+// CHECK-PEEL-12-DAG:   %[[c1500:.*]] = constant 1500 : index
+// CHECK-PEEL-12-DAG:   %[[c1536:.*]] = constant 1536 : index
+// CHECK-PEEL-12-DAG:   %[[c1600:.*]] = constant 1600 : index
+// CHECK-PEEL-12-DAG:   %[[c1664:.*]] = constant 1664 : index
+//     CHECK-PEEL-12:   scf.for %{{.*}} = %[[c0]] to %[[c1500]] step %[[c256]] {{.*}} {
+//     CHECK-PEEL-12:     scf.for %{{.*}} = %[[c0]] to %[[c1664]] step %[[c128]] {{.*}} {
+//     CHECK-PEEL-12:       scf.for %{{.*}} = %[[c0]] to %[[c1536]] step %[[c512]] {{.*}} {
+//     CHECK-PEEL-12:         linalg.matmul ins({{.*}} : tensor<?x512xf32>, tensor<512x128xf32>) outs({{.*}} : tensor<?x128xf32>)
+//     CHECK-PEEL-12:       }
+//     CHECK-PEEL-12:       linalg.matmul ins({{.*}} : tensor<?x64xf32>, tensor<64x128xf32>) outs({{.*}} : tensor<?x128xf32>)
+//     CHECK-PEEL-12:     }
+//     CHECK-PEEL-12:     scf.for %{{.*}} = %[[c0]] to %[[c1600]] step %[[c512]] {{.*}} {
+//     CHECK-PEEL-12:       linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x36xf32>) outs({{.*}} : tensor<?x36xf32>)
+//     CHECK-PEEL-12:     }
+//     CHECK-PEEL-12:   }
+func @matmul_static_tensor(%arg0: tensor<1500x1600xf32>, %arg1: tensor<1600x1700xf32>)
+    -> tensor<1500x1700xf32> {
+  %out = linalg.init_tensor [1500, 1700] : tensor<1500x1700xf32>
+  %r = linalg.matmul {__internal_linalg_transform__ = "tile"}
+      ins(%arg0, %arg1: tensor<1500x1600xf32>, tensor<1600x1700xf32>)
+      outs(%out: tensor<1500x1700xf32>) -> tensor<1500x1700xf32>
+  return %r : tensor<1500x1700xf32>
+}
+
+// -----
+
+//     CHECK-PEEL-0: func @matmul_dynamic_tensor
+// CHECK-PEEL-0-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-PEEL-0-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-PEEL-0-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-PEEL-0-DAG:   %[[c512:.*]] = constant 512 : index
+//     CHECK-PEEL-0:   scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c256]] {{.*}} {
+//     CHECK-PEEL-0:     scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c128]] {{.*}} {
+//     CHECK-PEEL-0:       scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c512]] {{.*}} {
+//     CHECK-PEEL-0:         linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<256x?xf32>)
+//     CHECK-PEEL-0:       }
+//     CHECK-PEEL-0:     }
+//     CHECK-PEEL-0:   }
+//     CHECK-PEEL-0:   scf.for %{{.*}} {
+//     CHECK-PEEL-0:     scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c128]] {{.*}} {
+//     CHECK-PEEL-0:       scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c512]] {{.*}} {
+//     CHECK-PEEL-0:         linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<?x?xf32>)
+//     CHECK-PEEL-0:       }
+//     CHECK-PEEL-0:     }
+//     CHECK-PEEL-0:   }
+
+//     CHECK-PEEL-12: func @matmul_dynamic_tensor
+// CHECK-PEEL-12-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-PEEL-12-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-PEEL-12-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-PEEL-12-DAG:   %[[c512:.*]] = constant 512 : index
+//     CHECK-PEEL-12:   scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c256]] {{.*}} {
+//     CHECK-PEEL-12:     scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c128]] {{.*}} {
+//     CHECK-PEEL-12:       scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c512]] {{.*}} {
+//     CHECK-PEEL-12:         linalg.matmul ins({{.*}} : tensor<?x512xf32>, tensor<512x128xf32>) outs({{.*}} : tensor<?x128xf32>)
+//     CHECK-PEEL-12:       }
+//     CHECK-PEEL-12:       scf.for %{{.*}} {
+//     CHECK-PEEL-12:         linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x128xf32>) outs({{.*}} : tensor<?x128xf32>)
+//     CHECK-PEEL-12:       }
+//     CHECK-PEEL-12:     }
+//     CHECK-PEEL-12:     scf.for %{{.*}} {
+//     CHECK-PEEL-12:       scf.for %{{.*}} = %[[c0]] to %{{.*}} step %[[c512]] {{.*}} {
+//     CHECK-PEEL-12:         linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<?x?xf32>)
+//     CHECK-PEEL-12:       }
+//     CHECK-PEEL-12:     }
+//     CHECK-PEEL-12:   }
+func @matmul_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>)
+    -> tensor<?x?xf32> {
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
+  %d1 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
+  %out = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
+  %r = linalg.matmul {__internal_linalg_transform__ = "tile"}
+      ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x?xf32>)
+      outs(%out: tensor<?x?xf32>) -> tensor<?x?xf32>
+  return %r : tensor<?x?xf32>
+}

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
index bd63327b2e45d..cfa8b4ad6eab5 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -84,9 +84,9 @@ struct TestLinalgTransforms
       llvm::cl::desc("Test a set of patterns that rewrite a linalg contraction "
                      "in vector.contract form"),
       llvm::cl::init(false)};
-  Option<bool> testTileAndPadPattern{
-      *this, "test-tile-and-pad-pattern",
-      llvm::cl::desc("Test tile and pad pattern"), llvm::cl::init(false)};
+  Option<bool> testTilePattern{*this, "test-tile-pattern",
+                               llvm::cl::desc("Test tile pattern"),
+                               llvm::cl::init(false)};
   Option<int> testHoistPadding{*this, "test-hoist-padding",
                                llvm::cl::desc("Test hoist padding"),
                                llvm::cl::init(0)};
@@ -103,10 +103,17 @@ struct TestLinalgTransforms
       llvm::cl::desc("Test rewrite of subtensor(pad_tensor) into "
                      "pad_tensor(subtensor)"),
       llvm::cl::init(false)};
-  ListOption<int64_t> tileSizesForPadding{
-      *this, "tile-sizes-for-padding",
-      llvm::cl::desc("Linalg tile sizes when tile+pad"), llvm::cl::ZeroOrMore,
-      llvm::cl::MiscFlags::CommaSeparated};
+  Option<bool> padTiles{*this, "pad-tiles",
+                        llvm::cl::desc("Pad tiles when test-tile-pattern"),
+                        llvm::cl::init(false)};
+  ListOption<int64_t> peeledLoops{
+      *this, "peeled-loops",
+      llvm::cl::desc("Loops to be peeled when test-tile-pattern"),
+      llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated};
+  ListOption<int64_t> tileSizes{
+      *this, "tile-sizes",
+      llvm::cl::desc("Linalg tile sizes for test-tile-pattern"),
+      llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated};
   ListOption<unsigned> testInterchangePattern{
       *this, "test-interchange-pattern", llvm::cl::MiscFlags::CommaSeparated,
       llvm::cl::desc("Test the interchange pattern.")};
@@ -558,18 +565,22 @@ static Value getNeutralOfLinalgOp(OpBuilder &b, OpOperand &op) {
   return b.create<ConstantOp>(op.getOwner()->getLoc(), t, b.getZeroAttr(t));
 }
 
-static void applyTileAndPadPattern(FuncOp funcOp, ArrayRef<int64_t> tileSizes) {
+static void applyTilePattern(FuncOp funcOp, ArrayRef<int64_t> tileSizes,
+                             bool padTiles, ArrayRef<int64_t> peeledLoops) {
   MLIRContext *context = funcOp.getContext();
   RewritePatternSet tilingPattern(context);
   auto linalgTilingOptions =
-      linalg::LinalgTilingOptions()
-          .setTileSizes(tileSizes)
-          .setPaddingValueComputationFunction(getNeutralOfLinalgOp);
-  tilingPattern.add<linalg::LinalgTilingPattern<linalg::MatmulI8I8I32Op>,
+      linalg::LinalgTilingOptions().setTileSizes(tileSizes).setPeeledLoops(
+          peeledLoops);
+  if (padTiles)
+    linalgTilingOptions.setPaddingValueComputationFunction(
+        getNeutralOfLinalgOp);
+
+  tilingPattern.add<linalg::LinalgTilingPattern<linalg::MatmulOp>,
+                    linalg::LinalgTilingPattern<linalg::MatmulI8I8I32Op>,
                     linalg::LinalgTilingPattern<linalg::GenericOp>>(
       context, linalgTilingOptions,
-      linalg::LinalgTransformationFilter(
-          Identifier::get("tile-and-pad", context)));
+      linalg::LinalgTransformationFilter(Identifier::get("tile", context)));
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern));
 }
 
@@ -697,8 +708,8 @@ void TestLinalgTransforms::runOnFunction() {
   if (testTiledLoopPeeling.hasValue())
     return applyTiledLoopPeelingPattern(getFunction(), testTiledLoopPeeling,
                                         skipPartial);
-  if (testTileAndPadPattern)
-    return applyTileAndPadPattern(getFunction(), tileSizesForPadding);
+  if (testTilePattern)
+    return applyTilePattern(getFunction(), tileSizes, padTiles, peeledLoops);
   if (testHoistPadding) {
     getFunction().walk([&](linalg::PadTensorOp padTensorOp) {
       (void)linalg::hoistPaddingOnTensors(padTensorOp, testHoistPadding);


        

