[Mlir-commits] [mlir] 2190f8a - [mlir][linalg] Support tile+peel with TiledLoopOp

Matthias Springer llvmlistbot at llvm.org
Thu Sep 23 18:27:10 PDT 2021


Author: Matthias Springer
Date: 2021-09-24T10:23:31+09:00
New Revision: 2190f8a8b1e01b7bc7429eb490f3001a23f27df1

URL: https://github.com/llvm/llvm-project/commit/2190f8a8b1e01b7bc7429eb490f3001a23f27df1
DIFF: https://github.com/llvm/llvm-project/commit/2190f8a8b1e01b7bc7429eb490f3001a23f27df1.diff

LOG: [mlir][linalg] Support tile+peel with TiledLoopOp

Only scf.for was supported until now.

Differential Revision: https://reviews.llvm.org/D110220
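
For orientation, a hedged sketch (not part of the commit; operands, regions, and
the tiled matmul body are elided) of what peeling loop 0 of a linalg.tiled_loop
produces. Bounds are taken from the static 1500x1600 * 1600x1700 matmul in the
updated test, with tile sizes 256,128,512; the split point 1280 is the largest
multiple of 256 not exceeding 1500:

    // Before peeling: one loop whose last iteration along %i is partial.
    linalg.tiled_loop (%i, %j, %k) = (%c0, %c0, %c0)
        to (%c1500, %c1700, %c1600) step (%c256, %c128, %c512) { ... }

    // After peeling loop 0: a main loop containing only full 256-wide
    // iterations, followed by a remainder loop for %i in [1280, 1500).
    linalg.tiled_loop (%i, %j, %k) = (%c0, %c0, %c0)
        to (%c1280, %c1700, %c1600) step (%c256, %c128, %c512) { ... }
    linalg.tiled_loop (%i, %j, %k) = (%c1280, %c0, %c0)
        to (%c1500, %c1700, %c1600) step (%c256, %c128, %c512) { ... }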

Added: 
    

Modified: 
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
    mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 48f52d9baf2d3..533f295a35f56 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -258,7 +258,7 @@ mlir::linalg::LinalgBaseTilingPattern::LinalgBaseTilingPattern(
       options(options) {}
 
 /// Try to peel a loop `op` and return the new result.
-// TODO: Only scf.for loops are supported at the moment.
+// TODO: Add support for scf.parallel and affine.for loops.
 static SmallVector<Value, 4> peelLoop(RewriterBase &rewriter, Operation *op) {
   return llvm::TypeSwitch<Operation *, SmallVector<Value, 4>>(op)
       .Case<scf::ForOp>([&](scf::ForOp forOp) {
@@ -272,6 +272,46 @@ static SmallVector<Value, 4> peelLoop(RewriterBase &rewriter, Operation *op) {
       .Default([&](Operation *op) { return op->getResults(); });
 }
 
+/// Try to peel a TiledLoopOp and return the new result.
+static SmallVector<Value, 4> peelLoop(RewriterBase &rewriter,
+                                      TiledLoopOp tiledLoop, int64_t idx) {
+  assert(idx < static_cast<int64_t>(tiledLoop.iterator_types().size()) &&
+         "requested peeling of non-existing loop");
+  TiledLoopOp result;
+  if (succeeded(peelAndCanonicalizeTiledLoop(rewriter, tiledLoop, idx, result)))
+    return result->getResults();
+  assert(!result && "expected that loop was not peeled");
+  return tiledLoop->getResults();
+}
+
+/// Peel loops after tiling.
+static void peelLoops(RewriterBase &rewriter, TiledLinalgOp &res,
+                      const LinalgTilingOptions &options) {
+  for (int64_t loop : options.peeledLoops) {
+    assert(loop < static_cast<int64_t>(res.loops.size()) &&
+           "requested peeling of non-existing loop");
+    SmallVector<Value, 4> loopResults;
+    Operation *loopOp = res.loops[loop];
+    if (options.loopType == LinalgTilingLoopType::TiledLoops) {
+      assert(llvm::all_of(
+                 res.loops,
+                 [&](Operation *op) { return op == res.loops.front(); }) &&
+             "expected that all loop ops are the same TiledLoopOp");
+      auto tiledLoopOp = dyn_cast<TiledLoopOp>(loopOp);
+      assert(tiledLoopOp && "expected TiledLoopOp");
+      loopResults = peelLoop(rewriter, tiledLoopOp, loop);
+    } else {
+      loopResults = peelLoop(rewriter, loopOp);
+    }
+
+    // The result of the loop nest may change with peeling.
+    if (res.tensorResults.size() == loopOp->getNumResults() &&
+        std::equal(res.tensorResults.begin(), res.tensorResults.end(),
+                   loopOp->getResults().begin()))
+      res.tensorResults = loopResults;
+  }
+}
+
 LogicalResult mlir::linalg::LinalgBaseTilingPattern::matchAndRewriteBase(
     Operation *op, PatternRewriter &rewriter, TiledLinalgOp &result) const {
   LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
@@ -288,17 +328,7 @@ LogicalResult mlir::linalg::LinalgBaseTilingPattern::matchAndRewriteBase(
   filter.replaceLinalgTransformationFilter(rewriter, res->op);
 
   // Peel loops.
-  for (int64_t loop : options.peeledLoops) {
-    assert(loop < static_cast<int64_t>(res->loops.size()) &&
-           "requested peeling of non-existing loop");
-    Operation *loopOp = res->loops[loop];
-    SmallVector<Value, 4> loopResults = peelLoop(rewriter, loopOp);
-    // The result of the loop nest may change with peeling.
-    if (res->tensorResults.size() == loopOp->getNumResults() &&
-        std::equal(res->tensorResults.begin(), res->tensorResults.end(),
-                   loopOp->getResults().begin()))
-      res->tensorResults = loopResults;
-  }
+  peelLoops(rewriter, *res, options);
 
   // Consider padding on the fly only if the op has tensor semantics.
   if (!options.paddingValueComputationFunction ||

diff --git a/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
index e661fbe20471c..0b42c8ad28ace 100644
--- a/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
@@ -4,6 +4,12 @@
 // RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-pattern tile-sizes=256,128,512 peeled-loops=1,2" -canonicalize | \
 // RUN:     FileCheck %s -check-prefix=CHECK-PEEL-12
 
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-pattern tile-sizes=256,128,512 loop-type=tiled_loop peeled-loops=0" -canonicalize | \
+// RUN:     FileCheck %s -check-prefix=CHECK-TILED-LOOP-PEEL-0
+
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-pattern tile-sizes=256,128,512 loop-type=tiled_loop peeled-loops=0,1" -canonicalize | \
+// RUN:     FileCheck %s -check-prefix=CHECK-TILED-LOOP-PEEL-01
+
 //     CHECK-PEEL-0: func @matmul_static_tensor
 // CHECK-PEEL-0-DAG:   %[[c0:.*]] = constant 0 : index
 // CHECK-PEEL-0-DAG:   %[[c128:.*]] = constant 128 : index
@@ -45,6 +51,42 @@
 //     CHECK-PEEL-12:       linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x36xf32>) outs({{.*}} : tensor<?x36xf32>)
 //     CHECK-PEEL-12:     }
 //     CHECK-PEEL-12:   }
+
+//     CHECK-TILED-LOOP-PEEL-0: func @matmul_static_tensor
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c512:.*]] = constant 512 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c1280:.*]] = constant 1280 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c1500:.*]] = constant 1500 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c1600:.*]] = constant 1600 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c1700:.*]] = constant 1700 : index
+//     CHECK-TILED-LOOP-PEEL-0:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c0]], %[[c0]], %[[c0]]) to (%[[c1280]], %[[c1700]], %[[c1600]]) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-0:     linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<256x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-0:   }
+//     CHECK-TILED-LOOP-PEEL-0:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c1280]], %[[c0]], %[[c0]]) to (%[[c1500]], %[[c1700]], %[[c1600]]) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-0:     linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<?x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-0:   }
+
+//     CHECK-TILED-LOOP-PEEL-01: func @matmul_static_tensor
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c512:.*]] = constant 512 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c1280:.*]] = constant 1280 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c1500:.*]] = constant 1500 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c1600:.*]] = constant 1600 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c1664:.*]] = constant 1664 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c1700:.*]] = constant 1700 : index
+//     CHECK-TILED-LOOP-PEEL-01:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c0]], %[[c0]], %[[c0]]) to (%[[c1280]], %[[c1664]], %[[c1600]]) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-01:     linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x128xf32>) outs({{.*}} : tensor<256x128xf32>)
+//     CHECK-TILED-LOOP-PEEL-01:   }
+//     CHECK-TILED-LOOP-PEEL-01:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c0]], %[[c1664]], %[[c0]]) to (%[[c1280]], %[[c1700]], %[[c1600]]) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-01:     linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<256x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-01:   }
+//     CHECK-TILED-LOOP-PEEL-01:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c1280]], %[[c0]], %[[c0]]) to (%[[c1500]], %[[c1700]], %[[c1600]]) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-01:     linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<?x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-01:   }
 func @matmul_static_tensor(%arg0: tensor<1500x1600xf32>, %arg1: tensor<1600x1700xf32>)
     -> tensor<1500x1700xf32> {
   %out = linalg.init_tensor [1500, 1700] : tensor<1500x1700xf32>
@@ -96,6 +138,33 @@ func @matmul_static_tensor(%arg0: tensor<1500x1600xf32>, %arg1: tensor<1600x1700
 //     CHECK-PEEL-12:       }
 //     CHECK-PEEL-12:     }
 //     CHECK-PEEL-12:   }
+
+//     CHECK-TILED-LOOP-PEEL-0: func @matmul_dynamic_tensor
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-TILED-LOOP-PEEL-0-DAG:   %[[c512:.*]] = constant 512 : index
+//     CHECK-TILED-LOOP-PEEL-0:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c0]], %[[c0]], %[[c0]]) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-0:     linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<256x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-0:   }
+//     CHECK-TILED-LOOP-PEEL-0:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %[[c0]], %[[c0]]) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-0:     linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<?x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-0:   }
+
+//     CHECK-TILED-LOOP-PEEL-01: func @matmul_dynamic_tensor
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c0:.*]] = constant 0 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c128:.*]] = constant 128 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c256:.*]] = constant 256 : index
+// CHECK-TILED-LOOP-PEEL-01-DAG:   %[[c512:.*]] = constant 512 : index
+//     CHECK-TILED-LOOP-PEEL-01:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c0]], %[[c0]], %[[c0]]) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-01:     linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x128xf32>) outs({{.*}} : tensor<256x128xf32>)
+//     CHECK-TILED-LOOP-PEEL-01:   }
+//     CHECK-TILED-LOOP-PEEL-01:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%[[c0]], %{{.*}}, %[[c0]]) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-01:     linalg.matmul ins({{.*}} : tensor<256x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<256x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-01:   }
+//     CHECK-TILED-LOOP-PEEL-01:   linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %[[c0]], %[[c0]]) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%[[c256]], %[[c128]], %[[c512]])
+//     CHECK-TILED-LOOP-PEEL-01:     linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x?xf32>) outs({{.*}} : tensor<?x?xf32>)
+//     CHECK-TILED-LOOP-PEEL-01:   }
 func @matmul_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>)
     -> tensor<?x?xf32> {
   %c0 = constant 0 : index

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
index 688da87887ab3..b401ce663a466 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -129,6 +129,11 @@ struct TestLinalgTransforms
       *this, "skip-partial",
       llvm::cl::desc("Skip loops inside partial iterations during peeling"),
       llvm::cl::init(false)};
+  Option<std::string> loopType{
+      *this, "loop-type",
+      llvm::cl::desc("Specify the type of loops to generate: for, parallel or "
+                     "tiled_loop"),
+      llvm::cl::init("for")};
 };
 } // end anonymous namespace
 
@@ -569,13 +574,21 @@ static Value getNeutralOfLinalgOp(OpBuilder &b, OpOperand &op) {
   return b.create<ConstantOp>(op.getOwner()->getLoc(), t, b.getZeroAttr(t));
 }
 
-static void applyTilePattern(FuncOp funcOp, ArrayRef<int64_t> tileSizes,
-                             bool padTiles, ArrayRef<int64_t> peeledLoops,
+static void applyTilePattern(FuncOp funcOp, std::string loopType,
+                             ArrayRef<int64_t> tileSizes, bool padTiles,
+                             ArrayRef<int64_t> peeledLoops,
                              bool scalarizeDynamicDims) {
   MLIRContext *context = funcOp.getContext();
   RewritePatternSet tilingPattern(context);
-  auto linalgTilingOptions =
-      linalg::LinalgTilingOptions().setPeeledLoops(peeledLoops);
+  LinalgTilingLoopType type =
+      llvm::StringSwitch<LinalgTilingLoopType>(loopType)
+          .Case("for", LinalgTilingLoopType::Loops)
+          .Case("affine", LinalgTilingLoopType::AffineLoops)
+          .Case("parallel", LinalgTilingLoopType::ParallelLoops)
+          .Case("tiled_loop", LinalgTilingLoopType::TiledLoops);
+  auto linalgTilingOptions = linalg::LinalgTilingOptions()
+                                 .setPeeledLoops(peeledLoops)
+                                 .setLoopType(type);
   if (scalarizeDynamicDims) {
     linalgTilingOptions.scalarizeDynamicDims();
     assert(tileSizes.empty() &&
@@ -720,10 +733,10 @@ void TestLinalgTransforms::runOnFunction() {
     return applyTiledLoopPeelingPattern(getFunction(), testTiledLoopPeeling,
                                         skipPartial);
   if (testTilePattern)
-    return applyTilePattern(getFunction(), tileSizes, padTiles, peeledLoops,
-                            /*scalarizeDynamicDims=*/false);
+    return applyTilePattern(getFunction(), loopType, tileSizes, padTiles,
+                            peeledLoops, /*scalarizeDynamicDims=*/false);
   if (testTileScalarizeDynamicDims)
-    return applyTilePattern(getFunction(), tileSizes, padTiles,
+    return applyTilePattern(getFunction(), loopType, tileSizes, padTiles,
                             /*peeledLoops=*/{}, /*scalarizeDynamicDims=*/true);
   if (testHoistPadding) {
     getFunction().walk([&](linalg::PadTensorOp padTensorOp) {

