[Mlir-commits] [mlir] 347d59b - [mlir][Linalg] Convolution tiling added to ConvOp vectorization pass

Jakub Lichman llvmlistbot at llvm.org
Thu Sep 17 02:40:15 PDT 2020


Author: Jakub Lichman
Date: 2020-09-17T09:39:41Z
New Revision: 347d59b16c71194d7a9372dd69d3e41ebeca3113

URL: https://github.com/llvm/llvm-project/commit/347d59b16c71194d7a9372dd69d3e41ebeca3113
DIFF: https://github.com/llvm/llvm-project/commit/347d59b16c71194d7a9372dd69d3e41ebeca3113.diff

LOG: [mlir][Linalg] Convolution tiling added to ConvOp vectorization pass

ConvOp vectorization so far supported only convolutions of static shapes whose
dimensions are either of size 3 (vectorized) or 1 (not vectorized), since the
underlying vectors have to be of static shape as well. This commit adds support
for convolutions of any size, as well as for dynamic shapes, by reusing the
existing matmul infrastructure to tile both the input and the kernel down to
sizes accepted by the previous version of ConvOp vectorization.
In the future this pass can be extended to take a "tiling mask" as user input,
which will enable vectorization of user-specified dimensions.

Differential Revision: https://reviews.llvm.org/D87676
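
With this change, populateConvVectorizationPatterns fills one pattern list per
stage (tiling, promotion, vectorization) instead of a single list. Below is a
minimal sketch of how a pass can drive these stages; it mirrors the staged
application added to TestConvVectorization.cpp in this commit, assumes a
ModuleOp `module` and an MLIRContext `*context` are in scope, and reduces the
stage-3 cleanup to a no-op for brevity:

  // Stage 1: tiling, promotion and vectorization pattern lists, one per stage,
  // filled by the new entry point and applied list by list.
  SmallVector<OwningRewritePatternList, 4> stage1Patterns;
  linalg::populateConvVectorizationPatterns(context, stage1Patterns);

  // Stage 2: canonicalizations run after each stage-1 list.
  OwningRewritePatternList stage2Patterns =
      linalg::getLinalgTilingCanonicalizationPatterns(context);
  stage2Patterns.insert<linalg::AffineMinSCFCanonicalizationPattern>(context);

  // Stage 3: inter-stage cleanup transforms; a no-op placeholder here.
  auto stage3Transforms = [](Operation *op) { return success(); };

  linalg::applyStagedPatterns(module, stage1Patterns, stage2Patterns,
                              stage3Transforms);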

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-ncw-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-nwc-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nchw-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nhwc-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ncdhw-call.mlir
    mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ndhwc-call.mlir
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
    mlir/test/Conversion/LinalgToVector/linalg-to-vector.mlir
    mlir/test/lib/Transforms/TestConvVectorization.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index b55c429a9d02..a34ea00fdf5d 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -31,8 +31,8 @@ struct TiledLinalgOp {
 };
 
 /// Populates patterns for vectorization of all ConvN-D ops.
-void populateConvVectorizationPatterns(MLIRContext *context,
-                                       OwningRewritePatternList &patterns);
+void populateConvVectorizationPatterns(
+    MLIRContext *context, SmallVectorImpl<OwningRewritePatternList> &patterns);
 
 /// Performs standalone tiling of a single LinalgOp by `tileSizes`.
 /// and permute the loop nest according to `interchangeVector`
@@ -589,6 +589,10 @@ class ConvOpVectorization : public OpRewritePattern<ConvOp> {
 
   LogicalResult matchAndRewrite(ConvOp minOp,
                                 PatternRewriter &rewriter) const override;
+
+  // TODO: Make these pass arguments.
+  static const int tileSize = 3;
+  static const int noTile = 1;
 };
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-call.mlir
index 1b3ee65f13d9..8f3c6df79f90 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=4" -linalg-tile="linalg-tile-sizes=1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=4" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-ncw-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-ncw-call.mlir
index 2647ee3d663c..46634a7e5921 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-ncw-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-ncw-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,0,4" -linalg-tile="linalg-tile-sizes=1,1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,0,4" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-nwc-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-nwc-call.mlir
index 5cc4de3844aa..a6aeb30fc153 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-nwc-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-1d-nwc-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,4" -linalg-tile="linalg-tile-sizes=1,1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,4" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-call.mlir
index 38420974ad98..819d95ef5da0 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,2" -linalg-tile="linalg-tile-sizes=1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,2" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nchw-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nchw-call.mlir
index fbd831f6801a..fb0e70861864 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nchw-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nchw-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1,1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,0,4,4" -linalg-tile="linalg-tile-sizes=1,1,1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,0,4,4" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nhwc-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nhwc-call.mlir
index 422720da429e..5888eec7d67a 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nhwc-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-2d-nhwc-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1,1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,3,2" -linalg-tile="linalg-tile-sizes=1,1,1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,3,2" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-call.mlir
index 8f38962acf8b..f0ca37f86fcd 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,2,2" -linalg-tile="linalg-tile-sizes=1,1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,2,2" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ncdhw-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ncdhw-call.mlir
index 2ad2b4fc3465..a56a260b9cd8 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ncdhw-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ncdhw-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1,1,1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,0,5,5,5" -linalg-tile="linalg-tile-sizes=1,1,1,1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,0,5,5,5" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ndhwc-call.mlir b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ndhwc-call.mlir
index 4f1392363bb2..37fc6453e5dd 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ndhwc-call.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/test-conv-3d-ndhwc-call.mlir
@@ -9,17 +9,13 @@
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=1,1,1,1,1" -test-conv-vectorization \
-// RUN:   -convert-linalg-to-loops -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s
 
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,5,5,5" -linalg-tile="linalg-tile-sizes=1,1,1,1,1" \
-// RUN:   -test-conv-vectorization -convert-linalg-to-loops \
-// RUN:   -test-vector-contraction-conversion=vector-outerproduct=0 \
-// RUN:   -convert-vector-to-scf -convert-linalg-to-llvm | \
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,5,5,5" \
+// RUN:   -test-conv-vectorization -convert-linalg-to-llvm | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
 // RUN: | FileCheck %s

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index a8b11a48df17..9a225dd81c79 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -371,7 +371,6 @@ LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
 template <class ConvOp, int N>
 LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
     ConvOp op, PatternRewriter &rewriter) const {
-  unsigned dimSize = 3;
   Location loc = op.getLoc();
   MLIRContext *context = op.getContext();
   edsc::ScopedContext scope(rewriter, loc);
@@ -391,7 +390,7 @@ LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
   for (unsigned i = 0; i < N; i++) {
     if (!mask[i] && (inShape[i] != 1 || kShape[i] != 1))
       return failure();
-    if (mask[i] && (inShape[i] != dimSize || kShape[i] != dimSize))
+    if (mask[i] && (inShape[i] != tileSize || kShape[i] != tileSize))
       return failure();
 
     if (mask[i])
@@ -409,7 +408,7 @@ LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
   auto map = AffineMap::get(rank, 0, mapping, context);
   SmallVector<Value, 4> zeros(rank, std_constant_index(0));
   auto vecType =
-      VectorType::get(SmallVector<int64_t, 4>(numDims, dimSize), elemType);
+      VectorType::get(SmallVector<int64_t, 4>(numDims, tileSize), elemType);
 
   auto inputVec = vector_transfer_read(vecType, input, zeros, map);
   auto kernelVec = vector_transfer_read(vecType, kernel, zeros, map);
@@ -433,32 +432,76 @@ LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
   return success();
 }
 
+using ConvOpConst = ConvOpVectorization<ConvWOp, 1>;
+
+/// Inserts tiling, promotion and vectorization pattern for ConvOp
+/// conversion into corresponding pattern lists.
+template <typename ConvOp, unsigned N>
+static void
+populateVectorizationPatterns(OwningRewritePatternList &tilingPatterns,
+                              OwningRewritePatternList &promotionPatterns,
+                              OwningRewritePatternList &vectorizationPatterns,
+                              ArrayRef<int64_t> tileSizes,
+                              MLIRContext *context) {
+  constexpr static StringRef kTiledMarker = "TILED";
+  constexpr static StringRef kPromotedMarker = "PROMOTED";
+  tilingPatterns.insert<LinalgTilingPattern<ConvOp>>(
+      context, LinalgTilingOptions().setTileSizes(tileSizes),
+      LinalgMarker({}, Identifier::get(kTiledMarker, context)));
+
+  promotionPatterns.insert<LinalgPromotionPattern<ConvOp>>(
+      context, LinalgPromotionOptions().setUseFullTileBuffersByDefault(true),
+      LinalgMarker(Identifier::get(kTiledMarker, context),
+                   Identifier::get(kPromotedMarker, context)));
+
+  SmallVector<bool, 4> mask(N);
+  int offset = tileSizes.size() - N;
+  std::transform(tileSizes.begin() + offset, tileSizes.end(), mask.begin(),
+                 [](int64_t i) -> bool { return i != ConvOpConst::noTile; });
+
+  vectorizationPatterns.insert<ConvOpVectorization<ConvOp, N>>(context, mask);
+}
+
 void mlir::linalg::populateConvVectorizationPatterns(
-    MLIRContext *context, OwningRewritePatternList &patterns) {
-  patterns.insert<ConvOpVectorization<linalg::ConvWOp, 1>>(
-      context, SmallVector<bool, 4>{true});
+    MLIRContext *context, SmallVectorImpl<OwningRewritePatternList> &patterns) {
+  const int64_t tileSize = ConvOpConst::tileSize;
+  const int64_t noTile = ConvOpConst::noTile;
+  auto makeTileSizes = [&](unsigned numNoTile, unsigned numTile) {
+    SmallVector<int64_t, 10> result(numNoTile, noTile);
+    result.append(numTile, tileSize);
+    return result;
+  };
+
+  OwningRewritePatternList tiling, promotion, vectorization;
+  populateVectorizationPatterns<ConvWOp, 1>(
+      tiling, promotion, vectorization,
+      makeTileSizes(/*numNoTile=*/1, /*numTile*/ 1), context);
+
+  populateVectorizationPatterns<ConvNWCOp, 3>(tiling, promotion, vectorization,
+                                              makeTileSizes(3, 2), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvNWCOp, 3>>(
-      context, SmallVector<bool, 4>{false, true, true});
+  populateVectorizationPatterns<ConvNCWOp, 3>(tiling, promotion, vectorization,
+                                              makeTileSizes(3, 2), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvNCWOp, 3>>(
-      context, SmallVector<bool, 4>{false, true, true});
+  populateVectorizationPatterns<ConvHWOp, 2>(tiling, promotion, vectorization,
+                                             makeTileSizes(2, 2), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvHWOp, 2>>(
-      context, SmallVector<bool, 4>{true, true});
+  populateVectorizationPatterns<ConvNHWCOp, 4>(tiling, promotion, vectorization,
+                                               makeTileSizes(4, 3), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvNHWCOp, 4>>(
-      context, SmallVector<bool, 4>{false, true, true, true});
+  populateVectorizationPatterns<ConvNCHWOp, 4>(tiling, promotion, vectorization,
+                                               makeTileSizes(4, 3), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvNCHWOp, 4>>(
-      context, SmallVector<bool, 4>{false, true, true, true});
+  populateVectorizationPatterns<ConvDHWOp, 3>(tiling, promotion, vectorization,
+                                              makeTileSizes(3, 3), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvDHWOp, 3>>(
-      context, SmallVector<bool, 4>{true, true, true});
+  populateVectorizationPatterns<ConvNDHWCOp, 5>(
+      tiling, promotion, vectorization, makeTileSizes(5, 4), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvNDHWCOp, 5>>(
-      context, SmallVector<bool, 4>{false, true, true, true, true});
+  populateVectorizationPatterns<ConvNCDHWOp, 5>(
+      tiling, promotion, vectorization, makeTileSizes(5, 4), context);
 
-  patterns.insert<ConvOpVectorization<linalg::ConvNCDHWOp, 5>>(
-      context, SmallVector<bool, 4>{false, true, true, true, true});
+  patterns.push_back(std::move(tiling));
+  patterns.push_back(std::move(promotion));
+  patterns.push_back(std::move(vectorization));
 }

diff --git a/mlir/test/Conversion/LinalgToVector/linalg-to-vector.mlir b/mlir/test/Conversion/LinalgToVector/linalg-to-vector.mlir
index 487718301d00..c2e8a31eb443 100644
--- a/mlir/test/Conversion/LinalgToVector/linalg-to-vector.mlir
+++ b/mlir/test/Conversion/LinalgToVector/linalg-to-vector.mlir
@@ -1,167 +1,52 @@
 // RUN: mlir-opt %s -test-conv-vectorization --cse | FileCheck %s
 
-// CHECK-DAG:  #[[$map0:.*]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG:  #[[$map1:.*]] = affine_map<(d0) -> ()>
-// CHECK-DAG:  #[[$map2:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
-// CHECK-DAG:  #[[$map3:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG:  #[[$map4:.*]] = affine_map<(d0, d1) -> ()>
-// CHECK-DAG:  #[[$map5:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
-// CHECK-DAG:  #[[$map6:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG:  #[[$map7:.*]] = affine_map<(d0, d1, d2) -> ()>
-// CHECK-DAG:  #[[$map8:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d1, d2, d3, d4)>
-// CHECK-DAG:  #[[$map9:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-// CHECK-DAG:  #[[$map10:.*]] = affine_map<(d0, d1, d2, d3) -> ()>
+// CHECK-DAG:  #[[$map0:.*]] = affine_map<(d0)[s0] -> (1, -d0 + s0)>
+// CHECK-DAG:  #[[$map1:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG:  #[[$map2:.*]] = affine_map<(d0, d1) -> (d0 + d1)>
+// CHECK-DAG:  #[[$map3:.*]] = affine_map<(d0, d1)[s0] -> (3, -d0 - d1 + s0)>
+// CHECK-DAG:  #[[$map4:.*]] = affine_map<(d0)[s0] -> (3, -d0 + s0)>
+// CHECK-DAG:  #[[$map5:.*]] = affine_map<(d0) -> (d0)>
 
-func @conv_1d(%arg0: memref<3xf32>, %arg1: memref<3xf32>, %arg2: memref<?xf32>) {
-  linalg.conv_1d %arg0, %arg1, %arg2 : (memref<3xf32>, memref<3xf32>, memref<?xf32>)
+func @conv_1d(%arg0: memref<?xf32>, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
+  linalg.conv_1d %arg0, %arg1, %arg2 : (memref<?xf32>, memref<?xf32>, memref<?xf32>)
   return
 }
 
 // CHECK-LABEL: @conv_1d
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<3xf32>
+//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<?xf32>
+//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<?xf32>
 //  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]]], %[[cst]] : memref<3xf32>, vector<3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]]], %[[cst]] : memref<3xf32>, vector<3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map0]], #[[$map0]], #[[$map1]]], iterator_types = ["reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3xf32>, vector<3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]]] : memref<?xf32>
-//       CHECK:   return
-
-func @conv_1d_ncw(%arg0: memref<1x3x3xf32>, %arg1: memref<1x3x3xf32>, %arg2: memref<?x?x?xf32>) {
-  linalg.conv_1d_ncw %arg0, %arg1, %arg2 : (memref<1x3x3xf32>, memref<1x3x3xf32>, memref<?x?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_1d_ncw
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<1x3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<1x3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3xf32>, vector<3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3xf32>, vector<3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map3]], #[[$map3]], #[[$map4]]], iterator_types = ["reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3xf32>, vector<3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]], %[[c0]]] : memref<?x?x?xf32>
-//       CHECK:   return
-
-
-func @conv_1d_nwc(%arg0: memref<1x3x3xf32>, %arg1: memref<1x3x3xf32>, %arg2: memref<?x?x?xf32>) {
-  linalg.conv_1d_nwc %arg0, %arg1, %arg2 : (memref<1x3x3xf32>, memref<1x3x3xf32>, memref<?x?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_1d_nwc
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<1x3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<1x3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3xf32>, vector<3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3xf32>, vector<3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map3]], #[[$map3]], #[[$map4]]], iterator_types = ["reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3xf32>, vector<3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]], %[[c0]]] : memref<?x?x?xf32>
-//       CHECK:   return
-
-func @conv_2d(%arg0: memref<3x3xf32>, %arg1: memref<3x3xf32>, %arg2: memref<?x?xf32>) {
-  linalg.conv_2d %arg0, %arg1, %arg2 : (memref<3x3xf32>, memref<3x3xf32>, memref<?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_2d
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]]], %[[cst]] : memref<3x3xf32>, vector<3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]]], %[[cst]] : memref<3x3xf32>, vector<3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map3]], #[[$map3]], #[[$map4]]], iterator_types = ["reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3xf32>, vector<3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]]] : memref<?x?xf32>
-//       CHECK:   return
-
-func @conv_2d_nchw(%arg0: memref<1x3x3x3xf32>, %arg1: memref<1x3x3x3xf32>, %arg2: memref<?x?x?x?xf32>) {
-  linalg.conv_2d_nchw %arg0, %arg1, %arg2 : (memref<1x3x3x3xf32>, memref<1x3x3x3xf32>, memref<?x?x?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_2d_nchw
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<1x3x3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<1x3x3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?x?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3xf32>, vector<3x3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3xf32>, vector<3x3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map6]], #[[$map6]], #[[$map7]]], iterator_types = ["reduction", "reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3x3xf32>, vector<3x3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]], %[[c0]], %[[c0]]] : memref<?x?x?x?xf32>
-//       CHECK:   return
-
-func @conv_2d_nhwc(%arg0: memref<1x3x3x3xf32>, %arg1: memref<1x3x3x3xf32>, %arg2: memref<?x?x?x?xf32>) {
-  linalg.conv_2d_nhwc %arg0, %arg1, %arg2 : (memref<1x3x3x3xf32>, memref<1x3x3x3xf32>, memref<?x?x?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_2d_nhwc
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<1x3x3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<1x3x3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?x?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3xf32>, vector<3x3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3xf32>, vector<3x3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map6]], #[[$map6]], #[[$map7]]], iterator_types = ["reduction", "reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3x3xf32>, vector<3x3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]], %[[c0]], %[[c0]]] : memref<?x?x?x?xf32>
-//       CHECK:   return
-
-func @conv_3d(%arg0: memref<3x3x3xf32>, %arg1: memref<3x3x3xf32>, %arg2: memref<?x?x?xf32>) {
-  linalg.conv_3d %arg0, %arg1, %arg2 : (memref<3x3x3xf32>, memref<3x3x3xf32>, memref<?x?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_3d
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<3x3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<3x3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<3x3x3xf32>, vector<3x3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<3x3x3xf32>, vector<3x3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map6]], #[[$map6]], #[[$map7]]], iterator_types = ["reduction", "reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3x3xf32>, vector<3x3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]], %[[c0]]] : memref<?x?x?xf32>
-//       CHECK:   return
-
-func @conv_3d_ncdhw(%arg0: memref<1x3x3x3x3xf32>, %arg1: memref<1x3x3x3x3xf32>, %arg2: memref<?x?x?x?x?xf32>) {
-  linalg.conv_3d_ncdhw %arg0, %arg1, %arg2 : (memref<1x3x3x3x3xf32>, memref<1x3x3x3x3xf32>, memref<?x?x?x?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_3d_ncdhw
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<1x3x3x3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<1x3x3x3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?x?x?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3x3xf32>, vector<3x3x3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3x3xf32>, vector<3x3x3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map9]], #[[$map9]], #[[$map10]]], iterator_types = ["reduction", "reduction", "reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3x3x3xf32>, vector<3x3x3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]], %[[c0]], %[[c0]], %[[c0]]] : memref<?x?x?x?x?xf32>
-//       CHECK:   return
-
-func @conv_3d_ndhwc(%arg0: memref<1x3x3x3x3xf32>, %arg1: memref<1x3x3x3x3xf32>, %arg2: memref<?x?x?x?x?xf32>) {
-  linalg.conv_3d_ndhwc %arg0, %arg1, %arg2 : (memref<1x3x3x3x3xf32>, memref<1x3x3x3x3xf32>, memref<?x?x?x?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: @conv_3d_ndhwc
-//  CHECK-SAME: %[[arg0:[a-zA-Z0-9]+]]: memref<1x3x3x3x3xf32>
-//  CHECK-SAME: %[[arg1:[a-zA-Z0-9]+]]: memref<1x3x3x3x3xf32>
-//  CHECK-SAME: %[[arg2:[a-zA-Z0-9]+]]: memref<?x?x?x?x?xf32
-//       CHECK:   %[[c0:.*]] = constant 0 : index
-//       CHECK:   %[[cst:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[v0:.*]] = vector.transfer_read %[[arg0]][%[[c0]], %[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3x3xf32>, vector<3x3x3x3xf32>
-//       CHECK:   %[[v1:.*]] = vector.transfer_read %[[arg1]][%[[c0]], %[[c0]], %[[c0]], %[[c0]], %[[c0]]], %[[cst]] : memref<1x3x3x3x3xf32>, vector<3x3x3x3xf32>
-//       CHECK:   %[[v2:.*]] = vector.contract {indexing_maps = [#[[$map9]], #[[$map9]], #[[$map10]]], iterator_types = ["reduction", "reduction", "reduction", "reduction"]} %[[v0]], %[[v1]], %[[cst]] : vector<3x3x3x3xf32>, vector<3x3x3x3xf32> into f32
-//       CHECK:   store %[[v2]], %[[arg2]][%[[c0]], %[[c0]], %[[c0]], %[[c0]], %[[c0]]] : memref<?x?x?x?x?xf32>
-//       CHECK:   return
+//   CHECK-DAG:   %[[c12:.*]] = constant 12 : index
+//   CHECK-DAG:   %[[c4:.*]] = constant 4 : index
+//   CHECK-DAG:   %[[cst:.*]] = constant 0.000000e+00 : f32
+//   CHECK-DAG:   %[[c3:.*]] = constant 3 : index
+//   CHECK-DAG:   %[[c0:.*]] = constant 0 : index
+//   CHECK-DAG:   %[[c1:.*]] = constant 1 : index
+//       CHECK:   %[[v0:.*]] = dim %[[arg1]], %[[c0]] : memref<?xf32>
+//       CHECK:   %[[v1:.*]] = dim %[[arg2]], %[[c0]] : memref<?xf32>
+//       CHECK:   %[[v2:.*]] = dim %[[arg0]], %[[c0]] : memref<?xf32>
+//       CHECK:   %[[v3:.*]] = alloc(%[[c12]]) : memref<?xi8>
+//       CHECK:   %[[v4:.*]] = alloc(%[[c12]]) : memref<?xi8>
+//       CHECK:   %[[v5:.*]] = alloc(%[[c4]]) : memref<?xi8>
+//       CHECK:   %[[v6:.*]] = std.view %[[v3]][%[[c0]]][] : memref<?xi8> to memref<3xf32>
+//       CHECK:   %[[v7:.*]] = std.view %[[v4]][%[[c0]]][] : memref<?xi8> to memref<3xf32>
+//       CHECK:   %[[v8:.*]] = std.view %[[v5]][%[[c0]]][] : memref<?xi8> to memref<1xf32>
+//       CHECK:   scf.for %[[arg3:.*]] = %[[c0]] to %[[v1]] step %[[c1]] {
+//       CHECK:     %[[v9:.*]] = affine.min #[[$map0]](%[[arg3]])[%[[v1]]]
+//       CHECK:     %[[v10:.*]] = subview %[[arg2]][%[[arg3]]] [%[[v9]]] [1]  : memref<?xf32> to memref<?xf32, #[[$map1]]>
+//       CHECK:     %[[v11:.*]] = subview %[[v8]][0] [%[[v9]]] [1]  : memref<1xf32> to memref<?xf32>
+//       CHECK:     scf.for %[[arg4:.*]] = %[[c0]] to %[[v0]] step %[[c3]] {
+//       CHECK:       %[[v12:.*]] = affine.apply #[[$map2]](%[[arg3]], %[[arg4]])
+//       CHECK:       %[[v13:.*]] = affine.min #[[$map3]](%[[arg3]], %[[arg4]])[%[[v2]]]
+//       CHECK:       %[[v14:.*]] = subview %arg0[%12] [%13] [1]  : memref<?xf32> to memref<?xf32, #[[$map1]]>
+//       CHECK:       %[[v15:.*]] = affine.min #[[$map4]](%arg4)[%0]
+//       CHECK:       %[[v16:.*]] = subview %[[arg1]][%[[arg4]]] [%[[v15]]] [1]  : memref<?xf32> to memref<?xf32, #[[$map1]]>
+//       CHECK:       %[[v17:.*]] = subview %[[v6]][0] [%[[v13]]] [1]  : memref<3xf32> to memref<?xf32>
+//       CHECK:       %[[v19:.*]] = vector.transfer_read %[[v6]][%[[c0]]], %[[cst]] {masked = [false]} : memref<3xf32>, vector<3xf32>
+//       CHECK:       %[[v20:.*]] = vector.transfer_read %[[v7]][%[[c0]]], %[[cst]] {masked = [false]} : memref<3xf32>, vector<3xf32>
+//       CHECK:       %[[v21:.*]] = mulf %[[v19]], %[[v20]] : vector<3xf32>
+//       CHECK:       %[[v22:.*]] = vector.reduction "add", %[[v21]], %[[cst]] : vector<3xf32> into f32
+//       CHECK:       store %[[v22]], %[[v8]][%[[c0]]] : memref<1xf32>
+//       CHECK:       scf.for %[[arg5:.*]] = %[[c0]] to %[[v9]] step %[[c1]] {
+//       CHECK:         %[[v23:.*]] = load %[[v11]][%[[arg5]]] : memref<?xf32>
+//       CHECK:         store %[[v23]], %[[v10]][%[[arg5]]] : memref<?xf32, #[[$map1]]>

diff --git a/mlir/test/lib/Transforms/TestConvVectorization.cpp b/mlir/test/lib/Transforms/TestConvVectorization.cpp
index 37e509cbbbe1..c90d8058de32 100644
--- a/mlir/test/lib/Transforms/TestConvVectorization.cpp
+++ b/mlir/test/lib/Transforms/TestConvVectorization.cpp
@@ -1,4 +1,4 @@
-//===- TestConvVectorization.cpp - Linalg to Vector dialect conversion ----===//
+//===- TestConvVectorization.cpp - Vectorization of Conv ops --------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,11 +6,19 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Dialect/Linalg/Passes.h"
+#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/Vector/VectorTransforms.h"
 #include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassManager.h"
 #include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/LoopUtils.h"
+#include "mlir/Transforms/Passes.h"
 
 using namespace mlir;
+using namespace vector;
 
 namespace {
 /// A pass converting MLIR Linalg ops into Vector ops.
@@ -19,8 +27,10 @@ class TestConvVectorization
   void runOnOperation() override;
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<vector::VectorDialect>();
+    registry.insert<VectorDialect>();
     registry.insert<linalg::LinalgDialect>();
+    registry.insert<scf::SCFDialect>();
+    registry.insert<AffineDialect>();
     registry.insert<StandardOpsDialect>();
   }
 };
@@ -32,15 +42,70 @@ void TestConvVectorization::runOnOperation() {
 
   ConversionTarget target(*context);
   target.addLegalDialect<AffineDialect, scf::SCFDialect, StandardOpsDialect,
-                         vector::VectorDialect>();
+                         VectorDialect>();
   target.addLegalOp<ModuleOp, FuncOp, ModuleTerminatorOp, ReturnOp>();
   target.addLegalOp<linalg::FillOp, linalg::YieldOp>();
 
-  OwningRewritePatternList patterns;
-  linalg::populateConvVectorizationPatterns(context, patterns);
+  SmallVector<OwningRewritePatternList, 4> stage1Patterns;
+  linalg::populateConvVectorizationPatterns(context, stage1Patterns);
 
-  if (failed(applyPartialConversion(module, target, patterns)))
-    return signalPassFailure();
+  OwningRewritePatternList stage2Patterns =
+      linalg::getLinalgTilingCanonicalizationPatterns(context);
+  stage2Patterns.insert<linalg::AffineMinSCFCanonicalizationPattern>(context);
+
+  auto stage3Transforms = [](Operation *op) {
+    PassManager pm(op->getContext());
+    pm.addPass(createLoopInvariantCodeMotionPass());
+    if (failed(pm.run(cast<ModuleOp>(op))))
+      llvm_unreachable("Unexpected failure in cleanup pass pipeline.");
+    op->walk([](FuncOp func) {
+      promoteSingleIterationLoops(func);
+      linalg::hoistViewAllocOps(func);
+      linalg::hoistRedundantVectorTransfers(func);
+    });
+    return success();
+  };
+
+  linalg::applyStagedPatterns(module, stage1Patterns, stage2Patterns,
+                              stage3Transforms);
+
+  //===--------------------------------------------------------------------===//
+  // Post staged patterns transforms
+  //===--------------------------------------------------------------------===//
+
+  VectorTransformsOptions vectorTransformsOptions{
+      VectorContractLowering::Dot, VectorTransposeLowering::EltWise};
+
+  OwningRewritePatternList vectorTransferPatterns;
+  // Pattern is not applied because rank-reducing vector transfer is not yet
+  // supported as can be seen in splitFullAndPartialTransferPrecondition,
+  // VectorTransforms.cpp
+  vectorTransferPatterns.insert<VectorTransferFullPartialRewriter>(
+      context, vectorTransformsOptions);
+  applyPatternsAndFoldGreedily(module, vectorTransferPatterns);
+
+  // Programmatic controlled lowering of linalg.copy and linalg.fill.
+  PassManager pm(context);
+  pm.addPass(createConvertLinalgToLoopsPass());
+  if (failed(pm.run(module)))
+    llvm_unreachable("Unexpected failure in linalg to loops pass.");
+
+  // Programmatic controlled lowering of vector.contract only.
+  OwningRewritePatternList vectorContractLoweringPatterns;
+  populateVectorContractLoweringPatterns(vectorContractLoweringPatterns,
+                                         context, vectorTransformsOptions);
+  applyPatternsAndFoldGreedily(module, vectorContractLoweringPatterns);
+
+  // Programmatic controlled lowering of vector.transfer only.
+  OwningRewritePatternList vectorToLoopsPatterns;
+  populateVectorToSCFConversionPatterns(vectorToLoopsPatterns, context,
+                                        VectorTransferToSCFOptions());
+  applyPatternsAndFoldGreedily(module, vectorToLoopsPatterns);
+
+  // Ensure we drop the marker in the end.
+  module.walk([](linalg::LinalgOp op) {
+    op.removeAttr(linalg::LinalgTransforms::kLinalgTransformMarker);
+  });
 }
 
 namespace mlir {


        

