[Mlir-commits] [mlir] 7d97678 - [mlir][linalg] Break up linalg vectorization pre-condition

Thomas Raoux llvmlistbot at llvm.org
Tue Dec 14 13:38:38 PST 2021


Author: Thomas Raoux
Date: 2021-12-14T13:38:14-08:00
New Revision: 7d97678df7f514c14b7611447dad02e9cc5168c9

URL: https://github.com/llvm/llvm-project/commit/7d97678df7f514c14b7611447dad02e9cc5168c9
DIFF: https://github.com/llvm/llvm-project/commit/7d97678df7f514c14b7611447dad02e9cc5168c9.diff

LOG: [mlir][linalg] Break up linalg vectorization pre-condition

Break up the vectorization pre-condition into the part that checks for
static shapes and the part that checks whether the linalg op is otherwise
supported by vectorization. This makes it possible to check whether an op
would be vectorizable if it had static shapes.

Differential Revision: https://reviews.llvm.org/D115754
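
For context, a minimal caller-side sketch of how the two entry points compose
(hypothetical pattern code, not part of this commit; `linalgOp`, `vectorizeNow`,
and `foldShapesToStatic` are placeholder names):

    // Full precondition: shapes are already static and the op is supported,
    // so it can be vectorized as-is.
    if (succeeded(mlir::linalg::vectorizeLinalgOpPrecondition(
            linalgOp.getOperation())))
      return vectorizeNow(linalgOp);
    // Static precondition only: the op is supported by vectorization but has
    // dynamic shapes, so it would become vectorizable once its dimensions are
    // folded to static values (e.g. by padding or tiling).
    if (succeeded(mlir::linalg::vectorizeStaticLinalgOpPrecondition(linalgOp)))
      return foldShapesToStatic(linalgOp);
    return failure();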

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 82e60800f21be..c14259f7babad 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -401,9 +401,15 @@ LogicalResult generalizeNamedOpPrecondition(Operation *op);
 LogicalResult promoteSubviewsPrecondition(Operation *op,
                                           LinalgPromotionOptions options);
 
-/// Rewrite a linalg.generic into a suitable vector.contraction op.
+/// Return success if the operation can be vectorized.
 LogicalResult vectorizeLinalgOpPrecondition(Operation *op);
 
+/// Return success if `op` can be vectorized assuming it is static. This allows
+/// checking if an op will be vectorizable once all the dimensions are folded to
+/// static values.
+/// It is the same as `vectorizeLinalgOpPrecondition` for static shapes.
+LogicalResult vectorizeStaticLinalgOpPrecondition(LinalgOp op);
+
 //===----------------------------------------------------------------------===//
 // Transformations exposed as rewrite patterns.
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index bc02298d9d007..d4aa16ecedd3d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -599,34 +599,39 @@ static LogicalResult reductionPreconditions(LinalgOp op) {
   return success();
 }
 
-LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
-  auto linalgOp = cast<linalg::LinalgOp>(op);
-  // All types must be static shape to go to vector.
-  if (linalgOp.hasDynamicShape()) {
-    LDBG("precondition failed: dynamic shape");
-    return failure();
-  }
+LogicalResult
+mlir::linalg::vectorizeStaticLinalgOpPrecondition(linalg::LinalgOp op) {
   if (isElementwise(op))
     return success();
   // TODO: isaConvolutionOpInterface that can also infer from generic features.
   // But we will still need stride/dilation attributes that will be annoying to
   // reverse-engineer...
-  if (isa<ConvolutionOpInterface>(op))
+  if (isa<ConvolutionOpInterface>(op.getOperation()))
     return success();
   // TODO: the common vector shape is equal to the static loop sizes only when
   // all indexing maps are projected permutations. For convs and stencils the
   // logic will need to evolve.
-  if (!allIndexingsAreProjectedPermutation(linalgOp)) {
+  if (!allIndexingsAreProjectedPermutation(op)) {
     LDBG("precondition failed: not projected permutations");
     return failure();
   }
-  if (failed(reductionPreconditions(linalgOp))) {
+  if (failed(reductionPreconditions(op))) {
     LDBG("precondition failed: reduction preconditions");
     return failure();
   }
   return success();
 }
 
+LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
+  auto linalgOp = cast<linalg::LinalgOp>(op);
+  // All types must be static shape to go to vector.
+  if (linalgOp.hasDynamicShape()) {
+    LDBG("precondition failed: dynamic shape");
+    return failure();
+  }
+  return vectorizeStaticLinalgOpPrecondition(linalgOp);
+}
+
 LogicalResult
 mlir::linalg::vectorizeLinalgOp(OpBuilder &b, Operation *op,
                                 SmallVectorImpl<Value> &newResults) {


        

