[Mlir-commits] [mlir] [mlir][linalg] Add support for vectorizing dynamic elementwise named ops (PR #71454)
Han-Chung Wang
llvmlistbot at llvm.org
Mon Nov 6 14:44:04 PST 2023
https://github.com/hanhanW created https://github.com/llvm/llvm-project/pull/71454
Dynamic elementwise ops are already vectorizable when written as linalg.generic; we just need to relax the precondition so that the masked vectorization path also accepts elementwise named ops.
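For illustration, a rough sketch of the kind of input this unlocks (not taken from the patch: linalg.elemwise_binary and the [4, 8] vector sizes are just an example; the transform sequence mirrors the new linalg.transpose test below):

func.func @dynamic_elemwise_add(%lhs: tensor<?x?xf32>, %rhs: tensor<?x?xf32>,
                                %init: tensor<?x?xf32>) -> tensor<?x?xf32> {
  // Dynamic shapes: previously the masked-vectorization precondition rejected
  // this op because it is a named op rather than a linalg.generic.
  %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<add>}
         ins(%lhs, %rhs : tensor<?x?xf32>, tensor<?x?xf32>)
         outs(%init : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.elemwise_binary"]} in %arg1
           : (!transform.any_op) -> !transform.any_op
    // Masked vectorization with explicit (example) vector sizes.
    transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
    transform.yield
  }
}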
From 2eb707acb8cf539798ac6c8b694cc1a1d8748465 Mon Sep 17 00:00:00 2001
From: hanhanW <hanhan0912 at gmail.com>
Date: Mon, 6 Nov 2023 14:37:58 -0800
Subject: [PATCH] [mlir][linalg] Add support for vectorizing dynamic
elementwise named ops
Dynamic elementwise ops are already vectorizable when written as
linalg.generic; we just need to relax the precondition so that the masked
vectorization path also accepts elementwise named ops.
---
.../Linalg/Transforms/Vectorization.cpp | 3 +-
mlir/test/Dialect/Linalg/vectorization.mlir | 28 +++++++++++++++++++
2 files changed, 30 insertions(+), 1 deletion(-)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index b427af33e3c4400..8b8eb5131669e13 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1466,7 +1466,8 @@ static LogicalResult reductionPreconditions(LinalgOp op) {
static LogicalResult vectorizeDynamicLinalgOpPrecondition(linalg::LinalgOp op) {
// TODO: Masking only supports dynamic generic ops for now.
- if (!isa<linalg::GenericOp, linalg::FillOp, linalg::CopyOp,
+ if (!isElementwise(op) &&
+ !isa<linalg::GenericOp, linalg::FillOp, linalg::CopyOp,
linalg::ContractionOpInterface>(op.getOperation()))
return failure();
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 7f4af344886f498..610339405d1c2c9 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -368,6 +368,34 @@ module attributes {transform.with_named_sequence} {
// -----
+// CHECK: #[[MAP:.*]] = affine_map<(d0, d1) -> (d1, d0)>
+// CHECK: func @test_masked_vectorize_linalg_transpose
+func.func @test_masked_vectorize_linalg_transpose(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ // CHECK: %[[C0:.*]] = arith.constant 0 : index
+ // CHECK: %[[D0:.*]] = tensor.dim %arg0, %[[C0]]
+ // CHECK: %[[C1:.*]] = arith.constant 1 : index
+ // CHECK: %[[D1:.*]] = tensor.dim %arg0, %[[C1]]
+ // CHECK: %[[MASK0:.*]] = vector.create_mask %[[D0]], %[[D1]]
+ // CHECK: %[[LOAD:.*]] = vector.mask %[[MASK0]] { vector.transfer_read %arg0{{.+}} }
+ // CHECK-SAME: vector<2x4xi1> -> vector<2x4xf32>
+ // CHECK: %[[MASK1:.*]] = vector.create_mask %[[D1]], %[[D0]]
+ // CHECK: %[[WRITE:.*]] = vector.mask %[[MASK1]] { vector.transfer_write %[[LOAD]], %arg1{{.+}} permutation_map = #[[MAP]]{{.+}} }
+ // CHECK-SAME: vector<4x2xi1> -> tensor<?x?xf32>
+ // CHECK: return %[[WRITE]]
+ %0 = linalg.transpose ins(%arg0 : tensor<?x?xf32>) outs(%arg1 : tensor<?x?xf32>) permutation = [1, 0]
+ return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
// CHECK-LABEL: func @test_masked_vectorize_linalg_copy
func.func @test_masked_vectorize_linalg_copy(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
// CHECK: %[[c0:.*]] = arith.constant 0 : index