[Mlir-commits] [mlir] [mlir][linalg] Support `ParamType` in `vector_sizes` option of `VectorizeOp` transform (PR #87557)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Fri Apr 5 10:50:36 PDT 2024
https://github.com/srcarroll updated https://github.com/llvm/llvm-project/pull/87557
From 223f5e0e9845e4b7b460b1d550c06edeb7eee57a Mon Sep 17 00:00:00 2001
From: Sam <srcarroll314 at gmail.com>
Date: Wed, 3 Apr 2024 15:45:19 -0500
Subject: [PATCH 1/5] Support `ParamType` in `vector_sizes` option of
`VectorizeOp` transform
---
.../Linalg/TransformOps/LinalgTransformOps.td | 7 +-
.../TransformOps/LinalgTransformOps.cpp | 6 ++
mlir/test/Dialect/Linalg/vectorization.mlir | 80 +++++++++++++++++++
3 files changed, 89 insertions(+), 4 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index c260fe3f7a46a5..7220e6e077e59c 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -2138,12 +2138,11 @@ def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
}];
let arguments = (ins TransformHandleTypeInterface:$target,
- Variadic<TransformHandleTypeInterface>:$vector_sizes,
+ Variadic<TransformAnyParamTypeOrAnyHandle>:$vector_sizes,
+ DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:$static_vector_sizes,
OptionalAttr<UnitAttr>:$vectorize_nd_extract,
DefaultValuedOptionalAttr<DenseBoolArrayAttr, "{}">:
- $scalable_sizes,
- DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
- $static_vector_sizes);
+ $scalable_sizes);
let results = (outs);
let assemblyFormat = [{
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 88819cd964354b..9c284ca309a455 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3136,6 +3136,12 @@ DiagnosedSilenceableFailure transform::VectorizeOp::apply(
auto attr = sz.get<Attribute>();
vectorSizes.push_back(cast<IntegerAttr>(attr).getInt());
continue;
+ } else if (sz.is<Value>() && isa<ParamType>(sz.get<Value>().getType())) {
+ ArrayRef<Attribute> params = state.getParams(sz.get<Value>());
+ assert(params.size() == 1 && "expected a single param");
+ vectorSizes.push_back(
+ cast<IntegerAttr>(params.front()).getValue().getSExtValue());
+ continue;
}
auto szPayloads = state.getPayloadOps(sz.get<Value>());
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 2d01d57304013c..64e5935a90a4c4 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -36,6 +36,43 @@ module attributes {transform.with_named_sequence} {
// -----
+func.func @vectorize_dynamic_identity_with_param(%arg0: tensor<?xf32>,
+ %arg1: tensor<?xf32>,
+ %arg2: tensor<?xf32>) -> tensor<?xf32> {
+ %0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>,
+ affine_map<(d0) -> (d0)>,
+ affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"] }
+ ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
+ outs(%arg2 : tensor<?xf32>) {
+ ^bb(%in0: f32, %in1: f32, %out: f32) :
+ %0 = arith.addf %in0, %in1 : f32
+ linalg.yield %0 : f32
+ } -> tensor<?xf32>
+ return %0 : tensor<?xf32>
+}
+
+// CHECK-LABEL: @vectorize_dynamic_identity_with_param
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
+// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
+// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<4xf32>
+// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %vector_size = transform.param.constant 4 : i64 -> !transform.param<i64>
+ transform.structured.vectorize %0 vector_sizes [%vector_size : !transform.param<i64>] : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
func.func @vectorize_dynamic_1d_broadcast(%arg0: tensor<?xf32>,
%arg1: tensor<?xf32>,
%arg2: tensor<?xf32>) -> tensor<?xf32> {
@@ -231,6 +268,49 @@ module attributes {transform.with_named_sequence} {
// -----
+func.func @vectorize_dynamic_transpose_reduction_with_params(%arg0: tensor<?x?x?xf32>,
+ %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
+ affine_map<(d0, d1, d2) -> (d2, d1)>],
+ iterator_types = ["reduction", "parallel", "parallel"] }
+ ins(%arg0 : tensor<?x?x?xf32>)
+ outs(%arg1 : tensor<?x?xf32>) {
+ ^bb(%in: f32, %out: f32) :
+ %0 = arith.addf %in, %out : f32
+ linalg.yield %0 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %vector_size_0 = transform.param.constant 4 : i64 -> !transform.param<i64>
+ %vector_size_2 = transform.param.constant 16 : i64 -> !transform.param<i64>
+ transform.structured.vectorize %0 vector_sizes
+ [%vector_size_0 : !transform.param<i64>, 8, %vector_size_2: !transform.param<i64>] : !transform.any_op
+ transform.yield
+ }
+}
+
+// CHECK-LABEL: @vectorize_dynamic_transpose_reduction_with_params(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?x?xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_4:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_6:.*]] = arith.constant 2 : index
+// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_6]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_3]], %[[VAL_5]], %[[VAL_7]] : vector<4x8x16xi1>
+// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_0]]{{.*}} {in_bounds = [true, true, true]} : tensor<?x?x?xf32>, vector<4x8x16xf32> } : vector<4x8x16xi1> -> vector<4x8x16xf32>
+// CHECK: %[[VAL_13:.*]] = vector.create_mask %[[VAL_7]], %[[VAL_5]] : vector<16x8xi1>
+// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_13]] { vector.transfer_read %[[VAL_1]]{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<8x16xf32> } : vector<16x8xi1> -> vector<8x16xf32>
+// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_10]] { vector.multi_reduction <add>, %[[VAL_11]], %[[VAL_14]] [0] : vector<4x8x16xf32> to vector<8x16xf32> } : vector<4x8x16xi1> -> vector<8x16xf32>
+// CHECK: %[[VAL_17:.*]] = vector.mask %[[VAL_13]] { vector.transfer_write %[[VAL_15]], %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : vector<8x16xf32>, tensor<?x?xf32> } : vector<16x8xi1> -> tensor<?x?xf32>
+
+// -----
+
func.func @vectorize_partial_dynamic_identity(%arg0: tensor<8x?xf32>,
%arg1: tensor<8x?xf32>,
%arg2: tensor<8x?xf32>) -> tensor<8x?xf32> {
From 3b8640e7f2ad91d7907d17b1590f13a95ed6e6a5 Mon Sep 17 00:00:00 2001
From: Sam <srcarroll314 at gmail.com>
Date: Thu, 4 Apr 2024 12:10:02 -0500
Subject: [PATCH 2/5] address review comments
---
.../Linalg/TransformOps/LinalgTransformOps.td | 3 +-
.../TransformOps/LinalgTransformOps.cpp | 3 +-
mlir/test/Dialect/Linalg/vectorization.mlir | 38 +++++++++++++++++++
3 files changed, 42 insertions(+), 2 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 7220e6e077e59c..5230a7716398a3 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -2139,7 +2139,8 @@ def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
let arguments = (ins TransformHandleTypeInterface:$target,
Variadic<TransformAnyParamTypeOrAnyHandle>:$vector_sizes,
- DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:$static_vector_sizes,
+ DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
+ $static_vector_sizes,
OptionalAttr<UnitAttr>:$vectorize_nd_extract,
DefaultValuedOptionalAttr<DenseBoolArrayAttr, "{}">:
$scalable_sizes);
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 9c284ca309a455..bfcea723680726 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3138,7 +3138,8 @@ DiagnosedSilenceableFailure transform::VectorizeOp::apply(
continue;
} else if (sz.is<Value>() && isa<ParamType>(sz.get<Value>().getType())) {
ArrayRef<Attribute> params = state.getParams(sz.get<Value>());
- assert(params.size() == 1 && "expected a single param");
+ if (params.size() != 1)
+ return emitSilenceableFailure(getLoc()) << "expected a single param";
vectorSizes.push_back(
cast<IntegerAttr>(params.front()).getValue().getSExtValue());
continue;
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 64e5935a90a4c4..807fa681036435 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -36,6 +36,44 @@ module attributes {transform.with_named_sequence} {
// -----
+func.func @vectorize_dynamic_identity_with_constant(%arg0: tensor<?xf32>,
+ %arg1: tensor<?xf32>,
+ %arg2: tensor<?xf32>) -> tensor<?xf32> {
+ %c4 = arith.constant 4 : index
+ %0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>,
+ affine_map<(d0) -> (d0)>,
+ affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"] }
+ ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
+ outs(%arg2 : tensor<?xf32>) {
+ ^bb(%in0: f32, %in1: f32, %out: f32) :
+ %0 = arith.addf %in0, %in1 : f32
+ linalg.yield %0 : f32
+ } -> tensor<?xf32>
+ return %0 : tensor<?xf32>
+}
+
+// CHECK-LABEL: @vectorize_dynamic_identity_with_constant
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
+// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
+// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<4xf32>
+// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %size = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %0 vector_sizes [%size: !transform.any_op] : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
func.func @vectorize_dynamic_identity_with_param(%arg0: tensor<?xf32>,
%arg1: tensor<?xf32>,
%arg2: tensor<?xf32>) -> tensor<?xf32> {
From 2fd557f363c1c1113a3c48380a74914747a21313 Mon Sep 17 00:00:00 2001
From: Sam <srcarroll314 at gmail.com>
Date: Thu, 4 Apr 2024 16:04:24 -0500
Subject: [PATCH 3/5] Change parser/printer to be more similar to
`tile_using_for`
---
.../Linalg/TransformOps/LinalgTransformOps.td | 13 +---
.../TransformOps/LinalgTransformOps.cpp | 69 +++++++++++++++++++
mlir/test/Dialect/Linalg/vectorization.mlir | 6 +-
.../dialects/transform_structured_ext.py | 4 +-
4 files changed, 76 insertions(+), 16 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 5230a7716398a3..8edaa7db6cef3b 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -2146,17 +2146,8 @@ def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
$scalable_sizes);
let results = (outs);
- let assemblyFormat = [{
- $target oilist(
- `vector_sizes` custom<DynamicIndexList>($vector_sizes,
- $static_vector_sizes,
- type($vector_sizes),
- $scalable_sizes) |
- `vectorize_nd_extract` $vectorize_nd_extract
- )
- attr-dict
- `:` type($target)
- }];
+
+ let hasCustomAssemblyFormat = 1;
let hasVerifier = 1;
let extraClassDeclaration = [{
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index bfcea723680726..fcc9c0bbc09379 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3122,6 +3122,75 @@ transform::VectorizeChildrenAndApplyPatternsOp::applyToOne(
//===----------------------------------------------------------------------===//
// VectorizeOp
//===----------------------------------------------------------------------===//
+ParseResult transform::VectorizeOp::parse(OpAsmParser &parser,
+ OperationState &result) {
+ OpAsmParser::UnresolvedOperand target;
+ SmallVector<OpAsmParser::UnresolvedOperand> dynamicSizes;
+ DenseI64ArrayAttr staticSizes;
+ SmallVector<Type> operandTypes;
+ llvm::SMLoc operandLoc;
+ DenseBoolArrayAttr scalableVals;
+
+ if (parser.parseOperand(target) || parser.getCurrentLocation(&operandLoc))
+ return ParseResult::failure();
+
+ if (succeeded(parser.parseOptionalKeyword("vector_sizes"))) {
+ if (parseDynamicIndexList(parser, dynamicSizes, staticSizes, scalableVals))
+ return ParseResult::failure();
+ }
+
+ if (succeeded(parser.parseOptionalKeyword("vectorize_nd_extract")))
+ result.addAttribute(getVectorizeNdExtractAttrName(result.name),
+ parser.getBuilder().getUnitAttr());
+
+ if (parser.parseOptionalAttrDict(result.attributes) ||
+ parser.parseColonTypeList(operandTypes))
+ return ParseResult::failure();
+
+ if (operandTypes.size() != dynamicSizes.size() + 1) {
+ return parser.emitError(operandLoc)
+ << "expected " << dynamicSizes.size() + 1 << " operand type(s)";
+ }
+ if (parser.resolveOperand(target, operandTypes.front(), result.operands) ||
+ parser.resolveOperands(
+ dynamicSizes,
+ SmallVector<Type>(operandTypes.begin() + 1, operandTypes.end()),
+ operandLoc, result.operands)) {
+ return failure();
+ }
+
+ if (scalableVals)
+ result.addAttribute(getScalableSizesAttrName(result.name), scalableVals);
+ if (staticSizes)
+ result.addAttribute(getStaticVectorSizesAttrName(result.name), staticSizes);
+
+ return success();
+}
+
+void transform::VectorizeOp::print(OpAsmPrinter &p) {
+ p << ' ' << getTarget() << ' ';
+ if (!getMixedVectorSizes().empty()) {
+ p << "vector_sizes ";
+ printDynamicIndexList(p, getOperation(), getVectorSizes(),
+ getStaticVectorSizesAttr(),
+ /*valueTypes=*/{}, getScalableSizesAttr(),
+ OpAsmParser::Delimiter::Square);
+ }
+
+ if (getVectorizeNdExtract())
+ p << "vectorize_nd_extract ";
+
+ p.printOptionalAttrDict(
+ (*this)->getAttrs(),
+ /*elidedAttrs=*/{
+ getScalableSizesAttrName(getOperation()->getName()),
+ getStaticVectorSizesAttrName(getOperation()->getName())});
+ p << " : ";
+ p << getTarget().getType() << ", ";
+ llvm::interleaveComma(getVectorSizes(), p,
+ [&](Value operand) { p << operand.getType(); });
+}
+
DiagnosedSilenceableFailure transform::VectorizeOp::apply(
transform::TransformRewriter &rewriter,
mlir::transform::TransformResults &transformResults,
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 807fa681036435..fd7d3b4767eb22 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -67,7 +67,7 @@ module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
%size = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
- transform.structured.vectorize %0 vector_sizes [%size: !transform.any_op] : !transform.any_op
+ transform.structured.vectorize %0 vector_sizes [%size] : !transform.any_op, !transform.any_op
transform.yield
}
}
@@ -104,7 +104,7 @@ module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
%vector_size = transform.param.constant 4 : i64 -> !transform.param<i64>
- transform.structured.vectorize %0 vector_sizes [%vector_size : !transform.param<i64>] : !transform.any_op
+ transform.structured.vectorize %0 vector_sizes [%vector_size] : !transform.any_op, !transform.param<i64>
transform.yield
}
}
@@ -326,7 +326,7 @@ module attributes {transform.with_named_sequence} {
%vector_size_0 = transform.param.constant 4 : i64 -> !transform.param<i64>
%vector_size_2 = transform.param.constant 16 : i64 -> !transform.param<i64>
transform.structured.vectorize %0 vector_sizes
- [%vector_size_0 : !transform.param<i64>, 8, %vector_size_2: !transform.param<i64>] : !transform.any_op
+ [%vector_size_0, 8, %vector_size_2] : !transform.any_op, !transform.param<i64>, !transform.param<i64>
transform.yield
}
}
diff --git a/mlir/test/python/dialects/transform_structured_ext.py b/mlir/test/python/dialects/transform_structured_ext.py
index c9b7802e1cc453..fee5550d26fc25 100644
--- a/mlir/test/python/dialects/transform_structured_ext.py
+++ b/mlir/test/python/dialects/transform_structured_ext.py
@@ -210,7 +210,7 @@ def testVectorizeMixed(target):
# CHECK: transform.sequence
# CHECK: %[[V0:.*]] = transform.structured.match
# CHECK: transform.structured.vectorize
- # CHECK-SAME: vector_sizes [%[[V0]] : !transform.any_op, 4]
+ # CHECK-SAME: vector_sizes [%[[V0]], 4]
@run
@@ -223,7 +223,7 @@ def testVectorizeScalable(target):
# CHECK: transform.sequence
# CHECK-DAG: %[[V0:.*]] = transform.structured.match
# CHECK-DAG: transform.structured.vectorize
- # CHECK-SAME: vector_sizes [16, [%[[V0]] : !transform.any_op], [4], [8]]
+ # CHECK-SAME: vector_sizes [16, [%[[V0]]], [4], [8]]
@run
From c2c47692fe140ddceb93a986b1c6ce16e5af441c Mon Sep 17 00:00:00 2001
From: Sam <srcarroll314 at gmail.com>
Date: Fri, 5 Apr 2024 12:38:31 -0500
Subject: [PATCH 4/5] Address review comments and fix printer bug
---
.../TransformOps/LinalgTransformOps.cpp | 30 +++++++++++--------
.../Dialect/Linalg/transform-ops-invalid.mlir | 10 +++++++
mlir/test/Dialect/Linalg/transform-ops.mlir | 11 ++++++-
.../dialects/transform_structured_ext.py | 9 ++++++
4 files changed, 47 insertions(+), 13 deletions(-)
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index fcc9c0bbc09379..7e7cf1d0244613 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3122,6 +3122,9 @@ transform::VectorizeChildrenAndApplyPatternsOp::applyToOne(
//===----------------------------------------------------------------------===//
// VectorizeOp
//===----------------------------------------------------------------------===//
+
+static const StringLiteral kVectorSizesKeyword = "vector_sizes";
+
ParseResult transform::VectorizeOp::parse(OpAsmParser &parser,
OperationState &result) {
OpAsmParser::UnresolvedOperand target;
@@ -3134,12 +3137,14 @@ ParseResult transform::VectorizeOp::parse(OpAsmParser &parser,
if (parser.parseOperand(target) || parser.getCurrentLocation(&operandLoc))
return ParseResult::failure();
- if (succeeded(parser.parseOptionalKeyword("vector_sizes"))) {
- if (parseDynamicIndexList(parser, dynamicSizes, staticSizes, scalableVals))
+ if (succeeded(parser.parseOptionalKeyword(kVectorSizesKeyword))) {
+ if (failed(parseDynamicIndexList(parser, dynamicSizes, staticSizes,
+ scalableVals)))
return ParseResult::failure();
}
- if (succeeded(parser.parseOptionalKeyword("vectorize_nd_extract")))
+ if (succeeded(parser.parseOptionalKeyword(
+ getVectorizeNdExtractAttrName(result.name))))
result.addAttribute(getVectorizeNdExtractAttrName(result.name),
parser.getBuilder().getUnitAttr());
@@ -3152,10 +3157,8 @@ ParseResult transform::VectorizeOp::parse(OpAsmParser &parser,
<< "expected " << dynamicSizes.size() + 1 << " operand type(s)";
}
if (parser.resolveOperand(target, operandTypes.front(), result.operands) ||
- parser.resolveOperands(
- dynamicSizes,
- SmallVector<Type>(operandTypes.begin() + 1, operandTypes.end()),
- operandLoc, result.operands)) {
+ parser.resolveOperands(dynamicSizes, ArrayRef(operandTypes).drop_front(),
+ operandLoc, result.operands)) {
return failure();
}
@@ -3170,7 +3173,7 @@ ParseResult transform::VectorizeOp::parse(OpAsmParser &parser,
void transform::VectorizeOp::print(OpAsmPrinter &p) {
p << ' ' << getTarget() << ' ';
if (!getMixedVectorSizes().empty()) {
- p << "vector_sizes ";
+ p << kVectorSizesKeyword << ' ';
printDynamicIndexList(p, getOperation(), getVectorSizes(),
getStaticVectorSizesAttr(),
/*valueTypes=*/{}, getScalableSizesAttr(),
@@ -3178,7 +3181,7 @@ void transform::VectorizeOp::print(OpAsmPrinter &p) {
}
if (getVectorizeNdExtract())
- p << "vectorize_nd_extract ";
+ p << getVectorizeNdExtractAttrName() << ' ';
p.printOptionalAttrDict(
(*this)->getAttrs(),
@@ -3186,9 +3189,12 @@ void transform::VectorizeOp::print(OpAsmPrinter &p) {
getScalableSizesAttrName(getOperation()->getName()),
getStaticVectorSizesAttrName(getOperation()->getName())});
p << " : ";
- p << getTarget().getType() << ", ";
- llvm::interleaveComma(getVectorSizes(), p,
- [&](Value operand) { p << operand.getType(); });
+ p << getTarget().getType();
+ if (!getVectorSizes().empty()) {
+ p << ", ";
+ llvm::interleaveComma(getVectorSizes(), p,
+ [&](Value operand) { p << operand.getType(); });
+ }
}
DiagnosedSilenceableFailure transform::VectorizeOp::apply(
diff --git a/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir b/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir
index 5143be39306630..8288ac631aff75 100644
--- a/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir
+++ b/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir
@@ -71,3 +71,13 @@ transform.sequence failures(propagate) {
: (!transform.any_op) -> !transform.op<"linalg.generic">
}
+
+// -----
+
+transform.sequence failures(propagate) {
+^bb0(%arg0: !transform.any_op):
+ %0 = transform.param.constant 2 : i64 -> !transform.param<i64>
+ // expected-error at below {{custom op 'transform.structured.vectorize' expected 2 operand type(s)}}
+ transform.structured.vectorize %arg0 vector_sizes [%0, 2] : !transform.any_op, !transform.param<i64>, !transform.param<i64>
+
+}
diff --git a/mlir/test/Dialect/Linalg/transform-ops.mlir b/mlir/test/Dialect/Linalg/transform-ops.mlir
index 6b276e69a595db..8f6274fd22c215 100644
--- a/mlir/test/Dialect/Linalg/transform-ops.mlir
+++ b/mlir/test/Dialect/Linalg/transform-ops.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s | mlir-opt | FileCheck %s
+// RUN: mlir-opt %s --split-input-file | mlir-opt | FileCheck %s
transform.sequence failures(propagate) {
^bb1(%arg0: !transform.any_op):
@@ -57,3 +57,12 @@ transform.sequence failures(propagate) {
%1:2 = transform.structured.fuse_into_containing_op %arg2 into %loop
: (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
}
+
+// -----
+
+transform.sequence failures(propagate) {
+^bb0(%arg0: !transform.any_op):
+ // CHECK: transform.structured.vectorize %arg0 : !transform.any_op
+ transform.structured.vectorize %arg0 vector_sizes [] : !transform.any_op
+
+}
diff --git a/mlir/test/python/dialects/transform_structured_ext.py b/mlir/test/python/dialects/transform_structured_ext.py
index fee5550d26fc25..422d5912966b6d 100644
--- a/mlir/test/python/dialects/transform_structured_ext.py
+++ b/mlir/test/python/dialects/transform_structured_ext.py
@@ -212,6 +212,15 @@ def testVectorizeMixed(target):
# CHECK: transform.structured.vectorize
# CHECK-SAME: vector_sizes [%[[V0]], 4]
+ at run
+ at create_sequence
+def testVectorizeEmpty(target):
+ structured.VectorizeOp(target, [])
+ # CHECK-LABEL: TEST: testVectorizeEmpty
+ # CHECK: transform.sequence
+ # CHECK: transform.structured.vectorize
+ # CHECK-NOT: vector_sizes
+
@run
@create_sequence
From ee8bd56998b2a6bf646c41c10a93c9cd665d61e6 Mon Sep 17 00:00:00 2001
From: Sam <srcarroll314 at gmail.com>
Date: Fri, 5 Apr 2024 12:50:00 -0500
Subject: [PATCH 5/5] fix formatting
---
mlir/test/python/dialects/transform_structured_ext.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/mlir/test/python/dialects/transform_structured_ext.py b/mlir/test/python/dialects/transform_structured_ext.py
index 422d5912966b6d..91ecd0fc38e174 100644
--- a/mlir/test/python/dialects/transform_structured_ext.py
+++ b/mlir/test/python/dialects/transform_structured_ext.py
@@ -212,6 +212,7 @@ def testVectorizeMixed(target):
# CHECK: transform.structured.vectorize
# CHECK-SAME: vector_sizes [%[[V0]], 4]
+
@run
@create_sequence
def testVectorizeEmpty(target):
More information about the Mlir-commits
mailing list