[Mlir-commits] [mlir] 01f7431 - [mlir][DeclarativeParser] Add support for formatting operations with AttrSizedOperandSegments.
River Riddle
llvmlistbot at llvm.org
Thu Mar 5 12:58:49 PST 2020
Author: River Riddle
Date: 2020-03-05T12:51:28-08:00
New Revision: 01f7431b5be2be7a6e2de933a4ae883a803502f6
URL: https://github.com/llvm/llvm-project/commit/01f7431b5be2be7a6e2de933a4ae883a803502f6
DIFF: https://github.com/llvm/llvm-project/commit/01f7431b5be2be7a6e2de933a4ae883a803502f6.diff
LOG: [mlir][DeclarativeParser] Add support for formatting operations with AttrSizedOperandSegments.
This attribute details the segment sizes for operand groups within the operation. This revision adds support for automatically populating this attribute in the declarative parser.
Differential Revision: https://reviews.llvm.org/D75315
Added:
Modified:
mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
mlir/include/mlir/Dialect/VectorOps/VectorOps.td
mlir/lib/Dialect/StandardOps/IR/Ops.cpp
mlir/lib/Dialect/VectorOps/VectorOps.cpp
mlir/test/Conversion/LoopsToGPU/parallel_loop.mlir
mlir/test/Dialect/Linalg/fusion.mlir
mlir/test/Dialect/Linalg/promote.mlir
mlir/test/Dialect/Linalg/tile.mlir
mlir/test/Dialect/Linalg/tile_conv.mlir
mlir/test/Dialect/Linalg/tile_parallel.mlir
mlir/test/Dialect/Linalg/transform-patterns.mlir
mlir/test/IR/core-ops.mlir
mlir/test/Transforms/canonicalize.mlir
mlir/tools/mlir-tblgen/OpFormatGen.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
index e44f8ff18a3a..899c54d4bbae 100644
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -1722,7 +1722,12 @@ def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> {
Variadic<Index>:$strides,
I32ElementsAttr:$operand_segment_sizes
);
- let results = (outs AnyMemRef);
+ let results = (outs AnyMemRef:$result);
+
+ let assemblyFormat = [{
+ $source `[` $offsets `]` `[` $sizes `]` `[` $strides `]` attr-dict `:`
+ type($source) `to` type($result)
+ }];
let builders = [
OpBuilder<
diff --git a/mlir/include/mlir/Dialect/VectorOps/VectorOps.td b/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
index 70917ff2b882..39bf89c48640 100644
--- a/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
+++ b/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
@@ -715,7 +715,7 @@ def Vector_ReshapeOp :
Variadic<Index>:$output_shape,
I64ArrayAttr:$fixed_vector_sizes,
I32ElementsAttr:$operand_segment_sizes)>,
- Results<(outs AnyVector)> {
+ Results<(outs AnyVector:$result)> {
let summary = "vector reshape operation";
let description = [{
Reshapes its vector operand from 'input_shape' to 'output_shape' maintaining
@@ -822,6 +822,11 @@ def Vector_ReshapeOp :
static StringRef getInputShapeAttrName() { return "input_shape"; }
static StringRef getOutputShapeAttrName() { return "output_shape"; }
}];
+
+ let assemblyFormat = [{
+ $vector `,` `[` $input_shape `]` `,` `[` $output_shape `]` `,`
+ $fixed_vector_sizes attr-dict `:` type($vector) `to` type($result)
+ }];
}
def Vector_StridedSliceOp :
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 6cb1f21ccda5..9f3954581efe 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -1838,48 +1838,6 @@ void mlir::SubViewOp::build(Builder *b, OperationState &result, Type resultType,
resultType);
}
-static ParseResult parseSubViewOp(OpAsmParser &parser, OperationState &result) {
- OpAsmParser::OperandType srcInfo;
- SmallVector<OpAsmParser::OperandType, 4> offsetsInfo;
- SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
- SmallVector<OpAsmParser::OperandType, 4> stridesInfo;
- auto indexType = parser.getBuilder().getIndexType();
- Type srcType, dstType;
- if (parser.parseOperand(srcInfo) ||
- parser.parseOperandList(offsetsInfo, OpAsmParser::Delimiter::Square) ||
- parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
- parser.parseOperandList(stridesInfo, OpAsmParser::Delimiter::Square)) {
- return failure();
- }
-
- auto builder = parser.getBuilder();
- result.addAttribute(
- SubViewOp::getOperandSegmentSizeAttr(),
- builder.getI32VectorAttr({1, static_cast<int>(offsetsInfo.size()),
- static_cast<int32_t>(sizesInfo.size()),
- static_cast<int32_t>(stridesInfo.size())}));
-
- return failure(
- parser.parseOptionalAttrDict(result.attributes) ||
- parser.parseColonType(srcType) ||
- parser.resolveOperand(srcInfo, srcType, result.operands) ||
- parser.resolveOperands(offsetsInfo, indexType, result.operands) ||
- parser.resolveOperands(sizesInfo, indexType, result.operands) ||
- parser.resolveOperands(stridesInfo, indexType, result.operands) ||
- parser.parseKeywordType("to", dstType) ||
- parser.addTypeToList(dstType, result.types));
-}
-
-static void print(OpAsmPrinter &p, SubViewOp op) {
- p << op.getOperationName() << ' ' << op.getOperand(0) << '[' << op.offsets()
- << "][" << op.sizes() << "][" << op.strides() << ']';
-
- std::array<StringRef, 1> elidedAttrs = {
- SubViewOp::getOperandSegmentSizeAttr()};
- p.printOptionalAttrDict(op.getAttrs(), elidedAttrs);
- p << " : " << op.getOperand(0).getType() << " to " << op.getType();
-}
-
static LogicalResult verify(SubViewOp op) {
auto baseType = op.getBaseMemRefType().cast<MemRefType>();
auto subViewType = op.getType();
diff --git a/mlir/lib/Dialect/VectorOps/VectorOps.cpp b/mlir/lib/Dialect/VectorOps/VectorOps.cpp
index 53c5cbd57319..345ee9a9dfe5 100644
--- a/mlir/lib/Dialect/VectorOps/VectorOps.cpp
+++ b/mlir/lib/Dialect/VectorOps/VectorOps.cpp
@@ -963,58 +963,6 @@ static LogicalResult verify(OuterProductOp op) {
// ReshapeOp
//===----------------------------------------------------------------------===//
-static void print(OpAsmPrinter &p, ReshapeOp op) {
- p << op.getOperationName() << " " << op.vector() << ", [" << op.input_shape()
- << "], [" << op.output_shape() << "], " << op.fixed_vector_sizes();
- std::array<StringRef, 2> elidedAttrs = {
- ReshapeOp::getOperandSegmentSizeAttr(),
- ReshapeOp::getFixedVectorSizesAttrName()};
- p.printOptionalAttrDict(op.getAttrs(), elidedAttrs);
- p << " : " << op.getInputVectorType() << " to " << op.getOutputVectorType();
-}
-
-// TODO(b/146516564) Consider passing number of inner vector dimensions that
-// are fixed, instead of their values in 'fixesVectorSizes' array attr.
-//
-// operation ::= ssa-id `=` `vector.reshape` ssa-use, `[` ssa-use-list `]`,
-// `[` ssa-use-list `]`, `[` array-attribute `]`
-// `:` vector-type 'to' vector-type
-//
-static ParseResult parseReshapeOp(OpAsmParser &parser, OperationState &result) {
- OpAsmParser::OperandType inputInfo;
- SmallVector<OpAsmParser::OperandType, 4> inputShapeInfo;
- SmallVector<OpAsmParser::OperandType, 4> outputShapeInfo;
- ArrayAttr fixedVectorSizesAttr;
- StringRef attrName = ReshapeOp::getFixedVectorSizesAttrName();
- auto indexType = parser.getBuilder().getIndexType();
- if (parser.parseOperand(inputInfo) || parser.parseComma() ||
- parser.parseOperandList(inputShapeInfo, OpAsmParser::Delimiter::Square) ||
- parser.parseComma() ||
- parser.parseOperandList(outputShapeInfo,
- OpAsmParser::Delimiter::Square) ||
- parser.parseComma()) {
- return failure();
- }
-
- auto builder = parser.getBuilder();
- result.addAttribute(
- ReshapeOp::getOperandSegmentSizeAttr(),
- builder.getI32VectorAttr({1, static_cast<int32_t>(inputShapeInfo.size()),
- static_cast<int32_t>(outputShapeInfo.size())}));
- Type inputType;
- Type outputType;
- return failure(
- parser.parseAttribute(fixedVectorSizesAttr, attrName,
- result.attributes) ||
- parser.parseOptionalAttrDict(result.attributes) ||
- parser.parseColonType(inputType) ||
- parser.resolveOperand(inputInfo, inputType, result.operands) ||
- parser.resolveOperands(inputShapeInfo, indexType, result.operands) ||
- parser.resolveOperands(outputShapeInfo, indexType, result.operands) ||
- parser.parseKeywordType("to", outputType) ||
- parser.addTypeToList(outputType, result.types));
-}
-
static LogicalResult verify(ReshapeOp op) {
// Verify that rank(numInputs/outputs) + numFixedVec dim matches vec rank.
auto inputVectorType = op.getInputVectorType();
diff --git a/mlir/test/Conversion/LoopsToGPU/parallel_loop.mlir b/mlir/test/Conversion/LoopsToGPU/parallel_loop.mlir
index 24ea0320f0ac..4bc97da954ff 100644
--- a/mlir/test/Conversion/LoopsToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/parallel_loop.mlir
@@ -270,17 +270,17 @@ module {
// CHECK: [[VAL_31:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_30]]]
// CHECK: [[VAL_32:%.*]] = dim [[VAL_0]], 1 : memref<?x?xf32, #[[MAP0]]>
// CHECK: [[VAL_33:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_32]]]
-// CHECK: [[VAL_34:%.*]] = std.subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]]{{\[}}[[VAL_31]], [[VAL_33]]]{{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
+// CHECK: [[VAL_34:%.*]] = subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31]], [[VAL_33]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
// CHECK: [[VAL_35:%.*]] = dim [[VAL_1]], 0 : memref<?x?xf32, #[[MAP0]]>
// CHECK: [[VAL_36:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_35]]]
// CHECK: [[VAL_37:%.*]] = dim [[VAL_1]], 1 : memref<?x?xf32, #[[MAP0]]>
// CHECK: [[VAL_38:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_37]]]
-// CHECK: [[VAL_39:%.*]] = std.subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]]{{\[}}[[VAL_36]], [[VAL_38]]]{{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
+// CHECK: [[VAL_39:%.*]] = subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
// CHECK: [[VAL_40:%.*]] = dim [[VAL_2]], 0 : memref<?x?xf32, #[[MAP0]]>
// CHECK: [[VAL_41:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_40]]]
// CHECK: [[VAL_42:%.*]] = dim [[VAL_2]], 1 : memref<?x?xf32, #[[MAP0]]>
// CHECK: [[VAL_43:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_42]]]
-// CHECK: [[VAL_44:%.*]] = std.subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]]{{\[}}[[VAL_41]], [[VAL_43]]]{{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
+// CHECK: [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
// CHECK: [[VAL_45:%.*]] = affine.apply #[[MAP2]]([[VAL_22]]){{\[}}[[VAL_3]], [[VAL_4]]]
// CHECK: [[VAL_46:%.*]] = cmpi "slt", [[VAL_45]], [[VAL_31]] : index
// CHECK: loop.if [[VAL_46]] {
diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
index 582e26185382..f844f76a3da6 100644
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -265,17 +265,17 @@ func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
// CHECK: loop.for %[[I:.*]] = %{{.*}} to %[[D_0]] step %{{.*}} {
// CHECK: loop.for %[[J:.*]] = %{{.*}} to %[[B_1]] step %{{.*}} {
// CHECK: loop.for %[[K:.*]] = %{{.*}} to %[[D_1]] step %{{.*}} {
-// CHECK-DAG: %[[D_IK:.*]] = std.subview %[[D]][%[[I]], %[[K]]]
-// CHECK-DAG: %[[B_KJ:.*]] = std.subview %[[B]][%[[K]], %[[J]]]
-// CHECK-DAG: %[[E_IJ:.*]] = std.subview %[[E]][%[[I]], %[[J]]]
+// CHECK-DAG: %[[D_IK:.*]] = subview %[[D]][%[[I]], %[[K]]]
+// CHECK-DAG: %[[B_KJ:.*]] = subview %[[B]][%[[K]], %[[J]]]
+// CHECK-DAG: %[[E_IJ:.*]] = subview %[[E]][%[[I]], %[[J]]]
// CHECK: dim
-// CHECK-DAG: %[[C_I0:.*]] = std.subview %[[C]][%[[I]], %{{.*}}]
-// CHECK-DAG: %[[B_0K:.*]] = std.subview %[[B]][%{{.*}}, %[[K]]]
-// CHECK-DAG: %[[D_IK_:.*]] = std.subview %[[D]][%[[I]], %[[K]]]
+// CHECK-DAG: %[[C_I0:.*]] = subview %[[C]][%[[I]], %{{.*}}]
+// CHECK-DAG: %[[B_0K:.*]] = subview %[[B]][%{{.*}}, %[[K]]]
+// CHECK-DAG: %[[D_IK_:.*]] = subview %[[D]][%[[I]], %[[K]]]
// CHECK: dim
-// CHECK-DAG: %[[A_I0:.*]] = std.subview %[[A]][%[[I]], %{{.*}}]
-// CHECK-DAG: %[[B_00:.*]] = std.subview %[[B]][%{{.*}}, %{{.*}}]
-// CHECK-DAG: %[[C_I0_:.*]] = std.subview %[[C]][%[[I]], %{{.*}}]
+// CHECK-DAG: %[[A_I0:.*]] = subview %[[A]][%[[I]], %{{.*}}]
+// CHECK-DAG: %[[B_00:.*]] = subview %[[B]][%{{.*}}, %{{.*}}]
+// CHECK-DAG: %[[C_I0_:.*]] = subview %[[C]][%[[I]], %{{.*}}]
// CHECK: linalg.matmul(%[[A_I0]], %[[B_00]], %[[C_I0_]])
// CHECK: linalg.matmul(%[[C_I0]], %[[B_0K]], %[[D_IK_]])
// CHECK: linalg.matmul(%[[D_IK]], %[[B_KJ]], %[[E_IJ]])
diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index c28d9f359af8..e9eea206b26e 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -39,9 +39,9 @@ func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-// CHECK: %[[vA:.*]] = std.subview {{.*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK: %[[vB:.*]] = std.subview {{.*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK: %[[vC:.*]] = std.subview {{.*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK: %[[vA:.*]] = subview {{.*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK: %[[vB:.*]] = subview {{.*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK: %[[vC:.*]] = subview {{.*}} : memref<?x?xf32, #[[strided2D]]>
///
// CHECK: %[[tmpA:.*]] = alloc() : memref<32xi8>
// CHECK: %[[fullA:.*]] = std.view %[[tmpA]][][{{.*}}] : memref<32xi8> to memref<?x?xf32>
@@ -104,9 +104,9 @@ func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-// CHECK: %[[vA_f64:.*]] = std.subview {{.*}} : memref<?x?xf64, #[[strided2D]]>
-// CHECK: %[[vB_f64:.*]] = std.subview {{.*}} : memref<?x?xf64, #[[strided2D]]>
-// CHECK: %[[vC_f64:.*]] = std.subview {{.*}} : memref<?x?xf64, #[[strided2D]]>
+// CHECK: %[[vA_f64:.*]] = subview {{.*}} : memref<?x?xf64, #[[strided2D]]>
+// CHECK: %[[vB_f64:.*]] = subview {{.*}} : memref<?x?xf64, #[[strided2D]]>
+// CHECK: %[[vC_f64:.*]] = subview {{.*}} : memref<?x?xf64, #[[strided2D]]>
///
// CHECK: %[[tmpA_f64:.*]] = alloc() : memref<64xi8>
// CHECK: %[[fullA_f64:.*]] = std.view %[[tmpA_f64]][][{{.*}}] : memref<64xi8> to memref<?x?xf64>
@@ -169,9 +169,9 @@ func @matmul_i32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-// CHECK: %[[vA_i32:.*]] = std.subview {{.*}} : memref<?x?xi32, #[[strided2D]]>
-// CHECK: %[[vB_i32:.*]] = std.subview {{.*}} : memref<?x?xi32, #[[strided2D]]>
-// CHECK: %[[vC_i32:.*]] = std.subview {{.*}} : memref<?x?xi32, #[[strided2D]]>
+// CHECK: %[[vA_i32:.*]] = subview {{.*}} : memref<?x?xi32, #[[strided2D]]>
+// CHECK: %[[vB_i32:.*]] = subview {{.*}} : memref<?x?xi32, #[[strided2D]]>
+// CHECK: %[[vC_i32:.*]] = subview {{.*}} : memref<?x?xi32, #[[strided2D]]>
///
// CHECK: %[[tmpA_i32:.*]] = alloc() : memref<32xi8>
// CHECK: %[[fullA_i32:.*]] = std.view %[[tmpA_i32]][][{{.*}}] : memref<32xi8> to memref<?x?xi32>
diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index a8643f9a3c8c..c06447f29c0f 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -45,11 +45,11 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-2: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
// TILE-2: %[[K:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
-// TILE-2: %[[sAi:.*]] = std.subview %{{.*}}[%[[I]], %[[C0]]][%[[szM]], %[[K]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[szM]], %[[K]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-2: %[[localK:.*]] = dim %{{.*}}, 0
// TILE-2: %[[szK:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localK]], %[[I]])
// TILE-2: %[[N:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
-// TILE-2: %[[sCi:.*]] = std.subview %{{.*}}[%[[I]], %[[C0]]][%[[szK]], %[[N]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-2: %[[sCi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[szK]], %[[N]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-2: linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-02-LABEL: func @matmul(
@@ -61,11 +61,11 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-02: %[[K:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
// TILE-02: %[[localN:.*]] = dim %{{.*}}, 1
// TILE-02: %[[szN:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localN]], %[[J]])
-// TILE-02: %[[sBj:.*]] = std.subview %{{.*}}[%[[C0]], %[[J]]][%[[K]], %[[szN]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-02: %[[sBj:.*]] = subview %{{.*}}[%[[C0]], %[[J]]] [%[[K]], %[[szN]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-02: %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
// TILE-02: %[[localK:.*]] = dim %{{.*}}, 1
// TILE-02: %[[szK:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localK]], %[[J]])
-// TILE-02: %[[sCj:.*]] = std.subview %{{.*}}[%[[C0]], %[[J]]][%[[M]], %[[szK]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-02: %[[sCj:.*]] = subview %{{.*}}[%[[C0]], %[[J]]] [%[[M]], %[[szK]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-02: linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-002-LABEL: func @matmul(
@@ -77,11 +77,11 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-002: %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
// TILE-002: %[[localK:.*]] = dim %{{.*}}, 1
// TILE-002: %[[szK:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localK]], %[[K]])
-// TILE-002: %[[sAj:.*]] = std.subview %{{.*}}[%[[C0]], %[[K]]][%[[M]], %[[szK]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-002: %[[sAj:.*]] = subview %{{.*}}[%[[C0]], %[[K]]] [%[[M]], %[[szK]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-002: %[[localK:.*]] = dim %{{.*}}, 0
// TILE-002: %[[szK:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localK]], %[[K]])
// TILE-002: %[[N:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
-// TILE-002: %[[sBj:.*]] = std.subview %{{.*}}[%[[K]], %[[C0]]][%[[szK]], %[[N]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-002: %[[sBj:.*]] = subview %{{.*}}[%[[K]], %[[C0]]] [%[[szK]], %[[N]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-002: linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D]]>
// TILE-234-LABEL: func @matmul(
@@ -100,17 +100,17 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-234: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
// TILE-234: %[[localK:.*]] = dim %{{.*}}, 1
// TILE-234: %[[szK:.*]] = affine.min #[[bound_map]](%[[C4]], %[[localK]], %[[K]])
-// TILE-234: %[[sAik:.*]] = std.subview %{{.*}}[%[[I]], %[[K]]][%[[szM]], %[[szK]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-234: %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%[[szM]], %[[szK]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-234: %[[localK:.*]] = dim %{{.*}}, 0
// TILE-234: %[[szK:.*]] = affine.min #[[bound_map]](%[[C4]], %[[localK]], %[[K]])
// TILE-234: %[[localN:.*]] = dim %{{.*}}, 1
// TILE-234: %[[szN:.*]] = affine.min #[[bound_map]](%[[C3]], %[[localN]], %[[J]])
-// TILE-234: %[[sBkj:.*]] = std.subview %{{.*}}[%[[K]], %[[J]]][%[[szK]], %[[szN]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-234: %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%[[szK]], %[[szN]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-234: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
// TILE-234: %[[localN:.*]] = dim %{{.*}}, 1
// TILE-234: %[[szN:.*]] = affine.min #[[bound_map]](%[[C3]], %[[localN]], %[[J]])
-// TILE-234: %[[sCij:.*]] = std.subview %{{.*}}[%[[I]], %[[J]]][%[[szM]], %[[szN]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-234: %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
//
// TILE-234: linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
@@ -129,9 +129,9 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
// TILE-2: %[[M:.*]] = dim %{{.*}}, 0 : memref<10x16xf32, #[[strided2D]]>
// TILE-2: loop.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
// TILE-2: %[[K:.*]] = dim %{{.*}}, 1 : memref<10x16xf32, #[[strided2D]]>
-// TILE-2: %[[sAi:.*]] = std.subview %{{.*}}[%[[I]], %[[C0]]][%[[C2]], %[[K]]][%[[C1]], %[[C1]]] : memref<10x16xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[C2]], %[[K]]] [%[[C1]], %[[C1]]] : memref<10x16xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-2: %[[N:.*]] = dim %{{.*}}, 1 : memref<10x12xf32, #[[strided2D]]>
-// TILE-2: %[[sCi:.*]] = std.subview %{{.*}}[%[[I]], %[[C0]]][%[[C2]], %[[N]]][%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-2: %[[sCi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[C2]], %[[N]]] [%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-2: linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]])
// TILE-02-LABEL: func @matmul_static(
@@ -142,10 +142,10 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
// TILE-02: loop.for %[[J:.*]] = %{{.*}} to %[[N]] step %{{.*}} {
// TILE-02: %[[K:.*]] = dim %{{.*}}, 0 : memref<16x12xf32, #[[strided2D]]>
// TILE-02-NOT: affine.min
-// TILE-02: %[[sBj:.*]] = std.subview %{{.*}}[%[[C0]], %[[J]]][%[[K]], %[[C2]]][%[[C1]], %[[C1]]] : memref<16x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-02: %[[sBj:.*]] = subview %{{.*}}[%[[C0]], %[[J]]] [%[[K]], %[[C2]]] [%[[C1]], %[[C1]]] : memref<16x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-02: %[[M:.*]] = dim %{{.*}}, 0 : memref<10x12xf32, #[[strided2D]]>
// TILE-02-NOT: affine.min
-// TILE-02: %[[sCj:.*]] = std.subview %{{.*}}[%[[C0]], %[[J]]][%[[M]], %[[C2]]][%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-02: %[[sCj:.*]] = subview %{{.*}}[%[[C0]], %[[J]]] [%[[M]], %[[C2]]] [%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-02: linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<10x16xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-002-LABEL: func @matmul_static(
@@ -156,10 +156,10 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
// TILE-002: loop.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} {
// TILE-002: %[[M:.*]] = dim %{{.*}}, 0 : memref<10x16xf32, #[[strided2D]]>
// TILE-002-NOT: affine.min
-// TILE-002: %[[sAj:.*]] = std.subview %{{.*}}[%[[C0]], %[[K]]][%[[M]], %[[C2]]][%[[C1]], %[[C1]]] : memref<10x16xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-002: %[[sAj:.*]] = subview %{{.*}}[%[[C0]], %[[K]]] [%[[M]], %[[C2]]] [%[[C1]], %[[C1]]] : memref<10x16xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-002: %[[N:.*]] = dim %{{.*}}, 1 : memref<16x12xf32, #[[strided2D]]>
// TILE-002-NOT: affine.min
-// TILE-002: %[[sBj:.*]] = std.subview %{{.*}}[%[[K]], %[[C0]]][%[[C2]], %[[N]]][%[[C1]], %[[C1]]] : memref<16x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-002: %[[sBj:.*]] = subview %{{.*}}[%[[K]], %[[C0]]] [%[[C2]], %[[N]]] [%[[C1]], %[[C1]]] : memref<16x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-002: linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<10x12xf32, #[[strided2D]]>
// TILE-234-LABEL: func @matmul_static(
@@ -175,11 +175,11 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
// TILE-234: loop.for %[[J:.*]] = %{{.*}}{{.*}} to %[[ubN]] step %{{.*}} {
// TILE-234: loop.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} {
// TILE-234-NOT: affine.min
-// TILE-234: %[[sAik:.*]] = std.subview %{{.*}}[%[[I]], %[[K]]][%[[C2]], %[[C4]]][%[[C1]], %[[C1]]] : memref<10x16xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-234: %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%[[C2]], %[[C4]]] [%[[C1]], %[[C1]]] : memref<10x16xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-234-NOT: affine.min
-// TILE-234: %[[sBkj:.*]] = std.subview %{{.*}}[%[[K]], %[[J]]][%[[C4]], %[[C3]]][%[[C1]], %[[C1]]] : memref<16x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-234: %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%[[C4]], %[[C3]]] [%[[C1]], %[[C1]]] : memref<16x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-234-NOT: affine.min
-// TILE-234: %[[sCij:.*]] = std.subview %{{.*}}[%[[I]], %[[J]]][%[[C2]], %[[C3]]][%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-234: %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[C2]], %[[C3]]] [%[[C1]], %[[C1]]] : memref<10x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
//
// TILE-234: linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
@@ -196,10 +196,10 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-2: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
// TILE-2: %[[N:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
-// TILE-2: %[[sAi:.*]] = std.subview %{{.*}}[%[[I]], %[[C0]]][%[[szM]], %[[N]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]], %[[C0]]] [%[[szM]], %[[N]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-2: %[[localN:.*]] = dim %{{.*}}, 0
// TILE-2: %[[szN:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localN]], %[[I]])
-// TILE-2: %[[sCi:.*]] = std.subview %{{.*}}[%[[I]]][%[[szN]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-2: %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szN]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
// TILE-2: linalg.matvec(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D_dynamic]]>
// TILE-02-LABEL: func @matvec(
@@ -211,10 +211,10 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-02: %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
// TILE-02: %[[localN:.*]] = dim %{{.*}}, 1
// TILE-02: %[[szN:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localN]], %[[J]])
-// TILE-02: %[[sAj:.*]] = std.subview %{{.*}}[%[[C0]], %[[J]]][%[[M]], %[[szN]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-02: %[[sAj:.*]] = subview %{{.*}}[%[[C0]], %[[J]]] [%[[M]], %[[szN]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-02: %[[localN:.*]] = dim %{{.*}}, 0
// TILE-02: %[[szN:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localN]], %[[J]])
-// TILE-02: %[[sBj:.*]] = std.subview %{{.*}}[%[[J]]][%[[szN]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-02: %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
// TILE-02: linalg.matvec(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?xf32, #[[strided1D_dynamic]]>, memref<?xf32, #[[strided1D]]>
// TILE-002-LABEL: func @matvec(
@@ -233,13 +233,13 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
// TILE-234: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
// TILE-234: %[[localN:.*]] = dim %{{.*}}, 1
// TILE-234: %[[szN:.*]] = affine.min #[[bound_map]](%[[C3]], %[[localN]], %[[J]])
-// TILE-234: %[[sAij:.*]] = std.subview %{{.*}}[%[[I]], %[[J]]][%[[szM]], %[[szN]]][%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
+// TILE-234: %[[sAij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [%[[C1]], %[[C1]]] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D_dynamic]]>
// TILE-234: %[[localN:.*]] = dim %{{.*}}, 0
// TILE-234: %[[szN:.*]] = affine.min #[[bound_map]](%[[C3]], %[[localN]], %[[J]])
-// TILE-234: %[[sBj:.*]] = std.subview %{{.*}}[%[[J]]][%[[szN]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-234: %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-234: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
-// TILE-234: %[[sCi:.*]] = std.subview %{{.*}}[%[[I]]][%[[szM]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-234: %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
//
// TILE-234: linalg.matvec(%[[sAij]], %[[sBj]], %[[sCi]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?xf32, #[[strided1D_dynamic]]>, memref<?xf32, #[[strided1D_dynamic]]>
@@ -255,10 +255,10 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
// TILE-2: loop.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
// TILE-2: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
-// TILE-2: %[[sAi:.*]] = std.subview %{{.*}}[%[[I]]][%[[szM]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-2: %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
// TILE-2: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-2: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
-// TILE-2: %[[sBi:.*]] = std.subview %{{.*}}[%[[I]]][%[[szM]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-2: %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
// TILE-2: linalg.dot(%[[sAi]], %[[sBi]], {{.*}}) : memref<?xf32, #[[strided1D_dynamic]]>, memref<?xf32, #[[strided1D_dynamic]]>, memref<f32>
// TILE-02-LABEL: func @dot(
@@ -275,10 +275,10 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
// TILE-234: loop.for %[[I:.*]] = %{{.*}} to %[[ubK]] step %{{.*}} {
// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-234: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
-// TILE-234: %[[sAi:.*]] = std.subview %{{.*}}[%[[I]]][%[[szM]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-234: %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
// TILE-234: %[[localM:.*]] = dim %{{.*}}, 0
// TILE-234: %[[szM:.*]] = affine.min #[[bound_map]](%[[C2]], %[[localM]], %[[I]])
-// TILE-234: %[[sBi:.*]] = std.subview %{{.*}}[%[[I]]][%[[szM]]][%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
+// TILE-234: %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [%[[C1]]] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D_dynamic]]>
// TILE-234: linalg.dot(%[[sAi]], %[[sBi]], %{{.*}}) : memref<?xf32, #[[strided1D_dynamic]]>, memref<?xf32, #[[strided1D_dynamic]]>, memref<f32>
func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
@@ -288,13 +288,13 @@ func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
// TILE-2-LABEL: func @fill_static
// TILE-2: for
// TILE-2-NOT: for
-// TILE-2: std.subview{{.*}} : memref<127x99xf32>
+// TILE-2: subview{{.*}} : memref<127x99xf32>
// TILE-2: linalg.fill{{.*}} : memref<?x?xf32, #[[strided2D_dynamic]]>, f32
// TILE-02-LABEL: func @fill_static
// TILE-02: for
// TILE-02-NOT: for
-// TILE-02: std.subview{{.*}} : memref<127x99xf32>
+// TILE-02: subview{{.*}} : memref<127x99xf32>
// TILE-02: linalg.fill{{.*}} : memref<?x?xf32, #[[strided2D_dynamic]]>, f32
// TILE-002-LABEL: func @fill_static
@@ -305,7 +305,7 @@ func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
// TILE-234: for
// TILE-234: for
// TILE-234-NOT: for
-// TILE-234: std.subview{{.*}} : memref<127x99xf32>
+// TILE-234: subview{{.*}} : memref<127x99xf32>
// TILE-234: linalg.fill{{.*}} : memref<?x?xf32, #[[strided2D_dynamic]]>, f32
diff --git a/mlir/test/Dialect/Linalg/tile_conv.mlir b/mlir/test/Dialect/Linalg/tile_conv.mlir
index b95b17cb2b8f..25cabc02efb0 100644
--- a/mlir/test/Dialect/Linalg/tile_conv.mlir
+++ b/mlir/test/Dialect/Linalg/tile_conv.mlir
@@ -29,17 +29,17 @@ func @conv(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1:
// TILE-23004: %[[Z2:.*]] = dim %{{.*}}, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[szK:.*]] = affine.min #[[bound_map]](%[[C4]], %[[Z2]], %[[ivK]])
// TILE-23004: %[[K:.*]] = dim %{{.*}}, 3 : memref<?x?x?x?xf32, #[[strided4D]]>
-// TILE-23004: %[[FilterView:.*]] = std.subview %{{.*}}[%[[C0]], %[[C0]], %[[ivK]], %[[C0]]][%[[Z0]], %[[Z1]], %[[szK]], %[[K]]][%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D_dynamic]]>
+// TILE-23004: %[[FilterView:.*]] = subview %{{.*}}[%[[C0]], %[[C0]], %[[ivK]], %[[C0]]] [%[[Z0]], %[[Z1]], %[[szK]], %[[K]]] [%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D_dynamic]]>
//
// TILE-23004: %[[J1:.*]] = affine.apply #[[D0x30pS0x10]](%[[ivJ]])
// T__ILE-23004: %[[I1pStep:.*]] = affine.apply #[[S0x10p90]]()[%[[I1]]]
// TILE-23004: %[[SZ2:.*]] = dim %{{.*}}, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[dim3:.*]] = dim %{{.*}}, 3
// TILE-23004: %[[sz3:.*]] = affine.min #[[bound_map]](%[[C4]], %[[dim3]], %[[ivK]]
-// TILE-23004: %[[InputView:.*]] = std.subview %{{.*}}[%[[ivI]], %[[J1]], %[[C0]], %[[ivK]]][%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]][%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D_dynamic]]>
+// TILE-23004: %[[InputView:.*]] = subview %{{.*}}[%[[ivI]], %[[J1]], %[[C0]], %[[ivK]]] [%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]] [%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D_dynamic]]>
//
// TILE-23004: %[[X0:.*]] = dim %{{.*}}, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[X1:.*]] = dim %{{.*}}, 3 : memref<?x?x?x?xf32, #[[strided4D]]>
-// TILE-23004: %[[OutputView:.*]] = std.subview %{{.*}}[%[[ivI]], %[[ivJ]], %[[C0]], %[[C0]]][%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]][%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D_dynamic]]>
+// TILE-23004: %[[OutputView:.*]] = subview %{{.*}}[%[[ivI]], %[[ivJ]], %[[C0]], %[[C0]]] [%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]] [%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D_dynamic]]>
//
// TILE-23004: linalg.conv(%[[FilterView]], %[[InputView]], %[[OutputView]]) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, #[[strided4D_dynamic]]>, memref<?x?x?x?xf32, #[[strided4D_dynamic]]>, memref<?x?x?x?xf32, #[[strided4D_dynamic]]>
diff --git a/mlir/test/Dialect/Linalg/tile_parallel.mlir b/mlir/test/Dialect/Linalg/tile_parallel.mlir
index 7dbcce9e3a8a..7db9da0715aa 100644
--- a/mlir/test/Dialect/Linalg/tile_parallel.mlir
+++ b/mlir/test/Dialect/Linalg/tile_parallel.mlir
@@ -31,9 +31,9 @@ func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// TILE-2: [[LHS_ROWS:%.*]] = dim [[LHS]], 0
// TILE-2: loop.parallel ([[I:%.*]]) = ([[C0]]) to ([[LHS_ROWS]]) step ([[C2]]) {
// TILE-2-NO: loop.parallel
-// TILE-2: [[LHS_SUBVIEW:%.*]] = std.subview [[LHS]]
-// TILE-2: [[RHS_SUBVIEW:%.*]] = std.subview [[RHS]]
-// TILE-2: [[SUM_SUBVIEW:%.*]] = std.subview [[SUM]]
+// TILE-2: [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
+// TILE-2: [[RHS_SUBVIEW:%.*]] = subview [[RHS]]
+// TILE-2: [[SUM_SUBVIEW:%.*]] = subview [[SUM]]
// TILE-2: linalg.generic {{.*}} [[LHS_SUBVIEW]], [[RHS_SUBVIEW]], [[SUM_SUBVIEW]] {
// TILE-02-LABEL: func @sum(
@@ -44,9 +44,9 @@ func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// TILE-02: [[LHS_COLS:%.*]] = dim [[LHS]], 1
// TILE-02: loop.parallel ([[I:%.*]]) = ([[C0]]) to ([[LHS_COLS]]) step ([[C2]]) {
// TILE-02-NO: loop.parallel
-// TILE-02: [[LHS_SUBVIEW:%.*]] = std.subview [[LHS]]
-// TILE-02: [[RHS_SUBVIEW:%.*]] = std.subview [[RHS]]
-// TILE-02: [[SUM_SUBVIEW:%.*]] = std.subview [[SUM]]
+// TILE-02: [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
+// TILE-02: [[RHS_SUBVIEW:%.*]] = subview [[RHS]]
+// TILE-02: [[SUM_SUBVIEW:%.*]] = subview [[SUM]]
// TILE-02: linalg.generic {{.*}} [[LHS_SUBVIEW]], [[RHS_SUBVIEW]], [[SUM_SUBVIEW]] {
// TILE-002-LABEL: func @sum(
@@ -64,7 +64,7 @@ func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// TILE-234: [[LHS_COLS:%.*]] = dim [[LHS]], 1
// TILE-234: loop.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]]) to ([[LHS_ROWS]], [[LHS_COLS]]) step ([[C2]], [[C3]]) {
// TILE-234-NO: loop.parallel
-// TILE-234: [[LHS_SUBVIEW:%.*]] = std.subview [[LHS]]
-// TILE-234: [[RHS_SUBVIEW:%.*]] = std.subview [[RHS]]
-// TILE-234: [[SUM_SUBVIEW:%.*]] = std.subview [[SUM]]
+// TILE-234: [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
+// TILE-234: [[RHS_SUBVIEW:%.*]] = subview [[RHS]]
+// TILE-234: [[SUM_SUBVIEW:%.*]] = subview [[SUM]]
// TILE-234: linalg.generic {{.*}} [[LHS_SUBVIEW]], [[RHS_SUBVIEW]], [[SUM_SUBVIEW]] {
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index 520be05bca7d..a0a7b74d4257 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -365,9 +365,9 @@ func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
// CHECK : loop.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
-// CHECK : %[[s0:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
-// CHECK : %[[s1:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
-// CHECK : %[[s2:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}][{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
+// CHECK : %[[s0:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
+// CHECK : %[[s1:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
+// CHECK : %[[s2:.*]] = std.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
// CHECK : %[[a0:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK : %[[v0:.*]] = std.view %[[a0]][][{{%.*}}, {{%.*}}]: memref<?xi8> to memref<?x?xf32>
// CHECK : %[[l0:.*]] = linalg.slice %[[v0]][{{%.*}}, {{%.*}}] : memref<?x?xf32>, !linalg.range, !linalg.range, memref<?x?xf32, #map{{.*}}>
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index e2f6f57ba0a1..454ecfc53791 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -682,39 +682,39 @@ func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%c1 = constant 1 : index
%0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
- // CHECK: std.subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP0]]>
+ // CHECK: subview %0[%c0, %c0, %c0] [%arg0, %arg1, %arg2] [%c1, %c1, %c1] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP0]]>
%1 = subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1]
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
%2 = alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
- // CHECK: std.subview %2[%c1][%arg0][%c1] : memref<64xf32, #[[BASE_MAP1]]> to memref<?xf32, #[[SUBVIEW_MAP1]]>
+ // CHECK: subview %2[%c1] [%arg0] [%c1] : memref<64xf32, #[[BASE_MAP1]]> to memref<?xf32, #[[SUBVIEW_MAP1]]>
%3 = subview %2[%c1][%arg0][%c1]
: memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to
memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
%4 = alloc() : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>>
- // CHECK: std.subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0] : memref<64x22xf32, #[[BASE_MAP2]]> to memref<?x?xf32, #[[SUBVIEW_MAP2]]>
+ // CHECK: subview %4[%c0, %c1] [%arg0, %arg1] [%c1, %c0] : memref<64x22xf32, #[[BASE_MAP2]]> to memref<?x?xf32, #[[SUBVIEW_MAP2]]>
%5 = subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0]
: memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>> to
memref<?x?xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>>
- // CHECK: std.subview %0[][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<4x4x4xf32, #[[SUBVIEW_MAP3]]>
+ // CHECK: subview %0[] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<4x4x4xf32, #[[SUBVIEW_MAP3]]>
%6 = subview %0[][][]
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<4x4x4xf32, affine_map<(d0, d1, d2) -> (d0 * 16 + d1 * 4 + d2 + 8)>>
%7 = alloc(%arg1, %arg2) : memref<?x?xf32>
- // CHECK: std.subview {{%.*}}[][][] : memref<?x?xf32> to memref<4x4xf32, #[[SUBVIEW_MAP4]]>
+ // CHECK: subview {{%.*}}[] [] [] : memref<?x?xf32> to memref<4x4xf32, #[[SUBVIEW_MAP4]]>
%8 = subview %7[][][]
: memref<?x?xf32> to memref<4x4xf32, offset: ?, strides:[?, ?]>
%9 = alloc() : memref<16x4xf32>
- // CHECK: std.subview {{%.*}}[{{%.*}}, {{%.*}}][][{{%.*}}, {{%.*}}] : memref<16x4xf32> to memref<4x4xf32, #[[SUBVIEW_MAP4]]
+ // CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [] [{{%.*}}, {{%.*}}] : memref<16x4xf32> to memref<4x4xf32, #[[SUBVIEW_MAP4]]
%10 = subview %9[%arg1, %arg1][][%arg2, %arg2]
: memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[?, ?]>
- // CHECK: std.subview {{%.*}}[{{%.*}}, {{%.*}}][][] : memref<16x4xf32> to memref<4x4xf32, #[[SUBVIEW_MAP5]]
+ // CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [] [] : memref<16x4xf32> to memref<4x4xf32, #[[SUBVIEW_MAP5]]
%11 = subview %9[%arg1, %arg2][][]
: memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[8, 2]>
return
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index e375a43af6f9..746605452baa 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -437,7 +437,7 @@ func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>,
affine.for %arg4 = 0 to %ub {
%s = dim %0, 0 : memref<?x?xf32>
%v = std.view %3[%c0][%arg4, %s] : memref<?xi8> to memref<?x?xf32, #map1>
- %sv = std.subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref<?x?xf32> to memref<?x?xf32, #map1>
+ %sv = subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref<?x?xf32> to memref<?x?xf32, #map1>
%l = dim %v, 1 : memref<?x?xf32, #map1>
%u = dim %sv, 0 : memref<?x?xf32, #map1>
affine.for %arg5 = %l to %u {
@@ -777,8 +777,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview with constant base memref and constant operands is folded.
// Note that the subview uses the base memrefs layout map because it used
// zero offset and unit stride arguments.
- // CHECK: std.subview %[[ALLOC0]][][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[BASE_MAP0]]>
- %1 = subview %0[%c0, %c0, %c0][%c7, %c11, %c2][%c1, %c1, %c1]
+ // CHECK: subview %[[ALLOC0]][] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[BASE_MAP0]]>
+ %1 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c1, %c1, %c1]
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
@@ -786,8 +786,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview with one dynamic operand should not be folded.
- // CHECK: std.subview %[[ALLOC0]][%[[C0]], %[[ARG0]], %[[C0]]][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[SUBVIEW_MAP0]]>
- %2 = subview %0[%c0, %arg0, %c0][%c7, %c11, %c15][%c1, %c1, %c1]
+ // CHECK: subview %[[ALLOC0]][%[[C0]], %[[ARG0]], %[[C0]]] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[SUBVIEW_MAP0]]>
+ %2 = subview %0[%c0, %arg0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
@@ -797,8 +797,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// CHECK: %[[ALLOC1:.*]] = alloc(%[[ARG0]])
%3 = alloc(%arg0) : memref<?x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
// Test: subview with constant operands but dynamic base memref is folded as long as the strides and offset of the base memref are static.
- // CHECK: std.subview %[[ALLOC1]][][][] : memref<?x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[BASE_MAP0]]>
- %4 = subview %3[%c0, %c0, %c0][%c7, %c11, %c15][%c1, %c1, %c1]
+ // CHECK: subview %[[ALLOC1]][] [] [] : memref<?x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x15xf32, #[[BASE_MAP0]]>
+ %4 = subview %3[%c0, %c0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
: memref<?x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
@@ -806,8 +806,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview offset operands are folded correctly w.r.t. base strides.
- // CHECK: std.subview %[[ALLOC0]][][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP1]]>
- %5 = subview %0[%c1, %c2, %c7][%c7, %c11, %c2][%c1, %c1, %c1]
+ // CHECK: subview %[[ALLOC0]][] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP1]]>
+ %5 = subview %0[%c1, %c2, %c7] [%c7, %c11, %c2] [%c1, %c1, %c1]
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
@@ -815,8 +815,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview stride operands are folded correctly w.r.t. base strides.
- // CHECK: std.subview %[[ALLOC0]][][][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP2]]>
- %6 = subview %0[%c0, %c0, %c0][%c7, %c11, %c2][%c2, %c7, %c11]
+ // CHECK: subview %[[ALLOC0]][] [] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP2]]>
+ %6 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c2, %c7, %c11]
: memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
memref<?x?x?xf32,
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
@@ -824,35 +824,35 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
// Test: subview shape are folded, but offsets and strides are not even if base memref is static
- // CHECK: std.subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]][][%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
- %10 = subview %0[%arg0, %arg0, %arg0][%c7, %c11, %c2][%arg1, %arg1, %arg1] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
+ // CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
+ %10 = subview %0[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
load %10[%arg1, %arg1, %arg1] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
// Test: subview strides are folded, but offsets and shape are not even if base memref is static
- // CHECK: std.subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]][%[[ARG1]], %[[ARG1]], %[[ARG1]]][] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP4]]
- %11 = subview %0[%arg0, %arg0, %arg0][%arg1, %arg1, %arg1][%c2, %c7, %c11] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
+ // CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP4]]
+ %11 = subview %0[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c7, %c11] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
load %11[%arg0, %arg0, %arg0] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
// Test: subview offsets are folded, but strides and shape are not even if base memref is static
- // CHECK: std.subview %[[ALLOC0]][][%[[ARG1]], %[[ARG1]], %[[ARG1]]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP5]]
- %13 = subview %0[%c1, %c2, %c7][%arg1, %arg1, %arg1][%arg0, %arg0, %arg0] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
+ // CHECK: subview %[[ALLOC0]][] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] : memref<8x16x4xf32, #[[BASE_MAP0]]> to memref<?x?x?xf32, #[[SUBVIEW_MAP5]]
+ %13 = subview %0[%c1, %c2, %c7] [%arg1, %arg1, %arg1] [%arg0, %arg0, %arg0] : memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
load %13[%arg1, %arg1, %arg1] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
// CHECK: %[[ALLOC2:.*]] = alloc(%[[ARG0]], %[[ARG0]], %[[ARG1]])
%14 = alloc(%arg0, %arg0, %arg1) : memref<?x?x?xf32>
// Test: subview shape are folded, even if base memref is not static
- // CHECK: std.subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]][][%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<?x?x?xf32> to memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
- %15 = subview %14[%arg0, %arg0, %arg0][%c7, %c11, %c2][%arg1, %arg1, %arg1] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
+ // CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<?x?x?xf32> to memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
+ %15 = subview %14[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
load %15[%arg1, %arg1, %arg1] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
// TEST: subview strides are not folded when the base memref is not static
- // CHECK: std.subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]][%[[ARG1]], %[[ARG1]], %[[ARG1]]][%[[C2]], %[[C2]], %[[C2]]] : memref<?x?x?xf32> to memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
- %16 = subview %14[%arg0, %arg0, %arg0][%arg1, %arg1, %arg1][%c2, %c2, %c2] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
+ // CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [%[[C2]], %[[C2]], %[[C2]]] : memref<?x?x?xf32> to memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
+ %16 = subview %14[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c2, %c2] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
load %16[%arg0, %arg0, %arg0] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
// TEST: subview offsets are not folded when the base memref is not static
- // CHECK: std.subview %[[ALLOC2]][%[[C1]], %[[C1]], %[[C1]]][%[[ARG0]], %[[ARG0]], %[[ARG0]]][%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<?x?x?xf32> to memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
- %17 = subview %14[%c1, %c1, %c1][%arg0, %arg0, %arg0][%arg1, %arg1, %arg1] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
+ // CHECK: subview %[[ALLOC2]][%[[C1]], %[[C1]], %[[C1]]] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] : memref<?x?x?xf32> to memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
+ %17 = subview %14[%c1, %c1, %c1] [%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] : memref<?x?x?xf32> to memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
load %17[%arg0, %arg0, %arg0] : memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
// CHECK: %[[ALLOC3:.*]] = alloc() : memref<12x4xf32>
@@ -860,13 +860,13 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
%c4 = constant 4 : index
// TEST: subview strides are maintained when sizes are folded
- // CHECK: std.subview %[[ALLOC3]][%arg1, %arg1][][] : memref<12x4xf32> to memref<2x4xf32, #[[SUBVIEW_MAP6]]>
- %19 = subview %18[%arg1, %arg1][%c2, %c4][] : memref<12x4xf32> to memref<?x?xf32, offset: ?, strides:[4, 1]>
+ // CHECK: subview %[[ALLOC3]][%arg1, %arg1] [] [] : memref<12x4xf32> to memref<2x4xf32, #[[SUBVIEW_MAP6]]>
+ %19 = subview %18[%arg1, %arg1] [%c2, %c4] [] : memref<12x4xf32> to memref<?x?xf32, offset: ?, strides:[4, 1]>
load %19[%arg1, %arg1] : memref<?x?xf32, offset: ?, strides:[4, 1]>
// TEST: subview strides and sizes are maintained when offsets are folded
- // CHECK: std.subview %[[ALLOC3]][][][] : memref<12x4xf32> to memref<12x4xf32, #[[SUBVIEW_MAP7]]>
- %20 = subview %18[%c2, %c4][][] : memref<12x4xf32> to memref<12x4xf32, offset: ?, strides:[4, 1]>
+ // CHECK: subview %[[ALLOC3]][] [] [] : memref<12x4xf32> to memref<12x4xf32, #[[SUBVIEW_MAP7]]>
+ %20 = subview %18[%c2, %c4] [] [] : memref<12x4xf32> to memref<12x4xf32, offset: ?, strides:[4, 1]>
load %20[%arg1, %arg1] : memref<12x4xf32, offset: ?, strides:[4, 1]>
// Test: dim on subview is rewritten to size operand.
diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
index 17c85195820e..1f92e1e9d962 100644
--- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
@@ -295,7 +295,7 @@ struct OperationFormat {
};
OperationFormat(const Operator &op)
- : allOperandTypes(false), allResultTypes(false) {
+ : allOperands(false), allOperandTypes(false), allResultTypes(false) {
operandTypes.resize(op.getNumOperands(), TypeResolution());
resultTypes.resize(op.getNumResults(), TypeResolution());
}
@@ -307,6 +307,8 @@ struct OperationFormat {
void genParserTypeResolution(Operator &op, OpMethodBody &body);
/// Generate the c++ to resolve successors during parsing.
void genParserSuccessorResolution(Operator &op, OpMethodBody &body);
+ /// Generate the C++ to handle variadic segment size traits.
+ void genParserVariadicSegmentResolution(Operator &op, OpMethodBody &body);
/// Generate the operation printer from this format.
void genPrinter(Operator &op, OpClass &opClass);
@@ -316,7 +318,7 @@ struct OperationFormat {
/// A flag indicating if all operand/result types were seen. If the format
/// contains these, it can not contain individual type resolvers.
- bool allOperandTypes, allResultTypes;
+ bool allOperands, allOperandTypes, allResultTypes;
/// A map of buildable types to indices.
llvm::MapVector<StringRef, int, llvm::StringMap<int>> buildableTypes;
@@ -380,14 +382,10 @@ const char *const enumAttrParserCode = R"(
///
/// {0}: The name of the operand.
const char *const variadicOperandParserCode = R"(
- llvm::SMLoc {0}OperandsLoc = parser.getCurrentLocation();
- (void){0}OperandsLoc;
if (parser.parseOperandList({0}Operands))
return failure();
)";
const char *const operandParserCode = R"(
- llvm::SMLoc {0}OperandsLoc = parser.getCurrentLocation();
- (void){0}OperandsLoc;
if (parser.parseOperand({0}RawOperands[0]))
return failure();
)";
@@ -507,13 +505,18 @@ static void genElementParserStorage(Element *element, OpMethodBody &body) {
genElementParserStorage(&childElement, body);
} else if (auto *operand = dyn_cast<OperandVariable>(element)) {
StringRef name = operand->getVar()->name;
- if (operand->getVar()->isVariadic())
+ if (operand->getVar()->isVariadic()) {
body << " SmallVector<OpAsmParser::OperandType, 4> " << name
<< "Operands;\n";
- else
+ } else {
body << " OpAsmParser::OperandType " << name << "RawOperands[1];\n"
<< " ArrayRef<OpAsmParser::OperandType> " << name << "Operands("
<< name << "RawOperands);";
+ }
+ body << llvm::formatv(
+ " llvm::SMLoc {0}OperandsLoc = parser.getCurrentLocation();\n"
+ " (void){0}OperandsLoc;\n",
+ name);
} else if (auto *dir = dyn_cast<TypeDirective>(element)) {
bool variadic = false;
StringRef name = getTypeListName(dir->getOperand(), variadic);
@@ -654,6 +657,8 @@ void OperationFormat::genParser(Operator &op, OpClass &opClass) {
// that they have been parsed.
genParserTypeResolution(op, body);
genParserSuccessorResolution(op, body);
+ genParserVariadicSegmentResolution(op, body);
+
body << " return success();\n";
}
@@ -727,14 +732,10 @@ void OperationFormat::genParserTypeResolution(Operator &op,
if (op.getNumOperands() == 0)
return;
- // Flag indicating if operands were dumped all together in a group.
- bool hasAllOperands = llvm::any_of(
- elements, [](auto &elt) { return isa<OperandsDirective>(elt.get()); });
-
// Handle the case where all operand types are in one group.
if (allOperandTypes) {
// If we have all operands together, use the full operand list directly.
- if (hasAllOperands) {
+ if (allOperands) {
body << " if (parser.resolveOperands(allOperands, allOperandTypes, "
"allOperandLoc, result.operands))\n"
" return failure();\n";
@@ -758,7 +759,7 @@ void OperationFormat::genParserTypeResolution(Operator &op,
return;
}
// Handle the case where all of the operands were grouped together.
- if (hasAllOperands) {
+ if (allOperands) {
body << " if (parser.resolveOperands(allOperands, ";
// Group all of the operand types together to perform the resolution all at
@@ -817,12 +818,29 @@ void OperationFormat::genParserSuccessorResolution(Operator &op,
}
}
+void OperationFormat::genParserVariadicSegmentResolution(Operator &op,
+ OpMethodBody &body) {
+ if (!allOperands && op.getTrait("OpTrait::AttrSizedOperandSegments")) {
+ body << " result.addAttribute(\"operand_segment_sizes\", "
+ << "builder.getI32VectorAttr({";
+ auto interleaveFn = [&](const NamedTypeConstraint &operand) {
+ // If the operand is variadic emit the parsed size.
+ if (operand.isVariadic())
+ body << "static_cast<int32_t>(" << operand.name << "Operands.size())";
+ else
+ body << "1";
+ };
+ interleaveComma(op.getOperands(), body, interleaveFn);
+ body << "}));\n";
+ }
+}
+
//===----------------------------------------------------------------------===//
// PrinterGen
/// Generate the printer for the 'attr-dict' directive.
-static void genAttrDictPrinter(OperationFormat &fmt, OpMethodBody &body,
- bool withKeyword) {
+static void genAttrDictPrinter(OperationFormat &fmt, Operator &op,
+ OpMethodBody &body, bool withKeyword) {
// Collect all of the attributes used in the format, these will be elided.
SmallVector<const NamedAttribute *, 1> usedAttributes;
for (auto &it : fmt.elements)
@@ -831,6 +849,9 @@ static void genAttrDictPrinter(OperationFormat &fmt, OpMethodBody &body,
body << " p.printOptionalAttrDict" << (withKeyword ? "WithKeyword" : "")
<< "(getAttrs(), /*elidedAttrs=*/{";
+ // Elide the variadic segment size attributes if necessary.
+ if (!fmt.allOperands && op.getTrait("OpTrait::AttrSizedOperandSegments"))
+ body << "\"operand_segment_sizes\", ";
interleaveComma(usedAttributes, body, [&](const NamedAttribute *attr) {
body << "\"" << attr->name << "\"";
});
@@ -903,7 +924,7 @@ static void genElementPrinter(Element *element, OpMethodBody &body,
// Emit the attribute dictionary.
if (auto *attrDict = dyn_cast<AttrDictDirective>(element)) {
- genAttrDictPrinter(fmt, body, attrDict->isWithKeyword());
+ genAttrDictPrinter(fmt, op, body, attrDict->isWithKeyword());
lastWasPunctuation = false;
return;
}
@@ -1439,6 +1460,11 @@ LogicalResult FormatParser::parse() {
}
}
}
+
+ // Check to see if we are formatting all of the operands.
+ fmt.allOperands = llvm::any_of(fmt.elements, [](auto &elt) {
+ return isa<OperandsDirective>(elt.get());
+ });
return success();
}
More information about the Mlir-commits
mailing list