[Mlir-commits] [mlir] 2865d11 - [mlir] Use ReassociationIndices instead of affine maps in linalg.reshape.

Alexander Belyaev llvmlistbot at llvm.org
Wed May 5 04:00:20 PDT 2021


Author: Alexander Belyaev
Date: 2021-05-05T12:59:57+02:00
New Revision: 2865d114f953a0c05df2663f4569704c9fe35eb0

URL: https://github.com/llvm/llvm-project/commit/2865d114f953a0c05df2663f4569704c9fe35eb0
DIFF: https://github.com/llvm/llvm-project/commit/2865d114f953a0c05df2663f4569704c9fe35eb0.diff

LOG: [mlir] Use ReassociationIndices instead of affine maps in linalg.reshape.

Differential Revision: https://reviews.llvm.org/D101861
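This switches the `reassociation` attribute of linalg.reshape and
linalg.tensor_reshape from an array of affine maps, e.g.
`[affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (k)>]`, to plain
index lists, e.g. `[[0, 1], [2]]`, and makes the index-based C++ builders the
primary ones. A minimal sketch of the new builder usage (not from the patch;
`collapseLastTwoDims` is a hypothetical helper, and `ReassociationIndices` is
assumed to be the `SmallVector<int64_t, 2>` alias from the Linalg headers):

    #include "mlir/Dialect/Linalg/IR/LinalgOps.h"

    using namespace mlir;
    using namespace mlir::linalg;

    // Collapse dims {1, 2} of a tensor<?x?x?xf32>, i.e. groups [[0], [1, 2]].
    static Value collapseLastTwoDims(OpBuilder &b, Location loc, Value src) {
      SmallVector<ReassociationIndices, 2> reassociation = {{0}, {1, 2}};
      // The result type (tensor<?x?xf32>) is computed from `src` and the groups.
      return b.create<TensorReshapeOp>(loc, src, reassociation);
    }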

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
    mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
    mlir/test/Dialect/Linalg/canonicalize.mlir
    mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
    mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
    mlir/test/Dialect/Linalg/invalid.mlir
    mlir/test/Dialect/Linalg/llvm.mlir
    mlir/test/Dialect/Linalg/reshape_fusion.mlir
    mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
    mlir/test/Dialect/Linalg/roundtrip.mlir
    mlir/test/EDSC/builder-api-test.cpp

Removed: 
    


################################################################################
diff  --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
index 51c24cda736f1..b3046d2200b6c 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -315,55 +315,54 @@ class Linalg_ReshapeLikeOp<string mnemonic, list<OpTrait> traits = []> :
     // Builders for a contracting reshape whose result type is computed from
     // `src` and `reassociation`.
     OpBuilder<(ins "Value":$src,
-      "ArrayRef<ReassociationExprs>":$reassociation,
+      "ArrayRef<ReassociationIndices>":$reassociation,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     OpBuilder<(ins "Value":$src,
-      "ArrayRef<ReassociationIndices>":$reassociation,
+      "ArrayRef<ReassociationExprs>":$reassociation,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
     [{
       auto reassociationMaps =
-          convertReassociationIndicesToMaps($_builder, reassociation);
+          convertReassociationMapsToIndices($_builder, reassociation);
       build($_builder, $_state, src, reassociationMaps, attrs);
     }]>,
 
     // Builders for a reshape whose result type is passed explicitly. This may
     // be either a contracting or expanding reshape.
     OpBuilder<(ins "Type":$resultType, "Value":$src,
-      "ArrayRef<ReassociationExprs>":$reassociation,
+      "ArrayRef<ReassociationIndices>":$reassociation,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
     OpBuilder<(ins "Type":$resultType, "Value":$src,
-      "ArrayRef<ReassociationIndices>":$reassociation,
+      "ArrayRef<ReassociationExprs>":$reassociation,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
     [{
       auto reassociationMaps =
-          convertReassociationIndicesToMaps($_builder, reassociation);
+          convertReassociationMapsToIndices($_builder, reassociation);
       build($_builder, $_state, resultType, src, reassociationMaps, attrs);
     }]>
   ];
 
   code commonExtraClassDeclaration = [{
     static StringRef getReassociationAttrName() { return "reassociation"; }
-    SmallVector<AffineMap, 4> getReassociationMaps() {
-      return llvm::to_vector<4>(llvm::map_range(reassociation(), [
-      ](Attribute a) { return a.cast<AffineMapAttr>().getValue(); }));
-    }
-    SmallVector<ReassociationExprs, 4> getReassociationExprs() {
-      return
-        llvm::to_vector<4>(llvm::map_range(reassociation(),
-          [](Attribute a) {
-            return llvm::to_vector<2>(
-              a.cast<AffineMapAttr>().getValue().getResults());
-          }));
-    }
-  }];
-  let assemblyFormat = [{
-    $src $reassociation attr-dict `:` type($src) `into` type(results)
+    SmallVector<AffineMap, 4> getReassociationMaps();
+    SmallVector<ReassociationExprs, 4> getReassociationExprs();
+    SmallVector<ReassociationIndices, 4> getReassociationIndices() {
+      SmallVector<ReassociationIndices, 4> reassociationIndices;
+      for (auto attr : reassociation())
+        reassociationIndices.push_back(llvm::to_vector<2>(
+            llvm::map_range(attr.cast<ArrayAttr>(), [&](Attribute indexAttr) {
+              return indexAttr.cast<IntegerAttr>().getInt();
+            })));
+      return reassociationIndices;
+    }
   }];
 }
 
+def IndexListArrayAttr :
+  TypedArrayAttrBase<I64ArrayAttr, "Array of 64-bit integer array attributes">;
+
 def Linalg_ReshapeOp : Linalg_ReshapeLikeOp<"reshape",
     [DeclareOpInterfaceMethods<ViewLikeOpInterface>]>,
-    Arguments<(ins AnyStridedMemRef:$src, AffineMapArrayAttr:$reassociation)>,
+    Arguments<(ins AnyStridedMemRef:$src, IndexListArrayAttr:$reassociation)>,
     Results<(outs AnyStridedMemRef:$result)> {
   let summary = "linalg.reshape produces a new view into the operand view";
   let description = [{
@@ -373,9 +372,7 @@ def Linalg_ReshapeOp : Linalg_ReshapeLikeOp<"reshape",
     and copies.
 
     A reassociation is defined as a continuous grouping of dimensions and is
-    represented with an affine map array attribute. In the future,
-    non-continuous groupings may be allowed (i.e. permutations, reindexings
-    etc).
+    represented with an array of I64ArrayAttr attributes.
 
     For now, it is assumed that either:
       1. a reassociation produces and consumes contiguous MemRefType or,
@@ -401,13 +398,13 @@ def Linalg_ReshapeOp : Linalg_ReshapeLikeOp<"reshape",
 
     ```mlir
     // Dimension collapse (i, j) -> i' and k -> k'
-    %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
+    %1 = linalg.reshape %0 [[0, 1], [2]] :
       memref<?x?x?xf32, stride_spec> into memref<?x?xf32, stride_spec_2>
     ```
 
     ```mlir
     // Dimension expansion i -> (i', j') and (k) -> (k')
-    %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
+    %1 = linalg.reshape %0 [[0, 1], [2]] :
       memref<?x?xf32, stride_spec> into memref<?x?x?xf32, stride_spec_2>
     ```
   }];
@@ -417,6 +414,8 @@ def Linalg_ReshapeOp : Linalg_ReshapeLikeOp<"reshape",
   }];
   let hasFolder = 1;
   let hasCanonicalizer = 1;
+  let printer = [{ return ::print(p, *this); }];
+  let parser = [{ return ::parseReshapeLikeOp(parser, result); }];
 }
 
 def Linalg_TensorReshapeOp : Linalg_ReshapeLikeOp<
@@ -424,7 +423,7 @@ def Linalg_TensorReshapeOp : Linalg_ReshapeLikeOp<
     [DeclareOpInterfaceMethods<InferShapedTypeOpInterface,
       ["reifyReturnTypeShapesPerResultDim"]>]>,
     Arguments<(ins AnyTensor:$src,
-                   AffineMapArrayAttr:$reassociation)>,
+                   IndexListArrayAttr:$reassociation)>,
     Results<(outs AnyTensor:$result)> {
   let summary = "linalg.tensor_reshape produces a new reshaped tensor.";
   let description = [{
@@ -432,9 +431,7 @@ def Linalg_TensorReshapeOp : Linalg_ReshapeLikeOp<
     reassociation of the original `src`.
 
     A reassociation is defined as a continuous grouping of dimensions and is
-    represented with an affine map array attribute. In the future,
-    non-continuous groupings may be allowed (i.e. permutations, reindexings
-    etc).
+    represented with an array of I64ArrayAttr attributes.
 
     A reshape may either collapse or expand dimensions, depending on the
     relationship between source and target tensor ranks. The verification rule
@@ -453,14 +450,14 @@ def Linalg_TensorReshapeOp : Linalg_ReshapeLikeOp<
 
     ```mlir
     // Dimension collapse (i, j) -> i' and k -> k'
-    %b = linalg.tensor_reshape %a [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
-      tensor<?x?x?xf32> into tensor<?x?xf32>
+    %b = linalg.tensor_reshape %a [[0, 1], [2]]
+        : tensor<?x?x?xf32> into tensor<?x?xf32>
     ```
 
     ```mlir
     // Dimension expansion i -> (i', j') and (k) -> (k')
-    %b = linalg.tensor_reshape %a [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
-      tensor<?x?xf32> into tensor<?x?x?xf32>
+    %b = linalg.tensor_reshape %a [[0, 1], [2]]
+        : tensor<?x?xf32> into tensor<?x?x?xf32>
     ```
   }];
   let extraClassDeclaration = commonExtraClassDeclaration # [{
@@ -473,6 +470,8 @@ def Linalg_TensorReshapeOp : Linalg_ReshapeLikeOp<
   }];
   let hasFolder = 1;
   let hasCanonicalizer = 1;
+  let printer = [{ return ::print(p, *this); }];
+  let parser = [{ return ::parseReshapeLikeOp(parser, result); }];
 }
 
 def Linalg_YieldOp : Linalg_Op<"yield", [NoSideEffect, ReturnLike, Terminator]>,

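With the attribute now an array of I64ArrayAttr, the accessors declared in
`commonExtraClassDeclaration` above let clients consume the reassociation
either as raw index groups or, via the retained getReassociationMaps(), still
as affine maps. A hedged sketch of reading it back (`dumpReassociation` is a
hypothetical helper):

    // Prints the index view, then the reconstructed affine-map view.
    static void dumpReassociation(mlir::linalg::TensorReshapeOp op) {
      // One group of int64_t source-dim indices per result dimension.
      for (const auto &group : op.getReassociationIndices()) {
        llvm::errs() << "group:";
        for (int64_t dim : group)
          llvm::errs() << ' ' << dim;
        llvm::errs() << '\n';
      }
      // E.g. [[0, 1], [2]] yields (d0, d1, d2) -> (d0, d1) and
      // (d0, d1, d2) -> (d2).
      for (mlir::AffineMap map : op.getReassociationMaps())
        map.dump();
    }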
diff  --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 9cadd14311cf0..a43c4acadc138 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1160,6 +1160,89 @@ mlir::linalg::getReassociationIndicesForReshape(ShapedType sourceType,
   return reassociationMap;
 }
 
+template <typename ReshapeLikeOp>
+static void print(OpAsmPrinter &p, ReshapeLikeOp op) {
+  p << op.getOperationName() << ' ' << op.src() << " [";
+
+  llvm::interleaveComma(op.reassociation(), p, [&](const Attribute &attr) {
+    p << '[';
+    auto arrayAttr = attr.template cast<ArrayAttr>();
+    llvm::interleaveComma(arrayAttr, p, [&](const Attribute &attr) {
+      p << attr.cast<IntegerAttr>().getInt();
+    });
+    p << ']';
+  });
+
+  p << "] ";
+  p.printOptionalAttrDict(op->getAttrs(),
+                          /*elidedAttrs=*/{op.getReassociationAttrName()});
+  p << ": " << op.src().getType() << " into " << op.getType();
+}
+
+static void print(OpAsmPrinter &p, linalg::ReshapeOp op) {
+  print<linalg::ReshapeOp>(p, op);
+}
+
+static void print(OpAsmPrinter &p, linalg::TensorReshapeOp op) {
+  print<linalg::TensorReshapeOp>(p, op);
+}
+
+static ParseResult parseReshapeLikeOp(OpAsmParser &parser,
+                                      OperationState &result) {
+  // Parse the operand.
+  OpAsmParser::OperandType src;
+  if (parser.parseOperand(src))
+    return failure();
+
+  // Parse reassociation indices.
+  Builder &b = parser.getBuilder();
+  SmallVector<Attribute, 4> reassociation;
+  if (parser.parseLSquare())
+    return failure();
+
+  while (true) {
+    if (succeeded(parser.parseOptionalRSquare()))
+      break;
+    if (parser.parseLSquare())
+      return failure();
+    SmallVector<int64_t> indices;
+    while (true) {
+      int64_t index;
+      if (parser.parseInteger(index))
+        return failure();
+      indices.push_back(index);
+
+      if (succeeded(parser.parseOptionalComma()))
+        continue;
+      if (failed(parser.parseRSquare()))
+        return failure();
+      break;
+    }
+    reassociation.push_back(b.getI64ArrayAttr(indices));
+    if (succeeded(parser.parseOptionalComma()))
+      continue;
+    if (failed(parser.parseRSquare()))
+      return failure();
+    break;
+  }
+
+  result.addAttribute(ReshapeOp::getReassociationAttrName(),
+                      b.getArrayAttr(reassociation));
+
+  // Parse optional attributes.
+  parser.parseOptionalAttrDict(result.attributes);
+
+  // Parse types.
+  Type srcType;
+  Type resultType;
+  if (parser.parseColon() || parser.parseType(srcType) ||
+      parser.resolveOperand(src, srcType, result.operands) ||
+      parser.parseKeyword("into") || parser.parseType(resultType))
+    return failure();
+  result.addTypes(resultType);
+  return success();
+}
+
 /// Collapse reassociation maps that are used in pair of reshape ops where one
 /// is a producer and other is the consumer. Only valid to use this method when
 /// both the producer and consumer are collapsing dimensions or both are
@@ -1195,18 +1278,16 @@ collapseReassociationIndices(ArrayRef<AffineMap> mapsProducer,
     return llvm::None;
 
   unsigned currDim = 0;
-  ReassociationIndices reassociations;
   SmallVector<ReassociationIndices> reassociationMaps;
   for (AffineMap rhs : mapsConsumer) {
+    ReassociationIndices reassociations;
     for (AffineExpr rhsExpr : rhs.getResults()) {
       AffineDimExpr dimExpr = rhsExpr.cast<AffineDimExpr>();
       for (int i = 0, e = mapsProducer[dimExpr.getPosition()].getNumResults();
-           i < e; ++i) {
+           i < e; ++i)
         reassociations.push_back(currDim++);
-      }
     }
-    reassociationMaps.emplace_back(ReassociationIndices{});
-    std::swap(reassociationMaps.back(), reassociations);
+    reassociationMaps.push_back(std::move(reassociations));
   }
   return reassociationMaps;
 }
@@ -1401,14 +1482,6 @@ computeReshapeCollapsedType(MemRefType type,
       MemRefType::Builder(type).setShape(newSizes).setAffineMaps({layout}));
 }
 
-/// Helper functions assert Attribute of the proper type in attr and returns the
-/// corresponding vector.
-/// TODO: this should be evolved into a generic
-/// `getRangeOfType<AffineMap>(ArrayAttr attrs)` that does not copy.
-static SmallVector<AffineMap, 4> getAffineMaps(ArrayAttr attrs) {
-  return llvm::to_vector<8>(llvm::map_range(
-      attrs, [](Attribute a) { return a.cast<AffineMapAttr>().getValue(); }));
-}
 
 template <typename AffineExprTy>
 unsigned getMaxPosOfType(ArrayRef<ReassociationExprs> exprArrays) {
@@ -1438,8 +1511,21 @@ getSymbolLessAffineMaps(ArrayRef<ReassociationExprs> reassociation) {
   return maps;
 }
 
+static SmallVector<ReassociationIndices, 2> convertReassociationMapsToIndices(
+    OpBuilder &b, ArrayRef<ReassociationExprs> reassociationExprs) {
+  SmallVector<ReassociationIndices, 2> reassociationIndices;
+  for (const auto &exprs : reassociationExprs) {
+    ReassociationIndices indices;
+    indices.reserve(exprs.size());
+    for (const auto &expr : exprs)
+      indices.push_back(expr.cast<AffineDimExpr>().getPosition());
+    reassociationIndices.push_back(indices);
+  }
+  return reassociationIndices;
+}
+
 static SmallVector<SmallVector<AffineExpr, 2>, 2>
-convertReassociationIndicesToMaps(
+convertReassociationIndicesToExprs(
     OpBuilder &b, ArrayRef<ReassociationIndices> reassociationIndices) {
   SmallVector<SmallVector<AffineExpr, 2>, 2> reassociationMaps;
   for (const auto &indices : reassociationIndices) {
@@ -1452,6 +1538,20 @@ convertReassociationIndicesToMaps(
   return reassociationMaps;
 }
 
+SmallVector<AffineMap, 4> ReshapeOp::getReassociationMaps() {
+  return getSymbolLessAffineMaps(getReassociationExprs());
+}
+SmallVector<ReassociationExprs, 4> ReshapeOp::getReassociationExprs() {
+  OpBuilder b(this->getContext());
+  return convertReassociationIndicesToExprs(b, getReassociationIndices());
+}
+SmallVector<AffineMap, 4> TensorReshapeOp::getReassociationMaps() {
+  return getSymbolLessAffineMaps(getReassociationExprs());
+}
+SmallVector<ReassociationExprs, 4> TensorReshapeOp::getReassociationExprs() {
+  OpBuilder b(this->getContext());
+  return convertReassociationIndicesToExprs(b, getReassociationIndices());
+}
 /// For reshape op compute the shape at dimension `dimIndex` of the output in
 /// terms of shape of the `src`, when the reshape op is a collapsing
 /// operation. It is the product of the shape of the collapsed dimensions of the
@@ -1571,26 +1671,37 @@ getReshapeOutputShapeFromInputShape(OpBuilder &builder, Location loc, Value src,
                    builder, loc, src, dstStaticShape, reassocation);
 }
 
-void mlir::linalg::ReshapeOp::build(OpBuilder &b, OperationState &result,
-                                    Value src,
-                                    ArrayRef<ReassociationExprs> reassociation,
-                                    ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
+static ArrayAttr
+getReassociationIndicesAttribute(OpBuilder &b,
+                                 ArrayRef<ReassociationIndices> reassociation) {
+  SmallVector<Attribute, 4> reassociationAttr =
+      llvm::to_vector<4>(llvm::map_range(
+          reassociation, [&](ReassociationIndices indices) -> Attribute {
+            return b.getI64ArrayAttr(indices).cast<Attribute>();
+          }));
+  return b.getArrayAttr(reassociationAttr);
+}
+
+void mlir::linalg::ReshapeOp::build(
+    OpBuilder &b, OperationState &result, Value src,
+    ArrayRef<ReassociationIndices> reassociation,
+    ArrayRef<NamedAttribute> attrs) {
   auto memRefType = src.getType().cast<MemRefType>();
-  auto resultType = computeReshapeCollapsedType(memRefType, maps);
+  auto resultType = computeReshapeCollapsedType(
+      memRefType, getSymbolLessAffineMaps(
+                      convertReassociationIndicesToExprs(b, reassociation)));
   build(b, result, resultType, src, attrs);
   result.addAttribute(ReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+                      getReassociationIndicesAttribute(b, reassociation));
 }
 
-void mlir::linalg::ReshapeOp::build(OpBuilder &b, OperationState &result,
-                                    Type resultType, Value src,
-                                    ArrayRef<ReassociationExprs> reassociation,
-                                    ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
+void mlir::linalg::ReshapeOp::build(
+    OpBuilder &b, OperationState &result, Type resultType, Value src,
+    ArrayRef<ReassociationIndices> reassociation,
+    ArrayRef<NamedAttribute> attrs) {
   build(b, result, resultType, src, attrs);
   result.addAttribute(ReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+                      getReassociationIndicesAttribute(b, reassociation));
 }
 
 Value mlir::linalg::ReshapeOp::getViewSource() { return src(); }
@@ -1670,16 +1781,15 @@ static LogicalResult verifyReshapeLikeTypes(Op op, T &expandedType,
     // sizes 1.
     if (llvm::any_of(expandedType.getShape(),
                      [](int64_t dim) -> bool { return dim != 1; }))
-      return op.emitOpError(
-          "invalid to reshape tensor/memref with non-unit extent dimensions to "
-          "zero-rank tensor/memref");
+      return op.emitOpError("invalid to reshape tensor/memref with non-unit "
+                            "extent dimensions to zero-rank tensor/memref");
     return success();
   }
   if (collapsedRank != op.reassociation().size())
     return op.emitOpError("expected rank of the collapsed type(")
            << collapsedRank << ") to be the number of reassociation maps("
            << op.reassociation().size() << ")";
-  auto maps = getAffineMaps(op.reassociation());
+  auto maps = op.getReassociationMaps();
   for (auto it : llvm::enumerate(maps))
     if (it.value().getNumDims() != expandedRank)
       return op.emitOpError("expected reassociation map #")
@@ -1696,7 +1806,7 @@ static LogicalResult verify(ReshapeOp op) {
   MemRefType expandedType, collapsedType;
   if (failed(verifyReshapeLikeTypes(op, expandedType, collapsedType)))
     return failure();
-  auto maps = getAffineMaps(op.reassociation());
+  auto maps = op.getReassociationMaps();
   MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps);
   if (collapsedType != expectedType)
     return op.emitOpError("expected collapsed type to be ")
@@ -1743,31 +1853,32 @@ computeTensorReshapeCollapsedType(RankedTensorType type,
 
 void mlir::linalg::TensorReshapeOp::build(
     OpBuilder &b, OperationState &result, Value src,
-    ArrayRef<ReassociationExprs> reassociation,
+    ArrayRef<ReassociationIndices> reassociation,
     ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
   auto resultType = computeTensorReshapeCollapsedType(
-      src.getType().cast<RankedTensorType>(), maps);
+      src.getType().cast<RankedTensorType>(),
+      getSymbolLessAffineMaps(
+          convertReassociationIndicesToExprs(b, reassociation)));
   build(b, result, resultType, src, attrs);
-  result.addAttribute(TensorReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+  result.addAttribute(ReshapeOp::getReassociationAttrName(),
+                      getReassociationIndicesAttribute(b, reassociation));
 }
 
 void mlir::linalg::TensorReshapeOp::build(
     OpBuilder &b, OperationState &result, Type resultType, Value src,
-    ArrayRef<ReassociationExprs> reassociation,
+    ArrayRef<ReassociationIndices> reassociation,
     ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
   build(b, result, resultType, src, attrs);
-  result.addAttribute(TensorReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+  result.addAttribute(ReshapeOp::getReassociationAttrName(),
+                      getReassociationIndicesAttribute(b, reassociation));
 }
 
 static LogicalResult verify(TensorReshapeOp op) {
   RankedTensorType expandedType, collapsedType;
   if (failed(verifyReshapeLikeTypes(op, expandedType, collapsedType)))
     return failure();
-  auto maps = getAffineMaps(op.reassociation());
+
+  auto maps = op.getReassociationMaps();
   RankedTensorType expectedType =
       computeTensorReshapeCollapsedType(expandedType, maps);
   if (collapsedType != expectedType)
@@ -2397,8 +2508,8 @@ static LogicalResult verify(ConvOp op) {
   if (oType.getRank() != iType.getRank() || oType.getRank() != fType.getRank())
     return op.emitOpError("expects memref ranks to match");
   if (auto strides = op.strides()) {
-    if (failed(
-            verifyStrideOrDilation(op, strides->getValue(), /*isStride=*/true)))
+    if (failed(verifyStrideOrDilation(op, strides->getValue(),
+                                      /*isStride=*/true)))
       return failure();
   }
   if (auto dilations = op.dilations()) {
@@ -2422,8 +2533,8 @@ static LogicalResult verifySingleInputPoolingOp(PoolingOp op) {
     return op.emitOpError("expects memref ranks to match");
 
   if (auto strides = op.strides()) {
-    if (failed(
-            verifyStrideOrDilation(op, strides->getValue(), /*isStride=*/true)))
+    if (failed(verifyStrideOrDilation(op, strides->getValue(),
+                                      /*isStride=*/true)))
       return failure();
   }
   if (auto dilations = op.dilations()) {

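The hand-written printer and parser above implement the new assembly format
(op-name ssa-use '[' index-lists ']' attr-dict ':' src-type 'into'
result-type) that replaces the declarative assemblyFormat removed from the
.td. A hedged round-trip sketch, assuming the Linalg dialect and its
dependencies are already loaded into the context (`roundTripsNewSyntax` is a
hypothetical helper; the IR is copied from the canonicalization tests below):

    #include "mlir/Parser.h"

    static bool roundTripsNewSyntax(mlir::MLIRContext &ctx) {
      constexpr char ir[] = R"mlir(
        func @f(%arg0 : memref<?x?x?xf32>) -> memref<?x?xf32> {
          %0 = linalg.reshape %arg0 [[0, 1], [2]]
              : memref<?x?x?xf32> into memref<?x?xf32>
          return %0 : memref<?x?xf32>
        }
      )mlir";
      // Exercises parseReshapeLikeOp; re-printing uses the printer above.
      mlir::OwningModuleRef module = mlir::parseSourceString(ir, &ctx);
      return static_cast<bool>(module);
    }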
diff  --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index b941cbe2844cf..47f490c46e58e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -327,7 +327,7 @@ static UnitExtentReplacementInfo replaceUnitExtents(AffineMap indexMap,
       reassociations.push_back(getAffineDimExpr(dim, context));
     }
     reassociationMaps.push_back(AffineMapAttr::get(AffineMap::get(
-        origRank, /*numSymbols = */ 0, reassociations, context)));
+        origRank, /*symbolCount = */ 0, reassociations, context)));
     reassociations.clear();
     ++dim;
   }
@@ -341,6 +341,15 @@ static UnitExtentReplacementInfo replaceUnitExtents(AffineMap indexMap,
 
 namespace {
 
+SmallVector<ReassociationExprs, 2>
+convertAffineMapArrayToExprs(ArrayAttr affineMapArrayAttr) {
+  SmallVector<ReassociationExprs, 2> reassociationExprs;
+  for (auto attr : affineMapArrayAttr)
+    reassociationExprs.push_back(
+        llvm::to_vector<4>(attr.cast<AffineMapAttr>().getValue().getResults()));
+  return reassociationExprs;
+}
+
 /// Pattern to replace tensors operands/results that are unit extents.
 template <typename GenericOpTy>
 struct ReplaceUnitExtentTensors : public OpRewritePattern<GenericOpTy> {
@@ -387,7 +396,7 @@ struct ReplaceUnitExtentTensors : public OpRewritePattern<GenericOpTy> {
         else
           res.push_back(rewriter.create<linalg::TensorReshapeOp>(
               loc, newInputOutputTypes[flattenedIdx], operand.value(),
-              reassociationMaps[flattenedIdx]));
+              convertAffineMapArrayToExprs(reassociationMaps[flattenedIdx])));
         ++flattenedIdx;
       }
       return res;
@@ -419,7 +428,8 @@ struct ReplaceUnitExtentTensors : public OpRewritePattern<GenericOpTy> {
                                             .template cast<RankedTensorType>();
       if (origResultType != result.value().getType())
         resultReplacements.push_back(rewriter.create<linalg::TensorReshapeOp>(
-            loc, origResultType, result.value(), reassociationMaps[index]));
+            loc, origResultType, result.value(),
+            convertAffineMapArrayToExprs(reassociationMaps[index])));
       else
         resultReplacements.push_back(result.value());
     }

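DropUnitDims still assembles its reassociation as an array of AffineMapAttr
(see replaceUnitExtents above), so the new file-local
convertAffineMapArrayToExprs bridges that representation to the expr-based
TensorReshapeOp builder overload this patch keeps around. A hedged sketch of
the conversion, written as if inside the same file (`exampleConversion` and
the values are illustrative):

    // One identity map over two dims becomes a single group {d0, d1}.
    static void exampleConversion(MLIRContext *ctx) {
      Builder b(ctx);
      AffineMap identity = AffineMap::getMultiDimIdentityMap(2, ctx);
      ArrayAttr mapArray = b.getAffineMapArrayAttr({identity});
      SmallVector<ReassociationExprs, 2> exprs =
          convertAffineMapArrayToExprs(mapArray);
      assert(exprs.size() == 1 && exprs.front().size() == 2);
    }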
diff  --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
index a9c91c2e2bffa..d52c632bf7f33 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -73,15 +73,15 @@ func @test_broadcast(%arg0: tensor<1xf32>, %arg1: tensor<2xf32>) -> tensor<2xf32
 
 // -----
 
-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
-// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)>
 
 // CHECK-LABEL: @test_multibroadcast
 func @test_multibroadcast(%arg0: tensor<1x3xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x3xf32> {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 3] : tensor<2x3xf32>
-  // CHECK: [[RESHAPE1:%.+]] = linalg.tensor_reshape %arg0 [#map0]
-  // CHECK: [[RESHAPE2:%.+]] = linalg.tensor_reshape %arg1 [#map0]
+  // CHECK: [[RESHAPE1:%.+]] = linalg.tensor_reshape %arg0 {{\[}}[0, 1]]
+  // CHECK: [[RESHAPE2:%.+]] = linalg.tensor_reshape %arg1 {{\[}}[0, 1]]
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins([[RESHAPE1]], [[RESHAPE2]] : tensor<3xf32>, tensor<2xf32>) outs([[INIT]] : tensor<2x3xf32>) {
   // CHECK: ^bb0(%arg2: f32, %arg3: f32, %arg4: f32):
   // CHECK:   [[ELEMENT:%.+]] = addf %arg2, %arg3 : f32
@@ -418,10 +418,9 @@ func @test_negate_quantized(%arg0: tensor<1xi8>) -> () {
 
 // -----
 
-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: @test_reshape_downrank
 func @test_reshape_downrank(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
-  // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 [#[[$MAP0]]]
+  // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 {{\[}}[0, 1]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [6]} : (tensor<2x3xf32>) -> tensor<6xf32>
   // CHECK: return [[RESHAPE]]
   return %0 : tensor<6xf32>
@@ -429,10 +428,9 @@ func @test_reshape_downrank(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
 
 // -----
 
-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: @test_reshape_uprank
 func @test_reshape_uprank(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
-  // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 [#[[$MAP0]]]
+  // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 {{\[}}[0, 1]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [2, 3]} : (tensor<6xf32>) -> tensor<2x3xf32>
   // CHECK: return [[RESHAPE]]
   return %0 : tensor<2x3xf32>
@@ -440,25 +438,21 @@ func @test_reshape_uprank(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
 
 // -----
 
-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: @test_reshape_samerank
 func @test_reshape_samerank(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> {
-  // CHECK: [[RESHAPE1:%.+]] = linalg.tensor_reshape %arg0 [#[[$MAP0]]]
-  // CHECK: [[RESHAPE2:%.+]] = linalg.tensor_reshape [[RESHAPE1]] [#[[$MAP0]]]
+  // CHECK-SAME: (%[[ARG0:.*]]: tensor<3x2xf32>)
+  // CHECK-NEXT: %[[RESHAPE1:.*]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1]]
+  // CHECK-NEXT: %[[RESHAPE2:.*]] = linalg.tensor_reshape %[[RESHAPE1]] {{\[}}[0, 1]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [2, 3]} : (tensor<3x2xf32>) -> tensor<2x3xf32>
-  // CHECK: return [[RESHAPE2]]
+  // CHECK-NEXT: return %[[RESHAPE2]]
   return %0 : tensor<2x3xf32>
 }
 
 // -----
 
-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>
-// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3)>
-// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d4, d5)>
-
 // CHECK-LABEL: @test_reshape_downrank_6D
 func @test_reshape_downrank_6D(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
-  // CHECK: linalg.tensor_reshape %arg0 [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]]
+  // CHECK: linalg.tensor_reshape %arg0 {{\[}}[0, 1, 2], [3], [4, 5]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [2, 3]} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32>
   return %0 : tensor<6x5x77xf32>
 }
@@ -496,9 +490,9 @@ func @test_transpose(%arg0: tensor<1x2x3xi32>) -> () {
 
 // -----
 
-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
-// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)>
 
 // CHECK-LABEL: @reduce_float
 // CHECK-SAME: [[ARG0:%.+]]: tensor<5x4xf32>
@@ -510,7 +504,7 @@ func @reduce_float(%arg0: tensor<5x4xf32>) -> () {
   // CHECK: ^bb0(%arg1: f32, %arg2: f32)
   // CHECK:   [[RES:%.+]] = addf %arg1, %arg2 : f32
   // CHECK:   linalg.yield [[RES]] : f32
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<4xf32> into tensor<1x4xf32>
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xf32> into tensor<1x4xf32>
   %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32>
 
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [5]
@@ -520,7 +514,7 @@ func @reduce_float(%arg0: tensor<5x4xf32>) -> () {
   // CHECK: ^bb0(%arg1: f32, %arg2: f32)
   // CHECK:   [[RES:%.+]] = addf %arg1, %arg2 : f32
   // CHECK:   linalg.yield [[RES]] : f32
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<5xf32> into tensor<5x1xf32>
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<5xf32> into tensor<5x1xf32>
   %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<5x4xf32>) -> tensor<5x1xf32>
 
   // CHECK: constant 1.0
@@ -561,7 +555,7 @@ func @reduce_int(%arg0: tensor<5x4xi32>) -> () {
   // CHECK: ^bb0(%arg1: i32, %arg2: i32)
   // CHECK:   [[RES:%.+]] = addi %arg1, %arg2 : i32
   // CHECK:   linalg.yield [[RES]] : i32
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<4xi32> into tensor<1x4xi32>
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xi32> into tensor<1x4xi32>
   %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32>
 
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [5]
@@ -571,7 +565,7 @@ func @reduce_int(%arg0: tensor<5x4xi32>) -> () {
   // CHECK: ^bb0(%arg1: i32, %arg2: i32)
   // CHECK:   [[RES:%.+]] = addi %arg1, %arg2 : i32
   // CHECK:   linalg.yield [[RES]] : i32
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<5xi32> into tensor<5x1xi32>
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<5xi32> into tensor<5x1xi32>
   %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<5x4xi32>) -> tensor<5x1xi32>
 
   // CHECK: constant 1
@@ -611,7 +605,7 @@ func @reduce_bool(%arg0: tensor<5x4xi1>) -> () {
   // CHECK: ^bb0(%arg1: i1, %arg2: i1)
   // CHECK:   [[RES:%.+]] = and %arg1, %arg2 : i1
   // CHECK:   linalg.yield [[RES]] : i1
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<4xi1> into tensor<1x4xi1>
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xi1> into tensor<1x4xi1>
   %0 = "tosa.reduce_all"(%arg0) {axis = 0 : i64} : (tensor<5x4xi1>) -> tensor<1x4xi1>
 
   // CHECK: constant false
@@ -775,31 +769,27 @@ func @reverse(%arg0: tensor<5x4xi32>) -> () {
 
 // -----
 
-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)>
-// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK: #[[$MAP3:.*]] = affine_map<(d0, d1, d2, d3) -> (d3)>
-// CHECK: #[[$MAP4:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-// CHECK: #[[$MAP5:.*]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @tile
 func @tile(%arg0 : tensor<2x3xi8>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 2, 1, 3]
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<2x3xi8>) outs([[INIT]] : tensor<2x2x1x3xi8>)
   // CHECK:   linalg.yield %arg1 : i8
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#[[$MAP2]], #[[$MAP3]]]
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1, 2], [3]]
   %0 = "tosa.tile"(%arg0) {multiples = [2, 1]} : (tensor<2x3xi8>)  -> (tensor<4x3xi8>)
 
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [1, 2, 2, 3]
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<2x3xi8>) outs([[INIT]] : tensor<1x2x2x3xi8>)
   // CHECK:   linalg.yield %arg1 : i8
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#[[$MAP4]], #[[$MAP5]]]
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1], [2, 3]]
   %1 = "tosa.tile"(%arg0) {multiples = [1, 2]} : (tensor<2x3xi8>)  -> (tensor<2x6xi8>)
 
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [5, 2, 7, 3]
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<2x3xi8>) outs([[INIT]] : tensor<5x2x7x3xi8>)
   // CHECK:   linalg.yield %arg1 : i8
-  // CHECK: linalg.tensor_reshape [[GENERIC]] [#[[$MAP4]], #[[$MAP5]]]
+  // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1], [2, 3]]
   %2 = "tosa.tile"(%arg0) {multiples = [5, 7]} : (tensor<2x3xi8>)  -> (tensor<10x21xi8>)
 
   return
@@ -1110,7 +1100,7 @@ func @resize_nearest(%input: tensor<1x2x2x1xf32>) -> () {
   // CHECK-DAG: %[[VAL7:.+]] = mulf %[[VAL5]], %[[STRIDEX]]
   // CHECK-DAG: %[[VAL8:.+]] = addf %[[VAL6]], %[[OFFSETY]]
   // CHECK-DAG: %[[VAL9:.+]] = addf %[[VAL7]], %[[OFFSETX]]
-    
+
   // Find the remainder and integer component of the target index.
 
   // CHECK-DAG: %[[VAL10:.+]] = floorf %[[VAL8]]
@@ -1167,8 +1157,8 @@ func @resize_bilinear(%input: tensor<1x2x2x1xf32>) -> () {
   // CHECK: %[[VAL10:.+]] = floorf %[[VAL8:.+]]
   // CHECK: %[[VAL11:.+]] = floorf %[[VAL9:.+]]
 
-  // CHECK: %[[DY:.+]] = subf %[[VAL8:.+]], %[[VAL10]] 
-  // CHECK: %[[DX:.+]] = subf %[[VAL9:.+]], %[[VAL11]] 
+  // CHECK: %[[DY:.+]] = subf %[[VAL8:.+]], %[[VAL10]]
+  // CHECK: %[[DX:.+]] = subf %[[VAL9:.+]], %[[VAL11]]
 
   // CHECK: %[[Y0:.+]] = fptosi %[[VAL10]]
   // CHECK: %[[X0:.+]] = fptosi %[[VAL11]]
@@ -1212,7 +1202,7 @@ func @resize_bilinear(%input: tensor<1x2x2x1xf32>) -> () {
   // CHECK: %[[LOHI:.+]] = tensor.extract %arg0[%arg1, %[[YLOI]], %[[XHII]], %arg4]
   // CHECK: %[[HILO:.+]] = tensor.extract %arg0[%arg1, %[[YHII]], %[[XLOI]], %arg4]
   // CHECK: %[[HIHI:.+]] = tensor.extract %arg0[%arg1, %[[YHII]], %[[XHII]], %arg4]
-    
+
   // Compute the bilinear interpolation.
 
   // CHECK: %[[ONE:.+]] = constant 1.000000e+00
@@ -1252,7 +1242,7 @@ func @resize_nearest_int(%input: tensor<1x2x2x1xi32>) -> () {
   // CHECK-DAG: %[[VAL5:.+]] = muli %[[X]], %[[STRIDEX]]
   // CHECK-DAG: %[[VAL6:.+]] = addi %[[VAL4]], %[[OFFSETY]]
   // CHECK-DAG: %[[VAL7:.+]] = addi %[[VAL5]], %[[OFFSETX]]
-    
+
   // Find the remainder and integer component of the target index.
 
 
@@ -1358,7 +1348,7 @@ func @resize_bilinear_int(%input: tensor<1x2x2x1xi8>) -> () {
   // CHECK: %[[XLOHI:.+]] = sexti %[[LOHI]]
   // CHECK: %[[XHILO:.+]] = sexti %[[HILO]]
   // CHECK: %[[XHIHI:.+]] = sexti %[[HIHI]]
-    
+
   // Compute the bilinear interpolation.
 
   // CHECK: %[[SCALE:.+]] = constant 256

diff  --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 8006a901e0dbc..39fc2b9743795 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -45,29 +45,22 @@ func @memref_cast_into_tiled_loop(%arg0: memref<192xf32>)  {
 
 func @collapsing_tensor_reshapes(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       tensor<?x?x?x?x?xf32> into tensor<?x?x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x?x?xf32> into tensor<?x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2], [3, 4]]
+      : tensor<?x?x?x?x?xf32> into tensor<?x?x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2]]
+      : tensor<?x?x?xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
-//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: collapsing_tensor_reshapes
-//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK:   linalg.tensor_reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
 //   CHECK-NOT:   linalg.tensor_reshape
 
 // -----
 
 func @collapsing_tensor_reshapes_to_zero_dim(%arg0 : tensor<1x1x1xf32>)
                                              -> tensor<f32> {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-       tensor<1x1x1xf32> into tensor<1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2]]
+      : tensor<1x1x1xf32> into tensor<1xf32>
   %1 = linalg.tensor_reshape %0 [] : tensor<1xf32> into tensor<f32>
   return %1 : tensor<f32>
 }
@@ -79,8 +72,8 @@ func @collapsing_tensor_reshapes_to_zero_dim(%arg0 : tensor<1x1x1xf32>)
 
 func @collapsing_memref_reshapes_to_zero_dim(%arg0 : memref<1x1x1xf32>)
                                              -> memref<f32> {
-  %0 = linalg.reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-       memref<1x1x1xf32> into memref<1xf32>
+  %0 = linalg.reshape %arg0 [[0, 1, 2]]
+      : memref<1x1x1xf32> into memref<1xf32>
   %1 = linalg.reshape %0 [] : memref<1xf32> into memref<f32>
   return %1 : memref<f32>
 }
@@ -92,63 +85,42 @@ func @collapsing_memref_reshapes_to_zero_dim(%arg0 : memref<1x1x1xf32>)
 
 func @expanding_tensor_reshapes(%arg0 : tensor<?x?xf32>) -> tensor<?x6x4x?x5xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x?xf32> into tensor<?x4x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       tensor<?x4x?xf32> into tensor<?x6x4x?x5xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<?x?xf32> into tensor<?x4x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2], [3, 4]]
+      : tensor<?x4x?xf32> into tensor<?x6x4x?x5xf32>
   return %1 : tensor<?x6x4x?x5xf32>
 }
-//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: expanding_tensor_reshapes
-//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK:   linalg.tensor_reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
 //   CHECK-NOT:   linalg.tensor_reshape
 
 // -----
 
 func @collapsing_memref_reshapes(%arg0 : memref<?x?x?x?x?xf32>) -> memref<?x?xf32>
 {
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       memref<?x?x?x?x?xf32> into memref<?x?x?xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x?x?xf32> into memref<?x?xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]]
+      : memref<?x?x?x?x?xf32> into memref<?x?x?xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2]]
+      : memref<?x?x?xf32> into memref<?x?xf32>
   return %1 : memref<?x?xf32>
 }
-//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: collapsing_memref_reshapes
-//       CHECK:   linalg.reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK:   linalg.reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
 //   CHECK-NOT:   linalg.reshape
 
 // -----
 
 func @expanding_memref_reshapes(%arg0 : memref<?x?xf32>) -> memref<?x6x4x5x?xf32>
 {
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x?xf32> into memref<?x4x?xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       memref<?x4x?xf32> into memref<?x6x4x5x?xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<?x?xf32> into memref<?x4x?xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2], [3, 4]]
+      : memref<?x4x?xf32> into memref<?x6x4x5x?xf32>
   return %1 : memref<?x6x4x5x?xf32>
 }
-//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: expanding_memref_reshapes
-//       CHECK:   linalg.reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK:   linalg.reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
 //   CHECK-NOT:   linalg.reshape
 
 // -----
@@ -156,8 +128,8 @@ func @expanding_memref_reshapes(%arg0 : memref<?x?xf32>) -> memref<?x6x4x5x?xf32
 func @expanding_tensor_reshapes_to_zero_dim(%arg0 : tensor<f32>)
                                              -> tensor<1x1x1xf32> {
   %0 = linalg.tensor_reshape %arg0 [] : tensor<f32> into tensor<1xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-       tensor<1xf32> into tensor<1x1x1xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2]]
+      : tensor<1xf32> into tensor<1x1x1xf32>
   return %1 : tensor<1x1x1xf32>
 }
 // CHECK-LABEL: expanding_tensor_reshapes_to_zero
@@ -169,9 +141,8 @@ func @expanding_tensor_reshapes_to_zero_dim(%arg0 : tensor<f32>)
 func @expanding_memref_reshapes_to_zero_dim(%arg0 : memref<f32>)
                                              -> memref<1x1x1xf32> {
   %0 = linalg.reshape %arg0 [] : memref<f32> into memref<1xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-       memref<1xf32> into memref<1x1x1xf32>
+  %1 = linalg.reshape %0 [[0, 1, 2]]
+      : memref<1xf32> into memref<1x1x1xf32>
   return %1 : memref<1x1x1xf32>
 }
 // CHECK-LABEL: expanding_memref_reshapes_to_zero
@@ -182,14 +153,10 @@ func @expanding_memref_reshapes_to_zero_dim(%arg0 : memref<f32>)
 
 func @fold_tensor_reshape(%arg0 : tensor<12x4xf32>) -> tensor<12x4xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<12x4xf32> into tensor<3x4x4xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<3x4x4xf32> into tensor<12x4xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<12x4xf32> into tensor<3x4x4xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2]]
+      : tensor<3x4x4xf32> into tensor<12x4xf32>
   return %1 : tensor<12x4xf32>
 }
 // CHECK-LABEL: @fold_tensor_reshape
@@ -199,14 +166,10 @@ func @fold_tensor_reshape(%arg0 : tensor<12x4xf32>) -> tensor<12x4xf32>
 
 func @fold_tensor_reshape_dynamic(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x?xf32> into tensor<?x4x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x4x?xf32> into tensor<?x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<?x?xf32> into tensor<?x4x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2]]
+      : tensor<?x4x?xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
 // CHECK-LABEL: @fold_tensor_reshape_dynamic
@@ -216,14 +179,10 @@ func @fold_tensor_reshape_dynamic(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
 
 func @fold_memref_reshape(%arg0 : memref<12x4xf32>) -> memref<12x4xf32>
 {
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<12x4xf32> into memref<3x4x4xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<3x4x4xf32> into memref<12x4xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<12x4xf32> into memref<3x4x4xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2]]
+      : memref<3x4x4xf32> into memref<12x4xf32>
   return %1 : memref<12x4xf32>
 }
 // CHECK-LABEL: @fold_memref_reshape
@@ -233,14 +192,10 @@ func @fold_memref_reshape(%arg0 : memref<12x4xf32>) -> memref<12x4xf32>
 
 func @fold_memref_reshape_dynamic(%arg0 : memref<?x?xf32>) -> memref<?x?xf32>
 {
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x?xf32> into memref<?x4x?xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x4x?xf32> into memref<?x?xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<?x?xf32> into memref<?x4x?xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2]]
+      : memref<?x4x?xf32> into memref<?x?xf32>
   return %1 : memref<?x?xf32>
 }
 // CHECK-LABEL: @fold_memref_reshape_dynamic
@@ -250,223 +205,154 @@ func @fold_memref_reshape_dynamic(%arg0 : memref<?x?xf32>) -> memref<?x?xf32>
 
 func @reshape_collapse(%arg0 : tensor<2x3x4x5x6x7x8xf32>) -> tensor<24x5x42x8xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-      [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3, 4, 5, 6]]
       : tensor<2x3x4x5x6x7x8xf32> into tensor<40320xf32>
-  %1 = linalg.tensor_reshape %0
-      [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3]]
       : tensor<40320xf32> into tensor<24x5x42x8xf32>
   return %1 : tensor<24x5x42x8xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d3)>
-//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5)>
-//  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d6)>
 //      CHECK: func @reshape_collapse
 // CHECK-SAME:   %[[ARG0:.+]]: tensor<2x3x4x5x6x7x8xf32>
 //      CHECK:   %[[RESULT:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]], #[[MAP3]]]
+// CHECK-SAME:     [0, 1, 2], [3], [4, 5], [6]
 //      CHECK:   return %[[RESULT]]
 
 // -----
 
 func @reshape_expand(%arg0 : tensor<24x5x42x8xf32>) -> tensor<2x3x4x5x6x7x8xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-      [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3]]
       : tensor<24x5x42x8xf32> into tensor<40320xf32>
-  %1 = linalg.tensor_reshape %0
-      [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3, 4, 5, 6]]
       : tensor<40320xf32> into tensor<2x3x4x5x6x7x8xf32>
   return %1 : tensor<2x3x4x5x6x7x8xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d3)>
-//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5)>
-//  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d6)>
 //      CHECK: func @reshape_expand
 // CHECK-SAME:   %[[ARG0:.+]]: tensor<24x5x42x8xf32>
 //      CHECK:   %[[RESULT:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]], #[[MAP3]]]
+// CHECK-SAME:     [0, 1, 2], [3], [4, 5], [6]
 //      CHECK:   return %[[RESULT]]
 
 // -----
 
 func @expand_reshape_1D(%arg0 : tensor<2048xf32>) -> tensor<4x512xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3]]
     : tensor<2048xf32> into tensor<1x4x1x512xf32>
-  %1 = linalg.tensor_reshape %0
-    [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>,
-     affine_map<(d0, d1, d2, d3) -> (d3)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3]]
     : tensor<1x4x1x512xf32> into tensor<4x512xf32>
   return %1 : tensor<4x512xf32>
 }
-//       CHECK: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //       CHECK: func @expand_reshape_1D
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
 //  CHECK-SAME:   tensor<2048xf32> into tensor<4x512xf32>
 
 // -----
 
 func @fold_reshape_1D(%arg0 : tensor<4x512xf32>) -> tensor<2048xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>,
-     affine_map<(d0, d1, d2, d3) -> (d3)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2], [3]]
     : tensor<4x512xf32> into tensor<1x4x1x512xf32>
-  %1 = linalg.tensor_reshape %0
-    [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3]]
     : tensor<1x4x1x512xf32> into tensor<2048xf32>
   return %1 : tensor<2048xf32>
 }
-//       CHECK: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //       CHECK: func @fold_reshape_1D
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
 //  CHECK-SAME:   tensor<4x512xf32> into tensor<2048xf32>
 
 // -----
 
 func @fold_reshape_unit_dims(%arg0 : tensor<2048x1x1xf32>) -> tensor<4x512x1x1xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3], [4], [5]]
     : tensor<2048x1x1xf32> into tensor<1x4x1x512x1x1xf32>
-  %1 = linalg.tensor_reshape %0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d3)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3], [4], [5]]
     : tensor<1x4x1x512x1x1xf32> into tensor<4x512x1x1xf32>
   return %1 : tensor<4x512x1x1xf32>
 }
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d2)>
-//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //       CHECK: func @fold_reshape_unit_dims
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2], [3]]
 //  CHECK-SAME:   tensor<2048x1x1xf32> into tensor<4x512x1x1xf32>
 
 // -----
 
 func @expand_reshape_unit_dims(%arg0 : tensor<2048x1x2048xf32>) -> tensor<4x512x1x512x4xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d2, d3, d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d5)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7, d8)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3, 4], [5], [6, 7, 8]]
     : tensor<2048x1x2048xf32> into tensor<1x4x1x512x1x1x512x1x4xf32>
-  %1 = linalg.tensor_reshape %0
-    [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d3, d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d5)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d8)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3, 4], [5], [6, 7], [8]]
     : tensor<1x4x1x512x1x1x512x1x4xf32> into tensor<4x512x1x512x4xf32>
   return %1 : tensor<4x512x1x512x4xf32>
 }
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
-//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 //       CHECK: func @expand_reshape_unit_dims
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2], [3, 4]]
 //  CHECK-SAME:   tensor<2048x1x2048xf32> into tensor<4x512x1x512x4xf32>
 
 // -----
 
 func @fold_reshape_trailing_unit_dims(%arg0: tensor<2xf32>) -> tensor<2x1xf32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] : tensor<2xf32> into tensor<2x1x1xf32>
-  %1 = linalg.tensor_reshape %0
-  [affine_map<(d0, d1, d2) -> (d0)>,
-   affine_map<(d0, d1, d2) -> (d1, d2)>
-  ] : tensor<2x1x1xf32> into tensor<2x1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2]]
+      : tensor<2xf32> into tensor<2x1x1xf32>
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2]]
+      : tensor<2x1x1xf32> into tensor<2x1xf32>
   return %1 : tensor<2x1xf32>
 }
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //       CHECK: func @fold_reshape_trailing_unit_dims
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
 //  CHECK-SAME:   tensor<2xf32> into tensor<2x1xf32>
 
 // -----
 
 func @collapse_reshape_unit_dims_dynamic(%arg0 : tensor<?x1x?x1x1x?x?x1x1xf32>) -> tensor<?x?x?x?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d1, d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d3)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d5)>,
-     affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7, d8)>]
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3], [4], [5], [6, 7, 8]]
     : tensor<?x1x?x1x1x?x?x1x1xf32> into tensor<?x?x1x1x?x?xf32>
-  %1 = linalg.tensor_reshape %0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %1 = linalg.tensor_reshape %0 [[0], [1], [2, 3, 4], [5]]
     : tensor<?x?x1x1x?x?xf32> into tensor<?x?x?x?xf32>
   return %1 : tensor<?x?x?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d1, d2)>
-//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d3, d4, d5)>
-//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7, d8)>
 //       CHECK: func @collapse_reshape_unit_dims_dynamic
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]], #[[MAP2]], #[[MAP3]]]
+//       CHECK: linalg.tensor_reshape
+//  CHECK-SAME:   [0], [1, 2], [3, 4, 5], [6, 7, 8]
 //  CHECK-SAME:   tensor<?x1x?x1x1x?x?x1x1xf32> into tensor<?x?x?x?xf32>
 
 // -----
 
 func @fold_reshape_trailing_unit_dims(%arg0: tensor<2xf32>) -> tensor<2x1xf32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] : tensor<2xf32> into tensor<2x1x1xf32>
-  %1 = linalg.tensor_reshape %0
-  [affine_map<(d0, d1, d2) -> (d0)>,
-   affine_map<(d0, d1, d2) -> (d1, d2)>
-  ] : tensor<2x1x1xf32> into tensor<2x1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2]]
+      : tensor<2xf32> into tensor<2x1x1xf32>
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2]]
+      : tensor<2x1x1xf32> into tensor<2x1xf32>
   return %1 : tensor<2x1xf32>
 }
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //       CHECK: func @fold_reshape_trailing_unit_dims
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
 //  CHECK-SAME:   tensor<2xf32> into tensor<2x1xf32>
 
 // -----
 
 func @fold_reshape_trailing_unit_dims_dynamic(%arg0: tensor<1x1x?x1x1x1xf32>) -> tensor<?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-      [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>,
-       affine_map<(d0, d1, d2, d3, d4, d5) -> (d3)>,
-       affine_map<(d0, d1, d2, d3, d4, d5) -> (d4)>,
-       affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2], [3], [4], [5]]
       : tensor<1x1x?x1x1x1xf32> into tensor<?x1x1x1xf32>
-  %1 = linalg.tensor_reshape %0
-      [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3]]
       : tensor<?x1x1x1xf32> into tensor<?xf32>
   return %1 : tensor<?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>
 //       CHECK: func @fold_reshape_trailing_unit_dims_dynamic
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1, 2, 3, 4, 5]]
 //  CHECK-SAME:   tensor<1x1x?x1x1x1xf32> into tensor<?xf32>
 
 // -----
 
 func @no_fold_reshapes(%arg0 : tensor<?x?x?xf32>) -> tensor<?x?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-      [affine_map<(d0, d1, d2, d3) -> (d0)>,
-       affine_map<(d0, d1, d2, d3) -> (d1)>,
-       affine_map<(d0, d1, d2, d3) -> (d2, d3)>]
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3]]
       : tensor<?x?x?xf32> into tensor<?x?x1x?xf32>
-  %1 = linalg.tensor_reshape %0
-      [affine_map<(d0, d1, d2, d3) -> (d0)>,
-       affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>]
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]]
       : tensor<?x?x1x?xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
@@ -478,15 +364,9 @@ func @no_fold_reshapes(%arg0 : tensor<?x?x?xf32>) -> tensor<?x?xf32>
 
 func @no_fold_reshape_incompatible(%arg0 : tensor<4x6x8xf32>) -> tensor<2x6x16xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-      [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-       affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>,
-       affine_map<(d0, d1, d2, d3, d4) -> (d4)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2, 3], [4]]
       : tensor<4x6x8xf32> into tensor<2x2x3x2x8xf32>
-  %1 = linalg.tensor_reshape %0
-      [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-       affine_map<(d0, d1, d2, d3, d4) -> (d1, d2)>,
-       affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>]
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2], [3, 4]]
       : tensor<2x2x3x2x8xf32> into tensor<2x6x16xf32>
   return %1 : tensor<2x6x16xf32>
 }
@@ -497,19 +377,18 @@ func @no_fold_reshape_incompatible(%arg0 : tensor<4x6x8xf32>) -> tensor<2x6x16xf
 // -----
 
 func @no_fold_reshape_empty_expr(%arg0: tensor<3x2x2xf32>) -> tensor<12x1xf32> {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2, d3) -> (d0)>, affine_map<(d0, d1, d2, d3) -> (d1)>, affine_map<(d0, d1, d2, d3) -> (d2, d3)>] : tensor<3x2x2xf32> into tensor<3x2x2x1xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d3)>] : tensor<3x2x2x1xf32> into tensor<12x1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3]]
+      : tensor<3x2x2xf32> into tensor<3x2x2x1xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3]]
+      : tensor<3x2x2x1xf32> into tensor<12x1xf32>
   return %1 : tensor<12x1xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1)>
-//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
-//  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //      CHECK: func @no_fold_reshape_empty_expr
 // CHECK-SAME:    %[[ARG0:.+]]: tensor<3x2x2xf32>
-//      CHECK:    %[[RARG0:.+]] = linalg.tensor_reshape %[[ARG0:.+]] [#[[MAP0]], #[[MAP1]], #[[MAP2]]
-//      CHECK:    %[[RES:.+]] = linalg.tensor_reshape %[[RARG0:.+]] [#[[MAP3]], #[[MAP4]]]
+//      CHECK:    %[[RARG0:.+]] = linalg.tensor_reshape %[[ARG0]]
+// CHECK-SAME:      [0], [1], [2, 3]
+//      CHECK:    %[[RES:.+]] = linalg.tensor_reshape %[[RARG0]]
+// CHECK-SAME:      [0, 1, 2], [3]
 //      CHECK:    return %[[RES:.+]] : tensor<12x1xf32>
 
 // -----
@@ -546,10 +425,8 @@ func @dce_zero_memref(%arg0 : memref<0xf32>, %arg1: tensor<0xf32>) -> tensor<0xf
 func @reshape_splat_constant_int32() -> tensor<2x4x2xi32>
 {
   %c0 = constant dense<42> : tensor<2x8xi32>
-  %0 = linalg.tensor_reshape %c0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>]
-       : tensor<2x8xi32> into tensor<2x4x2xi32>
+  %0 = linalg.tensor_reshape %c0 [[0], [1, 2]]
+      : tensor<2x8xi32> into tensor<2x4x2xi32>
   return %0 : tensor<2x4x2xi32>
 }
 // CHECK-LABEL: @reshape_splat_constant_int32
@@ -560,10 +437,8 @@ func @reshape_splat_constant_int32() -> tensor<2x4x2xi32>
 func @reshape_splat_constant_int16() -> tensor<2x4x2xi16>
 {
   %c0 = constant dense<42> : tensor<2x8xi16>
-  %0 = linalg.tensor_reshape %c0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>]
-       : tensor<2x8xi16> into tensor<2x4x2xi16>
+  %0 = linalg.tensor_reshape %c0 [[0], [1, 2]]
+      : tensor<2x8xi16> into tensor<2x4x2xi16>
   return %0 : tensor<2x4x2xi16>
 }
 // CHECK-LABEL: @reshape_splat_constant_int16
@@ -574,10 +449,8 @@ func @reshape_splat_constant_int16() -> tensor<2x4x2xi16>
 func @reshape_splat_constant_float32() -> tensor<2x4x2xf32>
 {
   %c0 = constant dense<42.0> : tensor<2x8xf32>
-  %0 = linalg.tensor_reshape %c0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>]
-       : tensor<2x8xf32> into tensor<2x4x2xf32>
+  %0 = linalg.tensor_reshape %c0 [[0], [1, 2]]
+      : tensor<2x8xf32> into tensor<2x4x2xf32>
   return %0 : tensor<2x4x2xf32>
 }
 // CHECK-LABEL: @reshape_splat_constant_float32
@@ -588,10 +461,8 @@ func @reshape_splat_constant_float32() -> tensor<2x4x2xf32>
 func @reshape_splat_constant_float64() -> tensor<2x4x2xf64>
 {
   %c0 = constant dense<42.0> : tensor<2x8xf64>
-  %0 = linalg.tensor_reshape %c0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>]
-       : tensor<2x8xf64> into tensor<2x4x2xf64>
+  %0 = linalg.tensor_reshape %c0 [[0], [1, 2]]
+      : tensor<2x8xf64> into tensor<2x4x2xf64>
   return %0 : tensor<2x4x2xf64>
 }
 // CHECK-LABEL: @reshape_splat_constant_float64
@@ -851,11 +722,8 @@ func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
 
 func @init_tensor_reshape_expansion(%arg0 : index) -> tensor<2x3x5x4x?x7xf32> {
   %0 = linalg.init_tensor [6, 5, %arg0] : tensor<6x5x?xf32>
-  %1 = linalg.tensor_reshape %0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] :
-     tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2], [3, 4, 5]]
+      : tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
   return %1 : tensor<2x3x5x4x?x7xf32>
 }
 //      CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 floordiv 28)>
@@ -869,11 +737,8 @@ func @init_tensor_reshape_expansion(%arg0 : index) -> tensor<2x3x5x4x?x7xf32> {
 
 func @init_tensor_reshape_collapse(%arg0 : index) -> tensor<6x5x?xf32> {
   %0 = linalg.init_tensor [2, 3, 5, 4, %arg0, 7] : tensor<2x3x5x4x?x7xf32>
-  %1 = linalg.tensor_reshape %0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] :
-    tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2], [3, 4, 5]]
+      : tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
   return %1 : tensor<6x5x?xf32>
 }
 //      CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 * 28)>
@@ -1022,11 +887,8 @@ func @dim_reshape_expansion(%arg0 : tensor<6x5x?xf32>) -> (index, index, index)
   %c1 = constant 1 : index
   %c3 = constant 3 : index
   %c4 = constant 4 : index
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] :
-     tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2], [3, 4, 5]]
+      : tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
   %1 = memref.dim %0, %c1 : tensor<2x3x5x4x?x7xf32>
   %2 = memref.dim %0, %c3 : tensor<2x3x5x4x?x7xf32>
   %3 = memref.dim %0, %c4 : tensor<2x3x5x4x?x7xf32>
@@ -1048,11 +910,8 @@ func @dim_reshape_collapse(%arg0 : tensor<2x3x5x4x?x7xf32>) -> (index, index)
 {
   %c1 = constant 1 : index
   %c2 = constant 2 : index
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] :
-     tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2], [3, 4, 5]]
+      : tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
   %1 = memref.dim %0, %c1 : tensor<6x5x?xf32>
   %2 = memref.dim %0, %c2 : tensor<6x5x?xf32>
   return %1, %2 : index, index
@@ -1109,9 +968,8 @@ func @fold_fill_reshape() -> tensor<6x4xf32> {
   %init = linalg.init_tensor [1, 2, 3, 4] : tensor<1x2x3x4xf32>
   // CHECK: %[[FILL:.+]] = linalg.fill(%[[INIT]], %cst) : tensor<6x4xf32>, f32 -> tensor<6x4xf32>
   %fill = linalg.fill(%init, %zero) : tensor<1x2x3x4xf32>, f32 -> tensor<1x2x3x4xf32>
-  %reshape = linalg.tensor_reshape %fill [
-    affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>,
-    affine_map<(d0, d1, d2, d3) -> (d3)>] : tensor<1x2x3x4xf32> into tensor<6x4xf32>
+  %reshape = linalg.tensor_reshape %fill [[0, 1, 2], [3]]
+      : tensor<1x2x3x4xf32> into tensor<6x4xf32>
   // CHECK: return %[[FILL]] : tensor<6x4xf32>
   return %reshape : tensor<6x4xf32>
 }
@@ -1125,9 +983,7 @@ func @fold_fill_reshape_dynamic(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?xf32
   // CHECK: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]]
   %0 = linalg.fill(%arg0, %zero) : tensor<?x?x?x?x?xf32>, f32 -> tensor<?x?x?x?x?xf32>
   // CHECK: %[[RESULT:.+]] = linalg.fill(%[[RESHAPE]], %{{.+}})
-  %1 = linalg.tensor_reshape %0
-      [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>,
-       affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3, 4]]
       : tensor<?x?x?x?x?xf32> into tensor<?x?xf32>
   // CHECK: return %[[RESULT]]
   return %1 : tensor<?x?xf32>

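A quick key to the new notation used throughout these tests: each bracketed
group names the dimensions of the higher-rank type that fold into (or unfold
from) one dimension of the lower-rank type, so a single reassociation serves
both the collapsing and the expanding direction. A minimal standalone sketch,
not taken from this patch (the shapes are made up):

  func @reassociation_example(%t: tensor<4x5x6xf32>) -> tensor<4x5x6xf32> {
    // Collapse: groups [0, 1] and [2] index the 3-D side, folding 4x5 -> 20.
    %c = linalg.tensor_reshape %t [[0, 1], [2]]
        : tensor<4x5x6xf32> into tensor<20x6xf32>
    // Expand: the same groups, read in the other direction.
    %e = linalg.tensor_reshape %c [[0, 1], [2]]
        : tensor<20x6xf32> into tensor<4x5x6xf32>
    return %e : tensor<4x5x6xf32>
  }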
diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
index 5eac3a458a35a..2b8855a2affde 100644
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -20,19 +20,14 @@ func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %shape: tensor<?x1x?x1x?xf3
        } -> tensor<?x1x?x1x?xf32>
   return %0 : tensor<?x1x?x1x?xf32>
 }
-//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
 //   CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
 //   CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[$MAP4:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-//   CHECK-DAG: #[[$MAP5:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>
-//   CHECK-DAG: #[[$MAP6:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4)>
 // CHECK-LABEL: func @drop_one_trip_loops
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2]]
 //       CHECK: linalg.generic
 //  CHECK-SAME:   indexing_maps = [#[[$MAP2]], #[[$MAP3]]]
 //  CHECK-SAME:   iterator_types = ["parallel", "parallel", "parallel"]
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP4]], #[[$MAP5]], #[[$MAP6]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2, 3], [4]]
 
 // -----
 
@@ -146,7 +141,7 @@ func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
        } -> tensor<1x1xf32>
   return %0 : tensor<1x1xf32>
 }
-//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> ()>
+//       CHECK: #[[$MAP0:.*]] = affine_map<() -> ()>
 // CHECK-LABEL: func @drop_all_loops
 //       CHECK:   linalg.tensor_reshape %{{.*}} []
 //       CHECK:   linalg.generic
@@ -235,9 +230,10 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf3
   } -> tensor<5xf32>
   return %0 : tensor<5xf32>
 }
-//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+//   CHECK: #[[$MAP1:.*]] = affine_map<(d0) -> (d0)>
+
 // CHECK-LABEL: func @leading_dim_1_canonicalization
-//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[$MAP0]]]
+//       CHECK:   linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
 //       CHECK:   linalg.generic
 //  CHECK-SAME:     indexing_maps = [#[[$MAP1]], #[[$MAP1]]]
 //  CHECK-SAME:     iterator_types = ["parallel"]
@@ -258,10 +254,8 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf3
 
 func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>, %shape : tensor<5x5xf32>) -> tensor<5x5xf32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>] :
-       tensor<5xf32> into tensor<1x5xf32>
-  %1 = linalg.tensor_reshape %arg1 [affine_map<(d0, d1) -> (d0, d1)>] :
-       tensor<5xf32> into tensor<5x1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1]] : tensor<5xf32> into tensor<1x5xf32>
+  %1 = linalg.tensor_reshape %arg1 [[0, 1]] : tensor<5xf32> into tensor<5x1xf32>
   %2 = linalg.generic #trait
      ins(%0, %1 : tensor<1x5xf32>, tensor<5x1xf32>)
     outs(%shape : tensor<5x5xf32>) {
@@ -319,8 +313,6 @@ func @broadcast_scalar(%arg0 : tensor<1x1xf32>, %shape : tensor<?x?xf32>) -> ten
 
 #map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 #map1 = affine_map<(d0, d1, d2) -> (d2)>
-#map3 = affine_map<(d0, d1, d2) -> (d0, d1)>
-#map4 = affine_map<(d0, d1, d2) -> (d2)>
 func @fold_unit_dim_tensor_reshape_op(%arg0 : tensor<5xf32>) -> tensor<2x5xf32>
 {
   %1 = linalg.init_tensor [1, 2, 5] : tensor<1x2x5xf32>
@@ -330,7 +322,7 @@ func @fold_unit_dim_tensor_reshape_op(%arg0 : tensor<5xf32>) -> tensor<2x5xf32>
     ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
       linalg.yield %arg1 : f32
     } -> tensor<1x2x5xf32>
-  %3 = linalg.tensor_reshape %2 [#map3, #map4]
+  %3 = linalg.tensor_reshape %2 [[0, 1], [2]]
     : tensor<1x2x5xf32> into tensor<2x5xf32>
   return %3 : tensor<2x5xf32>
 }
@@ -356,14 +348,13 @@ func @fold_unit_dim_for_init_tensor(%input: tensor<1x1000xf32>) -> tensor<1xf32>
 }
 
 
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0) -> (d0)>
 //   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0) -> ()>
 
 //       CHECK: func @fold_unit_dim_for_init_tensor
 
 
-//       CHECK: %[[INPUT_RESHAPE:.+]] = linalg.tensor_reshape %{{.+}} [#[[MAP0]]] : tensor<1x1000xf32> into tensor<1000xf32>
+//       CHECK: %[[INPUT_RESHAPE:.+]] = linalg.tensor_reshape %{{.+}} {{\[}}[0, 1]] : tensor<1x1000xf32> into tensor<1000xf32>
 //       CHECK: %[[INIT:.+]] = linalg.init_tensor [] : tensor<f32>
 //       CHECK: %[[FILL:.+]] = linalg.fill(%[[INIT]], %cst) : tensor<f32>, f32 -> tensor<f32>
 //       CHECK: %[[GENERIC:.+]] = linalg.generic
@@ -389,20 +380,17 @@ func @fold_subtensor(
       tensor<1x?x?x?x?x1x1xf32> to tensor<1x?x?x1x?x1x1xf32>
   return %0, %1 : tensor<1x?x?x1x?x1x1xf32>, tensor<1x?x?x1x?x1x1xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d2)>
-//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d3, d4, d5, d6)>
 //      CHECK: func @fold_subtensor
 // CHECK-SAME:   %[[ARG0:.+]]: tensor<1x?x?x1x?x1x1xf32>
 // CHECK-SAME:   %[[ARG1:.+]]: tensor<1x?x?x?x?x1x1xf32>
 //      CHECK:   %[[SUBTENSOR1:.+]] = subtensor %[[ARG0]]
 // CHECK-SAME:       to tensor<?x?x?xf32>
 //      CHECK:   %[[RESULT1:.+]] = linalg.tensor_reshape %[[SUBTENSOR1]]
-// CHECK-SAME:       [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME:       [0, 1], [2], [3, 4, 5, 6]
 //      CHECK:   %[[SUBTENSOR2:.+]] = subtensor %[[ARG1]]
 // CHECK-SAME:       to tensor<?x?x?xf32>
 //      CHECK:   %[[RESULT2:.+]] = linalg.tensor_reshape %[[SUBTENSOR2]]
-// CHECK-SAME:       [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME:       [0, 1], [2], [3, 4, 5, 6]
 //      CHECK:   return %[[RESULT1]], %[[RESULT2]]
 
 // -----
@@ -425,13 +413,11 @@ func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32> {
   } -> tensor<1x?xf32>
   return %3 : tensor<1x?xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1) -> (d0)>
 //      CHECK: func @unit_dim_for_reduction
 // CHECK-SAME:   %[[ARG0:.+]]: tensor<1x?x1x?xf32>
-//  CHECK-DAG:   %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] [#[[MAP0]], #[[MAP1]]]
+//  CHECK-DAG:   %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1, 2], [3]]
 //      CHECK:   %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor<?xf32>
 //      CHECK:   %[[FILL:.+]] = linalg.fill(%[[INIT]], %{{.+}})
 //      CHECK:   %[[RESULT:.+]] = linalg.generic
@@ -439,7 +425,7 @@ func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32> {
 // CHECK-SAME:     iterator_types = ["parallel", "reduction"]
 // CHECK-SAME:     ins(%[[RESHAPE]] : tensor<?x?xf32>)
 // CHECK-SAME:     outs(%[[FILL]] : tensor<?xf32>)
-//      CHECK:   %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] [#[[MAP2]]]
+//      CHECK:   %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] {{\[}}[0, 1]]
 //      CHECK:   return %[[RESULT_RESHAPE]]
 
 // -----
@@ -461,13 +447,11 @@ func @unit_dim_for_reduction_keep_one(%arg0: tensor<1x?x1x1xf32>) -> tensor<1x1x
   } -> tensor<1x1xf32>
   return %3 : tensor<1x1xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1) -> (d0)>
 //      CHECK: func @unit_dim_for_reduction_keep_one
 // CHECK-SAME:   %[[ARG0:.+]]: tensor<1x?x1x1xf32>
-//  CHECK-DAG:   %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] [#[[MAP0]], #[[MAP1]]]
+//  CHECK-DAG:   %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1, 2], [3]]
 //      CHECK:   %[[INIT:.+]] = linalg.init_tensor [1] : tensor<1xf32>
 //      CHECK:   %[[FILL:.+]] = linalg.fill(%[[INIT]], %{{.+}})
 //      CHECK:   %[[RESULT:.+]] = linalg.generic
@@ -475,7 +459,7 @@ func @unit_dim_for_reduction_keep_one(%arg0: tensor<1x?x1x1xf32>) -> tensor<1x1x
 // CHECK-SAME:     iterator_types = ["parallel", "reduction"]
 // CHECK-SAME:     ins(%[[RESHAPE]] : tensor<?x1xf32>)
 // CHECK-SAME:     outs(%[[FILL]] : tensor<1xf32>)
-//      CHECK:   %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] [#[[MAP2]]]
+//      CHECK:   %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] {{\[}}[0, 1]]
 //      CHECK:   return %[[RESULT_RESHAPE]]
 
 // -----
@@ -498,13 +482,11 @@ func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x1xf32
   } -> tensor<?x1xf32>
   return %3 : tensor<?x1xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
 //  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0, d1)>
 //  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1) -> (d0)>
 //      CHECK: func @unit_dim_for_reduction_inner
 // CHECK-SAME:   %[[ARG0:.+]]: tensor<?x1x?x1xf32>
-//  CHECK-DAG:   %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] [#[[MAP0]], #[[MAP1]]]
+//  CHECK-DAG:   %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1], [2, 3]]
 //      CHECK:   %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor<?xf32>
 //      CHECK:   %[[FILL:.+]] = linalg.fill(%[[INIT]], %{{.+}})
 //      CHECK:   %[[RESULT:.+]] = linalg.generic
@@ -512,5 +494,5 @@ func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x1xf32
 // CHECK-SAME:     iterator_types = ["parallel", "reduction"]
 // CHECK-SAME:     ins(%[[RESHAPE]] : tensor<?x?xf32>)
 // CHECK-SAME:     outs(%[[FILL]] : tensor<?xf32>)
-//      CHECK:   %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] [#[[MAP2]]]
+//      CHECK:   %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] {{\[}}[0, 1]]
 //      CHECK:   return %[[RESULT_RESHAPE]]

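One detail worth noting in the updated CHECK lines above: FileCheck reserves
"[[" to introduce pattern variables (as in %[[RESULT]]), so a literal leading
bracket of the new index-list syntax has to be escaped with the regex block
{{\[}}. That is why the checks match {{\[}}[0, 1], [2]] rather than a bare
[[0, 1], [2]], which FileCheck would try to parse as a variable.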
diff --git a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
index bddf0d68749bc..eda7d460a5268 100644
--- a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
@@ -1,21 +1,18 @@
 // RUN: mlir-opt %s -test-linalg-push-reshape -split-input-file | FileCheck %s
 
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
 // CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1) -> (d1)>
 
 // CHECK-LABEL: func @reshape
 // CHECK-SAME: (%[[A:.*]]: tensor<?x16xf32>, %[[B:.*]]: tensor<16xf32>, %[[INIT:.*]]: tensor<?x112x16xf32>)
-//      CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[INIT]] [#[[$MAP0]], #[[$MAP1]]] : tensor<?x112x16xf32> into tensor<?x16xf32>
+//      CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[INIT]] {{\[}}[0, 1], [2]] : tensor<?x112x16xf32> into tensor<?x16xf32>
 //      CHECK: %[[R:.*]] = linalg.generic {indexing_maps = [#[[$MAP2]], #[[$MAP3]], #[[$MAP2]]],
 // CHECK-SAME: iterator_types = ["parallel", "parallel"]}
 // CHECK-SAME: ins(%[[A]], %[[B]] : tensor<?x16xf32>, tensor<16xf32>) outs(%[[RI]] : tensor<?x16xf32>)
-//      CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] [#[[$MAP0]], #[[$MAP1]]] : tensor<?x16xf32> into tensor<?x112x16xf32>
+//      CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] {{\[}}[0, 1], [2]] : tensor<?x16xf32> into tensor<?x112x16xf32>
 //      CHECK: return %[[RR]] : tensor<?x112x16xf32>
 func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf32>) -> tensor<?x112x16xf32> {
-  %0 = linalg.tensor_reshape %A [
-    affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>]
+  %0 = linalg.tensor_reshape %A [[0, 1], [2]]
       : tensor<?x16xf32> into tensor<?x112x16xf32>
   %2 = linalg.generic {indexing_maps = [
     affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d2)>,
@@ -32,27 +29,23 @@ func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf
 
 // -----
 
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
 // CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1) -> (d1)>
 
 // CHECK-LABEL: func @reshape_multiple
 // CHECK-SAME: (%[[A:.*]]: tensor<12544x16xf32>, %[[B:.*]]: tensor<12544x16xf32>, %[[C:.*]]: tensor<16xf32>)
 //      CHECK: %[[I:.*]] = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32>
-//      CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[I]] [#[[$MAP0]], #[[$MAP1]]] : tensor<112x112x16xf32> into tensor<12544x16xf32>
+//      CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[I]] {{\[}}[0, 1], [2]] : tensor<112x112x16xf32> into tensor<12544x16xf32>
 //      CHECK: %[[R:.*]] = linalg.generic {indexing_maps = [#[[$MAP2]], #[[$MAP2]], #[[$MAP3]], #[[$MAP2]]],
 // CHECK-SAME: iterator_types = ["parallel", "parallel"]}
 // CHECK-SAME: ins(%[[A]], %[[B]], %[[C]] : tensor<12544x16xf32>, tensor<12544x16xf32>, tensor<16xf32>) outs(%[[RI]] : tensor<12544x16xf32>)
-//      CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] [#[[$MAP0]], #[[$MAP1]]] : tensor<12544x16xf32> into tensor<112x112x16xf32>
+//      CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] {{\[}}[0, 1], [2]] : tensor<12544x16xf32> into tensor<112x112x16xf32>
 //      CHECK: return %[[RR]] : tensor<112x112x16xf32>
 func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
   %C: tensor<16xf32>) -> tensor<112x112x16xf32> {
-  %0 = linalg.tensor_reshape %A [
-    affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>]
+  %0 = linalg.tensor_reshape %A [[0, 1], [2]]
       : tensor<12544x16xf32> into tensor<112x112x16xf32>
-  %1 = linalg.tensor_reshape %B [
-    affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>]
+  %1 = linalg.tensor_reshape %B [[0, 1], [2]]
       : tensor<12544x16xf32> into tensor<112x112x16xf32>
   %2 = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32>
   %3 = linalg.generic {indexing_maps = [
@@ -80,8 +73,7 @@ func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
 // CHECK: linalg.generic
 // CHECK: } -> tensor<112x112x16xf32>
 func @reshape_negative(%A: tensor<12544x16xf32>, %B: tensor<112xf32>) -> tensor<112x112x16xf32> {
-  %20 = linalg.tensor_reshape %A [
-    affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>]
+  %20 = linalg.tensor_reshape %A [[0, 1], [2]]
       : tensor<12544x16xf32> into tensor<112x112x16xf32>
   %21 = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32>
   %22 = linalg.generic {indexing_maps = [

diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index 796e511e9db15..e67e60a8bf920 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -348,29 +348,21 @@ func @generic(%arg0: memref<?x?xi4>) {
 
 func @reshape(%arg0: memref<f32>) {
   // expected-error @+1 {{expected non-zero memref ranks}}
-  %0 = linalg.reshape %arg0 [affine_map<()->(0)>] : memref<f32> into memref<f32>
+  %0 = linalg.reshape %arg0 [[0]] : memref<f32> into memref<f32>
 }
 
 // -----
 
 func @reshape(%arg0: memref<?xf32>) {
   // expected-error @+1 {{expected to collapse or expand dims}}
-  %0 = linalg.reshape %arg0 [affine_map<(i)->(i)>] : memref<?xf32> into memref<?xf32>
+  %0 = linalg.reshape %arg0 [[0]] : memref<?xf32> into memref<?xf32>
 }
 
 // -----
 
 func @reshape(%arg0: memref<?x?x?xf32>) {
   // expected-error @+1 {{expected rank of the collapsed type(2) to be the number of reassociation maps(1)}}
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>] :
-    memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
-}
-
-// -----
-
-func @reshape(%arg0: memref<?x?x?xf32>) {
-  // expected-error @+1 {{expected reassociation map #0 of same rank as expanded memref(3), but got 1}}
-  %0 = linalg.reshape %arg0 [affine_map<(i) -> (i)>, affine_map<(i, j, k) -> (k)>] :
+  %0 = linalg.reshape %arg0 [[0, 1]] :
     memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
 }
 
@@ -378,7 +370,7 @@ func @reshape(%arg0: memref<?x?x?xf32>) {
 
 func @reshape(%arg0: memref<?x?x?xf32>) {
   // expected-error @+1 {{expected reassociation map #1 to be valid and contiguous}}
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (k, j)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [1, 2]] :
     memref<?x?x?xf32> into memref<?x?xf32, offset: 0, strides: [?, 1]>
 }
 
@@ -386,7 +378,7 @@ func @reshape(%arg0: memref<?x?x?xf32>) {
 
 func @reshape(%arg0: memref<?x?x?xf32>) {
   // expected-error @+1 {{expected collapsed type to be 'memref<?x?xf32>', but got 'memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>'}}
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (k)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [2]] :
     memref<?x?x?xf32> into memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>>
 }
 
@@ -463,11 +455,8 @@ func @illegal_expanding_reshape_dynamic_tensor
   (%arg0: tensor<?x?x?xf32>) -> tensor<?x?x?x4x?xf32>
 {
   // expected-error @+1 {{invalid to have a single dimension (2) expanded into multiple dynamic dims (2,4)}}
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
-    tensor<?x?x?xf32> into tensor<?x?x?x4x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]]
+      : tensor<?x?x?xf32> into tensor<?x?x?x4x?xf32>
   return %0 : tensor<?x?x?x4x?xf32>
 }
 
@@ -477,11 +466,8 @@ func @illegal_expanding_reshape_dynamic_memref
   (%arg0: memref<?x?x?xf32>) -> memref<?x?x?x4x?xf32>
 {
   // expected-error @+1 {{invalid to have a single dimension (2) expanded into multiple dynamic dims (2,4)}}
-  %0 = linalg.reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
-    memref<?x?x?xf32> into memref<?x?x?x4x?xf32>
+  %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]]
+      : memref<?x?x?xf32> into memref<?x?x?x4x?xf32>
   return %0 : memref<?x?x?x4x?xf32>
 }
 
@@ -491,11 +477,8 @@ func @illegal_expanding_reshape_static_tensor
   (%arg0: tensor<2x3x20xf32>) -> tensor<2x3x2x4x5xf32>
 {
   // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}}
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
-    tensor<2x3x20xf32> into tensor<2x3x2x4x5xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]]
+      : tensor<2x3x20xf32> into tensor<2x3x2x4x5xf32>
   return %0 : tensor<2x3x2x4x5xf32>
 }
 
@@ -505,11 +488,8 @@ func @illegal_collapsing_reshape_static_tensor
   (%arg0: tensor<2x3x2x4x5xf32>) -> tensor<2x3x20xf32>
 {
   // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}}
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
-    tensor<2x3x2x4x5xf32> into tensor<2x3x20xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]]
+      : tensor<2x3x2x4x5xf32> into tensor<2x3x20xf32>
   return %0 : tensor<2x3x20xf32>
 }
 
@@ -519,11 +499,8 @@ func @illegal_expanding_reshape_static_memref
   (%arg0: memref<2x3x20xf32>) -> memref<2x3x2x4x5xf32>
 {
   // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}}
-  %0 = linalg.reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
-    memref<2x3x20xf32> into memref<2x3x2x4x5xf32>
+  %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]]
+      : memref<2x3x20xf32> into memref<2x3x2x4x5xf32>
   return %0 : memref<2x3x2x4x5xf32>
 }
 
@@ -533,11 +510,8 @@ func @illegal_collapsing_reshape_static_memref
   (%arg0: memref<2x3x2x4x5xf32>) -> memref<2x3x20xf32>
 {
   // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}}
-  %0 = linalg.reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
-    memref<2x3x2x4x5xf32> into memref<2x3x20xf32>
+  %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]]
+      : memref<2x3x2x4x5xf32> into memref<2x3x20xf32>
   return %0 : memref<2x3x20xf32>
 }
 
@@ -546,10 +520,8 @@ func @illegal_collapsing_reshape_static_memref
 func @illegal_collapsing_reshape_mixed_tensor(%arg0 : tensor<?x?xf32>) -> tensor<?x4x5xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}}
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x?xf32> into tensor<?x4x5xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<?x?xf32> into tensor<?x4x5xf32>
   return %0 : tensor<?x4x5xf32>
 }
 
@@ -558,10 +530,8 @@ func @illegal_collapsing_reshape_mixed_tensor(%arg0 : tensor<?x?xf32>) -> tensor
 func @illegal_collapsing_reshape_mixed_tensor_2(%arg0 : tensor<?x?xf32>) -> tensor<?x4x5xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}}
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>] :
-       tensor<?x?xf32> into tensor<?x4x5xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<?x?xf32> into tensor<?x4x5xf32>
   return %0 : tensor<?x4x5xf32>
 }
 
@@ -570,10 +540,8 @@ func @illegal_collapsing_reshape_mixed_tensor_2(%arg0 : tensor<?x?xf32>) -> tens
 func @illegal_expanding_reshape_mixed_tensor(%arg0 : tensor<?x4x5xf32>) -> tensor<?x?xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}}
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x4x5xf32> into tensor<?x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<?x4x5xf32> into tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
 
@@ -582,10 +550,8 @@ func @illegal_expanding_reshape_mixed_tensor(%arg0 : tensor<?x4x5xf32>) -> tenso
 func @illegal_expanding_reshape_mixed_tensor_2(%arg0 : tensor<?x4x5xf32>) -> tensor<?x?xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}}
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>] :
-       tensor<?x4x5xf32> into tensor<?x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<?x4x5xf32> into tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
 
@@ -594,10 +560,8 @@ func @illegal_expanding_reshape_mixed_tensor_2(%arg0 : tensor<?x4x5xf32>) -> ten
 func @illegal_collapsing_reshape_mixed_memref(%arg0 : memref<?x?xf32>) -> memref<?x4x5xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}}
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x?xf32> into memref<?x4x5xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<?x?xf32> into memref<?x4x5xf32>
   return %0 : memref<?x4x5xf32>
 }
 
@@ -606,10 +570,8 @@ func @illegal_collapsing_reshape_mixed_memref(%arg0 : memref<?x?xf32>) -> memref
 func @illegal_collapsing_reshape_mixed_memref_2(%arg0 : memref<?x?xf32>) -> memref<?x4x5xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}}
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>] :
-       memref<?x?xf32> into memref<?x4x5xf32>
+  %0 = linalg.reshape %arg0 [[0], [1, 2]]
+      : memref<?x?xf32> into memref<?x4x5xf32>
   return %0 : memref<?x4x5xf32>
 }
 
@@ -618,10 +580,8 @@ func @illegal_collapsing_reshape_mixed_memref_2(%arg0 : memref<?x?xf32>) -> memr
 func @illegal_expanding_reshape_mixed_memref(%arg0 : memref<?x4x5xf32>) -> memref<?x?xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}}
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x4x5xf32> into memref<?x?xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<?x4x5xf32> into memref<?x?xf32>
   return %0 : memref<?x?xf32>
 }
 
@@ -630,10 +590,8 @@ func @illegal_expanding_reshape_mixed_memref(%arg0 : memref<?x4x5xf32>) -> memre
 func @illegal_expanding_reshape_mixed_memref_2(%arg0 : memref<?x4x5xf32>) -> memref<?x?xf32>
 {
   // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}}
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0)>,
-          affine_map<(d0, d1, d2) -> (d1, d2)>] :
-       memref<?x4x5xf32> into memref<?x?xf32>
+  %0 = linalg.reshape %arg0 [[0], [1, 2]]
+      : memref<?x4x5xf32> into memref<?x?xf32>
   return %0 : memref<?x?xf32>
 }
 

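Note that one negative test disappears from invalid.mlir above (the one
expecting "expected reassociation map #0 of same rank as expanded memref(3),
but got 1"), presumably because with index groups the rank of the expanded
side is implied by the indices themselves, so a per-map rank mismatch can no
longer be expressed in the textual form.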
diff --git a/mlir/test/Dialect/Linalg/llvm.mlir b/mlir/test/Dialect/Linalg/llvm.mlir
index d79c72161b191..ea57d8ef299b9 100644
--- a/mlir/test/Dialect/Linalg/llvm.mlir
+++ b/mlir/test/Dialect/Linalg/llvm.mlir
@@ -16,10 +16,8 @@ func @range(%arg0: index) {
 
 func @reshape_static_expand(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
   // Reshapes that expand a contiguous tensor with some 1's.
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                             affine_map<(i, j, k, l, m) -> (k)>,
-                             affine_map<(i, j, k, l, m) -> (l, m)>] :
-    memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]]
+      : memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
   return %0 : memref<1x3x4x1x5xf32>
 }
 // CHECK-LABEL: func @reshape_static_expand
@@ -52,9 +50,7 @@ func @reshape_static_expand(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
 //       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 
 func @reshape_static_collapse(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> {
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                             affine_map<(i, j, k, l, m) -> (k)>,
-                             affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]] :
     memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
   return %0 : memref<3x4x5xf32>
 }

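For the memref flavor exercised in llvm.mlir, the same index-group syntax
applies. A small sketch, not part of the patch, of a collapse/expand round
trip on an identity-layout memref:

  func @memref_reshape_example(%m: memref<3x4x5xf32>) -> memref<3x4x5xf32> {
    // Collapse the two leading contiguous dims: 3x4 -> 12.
    %0 = linalg.reshape %m [[0, 1], [2]]
        : memref<3x4x5xf32> into memref<12x5xf32>
    // Expand them back with the same reassociation.
    %1 = linalg.reshape %0 [[0, 1], [2]]
        : memref<12x5xf32> into memref<3x4x5xf32>
    return %1 : memref<3x4x5xf32>
  }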
diff --git a/mlir/test/Dialect/Linalg/reshape_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
index 0e7239ea01c04..9855dc9e66827 100644
--- a/mlir/test/Dialect/Linalg/reshape_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -7,9 +7,7 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xf32>,
                                          %arg1 : tensor<?x?x?xf32>) ->
                                          tensor<?x?x?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor<?x?x4x?xf32> into tensor<?x?x?xf32>
   %1 = linalg.generic {
      indexing_maps = [#map0, #map1, #map1],
@@ -23,29 +21,24 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xf32>,
   return %1 : tensor<?x?x?xf32>
 }
 
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
-//  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d1)>
-//  CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
 //  CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d0, d1, d2)>
 //  CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3, d0, d1)>
 //      CHECK: func @generic_op_reshape_producer_fusion
 // CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x4x?xf32>
 // CHECK-SAME:   %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
 //      CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME:     [0], [1, 2], [3]
 //      CHECK:   %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP3]], #[[MAP4]]]
+// CHECK-SAME:     [0], [1], [2, 3]
 //      CHECK:   %[[T2:.+]] = linalg.tensor_reshape %[[T0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP3]], #[[MAP4]]]
+// CHECK-SAME:     [0], [1], [2, 3]
 //      CHECK:   %[[T3:.+]] = linalg.generic
 // CHECK-SAME:     indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP6]]]
 // CHECK-SAME:     ["parallel", "parallel", "parallel", "parallel"]
 // CHECK-SAME:     ins(%[[ARG0]], %[[T1]] : tensor<?x?x4x?xf32>, tensor<?x?x?x4xf32>)
 // CHECK-SAME:     outs(%[[T2]] : tensor<?x?x?x4xf32>)
 //      CHECK:   %[[T4:.+]] = linalg.tensor_reshape %[[T3]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP3]], #[[MAP4]]]
+// CHECK-SAME:     [0], [1], [2, 3]
 // CHECK-SAME:     tensor<?x?x?x4xf32> into tensor<?x?x?xf32>
 //      CHECK:   return %[[T4]]
 
@@ -65,26 +58,23 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?xf32>,
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor<?x?xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor<?x?xf32> into tensor<?x4x?x5xf32>
   return %1 : tensor<?x4x?x5xf32>
 }
 
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 //  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //      CHECK: func @generic_op_reshape_consumer_fusion
 // CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
 // CHECK-SAME:   %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
 //      CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME:     [0], [1, 2, 3]
 // CHECK-SAME:     tensor<?x?xf32> into tensor<?x4x?x5xf32>
 //      CHECK:   %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME:     [0], [1, 2, 3]
 // CHECK-SAME:     tensor<?x?xf32> into tensor<?x4x?x5xf32>
 //      CHECK:   %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME:     [0], [1, 2, 3]
 //      CHECK:   %[[T3:.+]] = linalg.generic
 // CHECK-SAME:     indexing_maps = [#[[MAP2]], #[[MAP2]], #[[MAP2]]]
 // CHECK-SAME:     ["parallel", "parallel", "parallel", "parallel"]
@@ -109,21 +99,10 @@ func @reshape_as_consumer_permutation
          %1 = addf %arg0, %arg1 : f32
          linalg.yield %1 : f32
        } -> tensor<?x?x?xf32>
-  %d = linalg.tensor_reshape %c
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>]
+  %d = linalg.tensor_reshape %c [[0, 1], [2], [3, 4, 5]]
        : tensor<?x?x?xf32> into tensor<?x2x?x3x4x?xf32>
   return %d : tensor<?x2x?x3x4x?xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>
-//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>
-//  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
-//  CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>
-//  CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>
-//  CHECK-DAG: #[[MAP7:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>
 //  CHECK-DAG: #[[MAP8:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d0, d1, d5)>
 //  CHECK-DAG: #[[MAP9:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d5)>
 //  CHECK-DAG: #[[MAP10:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d5, d2, d3, d4)>
@@ -131,13 +110,13 @@ func @reshape_as_consumer_permutation
 // CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
 // CHECK-SAME:   %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
 //      CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME:     [0, 1, 2], [3, 4], [5]
 // CHECK-SAME:     tensor<?x?x?xf32> into tensor<3x4x?x?x2x?xf32>
 //      CHECK:   %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME:     [#[[MAP3]], #[[MAP4]]]
+// CHECK-SAME:     [0, 1, 2], [3]
 // CHECK-SAME:     tensor<?x?xf32> into tensor<3x4x?x?xf32>
 //      CHECK:   %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP5]], #[[MAP6]], #[[MAP7]]]
+// CHECK-SAME:     [0, 1], [2], [3, 4, 5]
 //      CHECK:   %[[T3:.+]] = linalg.generic
 // CHECK-SAME:     indexing_maps = [#[[MAP8]], #[[MAP9]], #[[MAP10]]]
 // CHECK-SAME:     ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]
@@ -164,18 +143,16 @@ func @generic_op_reshape_consumer_static(%arg0: tensor<264x4xf32>)
       %2 = mulf %arg1, %arg2 : f32
       linalg.yield %2 : f32
     } -> tensor<264x4xf32>
-  %2 = linalg.tensor_reshape %1 [#map1, #map2] :
+  %2 = linalg.tensor_reshape %1 [[0, 1], [2]] :
     tensor<264x4xf32> into tensor<8x33x4xf32>
   return %2 : tensor<8x33x4xf32>
 }
 
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d2)>
 //  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 //      CHECK: func @generic_op_reshape_consumer_static
 // CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<264x4xf32>
 //      CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME:     [0, 1], [2]
 // CHECK-SAME:     tensor<264x4xf32> into tensor<8x33x4xf32>
 //      CHECK:   %[[T1:.+]] = linalg.init_tensor [8, 33, 4]
 //      CHECK:   %[[T2:.+]] = linalg.generic
@@ -193,9 +170,7 @@ func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xi32>,
                                          %arg1 : tensor<?x?x?xi32>) ->
                                          tensor<?x?x?xi32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor<?x?x4x?xi32> into tensor<?x?x?xi32>
   %1 = linalg.indexed_generic {
      indexing_maps = [#map0, #map1, #map1],
@@ -243,9 +218,7 @@ func @indexed_consumer_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xi32>,
                                          %arg1 : tensor<?x?x?xi32>) ->
                                          tensor<?x?x?xi32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor<?x?x4x?xi32> into tensor<?x?x?xi32>
   %1 = linalg.generic {
      indexing_maps = [#map0, #map1, #map1],
@@ -309,8 +282,7 @@ func @indexed_generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?xi32>,
       %5 = addi %3, %4 : i32
       linalg.yield %5 : i32
   } -> tensor<?x?xi32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor<?x?xi32> into tensor<?x?x4x5xi32>
   return %1 : tensor<?x?x4x5xi32>
 }
@@ -354,8 +326,7 @@ func @indexed_producer_reshape_consumer_fusion(%arg0 : tensor<?x?xi32>,
       %5 = addi %3, %4 : i32
       linalg.yield %5 : i32
   } -> tensor<?x?xi32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor<?x?xi32> into tensor<?x?x4x5xi32>
   return %1 : tensor<?x?x4x5xi32>
 }
@@ -402,20 +373,10 @@ func @reshape_as_consumer_permutation
          %7 = addi %5, %6 : i32
          linalg.yield %7 : i32
        } -> tensor<6x4x210xi32>
-  %d = linalg.tensor_reshape %c
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>]
+  %d = linalg.tensor_reshape %c [[0, 1], [2], [3, 4, 5]]
        : tensor<6x4x210xi32> into tensor<2x3x4x5x6x7xi32>
   return %d : tensor<2x3x4x5x6x7xi32>
 }
-
-
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>
-//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>
-//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //   CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d0, d1, d5)>
 //   CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d5)>
 //   CHECK-DAG: #[[MAP7:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d5, d2, d3, d4)>
@@ -425,9 +386,9 @@ func @reshape_as_consumer_permutation
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<210x6x4xi32>
 //  CHECK-SAME:   %[[ARG1:.+]]: tensor<210x4xi32>
 //   CHECK-DAG:   %[[T1:.+]] = linalg.tensor_reshape %[[ARG0]]
-//  CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//  CHECK-SAME:     [0, 1, 2], [3, 4], [5]
 //   CHECK-DAG:   %[[T2:.+]] = linalg.tensor_reshape %[[ARG1]]
-//  CHECK-SAME:     [#[[MAP3]], #[[MAP4]]]
+//  CHECK-SAME:     [0, 1, 2], [3]
 //   CHECK-DAG:   %[[T0:.+]] = linalg.init_tensor [2, 3, 4, 5, 6, 7]
 //       CHECK:   %[[T4:.+]] = linalg.indexed_generic
 //  CHECK-SAME:     indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP7]]]
@@ -475,20 +436,12 @@ func @reshape_as_consumer_permutation
          %7 = addi %5, %6 : i32
          linalg.yield %7 : i32
        } -> tensor<6x4x210xi32>
-  %d = linalg.tensor_reshape %c
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>]
+  %d = linalg.tensor_reshape %c [[0, 1], [2], [3, 4, 5]]
        : tensor<6x4x210xi32> into tensor<2x3x4x5x6x7xi32>
   return %d : tensor<2x3x4x5x6x7xi32>
 }
 
 
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>
-//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>
-//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //   CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d0, d1, d5)>
 //   CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d5)>
 //   CHECK-DAG: #[[MAP7:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d5, d2, d3, d4)>
@@ -498,9 +451,9 @@ func @reshape_as_consumer_permutation
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<210x6x4xi32>
 //  CHECK-SAME:   %[[ARG1:.+]]: tensor<210x4xi32>
 //   CHECK-DAG:   %[[T1:.+]] = linalg.tensor_reshape %[[ARG0]]
-//  CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//  CHECK-SAME:     [0, 1, 2], [3, 4], [5]
 //   CHECK-DAG:   %[[T2:.+]] = linalg.tensor_reshape %[[ARG1]]
-//  CHECK-SAME:     [#[[MAP3]], #[[MAP4]]]
+//  CHECK-SAME:     [0, 1, 2], [3]
 //   CHECK-DAG:   %[[T0:.+]] = linalg.init_tensor [2, 3, 4, 5, 6, 7]
 //       CHECK:   %[[T4:.+]] = linalg.generic
 //  CHECK-SAME:     indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP7]]]
@@ -530,8 +483,7 @@ func @reshape_as_consumer_permutation
 func @reshape_as_producer_projected_permutation(
     %arg0 : tensor<33x8x?xi32>, %shape : tensor<264x?x4xi32>) -> tensor<264x?x4xi32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1)>,
-                                    affine_map<(d0, d1, d2) -> (d2)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
     : tensor<33x8x?xi32> into tensor<264x?xi32>
   %1 = linalg.indexed_generic
     {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1)>,
@@ -554,9 +506,6 @@ func @reshape_as_producer_projected_permutation(
 //   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
 //   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0 + d1 * 8)>
-//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-//   CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d2)>
-//   CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //       CHECK: @reshape_as_producer_projected_permutation
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<33x8x?xi32>
 //       CHECK:   %[[RES:.+]] = linalg.indexed_generic
@@ -578,7 +527,7 @@ func @reshape_as_producer_projected_permutation(
 //       CHECK:       %[[T6:.+]] = addi %[[T4]], %[[T5]] : i32
 //       CHECK:       linalg.yield %[[T6]] : i32
 //       CHECK:    %[[RES2:.+]] = linalg.tensor_reshape %[[RES]]
-//  CHECK-SAME:      [#[[MAP3]], #[[MAP4]], #[[MAP5]]]
+//  CHECK-SAME:      [0, 1], [2], [3]
 //  CHECK-SAME:    : tensor<33x8x?x4xi32> into tensor<264x?x4xi32>
 //       CHECK:  return %[[RES2]] : tensor<264x?x4xi32>
 
@@ -587,8 +536,7 @@ func @reshape_as_producer_projected_permutation(
 func @reshape_as_producer_projected_permutation(
     %arg0 : tensor<33x8x?xi32>, %shape : tensor<264x?x4xi32>) -> tensor<264x?x4xi32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1)>,
-                                    affine_map<(d0, d1, d2) -> (d2)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
     : tensor<33x8x?xi32> into tensor<264x?xi32>
   %1 = linalg.generic
     {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1)>,
@@ -614,9 +562,6 @@ func @reshape_as_producer_projected_permutation(
 //   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
 //   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0 + d1 * 8)>
-//   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-//   CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d2)>
-//   CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //       CHECK: @reshape_as_producer_projected_permutation
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<33x8x?xi32>
 //       CHECK:   %[[RES:.+]] = linalg.generic
@@ -638,7 +583,7 @@ func @reshape_as_producer_projected_permutation(
 //       CHECK:       %[[T6:.+]] = addi %[[T4]], %[[T5]] : i32
 //       CHECK:       linalg.yield %[[T6]] : i32
 //       CHECK:    %[[RES2:.+]] = linalg.tensor_reshape %[[RES]]
-//  CHECK-SAME:      [#[[MAP3]], #[[MAP4]], #[[MAP5]]]
+//  CHECK-SAME:      [0, 1], [2], [3]
 //  CHECK-SAME:    : tensor<33x8x?x4xi32> into tensor<264x?x4xi32>
 //       CHECK:  return %[[RES2]] : tensor<264x?x4xi32>
 
@@ -659,29 +604,24 @@ func @generic_op_reshape_consumer_fusion_projected(%arg0 : tensor<?x?xf32>,
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor<?x?xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor<?x?xf32> into tensor<?x?x4x5xf32>
   return %1 : tensor<?x?x4x5xf32>
 }
 
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
-//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//  CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 //  CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //  CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d0, d1, d2)>
 //      CHECK: func @generic_op_reshape_consumer_fusion_projected
 // CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
 // CHECK-SAME:   %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
 //      CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME:     [0, 1, 2], [3]
 // CHECK-SAME:     tensor<?x?xf32> into tensor<?x4x5x?xf32>
 //      CHECK:   %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME:     [0, 1, 2], [3]
 // CHECK-SAME:     tensor<?x?xf32> into tensor<?x4x5x?xf32>
 //      CHECK:   %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP2]], #[[MAP3]]]
+// CHECK-SAME:     [0], [1, 2, 3]
 //      CHECK:   %[[T3:.+]] = linalg.generic
 // CHECK-SAME:     indexing_maps = [#[[MAP4]], #[[MAP4]], #[[MAP5]]]
 // CHECK-SAME:     ["parallel", "parallel", "parallel", "parallel"]
@@ -692,8 +632,8 @@ func @generic_op_reshape_consumer_fusion_projected(%arg0 : tensor<?x?xf32>,
 // -----
 
 func @unit_dim_reshape_expansion(%arg0 : tensor<1x5xf32>) -> tensor<5x5xf32> {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1) -> (d0, d1)>] : tensor<1x5xf32> into tensor<5xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1]]
+      : tensor<1x5xf32> into tensor<5xf32>
   %1 = linalg.init_tensor [5, 5] : tensor<5x5xf32>
   %2 = linalg.generic
     {indexing_maps = [affine_map<(d0, d1) -> (d0)>,
@@ -722,8 +662,7 @@ func @unit_dim_reshape_collapse(%arg0 : tensor<5xf32>) -> tensor<5x1x5xf32> {
   ^bb0(%arg2: f32, %arg3: f32):  // no predecessors
     linalg.yield %arg2 : f32
   } -> tensor<5x5xf32>
-  %2 = linalg.tensor_reshape %1
-    [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>]
+  %2 = linalg.tensor_reshape %1 [[0, 1], [2]]
     : tensor<5x5xf32> into tensor<5x1x5xf32>
   return %2 : tensor<5x1x5xf32>
 }
@@ -738,10 +677,7 @@ func @unit_dim_reshape_expansion_full
   (%arg0 : tensor<1x?x1x2x1x4xf32>, %arg1 : tensor<?x2x4xf32>)
   -> tensor<?x2x4xf32> {
   %c1 = constant 1 : index
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2], [3, 4], [5]]
     : tensor<1x?x1x2x1x4xf32> into tensor<?x2x4xf32>
   %1 = memref.dim %arg0, %c1 : tensor<1x?x1x2x1x4xf32>
   %2 = linalg.init_tensor [%1, 2, 4] : tensor<?x2x4xf32>

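For readers skimming the test churn: the bracketed groups that replace the affine maps are reassociation indices, where each group lists the source dimensions folded into (or unfolded from) one result dimension. A minimal free-standing sketch, not taken from this commit's tests (function name and shapes are illustrative only):

  // Each group in the reassociation pairs one result dimension with the
  // contiguous source dimensions it absorbs: [0] keeps dim 0, [1, 2]
  // collapses dims 1 and 2 (3 * 4 = 12).
  func @reassociation_sketch(%arg0 : tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
    %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
        : tensor<2x3x4xf32> into tensor<2x12xf32>
    // The expanding direction reuses the same reassociation, read against
    // the higher-rank (result) type.
    %1 = linalg.tensor_reshape %0 [[0], [1, 2]]
        : tensor<2x12xf32> into tensor<2x3x4xf32>
    return %1 : tensor<2x3x4xf32>
  }
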
diff --git a/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
index b1a04db826de9..623350de97a8f 100644
--- a/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
@@ -2,12 +2,8 @@
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
-                                         %arg1 : tensor<?x?x4x?xf32>) ->
-                                         tensor<?x?x4x?xf32>
-{
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+    %arg1 : tensor<?x?x4x?xf32>) -> tensor<?x?x4x?xf32> {
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor<?x?x?xf32> into tensor<?x?x4x?xf32>
   %1 = linalg.generic {
     indexing_maps = [#map0, #map0, #map0],
@@ -20,16 +16,12 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
   } -> tensor<?x?x4x?xf32>
   return %1 : tensor<?x?x4x?xf32>
 }
-
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //   CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
 //   CHECK-DAG: #[[MAP4:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //       CHECK: func @generic_op_reshape_producer_fusion
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?x?xf32>
 //       CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-//  CHECK-SAME:   [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//  CHECK-SAME:   [0], [1, 2], [3]
 //       CHECK: linalg.generic
 //  CHECK-SAME:   indexing_maps = [#[[MAP3]], #[[MAP4]], #[[MAP4]]]
 //  CHECK-SAME:   ins(%[[ARG0]], %{{.+}} : tensor<?x?x?xf32>, tensor<?x?x4x?xf32>)
@@ -39,9 +31,7 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
-                                         %arg1 : tensor<?x?x4x5xf32>) ->
-                                         tensor<?x?xf32>
-{
+    %arg1 : tensor<?x?x4x5xf32>) -> tensor<?x?xf32> {
   %0 = linalg.generic {
     indexing_maps = [#map0, #map0, #map0],
     iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
@@ -51,20 +41,17 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor<?x?x4x5xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor<?x?x4x5xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
 
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 //   CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //   CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
 //       CHECK: func @generic_op_reshape_consumer_fusion
 //  CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x4x5xf32>
 //       CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-//  CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:     [0], [1, 2, 3]
 //       CHECK:   linalg.generic
 //  CHECK-SAME:     indexing_maps = [#[[MAP2]], #[[MAP2]], #[[MAP3]]]
 //  CHECK-SAME:     outs(%[[T0]] : tensor<?x?xf32>)
@@ -74,9 +61,7 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
   -> tensor<?x?x4x?xi32> {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor<?x?x?xi32> into tensor<?x?x4x?xi32>
   %1 = linalg.indexed_generic {
     indexing_maps = [#map0, #map0],
@@ -90,16 +75,12 @@ func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
   } -> tensor<?x?x4x?xi32>
   return %1 : tensor<?x?x4x?xi32>
 }
-
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-//   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 //   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
 //   CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //       CHECK: func @indexed_generic_op_reshape_producer_fusion
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?x?xi32>
 //       CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-//  CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//  CHECK-SAME:     [0], [1, 2], [3]
 //       CHECK:   linalg.indexed_generic
 //  CHECK-SAME:     indexing_maps = [#[[MAP3]], #[[MAP4]]]
 //  CHECK-SAME:     ins(%[[ARG0]] : tensor<?x?x?xi32>)
@@ -119,20 +100,16 @@ func @indexed_generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
     %3 = addi %arg6, %2 : i32
     linalg.yield %3 : i32
   } -> tensor<?x?x4x5xi32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor<?x?x4x5xi32> into tensor<?x?xi32>
   return %1 : tensor<?x?xi32>
 }
-
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 //   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 //   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
 //       CHECK: func @indexed_generic_op_reshape_consumer_fusion
 //  CHECK-SAME:   %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x4x5xi32>
 //       CHECK:   %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-//  CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:     [0], [1, 2, 3]
 //       CHECK:   linalg.indexed_generic
 //  CHECK-SAME:     indexing_maps = [#[[MAP2]], #[[MAP3]]]
 //  CHECK-SAME:     outs(%[[T0]] : tensor<?x?xi32>)
@@ -140,12 +117,11 @@ func @indexed_generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
 
 // -----
 
-#map0 = affine_map<(d0, d1, d2) -> (d0)>
-#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
 #map2 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 func @generic_op_021_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<3x7x5xf32> {
-  %0 = linalg.tensor_reshape %arg0 [#map0, #map1] : tensor<3x35xf32> into tensor<3x5x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [3, 7, 5] : tensor<3x7x5xf32>
   %2 = linalg.generic
     {indexing_maps = [#map2, #map3],
@@ -166,12 +142,11 @@ func @generic_op_021_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf3
 
 // -----
 
-#map0 = affine_map<(d0, d1, d2) -> (d0)>
-#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
 #map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
 func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x7x3xf32> {
-  %0 = linalg.tensor_reshape %arg0 [#map0, #map1] : tensor<3x35xf32> into tensor<3x5x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [5, 7, 3] : tensor<5x7x3xf32>
   %2 = linalg.generic
     {indexing_maps = [#map2, #map3],
@@ -197,7 +172,8 @@ func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32
 #map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 func @generic_op_102_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x3x7xf32> {
-  %0 = linalg.tensor_reshape %arg0 [#map0, #map1] : tensor<3x35xf32> into tensor<3x5x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [5, 3, 7] : tensor<5x3x7xf32>
   %2 = linalg.generic
     {indexing_maps = [#map2, #map3],
@@ -232,19 +208,17 @@ func @generic_op_102_permultation_reshape_consumer_fusion(%arg0 : tensor<3x5x7xf
     ^bb0(%arg2: f32, %arg3 : f32):  // no predecessors
       linalg.yield %arg2 : f32
   } -> tensor<5x3x7xf32>
-  %2 = linalg.tensor_reshape %1 [#map2, #map3] : tensor<5x3x7xf32> into tensor<5x21xf32>
+  %2 = linalg.tensor_reshape %1 [[0], [1, 2]]
+      : tensor<5x3x7xf32> into tensor<5x21xf32>
   return %2 : tensor<5x21xf32>
 }
-
-//   CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0)>
-//   CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d1, d2)>
 //   CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 //   CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2) -> (d1, d0 * 7 + d2)>
 //       CHECK: func @generic_op_102_permultation_reshape_consumer_fusion
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<3x5x7xf32>
 //       CHECK:   %[[T0:.+]] = linalg.init_tensor [5, 3, 7]
 //       CHECK:   %[[T1:.+]] = linalg.tensor_reshape %[[T0]]
-//  CHECK-SAME:     [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:     [0], [1, 2]
 //       CHECK:   linalg.generic
 //  CHECK-SAME:     indexing_maps = [#[[MAP2]], #[[MAP3]]]
 //  CHECK-SAME:     ins(%[[ARG0]] : tensor<3x5x7xf32>)
@@ -266,8 +240,7 @@ func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor<?x?x?x5xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor<?x?x?x5xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }

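One FileCheck detail recurs in the rewritten CHECK lines below: FileCheck reserves a leading "[[" for [[VAR]] captures, so the first bracket of the new reassociation syntax is matched with the regex block {{\[}}, which emits a literal '['. For example (the operand wildcard is illustrative):

  //       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]

Without the escape, FileCheck would attempt to parse "[[0," as a pattern-variable definition and fail on the check line; the brackets after the first one are unambiguous and need no escaping.
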
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index 441987a11bbdc..1435f7b6bdb1b 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -10,17 +10,6 @@
 // CHECK-DAG: #[[$id_1d:.*]] = affine_map<(d0, d1, d2) -> (d1)>
 // CHECK-DAG: #[[$permute_0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
 // CHECK-DAG: #[[$permute_1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
-// CHECK-DAG: #[[$reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-// CHECK-DAG: #[[$reshape5D0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
-// CHECK-DAG: #[[$reshape5D1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
-// CHECK-DAG: #[[$reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
-// CHECK-DAG: #[[$reshape5D345:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
-// CHECK-DAG: #[[$reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
-// CHECK-DAG: #[[$reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
-// CHECK-DAG: #[[$reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
-// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
 // CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 // CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
@@ -571,68 +560,53 @@ func @indexed_generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?,
 
 // -----
 
-func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2: tensor<3x?x5xf32>) {
+func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>,
+                     %arg2: tensor<3x?x5xf32>) {
   // Reshapes that collapse and expand back a contiguous buffer.
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [2]] :
     memref<3x4x5xf32> into memref<12x5xf32>
-  %r0 = linalg.reshape %0 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r0 = linalg.reshape %0 [[0, 1], [2]] :
     memref<12x5xf32> into memref<3x4x5xf32>
-  %1 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i)>,
-                             affine_map<(i, j, k) -> (j, k)>] :
+  %1 = linalg.reshape %arg0 [[0], [1, 2]] :
     memref<3x4x5xf32> into memref<3x20xf32>
-  %r1 = linalg.reshape %1 [affine_map<(i, j, k) -> (i)>,
-                           affine_map<(i, j, k) -> (j, k)>] :
+  %r1 = linalg.reshape %1 [[0], [1, 2]] :
     memref<3x20xf32> into memref<3x4x5xf32>
-  %2 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j, k)>] :
+  %2 = linalg.reshape %arg0 [[0, 1, 2]] :
     memref<3x4x5xf32> into memref<60xf32>
-  %r2 = linalg.reshape %2 [affine_map<(i, j, k) -> (i, j, k)>] :
+  %r2 = linalg.reshape %2 [[0, 1, 2]] :
     memref<60xf32> into memref<3x4x5xf32>
   // Reshapes that expand and collapse back a contiguous buffer with some 1's.
-  %3 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                             affine_map<(i, j, k, l, m) -> (k)>,
-                             affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %3 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]] :
     memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
-  %r3 = linalg.reshape %3 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                           affine_map<(i, j, k, l, m) -> (k)>,
-                           affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %r3 = linalg.reshape %3 [[0, 1], [2], [3, 4]] :
     memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
   // Reshapes on tensors.
-  %t0 = linalg.tensor_reshape %arg1 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                                     affine_map<(i, j, k, l, m) -> (k)>,
-                                     affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %t0 = linalg.tensor_reshape %arg1 [[0, 1], [2], [3, 4]] :
     tensor<3x4x5xf32> into tensor<1x3x4x1x5xf32>
-  %rt0 = linalg.tensor_reshape %t0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                                    affine_map<(i, j, k, l, m) -> (k)>,
-                                    affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %rt0 = linalg.tensor_reshape %t0 [[0, 1], [2], [3, 4]] :
     tensor<1x3x4x1x5xf32> into tensor<3x4x5xf32>
-  %t1 = linalg.tensor_reshape %arg2 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                                     affine_map<(i, j, k, l, m) -> (k)>,
-                                     affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %t1 = linalg.tensor_reshape %arg2 [[0, 1], [2], [3, 4]] :
     tensor<3x?x5xf32> into tensor<1x3x?x1x5xf32>
-  %rt1 = linalg.tensor_reshape %t1 [affine_map<(i, j, k, l, m) -> (i)>,
-                                    affine_map<(i, j, k, l, m) -> (j, k)>,
-                                    affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %rt1 = linalg.tensor_reshape %t1 [[0], [1, 2], [3, 4]] :
     tensor<1x3x?x1x5xf32> into tensor<1x?x5xf32>
   return
 }
 // CHECK-LABEL: func @reshape_static
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<12x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<12x5xf32> into memref<3x4x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD0]], #[[$reshapeD12]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0], [1, 2]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<3x20xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD0]], #[[$reshapeD12]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0], [1, 2]]
 //  CHECK-SAME:     memref<3x20xf32> into memref<3x4x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD012]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1, 2]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<60xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD012]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1, 2]]
 //  CHECK-SAME:     memref<60xf32> into memref<3x4x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshape5D01]], #[[$reshape5D2]], #[[$reshape5D34]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2], [3, 4]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshape5D01]], #[[$reshape5D2]], #[[$reshape5D34]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2], [3, 4]]
 //  CHECK-SAME:     memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
 //
 //       CHECK:   linalg.tensor_reshape {{.*}}: tensor<3x4x5xf32> into tensor<1x3x4x1x5xf32>
@@ -645,43 +619,36 @@ func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2:
 func @reshape_dynamic(%arg0: memref<?x?x?xf32>,
                       %arg1: memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]>,
                       %arg2: memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]>) {
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [2]] :
     memref<?x?x?xf32> into memref<?x?xf32>
-  %r0 = linalg.reshape %0 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r0 = linalg.reshape %0 [[0, 1], [2]] :
     memref<?x?xf32> into memref<?x4x?xf32>
-  %1 = linalg.reshape %arg1 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %1 = linalg.reshape %arg1 [[0, 1], [2]] :
     memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]> into
     memref<?x?xf32, offset : 0, strides : [?, 1]>
-  %r1 = linalg.reshape %1 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r1 = linalg.reshape %1 [[0, 1], [2]] :
     memref<?x?xf32, offset : 0, strides : [?, 1]> into
     memref<?x4x?xf32, offset : 0, strides : [?, ?, 1]>
-  %2 = linalg.reshape %arg2 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %2 = linalg.reshape %arg2 [[0, 1], [2]] :
     memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]> into
     memref<?x?xf32, offset : ?, strides : [?, 1]>
-  %r2 = linalg.reshape %2 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r2 = linalg.reshape %2 [[0, 1], [2]] :
     memref<?x?xf32, offset : ?, strides : [?, 1]> into
     memref<?x4x?xf32, offset : ?, strides : [?, ?, 1]>
   return
 }
-
 // CHECK-LABEL: func @reshape
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<?x?x?xf32> into memref<?x?xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<?x?xf32> into memref<?x4x?xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3DOFF0]]> into memref<?x?xf32, #[[$strided2DOFF0]]>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<?x?xf32, #[[$strided2DOFF0]]> into memref<?x4x?xf32, #[[$strided3DOFF0]]>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]> into memref<?x?xf32, #[[$strided2D]]>
-//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 //  CHECK-SAME:     memref<?x?xf32, #[[$strided2D]]> into memref<?x4x?xf32, #[[$strided3D]]>
 
 func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?x?xf32>, %c3: memref<?x?x?xf32>,
@@ -749,30 +716,26 @@ func @init_tensor(%arg0 : index, %arg1 : index)
 func @legal_collapsing_reshape_dynamic_tensor
   (%arg0: tensor<?x?x?x4x?xf32>) -> tensor<?x?x?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]] :
     tensor<?x?x?x4x?xf32> into tensor<?x?x?xf32>
   return %0 : tensor<?x?x?xf32>
 }
-//     CHECK: func @legal_collapsing_reshape_dynamic_tensor
-//     CHECK:   linalg.tensor_reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
+//      CHECK: func @legal_collapsing_reshape_dynamic_tensor
+//      CHECK:   linalg.tensor_reshape
+// CHECK-SAME:    [0], [1], [2, 3, 4]
 
 // -----
 
 func @legal_collapsing_reshape_dynamic_memref
   (%arg0: memref<?x?x?x4x?xf32>) -> memref<?x?x?xf32>
 {
-  %0 = linalg.reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
+  %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]] :
     memref<?x?x?x4x?xf32> into memref<?x?x?xf32>
   return %0 : memref<?x?x?xf32>
 }
-//     CHECK: func @legal_collapsing_reshape_dynamic_memref
-//     CHECK:   linalg.reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
+//      CHECK: func @legal_collapsing_reshape_dynamic_memref
+//      CHECK:   linalg.reshape
+// CHECK-SAME:    [0], [1], [2, 3, 4]
 
 // -----
 

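The EDSC test below exercises the same syntax on the memref variant, linalg.reshape. As free-standing IR, the round trip its CHECK lines verify reads roughly as follows (a sketch mirroring those lines, with an assumed function name):

  // Collapse the two leading dims of a contiguous buffer (4 * 8 = 32),
  // then expand back with the same reassociation; only contiguous
  // dimension groups of the buffer may be folded this way.
  func @memref_reassociation_sketch(%m : memref<4x8x16xf32>) -> memref<4x8x16xf32> {
    %0 = linalg.reshape %m [[0, 1], [2]]
        : memref<4x8x16xf32> into memref<32x16xf32>
    %1 = linalg.reshape %0 [[0, 1], [2]]
        : memref<32x16xf32> into memref<4x8x16xf32>
    return %1 : memref<4x8x16xf32>
  }
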
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 5cb04e6aba52f..ed7fef160ff96 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -1046,8 +1046,8 @@ TEST_FUNC(linalg_generic_dilated_conv_nhwc) {
 
 // clang-format off
 // CHECK-LABEL: func @linalg_metadata_ops
-//       CHECK: linalg.reshape {{.*}} [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] : memref<4x8x16xf32> into memref<32x16xf32>
-//       CHECK: linalg.reshape {{.*}} [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] : memref<32x16xf32> into memref<4x8x16xf32>
+//       CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]] : memref<4x8x16xf32> into memref<32x16xf32>
+//       CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]] : memref<32x16xf32> into memref<4x8x16xf32>
 // clang-format on
 TEST_FUNC(linalg_metadata_ops) {
   using linalg::ReassociationExprs;
