[Mlir-commits] [mlir] 2791162 - [mlir] make memref.subview produce strided layout
Alex Zinenko
llvmlistbot at llvm.org
Fri Sep 16 01:57:02 PDT 2022
Author: Alex Zinenko
Date: 2022-09-16T10:56:46+02:00
New Revision: 2791162b01e3199b24f2d18f7b370157e2c57daf
URL: https://github.com/llvm/llvm-project/commit/2791162b01e3199b24f2d18f7b370157e2c57daf
DIFF: https://github.com/llvm/llvm-project/commit/2791162b01e3199b24f2d18f7b370157e2c57daf.diff
LOG: [mlir] make memref.subview produce strided layout
The memref.subview operation was initially designed to work only on memrefs
with strided layouts and has never supported anything else. Port it to use the
recently added StridedLayoutAttr instead of extracting the strides implicitly
from affine maps.
Reviewed By: nicolasvasilache
Differential Revision: https://reviews.llvm.org/D133938
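For illustration (a hypothetical example, not taken from the patch): after this
change, the result type inferred for a subview is expressed with the strided
layout attribute rather than an affine_map layout. A minimal sketch with static
offsets:

func.func @subview_strided(%m: memref<10x20xf32>)
    -> memref<2x5xf32, strided<[20, 1], offset: 40>> {
  // The source strides are [20, 1]; slicing at [2, 0] gives
  // offset 2 * 20 + 0 * 1 = 40 and keeps the strides [20, 1].
  %0 = memref.subview %m[2, 0] [2, 5] [1, 1]
      : memref<10x20xf32> to memref<2x5xf32, strided<[20, 1], offset: 40>>
  return %0 : memref<2x5xf32, strided<[20, 1], offset: 40>>
}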
Added:
Modified:
mlir/include/mlir/IR/BuiltinTypes.h
mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
mlir/lib/IR/BuiltinTypes.cpp
mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
mlir/test/Dialect/Linalg/promote.mlir
mlir/test/Dialect/Linalg/tile.mlir
mlir/test/Dialect/Linalg/transform-patterns.mlir
mlir/test/Dialect/Linalg/transform-promotion.mlir
mlir/test/Dialect/MemRef/canonicalize.mlir
mlir/test/Dialect/MemRef/invalid.mlir
mlir/test/Dialect/MemRef/multibuffer.mlir
mlir/test/Dialect/SCF/one-shot-bufferize.mlir
mlir/test/Dialect/Tensor/bufferize.mlir
mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
mlir/test/Transforms/canonicalize.mlir
mlir/test/Transforms/compose-subview.mlir
mlir/unittests/Dialect/MemRef/InferShapeTest.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h
index 96c7804d96495..46f46f1b182a0 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.h
+++ b/mlir/include/mlir/IR/BuiltinTypes.h
@@ -413,28 +413,18 @@ inline bool TensorType::classof(Type type) {
/// MemRefs with a layout map in strided form include:
/// 1. empty or identity layout map, in which case the stride information is
/// the canonical form computed from sizes;
-/// 2. single affine map layout of the form `K + k0 * d0 + ... kn * dn`,
-/// where K and ki's are constants or symbols.
+/// 2. a StridedLayoutAttr layout;
+/// 3. any other layout that can be converted into a single affine map layout of
+/// the form `K + k0 * d0 + ... kn * dn`, where K and ki's are constants or
+/// symbols.
///
/// A stride specification is a list of integer values that are either static
-/// or dynamic (encoded with getDynamicStrideOrOffset()). Strides encode the
-/// distance in the number of elements between successive entries along a
+/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode
+/// the distance in the number of elements between successive entries along a
/// particular dimension.
-///
-/// For example, `memref<42x16xf32, (64 * d0 + d1)>` specifies a view into a
-/// non-contiguous memory region of `42` by `16` `f32` elements in which the
-/// distance between two consecutive elements along the outer dimension is `64`
-/// and the distance between two consecutive elements along the inner dimension
-/// is `1`.
-///
-/// The convention is that the strides for dimensions d0, .. dn appear in
-/// order to make indexing intuitive into the result.
LogicalResult getStridesAndOffset(MemRefType t,
SmallVectorImpl<int64_t> &strides,
int64_t &offset);
-LogicalResult getStridesAndOffset(MemRefType t,
- SmallVectorImpl<AffineExpr> &strides,
- AffineExpr &offset);
/// Return a version of `t` with identity layout if it can be determined
/// statically that the layout is the canonical contiguous strided layout.
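For illustration, hypothetical type declarations (not from this patch) covering
the three strided-form cases listed in the comment above:

// 1. Identity layout: strides [16, 1] are computed from the shape.
func.func private @case1(memref<42x16xf32>)
// 2. An explicit StridedLayoutAttr layout.
func.func private @case2(memref<42x16xf32, strided<[64, 1]>>)
// 3. An affine-map layout convertible to strided form: strides [64, 1], offset 0.
func.func private @case3(memref<42x16xf32, affine_map<(d0, d1) -> (d0 * 64 + d1)>>)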
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 90275c1458b4b..c2361719223f8 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -2184,11 +2184,10 @@ Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
}
// The type is now known.
- return MemRefType::get(
- staticSizes, sourceMemRefType.getElementType(),
- makeStridedLinearLayoutMap(targetStrides, targetOffset,
- sourceMemRefType.getContext()),
- sourceMemRefType.getMemorySpace());
+ return MemRefType::get(staticSizes, sourceMemRefType.getElementType(),
+ StridedLayoutAttr::get(sourceMemRefType.getContext(),
+ targetOffset, targetStrides),
+ sourceMemRefType.getMemorySpace());
}
Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
@@ -2224,14 +2223,19 @@ Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
Optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
computeRankReductionMask(inferredType.getShape(), resultShape);
assert(dimsToProject.has_value() && "invalid rank reduction");
- llvm::SmallBitVector dimsToProjectVector(inferredType.getRank());
- for (unsigned dim : *dimsToProject)
- dimsToProjectVector.set(dim);
-
- // Compute layout map and result type.
- AffineMap map = getProjectedMap(inferredType.getLayout().getAffineMap(),
- dimsToProjectVector);
- return MemRefType::get(resultShape, inferredType.getElementType(), map,
+
+ // Compute the layout and result type.
+ auto inferredLayout = inferredType.getLayout().cast<StridedLayoutAttr>();
+ SmallVector<int64_t> rankReducedStrides;
+ rankReducedStrides.reserve(resultShape.size());
+ for (auto [idx, value] : llvm::enumerate(inferredLayout.getStrides())) {
+ if (!dimsToProject->contains(idx))
+ rankReducedStrides.push_back(value);
+ }
+ return MemRefType::get(resultShape, inferredType.getElementType(),
+ StridedLayoutAttr::get(inferredLayout.getContext(),
+ inferredLayout.getOffset(),
+ rankReducedStrides),
inferredType.getMemorySpace());
}
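For illustration, a hypothetical rank-reducing subview (not from this patch):
as implemented above, projecting out a unit dimension also drops its stride,
while the offset is preserved.

func.func @rank_reduced(%m: memref<8x16x4xf32>)
    -> memref<16x4xf32, strided<[4, 1], offset: 128>> {
  // Source strides are [64, 4, 1]; the unit dimension (stride 64) is
  // projected out, leaving strides [4, 1] with offset 2 * 64 = 128.
  %0 = memref.subview %m[2, 0, 0] [1, 16, 4] [1, 1, 1]
      : memref<8x16x4xf32> to memref<16x4xf32, strided<[4, 1], offset: 128>>
  return %0 : memref<16x4xf32, strided<[4, 1], offset: 128>>
}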
@@ -2363,8 +2367,8 @@ Value SubViewOp::getViewSource() { return getSource(); }
/// Return true if t1 and t2 have equal offsets (both dynamic or of same
/// static value).
static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) {
- AffineExpr t1Offset, t2Offset;
- SmallVector<AffineExpr> t1Strides, t2Strides;
+ int64_t t1Offset, t2Offset;
+ SmallVector<int64_t> t1Strides, t2Strides;
auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset);
auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset);
return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset;
@@ -2506,16 +2510,25 @@ static MemRefType getCanonicalSubViewResultType(
// Return nullptr as failure mode.
if (!unusedDims)
return nullptr;
- SmallVector<int64_t> shape;
- for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
- if (unusedDims->test(sizes.index()))
+
+ auto layout = nonRankReducedType.getLayout().cast<StridedLayoutAttr>();
+ SmallVector<int64_t> shape, strides;
+ unsigned numDimsAfterReduction =
+ nonRankReducedType.getRank() - unusedDims->count();
+ shape.reserve(numDimsAfterReduction);
+ strides.reserve(numDimsAfterReduction);
+ for (const auto &[idx, size, stride] :
+ llvm::zip(llvm::seq<unsigned>(0, nonRankReducedType.getRank()),
+ nonRankReducedType.getShape(), layout.getStrides())) {
+ if (unusedDims->test(idx))
continue;
- shape.push_back(sizes.value());
+ shape.push_back(size);
+ strides.push_back(stride);
}
- AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
- if (!layoutMap.isIdentity())
- layoutMap = getProjectedMap(layoutMap, *unusedDims);
- return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
+
+ return MemRefType::get(shape, nonRankReducedType.getElementType(),
+ StridedLayoutAttr::get(sourceType.getContext(),
+ layout.getOffset(), strides),
nonRankReducedType.getMemorySpace());
}
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index f53a94146efcb..b9b5bedd70420 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -766,9 +766,22 @@ static LogicalResult extractStrides(AffineExpr e,
llvm_unreachable("unexpected binary operation");
}
-LogicalResult mlir::getStridesAndOffset(MemRefType t,
- SmallVectorImpl<AffineExpr> &strides,
- AffineExpr &offset) {
+/// A stride specification is a list of integer values that are either static
+/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode
+/// the distance in the number of elements between successive entries along a
+/// particular dimension.
+///
+/// For example, `memref<42x16xf32, (64 * d0 + d1)>` specifies a view into a
+/// non-contiguous memory region of `42` by `16` `f32` elements in which the
+/// distance between two consecutive elements along the outer dimension is `64`
+/// and the distance between two consecutive elements along the inner dimension
+/// is `1`.
+///
+/// The convention is that the strides for dimensions d0, .. dn appear in
+/// order to make indexing intuitive into the result.
+static LogicalResult getStridesAndOffset(MemRefType t,
+ SmallVectorImpl<AffineExpr> &strides,
+ AffineExpr &offset) {
AffineMap m = t.getLayout().getAffineMap();
if (m.getNumResults() != 1 && !m.isIdentity())
@@ -807,12 +820,12 @@ LogicalResult mlir::getStridesAndOffset(MemRefType t,
for (auto &stride : strides)
stride = simplifyAffineExpr(stride, numDims, numSymbols);
- /// In practice, a strided memref must be internally non-aliasing. Test
- /// against 0 as a proxy.
- /// TODO: static cases can have more advanced checks.
- /// TODO: dynamic cases would require a way to compare symbolic
- /// expressions and would probably need an affine set context propagated
- /// everywhere.
+ // In practice, a strided memref must be internally non-aliasing. Test
+ // against 0 as a proxy.
+ // TODO: static cases can have more advanced checks.
+ // TODO: dynamic cases would require a way to compare symbolic
+ // expressions and would probably need an affine set context propagated
+ // everywhere.
if (llvm::any_of(strides, [](AffineExpr e) {
return e == getAffineConstantExpr(0, e.getContext());
})) {
@@ -827,6 +840,15 @@ LogicalResult mlir::getStridesAndOffset(MemRefType t,
LogicalResult mlir::getStridesAndOffset(MemRefType t,
SmallVectorImpl<int64_t> &strides,
int64_t &offset) {
+ // Happy path: the type uses the strided layout directly.
+ if (auto strided = t.getLayout().dyn_cast<StridedLayoutAttr>()) {
+ llvm::append_range(strides, strided.getStrides());
+ offset = strided.getOffset();
+ return success();
+ }
+
+ // Otherwise, defer to the affine fallback as layouts are supposed to be
+ // convertible to affine maps.
AffineExpr offsetExpr;
SmallVector<AffineExpr, 4> strideExprs;
if (failed(::getStridesAndOffset(t, strideExprs, offsetExpr)))
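For illustration, two hypothetical types (not from this patch) that both
describe strides [128, 1] and offset 2: the first is read directly from the
attribute by the happy path above, the second goes through the affine-map
fallback.

func.func private @happy_path(memref<4x128xf32, strided<[128, 1], offset: 2>>)
func.func private @affine_fallback(
    memref<4x128xf32, affine_map<(d0, d1) -> (d0 * 128 + d1 + 2)>>)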
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
index d792565469388..1e18c66062007 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
@@ -74,12 +74,11 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
// -----
// CHECK: #[[$map2a:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-// CHECK: #[[$map2b:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 20 + s0 + d1)>
// CHECK-LABEL: func @callee(
// CHECK-SAME: %{{.*}}: index,
// CHECK-SAME: %[[r:.*]]: memref<2x5xf32, #[[$map2a]]>) {
// CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<10x20xf32>
-// CHECK: %[[subview:.*]] = memref.subview %[[alloc]]{{.*}} : memref<10x20xf32> to memref<2x5xf32, #[[$map2b]]>
+// CHECK: %[[subview:.*]] = memref.subview %[[alloc]]{{.*}} : memref<10x20xf32> to memref<2x5xf32, strided<[20, 1], offset: ?>>
// CHECK: %[[casted:.*]] = memref.cast %[[subview]]
// CHECK: memref.copy %[[casted]], %[[r]]
// CHECK: memref.dealloc %[[alloc]]
@@ -98,9 +97,8 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
// CHECK-NO-LAYOUT: memref.copy %[[alloc2]], %[[r]]
// CHECK-NO-LAYOUT: memref.dealloc %[[alloc2]]
-// CHECK-BASELINE: #[[$map2:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 20 + s0 + d1)>
// CHECK-BASELINE-LABEL: func @callee(
-// CHECK-BASELINE-SAME: %{{.*}}: index) -> memref<2x5xf32, #[[$map2]]> {
+// CHECK-BASELINE-SAME: %{{.*}}: index) -> memref<2x5xf32, strided<[20, 1], offset: ?>> {
// CHECK-BASELINE: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<10x20xf32>
// CHECK-BASELINE: %[[subview:.*]] = memref.subview %[[alloc]]
// CHECK-BASELINE: return %[[subview]]
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
index 79535bbe9e7da..61529c783505b 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
@@ -48,24 +48,20 @@ func.func private @external_func_with_return_val(tensor<4xi32>) -> f32
// A function that returns a non-equivalent tensor with layout map.
-// CHECK: #[[$map2:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 10 + s0 + d1)>
-// CHECK-LABEL: func @return_extract_slice(%{{.*}}) -> memref<2x?xf32,
-// CHECK-SAME: #[[$map2]]> {
+// CHECK-LABEL: func @return_extract_slice(%{{.*}}) -> memref<2x?xf32, strided<[10, 1], offset: ?>>
// CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<20x10xf32>
-// CHECK: %[[subview:.*]] = memref.subview {{.*}} : memref<20x10xf32> to memref<2x?xf32, #[[$map2]]>
+// CHECK: %[[subview:.*]] = memref.subview {{.*}} : memref<20x10xf32> to memref<2x?xf32, strided<[10, 1], offset: ?>>
// CHECK: return %[[subview]]
-// CHECK-NO-LAYOUT-MAP: #[[$map2:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 10 + s0 + d1)>
// CHECK-NO-LAYOUT-MAP-LABEL: func @return_extract_slice(%{{.*}}) -> memref<2x?xf32>
// CHECK-NO-LAYOUT-MAP: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<20x10xf32>
-// CHECK-NO-LAYOUT-MAP: %[[subview:.*]] = memref.subview {{.*}} : memref<20x10xf32> to memref<2x?xf32, #[[$map2]]>
+// CHECK-NO-LAYOUT-MAP: %[[subview:.*]] = memref.subview {{.*}} : memref<20x10xf32> to memref<2x?xf32, strided<[10, 1], offset: ?>>
// CHECK-NO-LAYOUT-MAP: %[[alloc_no_layout:.*]] = memref.alloc(%{{.*}}) : memref<2x?xf32>
// CHECK-NO-LAYOUT-MAP: memref.copy %[[subview]], %[[alloc_no_layout]]
// CHECK-NO-LAYOUT-MAP: memref.dealloc %[[alloc]]
// CHECK-NO-LAYOUT-MAP: return %[[alloc_no_layout]]
// CHECK-FULLY-DYNAMIC-LAYOUT-MAP: #[[$map2a:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-// CHECK-FULLY-DYNAMIC-LAYOUT-MAP: #[[$map2b:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 10 + s0 + d1)>
// CHECK-FULLY-DYNAMIC-LAYOUT-MAP-LABEL: func @return_extract_slice(%{{.*}}) -> memref<2x?xf32,
// CHECK-FULLY-DYNAMIC-LAYOUT-MAP-SAME: #[[$map2a]]> {
func.func @return_extract_slice(%idx: index, %sz: index) -> (tensor<2x?xf32>)
@@ -375,11 +371,11 @@ func.func @scf_for_with_tensor_insert_slice(
-> (tensor<?xf32>, tensor<?xf32>)
{
// CHECK-NEXT: %[[SVA:.*]] = memref.subview %[[A]]
- // CHECK-NEXT: memref.copy %[[C]], %[[SVA]] : memref<4xf32, #[[$DYN_1D_MAP]]> to memref<4xf32, #[[$DYN_1D_MAP]]>
+ // CHECK-NEXT: memref.copy %[[C]], %[[SVA]] : memref<4xf32, #[[$DYN_1D_MAP]]> to memref<4xf32, strided<[?], offset: ?>>
%ttA = tensor.insert_slice %C into %tA[%i][4][1] : tensor<4xf32> into tensor<?xf32>
// CHECK-NEXT: %[[SVB:.*]] = memref.subview %[[B]]
- // CHECK-NEXT: memref.copy %[[C]], %[[SVB]] : memref<4xf32, #[[$DYN_1D_MAP]]> to memref<4xf32, #[[$DYN_1D_MAP]]>
+ // CHECK-NEXT: memref.copy %[[C]], %[[SVB]] : memref<4xf32, #[[$DYN_1D_MAP]]> to memref<4xf32, strided<[?], offset: ?>>
%ttB = tensor.insert_slice %C into %tB[%i][4][1] : tensor<4xf32> into tensor<?xf32>
// scf.yield is empty and is elided
diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 6ef8a1d71406c..86ea60dbd7b3d 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -4,8 +4,6 @@
#map2 = affine_map<(d0) -> (d0 + 4)>
#map3 = affine_map<(d0) -> (d0 + 3)>
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
func.func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
%c4 = arith.constant 4 : index
%c3 = arith.constant 3 : index
@@ -44,24 +42,24 @@ func.func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
///
// CHECK: %[[tmpA:.*]] = memref.alloca() : memref<32xi8>
// CHECK: %[[fullA:.*]] = memref.view %[[tmpA]][{{.*}}][{{.*}}] : memref<32xi8> to memref<?x?xf32>
-// CHECK: %[[partialA:.*]] = memref.subview %[[fullA]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D]]>
+// CHECK: %[[partialA:.*]] = memref.subview %[[fullA]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
///
// CHECK: %[[tmpB:.*]] = memref.alloca() : memref<48xi8>
// CHECK: %[[fullB:.*]] = memref.view %[[tmpB]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xf32>
-// CHECK: %[[partialB:.*]] = memref.subview %[[fullB]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D]]>
+// CHECK: %[[partialB:.*]] = memref.subview %[[fullB]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
///
// CHECK: %[[tmpC:.*]] = memref.alloca() : memref<24xi8>
// CHECK: %[[fullC:.*]] = memref.view %[[tmpC]][{{.*}}][{{.*}}] : memref<24xi8> to memref<?x?xf32>
-// CHECK: %[[partialC:.*]] = memref.subview %[[fullC]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D]]>
+// CHECK: %[[partialC:.*]] = memref.subview %[[fullC]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
-// CHECK: memref.copy %[[vA]], %[[partialA]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// CHECK: memref.copy %[[vB]], %[[partialB]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// CHECK: memref.copy %[[vC]], %[[partialC]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
+// CHECK: memref.copy %[[vA]], %[[partialA]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// CHECK: memref.copy %[[vB]], %[[partialB]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// CHECK: memref.copy %[[vC]], %[[partialC]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
//
// CHECK: linalg.matmul ins(%[[partialA]], %[[partialB]]{{.*}} outs(%[[partialC]]
//
// CHECK: memref.copy %[[partialC]], %[[vC]] :
-// CHECK: memref<?x?xf32, #[[$strided2D]]> to
+// CHECK: memref<?x?xf32, strided<[?, 1], offset: ?>> to
// CHECK: memref<?x?xf32, strided<[?, 1], offset: ?>>
//
// CHECK-NOT: memref.dealloc %[[tmpA]] : memref<32xi8>
@@ -117,24 +115,24 @@ func.func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
///
// CHECK: %[[tmpA_f64:.*]] = memref.alloc() : memref<64xi8>
// CHECK: %[[fullA_f64:.*]] = memref.view %[[tmpA_f64]][{{.*}}][{{.*}}] : memref<64xi8> to memref<?x?xf64>
-// CHECK: %[[partialA_f64:.*]] = memref.subview %[[fullA_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D]]>
+// CHECK: %[[partialA_f64:.*]] = memref.subview %[[fullA_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf64> to memref<?x?xf64, strided<[?, 1], offset: ?>>
///
// CHECK: %[[tmpB_f64:.*]] = memref.alloc() : memref<96xi8>
// CHECK: %[[fullB_f64:.*]] = memref.view %[[tmpB_f64]][{{.*}}][{{.*}}] : memref<96xi8> to memref<?x?xf64>
-// CHECK: %[[partialB_f64:.*]] = memref.subview %[[fullB_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D]]>
+// CHECK: %[[partialB_f64:.*]] = memref.subview %[[fullB_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf64> to memref<?x?xf64, strided<[?, 1], offset: ?>>
///
// CHECK: %[[tmpC_f64:.*]] = memref.alloc() : memref<48xi8>
// CHECK: %[[fullC_f64:.*]] = memref.view %[[tmpC_f64]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xf64>
-// CHECK: %[[partialC_f64:.*]] = memref.subview %[[fullC_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D]]>
+// CHECK: %[[partialC_f64:.*]] = memref.subview %[[fullC_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf64> to memref<?x?xf64, strided<[?, 1], offset: ?>>
-// CHECK: memref.copy %[[vA_f64]], %[[partialA_f64]] : memref<?x?xf64, strided<[?, 1], offset: ?>> to memref<?x?xf64, #[[$strided2D]]>
-// CHECK: memref.copy %[[vB_f64]], %[[partialB_f64]] : memref<?x?xf64, strided<[?, 1], offset: ?>> to memref<?x?xf64, #[[$strided2D]]>
-// CHECK: memref.copy %[[vC_f64]], %[[partialC_f64]] : memref<?x?xf64, strided<[?, 1], offset: ?>> to memref<?x?xf64, #[[$strided2D]]>
+// CHECK: memref.copy %[[vA_f64]], %[[partialA_f64]] : memref<?x?xf64, strided<[?, 1], offset: ?>> to memref<?x?xf64, strided<[?, 1], offset: ?>>
+// CHECK: memref.copy %[[vB_f64]], %[[partialB_f64]] : memref<?x?xf64, strided<[?, 1], offset: ?>> to memref<?x?xf64, strided<[?, 1], offset: ?>>
+// CHECK: memref.copy %[[vC_f64]], %[[partialC_f64]] : memref<?x?xf64, strided<[?, 1], offset: ?>> to memref<?x?xf64, strided<[?, 1], offset: ?>>
//
// CHECK: linalg.matmul ins(%[[partialA_f64]], %[[partialB_f64]]{{.*}} outs(%[[partialC_f64]]
//
// CHECK: memref.copy %[[partialC_f64]], %[[vC_f64]] :
-// CHECK: memref<?x?xf64, #[[$strided2D]]> to
+// CHECK: memref<?x?xf64, strided<[?, 1], offset: ?>> to
// CHECK: memref<?x?xf64, strided<[?, 1], offset: ?>>
//
// CHECK: memref.dealloc %[[tmpA_f64]] : memref<64xi8>
diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index 329e9841d314e..0fc2ca6efbbc1 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -3,15 +3,6 @@
// RUN: mlir-opt %s -linalg-tile="tile-sizes=0,0,2" -mlir-disable-threading=true | FileCheck %s -check-prefix=TILE-002
// RUN: mlir-opt %s -linalg-tile="tile-sizes=2,3,4" -mlir-disable-threading=true | FileCheck %s -check-prefix=TILE-234
-// TILE-2-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// TILE-02-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// TILE-234-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-
-// TILE-2-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// TILE-02-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// TILE-002-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// TILE-234-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
// TILE-2-DAG: #[[$bound_map:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 2)>
// TILE-02-DAG: #[[$bound_map:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 2)>
// TILE-002-DAG: #[[$bound_map:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 2)>
@@ -19,10 +10,6 @@
// TILE-234-DAG: #[[$bound_map_3:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 3)>
// TILE-234-DAG: #[[$bound_map_4:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 4)>
-// TILE-2-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
-// TILE-02-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
-// TILE-234-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
-
func.func @matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>,
%arg1: memref<?x?xf32, strided<[?, 1], offset: ?>>,
%arg2: memref<?x?xf32, strided<[?, 1], offset: ?>>) {
@@ -41,8 +28,8 @@ func.func @matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// TILE-2: %[[K:.*]] = memref.dim %{{.*}}, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
// TILE-2: %[[szK:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[M]]]
// TILE-2: %[[N:.*]] = memref.dim %{{.*}}, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
-// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[K]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-2: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
+// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[K]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-2: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// TILE-2: linalg.matmul ins(%[[sAi]]{{.*}} outs(%[[sCi]]
// TILE-02-LABEL: func @matmul(
@@ -54,8 +41,8 @@ func.func @matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// TILE-02: %[[szN:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[N]]]
// TILE-02: %[[M:.*]] = memref.dim %{{.*}}, %c0 : memref<?x?xf32, strided<[?, 1], offset: ?>>
// TILE-02: %[[szK:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[N]]]
-// TILE-02: %[[sBj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [%[[K]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-02: %[[sCj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
+// TILE-02: %[[sBj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [%[[K]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-02: %[[sCj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// TILE-02: linalg.matmul ins(%{{.*}}, %[[sBj]]{{.*}} outs(%[[sCj]]
// TILE-002-LABEL: func @matmul(
@@ -67,8 +54,8 @@ func.func @matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// TILE-002: %[[szK:.*]] = affine.min #[[$bound_map]](%[[K]])[%[[ubK]]]
// TILE-002: %[[szK_1:.*]] = affine.min #[[$bound_map]](%[[K]])[%[[ubK]]]
// TILE-002: %[[N:.*]] = memref.dim %{{.*}}, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
-// TILE-002: %[[sAj:.*]] = memref.subview %{{.*}}[0, %[[K]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-002: %[[sBj:.*]] = memref.subview %{{.*}}[%[[K]], 0] [%[[szK_1]], %[[N]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
+// TILE-002: %[[sAj:.*]] = memref.subview %{{.*}}[0, %[[K]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-002: %[[sBj:.*]] = memref.subview %{{.*}}[%[[K]], 0] [%[[szK_1]], %[[N]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// TILE-002: linalg.matmul ins(%[[sAj]], %[[sBj]]{{.*}} outs(%{{.*}}
// TILE-234-LABEL: func @matmul(
@@ -88,9 +75,9 @@ func.func @matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// TILE-234: %[[szN:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[ubN]]]
// TILE-234: %[[szM_1:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[ubM]]]
// TILE-234: %[[szN_1:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[ubN]]]
-// TILE-234: %[[sAik:.*]] = memref.subview %{{.*}}[%[[I]], %[[K]]] [%[[szM]], %[[szK]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-234: %[[sBkj:.*]] = memref.subview %{{.*}}[%[[K]], %[[J]]] [%[[szK_1]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-234: %[[sCij:.*]] = memref.subview %{{.*}}[%[[I]], %[[J]]] [%[[szM_1]], %[[szN_1]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
+// TILE-234: %[[sAik:.*]] = memref.subview %{{.*}}[%[[I]], %[[K]]] [%[[szM]], %[[szK]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-234: %[[sBkj:.*]] = memref.subview %{{.*}}[%[[K]], %[[J]]] [%[[szK_1]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-234: %[[sCij:.*]] = memref.subview %{{.*}}[%[[I]], %[[J]]] [%[[szM_1]], %[[szN_1]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
//
// TILE-234: linalg.matmul ins(%[[sAik]], %[[sBkj]]{{.*}} outs(%[[sCij]]
@@ -115,8 +102,8 @@ func.func @matmul_static(%arg0: memref<10x16xf32, strided<[?, 1], offset: ?>>,
// TILE-2-DAG: %[[C2:.*]] = arith.constant 2 : index
// TILE-2-DAG: %[[M:.*]] = arith.constant 10 : index
// TILE-2: scf.for %[[I:.*]] = %{{.*}} to %[[M]] step %{{.*}} {
-// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [2, 16] [1, 1] : memref<10x16xf32, strided<[?, 1], offset: ?>> to memref<2x16xf32, #[[$strided2D]]>
-// TILE-2: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [2, 12] [1, 1] : memref<10x12xf32, strided<[?, 1], offset: ?>> to memref<2x12xf32, #[[$strided2D]]>
+// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [2, 16] [1, 1] : memref<10x16xf32, strided<[?, 1], offset: ?>> to memref<2x16xf32, strided<[?, 1], offset: ?>>
+// TILE-2: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [2, 12] [1, 1] : memref<10x12xf32, strided<[?, 1], offset: ?>> to memref<2x12xf32, strided<[?, 1], offset: ?>>
// TILE-2: linalg.matmul ins(%[[sAi]], %{{.*}}{{.*}} outs(%[[sCi]]
// TILE-02-LABEL: func @matmul_static(
@@ -124,8 +111,8 @@ func.func @matmul_static(%arg0: memref<10x16xf32, strided<[?, 1], offset: ?>>,
// TILE-02-DAG: %[[C2:.*]] = arith.constant 2 : index
// TILE-02-DAG: %[[N:.*]] = arith.constant 12 : index
// TILE-02: scf.for %[[J:.*]] = %{{.*}} to %[[N]] step %{{.*}} {
-// TILE-02: %[[sBj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [16, 2] [1, 1] : memref<16x12xf32, strided<[?, 1], offset: ?>> to memref<16x2xf32, #[[$strided2D]]>
-// TILE-02: %[[sCj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [10, 2] [1, 1] : memref<10x12xf32, strided<[?, 1], offset: ?>> to memref<10x2xf32, #[[$strided2D]]>
+// TILE-02: %[[sBj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [16, 2] [1, 1] : memref<16x12xf32, strided<[?, 1], offset: ?>> to memref<16x2xf32, strided<[?, 1], offset: ?>>
+// TILE-02: %[[sCj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [10, 2] [1, 1] : memref<10x12xf32, strided<[?, 1], offset: ?>> to memref<10x2xf32, strided<[?, 1], offset: ?>>
// TILE-02: linalg.matmul ins(%{{.*}}, %[[sBj]]{{.*}} outs(%[[sCj]]
// TILE-002-LABEL: func @matmul_static(
@@ -133,8 +120,8 @@ func.func @matmul_static(%arg0: memref<10x16xf32, strided<[?, 1], offset: ?>>,
// TILE-002-DAG: %[[C2:.*]] = arith.constant 2 : index
// TILE-002-DAG: %[[C16:.*]] = arith.constant 16 : index
// TILE-002: scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[C16]] step %{{.*}} {
-// TILE-002: %[[sAj:.*]] = memref.subview %{{.*}}[0, %[[K]]] [10, 2] [1, 1] : memref<10x16xf32, strided<[?, 1], offset: ?>> to memref<10x2xf32, #[[$strided2D]]>
-// TILE-002: %[[sBj:.*]] = memref.subview %{{.*}}[%[[K]], 0] [2, 12] [1, 1] : memref<16x12xf32, strided<[?, 1], offset: ?>> to memref<2x12xf32, #[[$strided2D]]>
+// TILE-002: %[[sAj:.*]] = memref.subview %{{.*}}[0, %[[K]]] [10, 2] [1, 1] : memref<10x16xf32, strided<[?, 1], offset: ?>> to memref<10x2xf32, strided<[?, 1], offset: ?>>
+// TILE-002: %[[sBj:.*]] = memref.subview %{{.*}}[%[[K]], 0] [2, 12] [1, 1] : memref<16x12xf32, strided<[?, 1], offset: ?>> to memref<2x12xf32, strided<[?, 1], offset: ?>>
// TILE-002: linalg.matmul ins(%[[sAj]], %[[sBj]]{{.*}} outs(%{{.*}}
// TILE-234-LABEL: func @matmul_static(
@@ -148,9 +135,9 @@ func.func @matmul_static(%arg0: memref<10x16xf32, strided<[?, 1], offset: ?>>,
// TILE-234: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[C10]] step %{{.*}} {
// TILE-234: scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[C12]] step %{{.*}} {
// TILE-234: scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[C16]] step %{{.*}} {
-// TILE-234: %[[sAik:.*]] = memref.subview %{{.*}}[%[[I]], %[[K]]] [2, 4] [1, 1] : memref<10x16xf32, strided<[?, 1], offset: ?>> to memref<2x4xf32, #[[$strided2D]]>
-// TILE-234: %[[sBkj:.*]] = memref.subview %{{.*}}[%[[K]], %[[J]]] [4, 3] [1, 1] : memref<16x12xf32, strided<[?, 1], offset: ?>> to memref<4x3xf32, #[[$strided2D]]>
-// TILE-234: %[[sCij:.*]] = memref.subview %{{.*}}[%[[I]], %[[J]]] [2, 3] [1, 1] : memref<10x12xf32, strided<[?, 1], offset: ?>> to memref<2x3xf32, #[[$strided2D]]>
+// TILE-234: %[[sAik:.*]] = memref.subview %{{.*}}[%[[I]], %[[K]]] [2, 4] [1, 1] : memref<10x16xf32, strided<[?, 1], offset: ?>> to memref<2x4xf32, strided<[?, 1], offset: ?>>
+// TILE-234: %[[sBkj:.*]] = memref.subview %{{.*}}[%[[K]], %[[J]]] [4, 3] [1, 1] : memref<16x12xf32, strided<[?, 1], offset: ?>> to memref<4x3xf32, strided<[?, 1], offset: ?>>
+// TILE-234: %[[sCij:.*]] = memref.subview %{{.*}}[%[[I]], %[[J]]] [2, 3] [1, 1] : memref<10x12xf32, strided<[?, 1], offset: ?>> to memref<2x3xf32, strided<[?, 1], offset: ?>>
//
// TILE-234: linalg.matmul ins(%[[sAik]], %[[sBkj]]{{.*}} outs(%[[sCij]]
@@ -172,8 +159,8 @@ func.func @matvec(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>, %arg1: mem
// TILE-2: %[[szM:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[M]]]
// TILE-2: %[[N:.*]] = memref.dim %{{.*}}, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
// TILE-2: %[[szN:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[M]]]
-// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[N]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-2: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szN]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
+// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[N]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-2: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szN]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
// TILE-2: linalg.matvec ins(%[[sAi]], %{{.*}} outs(%[[sCi]]
// TILE-02-LABEL: func @matvec(
@@ -187,8 +174,8 @@ func.func @matvec(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>, %arg1: mem
// TILE-02: %[[M:.*]] = memref.dim %{{.*}}, %c0 : memref<?x?xf32, strided<[?, 1], offset: ?>>
// TILE-02: %[[szN:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[K]]]
// TILE-02: %[[szN_1:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[K]]]
-// TILE-02: %[[sAj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-02: %[[sBj:.*]] = memref.subview %{{.*}}[%[[J]]] [%[[szN_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
+// TILE-02: %[[sAj:.*]] = memref.subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-02: %[[sBj:.*]] = memref.subview %{{.*}}[%[[J]]] [%[[szN_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
// TILE-02: linalg.matvec ins(%[[sAj]], %[[sBj]]{{.*}} outs(%{{.*}}
// TILE-002-LABEL: func @matvec(
@@ -212,9 +199,9 @@ func.func @matvec(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>, %arg1: mem
// TILE-234: %[[szN:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[K]]]
// TILE-234: %[[szN_1:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[K]]]
// TILE-234: %[[szM_1:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[M]]]
-// TILE-234: %[[sAij:.*]] = memref.subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$strided2D]]>
-// TILE-234: %[[sBj:.*]] = memref.subview %{{.*}}[%[[J]]] [%[[szN_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
-// TILE-234: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
+// TILE-234: %[[sAij:.*]] = memref.subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// TILE-234: %[[sBj:.*]] = memref.subview %{{.*}}[%[[J]]] [%[[szN_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
+// TILE-234: %[[sCi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
//
// TILE-234: linalg.matvec ins(%[[sAij]], %[[sBj]]{{.*}} outs(%[[sCi]]
@@ -231,8 +218,8 @@ func.func @dot(%arg0: memref<?xf32, strided<[1], offset: ?>>, %arg1: memref<?xf3
// TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
// TILE-2: %[[szM:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[M]]]
// TILE-2: %[[szM_1:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[M]]]
-// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
-// TILE-2: %[[sBi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
+// TILE-2: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
+// TILE-2: %[[sBi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
// TILE-2: linalg.dot ins(%[[sAi]], %[[sBi]]{{.*}} outs(
// TILE-02-LABEL: func @dot(
@@ -248,8 +235,8 @@ func.func @dot(%arg0: memref<?xf32, strided<[1], offset: ?>>, %arg1: memref<?xf3
// TILE-234: scf.for %[[I:.*]] = %{{.*}} to %[[ubK]] step %{{.*}} {
// TILE-234: %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[ubK]]]
// TILE-234: %[[szM_1:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[ubK]]]
-// TILE-234: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
-// TILE-234: %[[sBi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, #[[$strided1D]]>
+// TILE-234: %[[sAi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
+// TILE-234: %[[sBi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM_1]]] [1] : memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
// TILE-234: linalg.dot ins(%[[sAi]], %[[sBi]]{{.*}} outs(
func.func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
@@ -260,13 +247,13 @@ func.func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
// TILE-2: for
// TILE-2-NOT: for
// TILE-2: memref.subview{{.*}} : memref<127x99xf32>
-// TILE-2: linalg.fill{{.*}} : memref<?x99xf32, #[[$stride_99_1_layout_map]]>
+// TILE-2: linalg.fill{{.*}} : memref<?x99xf32, strided<[99, 1], offset: ?>>
// TILE-02-LABEL: func @fill_static
// TILE-02: for
// TILE-02-NOT: for
// TILE-02: memref.subview{{.*}} : memref<127x99xf32>
-// TILE-02: linalg.fill{{.*}} : memref<127x?xf32, #[[$stride_99_1_layout_map]]>
+// TILE-02: linalg.fill{{.*}} : memref<127x?xf32, strided<[99, 1], offset: ?>>
// TILE-002-LABEL: func @fill_static
// TILE-002-NOT: for
@@ -277,7 +264,7 @@ func.func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
// TILE-234: for
// TILE-234-NOT: for
// TILE-234: memref.subview{{.*}} : memref<127x99xf32>
-// TILE-234: linalg.fill{{.*}} : memref<?x3xf32, #[[$stride_99_1_layout_map]]>
+// TILE-234: linalg.fill{{.*}} : memref<?x3xf32, strided<[99, 1], offset: ?>>
func.func @fill(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>, %arg1: f32) {
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index 0bbfb2582e777..3502f99581334 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -1,8 +1,6 @@
// RUN: mlir-opt %s -test-linalg-transform-patterns=test-patterns -split-input-file -test-transform-dialect-interpreter | FileCheck %s
-// CHECK-DAG: #[[$STRIDED_1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
// Map corresponding to a 2D memory access where the stride along the last dim is known to be 1.
-// CHECK-DAG: #[[$STRIDED_2D_u_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$kn:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
// CHECK-DAG: #[[$nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
// CHECK-DAG: #[[$km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
@@ -46,8 +44,8 @@ func.func @matvec(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// CHECK: scf.parallel {{.*}} step (%[[c5]])
// CHECK: scf.for {{.*}} step %[[c6]]
// CHECK: linalg.matvec
-// CHECK: ins({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>, memref<?xf32, #[[$STRIDED_1D]]>)
-// CHECK: outs({{.*}}: memref<?xf32, #[[$STRIDED_1D]]>)
+// CHECK: ins({{.*}}: memref<?x?xf32, strided<[?, 1], offset: ?>>, memref<?xf32, strided<[1], offset: ?>>)
+// CHECK: outs({{.*}}: memref<?xf32, strided<[1], offset: ?>>)
func.func @matmul(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
%B: memref<?x?xf32, strided<[?, 1], offset: ?>>,
@@ -85,8 +83,8 @@ func.func @matmul(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] {
// CHECK: linalg.matmul
-// CHECK: ins({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>, memref<?x?xf32, #[[$STRIDED_2D_u_1]]>)
-// CHECK: outs({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>)
+// CHECK: ins({{.*}}: memref<?x?xf32, strided<[?, 1], offset: ?>>, memref<?x?xf32, strided<[?, 1], offset: ?>>)
+// CHECK: outs({{.*}}: memref<?x?xf32, strided<[?, 1], offset: ?>>)
#matmul_accesses = [
affine_map<(m, n, k) -> (m, k)>,
@@ -147,8 +145,8 @@ func.func @matvec_perm(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c6]]
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c5]]
// CHECK: linalg.matvec
-// CHECK: ins({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>, memref<?xf32, #[[$STRIDED_1D]]>)
-// CHECK: outs({{.*}}: memref<?xf32, #[[$STRIDED_1D]]>)
+// CHECK: ins({{.*}}: memref<?x?xf32, strided<[?, 1], offset: ?>>, memref<?xf32, strided<[1], offset: ?>>)
+// CHECK: outs({{.*}}: memref<?xf32, strided<[1], offset: ?>>)
func.func @matmul_perm(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
%B: memref<?x?xf32, strided<[?, 1], offset: ?>>,
@@ -180,8 +178,8 @@ func.func @matmul_perm(%A: memref<?x?xf32, strided<[?, 1], offset: ?>>,
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] {
// CHECK: linalg.matmul
-// CHECK: ins({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>, memref<?x?xf32, #[[$STRIDED_2D_u_1]]>)
-// CHECK: outs({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>)
+// CHECK: ins({{.*}}: memref<?x?xf32, strided<[?, 1], offset: ?>>, memref<?x?xf32, strided<[?, 1], offset: ?>>)
+// CHECK: outs({{.*}}: memref<?x?xf32, strided<[?, 1], offset: ?>>)
func.func @tile_permute_parallel_loop(%arg0: memref<?x?xf32>,
%arg1: memref<?x?xf32>,
diff --git a/mlir/test/Dialect/Linalg/transform-promotion.mlir b/mlir/test/Dialect/Linalg/transform-promotion.mlir
index ad8b21e8d0a74..803bb60a7a2fa 100644
--- a/mlir/test/Dialect/Linalg/transform-promotion.mlir
+++ b/mlir/test/Dialect/Linalg/transform-promotion.mlir
@@ -1,7 +1,5 @@
// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
-// Map corresponding to a 2D memory access where the stride along the last dim is known to be 1.
-// CHECK-DAG: #[[$STRIDED_2D_u_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
func.func @promote_subview_matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>,
%arg1: memref<?x?xf32, strided<[?, 1], offset: ?>>,
%arg2: memref<?x?xf32, strided<[?, 1], offset: ?>>) {
@@ -44,18 +42,18 @@ func.func @promote_subview_matmul(%arg0: memref<?x?xf32, strided<[?, 1], offset:
// CHECK: %[[a0:.*]] = memref.alloc() : memref<32000000xi8>
// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<32000000xi8> to memref<?x?xf32>
// CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1]
-// CHECK-SAME: memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
+// CHECK-SAME: memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// CHECK: %[[a1:.*]] = memref.alloc() : memref<48000000xi8>
// CHECK: %[[v1:.*]] = memref.view %[[a1]]{{.*}} : memref<48000000xi8> to memref<?x?xf32>
// CHECK: %[[l1:.*]] = memref.subview %[[v1]][0, 0] [%{{.*}}, %{{.*}}] [1, 1]
-// CHECK-SAME: memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
+// CHECK-SAME: memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// CHECK: %[[a2:.*]] = memref.alloc() : memref<24000000xi8>
// CHECK: %[[v2:.*]] = memref.view %[[a2]]{{.*}} : memref<24000000xi8> to memref<?x?xf32>
// CHECK: %[[l2:.*]] = memref.subview %[[v2]][0, 0] [%{{.*}}, %{{.*}}] [1, 1]
-// CHECK-SAME: memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
-// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, #map{{.*}}>
-// CHECK: memref.copy %[[s1]], %[[l1]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, #map{{.*}}>
-// CHECK: memref.copy %[[s2]], %[[l2]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, #map{{.*}}>
+// CHECK-SAME: memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
+// CHECK: memref.copy %[[s1]], %[[l1]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
+// CHECK: memref.copy %[[s2]], %[[l2]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
// CHECK: linalg.matmul
// CHECK-SAME: ins(%[[v0]], %[[v1]] : memref<?x?xf32>, memref<?x?xf32>)
// CHECK-SAME: outs(%[[v2]] : memref<?x?xf32>)
@@ -113,11 +111,11 @@ func.func @promote_first_subview_matmul(%arg0: memref<?x?xf32, strided<[?, 1], o
// CHECK: %[[s2:.*]] = memref.subview {{.*}}: memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
// CHECK: %[[a0:.*]] = memref.alloc() : memref<32000000xi8>
// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<32000000xi8> to memref<?x?xf32>
-// CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
+// CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// CHECK-NOT: memref.alloc
// CHECK-NOT: memref.view
// CHECK-NOT: memref.subview
-// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, #map{{.*}}>
+// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
// CHECK-NOT: memref.copy
// CHECK: linalg.matmul
// CHECK-SAME: ins(%[[v0]], %[[s1]] : memref<?x?xf32>, memref<?x?xf32, strided<[?, ?], offset: ?>>)
@@ -151,9 +149,9 @@ func.func @aligned_promote_fill(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?
// CHECK: %[[s0:.*]] = memref.subview {{.*}}: memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
// CHECK: %[[a0:.*]] = memref.alloc() {alignment = 32 : i64} : memref<32000000xi8>
// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<32000000xi8> to memref<?x?xf32>
-// CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
+// CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// CHECK: linalg.fill ins({{.*}} : f32) outs(%[[v0]] : memref<?x?xf32>)
-// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, #map{{.*}}>
+// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, strided{{.*}}> to memref<?x?xf32, strided{{.*}}>
// CHECK: linalg.fill ins(%[[cf]] : f32) outs(%[[v0]] : memref<?x?xf32>)
transform.with_pdl_patterns {
@@ -185,9 +183,9 @@ func.func @aligned_promote_fill_complex(%arg0: memref<?x?xcomplex<f32>, strided<
// CHECK: %[[s0:.*]] = memref.subview {{.*}}: memref<?x?xcomplex<f32>, strided{{.*}}> to memref<?x?xcomplex<f32>, strided{{.*}}>
// CHECK: %[[a0:.*]] = memref.alloc() {alignment = 32 : i64} : memref<64000000xi8>
// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<64000000xi8> to memref<?x?xcomplex<f32>>
-// CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xcomplex<f32>> to memref<?x?xcomplex<f32>, #[[$STRIDED_2D_u_1]]>
+// CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref<?x?xcomplex<f32>> to memref<?x?xcomplex<f32>, strided<[?, 1], offset: ?>>
// CHECK: linalg.fill ins({{.*}} : complex<f32>) outs(%[[v0]] : memref<?x?xcomplex<f32>>)
-// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xcomplex<f32>, strided{{.*}}> to memref<?x?xcomplex<f32>, #map{{.*}}>
+// CHECK: memref.copy %[[s0]], %[[l0]] : memref<?x?xcomplex<f32>, strided{{.*}}> to memref<?x?xcomplex<f32>, strided{{.*}}>
// CHECK: linalg.fill ins(%[[cc]] : complex<f32>) outs(%[[v0]] : memref<?x?xcomplex<f32>>)
transform.with_pdl_patterns {
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index 1241d925e7f82..08144a2fee63e 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -2,37 +2,31 @@
// CHECK-LABEL: func @subview_of_size_memcast
// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: memref<4x6x16x32xi8>
-// CHECK: %[[S:.+]] = memref.subview %[[ARG0]][0, 1, 0, 0] [1, 1, 16, 32] [1, 1, 1, 1] : memref<4x6x16x32xi8> to memref<16x32xi8, #{{.*}}>
-// CHECK: %[[M:.+]] = memref.cast %[[S]] : memref<16x32xi8, #{{.*}}> to memref<16x32xi8, #{{.*}}>
-// CHECK: return %[[M]] : memref<16x32xi8, #{{.*}}>
+// CHECK: %[[S:.+]] = memref.subview %[[ARG0]][0, 1, 0, 0] [1, 1, 16, 32] [1, 1, 1, 1] : memref<4x6x16x32xi8> to memref<16x32xi8, strided{{.*}}>
+// CHECK: %[[M:.+]] = memref.cast %[[S]] : memref<16x32xi8, {{.*}}> to memref<16x32xi8, strided{{.*}}>
+// CHECK: return %[[M]] : memref<16x32xi8, strided{{.*}}>
func.func @subview_of_size_memcast(%arg : memref<4x6x16x32xi8>) ->
- memref<16x32xi8, affine_map<(d0, d1)[s0] -> (d0 * 32 + d1 + s0)>>{
+ memref<16x32xi8, strided<[32, 1], offset: ?>>{
%0 = memref.cast %arg : memref<4x6x16x32xi8> to memref<?x?x16x32xi8>
%1 = memref.subview %0[0, 1, 0, 0] [1, 1, 16, 32] [1, 1, 1, 1] :
memref<?x?x16x32xi8> to
- memref<16x32xi8, affine_map<(d0, d1)[s0] -> (d0 * 32 + d1 + s0)>>
- return %1 : memref<16x32xi8, affine_map<(d0, d1)[s0] -> (d0 * 32 + d1 + s0)>>
+ memref<16x32xi8, strided<[32, 1], offset: ?>>
+ return %1 : memref<16x32xi8, strided<[32, 1], offset: ?>>
}
// -----
-// CHECK-DAG: #[[MAP0:[0-9a-z]+]] = affine_map<(d0, d1)[s0] -> (d0 * 7 + s0 + d1)>
-// CHECK-DAG: #[[MAP1:[0-9a-z]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-#map0 = affine_map<(d0, d1, d2)[s0] -> (d0 * 35 + s0 + d1 * 7 + d2)>
-#map1 = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
-#map2 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-
// CHECK: func @subview_of_strides_memcast
-// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: memref<1x1x?xf32, #{{.*}}>
+// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: memref<1x1x?xf32, strided{{.*}}>
// CHECK: %[[S:.+]] = memref.subview %[[ARG0]][0, 0, 0] [1, 1, 4]
-// CHECK-SAME: to memref<1x4xf32, #[[MAP0]]>
+// CHECK-SAME: to memref<1x4xf32, strided<[7, 1], offset: ?>>
// CHECK: %[[M:.+]] = memref.cast %[[S]]
-// CHECK-SAME: to memref<1x4xf32, #[[MAP1]]>
+// CHECK-SAME: to memref<1x4xf32, strided<[?, ?], offset: ?>>
// CHECK: return %[[M]]
-func.func @subview_of_strides_memcast(%arg : memref<1x1x?xf32, #map0>) -> memref<1x4xf32, #map2> {
- %0 = memref.cast %arg : memref<1x1x?xf32, #map0> to memref<1x1x?xf32, #map1>
- %1 = memref.subview %0[0, 0, 0] [1, 1, 4] [1, 1, 1] : memref<1x1x?xf32, #map1> to memref<1x4xf32, #map2>
- return %1 : memref<1x4xf32, #map2>
+func.func @subview_of_strides_memcast(%arg : memref<1x1x?xf32, strided<[35, 7, 1], offset: ?>>) -> memref<1x4xf32, strided<[?, ?], offset: ?>> {
+ %0 = memref.cast %arg : memref<1x1x?xf32, strided<[35, 7, 1], offset: ?>> to memref<1x1x?xf32, strided<[?, ?, ?], offset: ?>>
+ %1 = memref.subview %0[0, 0, 0] [1, 1, 4] [1, 1, 1] : memref<1x1x?xf32, strided<[?, ?, ?], offset: ?>> to memref<1x4xf32, strided<[?, ?], offset: ?>>
+ return %1 : memref<1x4xf32, strided<[?, ?], offset: ?>>
}
// -----
@@ -88,9 +82,6 @@ func.func @rank_reducing_subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 :
// -----
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 384 + s0 + d1)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-
func.func @multiple_reducing_dims(%arg0 : memref<1x384x384xf32>,
%arg1 : index, %arg2 : index, %arg3 : index) -> memref<?xf32, strided<[1], offset: ?>>
{
@@ -101,15 +92,12 @@ func.func @multiple_reducing_dims(%arg0 : memref<1x384x384xf32>,
}
// CHECK: func @multiple_reducing_dims
// CHECK: %[[REDUCED1:.+]] = memref.subview %{{.+}}[0, %{{.+}}, %{{.+}}] [1, 1, %{{.+}}] [1, 1, 1]
-// CHECK-SAME: : memref<1x384x384xf32> to memref<1x?xf32, #[[MAP0]]>
+// CHECK-SAME: : memref<1x384x384xf32> to memref<1x?xf32, strided<[384, 1], offset: ?>>
// CHECK: %[[REDUCED2:.+]] = memref.subview %[[REDUCED1]][0, 0] [1, %{{.+}}] [1, 1]
-// CHECK-SAME: : memref<1x?xf32, #[[MAP0]]> to memref<?xf32, #[[MAP1]]>
+// CHECK-SAME: : memref<1x?xf32, strided<[384, 1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
// -----
-// CHECK-DAG: #[[MAP0]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[MAP1]] = affine_map<(d0)[s0] -> (d0 + s0)>
-
func.func @multiple_reducing_dims_dynamic(%arg0 : memref<?x?x?xf32>,
%arg1 : index, %arg2 : index, %arg3 : index) -> memref<?xf32, strided<[1], offset: ?>>
{
@@ -120,15 +108,12 @@ func.func @multiple_reducing_dims_dynamic(%arg0 : memref<?x?x?xf32>,
}
// CHECK: func @multiple_reducing_dims_dynamic
// CHECK: %[[REDUCED1:.+]] = memref.subview %{{.+}}[0, %{{.+}}, %{{.+}}] [1, 1, %{{.+}}] [1, 1, 1]
-// CHECK-SAME: : memref<?x?x?xf32> to memref<1x?xf32, #[[MAP0]]>
+// CHECK-SAME: : memref<?x?x?xf32> to memref<1x?xf32, strided<[?, 1], offset: ?>>
// CHECK: %[[REDUCED2:.+]] = memref.subview %[[REDUCED1]][0, 0] [1, %{{.+}}] [1, 1]
-// CHECK-SAME: : memref<1x?xf32, #[[MAP0]]> to memref<?xf32, #[[MAP1]]>
+// CHECK-SAME: : memref<1x?xf32, strided<[?, 1], offset: ?>> to memref<?xf32, strided<[1], offset: ?>>
// -----
-// CHECK-DAG: #[[MAP0]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-// CHECK-DAG: #[[MAP1]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-
func.func @multiple_reducing_dims_all_dynamic(%arg0 : memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>,
%arg1 : index, %arg2 : index, %arg3 : index) -> memref<?xf32, strided<[?], offset: ?>>
{
@@ -140,9 +125,9 @@ func.func @multiple_reducing_dims_all_dynamic(%arg0 : memref<?x?x?xf32, strided<
}
// CHECK: func @multiple_reducing_dims_all_dynamic
// CHECK: %[[REDUCED1:.+]] = memref.subview %{{.+}}[0, %{{.+}}, %{{.+}}] [1, 1, %{{.+}}] [1, 1, 1]
-// CHECK-SAME: : memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>> to memref<1x?xf32, #[[MAP0]]>
+// CHECK-SAME: : memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>> to memref<1x?xf32, strided<[?, ?], offset: ?>>
// CHECK: %[[REDUCED2:.+]] = memref.subview %[[REDUCED1]][0, 0] [1, %{{.+}}] [1, 1]
-// CHECK-SAME: : memref<1x?xf32, #[[MAP0]]> to memref<?xf32, #[[MAP1]]>
+// CHECK-SAME: : memref<1x?xf32, strided<[?, ?], offset: ?>> to memref<?xf32, strided<[?], offset: ?>>
// -----
@@ -457,23 +442,23 @@ func.func @collapse_after_memref_cast_type_change_dynamic(%arg0: memref<1x1x1x?x
// -----
func.func @reduced_memref(%arg0: memref<2x5x7x1xf32>, %arg1 :index)
- -> memref<1x4x1xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * 35 + s0 + d1 * 7 + d2)>> {
+ -> memref<1x4x1xf32, strided<[35, 7, 1], offset: ?>> {
%c0 = arith.constant 0 : index
%c5 = arith.constant 5 : index
%c4 = arith.constant 4 : index
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%0 = memref.subview %arg0[%arg1, %arg1, %arg1, 0] [%c1, %c4, %c1, 1] [1, 1, 1, 1]
- : memref<2x5x7x1xf32> to memref<?x?x?xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * 35 + s0 + d1 * 7 + d2)>>
+ : memref<2x5x7x1xf32> to memref<?x?x?xf32, strided<[35, 7, 1], offset: ?>>
%1 = memref.cast %0
- : memref<?x?x?xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * 35 + s0 + d1 * 7 + d2)>> to
- memref<1x4x1xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * 35 + s0 + d1 * 7 + d2)>>
- return %1 : memref<1x4x1xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * 35 + s0 + d1 * 7 + d2)>>
+ : memref<?x?x?xf32, strided<[35, 7, 1], offset: ?>> to
+ memref<1x4x1xf32, strided<[35, 7, 1], offset: ?>>
+ return %1 : memref<1x4x1xf32, strided<[35, 7, 1], offset: ?>>
}
// CHECK-LABEL: func @reduced_memref
// CHECK: %[[RESULT:.+]] = memref.subview
-// CHECK-SAME: memref<2x5x7x1xf32> to memref<1x4x1xf32, #{{.+}}>
+// CHECK-SAME: memref<2x5x7x1xf32> to memref<1x4x1xf32, strided{{.+}}>
// CHECK: return %[[RESULT]]
// -----
@@ -806,9 +791,8 @@ func.func @canonicalize_rank_reduced_subview(%arg0 : memref<8x?xf32>,
%0 = memref.subview %arg0[%c0, %c0] [1, %arg1] [%c1, %c1] : memref<8x?xf32> to memref<?xf32, strided<[?], offset: ?>>
return %0 : memref<?xf32, strided<[?], offset: ?>>
}
-// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK: func @canonicalize_rank_reduced_subview
// CHECK-SAME: %[[ARG0:.+]]: memref<8x?xf32>
// CHECK-SAME: %[[ARG1:.+]]: index
// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[ARG0]][0, 0] [1, %[[ARG1]]] [1, 1]
-// CHECK-SAME: memref<8x?xf32> to memref<?xf32, #[[MAP]]>
+// CHECK-SAME: memref<8x?xf32> to memref<?xf32, strided<[1], offset: ?>>
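To make the new printed form concrete: with fully static offsets, sizes, and strides, the subview result type now spells out the strided layout directly. A minimal standalone sketch (function name and constants are illustrative, not taken from the patch); the element offset is 1 * 64 + 2 * 4 + 1 * 1 = 73:

  func.func @static_subview_sketch(%m: memref<8x16x4xf32>)
      -> memref<7x11x2xf32, strided<[64, 4, 1], offset: 73>> {
    // Unit steps keep the canonical strides [64, 4, 1]; the constant
    // offsets fold into a single static element offset of 73.
    %0 = memref.subview %m[1, 2, 1] [7, 11, 2] [1, 1, 1]
        : memref<8x16x4xf32> to memref<7x11x2xf32, strided<[64, 4, 1], offset: 73>>
    return %0 : memref<7x11x2xf32, strided<[64, 4, 1], offset: 73>>
  }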
diff --git a/mlir/test/Dialect/MemRef/invalid.mlir b/mlir/test/Dialect/MemRef/invalid.mlir
index 77dc58044e48c..4c98b21bdfe80 100644
--- a/mlir/test/Dialect/MemRef/invalid.mlir
+++ b/mlir/test/Dialect/MemRef/invalid.mlir
@@ -684,7 +684,7 @@ func.func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
func.func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
- // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}}
+ // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, strided<[64, 4, 1]>>' or a rank-reduced version. (mismatch of result sizes)}}
%1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1]
: memref<8x16x4xf32> to memref<16x4xf32>
return
@@ -694,7 +694,7 @@ func.func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : i
func.func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
%0 = memref.alloc() : memref<8x16x4xf32>
- // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>>' or a rank-reduced version. (mismatch of result sizes)}}
+ // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, strided<[64, 4, 1], offset: 8>>' or a rank-reduced version. (mismatch of result sizes)}}
%1 = memref.subview %0[0, 2, 0][8, 16, 4][1, 1, 1]
: memref<8x16x4xf32> to memref<16x4xf32>
return
@@ -703,7 +703,7 @@ func.func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : i
// -----
func.func @invalid_rank_reducing_subview(%arg0 : memref<?x?xf32>, %arg1 : index, %arg2 : index) {
- // expected-error@+1 {{expected result type to be 'memref<?x1xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>>' or a rank-reduced version. (mismatch of result layout)}}
+ // expected-error@+1 {{expected result type to be 'memref<?x1xf32, strided<[?, 1], offset: ?>>' or a rank-reduced version. (mismatch of result layout)}}
%0 = memref.subview %arg0[0, %arg1][%arg2, 1][1, 1] : memref<?x?xf32> to memref<?xf32>
return
}
@@ -715,7 +715,7 @@ func.func @invalid_rank_reducing_subview(%arg0 : memref<?x?xf32>, %arg1 : index,
func.func @subview_bad_offset_1(%arg0: memref<16x16xf32>) {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
- // expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
+ // expected-error @+1 {{expected result type to be 'memref<8x8xf32, strided<[16, 1], offset: ?>>' or a rank-reduced version}}
%s2 = memref.subview %arg0[%c8, %c8][8, 8][1, 1] : memref<16x16xf32> to memref<8x8xf32, #map0>
return
}
@@ -727,20 +727,18 @@ func.func @subview_bad_offset_1(%arg0: memref<16x16xf32>) {
func.func @subview_bad_offset_2(%arg0: memref<16x16xf32>) {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
- // expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
+ // expected-error @+1 {{expected result type to be 'memref<8x8xf32, strided<[16, 1], offset: ?>>' or a rank-reduced version}}
%s2 = memref.subview %arg0[%c8, 8][8, 8][1, 1] : memref<16x16xf32> to memref<8x8xf32, #map0>
return
}
// -----
-#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1 + s0 * 437)>
-
func.func @subview_bad_offset_3(%arg0: memref<16x16xf32>) {
%c0 = arith.constant 0 : index
%c8 = arith.constant 8 : index
- // expected-error @+1 {{expected result type to be 'memref<8x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 16 + s0 + d1)>>' or a rank-reduced version}}
- %s2 = memref.subview %arg0[%c8, 8][8, 8][1, 1] : memref<16x16xf32> to memref<8x8xf32, #map0>
+ // expected-error @+1 {{expected result type to be 'memref<8x8xf32, strided<[16, 1], offset: ?>>' or a rank-reduced version}}
+ %s2 = memref.subview %arg0[%c8, 8][8, 8][1, 1] : memref<16x16xf32> to memref<8x8xf32, strided<[16, 1], offset: 437>>
return
}
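For contrast with the diagnostics above, a well-formed counterpart on the same 16x16 buffer (a sketch with illustrative constants, not part of the test suite): static offsets [8, 8] give the element offset 8 * 16 + 8 = 136, which is exactly the strided form the verifier now expects:

  func.func @subview_good_offset(%arg0: memref<16x16xf32>)
      -> memref<8x8xf32, strided<[16, 1], offset: 136>> {
    // Unit steps preserve the base strides [16, 1].
    %s = memref.subview %arg0[8, 8] [8, 8] [1, 1]
        : memref<16x16xf32> to memref<8x8xf32, strided<[16, 1], offset: 136>>
    return %s : memref<8x8xf32, strided<[16, 1], offset: 136>>
  }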
diff --git a/mlir/test/Dialect/MemRef/multibuffer.mlir b/mlir/test/Dialect/MemRef/multibuffer.mlir
index d6030af1eb879..b70b51e6aa622 100644
--- a/mlir/test/Dialect/MemRef/multibuffer.mlir
+++ b/mlir/test/Dialect/MemRef/multibuffer.mlir
@@ -1,6 +1,5 @@
// RUN: mlir-opt %s -allow-unregistered-dialect -test-multi-buffering=multiplier=5 -cse -split-input-file | FileCheck %s
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 128 + s0 + d1)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (((d0 - d1) floordiv d2) mod 5)>
// CHECK-LABEL: func @multi_buffer
@@ -15,14 +14,14 @@ func.func @multi_buffer(%a: memref<1024x1024xf32>) {
// CHECK: scf.for %[[IV:.*]] = %[[C1]]
scf.for %arg2 = %c1 to %c1024 step %c3 {
// CHECK: %[[I:.*]] = affine.apply #[[$MAP1]](%[[IV]], %[[C1]], %[[C3]])
-// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, #[[$MAP0]]>
+// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
%1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
-// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, #[[$MAP0]]>
+// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, strided<[128, 1], offset: ?>>
memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
-// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
"some_use"(%0) : (memref<4x128xf32>) -> ()
-// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
"some_use"(%0) : (memref<4x128xf32>) -> ()
}
return
@@ -42,14 +41,14 @@ func.func @multi_buffer_affine(%a: memref<1024x1024xf32>) {
// CHECK: affine.for %[[IV:.*]] = 1
affine.for %arg2 = 1 to 1024 step 3 {
// CHECK: %[[I:.*]] = affine.apply #[[$MAP1]](%[[IV]], %[[C1]], %[[C3]])
-// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, #[[$MAP0]]>
+// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
%1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
-// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, #[[$MAP0]]>
+// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, strided<[128, 1], offset: ?>>
memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
-// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
"some_use"(%0) : (memref<4x128xf32>) -> ()
-// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided{{.*}}>) -> ()
"some_use"(%0) : (memref<4x128xf32>) -> ()
}
return
@@ -57,7 +56,6 @@ func.func @multi_buffer_affine(%a: memref<1024x1024xf32>) {
// -----
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 128 + s0 + d1)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (((d0 - d1) floordiv d2) mod 5)>
// CHECK-LABEL: func @multi_buffer_subview_use
@@ -72,17 +70,17 @@ func.func @multi_buffer_subview_use(%a: memref<1024x1024xf32>) {
// CHECK: scf.for %[[IV:.*]] = %[[C1]]
scf.for %arg2 = %c1 to %c1024 step %c3 {
// CHECK: %[[I:.*]] = affine.apply #[[$MAP1]](%[[IV]], %[[C1]], %[[C3]])
-// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, #[[$MAP0]]>
+// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
%1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
-// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, #[[$MAP0]]>
+// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, strided<[128, 1], offset: ?>>
memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
-// CHECK: %[[SV1:.*]] = memref.subview %[[SV]][0, 1] [4, 127] [1, 1] : memref<4x128xf32, #[[$MAP0]]> to memref<4x127xf32, #[[$MAP0]]>
+// CHECK: %[[SV1:.*]] = memref.subview %[[SV]][0, 1] [4, 127] [1, 1] : memref<4x128xf32, strided<[128, 1], offset: ?>> to memref<4x127xf32, strided<[128, 1], offset: ?>>
%s = memref.subview %0[0, 1] [4, 127] [1, 1] :
memref<4x128xf32> to memref<4x127xf32, affine_map<(d0, d1) -> (d0 * 128 + d1 + 1)>>
-// CHECK: "some_use"(%[[SV1]]) : (memref<4x127xf32, #[[$MAP0]]>) -> ()
+// CHECK: "some_use"(%[[SV1]]) : (memref<4x127xf32, strided<[128, 1], offset: ?>>) -> ()
"some_use"(%s) : (memref<4x127xf32, affine_map<(d0, d1) -> (d0 * 128 + d1 + 1)>>) -> ()
-// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, strided<[128, 1], offset: ?>>) -> ()
"some_use"(%0) : (memref<4x128xf32>) -> ()
}
return
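The multi-buffering checks above all reduce to the same slicing pattern, sketched standalone below (names are illustrative): selecting one of the five staging copies drops the leading unit dimension, keeps the inner strides [128, 1], and leaves only the offset dynamic:

  func.func @multibuffer_slice(%buf: memref<5x4x128xf32>, %i: index)
      -> memref<4x128xf32, strided<[128, 1], offset: ?>> {
    // Base strides are [512, 128, 1]; the offset %i * 512 is only known
    // at runtime, so it stays `?` in the rank-reduced result type.
    %0 = memref.subview %buf[%i, 0, 0] [1, 4, 128] [1, 1, 1]
        : memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
    return %0 : memref<4x128xf32, strided<[128, 1], offset: ?>>
  }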
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
index b37999315208d..06ec529a8dcfc 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -601,8 +601,6 @@ func.func @parallel_insert_slice_with_conflict(
#map0 = affine_map<(d0) -> (d0 * 4)>
#map1 = affine_map<(d0) -> (d0 * 2)>
-// CHECK: #[[$DYN_LAYOUT_MAP:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-
// CHECK-LABEL: func.func @matmul
func.func @matmul(%arg0: tensor<8x8xf32>, %arg1: tensor<8x8xf32>, %arg2: tensor<8x8xf32> {bufferization.writable = true}) -> tensor<8x8xf32> {
%c2 = arith.constant 2 : index
@@ -616,7 +614,7 @@ func.func @matmul(%arg0: tensor<8x8xf32>, %arg1: tensor<8x8xf32>, %arg2: tensor<
%6 = tensor.extract_slice %arg1[0, %4] [8, 4] [1, 1] : tensor<8x8xf32> to tensor<8x4xf32>
%7 = tensor.extract_slice %o[%1, %4] [4, 4] [1, 1] : tensor<8x8xf32> to tensor<4x4xf32>
- // CHECK: linalg.matmul ins({{.*}}memref<4x8xf32, #[[$DYN_LAYOUT_MAP]]>, memref<8x4xf32, #[[$DYN_LAYOUT_MAP]]>) outs({{.*}} : memref<4x4xf32, #[[$DYN_LAYOUT_MAP]]>)
+ // CHECK: linalg.matmul ins({{.*}}memref<4x8xf32, strided<[?, ?], offset: ?>>, memref<8x4xf32, strided<[?, ?], offset: ?>>) outs({{.*}} : memref<4x4xf32, strided<[?, ?], offset: ?>>)
%8 = linalg.matmul ins(%3, %6 : tensor<4x8xf32>, tensor<8x4xf32>) outs(%7 : tensor<4x4xf32>) -> tensor<4x4xf32>
scf.foreach_thread.perform_concurrently {
tensor.parallel_insert_slice %8 into %o[%1, %4] [4, 4] [1, 1] : tensor<4x4xf32> into tensor<8x8xf32>
@@ -863,4 +861,4 @@ func.func @non_tensor_for_arg(%A : tensor<?xf32> {bufferization.writable = true}
scf.yield %idx, %t2 : index, tensor<?xf32>
}
return %r1#1 : tensor<?xf32>
-}
\ No newline at end of file
+}
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
index 7cde99d94d590..4d3d26c7e0d91 100644
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -253,14 +253,12 @@ func.func @tensor.generate_unknown_ops_in_body(%arg0: index) -> tensor<?xindex>
// -----
- // CHECK-DAG: #[[$MAP0a:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
// CHECK-LABEL: func @tensor.extract_slice(
// CHECK-SAME: %[[t1:.*]]: tensor<?x?xf32>, %[[idx1:.*]]: index, %[[idx2:.*]]: index
func.func @tensor.extract_slice(
%t1: tensor<?x?xf32>, %idx1: index, %idx2: index) -> tensor<?x10xf32> {
// CHECK: %[[m:.*]] = bufferization.to_memref %[[t1]] : memref<?x?xf32>
- // CHECK: %[[r:.*]] = memref.subview %[[m]][5, %[[idx2]]] [%[[idx1]], 10] [1, 1] : memref<?x?xf32> to memref<?x10xf32, #[[$MAP0a]]>
+ // CHECK: %[[r:.*]] = memref.subview %[[m]][5, %[[idx2]]] [%[[idx1]], 10] [1, 1] : memref<?x?xf32> to memref<?x10xf32, strided<[?, 1], offset: ?>>
%0 = tensor.extract_slice %t1[5, %idx2][%idx1, 10][1, 1]
: tensor<?x?xf32> to tensor<?x10xf32>
// CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
@@ -270,15 +268,13 @@ func.func @tensor.extract_slice(
// -----
-// CHECK-DAG: #[[$MAP0b:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
// CHECK-LABEL: func @tensor.extract_slice_rank_reducing(
// CHECK-SAME: %[[t1:.*]]: tensor<?x10x?xf32>, %[[idx1:.*]]: index,
// CHECK-SAME: %[[idx2:.*]]: index
func.func @tensor.extract_slice_rank_reducing(
%t1: tensor<?x10x?xf32>, %idx1: index, %idx2: index) -> tensor<?x15xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10x?xf32>
- // CHECK: %[[r:.*]] = memref.subview %[[m1]][5, %[[idx1]], 10] [%[[idx2]], 1, 15] [1, 1, 1] : memref<?x10x?xf32> to memref<?x15xf32, #[[$MAP0b]]>
+ // CHECK: %[[r:.*]] = memref.subview %[[m1]][5, %[[idx1]], 10] [%[[idx2]], 1, 15] [1, 1, 1] : memref<?x10x?xf32> to memref<?x15xf32, strided<[?, 1], offset: ?>>
%0 = tensor.extract_slice %t1[5, %idx1, 10][%idx2, 1, 15][1, 1, 1]
: tensor<?x10x?xf32> to tensor<?x15xf32>
// CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
@@ -313,16 +309,14 @@ func.func @tensor.insert_slice(%t1: tensor<?x?xf32>, %t2: tensor<?x10xf32>,
// -----
-// CHECK: #[[$MAP11:.*]] = affine_map<()[s0] -> (s0)>
-
// CHECK-LABEL: func @tensor.insert_slice_rank_reducing_1(
func.func @tensor.insert_slice_rank_reducing_1(
%t1: tensor<?x?xf32>, %f: tensor<f32>, %idx1: index, %idx2: index)
-> tensor<?x?xf32>
{
// CHECK: %[[alloc:.*]] = memref.alloc{{.*}} : memref<?x?xf32>
- // CHECK: memref.subview %[[alloc]][%{{.*}}, %{{.*}}] [1, 1] [1, 1] : memref<?x?xf32> to memref<f32, #[[$MAP11]]>
- // CHECK: memref.copy {{.*}} : memref<f32> to memref<f32, #[[$MAP11]]>
+ // CHECK: memref.subview %[[alloc]][%{{.*}}, %{{.*}}] [1, 1] [1, 1] : memref<?x?xf32> to memref<f32, strided<[], offset: ?>>
+ // CHECK: memref.copy {{.*}} : memref<f32> to memref<f32, strided<[], offset: ?>>
%0 = tensor.insert_slice %f into %t1[%idx1, %idx2][1, 1][1, 1]
: tensor<f32> into tensor<?x?xf32>
return %0 : tensor<?x?xf32>
@@ -330,16 +324,14 @@ func.func @tensor.insert_slice_rank_reducing_1(
// -----
-// CHECK: #[[$MAP12:.*]] = affine_map<(d0, d1, d2, d3, d4)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5)>
-
// CHECK-LABEL: func @tensor.insert_slice_rank_reducing_2(
func.func @tensor.insert_slice_rank_reducing_2(
%t1: tensor<?x?x?x?x?x?x?xf32>, %t2: tensor<2x1x4x1x1xf32>, %i: index)
-> tensor<?x?x?x?x?x?x?xf32>
{
// CHECK: %[[alloc:.*]] = memref.alloc{{.*}} : memref<?x?x?x?x?x?x?xf32>
- // CHECK: memref.subview %[[alloc]][{{.*}}] [1, 2, 1, 4, 1, 1, 1] [1, 1, 1, 1, 1, 1, 1] : memref<?x?x?x?x?x?x?xf32> to memref<2x1x4x1x1xf32, #[[$MAP12]]>
- // CHECK: memref.copy {{.*}} : memref<2x1x4x1x1xf32> to memref<2x1x4x1x1xf32, #[[$MAP12]]>
+ // CHECK: memref.subview %[[alloc]][{{.*}}] [1, 2, 1, 4, 1, 1, 1] [1, 1, 1, 1, 1, 1, 1] : memref<?x?x?x?x?x?x?xf32> to memref<2x1x4x1x1xf32, strided<[?, ?, ?, ?, ?], offset: ?>>
+ // CHECK: memref.copy {{.*}} : memref<2x1x4x1x1xf32> to memref<2x1x4x1x1xf32, strided<[?, ?, ?, ?, ?], offset: ?>>
%0 = tensor.insert_slice %t2 into %t1[%i, %i, %i, %i, %i, %i, %i][1, 2, 1, 4, 1, 1, 1][1, 1, 1, 1, 1, 1, 1]
: tensor<2x1x4x1x1xf32> into tensor<?x?x?x?x?x?x?xf32>
return %0 : tensor<?x?x?x?x?x?x?xf32>
@@ -380,7 +372,6 @@ func.func @tensor.expand_shape(%t1: tensor<?x10xf32>) -> tensor<2x?x10xf32> {
// -----
-// CHECK-DAG: #[[$MAP1b:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 20 + s0 + d1)>
// CHECK-DAG: #[[$MAP2b:.*]] = affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 140 + d1 * 20 + d2 * 5 + d3 + s0)>
// CHECK-LABEL: func @tensor.expand_shape_of_slice(
@@ -388,11 +379,11 @@ func.func @tensor.expand_shape(%t1: tensor<?x10xf32>) -> tensor<2x?x10xf32> {
func.func @tensor.expand_shape_of_slice(
%t1: tensor<?x20xf32>, %o1: index, %s1: index) -> tensor<?x7x2x5xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x20xf32>
- // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}, 5] [%{{.*}}, 10] [1, 1] : memref<?x20xf32> to memref<?x10xf32, #[[$MAP1b]]>
+ // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}, 5] [%{{.*}}, 10] [1, 1] : memref<?x20xf32> to memref<?x10xf32, strided<[20, 1], offset: ?>>
%0 = tensor.extract_slice %t1[%o1, 5][%s1, 10][1, 1] :
tensor<?x20xf32> to tensor<?x10xf32>
// CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [
- // CHECK-SAME: [0, 1], [2, 3]] : memref<?x10xf32, #[[$MAP1b]]> into memref<?x7x2x5xf32, #[[$MAP2b]]>
+ // CHECK-SAME: [0, 1], [2, 3]] : memref<?x10xf32, strided<[20, 1], offset: ?>> into memref<?x7x2x5xf32, #[[$MAP2b]]>
%1 = tensor.expand_shape %0 [[0, 1], [2, 3]] :
tensor<?x10xf32> into tensor<?x7x2x5xf32>
// CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
@@ -402,7 +393,6 @@ func.func @tensor.expand_shape_of_slice(
// -----
-// CHECK-DAG: #[[$MAP9:.*]] = affine_map<()[s0] -> (s0)>
// CHECK-DAG: #[[$MAP10:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-LABEL: func @tensor.expand_shape_of_scalar_slice(
@@ -410,9 +400,9 @@ func.func @tensor.expand_shape_of_slice(
func.func @tensor.expand_shape_of_scalar_slice(
%t1: tensor<?xf32>, %o1: index, %s1: index) -> tensor<1xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?xf32>
- // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}] [1] [1] : memref<?xf32> to memref<f32, #[[$MAP9]]>
+ // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}] [1] [1] : memref<?xf32> to memref<f32, strided<[], offset: ?>>
%0 = tensor.extract_slice %t1[%o1][1][1] : tensor<?xf32> to tensor<f32>
- // CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [] : memref<f32, #[[$MAP9]]> into memref<1xf32, #[[$MAP10]]>
+ // CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [] : memref<f32, strided{{.*}}> into memref<1xf32, #[[$MAP10]]>
%1 = tensor.expand_shape %0 [] : tensor<f32> into tensor<1xf32>
// CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
// CHECK: return %[[r]]
@@ -452,14 +442,13 @@ func.func @tensor.collapse_shape_to_scalar(%t1: tensor<1x1x1xf32>) -> tensor<f32
// -----
-// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0) -> (d0 + 1)>
// CHECK-DAG: #[[$MAP4:.*]] = affine_map<() -> (1)>
// CHECK-LABEL: func @tensor.collapse_shape_of_slice(
func.func @tensor.collapse_shape_of_slice(%arg0: tensor<2xi32>) -> tensor<i32> {
- // CHECK: memref.subview %{{.*}}[1] [1] [1] : memref<2xi32> to memref<1xi32, #[[$MAP3]]>
+ // CHECK: memref.subview %{{.*}}[1] [1] [1] : memref<2xi32> to memref<1xi32, strided<[1], offset: 1>>
%0 = tensor.extract_slice %arg0[1] [1] [1] : tensor<2xi32> to tensor<1xi32>
- // CHECK: memref.collapse_shape %{{.*}} [] : memref<1xi32, #[[$MAP3]]> into memref<i32, #[[$MAP4]]>
+ // CHECK: memref.collapse_shape %{{.*}} [] : memref<1xi32, strided<[1], offset: 1>> into memref<i32, #[[$MAP4]]>
%1 = tensor.collapse_shape %0 [] : tensor<1xi32> into tensor<i32>
return %1 : tensor<i32>
}
@@ -470,7 +459,7 @@ func.func @tensor.collapse_shape_of_slice(%arg0: tensor<2xi32>) -> tensor<i32> {
func.func @tensor.collapse_shape_of_slice2(
%arg0: tensor<?x?x?x?xi64>, %o1: index, %o2: index, %o3: index, %o4: index)
-> tensor<87x63648xi64> {
- // CHECK: %[[subview:.*]] = memref.subview %{{.*}} : memref<?x?x?x?xi64> to memref<87x78x68x12xi64, #{{.*}}>
+ // CHECK: %[[subview:.*]] = memref.subview %{{.*}} : memref<?x?x?x?xi64> to memref<87x78x68x12xi64, strided{{.*}}>
%0 = tensor.extract_slice %arg0[%o1, %o2, %o3, %o4] [87, 78, 68, 12] [1, 1, 1, 1] : tensor<?x?x?x?xi64> to tensor<87x78x68x12xi64>
// This memref may not be collapsible, so the buffer must be copied to get rid
@@ -485,33 +474,31 @@ func.func @tensor.collapse_shape_of_slice2(
// -----
-// CHECK-DAG: #[[$MAP5:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
// CHECK-DAG: #[[$MAP6:.*]] = affine_map<(d0) -> (d0 * 2)>
// CHECK-LABEL: func @tensor.collapse_shape_of_slice3(
// CHECK-SAME: %[[t1:.*]]: tensor<1x2xf32>
func.func @tensor.collapse_shape_of_slice3(%t1: tensor<1x2xf32>) -> tensor<1xf32> {
- // CHECK: memref.subview {{.*}} : memref<1x2xf32> to memref<1x1xf32, #[[$MAP5]]>
+ // CHECK: memref.subview {{.*}} : memref<1x2xf32> to memref<1x1xf32, strided<[2, 1]>>
%0 = tensor.extract_slice %t1[0, 0][1, 1][1, 1] : tensor<1x2xf32> to tensor<1x1xf32>
// CHECK: memref.collapse_shape %{{.*}} [
- // CHECK-SAME: [0, 1]] : memref<1x1xf32, #[[$MAP5]]> into memref<1xf32, #[[$MAP6]]>
+ // CHECK-SAME: [0, 1]] : memref<1x1xf32, strided<[2, 1]>> into memref<1xf32, #[[$MAP6]]>
%1 = tensor.collapse_shape %0 [[0, 1]] : tensor<1x1xf32> into tensor<1xf32>
return %1 : tensor<1xf32>
}
// -----
-// CHECK-DAG: #[[$MAP7:.*]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 8 + s0 + d1 * 4 + d2)>
// CHECK-DAG: #[[$MAP8:.*]] = affine_map<(d0)[s0] -> (d0 * 4 + s0)>
// CHECK-LABEL: func @tensor.collapse_shape_of_slice4(
// CHECK-SAME: %[[t1:.*]]: tensor<?x2x4xf32>,
// CHECK-SAME: %[[OFFSET:.*]]: index) -> tensor<8xf32> {
func.func @tensor.collapse_shape_of_slice4(%arg0: tensor<?x2x4xf32>, %offset: index, %size: index) -> tensor<8xf32> {
- // CHECK: memref.subview %{{.*}} : memref<?x2x4xf32> to memref<4x2x1xf32, #[[$MAP7]]>
+ // CHECK: memref.subview %{{.*}} : memref<?x2x4xf32> to memref<4x2x1xf32, strided<[8, 4, 1], offset: ?>>
%0 = tensor.extract_slice %arg0[0, 0, %offset] [4, 2, 1] [1, 1, 1] : tensor<?x2x4xf32> to tensor<4x2x1xf32>
// CHECK: memref.collapse_shape %{{.*}} [
- // CHECK-SAME: [0, 1, 2]] : memref<4x2x1xf32, #[[$MAP7]]> into memref<8xf32, #[[$MAP8]]>
+ // CHECK-SAME: [0, 1, 2]] : memref<4x2x1xf32, strided<[8, 4, 1], offset: ?>> into memref<8xf32, #[[$MAP8]]>
%ret = tensor.collapse_shape %0 [[0, 1, 2]] : tensor<4x2x1xf32> into tensor<8xf32>
return %ret: tensor<8xf32>
}
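A notable new spelling in this file is the rank-zero layout strided<[], offset: ?>, which replaces the map ()[s0] -> (s0). A minimal sketch (hypothetical function, not from the test) of a subview producing it:

  func.func @scalar_view(%m: memref<?x?xf32>, %i: index, %j: index)
      -> memref<f32, strided<[], offset: ?>> {
    // Both dimensions are rank-reduced away; only the dynamic element
    // offset computed from %i and %j survives in the layout.
    %0 = memref.subview %m[%i, %j] [1, 1] [1, 1]
        : memref<?x?xf32> to memref<f32, strided<[], offset: ?>>
    return %0 : memref<f32, strided<[], offset: ?>>
  }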
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
index 1f1936c1df347..eb0f60cb57748 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -208,8 +208,8 @@ func.func @rank_reducing_parallel_insert_slice(%in: tensor<100xf32>, %out: tenso
%result = scf.foreach_thread (%thread_idx) in (%num_threads) shared_outs (%o = %out) -> tensor<200x100xf32> {
%1 = tensor.extract_slice %in[%thread_idx][1][1] : tensor<100xf32> to tensor<1xf32>
scf.foreach_thread.perform_concurrently {
- // CHECK: memref.subview %{{.*}}[%{{.*}}] [1] [1] : memref<100xf32, #[[$MAP0]]> to memref<1xf32, #[[$MAP0]]>
- // CHECK: memref.subview %{{.*}}[1, %{{.*}}] [1, 1] [1, 1] : memref<200x100xf32, #[[$MAP1]]> to memref<1xf32, #[[$MAP0]]>
+ // CHECK: memref.subview %{{.*}}[%{{.*}}] [1] [1] : memref<100xf32, #[[$MAP0]]> to memref<1xf32, strided<[?], offset: ?>>
+ // CHECK: memref.subview %{{.*}}[1, %{{.*}}] [1, 1] [1, 1] : memref<200x100xf32, #[[$MAP1]]> to memref<1xf32, strided<[?], offset: ?>>
tensor.parallel_insert_slice %1 into %o[1, %thread_idx][1, 1][1, 1] :
tensor<1xf32> into tensor<200x100xf32>
}
diff --git a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
index 7891d71ef91f0..92d435ab0e5d1 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
@@ -8,7 +8,6 @@
// LINALG-DAG: #[[$map_p4:.*]] = affine_map<()[s0] -> (s0 + 4)>
// LINALG-DAG: #[[$map_p8:.*]] = affine_map<()[s0] -> (s0 + 8)>
// LINALG-DAG: #[[$map_2d_stride_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// LINALG-DAG: #[[$map_2d_stride_8x1:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1)>
// LINALG-DAG: #[[$bounds_map_4:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 4)>
// LINALG-DAG: #[[$bounds_map_8:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 8)>
@@ -80,9 +79,9 @@ func.func @split_vector_transfer_read_2d(%A: memref<?x8xf32>, %i: index, %j: ind
// LINALG: %[[sv0:.*]] = affine.min #[[$bounds_map_4]](%[[d0]], %[[i]], %[[c4]])
// LINALG: %[[sv1:.*]] = affine.min #[[$bounds_map_8]](%[[c8]], %[[j]], %[[c8]])
// LINALG: %[[sv:.*]] = memref.subview %[[A]][%[[i]], %[[j]]] [%[[sv0]], %[[sv1]]] [1, 1]
- // LINALG-SAME: memref<?x8xf32> to memref<?x?xf32, #[[$map_2d_stride_8x1]]>
+ // LINALG-SAME: memref<?x8xf32> to memref<?x?xf32, strided<[8, 1], offset: ?>>
// LINALG: %[[alloc_view:.*]] = memref.subview %[[alloc]][0, 0] [%[[sv0]], %[[sv1]]] [1, 1]
- // LINALG: memref.copy %[[sv]], %[[alloc_view]] : memref<?x?xf32, #[[$map_2d_stride_8x1]]> to memref<?x?xf32, #{{.*}}>
+ // LINALG: memref.copy %[[sv]], %[[alloc_view]] : memref<?x?xf32, strided<[8, 1], offset: ?>> to memref<?x?xf32, strided{{.*}}>
// LINALG: %[[yielded:.*]] = memref.cast %[[alloc]] :
// LINALG-SAME: memref<4x8xf32> to memref<?x8xf32>
// LINALG: scf.yield %[[yielded]], %[[c0]], %[[c0]] :
@@ -172,9 +171,9 @@ func.func @split_vector_transfer_read_strided_2d(
// LINALG: %[[sv0:.*]] = affine.min #[[$bounds_map_4]](%[[c7]], %[[i]], %[[c4]])
// LINALG: %[[sv1:.*]] = affine.min #[[$bounds_map_8]](%[[c8]], %[[j]], %[[c8]])
// LINALG: %[[sv:.*]] = memref.subview %[[A]][%[[i]], %[[j]]] [%[[sv0]], %[[sv1]]] [1, 1]
- // LINALG-SAME: memref<7x8xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, #[[$map_2d_stride_1]]>
+ // LINALG-SAME: memref<7x8xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// LINALG: %[[alloc_view:.*]] = memref.subview %[[alloc]][0, 0] [%[[sv0]], %[[sv1]]] [1, 1]
- // LINALG: memref.copy %[[sv]], %[[alloc_view]] : memref<?x?xf32, #[[$map_2d_stride_1]]> to memref<?x?xf32, #{{.*}}>
+ // LINALG: memref.copy %[[sv]], %[[alloc_view]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided{{.*}}>
// LINALG: %[[yielded:.*]] = memref.cast %[[alloc]] :
// LINALG-SAME: memref<4x8xf32> to memref<?x8xf32, #[[$map_2d_stride_1]]>
// LINALG: scf.yield %[[yielded]], %[[c0]], %[[c0]] :
@@ -243,7 +242,6 @@ func.func @split_vector_transfer_write_2d(%V: vector<4x8xf32>, %A: memref<?x8xf3
// LINALG-DAG: #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 8)>
// LINALG-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 4)>
// LINALG-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 8)>
-// LINALG-DAG: #[[MAP4:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1)>
// LINALG: func @split_vector_transfer_write_2d(
// LINALG-SAME: %[[VEC:.*]]: vector<4x8xf32>,
// LINALG-SAME: %[[DEST:.*]]: memref<?x8xf32>,
@@ -277,10 +275,10 @@ func.func @split_vector_transfer_write_2d(%V: vector<4x8xf32>, %A: memref<?x8xf3
// LINALG-DAG: %[[VAL_21:.*]] = affine.min #[[MAP3]](%[[C8]], %[[J]], %[[C8]])
// LINALG: %[[VAL_22:.*]] = memref.subview %[[TEMP]]
// LINALG-SAME: [%[[I]], %[[J]]] [%[[VAL_20]], %[[VAL_21]]]
-// LINALG-SAME: [1, 1] : memref<4x8xf32> to memref<?x?xf32, #[[MAP4]]>
+// LINALG-SAME: [1, 1] : memref<4x8xf32> to memref<?x?xf32, strided<[8, 1], offset: ?>>
// LINALG: %[[DEST_VIEW:.*]] = memref.subview %[[DEST]][0, 0] [%[[VAL_20]], %[[VAL_21]]] [1, 1]
// LINALG: memref.copy %[[VAL_22]], %[[DEST_VIEW]]
-// LINALG-SAME: : memref<?x?xf32, #[[MAP4]]> to memref<?x?xf32, #{{.*}}>
+// LINALG-SAME: : memref<?x?xf32, strided<[8, 1], offset: ?>> to memref<?x?xf32, strided{{.*}}>
// LINALG: }
// LINALG: return
// LINALG: }
@@ -346,7 +344,6 @@ func.func @split_vector_transfer_write_strided_2d(
// LINALG-DAG: #[[MAP2:.*]] = affine_map<()[s0] -> (s0 + 8)>
// LINALG-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 4)>
// LINALG-DAG: #[[MAP4:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 8)>
-// LINALG-DAG: #[[MAP5:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1)>
// LINALG: func @split_vector_transfer_write_strided_2d(
// LINALG-SAME: %[[VEC:.*]]: vector<4x8xf32>,
// LINALG-SAME: %[[DEST:.*]]: memref<7x8xf32, strided<[?, 1], offset: ?>>,
@@ -386,10 +383,10 @@ func.func @split_vector_transfer_write_strided_2d(
// LINALG-DAG: %[[VAL_21:.*]] = affine.min #[[MAP4]](%[[C8]], %[[J]], %[[C8]])
// LINALG: %[[VAL_22:.*]] = memref.subview %[[TEMP]]
// LINALG-SAME: [%[[I]], %[[J]]] [%[[VAL_20]], %[[VAL_21]]]
-// LINALG-SAME: [1, 1] : memref<4x8xf32> to memref<?x?xf32, #[[MAP5]]>
+// LINALG-SAME: [1, 1] : memref<4x8xf32> to memref<?x?xf32, strided<[8, 1], offset: ?>>
// LINALG: %[[DEST_VIEW:.*]] = memref.subview %[[DEST]][0, 0] [%[[VAL_20]], %[[VAL_21]]] [1, 1]
// LINALG: memref.copy %[[VAL_22]], %[[DEST_VIEW]]
-// LINALG-SAME: : memref<?x?xf32, #[[MAP5]]> to memref<?x?xf32, #[[MAP0]]>
+// LINALG-SAME: : memref<?x?xf32, strided<[8, 1], offset: ?>> to memref<?x?xf32, strided<[?, 1], offset: ?>>
// LINALG: }
// LINALG: return
// LINALG: }
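The LINALG checks above hinge on one property, sketched standalone here (illustrative names and shapes, not part of the test): a unit-stride subview of a statically shaped buffer keeps static strides, so only the offset needs a dynamic slot:

  func.func @partial_window(%a: memref<4x8xf32>, %i: index, %j: index,
                            %m: index, %n: index)
      -> memref<?x?xf32, strided<[8, 1], offset: ?>> {
    // Strides [8, 1] come straight from the 4x8 source; the offset
    // %i * 8 + %j is only known at runtime.
    %0 = memref.subview %a[%i, %j] [%m, %n] [1, 1]
        : memref<4x8xf32> to memref<?x?xf32, strided<[8, 1], offset: ?>>
    return %0 : memref<?x?xf32, strided<[8, 1], offset: ?>>
  }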
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index a7b3b343c9dcd..bd16f38362098 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -733,18 +733,6 @@ func.func @view(%arg0 : index) -> (f32, f32, f32, f32) {
// -----
-// CHECK-DAG: #[[$BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
-// CHECK-DAG: #[[$SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)>
-// CHECK-DAG: #[[$SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)>
-// CHECK-DAG: #[[$SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)>
-// CHECK-DAG: #[[$SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
-// CHECK-DAG: #[[$SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)>
-// CHECK-DAG: #[[$SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)>
-// CHECK-DAG: #[[$SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2 * 2)>
-// CHECK-DAG: #[[$SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)>
-// CHECK-DAG: #[[$SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 12)>
-
-
// CHECK-LABEL: func @subview
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
@@ -771,7 +759,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// zero offset and unit stride arguments.
// CHECK: memref.subview %[[ALLOC0]][0, 0, 0] [7, 11, 2] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>>
- // CHECK-SAME: to memref<7x11x2xf32, #[[$BASE_MAP0]]>
+ // CHECK-SAME: to memref<7x11x2xf32, strided<[64, 4, 1]>>
%1 = memref.subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c1, %c1, %c1]
: memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -780,7 +768,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview with one dynamic operand can also be folded.
// CHECK: memref.subview %[[ALLOC0]][0, %[[ARG0]], 0] [7, 11, 15] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>>
- // CHECK-SAME: to memref<7x11x15xf32, #[[$SUBVIEW_MAP0]]>
+ // CHECK-SAME: to memref<7x11x15xf32, strided<[64, 4, 1], offset: ?>>
%2 = memref.subview %0[%c0, %arg0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
: memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -791,7 +779,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview with constant operands but dynamic base memref is folded as long as the strides and offset of the base memref are static.
// CHECK: memref.subview %[[ALLOC1]][0, 0, 0] [7, 11, 15] [1, 1, 1] :
// CHECK-SAME: memref<?x16x4xf32, strided<[64, 4, 1]>>
- // CHECK-SAME: to memref<7x11x15xf32, #[[$BASE_MAP0]]>
+ // CHECK-SAME: to memref<7x11x15xf32, strided<[64, 4, 1]>>
%4 = memref.subview %3[%c0, %c0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
: memref<?x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -800,7 +788,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview offset operands are folded correctly w.r.t. base strides.
// CHECK: memref.subview %[[ALLOC0]][1, 2, 7] [7, 11, 2] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>> to
- // CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP1]]>
+ // CHECK-SAME: memref<7x11x2xf32, strided<[64, 4, 1], offset: 79>>
%5 = memref.subview %0[%c1, %c2, %c7] [%c7, %c11, %c2] [%c1, %c1, %c1]
: memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -809,7 +797,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview stride operands are folded correctly w.r.t. base strides.
// CHECK: memref.subview %[[ALLOC0]][0, 0, 0] [7, 11, 2] [2, 7, 11] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>>
- // CHECK-SAME: to memref<7x11x2xf32, #[[$SUBVIEW_MAP2]]>
+ // CHECK-SAME: to memref<7x11x2xf32, strided<[128, 28, 11]>>
%6 = memref.subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c2, %c7, %c11]
: memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -818,7 +806,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview shape are folded, but offsets and strides are not even if base memref is static
// CHECK: memref.subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [7, 11, 2] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>> to
- // CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP3]]>
+ // CHECK-SAME: memref<7x11x2xf32, strided<[?, ?, ?], offset: ?>>
%10 = memref.subview %0[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] :
memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -828,7 +816,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview strides are folded, but offsets and shape are not even if base memref is static
// CHECK: memref.subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [2, 7, 11] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>> to
- // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP4]]
+ // CHECK-SAME: memref<?x?x?xf32, strided<[128, 28, 11], offset: ?>>
%11 = memref.subview %0[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c7, %c11] :
memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -838,7 +826,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview offsets are folded, but strides and shape are not even if base memref is static
// CHECK: memref.subview %[[ALLOC0]][1, 2, 7] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] :
// CHECK-SAME: memref<8x16x4xf32, strided<[64, 4, 1]>> to
- // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP5]]
+ // CHECK-SAME: memref<?x?x?xf32, strided<[?, ?, ?], offset: 79>>
%13 = memref.subview %0[%c1, %c2, %c7] [%arg1, %arg1, %arg1] [%arg0, %arg0, %arg0] :
memref<8x16x4xf32, strided<[64, 4, 1], offset: 0>> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -850,7 +838,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview shape are folded, even if base memref is not static
// CHECK: memref.subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [7, 11, 2] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
// CHECK-SAME: memref<?x?x?xf32> to
- // CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP3]]>
+ // CHECK-SAME: memref<7x11x2xf32, strided<[?, ?, ?], offset: ?>>
%15 = memref.subview %14[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] :
memref<?x?x?xf32> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -859,7 +847,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview strides are folded, in the type only the most minor stride is folded.
// CHECK: memref.subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [2, 2, 2] :
// CHECK-SAME: memref<?x?x?xf32> to
- // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP6]]
+ // CHECK-SAME: memref<?x?x?xf32, strided<[?, ?, 2], offset: ?>>
%16 = memref.subview %14[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c2, %c2] :
memref<?x?x?xf32> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -868,7 +856,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview offsets are folded but the type offset remains dynamic, when the base memref is not static
// CHECK: memref.subview %[[ALLOC2]][1, 1, 1] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
// CHECK-SAME: memref<?x?x?xf32> to
- // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP3]]
+ // CHECK-SAME: memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
%17 = memref.subview %14[%c1, %c1, %c1] [%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] :
memref<?x?x?xf32> to
memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
@@ -881,7 +869,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview strides are maintained when sizes are folded
// CHECK: memref.subview %[[ALLOC3]][%arg1, %arg1] [2, 4] [1, 1] :
// CHECK-SAME: memref<12x4xf32> to
- // CHECK-SAME: memref<2x4xf32, #[[$SUBVIEW_MAP7]]>
+ // CHECK-SAME: memref<2x4xf32, strided<[4, 1], offset: ?>
%19 = memref.subview %18[%arg1, %arg1] [%c2, %c4] [1, 1] :
memref<12x4xf32> to
memref<?x?xf32, strided<[4, 1], offset: ?>>
@@ -890,7 +878,7 @@ func.func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview strides and sizes are maintained when offsets are folded
// CHECK: memref.subview %[[ALLOC3]][2, 4] [12, 4] [1, 1] :
// CHECK-SAME: memref<12x4xf32> to
- // CHECK-SAME: memref<12x4xf32, #[[$SUBVIEW_MAP8]]>
+ // CHECK-SAME: memref<12x4xf32, strided<[4, 1], offset: 12>>
%20 = memref.subview %18[%c2, %c4] [12, 4] [1, 1] :
memref<12x4xf32> to
memref<12x4xf32, strided<[4, 1], offset: ?>>
@@ -1053,15 +1041,11 @@ func.func @memref_cast_folding_subview(%arg0: memref<4x5xf32>, %i: index) -> (me
%0 = memref.cast %arg0 : memref<4x5xf32> to memref<?x?xf32>
// CHECK-NEXT: memref.subview %{{.*}}: memref<4x5xf32>
%1 = memref.subview %0[%i, %i][%i, %i][%i, %i]: memref<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
- // CHECK-NEXT: memref.cast
- // CHECK-NEXT: return %{{.*}}
return %1: memref<?x?xf32, strided<[?, ?], offset: ?>>
}
// -----
-// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
-
// CHECK-LABEL: func @memref_cast_folding_subview_static(
func.func @memref_cast_folding_subview_static(%V: memref<16x16xf32>, %a: index, %b: index)
-> memref<3x4xf32, strided<[?, 1], offset: ?>>
@@ -1069,7 +1053,7 @@ func.func @memref_cast_folding_subview_static(%V: memref<16x16xf32>, %a: index,
%0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
%1 = memref.subview %0[0, 0][3, 4][1, 1] : memref<?x?xf32> to memref<3x4xf32, strided<[?, 1], offset: ?>>
- // CHECK: memref.subview{{.*}}: memref<16x16xf32> to memref<3x4xf32, #[[$map0]]>
+ // CHECK: memref.subview{{.*}}: memref<16x16xf32> to memref<3x4xf32, strided<[16, 1]>>
return %1: memref<3x4xf32, strided<[?, 1], offset: ?>>
}
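The folded types in this test follow from simple arithmetic: each result stride is the base stride times the subview step, and the offset is the dot product of base strides with the subview offsets. A standalone sketch with illustrative constants (not from the patch):

  func.func @strided_steps(%m: memref<8x16x4xf32>)
      -> memref<4x2x2xf32, strided<[128, 28, 2]>> {
    // Result strides: [64 * 2, 4 * 7, 1 * 2] = [128, 28, 2]; zero
    // offsets yield a static offset of 0, which is elided when printed.
    %0 = memref.subview %m[0, 0, 0] [4, 2, 2] [2, 7, 2]
        : memref<8x16x4xf32> to memref<4x2x2xf32, strided<[128, 28, 2]>>
    return %0 : memref<4x2x2xf32, strided<[128, 28, 2]>>
  }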
diff --git a/mlir/test/Transforms/compose-subview.mlir b/mlir/test/Transforms/compose-subview.mlir
index e43b4e532cdae..cb8ebcb2bf9e9 100644
--- a/mlir/test/Transforms/compose-subview.mlir
+++ b/mlir/test/Transforms/compose-subview.mlir
@@ -1,62 +1,47 @@
-// RUN: mlir-opt -allow-unregistered-dialect %s -test-compose-subview -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -test-compose-subview -split-input-file | FileCheck %s
-// CHECK: [[MAP:#.*]] = affine_map<(d0, d1) -> (d0 * 1024 + d1 + 3456)
-#map0 = affine_map<(d0, d1) -> (d0 * 1024 + d1 + 2304)>
-#map1 = affine_map<(d0, d1) -> (d0 * 1024 + d1 + 3456)>
-
-func.func @main(%input: memref<4x1024xf32>) -> memref<1x128xf32, #map1> {
+func.func @main(%input: memref<4x1024xf32>) -> memref<1x128xf32, strided<[1024, 1], offset: 3456>> {
// CHECK: subview %arg0[3, 384] [1, 128] [1, 1]
- // CHECK-SAME: memref<4x1024xf32> to memref<1x128xf32, [[MAP]]>
- %0 = memref.subview %input[2, 256] [2, 256] [1, 1] : memref<4x1024xf32> to memref<2x256xf32, #map0>
- %1 = memref.subview %0[1, 128] [1, 128] [1, 1] : memref<2x256xf32, #map0> to memref<1x128xf32, #map1>
- return %1 : memref<1x128xf32, #map1>
+ // CHECK-SAME: memref<4x1024xf32> to memref<1x128xf32, strided<[1024, 1], offset: 3456>>
+ %0 = memref.subview %input[2, 256] [2, 256] [1, 1] : memref<4x1024xf32> to memref<2x256xf32, strided<[1024, 1], offset: 2304>>
+ %1 = memref.subview %0[1, 128] [1, 128] [1, 1] : memref<2x256xf32, strided<[1024, 1], offset: 2304>> to memref<1x128xf32, strided<[1024, 1], offset: 3456>>
+ return %1 : memref<1x128xf32, strided<[1024, 1], offset: 3456>>
}
// -----
-// CHECK: [[MAP:#.*]] = affine_map<(d0, d1) -> (d0 * 1024 + d1 + 3745)
-#map0 = affine_map<(d0, d1) -> (d0 * 1024 + d1 + 1536)>
-#map1 = affine_map<(d0, d1) -> (d0 * 1024 + d1 + 2688)>
-#map2 = affine_map<(d0, d1) -> (d0 * 1024 + d1 + 3745)>
-
-func.func @main(%input: memref<4x1024xf32>) -> memref<1x10xf32, #map2> {
+func.func @main(%input: memref<4x1024xf32>) -> memref<1x10xf32, strided<[1024, 1], offset: 3745>> {
// CHECK: subview %arg0[3, 673] [1, 10] [1, 1]
- // CHECK-SAME: memref<4x1024xf32> to memref<1x10xf32, [[MAP]]>
- %0 = memref.subview %input[1, 512] [3, 256] [1, 1] : memref<4x1024xf32> to memref<3x256xf32, #map0>
- %1 = memref.subview %0[1, 128] [2, 128] [1, 1] : memref<3x256xf32, #map0> to memref<2x128xf32, #map1>
- %2 = memref.subview %1[1, 33] [1, 10] [1, 1] : memref<2x128xf32, #map1> to memref<1x10xf32, #map2>
- return %2 : memref<1x10xf32, #map2>
+ // CHECK-SAME: memref<4x1024xf32> to memref<1x10xf32, strided<[1024, 1], offset: 3745>>
+ %0 = memref.subview %input[1, 512] [3, 256] [1, 1] : memref<4x1024xf32> to memref<3x256xf32, strided<[1024, 1], offset: 1536>>
+ %1 = memref.subview %0[1, 128] [2, 128] [1, 1] : memref<3x256xf32, strided<[1024, 1], offset: 1536>> to memref<2x128xf32, strided<[1024, 1], offset: 2688>>
+ %2 = memref.subview %1[1, 33] [1, 10] [1, 1] : memref<2x128xf32, strided<[1024, 1], offset: 2688>> to memref<1x10xf32, strided<[1024, 1], offset: 3745>>
+ return %2 : memref<1x10xf32, strided<[1024, 1], offset: 3745>>
}
// -----
-// CHECK: [[MAP:#.*]] = affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)
-#map = affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>
-
-func.func @main(%input: memref<4x1024xf32>) -> memref<1x128xf32, #map> {
+func.func @main(%input: memref<4x1024xf32>) -> memref<1x128xf32, strided<[1024, 1], offset: ?>> {
// CHECK: [[CST_3:%.*]] = arith.constant 3 : index
%cst_1 = arith.constant 1 : index
%cst_2 = arith.constant 2 : index
// CHECK: subview %arg0{{\[}}[[CST_3]], 384] [1, 128] [1, 1]
- // CHECK-SAME: memref<4x1024xf32> to memref<1x128xf32, [[MAP]]>
- %0 = memref.subview %input[%cst_2, 256] [2, 256] [1, 1] : memref<4x1024xf32> to memref<2x256xf32, #map>
- %1 = memref.subview %0[%cst_1, 128] [1, 128] [1, 1] : memref<2x256xf32, #map> to memref<1x128xf32, #map>
- return %1 : memref<1x128xf32, #map>
+ // CHECK-SAME: memref<4x1024xf32> to memref<1x128xf32, strided<[1024, 1], offset: ?>>
+ %0 = memref.subview %input[%cst_2, 256] [2, 256] [1, 1] : memref<4x1024xf32> to memref<2x256xf32, strided<[1024, 1], offset: ?>>
+ %1 = memref.subview %0[%cst_1, 128] [1, 128] [1, 1] : memref<2x256xf32, strided<[1024, 1], offset: ?>> to memref<1x128xf32, strided<[1024, 1], offset: ?>>
+ return %1 : memref<1x128xf32, strided<[1024, 1], offset: ?>>
}
// -----
-// CHECK: [[MAP:#.*]] = affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)
-#map = affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>
-
-func.func @main(%input: memref<4x1024xf32>) -> memref<1x128xf32, #map> {
+func.func @main(%input: memref<4x1024xf32>) -> memref<1x128xf32, strided<[1024, 1], offset: ?>> {
// CHECK: [[CST_3:%.*]] = arith.constant 3 : index
%cst_2 = arith.constant 2 : index
// CHECK: [[CST_384:%.*]] = arith.constant 384 : index
%cst_128 = arith.constant 128 : index
// CHECK: subview %arg0{{\[}}[[CST_3]], [[CST_384]]] [1, 128] [1, 1]
- // CHECK-SAME: memref<4x1024xf32> to memref<1x128xf32, [[MAP]]>
- %0 = memref.subview %input[%cst_2, 256] [2, 256] [1, 1] : memref<4x1024xf32> to memref<2x256xf32, #map>
- %1 = memref.subview %0[1, %cst_128] [1, 128] [1, 1] : memref<2x256xf32, #map> to memref<1x128xf32, #map>
- return %1 : memref<1x128xf32, #map>
+ // CHECK-SAME: memref<4x1024xf32> to memref<1x128xf32, strided<[1024, 1], offset: ?>>
+ %0 = memref.subview %input[%cst_2, 256] [2, 256] [1, 1] : memref<4x1024xf32> to memref<2x256xf32, strided<[1024, 1], offset: ?>>
+ %1 = memref.subview %0[1, %cst_128] [1, 128] [1, 1] : memref<2x256xf32, strided<[1024, 1], offset: ?>> to memref<1x128xf32, strided<[1024, 1], offset: ?>>
+ return %1 : memref<1x128xf32, strided<[1024, 1], offset: ?>>
}
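The constants folded by -test-compose-subview above are chained offset sums; a minimal two-step sketch with illustrative values (not from the test) showing how two static offsets compose into one:

  func.func @compose_sketch(%in: memref<4x1024xf32>)
      -> memref<1x16xf32, strided<[1024, 1], offset: 1040>> {
    // First slice: offset = 1 * 1024 + 0 = 1024.
    %0 = memref.subview %in[1, 0] [2, 64] [1, 1]
        : memref<4x1024xf32> to memref<2x64xf32, strided<[1024, 1], offset: 1024>>
    // Second slice: offset = 1024 + 0 * 1024 + 16 * 1 = 1040.
    %1 = memref.subview %0[0, 16] [1, 16] [1, 1]
        : memref<2x64xf32, strided<[1024, 1], offset: 1024>>
          to memref<1x16xf32, strided<[1024, 1], offset: 1040>>
    return %1 : memref<1x16xf32, strided<[1024, 1], offset: 1040>>
  }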
diff --git a/mlir/unittests/Dialect/MemRef/InferShapeTest.cpp b/mlir/unittests/Dialect/MemRef/InferShapeTest.cpp
index 28dc768bda25a..3937095c119c3 100644
--- a/mlir/unittests/Dialect/MemRef/InferShapeTest.cpp
+++ b/mlir/unittests/Dialect/MemRef/InferShapeTest.cpp
@@ -22,10 +22,9 @@ TEST(InferShapeTest, inferRankReducedShapeIdentity) {
auto sourceMemref = MemRefType::get({10, 5}, b.getIndexType());
auto reducedType = SubViewOp::inferRankReducedResultType(
/*resultShape=*/{2}, sourceMemref, {2, 3}, {1, 2}, {1, 1});
- AffineExpr dim0;
- bindDims(&ctx, dim0);
- auto expectedType =
- MemRefType::get({2}, b.getIndexType(), AffineMap::get(1, 0, dim0 + 13));
+ auto expectedType = MemRefType::get(
+ {2}, b.getIndexType(),
+ StridedLayoutAttr::get(&ctx, /*offset=*/13, /*strides=*/{1}));
EXPECT_EQ(reducedType, expectedType);
}
@@ -39,8 +38,9 @@ TEST(InferShapeTest, inferRankReducedShapeNonIdentity) {
AffineMap::get(2, 0, 1000 * dim0 + dim1));
auto reducedType = SubViewOp::inferRankReducedResultType(
/*resultShape=*/{2}, sourceMemref, {2, 3}, {1, 2}, {1, 1});
- auto expectedType =
- MemRefType::get({2}, b.getIndexType(), AffineMap::get(1, 0, dim0 + 2003));
+ auto expectedType = MemRefType::get(
+ {2}, b.getIndexType(),
+ StridedLayoutAttr::get(&ctx, /*offset=*/2003, /*strides=*/{1}));
EXPECT_EQ(reducedType, expectedType);
}
@@ -53,8 +53,8 @@ TEST(InferShapeTest, inferRankReducedShapeToScalar) {
AffineMap::get(2, 0, 1000 * dim0 + dim1));
auto reducedType = SubViewOp::inferRankReducedResultType(
/*resultShape=*/{}, sourceMemref, {2, 3}, {1, 1}, {1, 1});
- auto expectedType =
- MemRefType::get({}, b.getIndexType(),
- AffineMap::get(0, 0, b.getAffineConstantExpr(2003)));
+ auto expectedType = MemRefType::get(
+ {}, b.getIndexType(),
+ StridedLayoutAttr::get(&ctx, /*offset=*/2003, /*strides=*/{}));
EXPECT_EQ(reducedType, expectedType);
}
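For reference, the StridedLayoutAttr instances expected by these updated tests print in IR as follows (illustrative rendering, not part of the patch):

  // inferRankReducedShapeIdentity: offset 2 * 5 + 3 = 13, stride 1.
  memref<2xindex, strided<[1], offset: 13>>
  // inferRankReducedShapeNonIdentity: offset 2 * 1000 + 3 = 2003.
  memref<2xindex, strided<[1], offset: 2003>>
  // inferRankReducedShapeToScalar: empty stride list for rank zero.
  memref<index, strided<[], offset: 2003>>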