[Mlir-commits] [mlir] f3fae03 - [mlir] use strided layout in structured codegen-related tests

Alex Zinenko llvmlistbot at llvm.org
Fri Sep 16 23:11:40 PDT 2022


Author: Alex Zinenko
Date: 2022-09-17T08:11:28+02:00
New Revision: f3fae035c7a16e1f4c7d96b115212714375a3d38

URL: https://github.com/llvm/llvm-project/commit/f3fae035c7a16e1f4c7d96b115212714375a3d38
DIFF: https://github.com/llvm/llvm-project/commit/f3fae035c7a16e1f4c7d96b115212714375a3d38.diff

LOG: [mlir] use strided layout in structured codegen-related tests

All relevant operations have been switched to primarily use the strided
layout, while still supporting the affine-map layout. Update the affected
tests to use the strided format, which matches how these ops now print by
default.
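
As a minimal illustrative sketch (not from the patch itself; the names are
hypothetical), the two memref types below spell the same fully dynamic
layout; the strided form is what ops now print by default:

```mlir
// Affine-map spelling: offset s0 and strides [s1, s2], all dynamic.
#layout = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>

// Both arguments carry the same layout; only the notation differs.
func.func @layout_equivalence(%a: memref<?x?xf32, #layout>,
                              %b: memref<?x?xf32, strided<[?, ?], offset: ?>>) {
  return
}
```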

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D134045

Added: 
    

Modified: 
    mlir/docs/Bufferization.md
    mlir/docs/Dialects/Linalg/_index.md
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
    mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h
    mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
    mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
    mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
    mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir
    mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
    mlir/test/Dialect/Linalg/fusion-indexed.mlir
    mlir/test/Dialect/Linalg/fusion.mlir
    mlir/test/Dialect/Linalg/promote.mlir
    mlir/test/Dialect/MemRef/canonicalize.mlir
    mlir/test/Dialect/MemRef/simplify-extract-strided-metadata.mlir
    mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
    mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
    mlir/test/Dialect/Vector/vector-warp-distribute.mlir
    mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir
    mlir/test/Transforms/canonicalize.mlir

Removed: 
    


################################################################################
diff --git a/mlir/docs/Bufferization.md b/mlir/docs/Bufferization.md
index 7d13e9d22eab8..464e1ca63dde8 100644
--- a/mlir/docs/Bufferization.md
+++ b/mlir/docs/Bufferization.md
@@ -317,10 +317,9 @@ When bufferizing the above IR, One-Shot Bufferize inserts a `to_memref` op with
 dynamic offset and strides:
 
 ```mlir
-#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 %0 = "my_dialect.unbufferizable_op(%t) : (tensor<?x?xf32>) -> (tensor<?x?xf32>)
-%0_m = bufferization.to_memref %0 : memref<?x?xf32, #map>
-%1 = memref.load %0_m[%idx1, %idx2] : memref<?x?xf32, #map>
+%0_m = bufferization.to_memref %0 : memref<?x?xf32, strided<[?, ?], offset: ?>>
+%1 = memref.load %0_m[%idx1, %idx2] : memref<?x?xf32, strided<[?, ?], offset: ?>>
 ```
 
 All users of `%0` have fully dynamic layout maps. This ensures that the

diff --git a/mlir/docs/Dialects/Linalg/_index.md b/mlir/docs/Dialects/Linalg/_index.md
index 246436a0a6f68..dbf008d0bb0c2 100644
--- a/mlir/docs/Dialects/Linalg/_index.md
+++ b/mlir/docs/Dialects/Linalg/_index.md
@@ -99,13 +99,10 @@ layout, and the second one is a `memref` of 4-element vectors with a 2-strided,
   iterator_types = ["parallel"]
 }
 
-// memory layouts
-#identity = affine_map<(d0) -> (d0)>
-
-func.func @example(%A: memref<?xf32, #identity>,
+func.func @example(%A: memref<?xf32, strided<[1]>>,
               %B: memref<?xvector<4xf32>, strided<[2], offset: 1>>) {
   linalg.generic #attrs
-  ins(%A: memref<?xf32, #identity>)
+  ins(%A: memref<?xf32, strided<[1]>>)
   outs(%B: memref<?xvector<4xf32>, strided<[2], offset: 1>>) {
   ^bb0(%a: f32, %b: vector<4xf32>):
     %c = "some_compute"(%a, %b): (f32, vector<4xf32>) -> (vector<4xf32>)
@@ -122,17 +119,19 @@ materialized by a lowering into a form that will resemble:
 // Run: mlir-opt example1.mlir -allow-unregistered-dialect -convert-linalg-to-loops
 // This converted representation is in the `scf` dialect.
 // Its syntax can be found here: https://mlir.llvm.org/docs/Dialects/SCFDialect/
-#map0 = affine_map<(d0) -> (d0 * 2 + 1)>
 
-func.func @example(%arg0: memref<?xf32>, %arg1: memref<?xvector<4xf32>, #map0>) {
+func.func @example(%arg0: memref<?xf32>,
+                   %arg1: memref<?xvector<4xf32>, strided<[2], offset: 1>>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %0 = memref.dim %arg0, %c0 : memref<?xf32>
   scf.for %arg2 = %c0 to %0 step %c1 {
     %1 = memref.load %arg0[%arg2] : memref<?xf32>
-    %2 = memref.load %arg1[%arg2] : memref<?xvector<4xf32>, #map0>
+    %2 = memref.load %arg1[%arg2]
+       : memref<?xvector<4xf32>, strided<[2], offset: 1>>
     %3 = "some_compute"(%1, %2) : (f32, vector<4xf32>) -> vector<4xf32>
-    memref.store %3, %arg1[%arg2] : memref<?xvector<4xf32>, #map0>
+    memref.store %3, %arg1[%arg2]
+       : memref<?xvector<4xf32>, strided<[2], offset: 1>>
   }
   return
 }
@@ -204,16 +203,15 @@ materialized by a lowering into a form that will resemble:
 
 ```mlir
 // Run: mlir-opt example2.mlir -allow-unregistered-dialect -convert-linalg-to-loops
-#map0 = affine_map<(d0, d1) -> (d0 * 2 + d1 * 2)>
 
-func.func @example(%arg0: memref<8x?xf32, #map0>, %arg1: memref<?xvector<4xf32>>) {
+func.func @example(%arg0: memref<8x?xf32, strided<[2, 2]>>, %arg1: memref<?xvector<4xf32>>) {
   %c8 = arith.constant 8 : index
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
-  %0 = memref.dim %arg0, %c1 : memref<8x?xf32, #map0>
+  %0 = memref.dim %arg0, %c1 : memref<8x?xf32, strided<[2, 2]>>
   scf.for %arg2 = %c0 to %0 step %c1 {
     scf.for %arg3 = %c0 to %c8 step %c1 {
-      %1 = memref.load %arg0[%arg3, %arg2] : memref<8x?xf32, #map0>
+      %1 = memref.load %arg0[%arg3, %arg2] : memref<8x?xf32, strided<[2, 2]>>
       %2 = memref.load %arg1[%arg3] : memref<?xvector<4xf32>>
       %3 = "some_compute"(%1, %2) : (f32, vector<4xf32>) -> vector<4xf32>
       memref.store %3, %arg1[%arg3] : memref<?xvector<4xf32>>
@@ -400,16 +398,17 @@ into a form that will resemble:
 ```mlir
 // Run: mlir-opt example4.mlir -convert-linalg-to-std
 
-#map0 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-
 func.func @example(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
-  %0 = memref.cast %arg0 : memref<?x?xf32> to memref<?x?xf32, #map0>
-  %1 = memref.cast %arg1 : memref<?x?xf32> to memref<?x?xf32, #map0>
-  %2 = memref.cast %arg2 : memref<?x?xf32> to memref<?x?xf32, #map0>
-  call @pointwise_add(%0, %1, %2) : (memref<?x?xf32, #map0>, memref<?x?xf32, #map0>, memref<?x?xf32, #map0>) -> ()
+  %0 = memref.cast %arg0 : memref<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+  %1 = memref.cast %arg1 : memref<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+  %2 = memref.cast %arg2 : memref<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+  call @pointwise_add(%0, %1, %2) : (memref<?x?xf32, strided<[?, ?], offset: ?>>,
+    memref<?x?xf32, strided<[?, ?], offset: ?>>, memref<?x?xf32, strided<[?, ?], offset: ?>>) -> ()
   return
 }
-func.func @pointwise_add(memref<?x?xf32, #map0>, memref<?x?xf32, #map0>, memref<?x?xf32, #map0>) attributes {llvm.emit_c_interface}
+func.func @pointwise_add(memref<?x?xf32, strided<[?, ?], offset: ?>>,
+                         memref<?x?xf32, strided<[?, ?], offset: ?>>,
+                         memref<?x?xf32, strided<[?, ?], offset: ?>>) attributes {llvm.emit_c_interface}
 ```
 
 Which, after lowering to LLVM resembles:

diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index ae8421e30c368..13e82f40fea1d 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -355,8 +355,8 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
     Casts a tensor to a memref.
 
     ```mlir
-    // Result type is memref<4x?xf32, #map0, 42>
-    %12 = bufferization.to_memref %10 : memref<4x?xf32, #map0, 42>
+    // Result type is memref<4x?xf32, #layout, 42>
+    %12 = bufferization.to_memref %10 : memref<4x?xf32, #layout, 42>
     ```
 
     Note that mutating the result of the `to_memref` operation leads to

diff --git a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h
index a5309ddcc42c6..2ba2890c382a0 100644
--- a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h
@@ -80,9 +80,9 @@ void populateSimplifyExtractStridedMetadataOpPatterns(
 ///   %d = arith.divsi %s, %c3 : index
 ///   %i = arith.remsi %d, %c5 : index
 ///   %sv = memref.subview %0[%i, 0, 0] [1, 4, 128] [1, 1, 1] :
-///     memref<5x4x128xf32> to memref<4x128xf32, #map0>
-///   memref.copy %1, %sv : memref<4x128xf32> to memref<4x128xf32, #map0>
-///   "some_use"(%sv) : (memref<4x128xf32, $map0>) -> ()
+///     memref<5x4x128xf32> to memref<4x128xf32, strided<[128, 1], offset: ?>>
+///   memref.copy %1, %sv : memref<4x128xf32> to memref<4x128xf32, strided<...>>
+///   "some_use"(%sv) : (memref<4x128xf32, strided<...>) -> ()
 /// }
 /// ```
 LogicalResult multiBuffer(memref::AllocOp allocOp, unsigned multiplier);

diff --git a/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir b/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
index 6190aad34d9bf..e73c170b5b99e 100644
--- a/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
+++ b/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
@@ -39,21 +39,19 @@ func.func @conversion_unknown(%arg0 : memref<*xf32>) -> memref<*xf32> {
 
 // -----
 
-// CHECK: #[[$MAP:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-#map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
 // CHECK-LABEL: func @conversion_with_layout_map(
-//  CHECK-SAME:     %[[ARG:.*]]: memref<?xf32, #[[$MAP]]>
+//  CHECK-SAME:     %[[ARG:.*]]: memref<?xf32, strided<[?], offset: ?>>
 //       CHECK:   %[[C0:.*]] = arith.constant 0 : index
 //       CHECK:   %[[DIM:.*]] = memref.dim %[[ARG]], %[[C0]]
 //       CHECK:   %[[ALLOC:.*]] = memref.alloc(%[[DIM]]) : memref<?xf32>
-//       CHECK:   %[[CASTED:.*]] = memref.cast %[[ALLOC]] : memref<?xf32> to memref<?xf32, #[[$MAP]]>
+//       CHECK:   %[[CASTED:.*]] = memref.cast %[[ALLOC]] : memref<?xf32> to memref<?xf32, strided<[?], offset: ?>>
 //       CHECK:   memref.copy
 //       CHECK:   memref.dealloc
 //       CHECK:   return %[[CASTED]]
-func.func @conversion_with_layout_map(%arg0 : memref<?xf32, #map>) -> memref<?xf32, #map> {
-  %1 = bufferization.clone %arg0 : memref<?xf32, #map> to memref<?xf32, #map>
-  memref.dealloc %arg0 : memref<?xf32, #map>
-  return %1 : memref<?xf32, #map>
+func.func @conversion_with_layout_map(%arg0 : memref<?xf32, strided<[?], offset: ?>>) -> memref<?xf32, strided<[?], offset: ?>> {
+  %1 = bufferization.clone %arg0 : memref<?xf32, strided<[?], offset: ?>> to memref<?xf32, strided<[?], offset: ?>>
+  memref.dealloc %arg0 : memref<?xf32, strided<[?], offset: ?>>
+  return %1 : memref<?xf32, strided<[?], offset: ?>>
 }
 
 // -----
@@ -61,11 +59,10 @@ func.func @conversion_with_layout_map(%arg0 : memref<?xf32, #map>) -> memref<?xf
 // This bufferization.clone cannot be lowered because a buffer with this layout
 // map cannot be allocated (or casted to).
 
-#map2 = affine_map<(d0)[s0] -> (d0 * 10 + s0)>
-func.func @conversion_with_invalid_layout_map(%arg0 : memref<?xf32, #map2>)
-    -> memref<?xf32, #map2> {
+func.func @conversion_with_invalid_layout_map(%arg0 : memref<?xf32, strided<[10], offset: ?>>)
+    -> memref<?xf32, strided<[10], offset: ?>> {
 // expected-error at +1 {{failed to legalize operation 'bufferization.clone' that was explicitly marked illegal}}
-  %1 = bufferization.clone %arg0 : memref<?xf32, #map2> to memref<?xf32, #map2>
-  memref.dealloc %arg0 : memref<?xf32, #map2>
-  return %1 : memref<?xf32, #map2>
+  %1 = bufferization.clone %arg0 : memref<?xf32, strided<[10], offset: ?>> to memref<?xf32, strided<[10], offset: ?>>
+  memref.dealloc %arg0 : memref<?xf32, strided<[10], offset: ?>>
+  return %1 : memref<?xf32, strided<[10], offset: ?>>
 }

diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 61c426a80236a..14d2b9709b7cc 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -1042,11 +1042,10 @@ func.func @memref_copy_ranked() {
 // -----
 
 // CHECK-LABEL: func @memref_copy_contiguous
-#map = affine_map<(d0, d1)[s0] -> (d0 * 2 + s0 + d1)>
 func.func @memref_copy_contiguous(%in: memref<16x2xi32>, %offset: index) {
   %buf = memref.alloc() : memref<1x2xi32>
-  %sub = memref.subview %in[%offset, 0] [1, 2] [1, 1] : memref<16x2xi32> to memref<1x2xi32, #map>
-  memref.copy %sub, %buf : memref<1x2xi32, #map> to memref<1x2xi32>
+  %sub = memref.subview %in[%offset, 0] [1, 2] [1, 1] : memref<16x2xi32> to memref<1x2xi32, strided<[2, 1], offset: ?>>
+  memref.copy %sub, %buf : memref<1x2xi32, strided<[2, 1], offset: ?>> to memref<1x2xi32>
   // CHECK: [[EXTRACT0:%.*]] = llvm.extractvalue {{%.*}}[3, 0] : !llvm.struct<(ptr<i32>, ptr<i32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: [[MUL1:%.*]] = llvm.mul {{.*}}, [[EXTRACT0]] : i64
   // CHECK: [[EXTRACT1:%.*]] = llvm.extractvalue {{%.*}}[3, 1] : !llvm.struct<(ptr<i32>, ptr<i32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -1081,11 +1080,10 @@ func.func @memref_copy_0d_offset(%in: memref<2xi32>) {
 // -----
 
 // CHECK-LABEL: func @memref_copy_noncontiguous
-#map = affine_map<(d0, d1)[s0] -> (d0 * 2 + s0 + d1)>
 func.func @memref_copy_noncontiguous(%in: memref<16x2xi32>, %offset: index) {
   %buf = memref.alloc() : memref<2x1xi32>
-  %sub = memref.subview %in[%offset, 0] [2, 1] [1, 1] : memref<16x2xi32> to memref<2x1xi32, #map>
-  memref.copy %sub, %buf : memref<2x1xi32, #map> to memref<2x1xi32>
+  %sub = memref.subview %in[%offset, 0] [2, 1] [1, 1] : memref<16x2xi32> to memref<2x1xi32, strided<[2, 1], offset: ?>>
+  memref.copy %sub, %buf : memref<2x1xi32, strided<[2, 1], offset: ?>> to memref<2x1xi32>
   // CHECK: llvm.call @memrefCopy
   return
 }

diff --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
index 951e820f9a237..734961ecfdde1 100644
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -197,43 +197,41 @@ func.func @parallel_loop_tiled_seq(%arg0 : index, %arg1 : index, %arg2 : index,
 
 // -----
 
-#map0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 #map1 = affine_map<(d0)[s0] -> (2, -d0 + s0)>
 #map2 = affine_map<(d0)[s0] -> (3, -d0 + s0)>
-#map3 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 
 module {
-  func.func @sum(%arg0: memref<?x?xf32, #map0>, %arg1: memref<?x?xf32, #map0>, %arg2: memref<?x?xf32, #map0>) {
+  func.func @sum(%arg0: memref<?x?xf32, strided<[?, 1], offset: ?>>, %arg1: memref<?x?xf32, strided<[?, 1], offset: ?>>, %arg2: memref<?x?xf32, strided<[?, 1], offset: ?>>) {
     %c1 = arith.constant 1 : index
     %c0 = arith.constant 0 : index
     %c3 = arith.constant 3 : index
     %c2 = arith.constant 2 : index
-    %0 = memref.dim %arg0, %c0 : memref<?x?xf32, #map0>
-    %1 = memref.dim %arg0, %c1 : memref<?x?xf32, #map0>
+    %0 = memref.dim %arg0, %c0 : memref<?x?xf32, strided<[?, 1], offset: ?>>
+    %1 = memref.dim %arg0, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
     scf.parallel (%arg3, %arg4) = (%c0, %c0) to (%0, %1) step (%c2, %c3) {
-      %2 = memref.dim %arg0, %c0 : memref<?x?xf32, #map0>
+      %2 = memref.dim %arg0, %c0 : memref<?x?xf32, strided<[?, 1], offset: ?>>
       %3 = affine.min #map1(%arg3)[%2]
       %squared_min = arith.muli %3, %3 : index
-      %4 = memref.dim %arg0, %c1 : memref<?x?xf32, #map0>
+      %4 = memref.dim %arg0, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
       %d = arith.subi %4, %arg4 : index
       %5 = arith.minsi %c3, %d : index
-      %6 = memref.subview %arg0[%arg3, %arg4][%squared_min, %5][%c1, %c1] : memref<?x?xf32, #map0> to memref<?x?xf32, #map3>
-      %7 = memref.dim %arg1, %c0 : memref<?x?xf32, #map0>
+      %6 = memref.subview %arg0[%arg3, %arg4][%squared_min, %5][%c1, %c1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+      %7 = memref.dim %arg1, %c0 : memref<?x?xf32, strided<[?, 1], offset: ?>>
       %8 = affine.min #map1(%arg3)[%7]
-      %9 = memref.dim %arg1, %c1 : memref<?x?xf32, #map0>
+      %9 = memref.dim %arg1, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
       %10 = affine.min #map2(%arg4)[%9]
-      %11 = memref.subview %arg1[%arg3, %arg4][%8, %10][%c1, %c1] : memref<?x?xf32, #map0> to memref<?x?xf32, #map3>
-      %12 = memref.dim %arg2, %c0 : memref<?x?xf32, #map0>
+      %11 = memref.subview %arg1[%arg3, %arg4][%8, %10][%c1, %c1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+      %12 = memref.dim %arg2, %c0 : memref<?x?xf32, strided<[?, 1], offset: ?>>
       %13 = affine.min #map1(%arg3)[%12]
-      %14 = memref.dim %arg2, %c1 : memref<?x?xf32, #map0>
+      %14 = memref.dim %arg2, %c1 : memref<?x?xf32, strided<[?, 1], offset: ?>>
       %15 = affine.min #map2(%arg4)[%14]
-      %16 = memref.subview %arg2[%arg3, %arg4][%13, %15][%c1, %c1] : memref<?x?xf32, #map0> to memref<?x?xf32, #map3>
+      %16 = memref.subview %arg2[%arg3, %arg4][%13, %15][%c1, %c1] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, ?], offset: ?>>
       scf.parallel (%arg5, %arg6) = (%c0, %c0) to (%squared_min, %5) step (%c1, %c1) {
-        %17 = memref.load %6[%arg5, %arg6] : memref<?x?xf32, #map3>
-        %18 = memref.load %11[%arg5, %arg6] : memref<?x?xf32, #map3>
-        %19 = memref.load %16[%arg5, %arg6] : memref<?x?xf32, #map3>
+        %17 = memref.load %6[%arg5, %arg6] : memref<?x?xf32, strided<[?, ?], offset: ?>>
+        %18 = memref.load %11[%arg5, %arg6] : memref<?x?xf32, strided<[?, ?], offset: ?>>
+        %19 = memref.load %16[%arg5, %arg6] : memref<?x?xf32, strided<[?, ?], offset: ?>>
         %20 = arith.addf %17, %18 : f32
-        memref.store %20, %16[%arg5, %arg6] : memref<?x?xf32, #map3>
+        memref.store %20, %16[%arg5, %arg6] : memref<?x?xf32, strided<[?, ?], offset: ?>>
         scf.yield
       } {mapping = [#gpu.loop_dim_map<bound = (d0) -> (d0), map = (d0) -> (d0), processor = thread_x>, #gpu.loop_dim_map<bound = (d0) -> (d0), map = (d0) -> (d0), processor = thread_y>]}
       scf.yield
@@ -242,22 +240,20 @@ module {
   }
 }
 
-// CHECK-DAG:       #[[$MAP0:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 // CHECK-DAG:       #[[$MAP1:.*]] = affine_map<(d0)[s0, s1] -> ((d0 - s0) ceildiv s1)>
 // CHECK-DAG:       #[[$MAP2:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
 // CHECK-DAG:       #[[$MAP3:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
 // CHECK-DAG:       #[[$MAP4:.*]] = affine_map<(d0)[s0] -> (3, -d0 + s0)>
-// CHECK-DAG:       #[[$MAP5:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 
 // CHECK:       module {
 // CHECK-LABEL:   func @sum(
-// CHECK-SAME:              [[VAL_0:%.*]]: memref<?x?xf32, #[[$MAP0]]>, [[VAL_1:%.*]]: memref<?x?xf32, #[[$MAP0]]>, [[VAL_2:%.*]]: memref<?x?xf32, #[[$MAP0]]>) {
+// CHECK-SAME:              [[VAL_0:%.*]]: memref<?x?xf32, strided<[?, 1], offset: ?>>, [[VAL_1:%.*]]: memref<?x?xf32, strided<[?, 1], offset: ?>>, [[VAL_2:%.*]]: memref<?x?xf32, strided<[?, 1], offset: ?>>) {
 // CHECK:           %[[C1:.*]] = arith.constant 1 : index
 // CHECK:           %[[C0:.*]] = arith.constant 0 : index
 // CHECK:           %[[C3:.*]] = arith.constant 3 : index
 // CHECK:           %[[C2:.*]] = arith.constant 2 : index
-// CHECK:           [[VAL_7:%.*]] = memref.dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
-// CHECK:           [[VAL_8:%.*]] = memref.dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:           [[VAL_7:%.*]] = memref.dim [[VAL_0]], %[[C0]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
+// CHECK:           [[VAL_8:%.*]] = memref.dim [[VAL_0]], %[[C1]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
 // CHECK:           [[VAL_9:%.*]] = arith.constant 1 : index
 // CHECK:           [[VAL_10:%.*]] = affine.apply #[[$MAP1]]([[VAL_7]]){{\[}}%[[C0]], %[[C2]]]
 // CHECK:           [[VAL_11:%.*]] = affine.apply #[[$MAP1]]([[VAL_8]]){{\[}}%[[C0]], %[[C3]]]
@@ -267,34 +263,34 @@ module {
 // CHECK:           gpu.launch blocks([[VAL_16:%.*]], [[VAL_17:%.*]], [[VAL_18:%.*]]) in ([[VAL_19:%.*]] = [[VAL_10]], [[VAL_20:%.*]] = [[VAL_11]], [[VAL_21:%.*]] = [[VAL_9]]) threads([[VAL_22:%.*]], [[VAL_23:%.*]], [[VAL_24:%.*]]) in ([[VAL_25:%.*]] = [[VAL_13]], [[VAL_26:%.*]] = [[VAL_15]], [[VAL_27:%.*]] = [[VAL_9]]) {
 // CHECK:             [[VAL_28:%.*]] = affine.apply #[[$MAP2]]([[VAL_16]]){{\[}}%[[C2]], %[[C0]]]
 // CHECK:             [[VAL_29:%.*]] = affine.apply #[[$MAP2]]([[VAL_17]]){{\[}}%[[C3]], %[[C0]]]
-// CHECK:             [[VAL_30:%.*]] = memref.dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_30:%.*]] = memref.dim [[VAL_0]], %[[C0]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
 // CHECK:             [[VAL_31:%.*]] = affine.min #[[$MAP3]]([[VAL_28]]){{\[}}[[VAL_30]]]
 // CHECK:             [[VAL_31_SQUARED:%.*]] = arith.muli [[VAL_31]], [[VAL_31]] : index
-// CHECK:             [[VAL_32:%.*]] = memref.dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_32:%.*]] = memref.dim [[VAL_0]], %[[C1]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
 // CHECK:             [[VAL_D:%.*]] = arith.subi [[VAL_32]], [[VAL_29]] : index
 // CHECK:             [[VAL_33:%.*]] = arith.minsi %[[C3]], [[VAL_D]] : index
-// CHECK:             [[VAL_34:%.*]] = memref.subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[$MAP0]]> to memref<?x?xf32, #[[$MAP5]]>
-// CHECK:             [[VAL_35:%.*]] = memref.dim [[VAL_1]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_34:%.*]] = memref.subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+// CHECK:             [[VAL_35:%.*]] = memref.dim [[VAL_1]], %[[C0]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
 // CHECK:             [[VAL_36:%.*]] = affine.min #[[$MAP3]]([[VAL_28]]){{\[}}[[VAL_35]]]
-// CHECK:             [[VAL_37:%.*]] = memref.dim [[VAL_1]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_37:%.*]] = memref.dim [[VAL_1]], %[[C1]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
 // CHECK:             [[VAL_38:%.*]] = affine.min #[[$MAP4]]([[VAL_29]]){{\[}}[[VAL_37]]]
-// CHECK:             [[VAL_39:%.*]] = memref.subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[$MAP0]]> to memref<?x?xf32, #[[$MAP5]]>
-// CHECK:             [[VAL_40:%.*]] = memref.dim [[VAL_2]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_39:%.*]] = memref.subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, ?], offset: ?>>
+// CHECK:             [[VAL_40:%.*]] = memref.dim [[VAL_2]], %[[C0]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
 // CHECK:             [[VAL_41:%.*]] = affine.min #[[$MAP3]]([[VAL_28]]){{\[}}[[VAL_40]]]
-// CHECK:             [[VAL_42:%.*]] = memref.dim [[VAL_2]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_42:%.*]] = memref.dim [[VAL_2]], %[[C1]] : memref<?x?xf32, strided<[?, 1], offset: ?>>
 // CHECK:             [[VAL_43:%.*]] = affine.min #[[$MAP4]]([[VAL_29]]){{\[}}[[VAL_42]]]
-// CHECK:             [[VAL_44:%.*]] = memref.subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[$MAP0]]> to memref<?x?xf32, #[[$MAP5]]>
+// CHECK:             [[VAL_44:%.*]] = memref.subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, strided<[?, 1], offset: ?>> to memref<?x?xf32, strided<[?, ?], offset: ?>>
 // CHECK:             [[VAL_45:%.*]] = affine.apply #[[$MAP2]]([[VAL_22]]){{\[}}%[[C1]], %[[C0]]]
 // CHECK:             [[VAL_46:%.*]] = arith.cmpi slt, [[VAL_45]], [[VAL_31_SQUARED]] : index
 // CHECK:             scf.if [[VAL_46]] {
 // CHECK:               [[VAL_47:%.*]] = affine.apply #[[$MAP2]]([[VAL_23]]){{\[}}%[[C1]], %[[C0]]]
 // CHECK:               [[VAL_48:%.*]] = arith.cmpi slt, [[VAL_47]], [[VAL_33]] : index
 // CHECK:               scf.if [[VAL_48]] {
-// CHECK:                 [[VAL_49:%.*]] = memref.load [[VAL_34]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
-// CHECK:                 [[VAL_50:%.*]] = memref.load [[VAL_39]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
-// CHECK:                 [[VAL_51:%.*]] = memref.load [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
+// CHECK:                 [[VAL_49:%.*]] = memref.load [[VAL_34]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, strided<[?, ?], offset: ?>>
+// CHECK:                 [[VAL_50:%.*]] = memref.load [[VAL_39]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, strided<[?, ?], offset: ?>>
+// CHECK:                 [[VAL_51:%.*]] = memref.load [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, strided<[?, ?], offset: ?>>
 // CHECK:                 [[VAL_52:%.*]] = arith.addf [[VAL_49]], [[VAL_50]] : f32
-// CHECK:                 memref.store [[VAL_52]], [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
+// CHECK:                 memref.store [[VAL_52]], [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, strided<[?, ?], offset: ?>>
 // CHECK:               }
 // CHECK:             }
 // CHECK:             gpu.terminator

diff --git a/mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir
index 61e7973f4260c..ff94c1b331d92 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/finalizing-bufferize.mlir
@@ -29,51 +29,45 @@ func.func @unable_to_convert_lone_tensor_load(%arg0: memref<f32>) {
 
 // -----
 
-//       CHECK: #[[$map1:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 // CHECK-LABEL: func @dyn_layout_to_no_layout_cast(
-//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, #[[$map1]]>)
+//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, strided<[1], offset: ?>>)
 //       CHECK:   %[[c0:.*]] = arith.constant 0 : index
 //       CHECK:   %[[dim:.*]] = memref.dim %[[arg]], %[[c0]]
 //       CHECK:   %[[alloc:.*]] = memref.alloc(%[[dim]]) : memref<?xf32>
 //       CHECK:   memref.copy %[[arg]], %[[alloc]]
 //       CHECK:   return %[[alloc]]
-#map1 = affine_map<(d0)[s0] -> (d0 + s0)>
-func.func @dyn_layout_to_no_layout_cast(%m: memref<?xf32, #map1>) -> memref<?xf32> {
-  %0 = bufferization.to_tensor %m : memref<?xf32, #map1>
+func.func @dyn_layout_to_no_layout_cast(%m: memref<?xf32, strided<[1], offset: ?>>) -> memref<?xf32> {
+  %0 = bufferization.to_tensor %m : memref<?xf32, strided<[1], offset: ?>>
   %1 = bufferization.to_memref %0 : memref<?xf32>
   return %1 : memref<?xf32>
 }
 
 // -----
 
-//       CHECK: #[[$map2:.*]] = affine_map<(d0)[s0] -> (d0 * 100 + s0)>
 // CHECK-LABEL: func @fancy_layout_to_no_layout_cast(
-//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, #[[$map2]]>)
+//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, strided<[100], offset: ?>>)
 //       CHECK:   %[[c0:.*]] = arith.constant 0 : index
 //       CHECK:   %[[dim:.*]] = memref.dim %[[arg]], %[[c0]]
 //       CHECK:   %[[alloc:.*]] = memref.alloc(%[[dim]]) : memref<?xf32>
 //       CHECK:   memref.copy %[[arg]], %[[alloc]]
 //       CHECK:   return %[[alloc]]
-#map2 = affine_map<(d0)[s0] -> (d0 * 100 + s0)>
-func.func @fancy_layout_to_no_layout_cast(%m: memref<?xf32, #map2>) -> memref<?xf32> {
-  %0 = bufferization.to_tensor %m : memref<?xf32, #map2>
+func.func @fancy_layout_to_no_layout_cast(%m: memref<?xf32, strided<[100], offset: ?>>) -> memref<?xf32> {
+  %0 = bufferization.to_tensor %m : memref<?xf32, strided<[100], offset: ?>>
   %1 = bufferization.to_memref %0 : memref<?xf32>
   return %1 : memref<?xf32>
 }
 
 // -----
 
-//       CHECK: #[[$map3:.*]] = affine_map<(d0)[s0] -> (d0 + 25)>
 // CHECK-LABEL: func @static_layout_to_no_layout_cast(
-//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, #[[$map3]]>)
+//  CHECK-SAME:     %[[arg:.*]]: memref<?xf32, strided<[1], offset: 25>>)
 //       CHECK:   %[[c0:.*]] = arith.constant 0 : index
 //       CHECK:   %[[dim:.*]] = memref.dim %[[arg]], %[[c0]]
 //       CHECK:   %[[alloc:.*]] = memref.alloc(%[[dim]]) : memref<?xf32>
 //       CHECK:   memref.copy %[[arg]], %[[alloc]]
 //       CHECK:   return %[[alloc]]
-#map3 = affine_map<(d0)[s0] -> (d0 + 25)>
-func.func @static_layout_to_no_layout_cast(%m: memref<?xf32, #map3>) -> memref<?xf32> {
-  %0 = bufferization.to_tensor %m : memref<?xf32, #map3>
+func.func @static_layout_to_no_layout_cast(%m: memref<?xf32, strided<[1], offset: 25>>) -> memref<?xf32> {
+  %0 = bufferization.to_tensor %m : memref<?xf32, strided<[1], offset: 25>>
   %1 = bufferization.to_memref %0 : memref<?xf32>
   return %1 : memref<?xf32>
 }
@@ -82,13 +76,12 @@ func.func @static_layout_to_no_layout_cast(%m: memref<?xf32, #map3>) -> memref<?
 
 // TODO: to_memref with layout maps not supported yet. This should fold to a
 // memref.cast.
-#map4 = affine_map<(d0)[s0] -> (d0 + s0)>
-func.func @no_layout_to_dyn_layout_cast(%m: memref<?xf32>) -> memref<?xf32, #map4> {
+func.func @no_layout_to_dyn_layout_cast(%m: memref<?xf32>) -> memref<?xf32, strided<[1], offset: ?>> {
   %0 = bufferization.to_tensor %m : memref<?xf32>
   // expected-error @+1 {{failed to materialize conversion for result #0 of operation 'bufferization.to_memref' that remained live after conversion}}
-  %1 = bufferization.to_memref %0 : memref<?xf32, #map4>
+  %1 = bufferization.to_memref %0 : memref<?xf32, strided<[1], offset: ?>>
   // expected-note @+1 {{see existing live user here}}
-  return %1 : memref<?xf32, #map4>
+  return %1 : memref<?xf32, strided<[1], offset: ?>>
 }
 
 // -----

diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
index 8941b03391f71..24b2c73ce27a4 100644
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -753,8 +753,6 @@ func.func @fold_unit_dim_for_init_memref(%input: memref<1x1000xf32>) -> memref<1
 // Test that nothing changes and no assertions are fired for memrefs with affine
 // maps while still changing the other operations.
 
-#map0 = affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 + d2)>
-
 #accesses = [
   affine_map<(i, j, k, l, m) -> (i, k, m)>,
   affine_map<(i, j, k, l, m) -> ()>,
@@ -767,9 +765,9 @@ func.func @fold_unit_dim_for_init_memref(%input: memref<1x1000xf32>) -> memref<1
   library_call = "some_external_func"
 }
 
-func.func @input_stays_same(%arg0 : memref<?x1x?xf32, #map0>, %arg1 : f32, %shape: memref<?x1x?x1x?xf32>) -> memref<?x1x?x1x?xf32> {
+func.func @input_stays_same(%arg0 : memref<?x1x?xf32, strided<[?, 1, 1]>>, %arg1 : f32, %shape: memref<?x1x?x1x?xf32>) -> memref<?x1x?x1x?xf32> {
   linalg.generic #trait
-     ins(%arg0, %arg1 : memref<?x1x?xf32, #map0>, f32)
+     ins(%arg0, %arg1 : memref<?x1x?xf32, strided<[?, 1, 1]>>, f32)
     outs(%shape : memref<?x1x?x1x?xf32>) {
        ^bb0(%arg2 : f32, %arg3 : f32, %arg4 : f32) :
          linalg.yield %arg3 : f32
@@ -777,12 +775,11 @@ func.func @input_stays_same(%arg0 : memref<?x1x?xf32, #map0>, %arg1 : f32, %shap
   return %shape : memref<?x1x?x1x?xf32>
 }
 
-// CHECK:     #[[MAP0:.*]] = affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 + d2)>
 // CHECK:     #[[MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d2)>
 // CHECK:     #[[MAP2:.*]] = affine_map<(d0, d1, d2) -> ()>
 // CHECK:     #[[MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK:     func @input_stays_same(
-// CHECK-SAME:  %[[ARG0:.*]]: memref<?x1x?xf32, #[[MAP0]]>,
+// CHECK-SAME:  %[[ARG0:.*]]: memref<?x1x?xf32, strided<[?, 1, 1]>>,
 // CHECK-SAME:  %[[ARG1:.*]]: f32, %[[ARG2:.*]]: memref<?x1x?x1x?xf32>)
// CHECK-SAME:  -> memref<?x1x?x1x?xf32> {
 // CHECK:      %[[OUT:.*]] = memref.collapse_shape %[[ARG2]] {{\[}}[0, 1], [2, 3], [4]]
@@ -790,7 +787,7 @@ func.func @input_stays_same(%arg0 : memref<?x1x?xf32, #map0>, %arg1 : f32, %shap
 // CHECK:      linalg.generic
 // CHECK-SAME:   {indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]],
 // CHECK-SAME:   iterator_types = ["parallel", "parallel", "parallel"]}
-// CHECK-SAME:   ins(%[[ARG0]], %[[ARG1]] : memref<?x1x?xf32, #[[MAP0]]>, f32)
+// CHECK-SAME:   ins(%[[ARG0]], %[[ARG1]] : memref<?x1x?xf32, strided<[?, 1, 1]>>, f32)
 // CHECK-SAME:   outs(%[[OUT]] : memref<?x?x?xf32>) {
 // CHECK:      ^bb0(%{{.*}}: f32, %[[ARG:.*]]: f32, %{{.*}}: f32):
 // CHECK:       linalg.yield %[[ARG]] : f32

diff --git a/mlir/test/Dialect/Linalg/fusion-indexed.mlir b/mlir/test/Dialect/Linalg/fusion-indexed.mlir
index 20e98f7370374..eb5f38bc23096 100644
--- a/mlir/test/Dialect/Linalg/fusion-indexed.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-indexed.mlir
@@ -1,6 +1,5 @@
 // RUN: mlir-opt %s -test-linalg-greedy-fusion -split-input-file | FileCheck %s
 
-#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 #id_2d = affine_map<(d0, d1) -> (d0, d1)>
 #pointwise_2d_trait = {
   indexing_maps = [#id_2d, #id_2d, #id_2d],
@@ -28,14 +27,14 @@ func.func @fuse_indexed_consumer(%A: memref<?x?xf32>,
   scf.for %arg2 = %c0 to %0 step %c10 {
     scf.for %arg3 = %c0 to %1 step %c25 {
       %4 = memref.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-          memref<?x?xf32> to memref<?x?xf32, #map>
+          memref<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
       %5 = memref.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-          memref<?x?xf32> to memref<?x?xf32, #map>
+          memref<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
       linalg.generic {
         indexing_maps = [#id_2d, #id_2d],
         iterator_types = ["parallel", "parallel"]}
-        ins(%4 : memref<?x?xf32, #map>)
-       outs(%5 : memref<?x?xf32, #map>) {
+        ins(%4 : memref<?x?xf32, strided<[?, ?], offset: ?>>)
+       outs(%5 : memref<?x?xf32, strided<[?, ?], offset: ?>>) {
       ^bb0(%arg4: f32, %arg5: f32):
         %idx0 = linalg.index 0 : index
         %idx1 = linalg.index 1 : index
@@ -65,7 +64,6 @@ func.func @fuse_indexed_consumer(%A: memref<?x?xf32>,
 
 // -----
 
-#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 func.func @fuse_indexed_producer(%A: memref<?x?xindex>,
                             %B: memref<?x?xindex>) {
   %c1 = arith.constant 1 : index
@@ -86,15 +84,15 @@ func.func @fuse_indexed_producer(%A: memref<?x?xindex>,
   %A_Y = memref.dim %A, %c1 : memref<?x?xindex>
   scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%A_X, %A_Y) step (%c10, %c25) {
     %A_view = memref.subview %A[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-        memref<?x?xindex> to memref<?x?xindex, #map>
+        memref<?x?xindex> to memref<?x?xindex, strided<[?, ?], offset: ?>>
     %B_view = memref.subview %B[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-        memref<?x?xindex> to memref<?x?xindex, #map>
+        memref<?x?xindex> to memref<?x?xindex, strided<[?, ?], offset: ?>>
     linalg.generic {
       indexing_maps = [affine_map<(i, j) -> (i, j)>,
                        affine_map<(i, j) -> (i, j)>],
       iterator_types = ["parallel", "parallel"]}
-      ins(%A_view : memref<?x?xindex, #map>)
-      outs(%B_view : memref<?x?xindex, #map>) {
+      ins(%A_view : memref<?x?xindex, strided<[?, ?], offset: ?>>)
+      outs(%B_view : memref<?x?xindex, strided<[?, ?], offset: ?>>) {
     ^bb0(%a: index, %b: index):
       linalg.yield %a : index
     }
@@ -115,7 +113,6 @@ func.func @fuse_indexed_producer(%A: memref<?x?xindex>,
 
 // -----
 
-#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 func.func @fuse_indexed_producer_tiled_second_dim_only(%A: memref<?x?xindex>,
                                                   %B: memref<?x?xindex>) {
   %c1 = arith.constant 1 : index
@@ -135,15 +132,15 @@ func.func @fuse_indexed_producer_tiled_second_dim_only(%A: memref<?x?xindex>,
   %A_Y = memref.dim %A, %c1 : memref<?x?xindex>
   scf.parallel (%arg3) = (%c0) to (%A_Y) step (%c25) {
     %A_view = memref.subview %A[%c0, %arg3][%A_X, %c25][%c1, %c1] :
-        memref<?x?xindex> to memref<?x?xindex, #map>
+        memref<?x?xindex> to memref<?x?xindex, strided<[?, ?], offset: ?>>
     %B_view = memref.subview %B[%c0, %arg3][%A_X, %c25][%c1, %c1] :
-        memref<?x?xindex> to memref<?x?xindex, #map>
+        memref<?x?xindex> to memref<?x?xindex, strided<[?, ?], offset: ?>>
     linalg.generic {
       indexing_maps = [affine_map<(i, j) -> (i, j)>,
                        affine_map<(i, j) -> (i, j)>],
       iterator_types = ["parallel", "parallel"]}
-      ins(%A_view : memref<?x?xindex, #map>)
-      outs(%B_view : memref<?x?xindex, #map>) {
+      ins(%A_view : memref<?x?xindex, strided<[?, ?], offset: ?>>)
+      outs(%B_view : memref<?x?xindex, strided<[?, ?], offset: ?>>) {
     ^bb0(%a: index, %b: index):
       linalg.yield %a : index
     }

diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
index 225718ee059fd..a3c68e561416a 100644
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -599,7 +599,6 @@ func.func @pointwise_no_view(%M: index, %N: index) {
 
 #map0 = affine_map<(d0, d1) -> (d0)>
 #map1 = affine_map<(d0, d1) -> (d0, d1)>
-#map2 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 
 func.func @fusion_of_three(%arg0: memref<100x10xf32>,
                       %arg1: memref<100xf32>,
@@ -633,14 +632,14 @@ func.func @fusion_of_three(%arg0: memref<100x10xf32>,
   scf.for %i = %c0 to %2 step %c1 {
     scf.for %j = %c0 to %3 step %c1 {
       %6 = memref.subview %1[%i, %j][%c1, %c1][%c1, %c1] :
-      memref<100x10xf32> to memref<?x?xf32, #map2>
+      memref<100x10xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
       %7 = memref.subview %arg2[%i, %j][%c1, %c1][%c1, %c1] :
-      memref<100x10xf32> to memref<?x?xf32, #map2>
+      memref<100x10xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
       linalg.generic {
         indexing_maps = [#map1, #map1],
         iterator_types = ["parallel", "parallel"]}
-        ins(%6 : memref<?x?xf32, #map2>)
-       outs(%7 : memref<?x?xf32, #map2>) {
+        ins(%6 : memref<?x?xf32, strided<[?, ?], offset: ?>>)
+       outs(%7 : memref<?x?xf32, strided<[?, ?], offset: ?>>) {
           ^bb0(%arg3: f32, %arg4: f32):
             %8 = math.exp %arg3 : f32
             linalg.yield %8 : f32
@@ -669,7 +668,6 @@ func.func @fusion_of_three(%arg0: memref<100x10xf32>,
 
 #map0 = affine_map<(d0)[s0] -> (2, -d0 + s0)>
 #map1 = affine_map<(d0)[s0] -> (3, -d0 + s0)>
-#map2 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 #map3 = affine_map<(d0)[s0, s1] -> (s0 + 1, -d0 + s0 + s1)>
 #map4 = affine_map<(d0)[s0, s1] -> (s0 + 2, -d0 + s0 + s1)>
 
@@ -688,11 +686,11 @@ func.func @fill_and_conv(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2:
     scf.for %arg4 = %c0 to %5 step %c3 {
       %6 = affine.min #map3(%arg3)[%2, %4]
       %7 = affine.min #map4(%arg4)[%3, %5]
-      %8 = memref.subview %arg0[%arg3, %arg4] [%6, %7] [1, 1] : memref<?x?xf32> to memref<?x?xf32, #map2>
+      %8 = memref.subview %arg0[%arg3, %arg4] [%6, %7] [1, 1] : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
       %9 = affine.min #map0(%arg3)[%4]
       %10 = affine.min #map1(%arg4)[%5]
-      %11 = memref.subview %arg2[%arg3, %arg4] [%9, %10] [1, 1] : memref<?x?xf32> to memref<?x?xf32, #map2>
-      linalg.conv_2d ins(%8, %arg1 : memref<?x?xf32, #map2>, memref<?x?xf32>) outs(%11 : memref<?x?xf32, #map2>)
+      %11 = memref.subview %arg2[%arg3, %arg4] [%9, %10] [1, 1] : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+      linalg.conv_2d ins(%8, %arg1 : memref<?x?xf32, strided<[?, 1], offset: ?>>, memref<?x?xf32>) outs(%11 : memref<?x?xf32, strided<[?, 1], offset: ?>>)
     }
   }
   return

diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 86ea60dbd7b3d..2b5a7e6e06c4b 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -151,22 +151,19 @@ transform.with_pdl_patterns {
 
 // -----
 
-#map0 = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4)>
-#map2 = affine_map<(d0, d1)[s0] -> (d0 * 128 + s0 + d1)>
-#map5 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 #map6 = affine_map<(d0, d1, d2) -> (d0, d2)>
 #map7 = affine_map<(d0, d1, d2) -> (d1, d2)>
 #map8 = affine_map<(d0, d1, d2) -> (d0, d1)>
 
 // CHECK: promote_rank_reducing_subviews(%[[arg0:.+]]: memref<{{.*}}>, %[[arg1:.+]]: memref<{{.*}}>, %[[arg2:.+]]: memref<{{.*}}>, %[[lb1:.+]]: index, %[[lb2:.+]]: index, %[[lb3:.+]]: index, %[[lb4:.+]]: index, %[[lb5:.+]]: index, %[[lb6:.+]]: index, %[[ub1:.+]]: index, %[[ub2:.+]]: index
-func.func @promote_rank_reducing_subviews(%arg0:  memref<?x?x?x64xf32, #map0>, %arg1: memref<128x3x3x64xf32, #map0>, %arg2: memref<?x?x?x128xf32>,
+func.func @promote_rank_reducing_subviews(%arg0:  memref<?x?x?x64xf32, strided<[?, ?, ?, ?], offset: ?>>, %arg1: memref<128x3x3x64xf32, strided<[?, ?, ?, ?], offset: ?>>, %arg2: memref<?x?x?x128xf32>,
                                           %arg3: index, %arg4: index, %arg5: index, %arg6: index, %arg7: index, %arg8: index, %ub1: index, %ub2: index) {
-  %13 = memref.subview %arg0[%arg3, 0, %arg4, %arg8] [1, 1, %ub1, 32] [1, 1, 1, 1] : memref<?x?x?x64xf32, #map0> to memref<?x32xf32, #map5>
-  %14 = memref.subview %arg1[0, %arg6, %arg7, %arg8] [128, 1, 1, 32] [1, 1, 1, 1] : memref<128x3x3x64xf32, #map0> to memref<128x32xf32, #map5>
-  %9 = memref.subview %arg2[%arg3, %arg4, %arg5, 0] [1, 1, %ub2, 128] [1, 1, 1, 1] : memref<?x?x?x128xf32> to memref<?x128xf32, #map2>
+  %13 = memref.subview %arg0[%arg3, 0, %arg4, %arg8] [1, 1, %ub1, 32] [1, 1, 1, 1] : memref<?x?x?x64xf32, strided<[?, ?, ?, ?], offset: ?>> to memref<?x32xf32, strided<[?, ?], offset: ?>>
+  %14 = memref.subview %arg1[0, %arg6, %arg7, %arg8] [128, 1, 1, 32] [1, 1, 1, 1] : memref<128x3x3x64xf32, strided<[?, ?, ?, ?], offset: ?>> to memref<128x32xf32, strided<[?, ?], offset: ?>>
+  %9 = memref.subview %arg2[%arg3, %arg4, %arg5, 0] [1, 1, %ub2, 128] [1, 1, 1, 1] : memref<?x?x?x128xf32> to memref<?x128xf32, strided<[128, 1], offset: ?>>
 
   // CHECK: %[[a_alloc:.+]] = memref.alloc
-  // CHECK: %[[a_view:.+]] = memref.view %[[a_alloc]]{{.*}}                 
+  // CHECK: %[[a_view:.+]] = memref.view %[[a_alloc]]{{.*}}
   // CHECK: %[[a_pro_subview:.+]] = memref.subview %[[a_view]][0, 0] [%[[ub1]], {{.+}}] [1, 1]
 
   // CHECK: memref.alloc
@@ -182,7 +179,7 @@ func.func @promote_rank_reducing_subviews(%arg0:  memref<?x?x?x64xf32, #map0>, %
   // CHECK-SAME: ins(%[[a_pro_subview]], %[[b_pro_subview]]
   // CHECK-SAME: outs(%[[c_pro_subview]]
 
-  linalg.generic {indexing_maps = [#map6, #map7, #map8], iterator_types = ["parallel", "parallel", "reduction"]} ins(%13, %14 : memref<?x32xf32, #map5>, memref<128x32xf32, #map5>) outs(%9 : memref<?x128xf32, #map2>) {
+  linalg.generic {indexing_maps = [#map6, #map7, #map8], iterator_types = ["parallel", "parallel", "reduction"]} ins(%13, %14 : memref<?x32xf32, strided<[?, ?], offset: ?>>, memref<128x32xf32, strided<[?, ?], offset: ?>>) outs(%9 : memref<?x128xf32, strided<[128, 1], offset: ?>>) {
   ^bb0(%arg9: f32, %arg10: f32, %arg11: f32):
     %15 = arith.mulf %arg9, %arg10 : f32
     %16 = arith.addf %arg11, %15 : f32

diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index 08144a2fee63e..421a04f89cffc 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -42,15 +42,14 @@ func.func @subview_of_static_full_size(%arg0 : memref<4x6x16x32xi8>) -> memref<4
 
 // -----
 
-#map0 = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
 func.func @subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
-    %arg2 : index) -> memref<?x?x?xf32, #map0>
+    %arg2 : index) -> memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c4 = arith.constant 4 : index
-  %0 = memref.subview %arg0[%c0, %arg1, %c1] [%c4, %c1, %arg2] [%c1, %c1, %c1] : memref<?x?x?xf32> to memref<?x?x?xf32, #map0>
-  return %0 : memref<?x?x?xf32, #map0>
+  %0 = memref.subview %arg0[%c0, %arg1, %c1] [%c4, %c1, %arg2] [%c1, %c1, %c1] : memref<?x?x?xf32> to memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
+  return %0 : memref<?x?x?xf32, strided<[?, ?, ?], offset: ?>>
 }
 // CHECK-LABEL: func @subview_canonicalize
 //  CHECK-SAME:   %[[ARG0:.+]]: memref<?x?x?xf32>
@@ -62,15 +61,14 @@ func.func @subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
 
 // -----
 
-#map0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 func.func @rank_reducing_subview_canonicalize(%arg0 : memref<?x?x?xf32>, %arg1 : index,
-    %arg2 : index) -> memref<?x?xf32, #map0>
+  %arg2 : index) -> memref<?x?xf32, strided<[?, 1], offset: ?>>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c4 = arith.constant 4 : index
-  %0 = memref.subview %arg0[%c0, %arg1, %c1] [%c4, 1, %arg2] [%c1, %c1, %c1] : memref<?x?x?xf32> to memref<?x?xf32, #map0>
-  return %0 : memref<?x?xf32, #map0>
+  %0 = memref.subview %arg0[%c0, %arg1, %c1] [%c4, 1, %arg2] [%c1, %c1, %c1] : memref<?x?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+  return %0 : memref<?x?xf32, strided<[?, 1], offset: ?>>
 }
 // CHECK-LABEL: func @rank_reducing_subview_canonicalize
 //  CHECK-SAME:   %[[ARG0:.+]]: memref<?x?x?xf32>
@@ -254,27 +252,25 @@ func.func @alloc_alignment_const_fold() -> memref<?xf32> {
 
 // CHECK-LABEL: func @alloc_const_fold_with_symbols1(
 //  CHECK: %[[c1:.+]] = arith.constant 1 : index
-//  CHECK: %[[mem1:.+]] = memref.alloc({{.*}})[%[[c1]], %[[c1]]] : memref<?xi32, #map>
-//  CHECK: return %[[mem1]] : memref<?xi32, #map>
-#map0 = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-func.func @alloc_const_fold_with_symbols1(%arg0 : index) -> memref<?xi32, #map0> {
+//  CHECK: %[[mem1:.+]] = memref.alloc({{.*}})[%[[c1]], %[[c1]]] : memref<?xi32, strided{{.*}}>
+//  CHECK: return %[[mem1]] : memref<?xi32, strided{{.*}}>
+func.func @alloc_const_fold_with_symbols1(%arg0 : index) -> memref<?xi32, strided<[?], offset: ?>> {
   %c1 = arith.constant 1 : index
-  %0 = memref.alloc(%arg0)[%c1, %c1] : memref<?xi32, #map0>
-  return %0 : memref<?xi32, #map0>
+  %0 = memref.alloc(%arg0)[%c1, %c1] : memref<?xi32, strided<[?], offset: ?>>
+  return %0 : memref<?xi32, strided<[?], offset: ?>>
 }
 
 // -----
 
 // CHECK-LABEL: func @alloc_const_fold_with_symbols2(
 //  CHECK: %[[c1:.+]] = arith.constant 1 : index
-//  CHECK: %[[mem1:.+]] = memref.alloc()[%[[c1]], %[[c1]]] : memref<1xi32, #map>
-//  CHECK: %[[mem2:.+]] = memref.cast %[[mem1]] : memref<1xi32, #map> to memref<?xi32, #map>
-//  CHECK: return %[[mem2]] : memref<?xi32, #map>
-#map0 = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-func.func @alloc_const_fold_with_symbols2() -> memref<?xi32, #map0> {
+//  CHECK: %[[mem1:.+]] = memref.alloc()[%[[c1]], %[[c1]]] : memref<1xi32, strided{{.*}}>
+//  CHECK: %[[mem2:.+]] = memref.cast %[[mem1]] : memref<1xi32, strided{{.*}}> to memref<?xi32, strided{{.*}}>
+//  CHECK: return %[[mem2]] : memref<?xi32, strided{{.*}}>
+func.func @alloc_const_fold_with_symbols2() -> memref<?xi32, strided<[?], offset: ?>> {
   %c1 = arith.constant 1 : index
-  %0 = memref.alloc(%c1)[%c1, %c1] : memref<?xi32, #map0>
-  return %0 : memref<?xi32, #map0>
+  %0 = memref.alloc(%c1)[%c1, %c1] : memref<?xi32, strided<[?], offset: ?>>
+  return %0 : memref<?xi32, strided<[?], offset: ?>>
 }
 
 // -----
@@ -475,10 +471,9 @@ func.func @fold_rank_memref(%arg0 : memref<?x?xf32>) -> (index) {
 
 // -----
 
-#map = affine_map<(d0, d1) -> (d0 * 42 + d1)>
-func.func @fold_no_op_subview(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
-  %0 = memref.subview %arg0[0, 0] [20, 42] [1, 1] : memref<20x42xf32> to memref<20x42xf32, #map>
-  return %0 : memref<20x42xf32, #map>
+func.func @fold_no_op_subview(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, strided<[42, 1]>> {
+  %0 = memref.subview %arg0[0, 0] [20, 42] [1, 1] : memref<20x42xf32> to memref<20x42xf32, strided<[42, 1]>>
+  return %0 : memref<20x42xf32, strided<[42, 1]>>
 }
 // CHECK-LABEL: func @fold_no_op_subview(
 //       CHECK:   %[[ARG0:.+]]: memref<20x42xf32>)
@@ -487,10 +482,9 @@ func.func @fold_no_op_subview(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #m
 
 // -----
 
-#map = affine_map<(d0, d1) -> (d0 * 42 + d1 + 1)>
-func.func @no_fold_subview_with_non_zero_offset(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
-  %0 = memref.subview %arg0[0, 1] [20, 42] [1, 1] : memref<20x42xf32> to memref<20x42xf32, #map>
-  return %0 : memref<20x42xf32, #map>
+func.func @no_fold_subview_with_non_zero_offset(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, strided<[42, 1], offset: 1>> {
+  %0 = memref.subview %arg0[0, 1] [20, 42] [1, 1] : memref<20x42xf32> to memref<20x42xf32, strided<[42, 1], offset: 1>>
+  return %0 : memref<20x42xf32, strided<[42, 1], offset: 1>>
 }
 // CHECK-LABEL: func @no_fold_subview_with_non_zero_offset(
 //       CHECK:   %[[SUBVIEW:.+]] = memref.subview
@@ -498,10 +492,9 @@ func.func @no_fold_subview_with_non_zero_offset(%arg0 : memref<20x42xf32>) -> me
 
 // -----
 
-#map = affine_map<(d0, d1) -> (d0 * 42 + d1 * 2)>
-func.func @no_fold_subview_with_non_unit_stride(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, #map> {
-  %0 = memref.subview %arg0[0, 0] [20, 42] [1, 2] : memref<20x42xf32> to memref<20x42xf32, #map>
-  return %0 : memref<20x42xf32, #map>
+func.func @no_fold_subview_with_non_unit_stride(%arg0 : memref<20x42xf32>) -> memref<20x42xf32, strided<[42, 2]>> {
+  %0 = memref.subview %arg0[0, 0] [20, 42] [1, 2] : memref<20x42xf32> to memref<20x42xf32, strided<[42, 2]>>
+  return %0 : memref<20x42xf32, strided<[42, 2]>>
 }
 // CHECK-LABEL: func @no_fold_subview_with_non_unit_stride(
 //       CHECK:   %[[SUBVIEW:.+]] = memref.subview
@@ -509,14 +502,13 @@ func.func @no_fold_subview_with_non_unit_stride(%arg0 : memref<20x42xf32>) -> me
 
 // -----
 
-#map = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
-func.func @no_fold_dynamic_no_op_subview(%arg0 : memref<?x?xf32>) -> memref<?x?xf32, #map> {
+func.func @no_fold_dynamic_no_op_subview(%arg0 : memref<?x?xf32>) -> memref<?x?xf32, strided<[?, 1], offset: ?>> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %0 = memref.dim %arg0, %c0 : memref<?x?xf32>
   %1 = memref.dim %arg0, %c1 : memref<?x?xf32>
-  %2 = memref.subview %arg0[0, 0] [%0, %1] [1, 1] : memref<?x?xf32> to memref<?x?xf32, #map>
-  return %2 : memref<?x?xf32, #map>
+  %2 = memref.subview %arg0[0, 0] [%0, %1] [1, 1] : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1], offset: ?>>
+  return %2 : memref<?x?xf32, strided<[?, 1], offset: ?>>
 }
 // CHECK-LABEL: func @no_fold_dynamic_no_op_subview(
 //       CHECK:   %[[SUBVIEW:.+]] = memref.subview
@@ -535,11 +527,10 @@ func.func @atomicrmw_cast_fold(%arg0 : f32, %arg1 : memref<4xf32>, %c : index) {
 
 // -----
 
-#map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
 func.func @copy_of_cast(%m1: memref<?xf32>, %m2: memref<*xf32>) {
-  %casted1 = memref.cast %m1 : memref<?xf32> to memref<?xf32, #map>
-  %casted2 = memref.cast %m2 : memref<*xf32> to memref<?xf32, #map>
-  memref.copy %casted1, %casted2 : memref<?xf32, #map> to memref<?xf32, #map>
+  %casted1 = memref.cast %m1 : memref<?xf32> to memref<?xf32, strided<[?], offset: ?>>
+  %casted2 = memref.cast %m2 : memref<*xf32> to memref<?xf32, strided<[?], offset: ?>>
+  memref.copy %casted1, %casted2 : memref<?xf32, strided<[?], offset: ?>> to memref<?xf32, strided<[?], offset: ?>>
   return
 }
 

diff --git a/mlir/test/Dialect/MemRef/simplify-extract-strided-metadata.mlir b/mlir/test/Dialect/MemRef/simplify-extract-strided-metadata.mlir
index 8ef1729c0ce07..0daeb4a23a1f7 100644
--- a/mlir/test/Dialect/MemRef/simplify-extract-strided-metadata.mlir
+++ b/mlir/test/Dialect/MemRef/simplify-extract-strided-metadata.mlir
@@ -214,16 +214,15 @@ func.func @extract_strided_metadata_of_rank_reduced_subview_w_variable_strides(
 //   CHECK-DAG: %[[FINAL_OFFSET:.*]] = affine.apply #[[$OFFSETS_MAP]]()[%[[DYN_OFFSET0]], %[[DYN_OFFSET1]]]
 //
 //       CHECK: return %[[BASE]], %[[FINAL_OFFSET]], %[[C64]], %[[C64]], %[[C128]], %[[C1]]
-#map0 = affine_map<(d0, d1)[s0] -> (d0 * 128 + s0 + d1)>
 func.func @extract_strided_metadata_of_subview_w_variable_offset(
     %arg0: memref<384x128xf32>, %arg1 : index, %arg2 : index)
     -> (memref<f32>, index, index, index, index, index) {
 
   %subview = memref.subview %arg0[%arg1, %arg2] [64, 64] [1, 1] :
-    memref<384x128xf32> to memref<64x64xf32, #map0>
+    memref<384x128xf32> to memref<64x64xf32, strided<[128, 1], offset: ?>>
 
   %base_buffer, %offset, %sizes:2, %strides:2 = memref.extract_strided_metadata %subview :
-    memref<64x64xf32, #map0> -> memref<f32>, index, index, index, index, index
+    memref<64x64xf32, strided<[128, 1], offset: ?>> -> memref<f32>, index, index, index, index, index
 
   return %base_buffer, %offset, %sizes#0, %sizes#1, %strides#0, %strides#1 :
     memref<f32>, index, index, index, index, index

diff --git a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
index 288c7e67f756b..25f38452e3b4a 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
@@ -1,19 +1,16 @@
 // RUN: mlir-opt %s -test-vector-transfer-collapse-inner-most-dims -split-input-file | FileCheck %s
 
-#map1 = affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 3072 + s0 + d1 * 8 + d2 + d3)>
-func.func @contiguous_inner_most_view(%in: memref<1x1x8x1xf32, #map1>) -> vector<1x8x1xf32>{
+func.func @contiguous_inner_most_view(%in: memref<1x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>>) -> vector<1x8x1xf32>{
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.0 : f32
-  %0 = vector.transfer_read %in[%c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true]} : memref<1x1x8x1xf32, #map1>, vector<1x8x1xf32>
+  %0 = vector.transfer_read %in[%c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true]} : memref<1x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>>, vector<1x8x1xf32>
   return %0 : vector<1x8x1xf32>
 }
-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 3072 + s0 + d1 * 8 + d2 + d3)>
-//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 3072 + s0 + d1 * 8 + d2)>
-//      CHECK: func @contiguous_inner_most_view(%[[SRC:.+]]: memref<1x1x8x1xf32, #[[MAP0]]>
+//      CHECK: func @contiguous_inner_most_view(%[[SRC:.+]]: memref<1x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>>
 //      CHECK:   %[[SRC_0:.+]] = memref.subview %[[SRC]]
-// CHECK-SAME:    memref<1x1x8x1xf32, #[[MAP0]]> to memref<1x1x8xf32, #[[MAP1]]>
+// CHECK-SAME:    memref<1x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>> to memref<1x1x8xf32
 //      CHECK:   %[[VEC:.+]] = vector.transfer_read %[[SRC_0]]
-// CHECK-SAME:    memref<1x1x8xf32, #[[MAP1]]>, vector<1x8xf32>
+// CHECK-SAME:    memref<1x1x8xf32, {{.*}}>, vector<1x8xf32>
 //      CHECK:   %[[RESULT:.+]] = vector.shape_cast %[[VEC]]
 //      CHECK:   return %[[RESULT]]
 

diff --git a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
index 3c8e280212bed..e4d77459b1beb 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
@@ -62,12 +62,10 @@ func.func @transfer_read_0d(%arg : memref<i8>) -> vector<i8> {
 
 // -----
 
-#map0 = affine_map<(d0, d1, d2, d3)[s0, s1] -> (d0 * s1 + s0 + d1 * 32 + d2 * 4 + d3)>
-
-func.func @transfer_read_flattenable_with_dynamic_dims_and_indices(%arg0 : memref<?x?x8x4xi8, #map0>, %arg1 : index, %arg2 : index) -> vector<8x4xi8> {
+func.func @transfer_read_flattenable_with_dynamic_dims_and_indices(%arg0 : memref<?x?x8x4xi8, strided<[?, 32, 4, 1], offset: ?>>, %arg1 : index, %arg2 : index) -> vector<8x4xi8> {
     %c0_i8 = arith.constant 0 : i8
     %c0 = arith.constant 0 : index
-    %result = vector.transfer_read %arg0[%arg1, %arg2, %c0, %c0], %c0_i8 {in_bounds = [true, true]} : memref<?x?x8x4xi8, #map0>, vector<8x4xi8>
+    %result = vector.transfer_read %arg0[%arg1, %arg2, %c0, %c0], %c0_i8 {in_bounds = [true, true]} : memref<?x?x8x4xi8, strided<[?, 32, 4, 1], offset: ?>>, vector<8x4xi8>
     return %result : vector<8x4xi8>
 }
 
@@ -86,11 +84,9 @@ func.func @transfer_read_flattenable_with_dynamic_dims_and_indices(%arg0 : memre
 
 // -----
 
-#map0 = affine_map<(d0, d1, d2, d3)[s0, s1] -> (d0 * s1 + s0 + d1 * 32 + d2 * 4 + d3)>
-
-func.func @transfer_write_flattenable_with_dynamic_dims_and_indices(%vec : vector<8x4xi8>, %dst : memref<?x?x8x4xi8, #map0>, %arg1 : index, %arg2 : index) {
+func.func @transfer_write_flattenable_with_dynamic_dims_and_indices(%vec : vector<8x4xi8>, %dst : memref<?x?x8x4xi8, strided<[?, 32, 4, 1], offset: ?>>, %arg1 : index, %arg2 : index) {
     %c0 = arith.constant 0 : index
-    vector.transfer_write %vec, %dst[%arg1, %arg2, %c0, %c0] {in_bounds = [true, true]} : vector<8x4xi8>, memref<?x?x8x4xi8, #map0>
+    vector.transfer_write %vec, %dst[%arg1, %arg2, %c0, %c0] {in_bounds = [true, true]} : vector<8x4xi8>, memref<?x?x8x4xi8, strided<[?, 32, 4, 1], offset: ?>>
     return
 }
 

diff  --git a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
index 200033d608ec8..7f86dc3371371 100644
--- a/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
+++ b/mlir/test/Dialect/Vector/vector-warp-distribute.mlir
@@ -85,24 +85,23 @@ func.func @rewrite_warp_op_to_scf_if(%laneid: index,
 // CHECK-DIST-AND-PROP: vector.transfer_write {{.*}} : vector<1xf32>
 // CHECK-DIST-AND-PROP: vector.transfer_write {{.*}} : vector<2xf32>
 
-#map0 =  affine_map<(d0)[s0] -> (d0 + s0)>
 func.func @warp(%laneid: index, %arg1: memref<1024xf32>, %arg2: memref<1024xf32>,
            %arg3: memref<1024xf32>, %gid : index) {
   vector.warp_execute_on_lane_0(%laneid)[32] {
-    %sa = memref.subview %arg1[%gid] [128] [1] : memref<1024xf32> to memref<128xf32, #map0>
-    %sb = memref.subview %arg2[%gid] [128] [1] : memref<1024xf32> to memref<128xf32, #map0>
-    %sc = memref.subview %arg3[%gid] [128] [1] : memref<1024xf32> to memref<128xf32, #map0>
+    %sa = memref.subview %arg1[%gid] [128] [1] : memref<1024xf32> to memref<128xf32, strided<[1], offset: ?>>
+    %sb = memref.subview %arg2[%gid] [128] [1] : memref<1024xf32> to memref<128xf32, strided<[1], offset: ?>>
+    %sc = memref.subview %arg3[%gid] [128] [1] : memref<1024xf32> to memref<128xf32, strided<[1], offset: ?>>
     %c0 = arith.constant 0 : index
     %c32 = arith.constant 32 : index
     %cst = arith.constant 0.000000e+00 : f32
-    %2 = vector.transfer_read %sa[%c0], %cst : memref<128xf32, #map0>, vector<32xf32>
-    %3 = vector.transfer_read %sa[%c32], %cst : memref<128xf32, #map0>, vector<32xf32>
-    %4 = vector.transfer_read %sb[%c0], %cst : memref<128xf32, #map0>, vector<64xf32>
-    %5 = vector.transfer_read %sb[%c32], %cst : memref<128xf32, #map0>, vector<64xf32>
+    %2 = vector.transfer_read %sa[%c0], %cst : memref<128xf32, strided<[1], offset: ?>>, vector<32xf32>
+    %3 = vector.transfer_read %sa[%c32], %cst : memref<128xf32, strided<[1], offset: ?>>, vector<32xf32>
+    %4 = vector.transfer_read %sb[%c0], %cst : memref<128xf32, strided<[1], offset: ?>>, vector<64xf32>
+    %5 = vector.transfer_read %sb[%c32], %cst : memref<128xf32, strided<[1], offset: ?>>, vector<64xf32>
     %6 = arith.addf %2, %3 : vector<32xf32>
     %7 = arith.addf %4, %5 : vector<64xf32>
-    vector.transfer_write %6, %sc[%c0] : vector<32xf32>, memref<128xf32, #map0>
-    vector.transfer_write %7, %sc[%c32] : vector<64xf32>, memref<128xf32, #map0>
+    vector.transfer_write %6, %sc[%c0] : vector<32xf32>, memref<128xf32, strided<[1], offset: ?>>
+    vector.transfer_write %7, %sc[%c32] : vector<64xf32>, memref<128xf32, strided<[1], offset: ?>>
   }
   return
 }

diff  --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir
index 68bcd6fa96b62..3d5165e797ade 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir
@@ -37,9 +37,6 @@ func.func @transfer_read_1d(%A : memref<?x?xf32>, %base1 : index, %base2 : index
   return
 }
 
-#map0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-#map1 = affine_map<(d0, d1) -> (6 * d0 + 2 * d1)>
-
 // Vector load with unit stride only on last dim.
 func.func @transfer_read_1d_unit_stride(%A : memref<?x?xf32>) {
   %c0 = arith.constant 0 : index
@@ -53,9 +50,9 @@ func.func @transfer_read_1d_unit_stride(%A : memref<?x?xf32>) {
   scf.for %arg2 = %c1 to %c5 step %c2 {
     scf.for %arg3 = %c0 to %c6 step %c3 {
       %0 = memref.subview %A[%arg2, %arg3] [1, 2] [1, 1]
-          : memref<?x?xf32> to memref<1x2xf32, #map0>
+          : memref<?x?xf32> to memref<1x2xf32, strided<[?, 1], offset: ?>>
       %1 = vector.transfer_read %0[%c0, %c0], %fm42 {in_bounds=[true]}
-          : memref<1x2xf32, #map0>, vector<2xf32>
+          : memref<1x2xf32, strided<[?, 1], offset: ?>>, vector<2xf32>
       vector.print %1 : vector<2xf32>
     }
   }
@@ -80,11 +77,11 @@ func.func @transfer_read_1d_non_static_unit_stride(%A : memref<?x?xf32>) {
 // Vector load where last dim has non-unit stride.
 func.func @transfer_read_1d_non_unit_stride(%A : memref<?x?xf32>) {
   %B = memref.reinterpret_cast %A to offset: [0], sizes: [4, 3], strides: [6, 2]
-      : memref<?x?xf32> to memref<4x3xf32, #map1>
+      : memref<?x?xf32> to memref<4x3xf32, strided<[6, 2]>>
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
   %fm42 = arith.constant -42.0: f32
-  %vec = vector.transfer_read %B[%c2, %c1], %fm42 {in_bounds=[false]} : memref<4x3xf32, #map1>, vector<3xf32>
+  %vec = vector.transfer_read %B[%c2, %c1], %fm42 {in_bounds=[false]} : memref<4x3xf32, strided<[6, 2]>>, vector<3xf32>
   vector.print %vec : vector<3xf32>
   return
 }

diff  --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index bd16f38362098..827ca899139d2 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -474,10 +474,6 @@ func.func @dyn_shape_fold(%L : index, %M : index) -> (memref<4 x ? x 8 x ? x ? x
   return %b, %c, %d, %e : memref<4 x ? x 8 x ? x ? x f32>, memref<? x ? x i32>, memref<? x ? x f32>, memref<4 x ? x 8 x ? x ? x f32>
 }
 
-#map1 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-#map2 = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s2 + d1 * s1 + d2 + s0)>
-#map3 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
 // CHECK-LABEL: func @dim_op_fold(
 // CHECK-SAME: %[[ARG0:[a-z0-9]*]]: index
 // CHECK-SAME: %[[ARG1:[a-z0-9]*]]: index
@@ -499,15 +495,15 @@ func.func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?x
     affine.for %arg4 = 0 to %ub {
       %s = memref.dim %0, %c0 : memref<?x?xf32>
       %v = memref.view %3[%c0][%arg4, %s] : memref<?xi8> to memref<?x?xf32>
-      %sv = memref.subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref<?x?xf32> to memref<?x?xf32, #map1>
+      %sv = memref.subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref<?x?xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
       %l = memref.dim %v, %c1 : memref<?x?xf32>
-      %u = memref.dim %sv, %c0 : memref<?x?xf32, #map1>
+      %u = memref.dim %sv, %c0 : memref<?x?xf32, strided<[?, ?], offset: ?>>
       affine.for %arg5 = %l to %u {
         "foo"() : () -> ()
       }
-      %sv2 = memref.subview %0[0, 0][17, %arg4][1, 1] : memref<?x?xf32> to memref<17x?xf32, #map3>
+      %sv2 = memref.subview %0[0, 0][17, %arg4][1, 1] : memref<?x?xf32> to memref<17x?xf32, strided<[?, 1], offset: ?>>
       %l2 = memref.dim %v, %c1 : memref<?x?xf32>
-      %u2 = memref.dim %sv2, %c1 : memref<17x?xf32, #map3>
+      %u2 = memref.dim %sv2, %c1 : memref<17x?xf32, strided<[?, 1], offset: ?>>
       scf.for %arg5 = %l2 to %u2 step %c1 {
         "foo"() : () -> ()
       }