[Mlir-commits] [mlir] 95371ce - Enable FileCheck -enable-var-scope by default in MLIR tests

Mehdi Amini llvmlistbot at llvm.org
Thu Jun 11 17:46:23 PDT 2020


Author: Mehdi Amini
Date: 2020-06-12T00:43:09Z
New Revision: 95371ce9c2cb835b47866c75756f2c6734a1fe9b

URL: https://github.com/llvm/llvm-project/commit/95371ce9c2cb835b47866c75756f2c6734a1fe9b
DIFF: https://github.com/llvm/llvm-project/commit/95371ce9c2cb835b47866c75756f2c6734a1fe9b.diff

LOG: Enable FileCheck -enable-var-scope by default in MLIR tests

This option prevents accidentally reusing a variable across -LABEL matches;
cross-label reuse can be explicitly opted into by prefixing the variable name with $.
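
As a minimal sketch of the scoping rule (the function @foo and the map name below
are hypothetical, not taken from this diff): with -enable-var-scope, a plain
[[VAR]] definition is discarded at the next CHECK-LABEL, while a $-prefixed
variable remains visible across it.

    // Local variable: discarded at the next CHECK-LABEL, so the second use fails.
    // CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0)>
    // CHECK-LABEL: func @foo
    // CHECK: affine.apply #[[MAP]]

    // $-prefixed variable: stays in scope across the -LABEL, so the second use matches.
    // CHECK: #[[$MAP:.*]] = affine_map<(d0) -> (d0)>
    // CHECK-LABEL: func @foo
    // CHECK: affine.apply #[[$MAP]]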

Differential Revision: https://reviews.llvm.org/D81531

Added: 
    

Modified: 
    mlir/test/Conversion/AffineToStandard/lower-affine.mlir
    mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
    mlir/test/Conversion/GPUToSPIRV/load-store.mlir
    mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
    mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
    mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
    mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
    mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
    mlir/test/Dialect/Affine/SuperVectorize/normalize_maps.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
    mlir/test/Dialect/Affine/affine-data-copy.mlir
    mlir/test/Dialect/Affine/canonicalize.mlir
    mlir/test/Dialect/Affine/loop-tiling.mlir
    mlir/test/Dialect/Affine/loop-unswitch.mlir
    mlir/test/Dialect/Affine/ops.mlir
    mlir/test/Dialect/Affine/simplify-affine-structures.mlir
    mlir/test/Dialect/Affine/unroll-jam.mlir
    mlir/test/Dialect/Affine/unroll.mlir
    mlir/test/Dialect/Linalg/affine.mlir
    mlir/test/Dialect/Linalg/canonicalize.mlir
    mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
    mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
    mlir/test/Dialect/Linalg/fusion-tensor.mlir
    mlir/test/Dialect/Linalg/fusion.mlir
    mlir/test/Dialect/Linalg/loops.mlir
    mlir/test/Dialect/Linalg/promote.mlir
    mlir/test/Dialect/Linalg/roundtrip.mlir
    mlir/test/Dialect/Linalg/standard.mlir
    mlir/test/Dialect/Linalg/tile.mlir
    mlir/test/Dialect/Linalg/tile_conv.mlir
    mlir/test/Dialect/Linalg/tile_conv_padding.mlir
    mlir/test/Dialect/Linalg/transform-patterns.mlir
    mlir/test/Dialect/Quant/canonicalize.mlir
    mlir/test/Dialect/Quant/convert-const.mlir
    mlir/test/Dialect/Quant/convert-fakequant.mlir
    mlir/test/Dialect/Shape/canonicalize.mlir
    mlir/test/Dialect/Vector/vector-contract-transforms.mlir
    mlir/test/Dialect/Vector/vector-slices-transforms.mlir
    mlir/test/IR/core-ops.mlir
    mlir/test/IR/parser.mlir
    mlir/test/IR/test-func-set-type.mlir
    mlir/test/Transforms/canonicalize-dce.mlir
    mlir/test/Transforms/canonicalize.mlir
    mlir/test/Transforms/location-snapshot.mlir
    mlir/test/Transforms/loop-fusion.mlir
    mlir/test/Transforms/memref-dataflow-opt.mlir
    mlir/test/Transforms/pipeline-data-transfer.mlir
    mlir/test/lit.cfg.py

Removed: 
    


################################################################################
diff  --git a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
index faac1cbf153a..2ca3ffe947fb 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
@@ -329,7 +329,7 @@ func @if_for() {
 // CHECK-NEXT:     %[[f:.*]] = cmpi "slt", %{{.*}}, %[[e]] : index
 // CHECK-NEXT:     %[[g:.*]] = select %[[f]], %{{.*}}, %[[e]] : index
 // CHECK-NEXT:     %[[c1_0:.*]] = constant 1 : index
-// CHECK-NEXT:     for %{{.*}} = %[[v3]] to %[[v6]] step %[[c1_0]] {
+// CHECK-NEXT:     for %{{.*}} = %[[d]] to %[[g]] step %[[c1_0]] {
 // CHECK-NEXT:       call @body2(%{{.*}}, %{{.*}}) : (index, index) -> ()
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
@@ -364,7 +364,7 @@ func @loop_min_max(%N : index) {
 // CHECK-NEXT:   %[[c0123456:.+]] = cmpi "slt", %[[r012345]], %{{.*}} : index
 // CHECK-NEXT:   %[[r0123456:.+]] = select %[[c0123456]], %[[r012345]], %{{.*}} : index
 // CHECK-NEXT:   %[[c1:.*]] = constant 1 : index
-// CHECK-NEXT:   for %{{.*}} = %[[c0]] to %[[v11]] step %[[c1]] {
+// CHECK-NEXT:   for %{{.*}} = %[[c0]] to %[[r0123456]] step %[[c1]] {
 // CHECK-NEXT:     call @body(%{{.*}}) : (index) -> ()
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return

diff  --git a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
index f0c9b414c9c8..c13afefdb78d 100644
--- a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
+++ b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
@@ -52,11 +52,11 @@ gpu.module @kernel {
 
 gpu.module @kernel {
   // Workgroup buffers are allocated as globals.
-  // NVVM: llvm.mlir.global internal @[[buffer:.*]]()
+  // NVVM: llvm.mlir.global internal @[[$buffer:.*]]()
   // NVVM-SAME:  addr_space = 3
   // NVVM-SAME:  !llvm<"[4 x float]">
 
-  // ROCDL: llvm.mlir.global internal @[[buffer:.*]]()
+  // ROCDL: llvm.mlir.global internal @[[$buffer:.*]]()
   // ROCDL-SAME:  addr_space = 3
   // ROCDL-SAME:  !llvm<"[4 x float]">
 
@@ -68,12 +68,12 @@ gpu.module @kernel {
   gpu.func @workgroup(%arg0: f32) workgroup(%arg1: memref<4xf32, 3>) {
     // Get the address of the first element in the global array.
     // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[buffer]] : !llvm<"[4 x float] addrspace(3)*">
+    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm<"[4 x float] addrspace(3)*">
     // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
     // NVVM-SAME: !llvm<"float addrspace(3)*">
 
     // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[buffer]] : !llvm<"[4 x float] addrspace(3)*">
+    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm<"[4 x float] addrspace(3)*">
     // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
     // ROCDL-SAME: !llvm<"float addrspace(3)*">
 
@@ -118,11 +118,11 @@ gpu.module @kernel {
 
 gpu.module @kernel {
   // Check that the total size was computed correctly.
-  // NVVM: llvm.mlir.global internal @[[buffer:.*]]()
+  // NVVM: llvm.mlir.global internal @[[$buffer:.*]]()
   // NVVM-SAME:  addr_space = 3
   // NVVM-SAME:  !llvm<"[48 x float]">
 
-  // ROCDL: llvm.mlir.global internal @[[buffer:.*]]()
+  // ROCDL: llvm.mlir.global internal @[[$buffer:.*]]()
   // ROCDL-SAME:  addr_space = 3
   // ROCDL-SAME:  !llvm<"[48 x float]">
 
@@ -131,12 +131,12 @@ gpu.module @kernel {
   gpu.func @workgroup3d(%arg0: f32) workgroup(%arg1: memref<4x2x6xf32, 3>) {
     // Get the address of the first element in the global array.
     // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[buffer]] : !llvm<"[48 x float] addrspace(3)*">
+    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm<"[48 x float] addrspace(3)*">
     // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
     // NVVM-SAME: !llvm<"float addrspace(3)*">
 
     // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[buffer]] : !llvm<"[48 x float] addrspace(3)*">
+    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm<"[48 x float] addrspace(3)*">
     // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
     // ROCDL-SAME: !llvm<"float addrspace(3)*">
 
@@ -187,14 +187,14 @@ gpu.module @kernel {
 
 gpu.module @kernel {
   // Check that several buffers are defined.
-  // NVVM: llvm.mlir.global internal @[[buffer1:.*]]()
+  // NVVM: llvm.mlir.global internal @[[$buffer1:.*]]()
   // NVVM-SAME:  !llvm<"[1 x float]">
-  // NVVM: llvm.mlir.global internal @[[buffer2:.*]]()
+  // NVVM: llvm.mlir.global internal @[[$buffer2:.*]]()
   // NVVM-SAME:  !llvm<"[2 x float]">
 
-  // ROCDL: llvm.mlir.global internal @[[buffer1:.*]]()
+  // ROCDL: llvm.mlir.global internal @[[$buffer1:.*]]()
   // ROCDL-SAME:  !llvm<"[1 x float]">
-  // ROCDL: llvm.mlir.global internal @[[buffer2:.*]]()
+  // ROCDL: llvm.mlir.global internal @[[$buffer2:.*]]()
   // ROCDL-SAME:  !llvm<"[2 x float]">
 
   // NVVM-LABEL: llvm.func @multiple
@@ -204,11 +204,11 @@ gpu.module @kernel {
       private(%arg3: memref<3xf32, 5>, %arg4: memref<4xf32, 5>) {
 
     // Workgroup buffers.
-    // NVVM: llvm.mlir.addressof @[[buffer1]]
-    // NVVM: llvm.mlir.addressof @[[buffer2]]
+    // NVVM: llvm.mlir.addressof @[[$buffer1]]
+    // NVVM: llvm.mlir.addressof @[[$buffer2]]
 
-    // ROCDL: llvm.mlir.addressof @[[buffer1]]
-    // ROCDL: llvm.mlir.addressof @[[buffer2]]
+    // ROCDL: llvm.mlir.addressof @[[$buffer1]]
+    // ROCDL: llvm.mlir.addressof @[[$buffer2]]
 
     // Private buffers.
     // NVVM: %[[c3:.*]] = llvm.mlir.constant(3 : i64)

diff  --git a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
index 12a5d9df61a8..f33ee9783a51 100644
--- a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
@@ -24,8 +24,8 @@ module attributes {
   // CHECK-LABEL: spv.module Logical GLSL450
   gpu.module @kernels {
     // CHECK-DAG: spv.globalVariable @[[NUMWORKGROUPSVAR:.*]] built_in("NumWorkgroups") : !spv.ptr<vector<3xi32>, Input>
-    // CHECK-DAG: spv.globalVariable @[[LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spv.ptr<vector<3xi32>, Input>
-    // CHECK-DAG: spv.globalVariable @[[WORKGROUPIDVAR:.*]] built_in("WorkgroupId") : !spv.ptr<vector<3xi32>, Input>
+    // CHECK-DAG: spv.globalVariable @[[$LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spv.ptr<vector<3xi32>, Input>
+    // CHECK-DAG: spv.globalVariable @[[$WORKGROUPIDVAR:.*]] built_in("WorkgroupId") : !spv.ptr<vector<3xi32>, Input>
     // CHECK-LABEL:    spv.func @load_store_kernel
     // CHECK-SAME: %[[ARG0:.*]]: !spv.ptr<!spv.struct<!spv.array<48 x f32, stride=4> [0]>, StorageBuffer> {spv.interface_var_abi = #spv.interface_var_abi<(0, 0)>}
     // CHECK-SAME: %[[ARG1:.*]]: !spv.ptr<!spv.struct<!spv.array<48 x f32, stride=4> [0]>, StorageBuffer> {spv.interface_var_abi = #spv.interface_var_abi<(0, 1)>}
@@ -36,10 +36,10 @@ module attributes {
     // CHECK-SAME: %[[ARG6:.*]]: i32 {spv.interface_var_abi = #spv.interface_var_abi<(0, 6), StorageBuffer>}
     gpu.func @load_store_kernel(%arg0: memref<12x4xf32>, %arg1: memref<12x4xf32>, %arg2: memref<12x4xf32>, %arg3: index, %arg4: index, %arg5: index, %arg6: index) kernel
       attributes {spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}} {
-      // CHECK: %[[ADDRESSWORKGROUPID:.*]] = spv._address_of @[[WORKGROUPIDVAR]]
+      // CHECK: %[[ADDRESSWORKGROUPID:.*]] = spv._address_of @[[$WORKGROUPIDVAR]]
       // CHECK: %[[WORKGROUPID:.*]] = spv.Load "Input" %[[ADDRESSWORKGROUPID]]
       // CHECK: %[[WORKGROUPIDX:.*]] = spv.CompositeExtract %[[WORKGROUPID]]{{\[}}0 : i32{{\]}}
-      // CHECK: %[[ADDRESSLOCALINVOCATIONID:.*]] = spv._address_of @[[LOCALINVOCATIONIDVAR]]
+      // CHECK: %[[ADDRESSLOCALINVOCATIONID:.*]] = spv._address_of @[[$LOCALINVOCATIONIDVAR]]
       // CHECK: %[[LOCALINVOCATIONID:.*]] = spv.Load "Input" %[[ADDRESSLOCALINVOCATIONID]]
       // CHECK: %[[LOCALINVOCATIONIDX:.*]] = spv.CompositeExtract %[[LOCALINVOCATIONID]]{{\[}}0 : i32{{\]}}
       %0 = "gpu.block_id"() {dimension = "x"} : () -> index

diff  --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
index 791ba6f20ac0..b4131a3fda0e 100644
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -15,19 +15,19 @@ func @parallel_loop_bidy_bidx(%arg0 : index, %arg1 : index, %arg2 : index,
   return
 }
 
-// CHECK:       #[[MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
-// CHECK:       #[[MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
+// CHECK:       #[[$MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
+// CHECK:       #[[$MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
 
 // CHECK:       module {
 // CHECK-LABEL:   func @parallel_loop_bidy_bidx(
 // CHECK-SAME:                                  [[VAL_0:%.*]]: index, [[VAL_1:%.*]]: index, [[VAL_2:%.*]]: index, [[VAL_3:%.*]]: index, [[VAL_4:%.*]]: index, [[VAL_5:%.*]]: memref<?x?xf32>, [[VAL_6:%.*]]: memref<?x?xf32>) {
 // CHECK:           [[VAL_7:%.*]] = constant 2 : index
 // CHECK:           [[VAL_8:%.*]] = constant 1 : index
-// CHECK:           [[VAL_9:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_2]], [[VAL_0]], [[VAL_4]]]
-// CHECK:           [[VAL_10:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_3]], [[VAL_1]], [[VAL_7]]]
+// CHECK:           [[VAL_9:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_2]], [[VAL_0]], [[VAL_4]]]
+// CHECK:           [[VAL_10:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_3]], [[VAL_1]], [[VAL_7]]]
 // CHECK:           gpu.launch blocks([[VAL_11:%.*]], [[VAL_12:%.*]], [[VAL_13:%.*]]) in ([[VAL_14:%.*]] = [[VAL_10]], [[VAL_15:%.*]] = [[VAL_9]], [[VAL_16:%.*]] = [[VAL_8]]) threads([[VAL_17:%.*]], [[VAL_18:%.*]], [[VAL_19:%.*]]) in ([[VAL_20:%.*]] = [[VAL_8]], [[VAL_21:%.*]] = [[VAL_8]], [[VAL_22:%.*]] = [[VAL_8]]) {
-// CHECK:             [[VAL_23:%.*]] = affine.apply #[[MAP1]]([[VAL_12]]){{\[}}[[VAL_4]], [[VAL_0]]]
-// CHECK:             [[VAL_24:%.*]] = affine.apply #[[MAP1]]([[VAL_11]]){{\[}}[[VAL_7]], [[VAL_1]]]
+// CHECK:             [[VAL_23:%.*]] = affine.apply #[[$MAP1]]([[VAL_12]]){{\[}}[[VAL_4]], [[VAL_0]]]
+// CHECK:             [[VAL_24:%.*]] = affine.apply #[[$MAP1]]([[VAL_11]]){{\[}}[[VAL_7]], [[VAL_1]]]
 // CHECK:             [[VAL_25:%.*]] = load [[VAL_5]]{{\[}}[[VAL_23]], [[VAL_24]]] : memref<?x?xf32>
 // CHECK:             store [[VAL_25]], [[VAL_6]]{{\[}}[[VAL_24]], [[VAL_23]]] : memref<?x?xf32>
 // CHECK:             gpu.terminator
@@ -66,8 +66,8 @@ func @parallel_loop_tiled(%arg0 : index, %arg1 : index, %arg2 : index,
   return
 }
 
-// CHECK:       #[[MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
-// CHECK:       #[[MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
+// CHECK:       #[[$MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
+// CHECK:       #[[$MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
 
 // CHECK:       module {
 // CHECK-LABEL:   func @parallel_loop_tiled(
@@ -76,15 +76,15 @@ func @parallel_loop_tiled(%arg0 : index, %arg1 : index, %arg2 : index,
 // CHECK:           [[VAL_33:%.*]] = constant 1 : index
 // CHECK:           [[VAL_34:%.*]] = constant 4 : index
 // CHECK:           [[VAL_35:%.*]] = constant 1 : index
-// CHECK:           [[VAL_36:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_28]], [[VAL_26]], [[VAL_34]]]
-// CHECK:           [[VAL_37:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_29]], [[VAL_27]], [[VAL_34]]]
-// CHECK:           [[VAL_38:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_34]], [[VAL_32]], [[VAL_33]]]
-// CHECK:           [[VAL_39:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_34]], [[VAL_32]], [[VAL_33]]]
+// CHECK:           [[VAL_36:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_28]], [[VAL_26]], [[VAL_34]]]
+// CHECK:           [[VAL_37:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_29]], [[VAL_27]], [[VAL_34]]]
+// CHECK:           [[VAL_38:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_34]], [[VAL_32]], [[VAL_33]]]
+// CHECK:           [[VAL_39:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_34]], [[VAL_32]], [[VAL_33]]]
 // CHECK:           gpu.launch blocks([[VAL_40:%.*]], [[VAL_41:%.*]], [[VAL_42:%.*]]) in ([[VAL_43:%.*]] = [[VAL_37]], [[VAL_44:%.*]] = [[VAL_36]], [[VAL_45:%.*]] = [[VAL_35]]) threads([[VAL_46:%.*]], [[VAL_47:%.*]], [[VAL_48:%.*]]) in ([[VAL_49:%.*]] = [[VAL_39]], [[VAL_50:%.*]] = [[VAL_38]], [[VAL_51:%.*]] = [[VAL_35]]) {
-// CHECK:             [[VAL_52:%.*]] = affine.apply #[[MAP1]]([[VAL_41]]){{\[}}[[VAL_34]], [[VAL_26]]]
-// CHECK:             [[VAL_53:%.*]] = affine.apply #[[MAP1]]([[VAL_40]]){{\[}}[[VAL_34]], [[VAL_27]]]
-// CHECK:             [[VAL_54:%.*]] = affine.apply #[[MAP1]]([[VAL_47]]){{\[}}[[VAL_33]], [[VAL_32]]]
-// CHECK:             [[VAL_55:%.*]] = affine.apply #[[MAP1]]([[VAL_46]]){{\[}}[[VAL_33]], [[VAL_32]]]
+// CHECK:             [[VAL_52:%.*]] = affine.apply #[[$MAP1]]([[VAL_41]]){{\[}}[[VAL_34]], [[VAL_26]]]
+// CHECK:             [[VAL_53:%.*]] = affine.apply #[[$MAP1]]([[VAL_40]]){{\[}}[[VAL_34]], [[VAL_27]]]
+// CHECK:             [[VAL_54:%.*]] = affine.apply #[[$MAP1]]([[VAL_47]]){{\[}}[[VAL_33]], [[VAL_32]]]
+// CHECK:             [[VAL_55:%.*]] = affine.apply #[[$MAP1]]([[VAL_46]]){{\[}}[[VAL_33]], [[VAL_32]]]
 // CHECK:             [[VAL_56:%.*]] = addi [[VAL_52]], [[VAL_54]] : index
 // CHECK:             [[VAL_57:%.*]] = addi [[VAL_53]], [[VAL_55]] : index
 // CHECK:             [[VAL_58:%.*]] = load [[VAL_30]]{{\[}}[[VAL_56]], [[VAL_57]]] : memref<?x?xf32>
@@ -115,17 +115,17 @@ func @parallel_loop_bidy_seq(%arg0 : index, %arg1 : index, %arg2 : index,
   return
 }
 
-// CHECK:       #[[MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
-// CHECK:       #[[MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
+// CHECK:       #[[$MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
+// CHECK:       #[[$MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
 
 // CHECK:       module {
 // CHECK-LABEL:   func @parallel_loop_bidy_seq(
 // CHECK-SAME:                                 [[VAL_59:%.*]]: index, [[VAL_60:%.*]]: index, [[VAL_61:%.*]]: index, [[VAL_62:%.*]]: index, [[VAL_63:%.*]]: index, [[VAL_64:%.*]]: memref<?x?xf32>, [[VAL_65:%.*]]: memref<?x?xf32>) {
 // CHECK:           [[VAL_66:%.*]] = constant 2 : index
 // CHECK:           [[VAL_67:%.*]] = constant 1 : index
-// CHECK:           [[VAL_68:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_61]], [[VAL_59]], [[VAL_63]]]
+// CHECK:           [[VAL_68:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_61]], [[VAL_59]], [[VAL_63]]]
 // CHECK:           gpu.launch blocks([[VAL_69:%.*]], [[VAL_70:%.*]], [[VAL_71:%.*]]) in ([[VAL_72:%.*]] = [[VAL_67]], [[VAL_73:%.*]] = [[VAL_68]], [[VAL_74:%.*]] = [[VAL_67]]) threads([[VAL_75:%.*]], [[VAL_76:%.*]], [[VAL_77:%.*]]) in ([[VAL_78:%.*]] = [[VAL_67]], [[VAL_79:%.*]] = [[VAL_67]], [[VAL_80:%.*]] = [[VAL_67]]) {
-// CHECK:             [[VAL_81:%.*]] = affine.apply #[[MAP1]]([[VAL_70]]){{\[}}[[VAL_63]], [[VAL_59]]]
+// CHECK:             [[VAL_81:%.*]] = affine.apply #[[$MAP1]]([[VAL_70]]){{\[}}[[VAL_63]], [[VAL_59]]]
 // CHECK:             scf.for [[VAL_82:%.*]] = [[VAL_60]] to [[VAL_62]] step [[VAL_66]] {
 // CHECK:               [[VAL_83:%.*]] = load [[VAL_64]]{{\[}}[[VAL_81]], [[VAL_82]]] : memref<?x?xf32>
 // CHECK:               store [[VAL_83]], [[VAL_65]]{{\[}}[[VAL_82]], [[VAL_81]]] : memref<?x?xf32>
@@ -166,8 +166,8 @@ func @parallel_loop_tiled_seq(%arg0 : index, %arg1 : index, %arg2 : index,
   return
 }
 
-// CHECK:       #[[MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
-// CHECK:       #[[MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
+// CHECK:       #[[$MAP0:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
+// CHECK:       #[[$MAP1:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
 
 // CHECK:       module {
 // CHECK-LABEL:   func @parallel_loop_tiled_seq(
@@ -176,12 +176,12 @@ func @parallel_loop_tiled_seq(%arg0 : index, %arg1 : index, %arg2 : index,
 // CHECK:           [[VAL_91:%.*]] = constant 1 : index
 // CHECK:           [[VAL_92:%.*]] = constant 4 : index
 // CHECK:           [[VAL_93:%.*]] = constant 1 : index
-// CHECK:           [[VAL_94:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_86]], [[VAL_84]], [[VAL_92]]]
-// CHECK:           [[VAL_95:%.*]] = affine.apply #[[MAP0]](){{\[}}[[VAL_92]], [[VAL_90]], [[VAL_91]]]
+// CHECK:           [[VAL_94:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_86]], [[VAL_84]], [[VAL_92]]]
+// CHECK:           [[VAL_95:%.*]] = affine.apply #[[$MAP0]](){{\[}}[[VAL_92]], [[VAL_90]], [[VAL_91]]]
 // CHECK:           gpu.launch blocks([[VAL_96:%.*]], [[VAL_97:%.*]], [[VAL_98:%.*]]) in ([[VAL_99:%.*]] = [[VAL_93]], [[VAL_100:%.*]] = [[VAL_94]], [[VAL_101:%.*]] = [[VAL_93]]) threads([[VAL_102:%.*]], [[VAL_103:%.*]], [[VAL_104:%.*]]) in ([[VAL_105:%.*]] = [[VAL_93]], [[VAL_106:%.*]] = [[VAL_95]], [[VAL_107:%.*]] = [[VAL_93]]) {
-// CHECK:             [[VAL_108:%.*]] = affine.apply #[[MAP1]]([[VAL_97]]){{\[}}[[VAL_92]], [[VAL_84]]]
+// CHECK:             [[VAL_108:%.*]] = affine.apply #[[$MAP1]]([[VAL_97]]){{\[}}[[VAL_92]], [[VAL_84]]]
 // CHECK:             scf.for [[VAL_109:%.*]] = [[VAL_85]] to [[VAL_87]] step [[VAL_92]] {
-// CHECK:               [[VAL_110:%.*]] = affine.apply #[[MAP1]]([[VAL_103]]){{\[}}[[VAL_91]], [[VAL_90]]]
+// CHECK:               [[VAL_110:%.*]] = affine.apply #[[$MAP1]]([[VAL_103]]){{\[}}[[VAL_91]], [[VAL_90]]]
 // CHECK:               scf.for [[VAL_111:%.*]] = [[VAL_90]] to [[VAL_92]] step [[VAL_91]] {
 // CHECK:                 [[VAL_112:%.*]] = addi [[VAL_108]], [[VAL_110]] : index
 // CHECK:                 [[VAL_113:%.*]] = addi [[VAL_109]], [[VAL_111]] : index
@@ -241,59 +241,59 @@ module {
   }
 }
 
-// CHECK:       #[[MAP0:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK:       #[[MAP1:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
-// CHECK:       #[[MAP2:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
-// CHECK:       #[[MAP3:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
-// CHECK:       #[[MAP4:.*]] = affine_map<(d0)[s0] -> (3, -d0 + s0)>
-// CHECK:       #[[MAP5:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// CHECK:       #[[$MAP0:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK:       #[[$MAP1:.*]] = affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>
+// CHECK:       #[[$MAP2:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
+// CHECK:       #[[$MAP3:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
+// CHECK:       #[[$MAP4:.*]] = affine_map<(d0)[s0] -> (3, -d0 + s0)>
+// CHECK:       #[[$MAP5:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 
 // CHECK:       module {
 // CHECK-LABEL:   func @sum(
-// CHECK-SAME:              [[VAL_0:%.*]]: memref<?x?xf32, #[[MAP0]]>, [[VAL_1:%.*]]: memref<?x?xf32, #[[MAP0]]>, [[VAL_2:%.*]]: memref<?x?xf32, #[[MAP0]]>) {
+// CHECK-SAME:              [[VAL_0:%.*]]: memref<?x?xf32, #[[$MAP0]]>, [[VAL_1:%.*]]: memref<?x?xf32, #[[$MAP0]]>, [[VAL_2:%.*]]: memref<?x?xf32, #[[$MAP0]]>) {
 // CHECK:           %[[C1:.*]] = constant 1 : index
 // CHECK:           %[[C0:.*]] = constant 0 : index
 // CHECK:           %[[C3:.*]] = constant 3 : index
 // CHECK:           %[[C2:.*]] = constant 2 : index
-// CHECK:           [[VAL_7:%.*]] = dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
-// CHECK:           [[VAL_8:%.*]] = dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
+// CHECK:           [[VAL_7:%.*]] = dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:           [[VAL_8:%.*]] = dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
 // CHECK:           [[VAL_9:%.*]] = constant 1 : index
-// CHECK:           [[VAL_10:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_7]], %[[C0]], %[[C2]]]
-// CHECK:           [[VAL_11:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_8]], %[[C0]], %[[C3]]]
+// CHECK:           [[VAL_10:%.*]] = affine.apply #[[$MAP1]](){{\[}}[[VAL_7]], %[[C0]], %[[C2]]]
+// CHECK:           [[VAL_11:%.*]] = affine.apply #[[$MAP1]](){{\[}}[[VAL_8]], %[[C0]], %[[C3]]]
 // CHECK:           [[VAL_12:%.*]] = constant 4 : index
-// CHECK:           [[VAL_13:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_12]], %[[C0]], %[[C1]]]
+// CHECK:           [[VAL_13:%.*]] = affine.apply #[[$MAP1]](){{\[}}[[VAL_12]], %[[C0]], %[[C1]]]
 // CHECK:           [[VAL_14:%.*]] = constant 3 : index
-// CHECK:           [[VAL_15:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_14]], %[[C0]], %[[C1]]]
+// CHECK:           [[VAL_15:%.*]] = affine.apply #[[$MAP1]](){{\[}}[[VAL_14]], %[[C0]], %[[C1]]]
 // CHECK:           gpu.launch blocks([[VAL_16:%.*]], [[VAL_17:%.*]], [[VAL_18:%.*]]) in ([[VAL_19:%.*]] = [[VAL_10]], [[VAL_20:%.*]] = [[VAL_11]], [[VAL_21:%.*]] = [[VAL_9]]) threads([[VAL_22:%.*]], [[VAL_23:%.*]], [[VAL_24:%.*]]) in ([[VAL_25:%.*]] = [[VAL_13]], [[VAL_26:%.*]] = [[VAL_15]], [[VAL_27:%.*]] = [[VAL_9]]) {
-// CHECK:             [[VAL_28:%.*]] = affine.apply #[[MAP2]]([[VAL_16]]){{\[}}%[[C2]], %[[C0]]]
-// CHECK:             [[VAL_29:%.*]] = affine.apply #[[MAP2]]([[VAL_17]]){{\[}}%[[C3]], %[[C0]]]
-// CHECK:             [[VAL_30:%.*]] = dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
-// CHECK:             [[VAL_31:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_30]]]
+// CHECK:             [[VAL_28:%.*]] = affine.apply #[[$MAP2]]([[VAL_16]]){{\[}}%[[C2]], %[[C0]]]
+// CHECK:             [[VAL_29:%.*]] = affine.apply #[[$MAP2]]([[VAL_17]]){{\[}}%[[C3]], %[[C0]]]
+// CHECK:             [[VAL_30:%.*]] = dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_31:%.*]] = affine.min #[[$MAP3]]([[VAL_28]]){{\[}}[[VAL_30]]]
 // CHECK:             [[VAL_31_SQUARED:%.*]] = muli [[VAL_31]], [[VAL_31]] : index
-// CHECK:             [[VAL_32:%.*]] = dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
-// CHECK:             [[VAL_33:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_32]]]
-// CHECK:             [[VAL_34:%.*]] = subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
-// CHECK:             [[VAL_35:%.*]] = dim [[VAL_1]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
-// CHECK:             [[VAL_36:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_35]]]
-// CHECK:             [[VAL_37:%.*]] = dim [[VAL_1]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
-// CHECK:             [[VAL_38:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_37]]]
-// CHECK:             [[VAL_39:%.*]] = subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
-// CHECK:             [[VAL_40:%.*]] = dim [[VAL_2]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
-// CHECK:             [[VAL_41:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_40]]]
-// CHECK:             [[VAL_42:%.*]] = dim [[VAL_2]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
-// CHECK:             [[VAL_43:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_42]]]
-// CHECK:             [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
-// CHECK:             [[VAL_45:%.*]] = affine.apply #[[MAP2]]([[VAL_22]]){{\[}}%[[C1]], %[[C0]]]
+// CHECK:             [[VAL_32:%.*]] = dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_33:%.*]] = affine.min #[[$MAP4]]([[VAL_29]]){{\[}}[[VAL_32]]]
+// CHECK:             [[VAL_34:%.*]] = subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[$MAP0]]> to memref<?x?xf32, #[[$MAP5]]>
+// CHECK:             [[VAL_35:%.*]] = dim [[VAL_1]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_36:%.*]] = affine.min #[[$MAP3]]([[VAL_28]]){{\[}}[[VAL_35]]]
+// CHECK:             [[VAL_37:%.*]] = dim [[VAL_1]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_38:%.*]] = affine.min #[[$MAP4]]([[VAL_29]]){{\[}}[[VAL_37]]]
+// CHECK:             [[VAL_39:%.*]] = subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[$MAP0]]> to memref<?x?xf32, #[[$MAP5]]>
+// CHECK:             [[VAL_40:%.*]] = dim [[VAL_2]], %[[C0]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_41:%.*]] = affine.min #[[$MAP3]]([[VAL_28]]){{\[}}[[VAL_40]]]
+// CHECK:             [[VAL_42:%.*]] = dim [[VAL_2]], %[[C1]] : memref<?x?xf32, #[[$MAP0]]>
+// CHECK:             [[VAL_43:%.*]] = affine.min #[[$MAP4]]([[VAL_29]]){{\[}}[[VAL_42]]]
+// CHECK:             [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[$MAP0]]> to memref<?x?xf32, #[[$MAP5]]>
+// CHECK:             [[VAL_45:%.*]] = affine.apply #[[$MAP2]]([[VAL_22]]){{\[}}%[[C1]], %[[C0]]]
 // CHECK:             [[VAL_46:%.*]] = cmpi "slt", [[VAL_45]], [[VAL_31_SQUARED]] : index
 // CHECK:             scf.if [[VAL_46]] {
-// CHECK:               [[VAL_47:%.*]] = affine.apply #[[MAP2]]([[VAL_23]]){{\[}}%[[C1]], %[[C0]]]
+// CHECK:               [[VAL_47:%.*]] = affine.apply #[[$MAP2]]([[VAL_23]]){{\[}}%[[C1]], %[[C0]]]
 // CHECK:               [[VAL_48:%.*]] = cmpi "slt", [[VAL_47]], [[VAL_33]] : index
 // CHECK:               scf.if [[VAL_48]] {
-// CHECK:                 [[VAL_49:%.*]] = load [[VAL_34]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[MAP5]]>
-// CHECK:                 [[VAL_50:%.*]] = load [[VAL_39]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[MAP5]]>
-// CHECK:                 [[VAL_51:%.*]] = load [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[MAP5]]>
+// CHECK:                 [[VAL_49:%.*]] = load [[VAL_34]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
+// CHECK:                 [[VAL_50:%.*]] = load [[VAL_39]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
+// CHECK:                 [[VAL_51:%.*]] = load [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
 // CHECK:                 [[VAL_52:%.*]] = addf [[VAL_49]], [[VAL_50]] : f32
-// CHECK:                 store [[VAL_52]], [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[MAP5]]>
+// CHECK:                 store [[VAL_52]], [[VAL_44]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
 // CHECK:               }
 // CHECK:             }
 // CHECK:             gpu.terminator

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
index 71e9d5cabc00..b13a502a2e60 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
@@ -206,6 +206,15 @@ func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
 }
 
 // CHECK-LABEL: func @dynamic_load(
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: !llvm.i64
 func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 //  CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
@@ -222,6 +231,15 @@ func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
 }
 
 // CHECK-LABEL: func @prefetch
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: !llvm.i64
 func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
 //      CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 // CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
@@ -251,6 +269,15 @@ func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
 }
 
 // CHECK-LABEL: func @dynamic_store
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: !llvm.i64
 func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f32) {
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 //  CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
@@ -267,6 +294,15 @@ func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f
 }
 
 // CHECK-LABEL: func @mixed_store
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: !llvm.i64
 func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) {
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 //  CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
index 06e9e93b6253..f9f9dfffbbbf 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
@@ -316,8 +316,19 @@ func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
 // -----
 
 // CHECK-LABEL: func @static_store
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">
+// CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: !llvm.i64
 // BAREPTR-LABEL: func @static_store
 // BAREPTR-SAME: %[[A:.*]]: !llvm<"float*">
+// BAREPTR-SAME:         %[[I:[a-zA-Z0-9]*]]: !llvm.i64
+// BAREPTR-SAME:         %[[J:[a-zA-Z0-9]*]]: !llvm.i64
 func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f32) {
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 //  CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
index ea21a6d9fea7..2129cf6819a9 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
@@ -950,7 +950,27 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
 }
 
 // CHECK-LABEL: func @subview_const_size(
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i64
 // CHECK32-LABEL: func @subview_const_size(
+// CHECK32-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK32-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK32-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i32
 func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
   // The last "insertvalue" that populates the memref descriptor from the function arguments.
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
@@ -964,18 +984,18 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
   // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
-  // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
+  // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64
   // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64
-  // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64
+  // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64
   // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64
   // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
   // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
-  // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64
+  // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64
   // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
   // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
-  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
+  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64
   // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm<"float*"> to !llvm<"float*">
@@ -985,18 +1005,18 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
   // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
-  // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
+  // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32
   // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32
-  // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32
+  // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32
   // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32
   // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
   // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
-  // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32
+  // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32
   // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
   // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
-  // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
+  // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32
   // CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   %1 = subview %0[%arg0, %arg1][4, 2][%arg0, %arg1] :
     memref<64x4xf32, offset: 0, strides: [4, 1]>
@@ -1005,7 +1025,27 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
 }
 
 // CHECK-LABEL: func @subview_const_stride(
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i64
+// CHECK-SAME:         %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i64
 // CHECK32-LABEL: func @subview_const_stride(
+// CHECK32-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK32-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm<"float*">,
+// CHECK32-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG5:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG6:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG7:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG8:[a-zA-Z0-9]*]]: !llvm.i32
+// CHECK32-SAME:         %[[ARG9:[a-zA-Z0-9]*]]: !llvm.i32
 func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
   // The last "insertvalue" that populates the memref descriptor from the function arguments.
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
@@ -1019,15 +1059,15 @@ func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %a
   // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
-  // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i64
+  // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i64
   // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i64
-  // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i64
+  // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i64
   // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i64
   // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
-  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
   // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[CST2]], %[[DESC3]][4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
-  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
+  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG7]], %[[DESC4]][3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
   // CHECK: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
   // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
@@ -1038,15 +1078,15 @@ func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %a
   // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
-  // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : !llvm.i32
+  // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : !llvm.i32
   // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : !llvm.i32
-  // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : !llvm.i32
+  // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : !llvm.i32
   // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : !llvm.i32
   // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
-  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
+  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
   // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST2]], %[[DESC3]][4, 1] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
-  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
+  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG7]], %[[DESC4]][3, 0] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   // CHECK32: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
   // CHECK32: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm<"{ float*, float*, i32, [2 x i32], [2 x i32] }">
   %1 = subview %0[%arg0, %arg1][%arg0, %arg1][1, 2] :

diff  --git a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
index 152a110d82dd..ce590961b2b3 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
@@ -52,8 +52,8 @@ func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %d
 
 // -----
 
-// CHECK: #[[ADD:map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
-// CHECK: #[[SUB:map[0-9]+]] = affine_map<()[s0] -> (s0 - 1)>
+// CHECK: #[[$ADD:map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
+// CHECK: #[[$SUB:map[0-9]+]] = affine_map<()[s0] -> (s0 - 1)>
 
 // CHECK-LABEL: func @materialize_read(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
 func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
@@ -72,27 +72,27 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
   // CHECK-NEXT:          scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {
   // CHECK-NEXT:            scf.for %[[I5:.*]] = %[[C0]] to %[[C4]] step %[[C1]] {
   // CHECK-NEXT:              scf.for %[[I6:.*]] = %[[C0]] to %[[C5]] step %[[C1]] {
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[ADD]](%[[I0]], %[[I4]])
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$ADD]](%[[I0]], %[[I4]])
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}} : index
   // CHECK-NEXT:                {{.*}} = select
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
   // CHECK-NEXT:                %[[L0:.*]] = select
   //
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}} : index
   // CHECK-NEXT:                {{.*}} = select
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
   // CHECK-NEXT:                %[[L1:.*]] = select
   //
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}} : index
   // CHECK-NEXT:                {{.*}} = select
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
   // CHECK-NEXT:                %[[L2:.*]] = select
   //
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[ADD]](%[[I3]], %[[I6]])
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$ADD]](%[[I3]], %[[I6]])
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}} : index
   // CHECK-NEXT:                {{.*}} = select
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
@@ -129,8 +129,8 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
 
 // -----
 
-// CHECK: #[[ADD:map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
-// CHECK: #[[SUB:map[0-9]+]] = affine_map<()[s0] -> (s0 - 1)>
+// CHECK: #[[$ADD:map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
+// CHECK: #[[$SUB:map[0-9]+]] = affine_map<()[s0] -> (s0 - 1)>
 
 // CHECK-LABEL:func @materialize_write(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
 func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
@@ -151,28 +151,28 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
   // CHECK-NEXT:          scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {
   // CHECK-NEXT:            scf.for %[[I5:.*]] = %[[C0]] to %[[C4]] step %[[C1]] {
   // CHECK-NEXT:              scf.for %[[I6:.*]] = %[[C0]] to %[[C5]] step %[[C1]] {
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[ADD]](%[[I0]], %[[I4]])
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$ADD]](%[[I0]], %[[I4]])
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, {{.*}} : index
   // CHECK-NEXT:                {{.*}} = select {{.*}}, {{.*}}, {{.*}} : index
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
   // CHECK-NEXT:                %[[S0:.*]] = select {{.*}}, %[[C0]], {{.*}} : index
   //
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[ADD]](%[[I1]], %[[I5]])
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$ADD]](%[[I1]], %[[I5]])
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, {{.*}} : index
   // CHECK-NEXT:                {{.*}} = select {{.*}}, {{.*}}, {{.*}} : index
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
   // CHECK-NEXT:                %[[S1:.*]] = select {{.*}}, %[[C0]], {{.*}} : index
   //
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", %[[I2]], %{{.*}} : index
   // CHECK-NEXT:                {{.*}} = select {{.*}}, %[[I2]], {{.*}} : index
   // CHECK-NEXT:                {{.*}} = cmpi "slt", %[[I2]], %[[C0]] : index
   // CHECK-NEXT:                %[[S2:.*]] = select {{.*}}, %[[C0]], {{.*}} : index
   //
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[ADD]](%[[I3]], %[[I6]])
-  // CHECK-NEXT:                {{.*}} = affine.apply #[[SUB]]()[%{{.*}}]
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$ADD]](%[[I3]], %[[I6]])
+  // CHECK-NEXT:                {{.*}} = affine.apply #[[$SUB]]()[%{{.*}}]
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, {{.*}} : index
   // CHECK-NEXT:                {{.*}} = select {{.*}}, {{.*}}, {{.*}} : index
   // CHECK-NEXT:                {{.*}} = cmpi "slt", {{.*}}, %[[C0]] : index
@@ -211,12 +211,12 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
 
 // -----
 
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
 
-// FULL-UNROLL-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
-// FULL-UNROLL-DAG: #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 1)>
-// FULL-UNROLL-DAG: #[[MAP2:.*]] = affine_map<()[s0] -> (s0 + 2)>
+// FULL-UNROLL-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
+// FULL-UNROLL-DAG: #[[$MAP1:.*]] = affine_map<()[s0] -> (s0 + 1)>
+// FULL-UNROLL-DAG: #[[$MAP2:.*]] = affine_map<()[s0] -> (s0 + 2)>
 
 
 // CHECK-LABEL: transfer_read_progressive(
@@ -235,7 +235,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
   // CHECK-DAG: %[[alloc:.*]] = alloca() {alignment = 128 : i64} : memref<3xvector<15xf32>>
   // CHECK-DAG: %[[dim:.*]] = dim %[[A]], %c0 : memref<?x?xf32>
   // CHECK: affine.for %[[I:.*]] = 0 to 3 {
-  // CHECK:   %[[add:.*]] = affine.apply #[[MAP0]](%[[I]])[%[[base]]]
+  // CHECK:   %[[add:.*]] = affine.apply #[[$MAP0]](%[[I]])[%[[base]]]
   // CHECK:   %[[cond1:.*]] = cmpi "slt", %[[add]], %[[dim]] : index
   // CHECK:   scf.if %[[cond1]] {
   // CHECK:     %[[vec_1d:.*]] = vector.transfer_read %[[A]][%[[add]], %[[base]]], %[[cst]] : memref<?x?xf32>, vector<15xf32>
@@ -259,7 +259,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
   // FULL-UNROLL:   vector.insert %{{.*}}, %[[VEC0]] [0] : vector<15xf32> into vector<3x15xf32>
   // FULL-UNROLL:   scf.yield %{{.*}} : vector<3x15xf32>
   // FULL-UNROLL: }
-  // FULL-UNROLL: affine.apply #[[MAP1]]()[%[[base]]]
+  // FULL-UNROLL: affine.apply #[[$MAP1]]()[%[[base]]]
   // FULL-UNROLL: cmpi "slt", %{{.*}}, %[[DIM]] : index
   // FULL-UNROLL: %[[VEC2:.*]] = scf.if %{{.*}} -> (vector<3x15xf32>) {
   // FULL-UNROLL:   vector.transfer_read %[[A]][%{{.*}}, %[[base]]], %[[pad]] : memref<?x?xf32>, vector<15xf32>
@@ -269,7 +269,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
   // FULL-UNROLL:   vector.insert %{{.*}}, %[[VEC1]] [1] : vector<15xf32> into vector<3x15xf32>
   // FULL-UNROLL:   scf.yield %{{.*}} : vector<3x15xf32>
   // FULL-UNROLL: }
-  // FULL-UNROLL: affine.apply #[[MAP2]]()[%[[base]]]
+  // FULL-UNROLL: affine.apply #[[$MAP2]]()[%[[base]]]
   // FULL-UNROLL: cmpi "slt", %{{.*}}, %[[DIM]] : index
   // FULL-UNROLL: %[[VEC3:.*]] = scf.if %{{.*}} -> (vector<3x15xf32>) {
   // FULL-UNROLL:   vector.transfer_read %[[A]][%{{.*}}, %[[base]]], %[[pad]] : memref<?x?xf32>, vector<15xf32>
@@ -288,12 +288,12 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
 
 // -----
 
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
 
-// FULL-UNROLL-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
-// FULL-UNROLL-DAG: #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 1)>
-// FULL-UNROLL-DAG: #[[MAP2:.*]] = affine_map<()[s0] -> (s0 + 2)>
+// FULL-UNROLL-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
+// FULL-UNROLL-DAG: #[[$MAP1:.*]] = affine_map<()[s0] -> (s0 + 1)>
+// FULL-UNROLL-DAG: #[[$MAP2:.*]] = affine_map<()[s0] -> (s0 + 2)>
 
 // CHECK-LABEL: transfer_write_progressive(
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]+]]: memref<?x?xf32>,
@@ -309,7 +309,7 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
   // CHECK: store %[[vec]], %[[vmemref]][] : memref<vector<3x15xf32>>
   // CHECK: %[[dim:.*]] = dim %[[A]], %c0 : memref<?x?xf32>
   // CHECK: affine.for %[[I:.*]] = 0 to 3 {
-  // CHECK:   %[[add:.*]] = affine.apply #[[MAP0]](%[[I]])[%[[base]]]
+  // CHECK:   %[[add:.*]] = affine.apply #[[$MAP0]](%[[I]])[%[[base]]]
   // CHECK:   %[[cmp:.*]] = cmpi "slt", %[[add]], %[[dim]] : index
   // CHECK:   scf.if %[[cmp]] {
   // CHECK:     %[[vec_1d:.*]] = load %0[%[[I]]] : memref<3xvector<15xf32>>
@@ -322,13 +322,13 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
   // FULL-UNROLL:   %[[V0:.*]] = vector.extract %[[vec]][0] : vector<3x15xf32>
   // FULL-UNROLL:   vector.transfer_write %[[V0]], %[[A]][%[[base]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
   // FULL-UNROLL: }
-  // FULL-UNROLL: %[[I1:.*]] = affine.apply #[[MAP1]]()[%[[base]]]
+  // FULL-UNROLL: %[[I1:.*]] = affine.apply #[[$MAP1]]()[%[[base]]]
   // FULL-UNROLL: %[[CMP1:.*]] = cmpi "slt", %[[I1]], %[[DIM]] : index
   // FULL-UNROLL: scf.if %[[CMP1]] {
   // FULL-UNROLL:   %[[V1:.*]] = vector.extract %[[vec]][1] : vector<3x15xf32>
   // FULL-UNROLL:   vector.transfer_write %[[V1]], %[[A]][%[[I1]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
   // FULL-UNROLL: }
-  // FULL-UNROLL: %[[I2:.*]] = affine.apply #[[MAP2]]()[%[[base]]]
+  // FULL-UNROLL: %[[I2:.*]] = affine.apply #[[$MAP2]]()[%[[base]]]
   // FULL-UNROLL: %[[CMP2:.*]] = cmpi "slt", %[[I2]], %[[DIM]] : index
   // FULL-UNROLL: scf.if %[[CMP2]] {
   // FULL-UNROLL:   %[[V2:.*]] = vector.extract %[[vec]][2] : vector<3x15xf32>
@@ -342,12 +342,12 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
 
 // -----
 
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
 
-// FULL-UNROLL-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
-// FULL-UNROLL-DAG: #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 1)>
-// FULL-UNROLL-DAG: #[[MAP2:.*]] = affine_map<()[s0] -> (s0 + 2)>
+// FULL-UNROLL-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
+// FULL-UNROLL-DAG: #[[$MAP1:.*]] = affine_map<()[s0] -> (s0 + 1)>
+// FULL-UNROLL-DAG: #[[$MAP2:.*]] = affine_map<()[s0] -> (s0 + 2)>
 
 // CHECK-LABEL: transfer_write_progressive_not_masked(
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]+]]: memref<?x?xf32>,
@@ -363,16 +363,16 @@ func @transfer_write_progressive_not_masked(%A : memref<?x?xf32>, %base: index,
   // CHECK-NEXT: %[[vmemref:.*]] = vector.type_cast %[[alloc]] : memref<3xvector<15xf32>> to memref<vector<3x15xf32>>
   // CHECK-NEXT: store %[[vec]], %[[vmemref]][] : memref<vector<3x15xf32>>
   // CHECK-NEXT: affine.for %[[I:.*]] = 0 to 3 {
-  // CHECK-NEXT:   %[[add:.*]] = affine.apply #[[MAP0]](%[[I]])[%[[base]]]
+  // CHECK-NEXT:   %[[add:.*]] = affine.apply #[[$MAP0]](%[[I]])[%[[base]]]
   // CHECK-NEXT:   %[[vec_1d:.*]] = load %0[%[[I]]] : memref<3xvector<15xf32>>
   // CHECK-NEXT:   vector.transfer_write %[[vec_1d]], %[[A]][%[[add]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
 
   // FULL-UNROLL: %[[VEC0:.*]] = vector.extract %[[vec]][0] : vector<3x15xf32>
   // FULL-UNROLL: vector.transfer_write %[[VEC0]], %[[A]][%[[base]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
-  // FULL-UNROLL: %[[I1:.*]] = affine.apply #[[MAP1]]()[%[[base]]]
+  // FULL-UNROLL: %[[I1:.*]] = affine.apply #[[$MAP1]]()[%[[base]]]
   // FULL-UNROLL: %[[VEC1:.*]] = vector.extract %[[vec]][1] : vector<3x15xf32>
   // FULL-UNROLL: vector.transfer_write %2, %[[A]][%[[I1]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
-  // FULL-UNROLL: %[[I2:.*]] = affine.apply #[[MAP2]]()[%[[base]]]
+  // FULL-UNROLL: %[[I2:.*]] = affine.apply #[[$MAP2]]()[%[[base]]]
   // FULL-UNROLL: %[[VEC2:.*]] = vector.extract %[[vec]][2] : vector<3x15xf32>
   // FULL-UNROLL: vector.transfer_write %[[VEC2:.*]], %[[A]][%[[I2]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
   vector.transfer_write %vec, %A[%base, %base] {masked = [false, false]} :

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/normalize_maps.mlir b/mlir/test/Dialect/Affine/SuperVectorize/normalize_maps.mlir
index 7c2bacd1a46e..0c01899133ff 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/normalize_maps.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/normalize_maps.mlir
@@ -1,11 +1,11 @@
 // RUN: mlir-opt %s -affine-super-vectorizer-test -normalize-maps |  FileCheck %s
 
-// CHECK-DAG: #[[ZERO:[a-zA-Z0-9]+]] = affine_map<() -> (0)>
-// CHECK-DAG: #[[ID1:[a-zA-Z0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: #[[D0TIMES2:[a-zA-Z0-9]+]] = affine_map<(d0) -> (d0 * 2)>
-// CHECK-DAG: #[[D0PLUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
-// CHECK-DAG: #[[MINSD0PLUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (-d0 + d1)>
-// CHECK-DAG: #[[D0MINUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 - d1)>
+// CHECK-DAG: #[[$ZERO:[a-zA-Z0-9]+]] = affine_map<() -> (0)>
+// CHECK-DAG: #[[$ID1:[a-zA-Z0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[$D0TIMES2:[a-zA-Z0-9]+]] = affine_map<(d0) -> (d0 * 2)>
+// CHECK-DAG: #[[$D0PLUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
+// CHECK-DAG: #[[$MINSD0PLUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (-d0 + d1)>
+// CHECK-DAG: #[[$D0MINUSD1:[a-zA-Z0-9]+]] = affine_map<(d0, d1) -> (d0 - d1)>
 
 // CHECK-LABEL: func @simple()
 func @simple() {
@@ -16,9 +16,9 @@ func @simple() {
     %3 = affine.apply affine_map<(d0, d1) -> (d0 - d1)> (%0, %0)
   }
   // CHECK-NEXT: affine.for %{{.*}} = 0 to 7
-  // CHECK-NEXT:   {{.*}} affine.apply #[[ID1]](%{{.*}})
-  // CHECK-NEXT:   {{.*}} affine.apply #[[D0TIMES2]](%{{.*}})
-  // CHECK-NEXT:   {{.*}} affine.apply #[[ZERO]]()
+  // CHECK-NEXT:   {{.*}} affine.apply #[[$ID1]](%{{.*}})
+  // CHECK-NEXT:   {{.*}} affine.apply #[[$D0TIMES2]](%{{.*}})
+  // CHECK-NEXT:   {{.*}} affine.apply #[[$ZERO]]()
 
   affine.for %i1 = 0 to 7 {
     affine.for %i2 = 0 to 42 {
@@ -31,9 +31,9 @@ func @simple() {
   }
   //      CHECK: affine.for %{{.*}} = 0 to 7
   // CHECK-NEXT:   affine.for %{{.*}} = 0 to 42
-  // CHECK-NEXT:     {{.*}} affine.apply #[[D0PLUSD1]](%{{.*}}, %{{.*}})
-  // CHECK-NEXT:     {{.*}} affine.apply #[[MINSD0PLUSD1]](%{{.*}}, %{{.*}})
-  // CHECK-NEXT:     {{.*}} affine.apply #[[D0MINUSD1]](%{{.*}}, %{{.*}})
+  // CHECK-NEXT:     {{.*}} affine.apply #[[$D0PLUSD1]](%{{.*}}, %{{.*}})
+  // CHECK-NEXT:     {{.*}} affine.apply #[[$MINSD0PLUSD1]](%{{.*}}, %{{.*}})
+  // CHECK-NEXT:     {{.*}} affine.apply #[[$D0MINUSD1]](%{{.*}}, %{{.*}})
 
   affine.for %i3 = 0 to 16 {
     affine.for %i4 = 0 to 47 step 2 {
@@ -50,9 +50,9 @@ func @simple() {
   // CHECK:      affine.for %{{.*}} = 0 to 16
   // CHECK-NEXT:   affine.for %{{.*}} = 0 to 47 step 2
   // CHECK-NEXT:     affine.for %{{.*}} = 0 to 78 step 16
-  // CHECK-NEXT:       {{.*}} affine.apply #[[ID1]](%{{.*}})
-  // CHECK-NEXT:       {{.*}} affine.apply #[[ID1]](%{{.*}})
-  // CHECK-NEXT:       {{.*}} affine.apply #[[ID1]](%{{.*}})
+  // CHECK-NEXT:       {{.*}} affine.apply #[[$ID1]](%{{.*}})
+  // CHECK-NEXT:       {{.*}} affine.apply #[[$ID1]](%{{.*}})
+  // CHECK-NEXT:       {{.*}} affine.apply #[[$ID1]](%{{.*}})
 
   return
 }

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
index ed0a39c967a3..bbeced633bcd 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -affine-super-vectorize="virtual-vector-size=128 test-fastest-varying=0" | FileCheck %s
 
 // Permutation maps used in vectorization.
-// CHECK: #[[map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
+// CHECK: #[[$map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
 
 #map0 = affine_map<(d0) -> (d0)>
 #mapadd1 = affine_map<(d0) -> (d0 + 1)>
@@ -29,7 +29,7 @@ func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-NEXT: %{{.*}} = affine.apply #map0(%[[C0]])
 // CHECK-NEXT: %{{.*}} = affine.apply #map0(%[[C0]])
 // CHECK-NEXT: %{{.*}} = constant 0.0{{.*}}: f32
-// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[$map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i0 = 0 to %M { // vectorized due to scalar -> vector
      %a0 = affine.load %A[%c0, %c0] : memref<?x?xf32>
    }
@@ -334,7 +334,7 @@ func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK:     %{{.*}} = affine.apply #map0(%{{.*}})
 // CHECK:     %{{.*}} = affine.apply #map0(%{{.*}})
 // CHECK:     %{{.*}} = constant 0.0{{.*}}: f32
-// CHECK:     {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK:     {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[$map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %{{.*}} in DFS post-order prevents vectorizing %{{.*}}
      affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector
        %a18 = affine.load %A[%c0, %c0] : memref<?x?xf32>
@@ -363,7 +363,7 @@ func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK:      %{{.*}} = affine.apply #map0(%{{.*}})
 // CHECK-NEXT: %{{.*}} = affine.apply #map0(%{{.*}})
 // CHECK-NEXT: %{{.*}} = constant 0.0{{.*}}: f32
-// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[$map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %i18 in DFS post-order prevents vectorizing %{{.*}}
      affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector
        %a18 = affine.load %A[%c0, %c0] : memref<?x?xf32>

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
index de757a4ad88b..f2cd769a7cc1 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
@@ -2,14 +2,14 @@
 // RUN: mlir-opt %s -affine-super-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0" | FileCheck %s
 
 // Permutation maps used in vectorization.
-// CHECK-DAG: #[[map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: #[[map_id2:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG: #[[map_proj_d0d1_zerod1:map[0-9]+]] = affine_map<(d0, d1) -> (0, d1)>
-// CHECK-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
-// VECT-DAG: #[[map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// VECT-DAG: #[[map_id2:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
-// VECT-DAG: #[[map_proj_d0d1_zerod1:map[0-9]+]] = affine_map<(d0, d1) -> (0, d1)>
-// VECT-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
+// CHECK-DAG: #[[$map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[$map_id2:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: #[[$map_proj_d0d1_zerod1:map[0-9]+]] = affine_map<(d0, d1) -> (0, d1)>
+// CHECK-DAG: #[[$map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
+// VECT-DAG: #[[$map_id1:map[0-9]+]] = affine_map<(d0) -> (d0)>
+// VECT-DAG: #[[$map_id2:map[0-9]+]] = affine_map<(d0, d1) -> (d0, d1)>
+// VECT-DAG: #[[$map_proj_d0d1_zerod1:map[0-9]+]] = affine_map<(d0, d1) -> (0, d1)>
+// VECT-DAG: #[[$map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
 
 func @vec2d(%A : memref<?x?x?xf32>) {
    %c0 = constant 0 : index
@@ -112,8 +112,8 @@ func @vectorize_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: me
   // VECT-NEXT: %[[M:.*]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
   // VECT-NEXT: %[[K:.*]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
   // VECT-NEXT: %[[N:.*]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
-  //      VECT: {{.*}} #[[map_id1]](%[[M]]) step 4 {
-  // VECT-NEXT:   {{.*}} #[[map_id1]](%[[N]]) step 8 {
+  //      VECT: {{.*}} #[[$map_id1]](%[[M]]) step 4 {
+  // VECT-NEXT:   {{.*}} #[[$map_id1]](%[[N]]) step 8 {
   //      VECT:     %[[VC0:.*]] = constant dense<0.000000e+00> : vector<4x8xf32>
   // VECT-NEXT:     vector.transfer_write %[[VC0]], %{{.*}}[%{{.*}}, %{{.*}}] : vector<4x8xf32>, memref<?x?xf32>
   affine.for %i0 = affine_map<(d0) -> (d0)>(%c0) to affine_map<(d0) -> (d0)>(%M) {
@@ -122,11 +122,11 @@ func @vectorize_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: me
       affine.store %cst, %arg2[%i0, %i1] : memref<?x?xf32>
     }
   }
-  //      VECT:  affine.for %[[I2:.*]] = #[[map_id1]](%[[C0]]) to #[[map_id1]](%[[M]]) step 4 {
-  // VECT-NEXT:    affine.for %[[I3:.*]] = #[[map_id1]](%[[C0]]) to #[[map_id1]](%[[N]]) step 8 {
-  // VECT-NEXT:      affine.for %[[I4:.*]] = #map5(%[[C0]]) to #[[map_id1]](%[[K]]) {
-  //      VECT:        %[[A:.*]] = vector.transfer_read %{{.*}}[%[[I4]], %[[I3]]], %{{.*}} {permutation_map = #[[map_proj_d0d1_zerod1]]} : memref<?x?xf32>, vector<4x8xf32>
-  //      VECT:        %[[B:.*]] = vector.transfer_read %{{.*}}[%[[I2]], %[[I4]]], %{{.*}} {permutation_map = #[[map_proj_d0d1_d0zero]]} : memref<?x?xf32>, vector<4x8xf32>
+  //      VECT:  affine.for %[[I2:.*]] = #[[$map_id1]](%[[C0]]) to #[[$map_id1]](%[[M]]) step 4 {
+  // VECT-NEXT:    affine.for %[[I3:.*]] = #[[$map_id1]](%[[C0]]) to #[[$map_id1]](%[[N]]) step 8 {
+  // VECT-NEXT:      affine.for %[[I4:.*]] = #map5(%[[C0]]) to #[[$map_id1]](%[[K]]) {
+  //      VECT:        %[[A:.*]] = vector.transfer_read %{{.*}}[%[[I4]], %[[I3]]], %{{.*}} {permutation_map = #[[$map_proj_d0d1_zerod1]]} : memref<?x?xf32>, vector<4x8xf32>
+  //      VECT:        %[[B:.*]] = vector.transfer_read %{{.*}}[%[[I2]], %[[I4]]], %{{.*}} {permutation_map = #[[$map_proj_d0d1_d0zero]]} : memref<?x?xf32>, vector<4x8xf32>
   // VECT-NEXT:        %[[C:.*]] = mulf %[[B]], %[[A]] : vector<4x8xf32>
   //      VECT:        %[[D:.*]] = vector.transfer_read %{{.*}}[%[[I2]], %[[I3]]], %{{.*}} : memref<?x?xf32>, vector<4x8xf32>
   // VECT-NEXT:        %[[E:.*]] = addf %[[D]], %[[C]] : vector<4x8xf32>

diff --git a/mlir/test/Dialect/Affine/affine-data-copy.mlir b/mlir/test/Dialect/Affine/affine-data-copy.mlir
index 97d64a6d1b44..694615ecb1c2 100644
--- a/mlir/test/Dialect/Affine/affine-data-copy.mlir
+++ b/mlir/test/Dialect/Affine/affine-data-copy.mlir
@@ -18,8 +18,8 @@
 #ub = affine_map<(d0) -> (d0 + 128)>
 
 // Map used to index the buffer while computing.
-// CHECK-DAG: [[MAP_IDENTITY:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: [[MAP_PLUS_128:map[0-9]+]] = affine_map<(d0) -> (d0 + 128)>
+// CHECK-DAG: [[$MAP_IDENTITY:map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[$MAP_PLUS_128:map[0-9]+]] = affine_map<(d0) -> (d0 + 128)>
 
 // CHECK-LABEL: func @matmul
 // FILTER-LABEL: func @matmul
@@ -52,8 +52,8 @@ func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<40
 // CHECK:     [[BUFC:%[0-9]+]] = alloc() : memref<128x128xf32>
 // The result matrix's copy gets hoisted out.
 // Result matrix copy-in.
-// CHECK:     affine.for %[[II:.*]] = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
-// CHECK:       affine.for %[[JJ:.*]] = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
+// CHECK:     affine.for %[[II:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
+// CHECK:       affine.for %[[JJ:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
 // CHECK:         affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<4096x4096xf32>
 // CHECK:         affine.store %{{.*}}, [[BUFC]][-%[[I]] + %[[II]], -%[[J]] + %[[JJ]]] : memref<128x128xf32>
 // CHECK:       }
@@ -62,8 +62,8 @@ func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<40
 // LHS matrix copy-in.
 // CHECK:     affine.for %[[K:.*]] = 0 to 4096 step 128 {
 // CHECK:      [[BUFA:%[0-9]+]] = alloc() : memref<128x128xf32>
-// CHECK:       affine.for %[[II:.*]] = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
-// CHECK:         affine.for %[[KK:.*]] = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
+// CHECK:       affine.for %[[II:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
+// CHECK:         affine.for %[[KK:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
 // CHECK:           affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<4096x4096xf32>
 // CHECK:           affine.store %{{.*}}, [[BUFA]][-%[[I]] + %[[II]], -%[[K]] + %[[KK]]] : memref<128x128xf32>
 // CHECK:         }
@@ -71,17 +71,17 @@ func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<40
 
 // RHS matrix copy-in.
 // CHECK:       [[BUFB:%[0-9]+]] = alloc() : memref<128x128xf32>
-// CHECK:       affine.for %[[KK:.*]] = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
-// CHECK:         affine.for %[[JJ:.*]] = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
+// CHECK:       affine.for %[[KK:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
+// CHECK:         affine.for %[[JJ:.*]] = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
 // CHECK:           affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<4096x4096xf32>
 // CHECK:           affine.store %{{.*}}, [[BUFB]][-%[[K]] + %[[KK]], -%[[J]] + %[[JJ]]] : memref<128x128xf32>
 // CHECK:         }
 // CHECK:       }
 
 // Computation on the fast buffers.
-// CHECK:       affine.for %{{.*}} = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
-// CHECK:         affine.for %{{.*}} = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
-// CHECK:           affine.for %{{.*}} = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
+// CHECK:       affine.for %{{.*}} = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
+// CHECK:         affine.for %{{.*}} = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
+// CHECK:           affine.for %{{.*}} = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
 // CHECK:             affine.load [[BUFA]][-%{{.*}} + %{{.*}}, -%{{.*}} + %{{.*}}] : memref<128x128xf32>
 // CHECK:             affine.load [[BUFB]][-%{{.*}} + %{{.*}}, -%{{.*}} + %{{.*}}] : memref<128x128xf32>
 // CHECK:             affine.load [[BUFC]][-%{{.*}} + %{{.*}}, -%{{.*}} + %{{.*}}] : memref<128x128xf32>
@@ -96,8 +96,8 @@ func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<40
 // CHECK:     }
 
 // Result matrix copy out.
-// CHECK:     affine.for %{{.*}} = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
-// CHECK:       affine.for %{{.*}} = #[[MAP_IDENTITY]](%{{.*}}) to #[[MAP_PLUS_128]](%{{.*}}) {
+// CHECK:     affine.for %{{.*}} = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
+// CHECK:       affine.for %{{.*}} = #[[$MAP_IDENTITY]](%{{.*}}) to #[[$MAP_PLUS_128]](%{{.*}}) {
 // CHECK:         affine.load [[BUFC]][-%{{.*}} + %{{.*}}, -%{{.*}} + %{{.*}}] : memref<128x128xf32>
 // CHECK:         store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<4096x4096xf32>
 // CHECK:       }
@@ -200,9 +200,9 @@ func @single_elt_buffers(%arg0: memref<1024x1024xf32>, %arg1: memref<1024x1024xf
 
 #map_ub = affine_map<(d0) -> (4096, d0 + 100)>
 
-// CHECK-DAG: [[MAP_IDENTITY:map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: [[MAP_MIN_UB1:map[0-9]+]] = affine_map<(d0) -> (d0 + 100, 4096)>
-// CHECK-DAG: [[MAP_MIN_UB2:map[0-9]+]] = affine_map<(d0) -> (4096, d0 + 100)>
+// CHECK-DAG: [[$MAP_IDENTITY:map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[$MAP_MIN_UB1:map[0-9]+]] = affine_map<(d0) -> (d0 + 100, 4096)>
+// CHECK-DAG: [[$MAP_MIN_UB2:map[0-9]+]] = affine_map<(d0) -> (4096, d0 + 100)>
 
 // CHECK-LABEL: func @min_upper_bound
 func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
@@ -217,16 +217,16 @@ func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
 }
 // CHECK:      affine.for %[[IV1:.*]] = 0 to 4096 step 100
 // CHECK:        %[[BUF:.*]] = alloc() : memref<100xf32>
-// CHECK-NEXT:   affine.for %[[IV2:.*]] = #[[MAP_IDENTITY]](%[[IV1]]) to min #[[MAP_MIN_UB1]](%[[IV1]]) {
+// CHECK-NEXT:   affine.for %[[IV2:.*]] = #[[$MAP_IDENTITY]](%[[IV1]]) to min #[[$MAP_MIN_UB1]](%[[IV1]]) {
 // CHECK-NEXT:     affine.load %{{.*}}[%[[IV2]]] : memref<4096xf32>
 // CHECK-NEXT:     affine.store %{{.*}}, %[[BUF]][-%[[IV1]] + %[[IV2]]] : memref<100xf32>
 // CHECK-NEXT:   }
-// CHECK-NEXT:   affine.for %[[IV2:.*]] = #[[MAP_IDENTITY]](%[[IV1]]) to min #[[MAP_MIN_UB2]](%[[IV1]]) {
+// CHECK-NEXT:   affine.for %[[IV2:.*]] = #[[$MAP_IDENTITY]](%[[IV1]]) to min #[[$MAP_MIN_UB2]](%[[IV1]]) {
 // CHECK-NEXT:     affine.load %[[BUF]][-%[[IV1]] + %[[IV2]]] : memref<100xf32>
 // CHECK-NEXT:     mulf
 // CHECK-NEXT:     affine.store %{{.*}}, %[[BUF]][-%[[IV1]] + %[[IV2]]] : memref<100xf32>
 // CHECK-NEXT:   }
-// CHECK:        affine.for %[[IV2:.*]] = #[[MAP_IDENTITY]](%[[IV1]]) to min #[[MAP_MIN_UB1]](%[[IV1]]) {
+// CHECK:        affine.for %[[IV2:.*]] = #[[$MAP_IDENTITY]](%[[IV1]]) to min #[[$MAP_MIN_UB1]](%[[IV1]]) {
 // CHECK-NEXT:     affine.load %[[BUF]][-%[[IV1]] + %[[IV2]]] : memref<100xf32>
 // CHECK-NEXT:     affine.store %{{.*}}, %{{.*}}[%[[IV2]]] : memref<4096xf32>
 // CHECK-NEXT:   }
@@ -242,8 +242,8 @@ func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
 #lb = affine_map<()[s0, s1] -> (s0 * 512, s1 * 6)>
 #ub = affine_map<()[s0, s1] -> (s0 * 512 + 512, s1 * 6 + 6)>
 
-// CHECK-DAG: #[[LB:.*]] = affine_map<()[s0, s1] -> (s0 * 512, s1 * 6)>
-// CHECK-DAG: #[[UB:.*]] = affine_map<()[s0, s1] -> (s0 * 512 + 512, s1 * 6 + 6)>
+// CHECK-DAG: #[[$LB:.*]] = affine_map<()[s0, s1] -> (s0 * 512, s1 * 6)>
+// CHECK-DAG: #[[$UB:.*]] = affine_map<()[s0, s1] -> (s0 * 512 + 512, s1 * 6 + 6)>
 
 // CHECK-LABEL: max_lower_bound(%{{.*}}: memref<2048x516xf64>,
 // CHECK-SAME: [[i:arg[0-9]+]]
@@ -257,15 +257,15 @@ func @max_lower_bound(%M: memref<2048x516xf64>, %i : index, %j : index) {
   return
 }
 
-// CHECK:      %[[BUF=.*]] = alloc() : memref<2048x6xf64>
+// CHECK:      %[[BUF:.*]] = alloc() : memref<2048x6xf64>
 // CHECK-NEXT: affine.for %[[ii:.*]] = 0 to 2048 {
-// CHECK-NEXT:   affine.for %[[jj:.*]] = max #[[LB]]()[%[[i]], %[[j]]] to min #[[UB]]()[%[[i]], %[[j]]] {
+// CHECK-NEXT:   affine.for %[[jj:.*]] = max #[[$LB]]()[%[[i]], %[[j]]] to min #[[$UB]]()[%[[i]], %[[j]]] {
 // CHECK-NEXT:      affine.load %{{.*}}[%[[ii]], %[[jj]]] : memref<2048x516xf64>
 // CHECK-NEXT:      affine.store %{{.*}}, %[[BUF]][%[[ii]], %[[jj]] - symbol(%[[j]]) * 6] : memref<2048x6xf64>
 // CHECK-NEXT:   }
 // CHECK-NEXT: }
 // CHECK-NEXT: affine.for %[[ii_:.*]] = 0 to 2048 {
-// CHECK-NEXT:   affine.for %[[jj_:.*]] = max #[[LB]]()[%{{.*}}, %{{.*}}] to min #[[UB]]()[%{{.*}}, %{{.*}}] {
+// CHECK-NEXT:   affine.for %[[jj_:.*]] = max #[[$LB]]()[%{{.*}}, %{{.*}}] to min #[[$UB]]()[%{{.*}}, %{{.*}}] {
 // CHECK-NEXT:     affine.load %[[BUF]][%[[ii_]], %[[jj_]] - symbol(%[[j]]) * 6] : memref<2048x6xf64>
 // CHECK-NEXT:    }
 // CHECK-NEXT: }

diff --git a/mlir/test/Dialect/Affine/canonicalize.mlir b/mlir/test/Dialect/Affine/canonicalize.mlir
index 5c7fba52976a..b29a68c219e9 100644
--- a/mlir/test/Dialect/Affine/canonicalize.mlir
+++ b/mlir/test/Dialect/Affine/canonicalize.mlir
@@ -1,46 +1,46 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 // Affine maps for test case: compose_affine_maps_1dto2d_no_symbols
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
+// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
 
 // Affine maps for test case: compose_affine_maps_1dto2d_with_symbols
-// CHECK-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0) -> (d0 - 4)>
-// CHECK-DAG: [[MAP4b:#map[0-9]+]] = affine_map<(d0) -> (d0 - 7)>
-// CHECK-DAG: [[MAP7:#map[0-9]+]] = affine_map<(d0) -> (d0 * 2 - 3)>
-// CHECK-DAG: [[MAP7a:#map[0-9]+]] = affine_map<(d0) -> (d0 * 2 + 1)>
+// CHECK-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0) -> (d0 - 4)>
+// CHECK-DAG: [[$MAP4b:#map[0-9]+]] = affine_map<(d0) -> (d0 - 7)>
+// CHECK-DAG: [[$MAP7:#map[0-9]+]] = affine_map<(d0) -> (d0 * 2 - 3)>
+// CHECK-DAG: [[$MAP7a:#map[0-9]+]] = affine_map<(d0) -> (d0 * 2 + 1)>
 
 // Affine map for test case: compose_affine_maps_d2_tile
-// CHECK-DAG: [[MAP8:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + (d0 ceildiv 4) * 4 - (d1 floordiv 4) * 4)>
-// CHECK-DAG: [[MAP8a:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + (d0 ceildiv 8) * 8 - (d1 floordiv 8) * 8)>
+// CHECK-DAG: [[$MAP8:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + (d0 ceildiv 4) * 4 - (d1 floordiv 4) * 4)>
+// CHECK-DAG: [[$MAP8a:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + (d0 ceildiv 8) * 8 - (d1 floordiv 8) * 8)>
 
 // Affine maps for test case: compose_affine_maps_dependent_loads
-// CHECK-DAG: [[MAP9:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
-// CHECK-DAG: [[MAP10:#map[0-9]+]] = affine_map<(d0) -> (d0 * 3)>
-// CHECK-DAG: [[MAP11:#map[0-9]+]] = affine_map<(d0) -> ((d0 + 3) ceildiv 3)>
-// CHECK-DAG: [[MAP12:#map[0-9]+]] = affine_map<(d0) -> (d0 * 7 - 49)>
+// CHECK-DAG: [[$MAP9:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// CHECK-DAG: [[$MAP10:#map[0-9]+]] = affine_map<(d0) -> (d0 * 3)>
+// CHECK-DAG: [[$MAP11:#map[0-9]+]] = affine_map<(d0) -> ((d0 + 3) ceildiv 3)>
+// CHECK-DAG: [[$MAP12:#map[0-9]+]] = affine_map<(d0) -> (d0 * 7 - 49)>
 
 // Affine maps for test case: compose_affine_maps_diamond_dependency
-// CHECK-DAG: [[MAP13A:#map[0-9]+]] = affine_map<(d0) -> ((d0 + 6) ceildiv 8)>
-// CHECK-DAG: [[MAP13B:#map[0-9]+]] = affine_map<(d0) -> ((d0 * 4 - 4) floordiv 3)>
+// CHECK-DAG: [[$MAP13A:#map[0-9]+]] = affine_map<(d0) -> ((d0 + 6) ceildiv 8)>
+// CHECK-DAG: [[$MAP13B:#map[0-9]+]] = affine_map<(d0) -> ((d0 * 4 - 4) floordiv 3)>
 
 // Affine maps for test case: partial_fold_map
-// CHECK-DAG: [[MAP15:#map[0-9]+]] = affine_map<()[s0] -> (s0 - 42)>
+// CHECK-DAG: [[$MAP15:#map[0-9]+]] = affine_map<()[s0] -> (s0 - 42)>
 
 // Affine maps for test cases: symbolic_composition_*
-// CHECK-DAG: [[map_symbolic_composition_a:#map[0-9]+]] = affine_map<()[s0] -> (s0 * 512)>
-// CHECK-DAG: [[map_symbolic_composition_b:#map[0-9]+]] = affine_map<()[s0] -> (s0 * 4)>
-// CHECK-DAG: [[map_symbolic_composition_c:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0 * 3 + s1)>
-// CHECK-DAG: [[map_symbolic_composition_d:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 * 3 + s0)>
+// CHECK-DAG: [[$MAP_symbolic_composition_a:#map[0-9]+]] = affine_map<()[s0] -> (s0 * 512)>
+// CHECK-DAG: [[$MAP_symbolic_composition_b:#map[0-9]+]] = affine_map<()[s0] -> (s0 * 4)>
+// CHECK-DAG: [[$MAP_symbolic_composition_c:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0 * 3 + s1)>
+// CHECK-DAG: [[$MAP_symbolic_composition_d:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 * 3 + s0)>
 
 // Affine maps for test cases: map_mix_dims_and_symbols_*
-// CHECK-DAG: [[map_mix_dims_and_symbols_b:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 + s0 * 42 + 6)>
-// CHECK-DAG: [[map_mix_dims_and_symbols_c:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 * 4 + s0 * 168 - 4)>
-// CHECK-DAG: [[map_mix_dims_and_symbols_d:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s1 + s0 * 42 + 6) ceildiv 8)>
-// CHECK-DAG: [[map_mix_dims_and_symbols_e:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s1 * 4 + s0 * 168 - 4) floordiv 3)>
+// CHECK-DAG: [[$MAP_mix_dims_and_symbols_b:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 + s0 * 42 + 6)>
+// CHECK-DAG: [[$MAP_mix_dims_and_symbols_c:#map[0-9]+]] = affine_map<()[s0, s1] -> (s1 * 4 + s0 * 168 - 4)>
+// CHECK-DAG: [[$MAP_mix_dims_and_symbols_d:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s1 + s0 * 42 + 6) ceildiv 8)>
+// CHECK-DAG: [[$MAP_mix_dims_and_symbols_e:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s1 * 4 + s0 * 168 - 4) floordiv 3)>
 
-// Affine maps for test case: symbolic_semi_affine
-// CHECK-DAG: [[symbolic_semi_affine:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 floordiv (s0 + 1))>
+// Affine maps for test case: $symbolic_semi_affine
+// CHECK-DAG: [[$symbolic_semi_affine:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 floordiv (s0 + 1))>
 
 // CHECK-LABEL: func @compose_affine_maps_1dto2d_no_symbols() {
 func @compose_affine_maps_1dto2d_no_symbols() {
@@ -53,7 +53,7 @@ func @compose_affine_maps_1dto2d_no_symbols() {
     %x1_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%x0, %x0)
     %x1_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%x0, %x0)
 
-    // CHECK: [[I0A:%[0-9]+]] = affine.apply [[MAP0]](%{{.*}})
+    // CHECK: [[I0A:%[0-9]+]] = affine.apply [[$MAP0]](%{{.*}})
     // CHECK-NEXT: [[V0:%[0-9]+]] = load %0{{\[}}[[I0A]], [[I0A]]{{\]}}
     %v0 = load %0[%x1_0, %x1_1] : memref<4x4xf32>
 
@@ -62,7 +62,7 @@ func @compose_affine_maps_1dto2d_no_symbols() {
     %y1_0 = affine.apply affine_map<(d0, d1) -> (d0)> (%y0, %y0)
     %y1_1 = affine.apply affine_map<(d0, d1) -> (d1)> (%y0, %y0)
 
-    // CHECK-NEXT: [[I1A:%[0-9]+]] = affine.apply [[MAP1]](%{{.*}})
+    // CHECK-NEXT: [[I1A:%[0-9]+]] = affine.apply [[$MAP1]](%{{.*}})
     // CHECK-NEXT: store [[V0]], %0{{\[}}[[I1A]], [[I1A]]{{\]}}
     store %v0, %0[%y1_0, %y1_1] : memref<4x4xf32>
 
@@ -91,20 +91,20 @@ func @compose_affine_maps_1dto2d_with_symbols() {
     %c4 = constant 4 : index
     %x0 = affine.apply affine_map<(d0)[s0] -> (d0 - s0)> (%i0)[%c4]
 
-    // CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP4]](%{{.*}})
+    // CHECK: [[I0:%[0-9]+]] = affine.apply [[$MAP4]](%{{.*}})
     // CHECK-NEXT: [[V0:%[0-9]+]] = load %{{[0-9]+}}{{\[}}[[I0]], [[I0]]{{\]}}
     %v0 = load %0[%x0, %x0] : memref<4x4xf32>
 
     // Test load[%x0, %x1] with symbol %c4 captured by '%x0' map.
     %x1 = affine.apply affine_map<(d0) -> (d0 + 1)> (%i0)
     %y1 = affine.apply affine_map<(d0, d1) -> (d0+d1)> (%x0, %x1)
-    // CHECK-NEXT: [[I1:%[0-9]+]] = affine.apply [[MAP7]](%{{.*}})
+    // CHECK-NEXT: [[I1:%[0-9]+]] = affine.apply [[$MAP7]](%{{.*}})
     // CHECK-NEXT: store [[V0]], %{{[0-9]+}}{{\[}}[[I1]], [[I1]]{{\]}}
     store %v0, %0[%y1, %y1] : memref<4x4xf32>
 
     // Test store[%x1, %x0] with symbol %c4 captured by '%x0' map.
     %y2 = affine.apply affine_map<(d0, d1) -> (d0 + d1)> (%x1, %x0)
-    // CHECK-NEXT: [[I2:%[0-9]+]] = affine.apply [[MAP7]](%{{.*}})
+    // CHECK-NEXT: [[I2:%[0-9]+]] = affine.apply [[$MAP7]](%{{.*}})
     // CHECK-NEXT: store [[V0]], %{{[0-9]+}}{{\[}}[[I2]], [[I2]]{{\]}}
     store %v0, %0[%y2, %y2] : memref<4x4xf32>
 
@@ -112,7 +112,7 @@ func @compose_affine_maps_1dto2d_with_symbols() {
     %c5 = constant 5 : index
     %x2 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)> (%i0)[%c5]
     %y3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)> (%x2, %x0)
-    // CHECK: [[I3:%[0-9]+]] = affine.apply [[MAP7a]](%{{.*}})
+    // CHECK: [[I3:%[0-9]+]] = affine.apply [[$MAP7a]](%{{.*}})
     // CHECK-NEXT: store [[V0]], %{{[0-9]+}}{{\[}}[[I3]], [[I3]]{{\]}}
     store %v0, %0[%y3, %y3] : memref<4x4xf32>
   }
@@ -140,8 +140,8 @@ func @compose_affine_maps_2d_tile() {
             ((d0 * s0) + d2)> (%x0, %x1, %x2, %x3)[%c4, %c8]
           %x41 = affine.apply affine_map<(d0, d1, d2, d3)[s0, s1] ->
             ((d1 * s1) + d3)> (%x0, %x1, %x2, %x3)[%c4, %c8]
-          // CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP8]](%{{.*}}, %{{.*}})
-          // CHECK: [[I1:%[0-9]+]] = affine.apply [[MAP8a]](%{{.*}}, %{{.*}})
+          // CHECK: [[I0:%[0-9]+]] = affine.apply [[$MAP8]](%{{.*}}, %{{.*}})
+          // CHECK: [[I1:%[0-9]+]] = affine.apply [[$MAP8a]](%{{.*}}, %{{.*}})
           // CHECK-NEXT: [[L0:%[0-9]+]] = load %{{[0-9]+}}{{\[}}[[I0]], [[I1]]{{\]}}
           %v0 = load %0[%x40, %x41] : memref<16x32xf32>
 
@@ -172,9 +172,9 @@ func @compose_affine_maps_dependent_loads() {
         %x02 = affine.apply affine_map<(d0, d1, d2)[s0, s1] -> (d2 * s0)>
             (%i0, %i1, %i2)[%c3, %c7]
 
-        // CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP9]](%{{.*}})
-        // CHECK: [[I1:%[0-9]+]] = affine.apply [[MAP4b]](%{{.*}})
-        // CHECK: [[I2:%[0-9]+]] = affine.apply [[MAP10]](%{{.*}})
+        // CHECK: [[I0:%[0-9]+]] = affine.apply [[$MAP9]](%{{.*}})
+        // CHECK: [[I1:%[0-9]+]] = affine.apply [[$MAP4b]](%{{.*}})
+        // CHECK: [[I2:%[0-9]+]] = affine.apply [[$MAP10]](%{{.*}})
         // CHECK-NEXT: [[V0:%[0-9]+]] = load %{{[0-9]+}}{{\[}}[[I0]], [[I1]]{{\]}}
         %v0 = load %0[%x00, %x01] : memref<16x32xf32>
 
@@ -191,8 +191,8 @@ func @compose_affine_maps_dependent_loads() {
         %x11 = affine.apply affine_map<(d0, d1)[s0, s1] -> (d1 ceildiv s0)>
            (%x01, %x00)[%c3, %c7]
 
-        // CHECK-NEXT: [[I2A:%[0-9]+]] = affine.apply [[MAP12]](%{{.*}})
-        // CHECK-NEXT: [[I2B:%[0-9]+]] = affine.apply [[MAP11]](%{{.*}})
+        // CHECK-NEXT: [[I2A:%[0-9]+]] = affine.apply [[$MAP12]](%{{.*}})
+        // CHECK-NEXT: [[I2B:%[0-9]+]] = affine.apply [[$MAP11]](%{{.*}})
         // CHECK-NEXT: store [[V0]], %{{[0-9]+}}{{\[}}[[I2A]], [[I2B]]{{\]}}
         store %v0, %0[%x10, %x11] : memref<16x32xf32>
       }
@@ -209,8 +209,8 @@ func @compose_affine_maps_diamond_dependency(%arg0: f32, %arg1: memref<4x4xf32>)
     %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
     %d0 = affine.apply affine_map<(d0, d1) -> (d0 ceildiv 8)> (%b, %c)
     %d1 = affine.apply affine_map<(d0, d1) -> (d1 floordiv 3)> (%b, %c)
-    // CHECK: [[I0:%[0-9]+]] = affine.apply [[MAP13A]](%{{.*}})
-    // CHECK: [[I1:%[0-9]+]] = affine.apply [[MAP13B]](%{{.*}})
+    // CHECK: [[I0:%[0-9]+]] = affine.apply [[$MAP13A]](%{{.*}})
+    // CHECK: [[I1:%[0-9]+]] = affine.apply [[$MAP13B]](%{{.*}})
     // CHECK-NEXT: store %arg0, %arg1{{\[}}[[I0]], [[I1]]{{\]}}
     store %arg0, %arg1[%d0, %d1] : memref<4x4xf32>
   }
@@ -260,7 +260,7 @@ func @partial_fold_map(%arg1: index, %arg2: index) -> index {
   // TODO: Constant fold one index into affine.apply
   %c42 = constant 42 : index
   %2 = affine.apply affine_map<(d0, d1) -> (d0 - d1)> (%arg1, %c42)
-  // CHECK: [[X:%[0-9]+]] = affine.apply [[MAP15]]()[%{{.*}}]
+  // CHECK: [[X:%[0-9]+]] = affine.apply [[$MAP15]]()[%{{.*}}]
   return %2 : index
 }
 
@@ -269,7 +269,7 @@ func @symbolic_composition_a(%arg0: index, %arg1: index) -> index {
   %0 = affine.apply affine_map<(d0) -> (d0 * 4)>(%arg0)
   %1 = affine.apply affine_map<()[s0, s1] -> (8 * s0)>()[%0, %arg0]
   %2 = affine.apply affine_map<()[s0, s1] -> (16 * s1)>()[%arg1, %1]
-  // CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_a]]()[%{{.*}}]
+  // CHECK: %{{.*}} = affine.apply [[$MAP_symbolic_composition_a]]()[%{{.*}}]
   return %2 : index
 }
 
@@ -277,7 +277,7 @@ func @symbolic_composition_a(%arg0: index, %arg1: index) -> index {
 func @symbolic_composition_b(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
   %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
   %1 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %0]
-  // CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_b]]()[%{{.*}}]
+  // CHECK: %{{.*}} = affine.apply [[$MAP_symbolic_composition_b]]()[%{{.*}}]
   return %1 : index
 }
 
@@ -286,7 +286,7 @@ func @symbolic_composition_c(%arg0: index, %arg1: index, %arg2: index, %arg3: in
   %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
   %1 = affine.apply affine_map<(d0) -> (d0)>(%arg1)
   %2 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %1]
-  // CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_c]]()[%{{.*}}, %{{.*}}]
+  // CHECK: %{{.*}} = affine.apply [[$MAP_symbolic_composition_c]]()[%{{.*}}, %{{.*}}]
   return %2 : index
 }
 
@@ -295,7 +295,7 @@ func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: in
   %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
   %1 = affine.apply affine_map<()[s0] -> (s0)>()[%arg1]
   %2 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %1]
-  // CHECK: %{{.*}} = affine.apply [[map_symbolic_composition_d]]()[%{{.*}}, %{{.*}}]
+  // CHECK: %{{.*}} = affine.apply [[$MAP_symbolic_composition_d]]()[%{{.*}}, %{{.*}}]
   return %2 : index
 }
 
@@ -304,7 +304,7 @@ func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: in
 func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
-  // CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_b]]()[%{{.*}}, %{{.*}}]
+  // CHECK: {{.*}} = affine.apply [[$MAP_mix_dims_and_symbols_b]]()[%{{.*}}, %{{.*}}]
 
   return %b : index
 }
@@ -314,7 +314,7 @@ func @mix_dims_and_symbols_c(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
-  // CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_c]]()[%{{.*}}, %{{.*}}]
+  // CHECK: {{.*}} = affine.apply [[$MAP_mix_dims_and_symbols_c]]()[%{{.*}}, %{{.*}}]
   return %c : index
 }
 
@@ -324,7 +324,7 @@ func @mix_dims_and_symbols_d(%arg0: index, %arg1: index) -> index {
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
   %d = affine.apply affine_map<()[s0] -> (s0 ceildiv 8)> ()[%b]
-  // CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_d]]()[%{{.*}}, %{{.*}}]
+  // CHECK: {{.*}} = affine.apply [[$MAP_mix_dims_and_symbols_d]]()[%{{.*}}, %{{.*}}]
   return %d : index
 }
 
@@ -335,7 +335,7 @@ func @mix_dims_and_symbols_e(%arg0: index, %arg1: index) -> index {
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
   %d = affine.apply affine_map<()[s0] -> (s0 ceildiv 8)> ()[%b]
   %e = affine.apply affine_map<(d0) -> (d0 floordiv 3)> (%c)
-  // CHECK: {{.*}} = affine.apply [[map_mix_dims_and_symbols_e]]()[%{{.*}}, %{{.*}}]
+  // CHECK: {{.*}} = affine.apply [[$MAP_mix_dims_and_symbols_e]]()[%{{.*}}, %{{.*}}]
   return %e : index
 }
 
@@ -359,8 +359,8 @@ func @mix_dims_and_symbols_g(%M: index, %N: index) -> (index, index, index) {
   %res2 = affine.apply affine_map<()[s0, s1] -> (s1)>()[%N, %K]
   %res3 = affine.apply affine_map<()[s0, s1] -> (1024)>()[%N, %K]
   // CHECK-DAG: {{.*}} = constant 1024 : index
-  // CHECK-DAG: {{.*}} = affine.apply [[map_symbolic_composition_b]]()[%{{.*}}]
-  // CHECK-DAG: {{.*}} = affine.apply [[map_symbolic_composition_b]]()[%{{.*}}]
+  // CHECK-DAG: {{.*}} = affine.apply [[$MAP_symbolic_composition_b]]()[%{{.*}}]
+  // CHECK-DAG: {{.*}} = affine.apply [[$MAP_symbolic_composition_b]]()[%{{.*}}]
   return %res1, %res2, %res3 : index, index, index
 }
 
@@ -370,7 +370,7 @@ func @symbolic_semi_affine(%M: index, %N: index, %A: memref<?xf32>) {
   affine.for %i0 = 1 to 100 {
     %1 = affine.apply affine_map<()[s0] -> (s0 + 1)> ()[%M]
     %2 = affine.apply affine_map<(d0)[s0] -> (d0 floordiv s0)> (%i0)[%1]
-    // CHECK-DAG: {{.*}} = affine.apply [[symbolic_semi_affine]](%{{.*}})[%{{.*}}]
+    // CHECK-DAG: {{.*}} = affine.apply [[$symbolic_semi_affine]](%{{.*}})[%{{.*}}]
     store %f1, %A[%2] : memref<?xf32>
   }
   return
@@ -378,8 +378,8 @@ func @symbolic_semi_affine(%M: index, %N: index, %A: memref<?xf32>) {
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
-// CHECK: [[MAP1:#map[0-9]+]] = affine_map<()[s0] -> (100, s0)>
+// CHECK: [[$MAP0:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
+// CHECK: [[$MAP1:#map[0-9]+]] = affine_map<()[s0] -> (100, s0)>
 
 // CHECK-LABEL:  func @constant_fold_bounds(%arg0: index) {
 func @constant_fold_bounds(%N : index) {
@@ -403,7 +403,7 @@ func @constant_fold_bounds(%N : index) {
   }
 
   // None of the bounds can be folded.
-  // CHECK: affine.for %{{.*}} = max [[MAP0]]()[%{{.*}}] to min [[MAP1]]()[%{{.*}}] {
+  // CHECK: affine.for %{{.*}} = max [[$MAP0]]()[%{{.*}}] to min [[$MAP1]]()[%{{.*}}] {
   affine.for %k = max affine_map<()[s0] -> (0, s0)> ()[%l] to min affine_map<()[s0] -> (100, s0)> ()[%N] {
     "foo"(%k, %c3) : (index, index) -> ()
   }
@@ -423,7 +423,7 @@ func @fold_empty_loop() {
 
 // -----
 
-// CHECK-DAG: [[SET:#set[0-9]+]] = affine_set<(d0, d1)[s0] : (d0 >= 0, -d0 + 1022 >= 0, d1 >= 0, -d1 + s0 - 2 >= 0)>
+// CHECK-DAG: [[$SET:#set[0-9]+]] = affine_set<(d0, d1)[s0] : (d0 >= 0, -d0 + 1022 >= 0, d1 >= 0, -d1 + s0 - 2 >= 0)>
 
 // CHECK-LABEL: func @canonicalize_affine_if
 // CHECK-SAME: [[M:%.*]]: index,
@@ -433,7 +433,7 @@ func @canonicalize_affine_if(%M : index, %N : index) {
   // Drop unused operand %M, propagate %c1022, and promote %N to symbolic.
   affine.for %i = 0 to 1024 {
     affine.for %j = 0 to %N {
-      // CHECK: affine.if [[SET]](%{{.*}}, %{{.*}}){{\[}}[[N]]{{\]}}
+      // CHECK: affine.if [[$SET]](%{{.*}}, %{{.*}}){{\[}}[[N]]{{\]}}
       affine.if affine_set<(d0, d1, d2, d3)[s0] : (d1 >= 0, d0 - d1 >= 0, d2 >= 0, d3 - d2 - 2 >= 0)> (%c1022, %i, %j, %N)[%M] {
         "foo"() : () -> ()
       }
@@ -445,8 +445,8 @@ func @canonicalize_affine_if(%M : index, %N : index) {
 
 // -----
 
-// CHECK-DAG: [[LBMAP:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
-// CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<()[s0] -> (1024, s0 * 2)>
+// CHECK-DAG: [[$LBMAP:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
+// CHECK-DAG: [[$UBMAP:#map[0-9]+]] = affine_map<()[s0] -> (1024, s0 * 2)>
 
 // CHECK-LABEL: func @canonicalize_bounds
 // CHECK-SAME: [[M:%.*]]: index,
@@ -456,7 +456,7 @@ func @canonicalize_bounds(%M : index, %N : index) {
   %c1024 = constant 1024 : index
   // Drop unused operand %N, drop duplicate operand %M, propagate %c1024, and
   // promote %M to a symbolic one.
-  // CHECK: affine.for %{{.*}} = 0 to min [[UBMAP]](){{\[}}[[M]]{{\]}}
+  // CHECK: affine.for %{{.*}} = 0 to min [[$UBMAP]](){{\[}}[[M]]{{\]}}
   affine.for %i = 0 to min affine_map<(d0, d1, d2, d3) -> (d0, d1 + d2)> (%c1024, %M, %M, %N) {
     "foo"() : () -> ()
   }
@@ -466,7 +466,7 @@ func @canonicalize_bounds(%M : index, %N : index) {
     "foo"() : () -> ()
   }
   // Lower bound canonicalize.
-  // CHECK: affine.for %{{.*}} = max [[LBMAP]](){{\[}}[[N]]{{\]}} to [[M]]
+  // CHECK: affine.for %{{.*}} = max [[$LBMAP]](){{\[}}[[N]]{{\]}} to [[M]]
   affine.for %i = max affine_map<(d0, d1) -> (d0, d1)> (%c0, %N) to %M {
     "foo"() : () -> ()
   }
@@ -553,13 +553,13 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-// CHECK: #[[map:.*]] = affine_map<(d0, d1) -> (d0, d1 - 2)>
+// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d0, d1 - 2)>
 
 func @affine_min(%arg0: index) {
   affine.for %i = 0 to %arg0 {
     affine.for %j = 0 to %arg0 {
       %c2 = constant 2 : index
-      // CHECK: affine.min #[[map]]
+      // CHECK: affine.min #[[$MAP]]
       %0 = affine.min affine_map<(d0,d1,d2)->(d0, d1 - d2)>(%i, %j, %c2)
       "consumer"(%0) : (index) -> ()
     }
@@ -578,7 +578,7 @@ func @affine_min(%arg0: index) {
 #map1 = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
 #map2 = affine_map<(d0)[s0] -> (1024, -d0 + s0)>
 
-// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (1024, s1 * -1024 + s0)>
+// CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (1024, s1 * -1024 + s0)>
 
 // CHECK: func @rep(%[[ARG0:.*]]: index, %[[ARG1:.*]]: index)
 func @rep(%arg0 : index, %arg1 : index) -> index {
@@ -588,7 +588,7 @@ func @rep(%arg0 : index, %arg1 : index) -> index {
   // CHECK-NOT: affine.apply
   %0 = affine.apply #map1(%arg0)[%c1024, %c0]
 
-  // CHECK: affine.min #[[MAP]]()[%[[ARG1]], %[[ARG0]]]
+  // CHECK: affine.min #[[$MAP]]()[%[[ARG1]], %[[ARG0]]]
   %1 = affine.min #map2(%0)[%arg1]
   return %1 : index
 }

diff --git a/mlir/test/Dialect/Affine/loop-tiling.mlir b/mlir/test/Dialect/Affine/loop-tiling.mlir
index 7deb8772f6d0..0e83b3fa16f6 100644
--- a/mlir/test/Dialect/Affine/loop-tiling.mlir
+++ b/mlir/test/Dialect/Affine/loop-tiling.mlir
@@ -4,18 +4,18 @@
 
 // -----
 
-// CHECK-DAG: [[UB:#map[0-9]+]] = affine_map<(d0) -> (d0 + 32)>
-// CHECK-DAG: [[UB_MIN:#map[0-9]+]] = affine_map<(d0) -> (d0 + 32, 50)>
-// CHECK-DAG: [[ID:#map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: [[ID_PLUS_21:#map[0-9]+]] = affine_map<(d0) -> (d0 + 21)>
+// CHECK-DAG: [[$UB:#map[0-9]+]] = affine_map<(d0) -> (d0 + 32)>
+// CHECK-DAG: [[$UB_MIN:#map[0-9]+]] = affine_map<(d0) -> (d0 + 32, 50)>
+// CHECK-DAG: [[$ID:#map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[$ID_PLUS_21:#map[0-9]+]] = affine_map<(d0) -> (d0 + 21)>
 
 // CHECK-LABEL: func @loop_tiling()
 // CHECK-NEXT:   affine.for %{{.*}} = 0 to 256 step 32 {
 // CHECK-NEXT:     affine.for %{{.*}} = 0 to 512 step 32 {
 // CHECK-NEXT:       affine.for %{{.*}} = 0 to 1024 step 32 {
-// CHECK-NEXT:         affine.for %[[I:.*]] = [[ID]](%{{.*}}) to [[UB]](%{{.*}}) {
-// CHECK-NEXT:           affine.for %[[J:.*]] = [[ID]](%{{.*}}) to [[UB]](%{{.*}}) {
-// CHECK-NEXT:             affine.for %[[K:.*]] = [[ID]](%{{.*}}) to [[UB]](%{{.*}}) {
+// CHECK-NEXT:         affine.for %[[I:.*]] = [[$ID]](%{{.*}}) to [[$UB]](%{{.*}}) {
+// CHECK-NEXT:           affine.for %[[J:.*]] = [[$ID]](%{{.*}}) to [[$UB]](%{{.*}}) {
+// CHECK-NEXT:             affine.for %[[K:.*]] = [[$ID]](%{{.*}}) to [[$UB]](%{{.*}}) {
 // CHECK-NEXT:               "test.foo"(%[[I]], %[[J]], %[[K]])
 // CHECK-NEXT:             }
 // CHECK-NEXT:           }
@@ -24,12 +24,12 @@
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
 // CHECK-NEXT:   affine.for %{{.*}} = 0 to 50 step 32 {
-// CHECK-NEXT:     affine.for %[[X:.*]] = [[ID]](%{{.*}}) to min [[UB_MIN]](%{{.*}}) {
+// CHECK-NEXT:     affine.for %[[X:.*]] = [[$ID]](%{{.*}}) to min [[$UB_MIN]](%{{.*}}) {
 // CHECK-NEXT:       "test.bar"(%[[X]], %[[X]])
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
 // CHECK-NEXT: affine.for %[[I:.*]] = 0 to 21 step 32 {
-// CHECK-NEXT:   affine.for %[[Y:.*]] = [[ID]](%[[I]]) to [[ID_PLUS_21]](%[[I]])  {
+// CHECK-NEXT:   affine.for %[[Y:.*]] = [[$ID]](%[[I]]) to [[$ID_PLUS_21]](%[[I]])  {
 // CHECK-NEXT:     "test.foobar"(%[[Y]])
 // CHECK-NEXT:   }
 // CHECK-NEXT: }
@@ -57,10 +57,10 @@ func @loop_tiling() {
 
 // -----
 
-// CHECK-DAG: [[IDENTITY:#map[0-9]+]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: [[LB:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
-// CHECK-DAG: [[UB:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
-// CHECK-DAG: [[UB_INTRA_TILE:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0, 4096 floordiv s1)>
+// CHECK-DAG: [[$IDENTITY:#map[0-9]+]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: [[$LB:#map[0-9]+]] = affine_map<()[s0] -> (0, s0)>
+// CHECK-DAG: [[$UB:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
+// CHECK-DAG: [[$UB_INTRA_TILE:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0, 4096 floordiv s1)>
 
 #lb = affine_map<()[s0] -> (0, s0)>
 #ub = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
@@ -72,8 +72,8 @@ func @loop_max_min_bound(%A : memref<? x i32>, %L : index, %U : index) {
     addi %i, %i : index
   }
   return
-// CHECK:       affine.for %{{.*}} = max [[LB]]()[%{{.*}}] to min [[UB]]()[%{{.*}}, %{{.*}}] step 32 {
-// CHECK-NEXT:    affine.for %[[I:.*]] = [[IDENTITY]](%{{.*}}) to min [[UB_INTRA_TILE]](%{{.*}})[%{{.*}}, %{{.*}}] {
+// CHECK:       affine.for %{{.*}} = max [[$LB]]()[%{{.*}}] to min [[$UB]]()[%{{.*}}, %{{.*}}] step 32 {
+// CHECK-NEXT:    affine.for %[[I:.*]] = [[$IDENTITY]](%{{.*}}) to min [[$UB_INTRA_TILE]](%{{.*}})[%{{.*}}, %{{.*}}] {
 // CHECK-NEXT:      addi %[[I]], %[[I]]
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
@@ -108,7 +108,7 @@ func @simple_matmul(%arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256
 
 // -----
 
-// CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + 32, s0)>
+// CHECK-DAG: [[$UBMAP:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + 32, s0)>
 
 func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %cst = constant 0.000000e+00 : f32
@@ -133,8 +133,8 @@ func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref
 // CHECK:       dim %{{.*}}, %c0 : memref<?x?xf32>
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to %{{.*}} step 32 {
 // CHECK-NEXT:    affine.for %{{.*}} = 0 to %{{.*}} step 32 {
-// CHECK-NEXT:      affine.for %{{.*}} = #map3(%{{.*}}) to min [[UBMAP]](%{{.*}})[%{{.*}}] {
-// CHECK-NEXT:        affine.for %{{.*}} = #map3(%{{.*}}) to min [[UBMAP]](%{{.*}})[%{{.*}}] {
+// CHECK-NEXT:      affine.for %{{.*}} = #map3(%{{.*}}) to min [[$UBMAP]](%{{.*}})[%{{.*}}] {
+// CHECK-NEXT:        affine.for %{{.*}} = #map3(%{{.*}}) to min [[$UBMAP]](%{{.*}})[%{{.*}}] {
 // CHECK-NEXT:          affine.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 // CHECK-NEXT:          affine.for %{{.*}} = 0 to %{{.*}} {
 // CHECK-NEXT:            affine.load
@@ -154,7 +154,7 @@ func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref
 
 // CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0)>
 // CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0 + s1)>
-// CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0 + s1)>
+// CHECK-DAG: [[$UBMAP:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0 + s1)>
 
 func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: index) {
   %c0 = constant 0 : index
@@ -167,7 +167,7 @@ func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: i
 
 // CHECK:       dim %{{.*}}, %c0 : memref<?xf32>
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to [[MAP1]]()[%{{.*}}, %{{.*}}] step 32 {
-// CHECK-NEXT:    affine.for %{{.*}} = [[MAP0]](%{{.*}}) to min [[UBMAP]](%{{.*}})[%{{.*}}, %{{.*}}] {
+// CHECK-NEXT:    affine.for %{{.*}} = [[MAP0]](%{{.*}}) to min [[$UBMAP]](%{{.*}})[%{{.*}}, %{{.*}}] {
 // CHECK-NEXT:      affine.load
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
@@ -183,14 +183,14 @@ func @tile_size_larger_than_trip_count_symbolic_bound(%M: index, %N :  index) {
   return
 }
 
-// CHECK-DAG: #[[ID:.*]] = affine_map<(d0) -> (d0)>
-// CHECK-DAG: #[[ID_PLUS_2:.*]] = affine_map<(d0) -> (d0 + 2)>
-// CHECK-DAG: #[[ID_PLUS_4:.*]] = affine_map<(d0) -> (d0 + 4)>
+// CHECK-DAG: #[[$ID:.*]] = affine_map<(d0) -> (d0)>
+// CHECK-DAG: #[[$ID_PLUS_2:.*]] = affine_map<(d0) -> (d0 + 2)>
+// CHECK-DAG: #[[$ID_PLUS_4:.*]] = affine_map<(d0) -> (d0 + 4)>
 // CHECK: %[[M:.*]]: index, %[[N:.*]]: index
-// CHECK:      affine.for %[[I:.*]] = #[[ID]](%[[M]]) to #[[ID_PLUS_2]](%[[M]]) step 32
-// CHECK-NEXT:   affine.for %[[J:.*]] = #[[ID]](%[[N]]) to #[[ID_PLUS_4]](%[[N]]) step 32
-// CHECK-NEXT:     affine.for %arg4 = #[[ID]](%[[I]]) to #[[ID_PLUS_2]](%[[I]])
-// CHECK-NEXT:       affine.for %arg5 = #[[ID]](%[[J]]) to #[[ID_PLUS_4]](%[[J]])
+// CHECK:      affine.for %[[I:.*]] = #[[$ID]](%[[M]]) to #[[$ID_PLUS_2]](%[[M]]) step 32
+// CHECK-NEXT:   affine.for %[[J:.*]] = #[[$ID]](%[[N]]) to #[[$ID_PLUS_4]](%[[N]]) step 32
+// CHECK-NEXT:     affine.for %arg4 = #[[$ID]](%[[I]]) to #[[$ID_PLUS_2]](%[[I]])
+// CHECK-NEXT:       affine.for %arg5 = #[[$ID]](%[[J]]) to #[[$ID_PLUS_4]](%[[J]])
 // CHECK-NEXT:         "test.foo"
 
 // -----
@@ -222,25 +222,25 @@ func @separate_full_tile_2d(%M : index, %N : index) {
   return
 }
 
-// SEPARATE-DAG: #[[SEP_COND:.*]] = affine_set<(d0, d1)[s0, s1] : (-d0 + s0 - 32 >= 0, -d1 + s1 - 32 >= 0)>
-// SEPARATE-DAG: #[[LB:.*]] = affine_map<(d0) -> (d0)>
-// SEPARATE-DAG: #[[FULL_TILE_UB:.*]] = affine_map<(d0) -> (d0 + 32)>
-// SEPARATE-DAG: #[[PART_TILE_UB:.*]] = affine_map<(d0)[s0] -> (d0 + 32, s0)>
+// SEPARATE-DAG: #[[$SEP_COND:.*]] = affine_set<(d0, d1)[s0, s1] : (-d0 + s0 - 32 >= 0, -d1 + s1 - 32 >= 0)>
+// SEPARATE-DAG: #[[$LB:.*]] = affine_map<(d0) -> (d0)>
+// SEPARATE-DAG: #[[$FULL_TILE_UB:.*]] = affine_map<(d0) -> (d0 + 32)>
+// SEPARATE-DAG: #[[$PART_TILE_UB:.*]] = affine_map<(d0)[s0] -> (d0 + 32, s0)>
 
 // SEPARATE-LABEL: func @separate_full_tile_2d(
 // SEPARATE: %[[M:.*]]: index, %[[N:.*]]: index
 
 // SEPARATE:       affine.for %[[I:.*]] =
 // SEPARATE-NEXT:    affine.for %[[J:.*]] =
-// SEPARATE-NEXT:      affine.if #[[SEP_COND]](%arg2, %arg3)[%arg0, %arg1] {
-// SEPARATE-NEXT:        affine.for %{{.*}} = #[[LB]](%[[I]]) to #[[FULL_TILE_UB]](%[[I]]) {
-// SEPARATE-NEXT:          affine.for %{{.*}} = #[[LB]](%[[J]]) to #[[FULL_TILE_UB]](%[[J]]) {
+// SEPARATE-NEXT:      affine.if #[[$SEP_COND]](%arg2, %arg3)[%arg0, %arg1] {
+// SEPARATE-NEXT:        affine.for %{{.*}} = #[[$LB]](%[[I]]) to #[[$FULL_TILE_UB]](%[[I]]) {
+// SEPARATE-NEXT:          affine.for %{{.*}} = #[[$LB]](%[[J]]) to #[[$FULL_TILE_UB]](%[[J]]) {
 // SEPARATE-NEXT:           "test.foo"
 // SEPARATE-NEXT:          }
 // SEPARATE-NEXT:        }
 // SEPARATE-NEXT:      } else {
-// SEPARATE-NEXT:        affine.for %{{.*}} = #[[LB]](%[[I]]) to min #[[PART_TILE_UB]](%[[I]])[%[[M]]] {
-// SEPARATE-NEXT:          affine.for %{{.*}} = #[[LB]](%[[J]]) to min #[[PART_TILE_UB]](%[[J]])[%[[N]]] {
+// SEPARATE-NEXT:        affine.for %{{.*}} = #[[$LB]](%[[I]]) to min #[[$PART_TILE_UB]](%[[I]])[%[[M]]] {
+// SEPARATE-NEXT:          affine.for %{{.*}} = #[[$LB]](%[[J]]) to min #[[$PART_TILE_UB]](%[[J]])[%[[N]]] {
 // SEPARATE-NEXT:           "test.foo"
 // SEPARATE-NEXT:          }
 // SEPARATE-NEXT:        }
@@ -257,14 +257,14 @@ func @separate_full_tile_1d_max_min(%M : index, %N : index, %P : index, %Q : ind
   return
 }
 
-// SEPARATE-DAG: #[[SEP_COND:.*]] = affine_set<(d0)[s0, s1] : (-d0 + s0 - 32 >= 0, -d0 + s1 - 32 >= 0)>
+// SEPARATE-DAG: #[[$SEP_COND:.*]] = affine_set<(d0)[s0, s1] : (-d0 + s0 - 32 >= 0, -d0 + s1 - 32 >= 0)>
 // SEPARATE-DAG: #[[TILE_LB:.*]] = affine_map<(d0) -> (d0)>
-// SEPARATE-DAG: #[[FULL_TILE_UB:.*]] = affine_map<(d0) -> (d0 + 32)>
+// SEPARATE-DAG: #[[$FULL_TILE_UB:.*]] = affine_map<(d0) -> (d0 + 32)>
 // SEPARATE-DAG: #[[PARTIAL_TILE_UB:.*]] = affine_map<(d0, d1, d2) -> (d2 + 32, d0, d1)>
 
 // SEPARATE:         affine.for %arg4
-// SEPARATE-NEXT:      affine.if #[[SEP_COND]](%arg4)[%arg2, %arg3] {
-// SEPARATE-NEXT:        affine.for %arg5 = #[[TILE_LB]](%arg4) to #[[FULL_TILE_UB]](%arg4) {
+// SEPARATE-NEXT:      affine.if #[[$SEP_COND]](%arg4)[%arg2, %arg3] {
+// SEPARATE-NEXT:        affine.for %arg5 = #[[TILE_LB]](%arg4) to #[[$FULL_TILE_UB]](%arg4) {
 // SEPARATE-NEXT:        }
 // SEPARATE-NEXT:      } else {
 // SEPARATE-NEXT:        affine.for %arg5 = #[[TILE_LB]](%arg4) to min #[[PARTIAL_TILE_UB]](%arg2, %arg3, %arg4) {

diff --git a/mlir/test/Dialect/Affine/loop-unswitch.mlir b/mlir/test/Dialect/Affine/loop-unswitch.mlir
index 59eb5ff70ff0..0080e379427f 100644
--- a/mlir/test/Dialect/Affine/loop-unswitch.mlir
+++ b/mlir/test/Dialect/Affine/loop-unswitch.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -split-input-file -test-affine-loop-unswitch | FileCheck %s
 
-// CHECK-DAG: #[[SET:.*]] = affine_set<(d0) : (d0 - 2 >= 0)>
+// CHECK-DAG: #[[$SET:.*]] = affine_set<(d0) : (d0 - 2 >= 0)>
 
 // CHECK-LABEL: func @if_else_imperfect
 func @if_else_imperfect(%A : memref<100xi32>, %B : memref<100xi32>, %v : i32) {
@@ -22,7 +22,7 @@ func @external()
 
 // CHECK:       affine.for %[[I:.*]] = 0 to 100 {
 // CHECK-NEXT:    affine.store %{{.*}}, %[[A]][%[[I]]]
-// CHECK-NEXT:    affine.if #[[SET]](%[[I]]) {
+// CHECK-NEXT:    affine.if #[[$SET]](%[[I]]) {
 // CHECK-NEXT:      affine.for %[[J:.*]] = 0 to 100 {
 // CHECK-NEXT:        affine.store %{{.*}}, %[[A]][%[[J]]]
 // CHECK-NEXT:        affine.store %{{.*}}, %[[B]][%[[J]]]

diff --git a/mlir/test/Dialect/Affine/ops.mlir b/mlir/test/Dialect/Affine/ops.mlir
index 832c0aa477a4..2faa2c60f0ee 100644
--- a/mlir/test/Dialect/Affine/ops.mlir
+++ b/mlir/test/Dialect/Affine/ops.mlir
@@ -61,33 +61,33 @@ func @affine_terminator() {
 
 // -----
 
-// CHECK-DAG: #[[MAP0:map[0-9]+]] = affine_map<(d0)[s0] -> (1000, d0 + 512, s0)>
-// CHECK-DAG: #[[MAP1:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 - d1, s0 + 512)>
-// CHECK-DAG: #[[MAP2:map[0-9]+]] = affine_map<()[s0, s1] -> (s0 - s1, 11)>
-// CHECK-DAG: #[[MAP3:map[0-9]+]] = affine_map<() -> (77, 78, 79)>
+// CHECK-DAG: #[[$MAP0:map[0-9]+]] = affine_map<(d0)[s0] -> (1000, d0 + 512, s0)>
+// CHECK-DAG: #[[$MAP1:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 - d1, s0 + 512)>
+// CHECK-DAG: #[[$MAP2:map[0-9]+]] = affine_map<()[s0, s1] -> (s0 - s1, 11)>
+// CHECK-DAG: #[[$MAP3:map[0-9]+]] = affine_map<() -> (77, 78, 79)>
 
 // CHECK-LABEL: @affine_min
 func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
-  // CHECK: affine.min #[[MAP0]](%arg0)[%arg1]
+  // CHECK: affine.min #[[$MAP0]](%arg0)[%arg1]
   %0 = affine.min affine_map<(d0)[s0] -> (1000, d0 + 512, s0)> (%arg0)[%arg1]
-  // CHECK: affine.min #[[MAP1]](%arg0, %arg1)[%arg2]
+  // CHECK: affine.min #[[$MAP1]](%arg0, %arg1)[%arg2]
   %1 = affine.min affine_map<(d0, d1)[s0] -> (d0 - d1, s0 + 512)> (%arg0, %arg1)[%arg2]
-  // CHECK: affine.min #[[MAP2]]()[%arg1, %arg2]
+  // CHECK: affine.min #[[$MAP2]]()[%arg1, %arg2]
   %2 = affine.min affine_map<()[s0, s1] -> (s0 - s1, 11)> ()[%arg1, %arg2]
-  // CHECK: affine.min #[[MAP3]]()
+  // CHECK: affine.min #[[$MAP3]]()
   %3 = affine.min affine_map<()[] -> (77, 78, 79)> ()[]
   return
 }
 
 // CHECK-LABEL: @affine_max
 func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
-  // CHECK: affine.max #[[MAP0]](%arg0)[%arg1]
+  // CHECK: affine.max #[[$MAP0]](%arg0)[%arg1]
   %0 = affine.max affine_map<(d0)[s0] -> (1000, d0 + 512, s0)> (%arg0)[%arg1]
-  // CHECK: affine.max #[[MAP1]](%arg0, %arg1)[%arg2]
+  // CHECK: affine.max #[[$MAP1]](%arg0, %arg1)[%arg2]
   %1 = affine.max affine_map<(d0, d1)[s0] -> (d0 - d1, s0 + 512)> (%arg0, %arg1)[%arg2]
-  // CHECK: affine.max #[[MAP2]]()[%arg1, %arg2]
+  // CHECK: affine.max #[[$MAP2]]()[%arg1, %arg2]
   %2 = affine.max affine_map<()[s0, s1] -> (s0 - s1, 11)> ()[%arg1, %arg2]
-  // CHECK: affine.max #[[MAP3]]()
+  // CHECK: affine.max #[[$MAP3]]()
   %3 = affine.max affine_map<()[] -> (77, 78, 79)> ()[]
   return
 }

diff  --git a/mlir/test/Dialect/Affine/simplify-affine-structures.mlir b/mlir/test/Dialect/Affine/simplify-affine-structures.mlir
index 9637ba3f4146..91f153f1fb21 100644
--- a/mlir/test/Dialect/Affine/simplify-affine-structures.mlir
+++ b/mlir/test/Dialect/Affine/simplify-affine-structures.mlir
@@ -1,8 +1,8 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -simplify-affine-structures | FileCheck %s
 
-// CHECK-DAG: #[[SET_EMPTY:.*]] = affine_set<() : (1 == 0)>
-// CHECK-DAG: #[[SET_2D:.*]] = affine_set<(d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0)>
-// CHECK-DAG: #[[SET_7_11:.*]] = affine_set<(d0, d1) : (d0 * 7 + d1 * 5 + 88 == 0, d0 * 5 - d1 * 11 + 60 == 0, d0 * 11 + d1 * 7 - 24 == 0, d0 * 7 + d1 * 5 + 88 == 0)>
+// CHECK-DAG: #[[$SET_EMPTY:.*]] = affine_set<() : (1 == 0)>
+// CHECK-DAG: #[[$SET_2D:.*]] = affine_set<(d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0)>
+// CHECK-DAG: #[[$SET_7_11:.*]] = affine_set<(d0, d1) : (d0 * 7 + d1 * 5 + 88 == 0, d0 * 5 - d1 * 11 + 60 == 0, d0 * 11 + d1 * 7 - 24 == 0, d0 * 7 + d1 * 5 + 88 == 0)>
 
 // An external function that we will use in bodies to avoid DCE.
 func @external() -> ()
@@ -11,7 +11,7 @@ func @external() -> ()
 func @test_gaussian_elimination_empty_set0() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1) : (2 == 0)>(%arg0, %arg1) {
         call @external() : () -> ()
       }
@@ -24,7 +24,7 @@ func @test_gaussian_elimination_empty_set0() {
 func @test_gaussian_elimination_empty_set1() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1) : (1 >= 0, -1 >= 0)> (%arg0, %arg1) {
         call @external() : () -> ()
       }
@@ -37,7 +37,7 @@ func @test_gaussian_elimination_empty_set1() {
 func @test_gaussian_elimination_non_empty_set2() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
-      // CHECK: #[[SET_2D]](%arg0, %arg1)
+      // CHECK: #[[$SET_2D]](%arg0, %arg1)
       affine.if affine_set<(d0, d1) : (d0 - 100 == 0, d1 - 10 == 0, -d0 + 100 >= 0, d1 >= 0, d1 + 101 >= 0)>(%arg0, %arg1) {
         call @external() : () -> ()
       }
@@ -52,7 +52,7 @@ func @test_gaussian_elimination_empty_set3() {
   %c11 = constant 11 : index
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
-      // CHECK: #[[SET_EMPTY]]()
+      // CHECK: #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1)[s0, s1] : (d0 - s0 == 0, d0 + s0 == 0, s0 - 1 == 0)>(%arg0, %arg1)[%c7, %c11] {
         call @external() : () -> ()
       }
@@ -73,7 +73,7 @@ func @test_gaussian_elimination_non_empty_set4() {
   %c11 = constant 11 : index
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
-      // CHECK: #[[SET_7_11]](%arg0, %arg1)
+      // CHECK: #[[$SET_7_11]](%arg0, %arg1)
       affine.if #set_2d_non_empty(%arg0, %arg1)[%c7, %c11] {
         call @external() : () -> ()
       }
@@ -95,7 +95,7 @@ func @test_gaussian_elimination_empty_set5() {
   %c11 = constant 11 : index
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
-      // CHECK: #[[SET_EMPTY]]()
+      // CHECK: #[[$SET_EMPTY]]()
       affine.if #set_2d_empty(%arg0, %arg1)[%c7, %c11] {
         call @external() : () -> ()
       }
@@ -162,33 +162,33 @@ func @test_fuzz_explosion(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : i
 func @test_empty_set(%N : index) {
   affine.for %i = 0 to 10 {
     affine.for %j = 0 to 10 {
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1) : (d0 - d1 >= 0, d1 - d0 - 1 >= 0)>(%i, %j) {
         "foo"() : () -> ()
       }
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0) : (d0 >= 0, -d0 - 1 >= 0)>(%i) {
         "bar"() : () -> ()
       }
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0) : (d0 >= 0, -d0 - 1 >= 0)>(%i) {
         "foo"() : () -> ()
       }
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0)[s0, s1] : (d0 >= 0, -d0 + s0 - 1 >= 0, -s0 >= 0)>(%i)[%N, %N] {
         "bar"() : () -> ()
       }
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       // The set below implies d0 = d1; so d1 >= d0, but d0 >= d1 + 1.
       affine.if affine_set<(d0, d1, d2) : (d0 - d1 == 0, d2 - d0 >= 0, d0 - d1 - 1 >= 0)>(%i, %j, %N) {
         "foo"() : () -> ()
       }
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       // The set below has rational solutions but no integer solutions; GCD test catches it.
       affine.if affine_set<(d0, d1) : (d0*2 -d1*2 - 1 == 0, d0 >= 0, -d0 + 100 >= 0, d1 >= 0, -d1 + 100 >= 0)>(%i, %j) {
         "foo"() : () -> ()
       }
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1) : (d1 == 0, d0 - 1 >= 0, - d0 - 1 >= 0)>(%i, %j) {
         "foo"() : () -> ()
       }
@@ -198,12 +198,12 @@ func @test_empty_set(%N : index) {
   affine.for %k = 0 to 10 {
     affine.for %l = 0 to 10 {
       // Empty because no multiple of 8 lies between 4 and 7.
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0) : (8*d0 - 4 >= 0, -8*d0 + 7 >= 0)>(%k) {
         "foo"() : () -> ()
       }
       // Same as above but with equalities and inequalities.
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1) : (d0 - 4*d1 == 0, 4*d1 - 5 >= 0, -4*d1 + 7 >= 0)>(%k, %l) {
         "foo"() : () -> ()
       }
@@ -211,12 +211,12 @@ func @test_empty_set(%N : index) {
       // 8*d1 here is a multiple of 4, and so can't lie between 9 and 11. GCD
       // tightening will tighten constraints to 4*d0 + 8*d1 >= 12 and 4*d0 +
       // 8*d1 <= 8; hence infeasible.
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1) : (4*d0 + 8*d1 - 9 >= 0, -4*d0 - 8*d1 + 11 >= 0)>(%k, %l) {
         "foo"() : () -> ()
       }
       // Same as above but with equalities added into the mix.
-      // CHECK: affine.if #[[SET_EMPTY]]()
+      // CHECK: affine.if #[[$SET_EMPTY]]()
       affine.if affine_set<(d0, d1, d2) : (d0 - 4*d2 == 0, d0 + 8*d1 - 9 >= 0, -d0 - 8*d1 + 11 >= 0)>(%k, %k, %l) {
         "foo"() : () -> ()
       }
@@ -224,7 +224,7 @@ func @test_empty_set(%N : index) {
   }
 
   affine.for %m = 0 to 10 {
-    // CHECK: affine.if #[[SET_EMPTY]]()
+    // CHECK: affine.if #[[$SET_EMPTY]]()
     affine.if affine_set<(d0) : (d0 mod 2 - 3 == 0)> (%m) {
       "foo"() : () -> ()
     }
@@ -238,21 +238,21 @@ func @test_empty_set(%N : index) {
 // An external function that we will use in bodies to avoid DCE.
 func @external() -> ()
 
-// CHECK-DAG: #[[SET:.*]] = affine_set<()[s0] : (s0 >= 0, -s0 + 50 >= 0)
-// CHECK-DAG: #[[EMPTY_SET:.*]] = affine_set<() : (1 == 0)
-// CHECK-DAG: #[[UNIV_SET:.*]] = affine_set<() : (0 == 0)
+// CHECK-DAG: #[[$SET:.*]] = affine_set<()[s0] : (s0 >= 0, -s0 + 50 >= 0)
+// CHECK-DAG: #[[$EMPTY_SET:.*]] = affine_set<() : (1 == 0)
+// CHECK-DAG: #[[$UNIV_SET:.*]] = affine_set<() : (0 == 0)
 
 // CHECK-LABEL: func @simplify_set
 func @simplify_set(%a : index, %b : index) {
-  // CHECK: affine.if #[[SET]]
+  // CHECK: affine.if #[[$SET]]
   affine.if affine_set<(d0, d1) : (d0 - d1 + d1 + d0 >= 0, 2 >= 0, d0 >= 0, -d0 + 50 >= 0, -d0 + 100 >= 0)>(%a, %b) {
     call @external() : () -> ()
   }
-  // CHECK: affine.if #[[EMPTY_SET]]
+  // CHECK: affine.if #[[$EMPTY_SET]]
   affine.if affine_set<(d0, d1) : (d0 mod 2 - 1 == 0, d0 - 2 * (d0 floordiv 2) == 0)>(%a, %b) {
     call @external() : () -> ()
   }
-  // CHECK: affine.if #[[UNIV_SET]]
+  // CHECK: affine.if #[[$UNIV_SET]]
   affine.if affine_set<(d0, d1) : (1 >= 0, 3 >= 0)>(%a, %b) {
     call @external() : () -> ()
   }

diff  --git a/mlir/test/Dialect/Affine/unroll-jam.mlir b/mlir/test/Dialect/Affine/unroll-jam.mlir
index b1513be7b937..9b0991882780 100644
--- a/mlir/test/Dialect/Affine/unroll-jam.mlir
+++ b/mlir/test/Dialect/Affine/unroll-jam.mlir
@@ -1,15 +1,15 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -affine-loop-unroll-jam="unroll-jam-factor=2" | FileCheck %s
 // RUN: mlir-opt -allow-unregistered-dialect %s -affine-loop-unroll-jam="unroll-jam-factor=4" | FileCheck --check-prefix=UJAM-FOUR %s
 
-// CHECK-DAG: [[MAP_PLUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
-// CHECK-DAG: [[MAP_DIV_OFFSET:#map[0-9]+]] = affine_map<()[s0] -> (((s0 - 1) floordiv 2) * 2 + 1)>
-// CHECK-DAG: [[MAP_MULTI_RES:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s0 floordiv 2) * 2, (s1 floordiv 2) * 2, 1024)>
-// CHECK-DAG: [[MAP_SYM_UB:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0, s1, 1024)>
+// CHECK-DAG: [[$MAP_PLUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// CHECK-DAG: [[$MAP_DIV_OFFSET:#map[0-9]+]] = affine_map<()[s0] -> (((s0 - 1) floordiv 2) * 2 + 1)>
+// CHECK-DAG: [[$MAP_MULTI_RES:#map[0-9]+]] = affine_map<()[s0, s1] -> ((s0 floordiv 2) * 2, (s1 floordiv 2) * 2, 1024)>
+// CHECK-DAG: [[$MAP_SYM_UB:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0, s1, 1024)>
 
-// UJAM-FOUR-DAG: [[UBMAP:#map[0-9]+]] = affine_map<()[s0] -> (s0 + 8)>
-// UJAM-FOUR-DAG: [[MAP_PLUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
-// UJAM-FOUR-DAG: [[MAP_PLUS_2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
-// UJAM-FOUR-DAG: [[MAP_PLUS_3:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// UJAM-FOUR-DAG: [[$UBMAP:#map[0-9]+]] = affine_map<()[s0] -> (s0 + 8)>
+// UJAM-FOUR-DAG: [[$MAP_PLUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// UJAM-FOUR-DAG: [[$MAP_PLUS_2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
+// UJAM-FOUR-DAG: [[$MAP_PLUS_3:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
 
 // CHECK-LABEL: func @unroll_jam_imperfect_nest() {
 func @unroll_jam_imperfect_nest() {
@@ -25,17 +25,17 @@ func @unroll_jam_imperfect_nest() {
 }
 // CHECK:      affine.for [[IV0:%arg[0-9]+]] = 0 to 100 step 2 {
 // CHECK-NEXT:   [[RES1:%[0-9]+]] = "addi32"([[IV0]], [[IV0]])
-// CHECK-NEXT:   [[INC:%[0-9]+]] = affine.apply [[MAP_PLUS_1]]([[IV0]])
+// CHECK-NEXT:   [[INC:%[0-9]+]] = affine.apply [[$MAP_PLUS_1]]([[IV0]])
 // CHECK-NEXT:   [[RES2:%[0-9]+]] = "addi32"([[INC]], [[INC]])
 // CHECK-NEXT:   affine.for %{{.*}} = 0 to 17 {
 // CHECK-NEXT:     [[RES3:%[0-9]+]] = "addi32"([[IV0]], [[IV0]])
 // CHECK-NEXT:     "addi32"([[RES3]], [[RES3]]) : (i32, i32) -> i32
-// CHECK-NEXT:     [[INC1:%[0-9]+]] = affine.apply [[MAP_PLUS_1]]([[IV0]])
+// CHECK-NEXT:     [[INC1:%[0-9]+]] = affine.apply [[$MAP_PLUS_1]]([[IV0]])
 // CHECK-NEXT:     [[RES4:%[0-9]+]] = "addi32"([[INC1]], [[INC1]])
 // CHECK-NEXT:     "addi32"([[RES4]], [[RES4]]) : (i32, i32) -> i32
 // CHECK-NEXT:   }
 // CHECK:        "foo"([[IV0]], [[RES1]])
-// CHECK-NEXT:   affine.apply [[MAP_PLUS_1]]([[IV0]])
+// CHECK-NEXT:   affine.apply [[$MAP_PLUS_1]]([[IV0]])
 // CHECK-NEXT:   "foo"({{.*}}, [[RES2]])
 // CHECK:      }
 // Cleanup loop (single iteration).
@@ -50,14 +50,14 @@ func @unroll_jam_imperfect_nest() {
 // CHECK-LABEL: func @loop_nest_unknown_count_1
 // CHECK-SAME: [[N:arg[0-9]+]]: index
 func @loop_nest_unknown_count_1(%N : index) {
-  // CHECK-NEXT: affine.for %{{.*}} = 1 to [[MAP_DIV_OFFSET]]()[%[[N]]] step 2 {
+  // CHECK-NEXT: affine.for %{{.*}} = 1 to [[$MAP_DIV_OFFSET]]()[%[[N]]] step 2 {
   // CHECK-NEXT:   affine.for %{{.*}} = 1 to 100 {
   // CHECK-NEXT:     "foo"() : () -> i32
   // CHECK-NEXT:     "foo"() : () -> i32
   // CHECK-NEXT:   }
   // CHECK-NEXT: }
   // A cleanup loop should be generated here.
-  // CHECK-NEXT: affine.for %{{.*}} = [[MAP_DIV_OFFSET]]()[%[[N]]] to %[[N]] {
+  // CHECK-NEXT: affine.for %{{.*}} = [[$MAP_DIV_OFFSET]]()[%[[N]]] to %[[N]] {
   // CHECK-NEXT:   affine.for %{{.*}} = 1 to 100 {
   // CHECK-NEXT:     "foo"() : () -> i32
   // CHECK-NEXT:   }
@@ -73,19 +73,19 @@ func @loop_nest_unknown_count_1(%N : index) {
 // UJAM-FOUR-LABEL: func @loop_nest_unknown_count_2
 // UJAM-FOUR-SAME: %[[N:arg[0-9]+]]: index
 func @loop_nest_unknown_count_2(%N : index) {
-  // UJAM-FOUR-NEXT: affine.for [[IV0:%arg[0-9]+]] = %[[N]] to  [[UBMAP]]()[%[[N]]] step 4 {
+  // UJAM-FOUR-NEXT: affine.for [[IV0:%arg[0-9]+]] = %[[N]] to  [[$UBMAP]]()[%[[N]]] step 4 {
   // UJAM-FOUR-NEXT:   affine.for [[IV1:%arg[0-9]+]] = 1 to 100 {
   // UJAM-FOUR-NEXT:     "foo"([[IV0]])
-  // UJAM-FOUR-NEXT:     [[IV_PLUS_1:%[0-9]+]] = affine.apply [[MAP_PLUS_1]]([[IV0]])
+  // UJAM-FOUR-NEXT:     [[IV_PLUS_1:%[0-9]+]] = affine.apply [[$MAP_PLUS_1]]([[IV0]])
   // UJAM-FOUR-NEXT:     "foo"([[IV_PLUS_1]])
-  // UJAM-FOUR-NEXT:     [[IV_PLUS_2:%[0-9]+]] = affine.apply [[MAP_PLUS_2]]([[IV0]])
+  // UJAM-FOUR-NEXT:     [[IV_PLUS_2:%[0-9]+]] = affine.apply [[$MAP_PLUS_2]]([[IV0]])
   // UJAM-FOUR-NEXT:     "foo"([[IV_PLUS_2]])
-  // UJAM-FOUR-NEXT:     [[IV_PLUS_3:%[0-9]+]] = affine.apply [[MAP_PLUS_3]]([[IV0]])
+  // UJAM-FOUR-NEXT:     [[IV_PLUS_3:%[0-9]+]] = affine.apply [[$MAP_PLUS_3]]([[IV0]])
   // UJAM-FOUR-NEXT:     "foo"([[IV_PLUS_3]])
   // UJAM-FOUR-NEXT:   }
   // UJAM-FOUR-NEXT: }
   // The cleanup loop is a single iteration one and is promoted.
-  // UJAM-FOUR-NEXT: [[RES:%[0-9]+]] = affine.apply [[UBMAP]]()[%[[N]]]
+  // UJAM-FOUR-NEXT: [[RES:%[0-9]+]] = affine.apply [[$UBMAP]]()[%[[N]]]
   // UJAM-FOUR-NEXT: affine.for [[IV0]] = 1 to 100 {
   // UJAM-FOUR-NEXT:   "foo"([[RES]])
   // UJAM-FOUR-NEXT: }
@@ -109,14 +109,14 @@ func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index)
   }
   return
 }
-// CHECK-NEXT:  affine.for [[IV0:%arg[0-9]+]] = 0 to min [[MAP_MULTI_RES]]()[%[[M]], %[[N]]] step 2 {
+// CHECK-NEXT:  affine.for [[IV0:%arg[0-9]+]] = 0 to min [[$MAP_MULTI_RES]]()[%[[M]], %[[N]]] step 2 {
 // CHECK-NEXT:    affine.for [[IV1:%arg[0-9]+]] = 0 to %[[K]] {
 // CHECK-NEXT:      "foo"([[IV0]], [[IV1]])
-// CHECK-NEXT:      [[RES:%[0-9]+]] = affine.apply [[MAP_PLUS_1]]([[IV0]])
+// CHECK-NEXT:      [[RES:%[0-9]+]] = affine.apply [[$MAP_PLUS_1]]([[IV0]])
 // CHECK-NEXT:      "foo"([[RES]], [[IV1]])
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
-// CHECK-NEXT:  affine.for [[IV0]] = max [[MAP_MULTI_RES]]()[%[[M]], %[[N]]] to min [[MAP_SYM_UB]]()[%[[M]], %[[N]]] {
+// CHECK-NEXT:  affine.for [[IV0]] = max [[$MAP_MULTI_RES]]()[%[[M]], %[[N]]] to min [[$MAP_SYM_UB]]()[%[[M]], %[[N]]] {
 // CHECK-NEXT:    affine.for [[IV1]] = 0 to %[[K]] {
 // CHECK-NEXT:      "foo"([[IV0]], [[IV1]])
 // CHECK-NEXT:    }

diff  --git a/mlir/test/Dialect/Affine/unroll.mlir b/mlir/test/Dialect/Affine/unroll.mlir
index a7d65d154544..6a92101bf83a 100644
--- a/mlir/test/Dialect/Affine/unroll.mlir
+++ b/mlir/test/Dialect/Affine/unroll.mlir
@@ -3,25 +3,25 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -affine-loop-unroll="unroll-factor=4" | FileCheck %s --check-prefix UNROLL-BY-4
 // RUN: mlir-opt -allow-unregistered-dialect %s -affine-loop-unroll="unroll-factor=1" | FileCheck %s --check-prefix UNROLL-BY-1
 
-// UNROLL-FULL-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
-// UNROLL-FULL-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
-// UNROLL-FULL-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
-// UNROLL-FULL-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0) -> (d0 + 4)>
-// UNROLL-FULL-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
-// UNROLL-FULL-DAG: [[MAP5:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3)>
-// UNROLL-FULL-DAG: [[MAP6:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0 + 1)>
-
-// SHORT-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
-
-// UNROLL-BY-4-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
-// UNROLL-BY-4-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
-// UNROLL-BY-4-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
-// UNROLL-BY-4-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
-// UNROLL-BY-4-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3)>
-// UNROLL-BY-4-DAG: [[MAP5:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0 + 1)>
-// UNROLL-BY-4-DAG: [[MAP6:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
-// UNROLL-BY-4-DAG: [[MAP11:#map[0-9]+]] = affine_map<(d0) -> (d0)>
-// UNROLL-BY-4-DAG: [[MAP_TRIP_COUNT_MULTIPLE_FOUR:#map[0-9]+]] = affine_map<()[s0, s1, s2] -> (s0 + ((-s0 + s1) floordiv 4) * 4, s0 + ((-s0 + s2) floordiv 4) * 4, s0 + ((-s0) floordiv 4) * 4 + 1024)>
+// UNROLL-FULL-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// UNROLL-FULL-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
+// UNROLL-FULL-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// UNROLL-FULL-DAG: [[$MAP3:#map[0-9]+]] = affine_map<(d0) -> (d0 + 4)>
+// UNROLL-FULL-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
+// UNROLL-FULL-DAG: [[$MAP5:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3)>
+// UNROLL-FULL-DAG: [[$MAP6:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0 + 1)>
+
+// SHORT-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+
+// UNROLL-BY-4-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// UNROLL-BY-4-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 + 2)>
+// UNROLL-BY-4-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
+// UNROLL-BY-4-DAG: [[$MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
+// UNROLL-BY-4-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 3)>
+// UNROLL-BY-4-DAG: [[$MAP5:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0 + 1)>
+// UNROLL-BY-4-DAG: [[$MAP6:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
+// UNROLL-BY-4-DAG: [[$MAP11:#map[0-9]+]] = affine_map<(d0) -> (d0)>
+// UNROLL-BY-4-DAG: [[$MAP_TRIP_COUNT_MULTIPLE_FOUR:#map[0-9]+]] = affine_map<()[s0, s1, s2] -> (s0 + ((-s0 + s1) floordiv 4) * 4, s0 + ((-s0 + s2) floordiv 4) * 4, s0 + ((-s0) floordiv 4) * 4 + 1024)>
 
 // UNROLL-FULL-LABEL: func @loop_nest_simplest() {
 func @loop_nest_simplest() {
@@ -44,11 +44,11 @@ func @loop_nest_simple_iv_use() {
   // UNROLL-FULL-NEXT: affine.for %arg0 = 0 to 100 step 2 {
   affine.for %i = 0 to 100 step 2 {
     // UNROLL-FULL: %0 = "addi32"(%c0, %c0) : (index, index) -> i32
-    // UNROLL-FULL: %1 = affine.apply [[MAP0]](%c0)
+    // UNROLL-FULL: %1 = affine.apply [[$MAP0]](%c0)
     // UNROLL-FULL-NEXT:  %2 = "addi32"(%1, %1) : (index, index) -> i32
-    // UNROLL-FULL: %3 = affine.apply [[MAP1]](%c0)
+    // UNROLL-FULL: %3 = affine.apply [[$MAP1]](%c0)
     // UNROLL-FULL-NEXT:  %4 = "addi32"(%3, %3) : (index, index) -> i32
-    // UNROLL-FULL: %5 = affine.apply [[MAP2]](%c0)
+    // UNROLL-FULL: %5 = affine.apply [[$MAP2]](%c0)
     // UNROLL-FULL-NEXT:  %6 = "addi32"(%5, %5) : (index, index) -> i32
     affine.for %j = 0 to 4 {
       %x = "addi32"(%j, %j) : (index, index) -> i32
@@ -65,16 +65,16 @@ func @loop_nest_body_def_use() {
   affine.for %i = 0 to 100 step 2 {
     // UNROLL-FULL: %c0_0 = constant 0 : index
     %c0 = constant 0 : index
-    // UNROLL-FULL:      %0 = affine.apply [[MAP0]](%c0)
+    // UNROLL-FULL:      %0 = affine.apply [[$MAP0]](%c0)
     // UNROLL-FULL-NEXT: %1 = "addi32"(%0, %c0_0) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %2 = affine.apply [[MAP0]](%c0)
-    // UNROLL-FULL-NEXT: %3 = affine.apply [[MAP0]](%2)
+    // UNROLL-FULL-NEXT: %2 = affine.apply [[$MAP0]](%c0)
+    // UNROLL-FULL-NEXT: %3 = affine.apply [[$MAP0]](%2)
     // UNROLL-FULL-NEXT: %4 = "addi32"(%3, %c0_0) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %5 = affine.apply [[MAP1]](%c0)
-    // UNROLL-FULL-NEXT: %6 = affine.apply [[MAP0]](%5)
+    // UNROLL-FULL-NEXT: %5 = affine.apply [[$MAP1]](%c0)
+    // UNROLL-FULL-NEXT: %6 = affine.apply [[$MAP0]](%5)
     // UNROLL-FULL-NEXT: %7 = "addi32"(%6, %c0_0) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %8 = affine.apply [[MAP2]](%c0)
-    // UNROLL-FULL-NEXT: %9 = affine.apply [[MAP0]](%8)
+    // UNROLL-FULL-NEXT: %8 = affine.apply [[$MAP2]](%c0)
+    // UNROLL-FULL-NEXT: %9 = affine.apply [[$MAP0]](%8)
     // UNROLL-FULL-NEXT: %10 = "addi32"(%9, %c0_0) : (index, index) -> index
     affine.for %j = 0 to 4 {
       %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
@@ -91,23 +91,23 @@ func @loop_nest_strided() {
   // UNROLL-FULL-NEXT: %c2_0 = constant 2 : index
   // UNROLL-FULL-NEXT: affine.for %arg0 = 0 to 100 {
   affine.for %i = 0 to 100 {
-    // UNROLL-FULL:      %0 = affine.apply [[MAP0]](%c2_0)
+    // UNROLL-FULL:      %0 = affine.apply [[$MAP0]](%c2_0)
     // UNROLL-FULL-NEXT: %1 = "addi32"(%0, %0) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %2 = affine.apply [[MAP1]](%c2_0)
-    // UNROLL-FULL-NEXT: %3 = affine.apply [[MAP0]](%2)
+    // UNROLL-FULL-NEXT: %2 = affine.apply [[$MAP1]](%c2_0)
+    // UNROLL-FULL-NEXT: %3 = affine.apply [[$MAP0]](%2)
     // UNROLL-FULL-NEXT: %4 = "addi32"(%3, %3) : (index, index) -> index
     affine.for %j = 2 to 6 step 2 {
       %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
         (index) -> (index)
       %y = "addi32"(%x, %x) : (index, index) -> index
     }
-    // UNROLL-FULL:      %5 = affine.apply [[MAP0]](%c2)
+    // UNROLL-FULL:      %5 = affine.apply [[$MAP0]](%c2)
     // UNROLL-FULL-NEXT: %6 = "addi32"(%5, %5) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %7 = affine.apply [[MAP1]](%c2)
-    // UNROLL-FULL-NEXT: %8 = affine.apply [[MAP0]](%7)
+    // UNROLL-FULL-NEXT: %7 = affine.apply [[$MAP1]](%c2)
+    // UNROLL-FULL-NEXT: %8 = affine.apply [[$MAP0]](%7)
     // UNROLL-FULL-NEXT: %9 = "addi32"(%8, %8) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %10 = affine.apply [[MAP3]](%c2)
-    // UNROLL-FULL-NEXT: %11 = affine.apply [[MAP0]](%10)
+    // UNROLL-FULL-NEXT: %10 = affine.apply [[$MAP3]](%c2)
+    // UNROLL-FULL-NEXT: %11 = affine.apply [[$MAP0]](%10)
     // UNROLL-FULL-NEXT: %12 = "addi32"(%11, %11) : (index, index) -> index
     affine.for %k = 2 to 7 step 2 {
       %z = "affine.apply" (%k) { map = affine_map<(d0) -> (d0 + 1)> } :
@@ -123,7 +123,7 @@ func @loop_nest_multiple_results() {
   // UNROLL-FULL: %c0 = constant 0 : index
   // UNROLL-FULL-NEXT: affine.for %arg0 = 0 to 100 {
   affine.for %i = 0 to 100 {
-    // UNROLL-FULL: %0 = affine.apply [[MAP4]](%arg0, %c0)
+    // UNROLL-FULL: %0 = affine.apply [[$MAP4]](%arg0, %c0)
     // UNROLL-FULL-NEXT: %1 = "addi32"(%0, %0) : (index, index) -> index
     // UNROLL-FULL-NEXT: %2 = affine.apply #map{{.*}}(%arg0, %c0)
     // UNROLL-FULL-NEXT: %3:2 = "fma"(%2, %0, %0) : (index, index, index) -> (index, index)
@@ -153,19 +153,19 @@ func @loop_nest_seq_imperfect(%a : memref<128x128xf32>) {
   affine.for %i = 0 to 100 {
     // UNROLL-FULL: %0 = "vld"(%arg1) : (index) -> i32
     %ld = "vld"(%i) : (index) -> i32
-    // UNROLL-FULL: %1 = affine.apply [[MAP0]](%c0)
+    // UNROLL-FULL: %1 = affine.apply [[$MAP0]](%c0)
     // UNROLL-FULL-NEXT: %2 = "vmulf"(%c0, %1) : (index, index) -> index
     // UNROLL-FULL-NEXT: %3 = "vaddf"(%2, %2) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %4 = affine.apply [[MAP0]](%c0)
-    // UNROLL-FULL-NEXT: %5 = affine.apply [[MAP0]](%4)
+    // UNROLL-FULL-NEXT: %4 = affine.apply [[$MAP0]](%c0)
+    // UNROLL-FULL-NEXT: %5 = affine.apply [[$MAP0]](%4)
     // UNROLL-FULL-NEXT: %6 = "vmulf"(%4, %5) : (index, index) -> index
     // UNROLL-FULL-NEXT: %7 = "vaddf"(%6, %6) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %8 = affine.apply [[MAP1]](%c0)
-    // UNROLL-FULL-NEXT: %9 = affine.apply [[MAP0]](%8)
+    // UNROLL-FULL-NEXT: %8 = affine.apply [[$MAP1]](%c0)
+    // UNROLL-FULL-NEXT: %9 = affine.apply [[$MAP0]](%8)
     // UNROLL-FULL-NEXT: %10 = "vmulf"(%8, %9) : (index, index) -> index
     // UNROLL-FULL-NEXT: %11 = "vaddf"(%10, %10) : (index, index) -> index
-    // UNROLL-FULL-NEXT: %12 = affine.apply [[MAP2]](%c0)
-    // UNROLL-FULL-NEXT: %13 = affine.apply [[MAP0]](%12)
+    // UNROLL-FULL-NEXT: %12 = affine.apply [[$MAP2]](%c0)
+    // UNROLL-FULL-NEXT: %13 = affine.apply [[$MAP0]](%12)
     // UNROLL-FULL-NEXT: %14 = "vmulf"(%12, %13) : (index, index) -> index
     // UNROLL-FULL-NEXT: %15 = "vaddf"(%14, %14) : (index, index) -> index
     affine.for %j = 0 to 4 {
@@ -186,16 +186,16 @@ func @loop_nest_seq_imperfect(%a : memref<128x128xf32>) {
 func @loop_nest_seq_multiple() {
   // UNROLL-FULL: c0 = constant 0 : index
   // UNROLL-FULL-NEXT: %c0_0 = constant 0 : index
-  // UNROLL-FULL-NEXT: %0 = affine.apply [[MAP0]](%c0_0)
+  // UNROLL-FULL-NEXT: %0 = affine.apply [[$MAP0]](%c0_0)
   // UNROLL-FULL-NEXT: "mul"(%0, %0) : (index, index) -> ()
-  // UNROLL-FULL-NEXT: %1 = affine.apply [[MAP0]](%c0_0)
-  // UNROLL-FULL-NEXT: %2 = affine.apply [[MAP0]](%1)
+  // UNROLL-FULL-NEXT: %1 = affine.apply [[$MAP0]](%c0_0)
+  // UNROLL-FULL-NEXT: %2 = affine.apply [[$MAP0]](%1)
   // UNROLL-FULL-NEXT: "mul"(%2, %2) : (index, index) -> ()
-  // UNROLL-FULL-NEXT: %3 = affine.apply [[MAP1]](%c0_0)
-  // UNROLL-FULL-NEXT: %4 = affine.apply [[MAP0]](%3)
+  // UNROLL-FULL-NEXT: %3 = affine.apply [[$MAP1]](%c0_0)
+  // UNROLL-FULL-NEXT: %4 = affine.apply [[$MAP0]](%3)
   // UNROLL-FULL-NEXT: "mul"(%4, %4) : (index, index) -> ()
-  // UNROLL-FULL-NEXT: %5 = affine.apply [[MAP2]](%c0_0)
-  // UNROLL-FULL-NEXT: %6 = affine.apply [[MAP0]](%5)
+  // UNROLL-FULL-NEXT: %5 = affine.apply [[$MAP2]](%c0_0)
+  // UNROLL-FULL-NEXT: %6 = affine.apply [[$MAP0]](%5)
   // UNROLL-FULL-NEXT: "mul"(%6, %6) : (index, index) -> ()
   affine.for %j = 0 to 4 {
     %x = "affine.apply" (%j) { map = affine_map<(d0) -> (d0 + 1)> } :
@@ -207,17 +207,17 @@ func @loop_nest_seq_multiple() {
   %k = constant 99 : index
   // UNROLL-FULL: affine.for %arg0 = 0 to 100 step 2 {
   affine.for %m = 0 to 100 step 2 {
-    // UNROLL-FULL: %7 = affine.apply [[MAP0]](%c0)
-    // UNROLL-FULL-NEXT: %8 = affine.apply [[MAP6]](%c0)[%c99]
-    // UNROLL-FULL-NEXT: %9 = affine.apply [[MAP0]](%c0)
-    // UNROLL-FULL-NEXT: %10 = affine.apply [[MAP0]](%9)
-    // UNROLL-FULL-NEXT: %11 = affine.apply [[MAP6]](%9)[%c99]
-    // UNROLL-FULL-NEXT: %12 = affine.apply [[MAP1]](%c0)
-    // UNROLL-FULL-NEXT: %13 = affine.apply [[MAP0]](%12)
-    // UNROLL-FULL-NEXT: %14 = affine.apply [[MAP6]](%12)[%c99]
-    // UNROLL-FULL-NEXT: %15 = affine.apply [[MAP2]](%c0)
-    // UNROLL-FULL-NEXT: %16 = affine.apply [[MAP0]](%15)
-    // UNROLL-FULL-NEXT: %17 = affine.apply [[MAP6]](%15)[%c99]
+    // UNROLL-FULL: %7 = affine.apply [[$MAP0]](%c0)
+    // UNROLL-FULL-NEXT: %8 = affine.apply [[$MAP6]](%c0)[%c99]
+    // UNROLL-FULL-NEXT: %9 = affine.apply [[$MAP0]](%c0)
+    // UNROLL-FULL-NEXT: %10 = affine.apply [[$MAP0]](%9)
+    // UNROLL-FULL-NEXT: %11 = affine.apply [[$MAP6]](%9)[%c99]
+    // UNROLL-FULL-NEXT: %12 = affine.apply [[$MAP1]](%c0)
+    // UNROLL-FULL-NEXT: %13 = affine.apply [[$MAP0]](%12)
+    // UNROLL-FULL-NEXT: %14 = affine.apply [[$MAP6]](%12)[%c99]
+    // UNROLL-FULL-NEXT: %15 = affine.apply [[$MAP2]](%c0)
+    // UNROLL-FULL-NEXT: %16 = affine.apply [[$MAP0]](%15)
+    // UNROLL-FULL-NEXT: %17 = affine.apply [[$MAP6]](%15)[%c99]
     affine.for %n = 0 to 4 {
       %y = "affine.apply" (%n) { map = affine_map<(d0) -> (d0 + 1)> } :
         (index) -> (index)
@@ -243,11 +243,11 @@ func @loop_nest_unroll_full() {
 // SHORT-LABEL: func @loop_nest_outer_unroll() {
 func @loop_nest_outer_unroll() {
   // SHORT:      affine.for %arg0 = 0 to 4 {
-  // SHORT-NEXT:   %0 = affine.apply [[MAP0]](%arg0)
+  // SHORT-NEXT:   %0 = affine.apply [[$MAP0]](%arg0)
   // SHORT-NEXT:   %1 = "addi32"(%0, %0) : (index, index) -> index
   // SHORT-NEXT: }
   // SHORT-NEXT: affine.for %arg0 = 0 to 4 {
-  // SHORT-NEXT:   %0 = affine.apply [[MAP0]](%arg0)
+  // SHORT-NEXT:   %0 = affine.apply [[$MAP0]](%arg0)
   // SHORT-NEXT:   %1 = "addi32"(%0, %0) : (index, index) -> index
   // SHORT-NEXT: }
   affine.for %i = 0 to 2 {
@@ -411,11 +411,11 @@ func @loop_nest_single_iteration_after_unroll(%N: index) {
   // UNROLL-BY-4: affine.for %arg1 = 0 to %arg0 {
   affine.for %i = 0 to %N {
     // UNROLL-BY-4: %0 = "addi32"(%c0, %c0) : (index, index) -> i32
-    // UNROLL-BY-4-NEXT: %1 = affine.apply [[MAP0]](%c0)
+    // UNROLL-BY-4-NEXT: %1 = affine.apply [[$MAP0]](%c0)
     // UNROLL-BY-4-NEXT: %2 = "addi32"(%1, %1) : (index, index) -> i32
-    // UNROLL-BY-4-NEXT: %3 = affine.apply [[MAP1]](%c0)
+    // UNROLL-BY-4-NEXT: %3 = affine.apply [[$MAP1]](%c0)
     // UNROLL-BY-4-NEXT: %4 = "addi32"(%3, %3) : (index, index) -> i32
-    // UNROLL-BY-4-NEXT: %5 = affine.apply [[MAP2]](%c0)
+    // UNROLL-BY-4-NEXT: %5 = affine.apply [[$MAP2]](%c0)
     // UNROLL-BY-4-NEXT: %6 = "addi32"(%5, %5) : (index, index) -> i32
     // UNROLL-BY-4-NEXT: %7 = "addi32"(%c4, %c4) : (index, index) -> i32
     // UNROLL-BY-4-NOT: for
@@ -452,7 +452,7 @@ func @loop_nest_operand1() {
 // UNROLL-BY-4-LABEL: func @loop_nest_operand2() {
 func @loop_nest_operand2() {
 // UNROLL-BY-4:      affine.for %arg0 = 0 to 100 step 2 {
-// UNROLL-BY-4-NEXT:   affine.for %arg1 = [[MAP11]](%arg0) to #map{{[0-9]+}}(%arg0) step 4 {
+// UNROLL-BY-4-NEXT:   affine.for %arg1 = [[$MAP11]](%arg0) to #map{{[0-9]+}}(%arg0) step 4 {
 // UNROLL-BY-4-NEXT:     %0 = "foo"() : () -> i32
 // UNROLL-BY-4-NEXT:     %1 = "foo"() : () -> i32
 // UNROLL-BY-4-NEXT:     %2 = "foo"() : () -> i32
@@ -474,7 +474,7 @@ func @loop_nest_operand2() {
 func @loop_nest_operand3() {
   // UNROLL-BY-4: affine.for %arg0 = 0 to 100 step 2 {
   affine.for %i = 0 to 100 step 2 {
-    // UNROLL-BY-4: affine.for %arg1 = [[MAP11]](%arg0) to #map{{[0-9]+}}(%arg0) step 4 {
+    // UNROLL-BY-4: affine.for %arg1 = [[$MAP11]](%arg0) to #map{{[0-9]+}}(%arg0) step 4 {
     // UNROLL-BY-4-NEXT: %1 = "foo"() : () -> i32
     // UNROLL-BY-4-NEXT: %2 = "foo"() : () -> i32
     // UNROLL-BY-4-NEXT: %3 = "foo"() : () -> i32
@@ -538,13 +538,13 @@ func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index)
   }
   return
 }
-// CHECK-NEXT:  affine.for %arg0 = %arg0 to min [[MAP_TRIP_COUNT_MULTIPLE_FOUR]]()[%arg0, %arg1, %arg2] step 4 {
+// CHECK-NEXT:  affine.for %arg0 = %arg0 to min [[$MAP_TRIP_COUNT_MULTIPLE_FOUR]]()[%arg0, %arg1, %arg2] step 4 {
 // CHECK-NEXT:    "foo"() : () -> ()
 // CHECK-NEXT:    "foo"() : () -> ()
 // CHECK-NEXT:    "foo"() : () -> ()
 // CHECK-NEXT:    "foo"() : () -> ()
 // CHECK-NEXT:  }
-// CHECK-NEXT:  affine.for %arg1 = max [[MAP_TRIP_COUNT_MULTIPLE_FOUR]]()[%arg0, %arg1, %arg2] to min #map28()[%arg1, %arg2] {
+// CHECK-NEXT:  affine.for %arg1 = max [[$MAP_TRIP_COUNT_MULTIPLE_FOUR]]()[%arg0, %arg1, %arg2] to min #map28()[%arg1, %arg2] {
 // CHECK-NEXT:    "foo"() : () -> ()
 // CHECK-NEXT:  }
 // CHECK-NEXT:  return

diff  --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir
index 4e5fc43f5611..5d20b0b4e0a9 100644
--- a/mlir/test/Dialect/Linalg/affine.mlir
+++ b/mlir/test/Dialect/Linalg/affine.mlir
@@ -3,11 +3,11 @@
 // Test that we can lower all the way to LLVM without crashing, don't check results here.
 // RUN: mlir-opt %s -convert-linalg-to-affine-loops -convert-linalg-to-llvm -o=/dev/null 2>&1
 
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
-// CHECK-DAG: #[[stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
+// CHECK-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
 
-// CHECK-DAG: #[[clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>
+// CHECK-DAG: #[[$clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>
 
 func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c0 = constant 0 : index
@@ -42,18 +42,18 @@ func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
 }
 
 // CHECK-LABEL: func @conv_view3(
-//  CHECK: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
-//       CHECK:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[strided3D]]>
+//  CHECK: %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
+//       CHECK:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECK:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECK:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECK:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECK:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECK:   affine.for %{{.*}} = 0 to %[[B]] {
 //       CHECK:     affine.for %{{.*}} = 0 to %[[X0]] {
 //       CHECK:       affine.for %{{.*}} = 0 to %[[K]] {
 //       CHECK:         affine.for %{{.*}} = 0 to %[[Q]] {
 //       CHECK:           affine.for %{{.*}} = 0 to %[[Z0]] {
-//       CHECK:            %[[SUM:.*]] = affine.apply #[[stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECK:            %[[SUM:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
 //       No padding needed here; only affine loads.
 //       CHECK-NEXT:       affine.load
 //       CHECK-NEXT:       affine.load
@@ -86,8 +86,8 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 //       CHECK:               affine.for %{{.*}} = 0 to %[[Z1]] {
 //       CHECK:                 %[[SUM0:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
 //       CHECK:                 %[[SUM1:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
-//       CHECK:                 %[[IDX:.*]] = affine.max #[[clampMinMap]](%[[SUM0]])
-//       CHECK:                 %[[IDY:.*]] = affine.max #[[clampMinMap]](%[[SUM1]])
+//       CHECK:                 %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[SUM0]])
+//       CHECK:                 %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[SUM1]])
 // Padded conv involves an affine.max in the memory access and this is not
 // allowed by affine.load. Use std.load in such cases.
 //       CHECK:                 %{{.*}} = load %{{.*}}[%{{.*}}, %[[IDX]], %[[IDY]], %{{.*}}] : memref<?x?x?x?xf32>

diff  --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 00d0aaa89d4f..914c7b7ce345 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -34,10 +34,10 @@ func @collapsing_tensor_reshapes(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?xf3
        tensor<?x?x?xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: collapsing_tensor_reshapes
-//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]]]
+//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
 //   CHECK-NOT:   linalg.tensor_reshape
 
 // -----
@@ -55,10 +55,10 @@ func @expanding_tensor_reshapes(%arg0 : tensor<?x?xf32>) -> tensor<?x?x?x?x?xf32
        tensor<?x?x?xf32> into tensor<?x?x?x?x?xf32>
   return %1 : tensor<?x?x?x?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: expanding_tensor_reshapes
-//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]]]
+//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
 //   CHECK-NOT:   linalg.tensor_reshape
 
 // -----
@@ -76,10 +76,10 @@ func @collapsing_memref_reshapes(%arg0 : memref<?x?x?x?x?xf32>) -> memref<?x?xf3
        memref<?x?x?xf32> into memref<?x?xf32>
   return %1 : memref<?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: collapsing_memref_reshapes
-//       CHECK:   linalg.reshape %{{.*}} [#[[MAP0]], #[[MAP1]]]
+//       CHECK:   linalg.reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
 //   CHECK-NOT:   linalg.reshape
 
 // -----
@@ -97,10 +97,10 @@ func @expanding_memref_reshapes(%arg0 : memref<?x?xf32>) -> memref<?x?x?x?x?xf32
        memref<?x?x?xf32> into memref<?x?x?x?x?xf32>
   return %1 : memref<?x?x?x?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 // CHECK-LABEL: expanding_memref_reshapes
-//       CHECK:   linalg.reshape %{{.*}} [#[[MAP0]], #[[MAP1]]]
+//       CHECK:   linalg.reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
 //   CHECK-NOT:   linalg.reshape
 
 // -----

diff  --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
index a5169c35d18d..c4ea61a9501d 100644
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -21,19 +21,19 @@ func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>) -> tensor<?x1x?x1x?xf32>
        } : tensor<?x1x?xf32> -> tensor<?x1x?x1x?xf32>
   return %0 : tensor<?x1x?x1x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
-//   CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
-//   CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-//   CHECK-DAG: #[[MAP4:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-//   CHECK-DAG: #[[MAP5:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>
-//   CHECK-DAG: #[[MAP6:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
+//   CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
+//   CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+//   CHECK-DAG: #[[$MAP4:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP5:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>
+//   CHECK-DAG: #[[$MAP6:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4)>
 // CHECK-LABEL: func @drop_one_trip_loops
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
 //       CHECK: linalg.generic
-//  CHECK-SAME:   indexing_maps = [#[[MAP2]], #[[MAP3]]]
+//  CHECK-SAME:   indexing_maps = [#[[$MAP2]], #[[$MAP3]]]
 //  CHECK-SAME:   iterator_types = ["parallel", "parallel", "parallel"]
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP4]], #[[MAP5]], #[[MAP6]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP4]], #[[$MAP5]], #[[$MAP6]]]
 
 // -----
 
@@ -55,11 +55,11 @@ func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
        } : tensor<1x1xf32> -> tensor<1x1xf32>
   return %0 : tensor<1x1xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<() -> ()>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> ()>
 // CHECK-LABEL: func @drop_all_loops
 //       CHECK:   linalg.tensor_reshape %{{.*}} []
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP0]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
 //  CHECK-SAME:     iterator_types = []
 
 // -----
@@ -84,11 +84,11 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>) -> tensor<5xf32> {
   }  : tensor<1x5xf32> -> tensor<5xf32>
   return %0 : tensor<5xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: func @leading_dim_1_canonicalization
-//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[MAP0]]]
+//       CHECK:   linalg.tensor_reshape %{{.*}} [#[[$MAP0]]]
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP1]], #[[MAP1]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP1]], #[[$MAP1]]]
 //  CHECK-SAME:     iterator_types = ["parallel"]
 
 // -----
@@ -120,13 +120,13 @@ func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) -> tensor<5x5
        } : tensor<1x5xf32>, tensor<5x1xf32> -> tensor<5x5xf32>
   return %2 : tensor<5x5xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
-//   CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
+//   CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: func @broadcast_test
 //   CHECK-NOT:   linalg.tensor_reshape
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]]
 //  CHECK-SAME:     iterator_types = ["parallel", "parallel"]
 //   CHECK-NOT:   linalg.tensor_reshape
 
@@ -153,13 +153,13 @@ func @broadcast_scalar(%arg0 : tensor<1x1xf32>) -> tensor<?x?xf32>
 	} : tensor<1x1xf32> -> tensor<?x?xf32>
    return %0 : tensor<?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> ()>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> ()>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: func @broadcast_scalar
 //  CHECK-SAME:   %[[ARG0:.*]]: tensor<1x1xf32>
 //       CHECK:   %[[A:.*]] = linalg.tensor_reshape %[[ARG0]] []
 //  CHECK-SAME:     tensor<1x1xf32> into tensor<f32>
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
 //  CHECK-SAME:     iterator_types = ["parallel", "parallel"]
 //  CHECK-SAME:     %[[A]]

diff  --git a/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir b/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
index a977ab4cadd9..14c0797bc952 100644
--- a/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
+++ b/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
@@ -21,11 +21,11 @@ func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>) -> tensor<?x1x?x1x?xf32>
        } : tensor<?x1x?xf32> -> tensor<?x1x?x1x?xf32>
   return %0 : tensor<?x1x?x1x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d2)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d1, 0, d2)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d2)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d1, 0, d2)>
 // CHECK-LABEL: func @drop_one_trip_loops
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
 //  CHECK-SAME:     iterator_types = ["parallel", "parallel", "parallel"]
 
 // -----
@@ -48,10 +48,10 @@ func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
        } : tensor<1x1xf32> -> tensor<1x1xf32>
   return %0 : tensor<1x1xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<() -> (0, 0)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> (0, 0)>
 // CHECK-LABEL: func @drop_all_loops
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP0]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
 //  CHECK-SAME:     iterator_types = []
 
 // -----
@@ -74,10 +74,10 @@ func @drop_all_loops(%arg0 : memref<1x1xf32>, %arg1 : memref<1x1xf32>)
     } : memref<1x1xf32>, memref<1x1xf32>
   return
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<() -> (0, 0)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> (0, 0)>
 // CHECK-LABEL: func @drop_all_loops
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP0]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
 //  CHECK-SAME:     iterator_types = []
 
 // -----
@@ -102,9 +102,9 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>) -> tensor<5xf32> {
   }  : tensor<1x5xf32> -> tensor<5xf32>
   return %0 : tensor<5xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0) -> (0, d0)>
-//   CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0) -> (d0)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0) -> (0, d0)>
+//   CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (d0)>
 // CHECK-LABEL: func @leading_dim_1_canonicalization
 //       CHECK:   linalg.generic
-//  CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:     indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
 //  CHECK-SAME:     iterator_types = ["parallel"]

diff  --git a/mlir/test/Dialect/Linalg/fusion-tensor.mlir b/mlir/test/Dialect/Linalg/fusion-tensor.mlir
index 5f1f90707a6e..5c49a59227c1 100644
--- a/mlir/test/Dialect/Linalg/fusion-tensor.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-tensor.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -linalg-fusion-for-tensor-ops -split-input-file | FileCheck %s
 
-// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @add_mul_fusion
@@ -12,7 +12,7 @@ func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : te
       linalg.yield %1 : f32
   }: tensor<?x?xf32>, tensor<?x?xf32> -> tensor<?x?xf32>
   // CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
-  // CHECK-SAME: indexing_maps = {{\[}}[[MAP0]], [[MAP0]], [[MAP0]], [[MAP0]]{{\]}}
+  // CHECK-SAME: indexing_maps = {{\[}}[[$MAP0]], [[$MAP0]], [[$MAP0]], [[$MAP0]]{{\]}}
   %2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
     // CHECK: ^{{[a-zA-Z0-9_]*}}
     // CHECK-SAME: [[ARG0:%[a-zA-Z0-9_]*]]
@@ -31,8 +31,8 @@ func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : te
 
 // -----
 
-// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG: [[MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
+// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: [[$MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> (d1, d0)>
 
@@ -45,7 +45,7 @@ func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
       linalg.yield %1 : f32
   }: tensor<?x?xf32>, tensor<?x?xf32> -> tensor<?x?xf32>
   // CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
-  // CHECK-SAME: indexing_maps = {{\[}}[[MAP0]], [[MAP1]], [[MAP0]], [[MAP0]]{{\]}}
+  // CHECK-SAME: indexing_maps = {{\[}}[[$MAP0]], [[$MAP1]], [[$MAP0]], [[$MAP0]]{{\]}}
   %2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
     ^bb0(%arg5: f32, %arg6: f32):       // no predecessors
       %3 = mulf %arg5, %arg6 : f32
@@ -56,8 +56,8 @@ func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
 
 // -----
 
-// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG: [[MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
+// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: [[$MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> (d1, d0)>
 
@@ -70,7 +70,7 @@ func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
       linalg.yield %1 : f32
   }: tensor<?x?xf32>, tensor<?x?xf32> -> tensor<?x?xf32>
   // CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
-  // CHECK-SAME: indexing_maps = {{\[}}[[MAP1]], [[MAP0]], [[MAP0]], [[MAP0]]{{\]}}
+  // CHECK-SAME: indexing_maps = {{\[}}[[$MAP1]], [[$MAP0]], [[$MAP0]], [[$MAP0]]{{\]}}
   %2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
     ^bb0(%arg5: f32, %arg6: f32):       // no predecessors
       %3 = mulf %arg5, %arg6 : f32
@@ -81,8 +81,8 @@ func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
 
 // -----
 
-// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG: [[MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0)>
+// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: [[$MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0)>
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> (d0)>
 #map2 = affine_map<(d0) -> (d0)>
@@ -96,7 +96,7 @@ func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg
       linalg.yield %1 : f32
   }: tensor<?xf32>, tensor<?xf32> -> tensor<?xf32>
   // CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
-  // CHECK-SAME: indexing_maps = {{\[}}[[MAP1]], [[MAP1]], [[MAP0]], [[MAP0]]
+  // CHECK-SAME: indexing_maps = {{\[}}[[$MAP1]], [[$MAP1]], [[$MAP0]], [[$MAP0]]
   %2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
     ^bb0(%arg5: f32, %arg6: f32):       // no predecessors
       %3 = mulf %arg5, %arg6 : f32
@@ -107,7 +107,7 @@ func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg
 
 // -----
 
-// CHECK: #[[MAP0:.*]] = affine_map<() -> ()>
+// CHECK: #[[$MAP0:.*]] = affine_map<() -> ()>
 #map0 = affine_map<() -> ()>
 
 // CHECK-LABEL: @add_mul_scalar_fusion
@@ -132,8 +132,8 @@ func @add_mul_scalar_fusion(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tenso
 
 // -----
 
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
@@ -160,13 +160,13 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
 //       CHECK: linalg.generic
 //  CHECK-SAME:   args_in = 2
 //  CHECK-SAME:   args_out = 1
-//  CHECK-SAME:   indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP1]]]
+//  CHECK-SAME:   indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]]
 //   CHECK-NOT: linalg.generic
 
 // -----
 
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
@@ -192,7 +192,7 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
 //       CHECK: linalg.generic
 //  CHECK-SAME:   args_in = 2
 //  CHECK-SAME:   args_out = 1
-//  CHECK-SAME:   indexing_maps = [#[[MAP0]], #[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:   indexing_maps = [#[[$MAP0]], #[[$MAP0]], #[[$MAP1]]]
 //   CHECK-NOT: linalg.generic
 
 // -----
@@ -238,7 +238,7 @@ func @generic_op_constant_fusion(%arg0 : tensor<5x?x?xf32>) -> tensor<5x?x?xf32>
        }: tensor<5xf32>, tensor<5x?x?xf32> -> tensor<5x?x?xf32>
   return %1 : tensor<5x?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-LABEL: func @generic_op_constant_fusion
 //       CHECK:   %[[CST:.*]] = constant {{.*}} : f32
 //       CHECK:   linalg.generic
@@ -266,7 +266,7 @@ func @generic_op_zero_dim_constant_fusion(%arg0 : tensor<5x?x?xf32>)
        }: tensor<f32>, tensor<5x?x?xf32> -> tensor<5x?x?xf32>
   return %1 : tensor<5x?x?xf32>
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-LABEL: func @generic_op_zero_dim_constant_fusion
 //       CHECK:   %[[CST:.*]] = constant {{.*}} : f32
 //       CHECK:   linalg.generic
@@ -303,13 +303,13 @@ func @generic_op_indexed_generic_op_fusion(%arg0: tensor<?x?xi32>,
     }: tensor<?x?xi32> -> tensor<?x?xi32>
   return
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: func @generic_op_indexed_generic_op_fusion
 //   CHECK-NOT: linalg.generic
 //       CHECK: linalg.indexed_generic
 // CHECK-SAME:    args_in = 2
 // CHECK-SAME:    args_out = 1
-// CHECK-SAME:    indexing_maps = [#[[MAP0]], #[[MAP0]], #[[MAP0]]]
+// CHECK-SAME:    indexing_maps = [#[[$MAP0]], #[[$MAP0]], #[[$MAP0]]]
 //      CHECK: ^{{[a-zA-Z0-9_]*}}
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: index
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: index
@@ -350,12 +350,12 @@ func @indexed_generic_op_generic_op_fusion(%arg0: tensor<?x?xi32>,
   } : tensor<?x?xi32>, tensor<?x?xi32> -> tensor<?x?xi32>
   return
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: func @indexed_generic_op_generic_op_fusion
 //       CHECK: linalg.indexed_generic
 // CHECK-SAME:    args_in = 2
 // CHECK-SAME:    args_out = 1
-// CHECK-SAME:    indexing_maps = [#[[MAP0]], #[[MAP0]], #[[MAP0]]]
+// CHECK-SAME:    indexing_maps = [#[[$MAP0]], #[[$MAP0]], #[[$MAP0]]]
 //      CHECK: ^{{[a-zA-Z0-9_]*}}
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: index
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: index
@@ -401,12 +401,12 @@ func @indexed_generic_op_fusion(%arg0: tensor<?x?xi32>) {
     }: tensor<?x?xi32> -> tensor<?x?xi32>
   return
 }
-//   CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+//   CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: func @indexed_generic_op_fusion
 //       CHECK: linalg.indexed_generic
 // CHECK-SAME:    args_in = 1
 // CHECK-SAME:    args_out = 1
-// CHECK-SAME:    indexing_maps = [#[[MAP0]], #[[MAP0]]]
+// CHECK-SAME:    indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
 //      CHECK: ^{{[a-zA-Z0-9_]*}}
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: index
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: index
@@ -424,8 +424,8 @@ func @indexed_generic_op_fusion(%arg0: tensor<?x?xi32>) {
 
 // -----
 
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
@@ -452,13 +452,13 @@ func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
 //       CHECK: linalg.indexed_generic
 //  CHECK-SAME:   args_in = 1
 //  CHECK-SAME:   args_out = 1
-//  CHECK-SAME:   indexing_maps = [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:   indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
 //   CHECK-NOT: linalg.tensor_reshape
 
 // -----
 
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @indexed_generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
@@ -484,5 +484,5 @@ func @indexed_generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
 //       CHECK: linalg.indexed_generic
 //  CHECK-SAME:   args_in = 1
 //  CHECK-SAME:   args_out = 1
-//  CHECK-SAME:   indexing_maps = [#[[MAP0]], #[[MAP1]]]
+//  CHECK-SAME:   indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
 //   CHECK-NOT: linalg.tensor_reshape

diff  --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
index b6020df5fb77..da6bd26ef1de 100644
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -49,7 +49,7 @@ func @f1(%A: memref<?x?xf32, offset: 0, strides: [?, 1]>,
 
 // -----
 
-// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
 func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -91,9 +91,9 @@ func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f2
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK-DAG:  %[[C_0:.*]] = dim %[[C]], %c0{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[C_1:.*]] = dim %[[C]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[C_0:.*]] = dim %[[C]], %c0{{[_0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK-DAG:  %[[C_1:.*]] = dim %[[C]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[C_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
 // CHECK:      scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
@@ -143,9 +143,9 @@ func @f3(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f3
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK:  %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[D_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
 // CHECK:      scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
@@ -199,9 +199,9 @@ func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f4
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK:  %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[C_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
 // CHECK:      scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
@@ -212,7 +212,7 @@ func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 
 // -----
 
-// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
 func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -258,9 +258,9 @@ func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f5
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK-DAG:  %[[B_1:.*]] = dim %[[B]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[B_1:.*]] = dim %[[B]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK-DAG:  %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
 // CHECK:  scf.for %[[I:.*]] = %{{.*}} to %[[D_0]] step %{{.*}} {
 // CHECK:    scf.for %[[J:.*]] = %{{.*}} to %[[B_1]] step %{{.*}} {
 // CHECK:      scf.for %[[K:.*]] = %{{.*}} to %[[D_1]] step %{{.*}} {
@@ -409,11 +409,11 @@ func @f7(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f7
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK:  %[[A_0:.*]] = dim %[[A]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[A_1:.*]] = dim %[[A]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[A_0:.*]] = dim %[[A]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[A_1:.*]] = dim %[[A]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
+// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
 // CHECK:  linalg.matmul(%[[A]], %[[C]], %[[E]])
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[A_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {

diff  --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index a78a2c1c934d..d1e86ba361c6 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -4,27 +4,27 @@
 // Test that we can lower all the way to LLVM without crashing, don't check results here.
 // RUN: mlir-opt %s -convert-linalg-to-loops -convert-linalg-to-llvm -o=/dev/null 2>&1
 
-// CHECKLOOP-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECKLOOP-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECKLOOP-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECKLOOP-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
-// CHECKLOOP-DAG: #[[clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>
+// CHECKLOOP-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECKLOOP-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECKLOOP-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECKLOOP-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// CHECKLOOP-DAG: #[[$clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>
 
-// CHECKLOOP-DAG: #[[Stride1Dilation1:.*]] = affine_map<(d0, d1) -> (d0  + d1)>
-// CHECKLOOP-DAG: #[[Stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
-// CHECKLOOP-DAG: #[[Stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
-// CHECKLOOP-DAG: #[[Stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
+// CHECKLOOP-DAG: #[[$stride1Dilation1:.*]] = affine_map<(d0, d1) -> (d0  + d1)>
+// CHECKLOOP-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
+// CHECKLOOP-DAG: #[[$stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
+// CHECKLOOP-DAG: #[[$stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
 
-// CHECKPARALLEL-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECKPARALLEL-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECKPARALLEL-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECKPARALLEL-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
-// CHECKPARALLEL-DAG: #[[clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>
+// CHECKPARALLEL-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECKPARALLEL-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECKPARALLEL-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECKPARALLEL-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// CHECKPARALLEL-DAG: #[[$clampMinMap:.*]] = affine_map<(d0) -> (d0, 0)>
 
-// CHECKPARALLEL-DAG: #[[Stride1Dilation1:.*]] = affine_map<(d0, d1) -> (d0  + d1)>
-// CHECKPARALLEL-DAG: #[[Stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
-// CHECKPARALLEL-DAG: #[[Stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
-// CHECKPARALLEL-DAG: #[[Stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
+// CHECKPARALLEL-DAG: #[[$stride1Dilation1:.*]] = affine_map<(d0, d1) -> (d0  + d1)>
+// CHECKPARALLEL-DAG: #[[$stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
+// CHECKPARALLEL-DAG: #[[$stride2Dilation4:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1 * 4)>
+// CHECKPARALLEL-DAG: #[[$stride3Dilation5:.*]] = affine_map<(d0, d1) -> (d0 * 3 + d1 * 5)>
 
 
 func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
@@ -152,22 +152,22 @@ func @dot_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf3
   return
 }
 // CHECKLOOP-LABEL: func @dot_view(
-//       CHECKLOOP:   %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<f32>) {
-//       CHECKLOOP: %[[K:.*]] = dim %arg0, %c0 : memref<?xf32, #[[strided1D]]>
+//       CHECKLOOP:   %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: memref<f32>) {
+//       CHECKLOOP: %[[K:.*]] = dim %arg0, %c0 : memref<?xf32, #[[$strided1D]]>
 //       CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} {
-//   CHECKLOOP-DAG:   %[[a:.*]] = load %arg0[%{{.*}}] : memref<?xf32, #[[strided1D]]>
-//   CHECKLOOP-DAG:   %[[b:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
+//   CHECKLOOP-DAG:   %[[a:.*]] = load %arg0[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
+//   CHECKLOOP-DAG:   %[[b:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
 //   CHECKLOOP-DAG:   %[[inc:.*]] = mulf %[[a]], %[[b]] : f32
 //   CHECKLOOP-DAG:   %[[c:.*]] = load %{{.*}}[] : memref<f32>
 //   CHECKLOOP-DAG:   %[[res:.*]] = addf %[[c]], %[[inc]] : f32
 //       CHECKLOOP:   store %[[res]], %{{.*}}[] : memref<f32>
 
 // CHECKPARALLEL-LABEL: func @dot_view(
-//       CHECKPARALLEL:   %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<f32>) {
-//       CHECKPARALLEL: %[[K:.*]] = dim %arg0, %c0 : memref<?xf32, #[[strided1D]]>
+//       CHECKPARALLEL:   %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: memref<f32>) {
+//       CHECKPARALLEL: %[[K:.*]] = dim %arg0, %c0 : memref<?xf32, #[[$strided1D]]>
 //       CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} {
-//   CHECKPARALLEL-DAG:   %[[a:.*]] = load %arg0[%{{.*}}] : memref<?xf32, #[[strided1D]]>
-//   CHECKPARALLEL-DAG:   %[[b:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
+//   CHECKPARALLEL-DAG:   %[[a:.*]] = load %arg0[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
+//   CHECKPARALLEL-DAG:   %[[b:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
 //   CHECKPARALLEL-DAG:   %[[inc:.*]] = mulf %[[a]], %[[b]] : f32
 //   CHECKPARALLEL-DAG:   %[[c:.*]] = load %{{.*}}[] : memref<f32>
 //   CHECKPARALLEL-DAG:   %[[res:.*]] = addf %[[c]], %[[inc]] : f32
@@ -178,14 +178,14 @@ func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
   return
 }
 // CHECKLOOP-LABEL: func @fill_view(
-//       CHECKLOOP: %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: f32) {
+//       CHECKLOOP: %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: f32) {
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-//       CHECKLOOP:     store %{{.*}}, %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
+//       CHECKLOOP:     store %{{.*}}, %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
 
 // CHECKPARALLEL-LABEL: func @fill_view(
-//       CHECKPARALLEL: %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: f32) {
+//       CHECKPARALLEL: %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: f32) {
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}) = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) {
-//       CHECKPARALLEL:     store %{{.*}}, %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
+//       CHECKPARALLEL:     store %{{.*}}, %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
 
 func @fill_view0(%arg0: memref<f32>, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref<f32>, f32
@@ -202,32 +202,32 @@ func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
   return
 }
 // CHECKLOOP-LABEL: func @fill_view3(
-//       CHECKLOOP: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: f32) {
+//       CHECKLOOP: %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: f32) {
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-//       CHECKLOOP:         store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:         store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 
 // CHECKPARALLEL-LABEL: func @fill_view3(
-//       CHECKPARALLEL: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: f32) {
+//       CHECKPARALLEL: %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: f32) {
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}}, %{{.*}}) {
-//       CHECKPARALLEL:     store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:     store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 
 func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>) {
   linalg.copy(%arg0, %arg1) : memref<?xf32, offset: ?, strides: [1]>, memref<?xf32, offset: ?, strides: [1]>
   return
 }
 // CHECKLOOP-LABEL: func @copy_view(
-//       CHECKLOOP: %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<?xf32, #[[strided1D]]>) {
+//       CHECKLOOP: %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: memref<?xf32, #[[$strided1D]]>) {
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-//       CHECKLOOP:     %[[L:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
-//       CHECKLOOP:     store %[[L]], %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
+//       CHECKLOOP:     %[[L:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
+//       CHECKLOOP:     store %[[L]], %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
 
 // CHECKPARALLEL-LABEL: func @copy_view(
-//       CHECKPARALLEL: %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<?xf32, #[[strided1D]]>) {
+//       CHECKPARALLEL: %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: memref<?xf32, #[[$strided1D]]>) {
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}) = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) {
-//       CHECKPARALLEL:     %[[L:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
-//       CHECKPARALLEL:     store %[[L]], %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
+//       CHECKPARALLEL:     %[[L:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
+//       CHECKPARALLEL:     store %[[L]], %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
 
 func @copy_view0(%arg0: memref<f32>, %arg1: memref<f32>) {
   linalg.copy(%arg0, %arg1) : memref<f32>, memref<f32>
@@ -248,74 +248,74 @@ func @copy_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
   return
 }
 // CHECKLOOP-LABEL: func @copy_view3
-//       CHECKLOOP: (%{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
+//       CHECKLOOP: (%{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-//       CHECKLOOP:         %[[L:.*]] = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:         store %[[L]], %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:         %[[L:.*]] = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:         store %[[L]], %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 
 // CHECKPARALLEL-LABEL: func @copy_view3
-//       CHECKPARALLEL: (%{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
+//       CHECKPARALLEL: (%{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}}, %{{.*}}) {
-//       CHECKPARALLEL:     %[[L:.*]] = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:     store %[[L]], %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:     %[[L:.*]] = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:     store %[[L]], %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 
 func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg2: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   linalg.conv(%arg0, %arg1, %arg2) {strides = [2]}: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
   return
 }
 // CHECKLOOP-LABEL: func @conv_view3(
-//       CHECKLOOP: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
-//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP: %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
+//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} {
 //       CHECKLOOP:         scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} {
 //       CHECKLOOP:           scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} {
-//       CHECKLOOP:             %[[SUM:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKLOOP:             %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM]], %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:             %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:             %[[SUM:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:             %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM]], %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:             %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKLOOP:             %{{.*}} = mulf %{{.*}}, %{{.*}} : f32
-//       CHECKLOOP:             %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:             %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKLOOP:             %{{.*}} = addf %{{.*}}, %{{.*}} : f32
-//       CHECKLOOP:             store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:             store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 
 // CHECKPARALLEL-LABEL: func @conv_view3(
-//       CHECKPARALLEL: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
-//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL: %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
+//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} {
-//       CHECKPARALLEL:         %[[SUM:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM]], %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:         %[[SUM:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM]], %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKPARALLEL:         %{{.*}} = mulf %{{.*}}, %{{.*}} : f32
-//       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKPARALLEL:         %{{.*}} = addf %{{.*}}, %{{.*}} : f32
-//       CHECKPARALLEL:         store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:         store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?xf32, #[[$strided3D]]>
 
 func @conv_view4(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg2: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>) {
   linalg.conv(%arg0, %arg1, %arg2) {dilations = [4, 5], strides = [2, 3]} : memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>
   return
 }
 // CHECKLOOP-LABEL: func @conv_view4(
-//       CHECKLOOP: %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>) {
-//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP: %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>) {
+//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKLOOP:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKLOOP:   %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKLOOP:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKLOOP:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[X1]] step %{{.*}} {
@@ -323,36 +323,36 @@ func @conv_view4(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %
 //       CHECKLOOP:           scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} {
 //       CHECKLOOP:             scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} {
 //       CHECKLOOP:               scf.for %{{.*}} = %{{.*}} to %[[Z1]] step %{{.*}} {
-//       CHECKLOOP:                 %[[SUM0:.*]] = affine.apply #[[Stride2Dilation4]](%{{.*}}, %{{.*}})
-//       CHECKLOOP:                 %[[SUM1:.*]] = affine.apply #[[Stride3Dilation5]](%{{.*}}, %{{.*}})
-//       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM0]], %[[SUM1]], %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:                 %[[SUM0:.*]] = affine.apply #[[$stride2Dilation4]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:                 %[[SUM1:.*]] = affine.apply #[[$stride3Dilation5]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM0]], %[[SUM1]], %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       CHECKLOOP:                 %{{.*}} = mulf %{{.*}}, %{{.*}} : f32
-//       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       CHECKLOOP:                 %{{.*}} = addf %{{.*}}, %{{.*}} : f32
-//       CHECKLOOP:                 store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:                 store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
 
 // CHECKPARALLEL-LABEL: func @conv_view4(
-//       CHECKPARALLEL: %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>) {
-//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL: %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>) {
+//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKPARALLEL:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKPARALLEL:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[X1]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} {
 //       CHECKPARALLEL:         scf.for %{{.*}} = %{{.*}} to %[[Z1]] step %{{.*}} {
-//       CHECKPARALLEL:           %[[SUM0:.*]] = affine.apply #[[Stride2Dilation4]](%{{.*}}, %{{.*}})
-//       CHECKPARALLEL:           %[[SUM1:.*]] = affine.apply #[[Stride3Dilation5]](%{{.*}}, %{{.*}})
-//       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM0]], %[[SUM1]], %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:           %[[SUM0:.*]] = affine.apply #[[$stride2Dilation4]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:           %[[SUM1:.*]] = affine.apply #[[$stride3Dilation5]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %[[SUM0]], %[[SUM1]], %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       CHECKPARALLEL:           %{{.*}} = mulf %{{.*}}, %{{.*}} : f32
-//       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       CHECKPARALLEL:           %{{.*}} = addf %{{.*}}, %{{.*}} : f32
-//       CHECKPARALLEL:           store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:           store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32, #[[$strided4D]]>
 
 func @conv_padding(%arg0: memref<?x?x?x?xf32>,
                    %arg1: memref<?x?x?x?xf32>,
@@ -382,8 +382,8 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 //       CHECKLOOP:               scf.for %{{.*}} = %{{.*}} to %[[Z1]] step %{{.*}} {
 //       CHECKLOOP:                 %[[SUM0:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
 //       CHECKLOOP:                 %[[SUM1:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
-//       CHECKLOOP:                 %[[IDX:.*]] = affine.max #[[clampMinMap]](%[[SUM0]])
-//       CHECKLOOP:                 %[[IDY:.*]] = affine.max #[[clampMinMap]](%[[SUM1]])
+//       CHECKLOOP:                 %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[SUM0]])
+//       CHECKLOOP:                 %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[SUM1]])
 //       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %[[IDX]], %[[IDY]], %{{.*}}] : memref<?x?x?x?xf32>
 //       CHECKLOOP:                 %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : f32
 //       CHECKLOOP:                 %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
@@ -408,8 +408,8 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 //       CHECKPARALLEL:         scf.for %{{.*}} = %{{.*}} to %[[Z1]] step %{{.*}} {
 //       CHECKPARALLEL:           %[[SUM0:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
 //       CHECKPARALLEL:           %[[SUM1:.*]] = affine.apply #{{.*}}(%{{.*}}, %{{.*}})
-//       CHECKPARALLEL:           %[[IDX:.*]] = affine.max #[[clampMinMap]](%[[SUM0]])
-//       CHECKPARALLEL:           %[[IDY:.*]] = affine.max #[[clampMinMap]](%[[SUM1]])
+//       CHECKPARALLEL:           %[[IDX:.*]] = affine.max #[[$clampMinMap]](%[[SUM0]])
+//       CHECKPARALLEL:           %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[SUM1]])
 //       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %[[IDX]], %[[IDY]], %{{.*}}] : memref<?x?x?x?xf32>
 //       CHECKPARALLEL:           %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : f32
 //       CHECKPARALLEL:           %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<?x?x?x?xf32>
@@ -434,8 +434,8 @@ func @pooling_max(%arg0: memref<?x?xf32>,
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKLOOP:         scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
-//       CHECKLOOP:           %[[IX:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKLOOP:           %[[IY:.*]] = affine.apply #[[Stride1Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:           %[[IX:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1]](%{{.*}}, %{{.*}})
 //       CHECKLOOP:           %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 //       CHECKLOOP:           %{{.*}} = load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
 //       CHECKLOOP:           %[[RES:.*]] = select %{{.*}}, %{{.*}}, %{{.*}} : f32
@@ -449,8 +449,8 @@ func @pooling_max(%arg0: memref<?x?xf32>,
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[Stride1Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1]](%{{.*}}, %{{.*}})
 //       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 //       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
 //       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}}, %{{.*}}, %{{.*}} : f32
@@ -472,8 +472,8 @@ func @pooling_min(%arg0: memref<?x?xf32>,
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKLOOP:         scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
-//       CHECKLOOP:           %[[IX:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKLOOP:           %[[IY:.*]] = affine.apply #[[Stride1Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:           %[[IX:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1]](%{{.*}}, %{{.*}})
 //       CHECKLOOP:           %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 //       CHECKLOOP:           %{{.*}} = load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
 //       CHECKLOOP:           %[[RES:.*]] = select %{{.*}}, %{{.*}}, %{{.*}} : f32
@@ -487,8 +487,8 @@ func @pooling_min(%arg0: memref<?x?xf32>,
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[Stride1Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1]](%{{.*}}, %{{.*}})
 //       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 //       CHECKPARALLEL:         %{{.*}} = load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
 //       CHECKPARALLEL:         %[[RES:.*]] = select %{{.*}}, %{{.*}}, %{{.*}} : f32
@@ -510,8 +510,8 @@ func @pooling_sum(%arg0: memref<?x?xf32>,
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKLOOP:         scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
-//       CHECKLOOP:           %[[IX:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKLOOP:           %[[IY:.*]] = affine.apply #[[Stride1Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:           %[[IX:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKLOOP:           %[[IY:.*]] = affine.apply #[[$stride1Dilation1]](%{{.*}}, %{{.*}})
 //       CHECKLOOP:           %[[RHS:.*]] = load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
 //       CHECKLOOP:           %[[LHS:.*]] = load %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 //       CHECKLOOP:           %[[RES:.*]] = addf %[[LHS]], %[[RHS]] : f32
@@ -525,8 +525,8 @@ func @pooling_sum(%arg0: memref<?x?xf32>,
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
-//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[Stride2Dilation1]](%{{.*}}, %{{.*}})
-//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[Stride1Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:         %[[IX:.*]] = affine.apply #[[$stride2Dilation1]](%{{.*}}, %{{.*}})
+//       CHECKPARALLEL:         %[[IY:.*]] = affine.apply #[[$stride1Dilation1]](%{{.*}}, %{{.*}})
 //       CHECKPARALLEL:         %[[RHS:.*]] = load %{{.*}}[%[[IX]], %[[IY]]] : memref<?x?xf32>
 //       CHECKPARALLEL:         %[[LHS:.*]] = load %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 //       CHECKPARALLEL:         %[[RES:.*]] = addf %[[LHS]], %[[RHS]] : f32
@@ -558,23 +558,23 @@ func @generic_region(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1:
 //       CHECKLOOP: scf.for %[[i:.*]] = {{.*}}
 //       CHECKLOOP:   scf.for %[[j:.*]] = {{.*}}
 //       CHECKLOOP:     scf.for %[[k:.*]] = {{.*}}
-//       CHECKLOOP:       %[[a:.*]] = load %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32, #[[strided2D]]>
-//       CHECKLOOP:       %[[b:.*]] = load %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:       %[[c:.*]] = load %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:       %[[a:.*]] = load %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32, #[[$strided2D]]>
+//       CHECKLOOP:       %[[b:.*]] = load %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:       %[[c:.*]] = load %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKLOOP:       %[[d:.*]] = mulf %[[a]], %[[b]] : f32
 //       CHECKLOOP:       %[[e:.*]] = addf %[[c]], %[[d]] : f32
-//       CHECKLOOP:       store %[[d]], %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:       store %[[e]], %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:       store %[[d]], %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKLOOP:       store %[[e]], %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[$strided3D]]>
 
 // CHECKPARALLEL-LABEL: @generic_region
 //       CHECKPARALLEL: scf.parallel (%[[i:[a-zA-Z0-9_]*]], %[[j:[a-zA-Z0-9_]*]], %[[k:[a-zA-Z0-9_]*]])
-//       CHECKPARALLEL:   %[[a:.*]] = load %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32, #[[strided2D]]>
-//       CHECKPARALLEL:   %[[b:.*]] = load %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[c:.*]] = load %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:   %[[a:.*]] = load %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32, #[[$strided2D]]>
+//       CHECKPARALLEL:   %[[b:.*]] = load %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:   %[[c:.*]] = load %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[$strided3D]]>
 //       CHECKPARALLEL:   %[[d:.*]] = mulf %[[a]], %[[b]] : f32
 //       CHECKPARALLEL:   %[[e:.*]] = addf %[[c]], %[[d]] : f32
-//       CHECKPARALLEL:   store %[[d]], %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   store %[[e]], %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:   store %[[d]], %{{.*}}[%[[i]], %[[j]], %[[k]]] : memref<?x?x?xf32, #[[$strided3D]]>
+//       CHECKPARALLEL:   store %[[e]], %{{.*}}[%[[i]], %[[k]], %[[j]]] : memref<?x?x?xf32, #[[$strided3D]]>
 
 #trait4 = {
   args_in = 1,
@@ -849,9 +849,9 @@ func @scalar_code(%arg0: memref<f32>, %arg1 : memref<f32>, %arg2 : memref<f32>)
   return
 }
 // CHECKLOOP-LABEL: @scalar_code
-//  CHECKLOOP-SAME: %[[ARG0]]: memref<f32>
-//  CHECKLOOP-SAME: %[[ARG1]]: memref<f32>
-//  CHECKLOOP-SAME: %[[ARG2]]: memref<f32>
+//  CHECKLOOP-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<f32>
+//  CHECKLOOP-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<f32>
+//  CHECKLOOP-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<f32>
 //   CHECKLOOP-NOT: scf.for
 //       CHECKLOOP: load %[[ARG0]][]
 //       CHECKLOOP: load %[[ARG1]][]
@@ -859,9 +859,9 @@ func @scalar_code(%arg0: memref<f32>, %arg1 : memref<f32>, %arg2 : memref<f32>)
 //       CHECKLOOP: store %{{.*}}, %[[ARG2]][]
 
 // CHECKPARALLEL-LABEL: @scalar_code
-//  CHECKPARALLEL-SAME: %[[ARG0]]: memref<f32>
-//  CHECKPARALLEL-SAME: %[[ARG1]]: memref<f32>
-//  CHECKPARALLEL-SAME: %[[ARG2]]: memref<f32>
+//  CHECKPARALLEL-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<f32>
+//  CHECKPARALLEL-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<f32>
+//  CHECKPARALLEL-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<f32>
 //   CHECKPARALLEL-NOT: scf.for
 //       CHECKPARALLEL: load %[[ARG0]][]
 //       CHECKPARALLEL: load %[[ARG1]][]

diff  --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index b8c52e43504b..c4c5e00c42e7 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -5,8 +5,8 @@
 #map2 = affine_map<(d0) -> (d0 + 4)>
 #map3 = affine_map<(d0) -> (d0 + 3)>
 
-// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 
 func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c4 = constant 4 : index
@@ -44,25 +44,25 @@ func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
 //       CHECK:         %[[tmpA:.*]] = alloc() : memref<32xi8>
 //       CHECK:         %[[fullA:.*]] = std.view %[[tmpA]][{{.*}}][{{.*}}] : memref<32xi8> to memref<?x?xf32>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf32>
-//       CHECK:         %[[partialA:.*]] = subview %[[fullA]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialA:.*]] = subview %[[fullA]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D_dynamic]]>
 ///
 //       CHECK:         %[[tmpB:.*]] = alloc() : memref<48xi8>
 //       CHECK:         %[[fullB:.*]] = std.view %[[tmpB]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xf32>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf32>
-//       CHECK:         %[[partialB:.*]] = subview %[[fullB]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialB:.*]] = subview %[[fullB]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D_dynamic]]>
 ///
 //       CHECK:         %[[tmpC:.*]] = alloc() : memref<24xi8>
 //       CHECK:         %[[fullC:.*]] = std.view %[[tmpC]][{{.*}}][{{.*}}] : memref<24xi8> to memref<?x?xf32>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf32>
-//       CHECK:         %[[partialC:.*]] = subview %[[fullC]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialC:.*]] = subview %[[fullC]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D_dynamic]]>
 
-//       CHECK:         linalg.copy(%[[vA]], %[[partialA]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
-//       CHECK:         linalg.copy(%[[vB]], %[[partialB]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
-//       CHECK:         linalg.copy(%[[vC]], %[[partialC]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vA]], %[[partialA]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vB]], %[[partialB]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vC]], %[[partialC]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
 //
-//       CHECK:         linalg.matmul(%[[partialA]], %[[partialB]], %[[partialC]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
+//       CHECK:         linalg.matmul(%[[partialA]], %[[partialB]], %[[partialC]]) : memref<?x?xf32, #[[$strided2D_dynamic]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
 //
-//       CHECK:         linalg.copy(%[[partialC]], %[[vC]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D]]>
+//       CHECK:         linalg.copy(%[[partialC]], %[[vC]]) : memref<?x?xf32, #[[$strided2D_dynamic]]>, memref<?x?xf32, #[[$strided2D]]>
 //
 //       CHECK:         dealloc %[[tmpA]] : memref<32xi8>
 //       CHECK:         dealloc %[[tmpB]] : memref<48xi8>
@@ -106,25 +106,25 @@ func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
 //       CHECK:         %[[tmpA_f64:.*]] = alloc() : memref<64xi8>
 //       CHECK:         %[[fullA_f64:.*]] = std.view %[[tmpA_f64]][{{.*}}][{{.*}}] : memref<64xi8> to memref<?x?xf64>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf64>
-//       CHECK:         %[[partialA_f64:.*]] = subview %[[fullA_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialA_f64:.*]] = subview %[[fullA_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D_dynamic]]>
 ///
 //       CHECK:         %[[tmpB_f64:.*]] = alloc() : memref<96xi8>
 //       CHECK:         %[[fullB_f64:.*]] = std.view %[[tmpB_f64]][{{.*}}][{{.*}}] : memref<96xi8> to memref<?x?xf64>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf64>
-//       CHECK:         %[[partialB_f64:.*]] = subview %[[fullB_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialB_f64:.*]] = subview %[[fullB_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D_dynamic]]>
 ///
 //       CHECK:         %[[tmpC_f64:.*]] = alloc() : memref<48xi8>
 //       CHECK:         %[[fullC_f64:.*]] = std.view %[[tmpC_f64]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xf64>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf64>
-//       CHECK:         %[[partialC_f64:.*]] = subview %[[fullC_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialC_f64:.*]] = subview %[[fullC_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D_dynamic]]>
 
-//       CHECK:         linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref<?x?xf64, #[[strided2D]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
-//       CHECK:         linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref<?x?xf64, #[[strided2D]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
-//       CHECK:         linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[strided2D]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref<?x?xf64, #[[$strided2D]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref<?x?xf64, #[[$strided2D]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[$strided2D]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
 //
-//       CHECK:         linalg.matmul(%[[partialA_f64]], %[[partialB_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[strided2D_dynamic]]>, memref<?x?xf64, #[[strided2D_dynamic]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
+//       CHECK:         linalg.matmul(%[[partialA_f64]], %[[partialB_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[$strided2D_dynamic]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
 //
-//       CHECK:         linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : memref<?x?xf64, #[[strided2D_dynamic]]>, memref<?x?xf64, #[[strided2D]]>
+//       CHECK:         linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : memref<?x?xf64, #[[$strided2D_dynamic]]>, memref<?x?xf64, #[[$strided2D]]>
 //
 //       CHECK:         dealloc %[[tmpA_f64]] : memref<64xi8>
 //       CHECK:         dealloc %[[tmpB_f64]] : memref<96xi8>
@@ -168,25 +168,25 @@ func @matmul_i32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
 //       CHECK:         %[[tmpA_i32:.*]] = alloc() : memref<32xi8>
 //       CHECK:         %[[fullA_i32:.*]] = std.view %[[tmpA_i32]][{{.*}}][{{.*}}] : memref<32xi8> to memref<?x?xi32>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xi32>
-//       CHECK:         %[[partialA_i32:.*]] = subview %[[fullA_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialA_i32:.*]] = subview %[[fullA_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[$strided2D_dynamic]]>
 ///
 //       CHECK:         %[[tmpB_i32:.*]] = alloc() : memref<48xi8>
 //       CHECK:         %[[fullB_i32:.*]] = std.view %[[tmpB_i32]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xi32>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xi32>
-//       CHECK:         %[[partialB_i32:.*]] = subview %[[fullB_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialB_i32:.*]] = subview %[[fullB_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[$strided2D_dynamic]]>
 ///
 //       CHECK:         %[[tmpC_i32:.*]] = alloc() : memref<24xi8>
 //       CHECK:         %[[fullC_i32:.*]] = std.view %[[tmpC_i32]][{{.*}}][{{.*}}] : memref<24xi8> to memref<?x?xi32>
 //     DYNAMIC:         std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xi32>
-//       CHECK:         %[[partialC_i32:.*]] = subview %[[fullC_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[strided2D_dynamic]]>
+//       CHECK:         %[[partialC_i32:.*]] = subview %[[fullC_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[$strided2D_dynamic]]>
 
-//       CHECK:         linalg.copy(%[[vA_i32]], %[[partialA_i32]]) : memref<?x?xi32, #[[strided2D]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
-//       CHECK:         linalg.copy(%[[vB_i32]], %[[partialB_i32]]) : memref<?x?xi32, #[[strided2D]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
-//       CHECK:         linalg.copy(%[[vC_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[strided2D]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vA_i32]], %[[partialA_i32]]) : memref<?x?xi32, #[[$strided2D]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vB_i32]], %[[partialB_i32]]) : memref<?x?xi32, #[[$strided2D]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
+//       CHECK:         linalg.copy(%[[vC_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[$strided2D]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
 //
-//       CHECK:         linalg.matmul(%[[partialA_i32]], %[[partialB_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[strided2D_dynamic]]>, memref<?x?xi32, #[[strided2D_dynamic]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
+//       CHECK:         linalg.matmul(%[[partialA_i32]], %[[partialB_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[$strided2D_dynamic]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
 //
-//       CHECK:         linalg.copy(%[[partialC_i32]], %[[vC_i32]]) : memref<?x?xi32, #[[strided2D_dynamic]]>, memref<?x?xi32, #[[strided2D]]>
+//       CHECK:         linalg.copy(%[[partialC_i32]], %[[vC_i32]]) : memref<?x?xi32, #[[$strided2D_dynamic]]>, memref<?x?xi32, #[[$strided2D]]>
 //
 //       CHECK:         dealloc %[[tmpA_i32]] : memref<32xi8>
 //       CHECK:         dealloc %[[tmpB_i32]] : memref<48xi8>

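The renames above and below all follow one FileCheck convention: map variables captured by a top-of-file CHECK-DAG must be written #[[$name]] so that, with -enable-var-scope, they remain visible inside the per-function CHECK-LABEL blocks that reference them. A minimal, hypothetical test sketching the pattern (illustrative only, not part of this commit):

    // RUN: mlir-opt %s | FileCheck %s -enable-var-scope
    // The map is captured before any label; the $ prefix makes it global.
    // CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
    func @f(%arg0: memref<?xf32, offset: ?, strides: [1]>) {
      return
    }
    // CHECK-LABEL: func @f(
    //  CHECK-SAME:   memref<?xf32, #[[$strided1D]]>
    // An unprefixed #[[strided1D]] reference on the line above would fail:
    // variables without the $ prefix are discarded at each CHECK-LABEL.
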
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index 5237db79c42e..6fded85c504a 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -15,7 +15,7 @@ func @range(%arg0: index, %arg1: index, %arg2: index) {
 
 // -----
 
-// CHECK-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 
 func @views(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index) {
   %c0 = constant 0 : index
@@ -59,12 +59,12 @@ func @views(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index
 //  CHECK-SAME:     memref<?x?xf32>,
 //  CHECK-SAME:     !linalg.range,
 //  CHECK-SAME:     index,
-//  CHECK-SAME:     memref<?xf32, #[[strided1D]]>
+//  CHECK-SAME:     memref<?xf32, #[[$strided1D]]>
 //  CHECK-NEXT:  linalg.slice %{{.*}}[%{{.*}}, %{{.*}}] :
 //  CHECK-SAME:     memref<?x?xf32>,
 //  CHECK-SAME:     index,
 //  CHECK-SAME:     !linalg.range,
-//  CHECK-SAME:     memref<?xf32, #[[strided1D]]>
+//  CHECK-SAME:     memref<?xf32, #[[$strided1D]]>
 //  CHECK-NEXT:  linalg.slice %{{.*}}[%{{.*}}, %{{.*}}] :
 //  CHECK-SAME:     memref<?x?xf32>,
 //  CHECK-SAME:     index,
@@ -76,8 +76,8 @@ func @views(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index
 
 // -----
 
-// CHECK-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 
 func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
           %arg1: memref<?xf32, offset: ?, strides: [1]>,
@@ -96,33 +96,33 @@ func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 }
 // CHECK-LABEL: func @ops(%
 //  CHECK-NEXT:  linalg.matmul(%{{.*}}, %{{.*}}, %{{.*}}) :
-//  CHECK-SAME:     memref<?x?xf32, #[[strided2D]]>,
-//  CHECK-SAME:     memref<?x?xf32, #[[strided2D]]>,
-//  CHECK-SAME:     memref<?x?xf32, #[[strided2D]]>
+//  CHECK-SAME:     memref<?x?xf32, #[[$strided2D]]>,
+//  CHECK-SAME:     memref<?x?xf32, #[[$strided2D]]>,
+//  CHECK-SAME:     memref<?x?xf32, #[[$strided2D]]>
 //  CHECK-NEXT:  linalg.matvec(%{{.*}}, %{{.*}}, %{{.*}}) :
-//  CHECK-SAME:     memref<?x?xf32, #[[strided2D]]>,
-//  CHECK-SAME:     memref<?xf32, #[[strided1D]]>,
-//  CHECK-SAME:     memref<?xf32, #[[strided1D]]>
+//  CHECK-SAME:     memref<?x?xf32, #[[$strided2D]]>,
+//  CHECK-SAME:     memref<?xf32, #[[$strided1D]]>,
+//  CHECK-SAME:     memref<?xf32, #[[$strided1D]]>
 //  CHECK-NEXT:  linalg.dot(%{{.*}}, %{{.*}}, %{{.*}}) :
-//  CHECK-SAME:     memref<?xf32, #[[strided1D]]>,
-//  CHECK-SAME:     memref<?xf32, #[[strided1D]]>,
+//  CHECK-SAME:     memref<?xf32, #[[$strided1D]]>,
+//  CHECK-SAME:     memref<?xf32, #[[$strided1D]]>,
 //  CHECK-SAME:     memref<f32>
 
 // -----
 
-// CHECK-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 
 func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref<?xf32, offset: ?, strides: [1]>, f32
   return
 }
 // CHECK-LABEL: func @fill_view(
-//       CHECK:  %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: f32) {
-//       CHECK:   linalg.fill(%{{.*}}, %{{.*}}) : memref<?xf32, #[[strided1D]]>, f32
+//       CHECK:  %{{.*}}: memref<?xf32, #[[$strided1D]]>, %{{.*}}: f32) {
+//       CHECK:   linalg.fill(%{{.*}}, %{{.*}}) : memref<?xf32, #[[$strided1D]]>, f32
 
 // -----
 
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   %0 = linalg.transpose %arg0 (i, j, k) -> (k, j, i) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
@@ -130,23 +130,23 @@ func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
 }
 // CHECK-LABEL: func @transpose
 //       CHECK:   linalg.transpose %{{.*}} ([[i:.*]], [[j:.*]], [[k:.*]]) -> ([[k]], [[j]], [[i]]) :
-//  CHECK-SAME:      memref<?x?x?xf32, #[[strided3D]]>
+//  CHECK-SAME:      memref<?x?x?xf32, #[[$strided3D]]>
 
 // -----
 
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, f32
   return
 }
 // CHECK-LABEL: func @fill_view3(
-//       CHECK:  %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: f32) {
-//       CHECK:   linalg.fill(%{{.*}}, %{{.*}}) : memref<?x?x?xf32, #[[strided3D]]>, f32
+//       CHECK:  %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: f32) {
+//       CHECK:   linalg.fill(%{{.*}}, %{{.*}}) : memref<?x?x?xf32, #[[$strided3D]]>, f32
 
 // -----
 
-// CHECK-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 
 func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>,
                 %arg1: memref<?xf32, offset: ?, strides: [1]>) {
@@ -156,13 +156,13 @@ func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>,
 }
 // CHECK-LABEL: func @copy_view(
 //       CHECK:   linalg.copy(%{{.*}}, %{{.*}}) :
-//  CHECK-SAME:     memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>
+//  CHECK-SAME:     memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>
 
 // -----
 
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[map0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
-// CHECK-DAG: #[[map1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
+// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
 
 func @copy_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                  %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
@@ -172,16 +172,16 @@ func @copy_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
   return
 }
 // CHECK-LABEL: func @copy_view3(
-//       CHECK:  %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
+//       CHECK:  %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[$strided3D]]>) {
 //       CHECK:   linalg.copy(%{{.*}}, %{{.*}}) {
-//  CHECK-SAME:     inputPermutation = #[[map0]],
-//  CHECK-SAME:     outputPermutation = #[[map1]]} :
-//  CHECK-SAME:     memref<?x?x?xf32, #[[strided3D]]>,
-//  CHECK-SAME:     memref<?x?x?xf32, #[[strided3D]]>
+//  CHECK-SAME:     inputPermutation = #[[$map0]],
+//  CHECK-SAME:     outputPermutation = #[[$map1]]} :
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]>,
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]>
 
 // -----
 
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
                  %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
@@ -193,13 +193,13 @@ func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
 }
 // CHECK-LABEL: func @conv_view3(
 //       CHECK:   linalg.conv(%{{.*}}, %{{.*}}, %{{.*}}) :
-//  CHECK-SAME:     memref<?x?x?xf32, #[[strided3D]]>,
-//  CHECK-SAME:     memref<?x?x?xf32, #[[strided3D]]>,
-//  CHECK-SAME:     memref<?x?x?xf32, #[[strided3D]]>
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]>,
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]>,
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]>
 
 // -----
 
-// CHECK-DAG: #[[strided6D:.*]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)>
+// CHECK-DAG: #[[$strided6D:.*]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)>
 
 func @conv_view6(%arg0: memref<?x?x?x?x?x?xf32, offset: ?, strides: [?, ?, ?, ?, ?, 1]>,
                  %arg1: memref<?x?x?x?x?x?xf32, offset: ?, strides: [?, ?, ?, ?, ?, 1]>,
@@ -213,9 +213,9 @@ func @conv_view6(%arg0: memref<?x?x?x?x?x?xf32, offset: ?, strides: [?, ?, ?, ?,
 // CHECK-LABEL: func @conv_view6(
 //       CHECK:   linalg.conv(%{{.*}}, %{{.*}}, %{{.*}}) {
 //  CHECK-SAME:     dilations = [4, 4, 5, 5], strides = [2, 2, 3, 3]} :
-//  CHECK-SAME:     memref<?x?x?x?x?x?xf32, #[[strided6D]]>,
-//  CHECK-SAME:     memref<?x?x?x?x?x?xf32, #[[strided6D]]>,
-//  CHECK-SAME:     memref<?x?x?x?x?x?xf32, #[[strided6D]]>
+//  CHECK-SAME:     memref<?x?x?x?x?x?xf32, #[[$strided6D]]>,
+//  CHECK-SAME:     memref<?x?x?x?x?x?xf32, #[[$strided6D]]>,
+//  CHECK-SAME:     memref<?x?x?x?x?x?xf32, #[[$strided6D]]>
 
 // -----
 
@@ -283,8 +283,8 @@ func @pooling_sum(%arg0: memref<?x?x?xf32>,
 
 // -----
 
-// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 #accesses = [
   affine_map<(i, j, k) -> (j, i)>,
@@ -314,7 +314,7 @@ func @generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
 //  CHECK-SAME:     indexing_maps = [#{{.*}}, #{{.*}}], iterator_types = ["parallel", "parallel", "parallel"],
 //  CHECK-SAME:     library_call = "some_external_function_name_1"
 //  CHECK-SAME:     {foo = 1 : i64}
-//       CHECK:     memref<?x?xvector<3x4xi4>, #[[strided2D]]>, memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:     memref<?x?xvector<3x4xi4>, #[[$strided2D]]>, memref<?x?x?xf32, #[[$strided3D]]>
 
 func @generic_with_tensor_input(%arg0: tensor<?x?xvector<3x4xi4>>,
                                 %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
@@ -331,7 +331,7 @@ func @generic_with_tensor_input(%arg0: tensor<?x?xvector<3x4xi4>>,
 //  CHECK-SAME:     indexing_maps = [#{{.*}}, #{{.*}}], iterator_types = ["parallel", "parallel", "parallel"],
 //  CHECK-SAME:     library_call = "some_external_function_name_1"}
 //  CHECK-SAME:     {foo = 1 : i64}
-//       CHECK:     tensor<?x?xvector<3x4xi4>>, memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:     tensor<?x?xvector<3x4xi4>>, memref<?x?x?xf32, #[[$strided3D]]>
 
 // -----
 
@@ -436,8 +436,8 @@ func @indexed_generic_op_zero_rank(%arg0: tensor<f32>) -> (tensor<3x4xf32>)
 
 // -----
 
-// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 #accesses = [
   affine_map<(i, j, k) -> (j, i)>,
@@ -468,8 +468,8 @@ func @generic_region(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1
 //  CHECK-SAME:     {foo = 1 : i64}
 //       CHECK:    ^{{.*}}(%{{.*}}: vector<3x4xi4>, %{{.*}}: f32):
 //       CHECK:      linalg.yield %{{.*}} : f32
-//       CHECK:    memref<?x?xvector<3x4xi4>, #[[strided2D]]>,
-//  CHECK-SAME:    memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:    memref<?x?xvector<3x4xi4>, #[[$strided2D]]>,
+//  CHECK-SAME:    memref<?x?x?xf32, #[[$strided3D]]>
 
 func @indexed_generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
                       %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
@@ -487,19 +487,19 @@ func @indexed_generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?,
 //  CHECK-SAME:     {foo = 1 : i64}
 //       CHECK:    ^{{.*}}(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: vector<3x4xi4>, %{{.*}}: f32):
 //       CHECK:      linalg.yield %{{.*}} : f32
-//       CHECK:    }: memref<?x?xvector<3x4xi4>, #[[strided2D]]>,
-//  CHECK-SAME:                       memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:    }: memref<?x?xvector<3x4xi4>, #[[$strided2D]]>,
+//  CHECK-SAME:                       memref<?x?x?xf32, #[[$strided3D]]>
 
 // -----
 
-// CHECK-DAG: #[[reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
-// CHECK-DAG: #[[reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
-// CHECK-DAG: #[[reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
-// CHECK-DAG: #[[reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG: #[[reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-// CHECK-DAG: #[[reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
-// CHECK-DAG: #[[reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
+// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
+// CHECK-DAG: #[[$reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
+// CHECK-DAG: #[[$reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
+// CHECK-DAG: #[[$reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+// CHECK-DAG: #[[$reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
+// CHECK-DAG: #[[$reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
+// CHECK-DAG: #[[$reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
 
 func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2: tensor<3x?x5xf32>) {
   // Reshapes that collapse and expand back a contiguous buffer.
@@ -548,21 +548,21 @@ func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2:
   return
 }
 // CHECK-LABEL: func @reshape_static
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<12x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
 //  CHECK-SAME:     memref<12x5xf32> into memref<3x4x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD0]], #[[reshapeD12]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD0]], #[[$reshapeD12]]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<3x20xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD0]], #[[reshapeD12]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD0]], #[[$reshapeD12]]]
 //  CHECK-SAME:     memref<3x20xf32> into memref<3x4x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD012]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD012]]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<60xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD012]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD012]]]
 //  CHECK-SAME:     memref<60xf32> into memref<3x4x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshape5D01]], #[[reshape5D2]], #[[reshape5D34]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshape5D01]], #[[$reshape5D2]], #[[$reshape5D34]]]
 //  CHECK-SAME:     memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshape5D01]], #[[reshape5D2]], #[[reshape5D34]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshape5D01]], #[[$reshape5D2]], #[[$reshape5D34]]]
 //  CHECK-SAME:     memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
 //
 //       CHECK:   linalg.tensor_reshape {{.*}}: tensor<3x4x5xf32> into tensor<1x3x4x1x5xf32>
@@ -572,10 +572,10 @@ func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2:
 
 // -----
 
-// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
-// CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>
 
 func @reshape_dynamic(%arg0: memref<?x?x?xf32>,
                       %arg1: memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]>,
@@ -605,18 +605,18 @@ func @reshape_dynamic(%arg0: memref<?x?x?xf32>,
   return
 }
 // CHECK-LABEL: func @reshape
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
 //  CHECK-SAME:     memref<?x?x?xf32> into memref<?x?xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
 //  CHECK-SAME:     memref<?x?xf32> into memref<?x?x?xf32>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
-//  CHECK-SAME:     memref<?x?x?xf32, #[[strided3DOFF0]]> into memref<?x?xf32, #[[strided2DOFF0]]>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
-//  CHECK-SAME:     memref<?x?xf32, #[[strided2DOFF0]]> into memref<?x?x?xf32, #[[strided3DOFF0]]>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
-//  CHECK-SAME:     memref<?x?x?xf32, #[[strided3D]]> into memref<?x?xf32, #[[strided2D]]>
-//       CHECK:   linalg.reshape {{.*}} [#[[reshapeD01]], #[[reshapeD2]]]
-//  CHECK-SAME:     memref<?x?xf32, #[[strided2D]]> into memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3DOFF0]]> into memref<?x?xf32, #[[$strided2DOFF0]]>
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//  CHECK-SAME:     memref<?x?xf32, #[[$strided2DOFF0]]> into memref<?x?x?xf32, #[[$strided3DOFF0]]>
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]> into memref<?x?xf32, #[[$strided2D]]>
+//       CHECK:   linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+//  CHECK-SAME:     memref<?x?xf32, #[[$strided2D]]> into memref<?x?x?xf32, #[[$strided3D]]>
 
 
 // TODO: Return tensors need a semantics convention update.

diff --git a/mlir/test/Dialect/Linalg/standard.mlir b/mlir/test/Dialect/Linalg/standard.mlir
index b94c504434ed..0ba3465443fa 100644
--- a/mlir/test/Dialect/Linalg/standard.mlir
+++ b/mlir/test/Dialect/Linalg/standard.mlir
@@ -1,14 +1,14 @@
 // RUN: mlir-opt %s -convert-linalg-to-std | FileCheck %s
 
-// CHECK-DAG: #[[map0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[map1:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[map2:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d2 * s2 + d1)>
-// CHECK-DAG: #[[map3:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
-// CHECK-DAG: #[[map4:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
-// CHECK-DAG: #[[map5:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
-// CHECK-DAG: #[[map6:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
-// CHECK-DAG: #[[map7:.*]] = affine_map<()[s0] -> (s0)>
-// CHECK-DAG: #[[map8:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
+// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$map2:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d2 * s2 + d1)>
+// CHECK-DAG: #[[$map3:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
+// CHECK-DAG: #[[$map4:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
+// CHECK-DAG: #[[$map5:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
+// CHECK-DAG: #[[$map6:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+// CHECK-DAG: #[[$map7:.*]] = affine_map<()[s0] -> (s0)>
+// CHECK-DAG: #[[$map8:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
 
 func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
           %arg1: memref<?xf32, offset: ?, strides: [1]>,
@@ -19,32 +19,32 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
   return
 }
 // CHECK-LABEL: func @dot(
-//  CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?xf32, #[[map0]]>,
-//  CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?xf32, #[[map0]]>,
+//  CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?xf32, #[[$map0]]>,
+//  CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?xf32, #[[$map0]]>,
 //  CHECK-SAME: %[[arg2:[a-zA-z0-9]*]]: memref<f32>) {
 //       CHECK:   %[[o0:.*]] = memref_cast %[[arg0]] :
-//  CHECK-SAME:     memref<?xf32, #[[map0]]> to memref<?xf32, #[[map6]]>
+//  CHECK-SAME:     memref<?xf32, #[[$map0]]> to memref<?xf32, #[[$map6]]>
 //       CHECK:   %[[o1:.*]] = memref_cast %[[arg1]] :
-//  CHECK-SAME:     memref<?xf32, #[[map0]]> to memref<?xf32, #[[map6]]>
+//  CHECK-SAME:     memref<?xf32, #[[$map0]]> to memref<?xf32, #[[$map6]]>
 //       CHECK:   %[[o2:.*]] = memref_cast %[[arg2]] :
-//  CHECK-SAME:     memref<f32> to memref<f32, #[[map7]]>
+//  CHECK-SAME:     memref<f32> to memref<f32, #[[$map7]]>
 //       CHECK:   call @linalg_dot_viewsxf32_viewsxf32_viewf32(
 //  CHECK-SAME:     %[[o0]], %[[o1]], %[[o2]]) :
-//  CHECK-SAME:   memref<?xf32, #[[map6]]>, memref<?xf32, #[[map6]]>, memref<f32, #[[map7]]>
+//  CHECK-SAME:   memref<?xf32, #[[$map6]]>, memref<?xf32, #[[$map6]]>, memref<f32, #[[$map7]]>
 
 func @copy(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   linalg.copy(%arg0, %arg1) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
   return
 }
 // CHECK-LABEL: func @copy(
-//  CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>,
-//  CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>) {
+//  CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>,
+//  CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>) {
 //       CHECK:   %[[o0:.*]] = memref_cast %[[arg0]] :
-//  CHECK-SAME:     memref<?x?x?xf32, #[[map1]]> to memref<?x?x?xf32, #[[map8]]>
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$map1]]> to memref<?x?x?xf32, #[[$map8]]>
 //       CHECK:   %[[o1:.*]] = memref_cast %[[arg1]] :
-//  CHECK-SAME:     memref<?x?x?xf32, #[[map1]]> to memref<?x?x?xf32, #[[map8]]>
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$map1]]> to memref<?x?x?xf32, #[[$map8]]>
 //       CHECK:   call @linalg_copy_viewsxsxsxf32_viewsxsxsxf32(%[[o0]], %[[o1]]) :
-//  CHECK-SAME:   memref<?x?x?xf32, #[[map8]]>, memref<?x?x?xf32, #[[map8]]>
+//  CHECK-SAME:   memref<?x?x?xf32, #[[$map8]]>, memref<?x?x?xf32, #[[$map8]]>
 
 func @copy_transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   linalg.copy(%arg0, %arg1) {inputPermutation = affine_map<(i, j, k) -> (i, k, j)>,
@@ -53,18 +53,18 @@ func @copy_transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %a
   return
 }
 // CHECK-LABEL: func @copy_transpose(
-//  CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>,
-//  CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>) {
+//  CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>,
+//  CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>) {
 //       CHECK:   %[[t0:.*]] = linalg.transpose %[[arg0]]
-//  CHECK-SAME:     (d0, d1, d2) -> (d0, d2, d1) : memref<?x?x?xf32, #[[map1]]>
+//  CHECK-SAME:     (d0, d1, d2) -> (d0, d2, d1) : memref<?x?x?xf32, #[[$map1]]>
 //       CHECK:   %[[t1:.*]] = linalg.transpose %[[arg1]]
-//  CHECK-SAME:     (d0, d1, d2) -> (d2, d1, d0) : memref<?x?x?xf32, #[[map1]]>
+//  CHECK-SAME:     (d0, d1, d2) -> (d2, d1, d0) : memref<?x?x?xf32, #[[$map1]]>
 //       CHECK:   %[[o0:.*]] = memref_cast %[[t0]] :
-//  CHECK-SAME:     memref<?x?x?xf32, #[[map2]]> to memref<?x?x?xf32, #[[map8]]>
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$map2]]> to memref<?x?x?xf32, #[[$map8]]>
 //       CHECK:   %[[o1:.*]] = memref_cast %[[t1]] :
-//  CHECK-SAME:     memref<?x?x?xf32, #[[map4]]> to memref<?x?x?xf32, #[[map8]]>
+//  CHECK-SAME:     memref<?x?x?xf32, #[[$map4]]> to memref<?x?x?xf32, #[[$map8]]>
 //       CHECK:   call @linalg_copy_viewsxsxsxf32_viewsxsxsxf32(%[[o0]], %[[o1]]) :
-//  CHECK-SAME:   memref<?x?x?xf32, #[[map8]]>, memref<?x?x?xf32, #[[map8]]>
+//  CHECK-SAME:   memref<?x?x?xf32, #[[$map8]]>, memref<?x?x?xf32, #[[$map8]]>
 
 #matmul_accesses = [
   affine_map<(m, n, k) -> (m, k)>,

diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index b17a3f3e5efd..f55e20fe76c9 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -3,26 +3,26 @@
 // RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=0,0,2" -mlir-disable-threading=true | FileCheck %s -check-prefix=TILE-002
 // RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -mlir-disable-threading=true | FileCheck %s -check-prefix=TILE-234
 
-//   TILE-2-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-//  TILE-02-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// TILE-002-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// TILE-234-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-
-//   TILE-2-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-//  TILE-02-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// TILE-002-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// TILE-234-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
-//   TILE-2-DAG: #[[bound_map:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
-//  TILE-02-DAG: #[[bound_map:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
-// TILE-002-DAG: #[[bound_map:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
-// TILE-234-DAG: #[[bound_map_2:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
-// TILE-234-DAG: #[[bound_map_3:.*]] = affine_map<(d0)[s0] -> (3, -d0 + s0)>
-// TILE-234-DAG: #[[bound_map_4:.*]] = affine_map<(d0)[s0] -> (4, -d0 + s0)>
-
-//   TILE-2-DAG: #[[stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
-//  TILE-02-DAG: #[[stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
-// TILE-234-DAG: #[[stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
+//   TILE-2-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+//  TILE-02-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// TILE-002-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// TILE-234-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+
+//   TILE-2-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+//  TILE-02-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// TILE-002-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// TILE-234-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+
+//   TILE-2-DAG: #[[$bound_map:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
+//  TILE-02-DAG: #[[$bound_map:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
+// TILE-002-DAG: #[[$bound_map:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
+// TILE-234-DAG: #[[$bound_map_2:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
+// TILE-234-DAG: #[[$bound_map_3:.*]] = affine_map<(d0)[s0] -> (3, -d0 + s0)>
+// TILE-234-DAG: #[[$bound_map_4:.*]] = affine_map<(d0)[s0] -> (4, -d0 + s0)>
+
+//   TILE-2-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
+//  TILE-02-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
+// TILE-234-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
 
 func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg2: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   linalg.matmul(%arg0, %arg1, %arg2) : memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>
@@ -31,76 +31,76 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 // TILE-2-LABEL: func @matmul(
 //       TILE-2-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-2-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
 //       TILE-2:   %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
-//       TILE-2:   %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[K]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-2:   %[[szM:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[localM]]]
+//       TILE-2:   %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[K]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //       TILE-2:   %[[localK:.*]] = dim %{{.*}}, %c0
-//       TILE-2:   %[[szK:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localK]]]
-//       TILE-2:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-2:   linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
+//       TILE-2:   %[[szK:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[localK]]]
+//       TILE-2:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
+//       TILE-2:   linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>
 
 // TILE-02-LABEL: func @matmul(
 //       TILE-02-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-02-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-02: %[[N:.*]] = dim %arg1, %c1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02: %[[N:.*]] = dim %arg1, %c1 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-02: scf.for %[[J:.*]] = %{{.*}} to %[[N]] step %{{.*}} {
-//       TILE-02:   %[[K:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[K:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-02:   %[[localN:.*]] = dim %{{.*}}, %c1
-//       TILE-02:   %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]]
-//       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[K]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-02:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[szN:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[localN]]]
+//       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[K]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
+//       TILE-02:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-02:   %[[localK:.*]] = dim %{{.*}}, %c1
-//       TILE-02:   %[[szK:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localK]]]
-//       TILE-02:   %[[sCj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-02:   linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[szK:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[localK]]]
+//       TILE-02:   %[[sCj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
+//       TILE-02:   linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>
 
 // TILE-002-LABEL: func @matmul(
 //       TILE-002-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-002-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-002: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-002: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-002: scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} {
-//       TILE-002:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-002:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-002:   %[[localK:.*]] = dim %{{.*}}, %c1
-//       TILE-002:   %[[szK:.*]] = affine.min #[[bound_map]](%[[K]])[%[[localK]]]
-//       TILE-002:   %[[sAj:.*]] = subview %{{.*}}[0, %[[K]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-002:   %[[szK:.*]] = affine.min #[[$bound_map]](%[[K]])[%[[localK]]]
+//       TILE-002:   %[[sAj:.*]] = subview %{{.*}}[0, %[[K]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //       TILE-002:   %[[localK:.*]] = dim %{{.*}}, %c0
-//       TILE-002:   %[[szK:.*]] = affine.min #[[bound_map]](%[[K]])[%[[localK]]]
-//       TILE-002:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-002:   %[[sBj:.*]] = subview %{{.*}}[%[[K]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-002:   linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
+//       TILE-002:   %[[szK:.*]] = affine.min #[[$bound_map]](%[[K]])[%[[localK]]]
+//       TILE-002:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-002:   %[[sBj:.*]] = subview %{{.*}}[%[[K]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
+//       TILE-002:   linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>
 
 // TILE-234-LABEL: func @matmul(
 //       TILE-234-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-234-DAG: %[[C2:.*]] = constant 2 : index
 //       TILE-234-DAG: %[[C3:.*]] = constant 3 : index
 //       TILE-234-DAG: %[[C4:.*]] = constant 4 : index
-//       TILE-234: %[[ubM:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-234: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-234: %[[ubN:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-234: %[[ubM:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-234: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-234: %[[ubN:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-234:  scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[ubM]] step %{{.*}} {
 //       TILE-234:    scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[ubN]] step %{{.*}} {
 //       TILE-234:      scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} {
 //       TILE-234:        %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-234:        %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
+//       TILE-234:        %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[localM]]]
 //       TILE-234:        %[[localK:.*]] = dim %{{.*}}, %c1
-//       TILE-234:        %[[szK:.*]] = affine.min #[[bound_map_4]](%[[K]])[%[[localK]]]
-//       TILE-234:        %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%[[szM]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-234:        %[[szK:.*]] = affine.min #[[$bound_map_4]](%[[K]])[%[[localK]]]
+//       TILE-234:        %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%[[szM]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //       TILE-234:        %[[localK:.*]] = dim %{{.*}}, %c0
-//       TILE-234:        %[[szK:.*]] = affine.min #[[bound_map_4]](%[[K]])[%[[localK]]]
+//       TILE-234:        %[[szK:.*]] = affine.min #[[$bound_map_4]](%[[K]])[%[[localK]]]
 //       TILE-234:        %[[localN:.*]] = dim %{{.*}}, %c1
-//       TILE-234:        %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
-//       TILE-234:        %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%[[szK]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-234:        %[[szN:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[localN]]]
+//       TILE-234:        %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%[[szK]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //       TILE-234:        %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-234:        %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
+//       TILE-234:        %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[localM]]]
 //       TILE-234:        %[[localN:.*]] = dim %{{.*}}, %c1
-//       TILE-234:        %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
-//       TILE-234:        %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-234:        %[[szN:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[localN]]]
+//       TILE-234:        %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //
-//       TILE-234:        linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
+//       TILE-234:        linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>
 
 // When the buffer shapes are known at compile time, it is possible to avoid
 // the "min" in subview size computation. This test uses buffer sizes divisible
@@ -119,9 +119,9 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
 //       TILE-2-DAG: %[[M:.*]] = constant 10 : index
 //       TILE-2: scf.for %[[I:.*]] = %{{.*}} to %[[M]] step %{{.*}} {
 //       TILE-2:   %[[MIN2:.*]] = affine.min #map2(%[[I]])
-//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[MIN2]], 16] [1, 1] : memref<10x16xf32, #[[strided2D]]> to memref<?x16xf32, #[[strided2D]]>
+//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[MIN2]], 16] [1, 1] : memref<10x16xf32, #[[$strided2D]]> to memref<?x16xf32, #[[$strided2D]]>
 //       TILE-2:   %[[MIN22:.*]] = affine.min #map2(%[[I]])
-//       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[MIN22]], 12] [1, 1] : memref<10x12xf32, #[[strided2D]]> to memref<?x12xf32, #[[strided2D]]>
+//       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[MIN22]], 12] [1, 1] : memref<10x12xf32, #[[$strided2D]]> to memref<?x12xf32, #[[$strided2D]]>
 //       TILE-2:   linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]])
 
 // TILE-02-LABEL: func @matmul_static(
@@ -130,10 +130,10 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
 //       TILE-02-DAG: %[[N:.*]] = constant 12 : index
 //       TILE-02: scf.for %[[J:.*]] = %{{.*}} to %[[N]] step %{{.*}} {
 //       TILE-02:   %[[MIN2:.*]] = affine.min #map2(%[[J]])
-//       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[0, %[[J]]] [16, %[[MIN2]]] [1, 1] : memref<16x12xf32, #[[strided2D]]> to memref<16x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[0, %[[J]]] [16, %[[MIN2]]] [1, 1] : memref<16x12xf32, #[[$strided2D]]> to memref<16x?xf32, #[[$strided2D]]>
 //       TILE-02:   %[[MIN22:.*]] = affine.min #map2(%[[J]])
-//       TILE-02:   %[[sCj:.*]] = subview %{{.*}}[0, %[[J]]] [10, %[[MIN22]]] [1, 1] : memref<10x12xf32, #[[strided2D]]> to memref<10x?xf32, #[[strided2D]]>
-//       TILE-02:   linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<10x16xf32, #[[strided2D]]>, memref<16x?xf32, #[[strided2D]]>, memref<10x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[sCj:.*]] = subview %{{.*}}[0, %[[J]]] [10, %[[MIN22]]] [1, 1] : memref<10x12xf32, #[[$strided2D]]> to memref<10x?xf32, #[[$strided2D]]>
+//       TILE-02:   linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<10x16xf32, #[[$strided2D]]>, memref<16x?xf32, #[[$strided2D]]>, memref<10x?xf32, #[[$strided2D]]>
 
 // TILE-002-LABEL: func @matmul_static(
 //       TILE-002-DAG: %[[C0:.*]] = constant 0 : index
@@ -141,10 +141,10 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
 //       TILE-002-DAG: %[[C16:.*]] = constant 16 : index
 //       TILE-002: scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[C16]] step %{{.*}} {
 //       TILE-002:   %[[MIN2:.*]] = affine.min #map2(%[[K]])
-//       TILE-002:   %[[sAj:.*]] = subview %{{.*}}[0, %[[K]]] [10, %[[MIN2]]] [1, 1] : memref<10x16xf32, #[[strided2D]]> to memref<10x?xf32, #[[strided2D]]>
+//       TILE-002:   %[[sAj:.*]] = subview %{{.*}}[0, %[[K]]] [10, %[[MIN2]]] [1, 1] : memref<10x16xf32, #[[$strided2D]]> to memref<10x?xf32, #[[$strided2D]]>
 //       TILE-002:   %[[MIN22:.*]] = affine.min #map2(%[[K]])
-//       TILE-002:   %[[sBj:.*]] = subview %{{.*}}[%[[K]], 0] [%[[MIN22]], 12] [1, 1] : memref<16x12xf32, #[[strided2D]]> to memref<?x12xf32, #[[strided2D]]>
-//       TILE-002:   linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref<10x?xf32, #[[strided2D]]>, memref<?x12xf32, #[[strided2D]]>, memref<10x12xf32, #[[strided2D]]>
+//       TILE-002:   %[[sBj:.*]] = subview %{{.*}}[%[[K]], 0] [%[[MIN22]], 12] [1, 1] : memref<16x12xf32, #[[$strided2D]]> to memref<?x12xf32, #[[$strided2D]]>
+//       TILE-002:   linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref<10x?xf32, #[[$strided2D]]>, memref<?x12xf32, #[[$strided2D]]>, memref<10x12xf32, #[[$strided2D]]>
 
 // TILE-234-LABEL: func @matmul_static(
 //       TILE-234-DAG: %[[C0:.*]] = constant 0 : index
@@ -157,11 +157,11 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>, %arg1:
 //       TILE-234:  scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[C10]] step %{{.*}} {
 //       TILE-234:    scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[C12]] step %{{.*}} {
 //       TILE-234:      scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[C16]] step %{{.*}} {
-//       TILE-234:        %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%{{.*}}, %{{.*}}] [1, 1] : memref<10x16xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-234:        %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%{{.*}}, %{{.*}}] [1, 1] : memref<16x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-234:        %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%{{.*}}, %{{.*}}] [1, 1] : memref<10x12xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-234:        %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%{{.*}}, %{{.*}}] [1, 1] : memref<10x16xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
+//       TILE-234:        %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%{{.*}}, %{{.*}}] [1, 1] : memref<16x12xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
+//       TILE-234:        %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%{{.*}}, %{{.*}}] [1, 1] : memref<10x12xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //
-//       TILE-234:        linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
+//       TILE-234:        linalg.matmul(%[[sAik]], %[[sBkj]], %[[sCij]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D]]>
 
 func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<?xf32, offset: ?, strides: [1]>) {
   linalg.matvec(%arg0, %arg1, %arg2) : memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?xf32, offset: ?, strides: [1]>, memref<?xf32, offset: ?, strides: [1]>
@@ -173,16 +173,16 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 //  TILE-2-SAME: %[[ARG2:[0-9a-zA-Z]*]]: memref
 //       TILE-2-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-2-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
 //       TILE-2:   %[[localM:.*]] = dim %[[ARG0]], %c0
-//       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
-//       TILE-2:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[N]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-2:   %[[szM:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[localM]]]
+//       TILE-2:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[N]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //       TILE-2:   %[[localN:.*]] = dim %{{.*}}, %c0
-//       TILE-2:   %[[szN:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localN]]]
-//       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szN]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
-//       TILE-2:   linalg.matvec(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[strided2D]]>, memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>
+//       TILE-2:   %[[szN:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[localN]]]
+//       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szN]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
+//       TILE-2:   linalg.matvec(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>
 
 // TILE-02-LABEL: func @matvec(
 // TILE-02-SAME: %[[ARG0:[0-9a-zA-Z]*]]: memref
@@ -190,16 +190,16 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 // TILE-02-SAME: %[[ARG2:[0-9a-zA-Z]*]]: memref
 //       TILE-02-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-02-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-02: %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-02: scf.for %[[J]] = %{{.*}}{{.*}} to %[[K]] step %{{.*}} {
-//       TILE-02:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02: %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-02: scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[K]] step %{{.*}} {
+//       TILE-02:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-02:   %[[localN:.*]] = dim %{{.*}}, %c1
-//       TILE-02:   %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]]
-//       TILE-02:   %[[sAj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[szN:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[localN]]]
+//       TILE-02:   %[[sAj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //       TILE-02:   %[[localN:.*]] = dim %{{.*}}, %c0
-//       TILE-02:   %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]]
-//       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
-//       TILE-02:   linalg.matvec(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[strided2D]]>, memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>
+//       TILE-02:   %[[szN:.*]] = affine.min #[[$bound_map]](%[[J]])[%[[localN]]]
+//       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
+//       TILE-02:   linalg.matvec(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[$strided2D]]>, memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>
 
 // TILE-002-LABEL: func @matvec(
 // TILE-002-SAME: %[[ARG0:[0-9a-zA-Z]*]]: memref
@@ -214,23 +214,23 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 //       TILE-234-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-234-DAG: %[[C2:.*]] = constant 2 : index
 //       TILE-234-DAG: %[[C3:.*]] = constant 3 : index
-//       TILE-234: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-234: %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-234: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[$strided2D]]>
+//       TILE-234: %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[$strided2D]]>
 //       TILE-234:  scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
 //       TILE-234:    scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[K]] step %{{.*}} {
 //       TILE-234:      %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-234:      %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
+//       TILE-234:      %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[localM]]]
 //       TILE-234:      %[[localN:.*]] = dim %{{.*}}, %c1
-//       TILE-234:      %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
-//       TILE-234:      %[[sAij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
+//       TILE-234:      %[[szN:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[localN]]]
+//       TILE-234:      %[[sAij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[$strided2D]]> to memref<?x?xf32, #[[$strided2D]]>
 //       TILE-234:      %[[localN:.*]] = dim %{{.*}}, %c0
-//       TILE-234:      %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
-//       TILE-234:      %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
+//       TILE-234:      %[[szN:.*]] = affine.min #[[$bound_map_3]](%[[J]])[%[[localN]]]
+//       TILE-234:      %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
 //       TILE-234:      %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-234:      %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
-//       TILE-234:      %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
+//       TILE-234:      %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[localM]]]
+//       TILE-234:      %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
 //
-//       TILE-234:      linalg.matvec(%[[sAij]], %[[sBj]], %[[sCi]]) : memref<?x?xf32, #[[strided2D]]>, memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>
+//       TILE-234:      linalg.matvec(%[[sAij]], %[[sBj]], %[[sCi]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>
 
 func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<f32>) {
   linalg.dot(%arg0, %arg1, %arg2) : memref<?xf32, offset: ?, strides: [1]>, memref<?xf32, offset: ?, strides: [1]>, memref<f32>
@@ -239,15 +239,15 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
 // TILE-2-LABEL: func @dot(
 //       TILE-2-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-2-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?xf32, #[[strided1D]]>
+//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?xf32, #[[$strided1D]]>
 //       TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
 //       TILE-2:   %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
-//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
+//       TILE-2:   %[[szM:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[localM]]]
+//       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
 //       TILE-2:   %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
-//       TILE-2:   %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
-//       TILE-2:   linalg.dot(%[[sAi]], %[[sBi]], {{.*}}) : memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>, memref<f32>
+//       TILE-2:   %[[szM:.*]] = affine.min #[[$bound_map]](%[[I]])[%[[localM]]]
+//       TILE-2:   %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
+//       TILE-2:   linalg.dot(%[[sAi]], %[[sBi]], {{.*}}) : memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>, memref<f32>
 
 // TILE-02-LABEL: func @dot(
 //   TILE-02-NOT: scf.for
@@ -258,15 +258,15 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
 // TILE-234-LABEL: func @dot(
 //       TILE-234-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-234-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-234:  %[[ubK:.*]] = dim %{{.*}}, %c0 : memref<?xf32, #[[strided1D]]>
+//       TILE-234:  %[[ubK:.*]] = dim %{{.*}}, %c0 : memref<?xf32, #[[$strided1D]]>
 //       TILE-234:  scf.for %[[I:.*]] = %{{.*}} to %[[ubK]] step %{{.*}} {
 //       TILE-234:    %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-234:    %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
-//       TILE-234:    %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
+//       TILE-234:    %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[localM]]]
+//       TILE-234:    %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
 //       TILE-234:    %[[localM:.*]] = dim %{{.*}}, %c0
-//       TILE-234:    %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
-//       TILE-234:    %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
-//       TILE-234:    linalg.dot(%[[sAi]], %[[sBi]], %{{.*}}) : memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>, memref<f32>
+//       TILE-234:    %[[szM:.*]] = affine.min #[[$bound_map_2]](%[[I]])[%[[localM]]]
+//       TILE-234:    %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
+//       TILE-234:    linalg.dot(%[[sAi]], %[[sBi]], %{{.*}}) : memref<?xf32, #[[$strided1D]]>, memref<?xf32, #[[$strided1D]]>, memref<f32>
 
 func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref<127x99xf32>, f32
@@ -276,13 +276,13 @@ func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
 //       TILE-2:   for
 //   TILE-2-NOT:   for
 //       TILE-2:       subview{{.*}} : memref<127x99xf32>
-//       TILE-2:       linalg.fill{{.*}} : memref<?x99xf32, #[[stride_99_1_layout_map]]>, f32
+//       TILE-2:       linalg.fill{{.*}} : memref<?x99xf32, #[[$stride_99_1_layout_map]]>, f32
 
 // TILE-02-LABEL: func @fill_static
 //       TILE-02:   for
 //   TILE-02-NOT:   for
 //       TILE-02:       subview{{.*}} : memref<127x99xf32>
-//       TILE-02:       linalg.fill{{.*}} : memref<127x?xf32, #[[stride_99_1_layout_map]]>, f32
+//       TILE-02:       linalg.fill{{.*}} : memref<127x?xf32, #[[$stride_99_1_layout_map]]>, f32
 
 // TILE-002-LABEL: func @fill_static
 //   TILE-002-NOT:   for
@@ -293,7 +293,7 @@ func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
 //       TILE-234:     for
 //   TILE-234-NOT:   for
 //       TILE-234:       subview{{.*}} : memref<127x99xf32>
-//       TILE-234:       linalg.fill{{.*}} : memref<?x?xf32, #[[stride_99_1_layout_map]]>, f32
+//       TILE-234:       linalg.fill{{.*}} : memref<?x?xf32, #[[$stride_99_1_layout_map]]>, f32
 
 
 func @fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: f32) {

diff --git a/mlir/test/Dialect/Linalg/tile_conv.mlir b/mlir/test/Dialect/Linalg/tile_conv.mlir
index 86300f87938e..1bbb8b60382e 100644
--- a/mlir/test/Dialect/Linalg/tile_conv.mlir
+++ b/mlir/test/Dialect/Linalg/tile_conv.mlir
@@ -1,44 +1,44 @@
 // RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,0,0,4" | FileCheck %s -check-prefix=TILE-23004
 
-// TILE-23004-DAG: #[[D0x30pS0x10:.*]] = affine_map<(d0) -> (d0 * 30)>
-// TILE-23004-DAG: #[[S0x10p90D0x30pS1:.*]] = affine_map<(d0)[s0, s1] -> (s0 * 10 + 90, d0 * -30 + s1)>
-// TILE-23004-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
-// TILE-23004-DAG: #[[bound_map_4:.*]] = affine_map<(d0)[s0] -> (4, -d0 + s0)>
+// TILE-23004-DAG: #[[$D0x30pS0x10:.*]] = affine_map<(d0) -> (d0 * 30)>
+// TILE-23004-DAG: #[[$S0x10p90D0x30pS1:.*]] = affine_map<(d0)[s0, s1] -> (s0 * 10 + 90, d0 * -30 + s1)>
+// TILE-23004-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// TILE-23004-DAG: #[[$bound_map_4:.*]] = affine_map<(d0)[s0] -> (4, -d0 + s0)>
 
 func @conv(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg2: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>) {
   linalg.conv(%arg0, %arg1, %arg2) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>
   return
 }
 // TILE-23004-LABEL: func @conv(
-//       TILE-23004:   %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>) {
+//       TILE-23004:   %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>) {
 //       TILE-23004-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-23004-DAG: %[[C2:.*]] = constant 2 : index
 //       TILE-23004-DAG: %[[C3:.*]] = constant 3 : index
 //       TILE-23004-DAG: %[[C4:.*]] = constant 4 : index
-//       TILE-23004:   %[[Q:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:   %[[B:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:   %[[PaddedInput0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:   %[[X0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:   %[[Q:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:   %[[B:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:   %[[PaddedInput0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:   %[[X0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       TILE-23004:   scf.for %[[ivI:.*]] = %{{.*}} to %[[B]] step %{{.*}} {
 //       TILE-23004:     scf.for %[[ivJ:.*]] = %{{.*}} to %[[X0]] step %{{.*}} {
 //       TILE-23004:       scf.for %[[ivK:.*]] = %{{.*}} to %[[Q]] step %{{.*}} {
-//       TILE-23004:         %[[Z0:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[Z1:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[Z2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[szK:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[Z2]]]
-//       TILE-23004:         %[[K:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[FilterView:.*]] = subview %{{.*}}[0, 0, %[[ivK]], 0] [%[[Z0]], %[[Z1]], %[[szK]], %[[K]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[Z0:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:         %[[Z1:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:         %[[Z2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:         %[[szK:.*]] = affine.min #[[$bound_map_4]](%[[ivK]])[%[[Z2]]]
+//       TILE-23004:         %[[K:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:         %[[FilterView:.*]] = subview %{{.*}}[0, 0, %[[ivK]], 0] [%[[Z0]], %[[Z1]], %[[szK]], %[[K]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[$strided4D]]> to memref<?x?x?x?xf32, #[[$strided4D]]>
 //
-//       TILE-23004:         %[[J1:.*]] = affine.apply #[[D0x30pS0x10]](%[[ivJ]])
-//       TILE-23004:         %[[PaddedInput0b:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[I1pStep:.*]] = affine.min #[[S0x10p90D0x30pS1]](%[[ivJ]])[%[[PaddedInput0]], %[[PaddedInput0b]]]
-//       TILE-23004:         %[[SZ2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[J1:.*]] = affine.apply #[[$D0x30pS0x10]](%[[ivJ]])
+//       TILE-23004:         %[[PaddedInput0b:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:         %[[I1pStep:.*]] = affine.min #[[$S0x10p90D0x30pS1]](%[[ivJ]])[%[[PaddedInput0]], %[[PaddedInput0b]]]
+//       TILE-23004:         %[[SZ2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
 //       TILE-23004:         %[[dim3:.*]] = dim %{{.*}}, %c3
-//       TILE-23004:         %[[sz3:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[dim3]]]
-//       TILE-23004:         %[[InputView:.*]] = subview %{{.*}}[%[[ivI]], %[[J1]], 0, %[[ivK]]] [%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[sz3:.*]] = affine.min #[[$bound_map_4]](%[[ivK]])[%[[dim3]]]
+//       TILE-23004:         %[[InputView:.*]] = subview %{{.*}}[%[[ivI]], %[[J1]], 0, %[[ivK]]] [%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[$strided4D]]> to memref<?x?x?x?xf32, #[[$strided4D]]>
 //
-//       TILE-23004:         %[[X0:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[X1:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[OutputView:.*]] = subview %{{.*}}[%[[ivI]], %[[ivJ]], 0, 0] [%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[X0:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:         %[[X1:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[$strided4D]]>
+//       TILE-23004:         %[[OutputView:.*]] = subview %{{.*}}[%[[ivI]], %[[ivJ]], 0, 0] [%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[$strided4D]]> to memref<?x?x?x?xf32, #[[$strided4D]]>
 //
-//       TILE-23004:         linalg.conv(%[[FilterView]], %[[InputView]], %[[OutputView]]) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, #[[strided4D]]>, memref<?x?x?x?xf32, #[[strided4D]]>, memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         linalg.conv(%[[FilterView]], %[[InputView]], %[[OutputView]]) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, #[[$strided4D]]>, memref<?x?x?x?xf32, #[[$strided4D]]>, memref<?x?x?x?xf32, #[[$strided4D]]>

diff --git a/mlir/test/Dialect/Linalg/tile_conv_padding.mlir b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
index 7bcaa25f210b..262bfff17855 100644
--- a/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
+++ b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
@@ -1,36 +1,36 @@
 // RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,0,0,4" | FileCheck %s -check-prefix=TILE-23004
 // RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2" | FileCheck %s -check-prefix=TILE-20000
 
-// TILE-23004-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
-// TILE-20000-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
-// TILE-20000-DAG: #[[minmap:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
+// TILE-23004-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// TILE-20000-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// TILE-20000-DAG: #[[$minmap:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
 
 func @conv_padding(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg2: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>) {
   linalg.conv(%arg0, %arg1, %arg2) {dilations = [10, 20], padding = dense<[[1, 1], [0, 1]]> : tensor<2x2xi64>, strides = [30, 40]} : memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>
   return
 }
 // TILE-23004-LABEL: func @conv_padding(
-//  TILE-23004-SAME:   %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
-//  TILE-23004-SAME:   %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
-//  TILE-23004-SAME:   %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>)
+//  TILE-23004-SAME:   %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
+//  TILE-23004-SAME:   %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
+//  TILE-23004-SAME:   %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>)
 //       TILE-23004:         linalg.conv(%[[ARG0]], %[[ARG1]], %[[ARG2]])
 
 // TILE-20000-LABEL: func @conv_padding(
-//  TILE-20000-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
-//  TILE-20000-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
-//  TILE-20000-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>)
+//  TILE-20000-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
+//  TILE-20000-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
+//  TILE-20000-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>)
 //   TILE-20000-DAG:   %[[C0:.*]] = constant 0 : index
 //   TILE-20000-DAG:   %[[C2:.*]] = constant 2 : index
 //       TILE-20000:   %[[B:.*]] = dim %[[ARG1]], %c0
 //       TILE-20000:   scf.for %[[ivI:.*]] = %[[C0]] to %[[B]] step %[[C2]] {
 //       TILE-20000:     %[[DIM10:.*]] = dim %[[ARG1]], %c0
-//       TILE-20000:     %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM10]]]
+//       TILE-20000:     %[[EXTENT:.*]] = affine.min #[[$minmap]](%[[ivI]])[%[[DIM10]]]
 //       TILE-20000:     %[[DIM11:.*]] = dim %[[ARG1]], %c1
 //       TILE-20000:     %[[DIM12:.*]] = dim %[[ARG1]], %c2
 //       TILE-20000:     %[[DIM13:.*]] = dim %[[ARG1]], %c3
 //       TILE-20000:     %[[SUBVIEW1:.*]] = subview %[[ARG1]][%[[ivI]], 0, 0, 0] [%[[EXTENT]], %[[DIM11]], %[[DIM12]], %[[DIM13]]]
 //       TILE-20000:     %[[DIM20:.*]] = dim %[[ARG2]], %c0
-//       TILE-20000:     %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM20]]]
+//       TILE-20000:     %[[EXTENT:.*]] = affine.min #[[$minmap]](%[[ivI]])[%[[DIM20]]]
 //       TILE-20000:     %[[DIM21:.*]] = dim %[[ARG2]], %c1
 //       TILE-20000:     %[[DIM22:.*]] = dim %[[ARG2]], %c2
 //       TILE-20000:     %[[DIM23:.*]] = dim %[[ARG2]], %c3

diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index a70421f35831..7ea28a274e05 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -1,15 +1,15 @@
 // RUN: mlir-opt %s -test-linalg-transform-patterns=test-patterns | FileCheck %s
 
-// CHECK-DAG: #[[STRIDED_1D:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1  + s0)>
+// CHECK-DAG: #[[$STRIDED_1D:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1  + s0)>
 // Map corresponding to a 2D memory access where the stride along the last dim is known to be 1.
-// CHECK-DAG: #[[STRIDED_2D_u_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$STRIDED_2D_u_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 // Map corresponding to a 2D memory access where the stride along all dims are unknown.
-// CHECK-DAG: #[[STRIDED_2D:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-// CHECK-DAG: #[[mk:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
-// CHECK-DAG: #[[kn:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
-// CHECK-DAG: #[[mn:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
-// CHECK-DAG: #[[km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
+// CHECK-DAG: #[[$STRIDED_2D:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// CHECK-DAG: #[[$mk:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
+// CHECK-DAG: #[[$kn:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
+// CHECK-DAG: #[[$mn:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// CHECK-DAG: #[[$nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
+// CHECK-DAG: #[[$km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
 
 func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
           %y: memref<?xf32, offset: ?, strides: [1]>,
@@ -48,7 +48,7 @@ func @matvec(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK-DAG:     %[[c6:.*]] = constant 6 : index
 // CHECK:         scf.parallel {{.*}} step (%[[c5]])
 // CHECK:           scf.for {{.*}} step %[[c6]]
-// CHECK:             linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?xf32, #[[STRIDED_1D]]>, memref<?xf32, #[[STRIDED_1D]]>
+// CHECK:             linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?xf32, #[[$STRIDED_1D]]>, memref<?xf32, #[[$STRIDED_1D]]>
 
 func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
@@ -85,7 +85,7 @@ func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK:                           scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2]] {
 // CHECK:                             scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] {
 // CHECK:                               scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] {
-// CHECK:                                 linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:                                 linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>
 
 #matmul_trait = {
   args_in = 2,
@@ -112,7 +112,7 @@ func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
 //       CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x16xf32>
 //       CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<16x32xf32>
 //       CHECK: vector.transfer_read %{{.*}} : memref<8x32xf32>, vector<8x32xf32>
-//       CHECK: vector.contract {indexing_maps = [#[[mk]], #[[kn]], #[[mn]]], iterator_types = ["parallel", "parallel", "reduction"]} %{{.*}}, %{{.*}}, %{{.*}} : vector<8x16xf32>, vector<16x32xf32> into vector<8x32xf32>
+//       CHECK: vector.contract {indexing_maps = [#[[$mk]], #[[$kn]], #[[$mn]]], iterator_types = ["parallel", "parallel", "reduction"]} %{{.*}}, %{{.*}}, %{{.*}} : vector<8x16xf32>, vector<16x32xf32> into vector<8x32xf32>
 //       CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<8x32xf32>
 
 func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
@@ -159,12 +159,12 @@ func @permute_generic(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 }
 // CHECK-LABEL:  func @permute_generic
 // CHECK:        linalg.generic {args_in = 2 : i64, args_out = 1 : i64,
-// CHECK-SAME:   indexing_maps = [#[[kn]], #[[nm]], #[[km]]],
+// CHECK-SAME:   indexing_maps = [#[[$kn]], #[[$nm]], #[[$km]]],
 // CHECK-SAME:   iterator_types = ["parallel", "reduction", "parallel"],
 // CHECK-SAME:   library_call = "linalg_matmul"} %{{.*}}, %{{.*}}, %{{.*}}
-// CHECK:          memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
-// CHECK-SAME:     memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
-// CHECK-SAME:     memref<?x?xf32, #[[STRIDED_2D_u_1]]>
+// CHECK:          memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
+// CHECK-SAME:     memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
+// CHECK-SAME:     memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
 
 #indexed_matmul_trait = {
   args_in = 2,
@@ -189,12 +189,12 @@ func @permute_generic_indexed(
 }
 // CHECK-LABEL:  func @permute_generic_indexed
 // CHECK:        linalg.indexed_generic {args_in = 2 : i64, args_out = 1 : i64,
-// CHECK-SAME:     indexing_maps = [#[[kn]], #[[nm]], #[[km]]],
+// CHECK-SAME:     indexing_maps = [#[[$kn]], #[[$nm]], #[[$km]]],
 // CHECK-SAME:     iterator_types = ["parallel", "reduction", "parallel"],
 // CHECK-SAME:     library_call = "linalg_matmul_indexed"} %{{.*}}, %{{.*}}, %{{.*}}
-// CHECK:            memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
-// CHECK-SAME:       memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
-// CHECK-SAME:       memref<?x?xf32, #[[STRIDED_2D_u_1]]>
+// CHECK:            memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
+// CHECK-SAME:       memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
+// CHECK-SAME:       memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
 
 func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %x: memref<?xf32, offset: ?, strides: [1]>,
@@ -211,7 +211,7 @@ func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK-DAG:     %[[c6:.*]] = constant 6 : index
 // CHECK:         scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c6]]
 // CHECK:           scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c5]]
-// CHECK:             linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?xf32, #[[STRIDED_1D]]>, memref<?xf32, #[[STRIDED_1D]]>
+// CHECK:             linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?xf32, #[[$STRIDED_1D]]>, memref<?xf32, #[[$STRIDED_1D]]>
 
 func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
@@ -242,7 +242,7 @@ func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK:                     scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c20]] {
 // CHECK:                       scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] {
 // CHECK:                         scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] {
-// CHECK:                                 linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:                                 linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>
 
 func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
@@ -274,6 +274,10 @@ func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
   return
 }
 // CHECK-LABEL: func @promote_subview_matmul
+// CHECK-DAG:     %[[c0:.*]] = constant 0 : index
+// CHECK-DAG:     %[[c2000:.*]] = constant 2000 : index
+// CHECK-DAG:     %[[c3000:.*]] = constant 3000 : index
+// CHECK-DAG:     %[[c4000:.*]] = constant 4000 : index
 // CHECK:         scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
 // CHECK:           scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
 // CHECK:             scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
@@ -282,13 +286,13 @@ func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK:               %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
 // CHECK:               %[[a0:.*]] = alloc({{%.*}}) : memref<?xi8>
 // CHECK:               %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
-// CHECK:               %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:               %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
 // CHECK:               %[[a1:.*]] = alloc({{%.*}}) : memref<?xi8>
 // CHECK:               %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
-// CHECK:               %[[l1:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:               %[[l1:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
 // CHECK:               %[[a2:.*]] = alloc({{%.*}}) : memref<?xi8>
 // CHECK:               %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
-// CHECK:               %[[l2:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:               %[[l2:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
 // CHECK:               linalg.copy(%[[s0]], %[[l0]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
 // CHECK:               linalg.copy(%[[s1]], %[[l1]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
 // CHECK:               linalg.copy(%[[s2]], %[[l2]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
@@ -324,6 +328,10 @@ func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?
   return
 }
 // CHECK-LABEL: func @promote_first_subview_matmul
+// CHECK-DAG:     %[[c0:.*]] = constant 0 : index
+// CHECK-DAG:     %[[c2000:.*]] = constant 2000 : index
+// CHECK-DAG:     %[[c3000:.*]] = constant 3000 : index
+// CHECK-DAG:     %[[c4000:.*]] = constant 4000 : index
 // CHECK:   scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
 // CHECK:     scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
 // CHECK:       scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
@@ -332,17 +340,17 @@ func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?
 // CHECK:         %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
 // CHECK:         %[[a0:.*]] = alloc({{%.*}}) : memref<?xi8>
 // CHECK:         %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
-// CHECK:         %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:         %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
 // CHECK-NOT:     %[[a1:.*]] = alloc({{%.*}}) : memref<?xi8>
 // CHECK-NOT:     %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
-// CHECK-NOT:     %[[l0:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK-NOT:     %[[l0:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
 // CHECK-NOT:     %[[a2:.*]] = alloc({{%.*}}) : memref<?xi8>
 // CHECK-NOT:     %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
-// CHECK-NOT:     %[[l0:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK-NOT:     %[[l0:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
 // CHECK:         linalg.copy(%[[s0]], %[[l0]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
 // CHECK-NOT:     linalg.copy(%[[s1]], %[[l1]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
 // CHECK-NOT:     linalg.copy(%[[s2]], %[[l2]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>^
-// CHECK:         linalg.matmul(%[[v0]], %[[s1]], %[[s2]]) : memref<?x?xf32>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:         linalg.matmul(%[[v0]], %[[s1]], %[[s2]]) : memref<?x?xf32>, memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>
 
 func @aligned_promote_fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   %c2000 = constant 2000 : index
@@ -361,7 +369,7 @@ func @aligned_promote_fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
 // CHECK:         %[[s0:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
 // CHECK:         %[[a0:.*]] = alloc({{%.*}}) {alignment = 32 : i64} : memref<?xi8>
 // CHECK:         %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
-// CHECK:         %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
+// CHECK:         %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
 // CHECK:         linalg.fill(%[[v0]], {{%.*}}) : memref<?x?xf32>, f32
 // CHECK:         linalg.copy(%[[s0]], %[[l0]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
 // CHECK:         linalg.fill(%[[v0]], %[[cf]]) : memref<?x?xf32>, f32

diff --git a/mlir/test/Dialect/Quant/canonicalize.mlir b/mlir/test/Dialect/Quant/canonicalize.mlir
index f9fc4fc466c5..11d460c8988a 100644
--- a/mlir/test/Dialect/Quant/canonicalize.mlir
+++ b/mlir/test/Dialect/Quant/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input=fail
+// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 // -----
 // CHECK-LABEL: redundant_scast

diff --git a/mlir/test/Dialect/Quant/convert-const.mlir b/mlir/test/Dialect/Quant/convert-const.mlir
index 611ed7038c06..bb8f8cf61c9d 100644
--- a/mlir/test/Dialect/Quant/convert-const.mlir
+++ b/mlir/test/Dialect/Quant/convert-const.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -quant-convert-const | FileCheck %s --dump-input=fail
+// RUN: mlir-opt %s -split-input-file -quant-convert-const | FileCheck %s
 
 // Magic numbers:
 //   7.8125e-03 = 1/128 = 2/256 : real range = [-1.0, 0.9921875] (for 8bit, zeroPoint=128)

diff --git a/mlir/test/Dialect/Quant/convert-fakequant.mlir b/mlir/test/Dialect/Quant/convert-fakequant.mlir
index f5709e6a8e10..0fa665eaad84 100644
--- a/mlir/test/Dialect/Quant/convert-fakequant.mlir
+++ b/mlir/test/Dialect/Quant/convert-fakequant.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -quant-convert-simulated-quantization | FileCheck %s --dump-input=fail
+// RUN: mlir-opt %s -split-input-file -quant-convert-simulated-quantization | FileCheck %s
 
 // -----
 // Verifies a quint8 single point.

diff --git a/mlir/test/Dialect/Shape/canonicalize.mlir b/mlir/test/Dialect/Shape/canonicalize.mlir
index 6693e44d9b30..292579d98f8e 100644
--- a/mlir/test/Dialect/Shape/canonicalize.mlir
+++ b/mlir/test/Dialect/Shape/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -split-input-file -allow-unregistered-dialect -canonicalize <%s | FileCheck %s --dump-input=fail
+// RUN: mlir-opt -split-input-file -allow-unregistered-dialect -canonicalize <%s | FileCheck %s
 
 // CHECK-LABEL: func @f
 func @f(%arg0: tensor<2x3x4xf32>) -> !shape.shape {
@@ -343,7 +343,7 @@ func @f(%arg0 : !shape.shape) -> !shape.shape {
 // Folding of any with partially constant operands is not yet implemented.
 // CHECK-LABEL: func @f
 func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> !shape.shape {
-  // CHECK-NEXT: shape.any
+  // CHECK-NEXT: %[[CS:.*]] = shape.any
   // CHECK-NEXT: return %[[CS]]
   %1 = shape.any %arg0, %arg1
   return %1 : !shape.shape

diff --git a/mlir/test/Dialect/Vector/vector-contract-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
index a0f5e66fea4b..702cd013a313 100644
--- a/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
@@ -616,7 +616,7 @@ func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> {
 
 // CHECK-LABEL: func @broadcast_stretch_at_end
 // CHECK-SAME: %[[A:.*0]]: vector<4x1xf32>
-// CHECK:      %[[C:.*]] = constant dense<0.000000e+00> : vector<4x3xf32>
+// CHECK:      %[[C0:.*]] = constant dense<0.000000e+00> : vector<4x3xf32>
 // CHECK:      %[[T0:.*]] = vector.extract %[[A]][0] : vector<4x1xf32>
 // CHECK:      %[[T1:.*]] = vector.extract %[[T0]][0] : vector<1xf32>
 // CHECK:      %[[T2:.*]] = splat %[[T1]] : vector<3xf32>
@@ -678,10 +678,10 @@ func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2xf32>
 // CHECK-LABEL: func @genbool_1d
 // CHECK: %[[TT:.*]] = constant true
 // CHECK: %[[C1:.*]] = constant dense<false> : vector<8xi1>
-// CHECK: %[[T0.*]] = vector.insert %[[TT]], %[[C1]] [0] : i1 into vector<8xi1>
-// CHECK: %[[T1.*]] = vector.insert %[[TT]], %[[T0]] [1] : i1 into vector<8xi1>
-// CHECK: %[[T2.*]] = vector.insert %[[TT]], %[[T1]] [2] : i1 into vector<8xi1>
-// CHECK: %[[T3.*]] = vector.insert %[[TT]], %[[T2]] [3] : i1 into vector<8xi1>
+// CHECK: %[[T0:.*]] = vector.insert %[[TT]], %[[C1]] [0] : i1 into vector<8xi1>
+// CHECK: %[[T1:.*]] = vector.insert %[[TT]], %[[T0]] [1] : i1 into vector<8xi1>
+// CHECK: %[[T2:.*]] = vector.insert %[[TT]], %[[T1]] [2] : i1 into vector<8xi1>
+// CHECK: %[[T3:.*]] = vector.insert %[[TT]], %[[T2]] [3] : i1 into vector<8xi1>
 // CHECK: return %[[T3]] : vector<8xi1>
 
 func @genbool_1d() -> vector<8xi1> {
@@ -705,7 +705,7 @@ func @genbool_2d() -> vector<4x4xi1> {
 }
 
 // CHECK-LABEL: func @genbool_3d
-// CHECK: %[[Tt:.*]] = constant true
+// CHECK: %[[TT:.*]] = constant true
 // CHECK: %[[C1:.*]] = constant dense<false> : vector<4xi1>
 // CHECK: %[[C2:.*]] = constant dense<false> : vector<3x4xi1>
 // CHECK: %[[C3:.*]] = constant dense<false> : vector<2x3x4xi1>

diff --git a/mlir/test/Dialect/Vector/vector-slices-transforms.mlir b/mlir/test/Dialect/Vector/vector-slices-transforms.mlir
index d00356f5f503..6297a3118fa2 100644
--- a/mlir/test/Dialect/Vector/vector-slices-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-slices-transforms.mlir
@@ -36,7 +36,7 @@ func @insert_slices(%arg0: vector<2x2xf32>,
 //       CHECK: %[[X1:.*]] = vector.extract_strided_slice %arg0 {offsets = [0, 2], sizes = [2, 1], strides = [1, 1]}
 //       CHECK: %[[X2:.*]] = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [1, 2], strides = [1, 1]}
 //       CHECK: %[[X3:.*]] = vector.extract_strided_slice %arg0 {offsets = [2, 2], sizes = [1, 1], strides = [1, 1]}
-//       CHECK: %[[X4:.*]] = vector.insert_strided_slice %[[X0]], %[[C0]] {offsets = [0, 0], strides = [1, 1]}
+//       CHECK: %[[X4:.*]] = vector.insert_strided_slice %[[X0]], %[[C]] {offsets = [0, 0], strides = [1, 1]}
 //       CHECK: %[[X5:.*]] = vector.insert_strided_slice %[[X1]], %[[X4]] {offsets = [0, 2], strides = [1, 1]}
 //       CHECK: %[[X6:.*]] = vector.insert_strided_slice %[[X2]], %[[X5]] {offsets = [2, 0], strides = [1, 1]}
 //       CHECK: %[[X7:.*]] = vector.insert_strided_slice %[[X3]], %[[X6]] {offsets = [2, 2], strides = [1, 1]}

diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index e524461f4b0c..4c5fa8fb1eac 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -8,17 +8,17 @@
 
 // CHECK: #map1 = affine_map<()[s0] -> (s0 + 1)>
 
-// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
-// CHECK-DAG: #[[BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
+// CHECK-DAG: #[[$BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+// CHECK-DAG: #[[$BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
 
-// CHECK-DAG: #[[BASE_MAP1:map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+// CHECK-DAG: #[[$BASE_MAP1:map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
 
-// CHECK-DAG: #[[BASE_MAP2:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 22 + d1)>
-// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>
-// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
+// CHECK-DAG: #[[$BASE_MAP2:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 22 + d1)>
+// CHECK-DAG: #[[$SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+// CHECK-DAG: #[[$SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>
+// CHECK-DAG: #[[$SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
 
 // CHECK-LABEL: func @func_with_ops
 // CHECK-SAME: %[[ARG:.*]]: f32
@@ -689,10 +689,10 @@ func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64
   // CHECK: %1 = memref_cast %arg1 : memref<?xf32> to memref<4xf32>
   %1 = memref_cast %arg1 : memref<?xf32> to memref<4xf32>
 
-  // CHECK: {{%.*}} = memref_cast %arg2 : memref<64x16x4xf32, #[[BASE_MAP0]]> to memref<64x16x4xf32, #[[BASE_MAP3]]>
+  // CHECK: {{%.*}} = memref_cast %arg2 : memref<64x16x4xf32, #[[$BASE_MAP0]]> to memref<64x16x4xf32, #[[$BASE_MAP3]]>
   %2 = memref_cast %arg2 : memref<64x16x4xf32, offset: 0, strides: [64, 4, 1]> to memref<64x16x4xf32, offset: ?, strides: [?, ?, ?]>
 
-  // CHECK: {{%.*}} = memref_cast {{%.*}} : memref<64x16x4xf32, #[[BASE_MAP3]]> to memref<64x16x4xf32, #[[BASE_MAP0]]>
+  // CHECK: {{%.*}} = memref_cast {{%.*}} : memref<64x16x4xf32, #[[$BASE_MAP3]]> to memref<64x16x4xf32, #[[$BASE_MAP0]]>
   %3 = memref_cast %2 : memref<64x16x4xf32, offset: ?, strides: [?, ?, ?]> to memref<64x16x4xf32, offset: 0, strides: [64, 4, 1]>
 
   // CHECK: memref_cast %{{.*}} : memref<4xf32> to memref<*xf32>
@@ -728,31 +728,31 @@ func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
 
   %0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
   // CHECK: subview %0[%c0, %c0, %c0] [%arg0, %arg1, %arg2] [%c1, %c1, %c1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
-  // CHECK-SAME: to memref<?x?x?xf32, #[[BASE_MAP3]]>
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<?x?x?xf32, #[[$BASE_MAP3]]>
   %1 = subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1]
     : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
       memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
 
   %2 = alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
   // CHECK: subview %2[%c1] [%arg0] [%c1] :
-  // CHECK-SAME: memref<64xf32, #[[BASE_MAP1]]>
-  // CHECK-SAME: to memref<?xf32, #[[SUBVIEW_MAP1]]>
+  // CHECK-SAME: memref<64xf32, #[[$BASE_MAP1]]>
+  // CHECK-SAME: to memref<?xf32, #[[$SUBVIEW_MAP1]]>
   %3 = subview %2[%c1][%arg0][%c1]
     : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to
       memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>
 
   %4 = alloc() : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>>
   // CHECK: subview %4[%c0, %c1] [%arg0, %arg1] [%c1, %c0] :
-  // CHECK-SAME: memref<64x22xf32, #[[BASE_MAP2]]>
-  // CHECK-SAME: to memref<?x?xf32, #[[SUBVIEW_MAP2]]>
+  // CHECK-SAME: memref<64x22xf32, #[[$BASE_MAP2]]>
+  // CHECK-SAME: to memref<?x?xf32, #[[$SUBVIEW_MAP2]]>
   %5 = subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0]
     : memref<64x22xf32, offset:0, strides: [22, 1]> to
       memref<?x?xf32, offset:?, strides: [?, ?]>
 
   // CHECK: subview %0[0, 2, 0] [4, 4, 4] [1, 1, 1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
-  // CHECK-SAME: to memref<4x4x4xf32, #[[SUBVIEW_MAP3]]>
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<4x4x4xf32, #[[$SUBVIEW_MAP3]]>
   %6 = subview %0[0, 2, 0][4, 4, 4][1, 1, 1]
     : memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
       memref<4x4x4xf32, offset:8, strides: [64, 4, 1]>
@@ -760,20 +760,20 @@ func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
   %7 = alloc(%arg1, %arg2) : memref<?x?xf32>
   // CHECK: subview {{%.*}}[0, 0] [4, 4] [1, 1] :
   // CHECK-SAME: memref<?x?xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[SUBVIEW_MAP4]]>
+  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP4]]>
   %8 = subview %7[0, 0][4, 4][1, 1]
     : memref<?x?xf32> to memref<4x4xf32, offset: ?, strides:[?, 1]>
 
   %9 = alloc() : memref<16x4xf32>
   // CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [{{%.*}}, {{%.*}}] :
   // CHECK-SAME: memref<16x4xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[SUBVIEW_MAP2]]
+  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP2]]
   %10 = subview %9[%arg1, %arg1][4, 4][%arg2, %arg2]
     : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[?, ?]>
 
   // CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [2, 2] :
   // CHECK-SAME: memref<16x4xf32>
-  // CHECK-SAME: to memref<4x4xf32, #[[SUBVIEW_MAP5]]
+  // CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP5]]
   %11 = subview %9[%arg1, %arg2][4, 4][2, 2]
     : memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[8, 2]>
 

diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir
index 733f04d3a690..bf371fd70f44 100644
--- a/mlir/test/IR/parser.mlir
+++ b/mlir/test/IR/parser.mlir
@@ -42,7 +42,7 @@
 // CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0) : (d0 - 1 == 0)>
 #set2 = affine_set<(d0) : (d0 - 1 == 0)>
 
-// CHECK-DAG: [[SET_TRUE:#set[0-9]+]] = affine_set<() : (0 == 0)>
+// CHECK-DAG: [[$SET_TRUE:#set[0-9]+]] = affine_set<() : (0 == 0)>
 
 // CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0)[s0] : (d0 - 2 >= 0, -d0 + 4 >= 0)>
 
@@ -826,7 +826,7 @@ func @type_alias() -> !i32_type_alias {
 
 // CHECK-LABEL: func @no_integer_set_constraints(
 func @no_integer_set_constraints() {
-  // CHECK: affine.if [[SET_TRUE]]() {
+  // CHECK: affine.if [[$SET_TRUE]]() {
   affine.if affine_set<() : ()> () {
   }
   return

diff --git a/mlir/test/IR/test-func-set-type.mlir b/mlir/test/IR/test-func-set-type.mlir
index 0ec890ed92ac..032283ad705d 100644
--- a/mlir/test/IR/test-func-set-type.mlir
+++ b/mlir/test/IR/test-func-set-type.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -test-func-set-type -split-input-file | FileCheck %s --dump-input=fail
+// RUN: mlir-opt %s -test-func-set-type -split-input-file | FileCheck %s
 
 // It's currently not possible to have an attribute with a function type due to
 // parser ambiguity. So instead we reference a function declaration to take the

diff --git a/mlir/test/Transforms/canonicalize-dce.mlir b/mlir/test/Transforms/canonicalize-dce.mlir
index 6028821934ff..266aa1c494ff 100644
--- a/mlir/test/Transforms/canonicalize-dce.mlir
+++ b/mlir/test/Transforms/canonicalize-dce.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input=fail
+// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 // Test case: Simple case of deleting a dead pure op.
 

diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index dc5c9a7253b3..7333446c6e5d 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -694,16 +694,16 @@ func @view(%arg0 : index) -> (f32, f32, f32, f32) {
 
 // -----
 
-// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
-// CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)>
-// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)>
-// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)>
-// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
-// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)>
-// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)>
-// CHECK-DAG: #[[SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2 * 2)>
-// CHECK-DAG: #[[SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)>
-// CHECK-DAG: #[[SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 12)>
+// CHECK-DAG: #[[$BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
+// CHECK-DAG: #[[$SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)>
+// CHECK-DAG: #[[$SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)>
+// CHECK-DAG: #[[$SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)>
+// CHECK-DAG: #[[$SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
+// CHECK-DAG: #[[$SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)>
+// CHECK-DAG: #[[$SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)>
+// CHECK-DAG: #[[$SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2 * 2)>
+// CHECK-DAG: #[[$SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)>
+// CHECK-DAG: #[[$SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 12)>
 
 
 // CHECK-LABEL: func @subview
@@ -731,8 +731,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   // Note that the subview uses the base memrefs layout map because it used
   // zero offset and unit stride arguments.
   // CHECK: subview %[[ALLOC0]][0, 0, 0] [7, 11, 2] [1, 1, 1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
-  // CHECK-SAME: to memref<7x11x2xf32, #[[BASE_MAP0]]>
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<7x11x2xf32, #[[$BASE_MAP0]]>
   %1 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c1, %c1, %c1]
     : memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
       memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
@@ -740,8 +740,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
 
   // Test: subview with one dynamic operand can also be folded.
   // CHECK: subview %[[ALLOC0]][0, %[[ARG0]], 0] [7, 11, 15] [1, 1, 1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
-  // CHECK-SAME: to memref<7x11x15xf32, #[[SUBVIEW_MAP0]]>
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<7x11x15xf32, #[[$SUBVIEW_MAP0]]>
   %2 = subview %0[%c0, %arg0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
     : memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
       memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
@@ -751,8 +751,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   %3 = alloc(%arg0) : memref<?x16x4xf32, offset : 0, strides : [64, 4, 1]>
   // Test: subview with constant operands but dynamic base memref is folded as long as the strides and offset of the base memref are static.
   // CHECK: subview %[[ALLOC1]][0, 0, 0] [7, 11, 15] [1, 1, 1] :
-  // CHECK-SAME: memref<?x16x4xf32, #[[BASE_MAP0]]>
-  // CHECK-SAME: to memref<7x11x15xf32, #[[BASE_MAP0]]>
+  // CHECK-SAME: memref<?x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<7x11x15xf32, #[[$BASE_MAP0]]>
   %4 = subview %3[%c0, %c0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
     : memref<?x16x4xf32, offset : 0, strides : [64, 4, 1]> to
       memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
@@ -760,8 +760,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
 
   // Test: subview offset operands are folded correctly w.r.t. base strides.
   // CHECK: subview %[[ALLOC0]][1, 2, 7] [7, 11, 2] [1, 1, 1] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
-  // CHECK-SAME: memref<7x11x2xf32, #[[SUBVIEW_MAP1]]>
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
+  // CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP1]]>
   %5 = subview %0[%c1, %c2, %c7] [%c7, %c11, %c2] [%c1, %c1, %c1]
     : memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
       memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
@@ -769,8 +769,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
 
   // Test: subview stride operands are folded correctly w.r.t. base strides.
   // CHECK: subview %[[ALLOC0]][0, 0, 0] [7, 11, 2] [2, 7, 11] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
-  // CHECK-SAME: to memref<7x11x2xf32, #[[SUBVIEW_MAP2]]>
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
+  // CHECK-SAME: to memref<7x11x2xf32, #[[$SUBVIEW_MAP2]]>
   %6 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c2, %c7, %c11]
     : memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
       memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
@@ -778,8 +778,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
 
   // Test: subview shape are folded, but offsets and strides are not even if base memref is static
   // CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [7, 11, 2] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
-  // CHECK-SAME: memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
+  // CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP3]]>
   %10 = subview %0[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] :
     memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to
     memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -788,8 +788,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
 
   // Test: subview strides are folded, but offsets and shape are not even if base memref is static
   // CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [2, 7, 11] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
-  // CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP4]]
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
+  // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP4]]
   %11 = subview %0[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c7, %c11] :
     memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to
     memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -798,8 +798,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
 
   // Test: subview offsets are folded, but strides and shape are not even if base memref is static
   // CHECK: subview %[[ALLOC0]][1, 2, 7] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] :
-  // CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
-  // CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP5]]
+  // CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
+  // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP5]]
   %13 = subview %0[%c1, %c2, %c7] [%arg1, %arg1, %arg1] [%arg0, %arg0, %arg0] :
     memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to
     memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -811,7 +811,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   // Test: subview shape are folded, even if base memref is not static
   // CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [7, 11, 2] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
   // CHECK-SAME: memref<?x?x?xf32> to
-  // CHECK-SAME: memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
+  // CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP3]]>
   %15 = subview %14[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] :
     memref<?x?x?xf32> to
     memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -820,7 +820,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   // TEST: subview strides are folded, in the type only the most minor stride is folded.
   // CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [2, 2, 2] :
   // CHECK-SAME: memref<?x?x?xf32> to
-  // CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP6]]
+  // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP6]]
   %16 = subview %14[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c2, %c2] :
     memref<?x?x?xf32> to
     memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -829,7 +829,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   // TEST: subview offsets are folded but the type offset remains dynamic, when the base memref is not static
   // CHECK: subview %[[ALLOC2]][1, 1, 1] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
   // CHECK-SAME: memref<?x?x?xf32> to
-  // CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
+  // CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP3]]
   %17 = subview %14[%c1, %c1, %c1] [%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] :
     memref<?x?x?xf32> to
     memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -842,7 +842,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   // TEST: subview strides are maintained when sizes are folded
   // CHECK: subview %[[ALLOC3]][%arg1, %arg1] [2, 4] [1, 1] :
   // CHECK-SAME: memref<12x4xf32> to
-  // CHECK-SAME: memref<2x4xf32, #[[SUBVIEW_MAP7]]>
+  // CHECK-SAME: memref<2x4xf32, #[[$SUBVIEW_MAP7]]>
   %19 = subview %18[%arg1, %arg1] [%c2, %c4] [1, 1] :
     memref<12x4xf32> to
     memref<?x?xf32, offset: ?, strides:[4, 1]>
@@ -851,7 +851,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   // TEST: subview strides and sizes are maintained when offsets are folded
   // CHECK: subview %[[ALLOC3]][2, 4] [12, 4] [1, 1] :
   // CHECK-SAME: memref<12x4xf32> to
-  // CHECK-SAME: memref<12x4xf32, #[[SUBVIEW_MAP8]]>
+  // CHECK-SAME: memref<12x4xf32, #[[$SUBVIEW_MAP8]]>
   %20 = subview %18[%c2, %c4] [12, 4] [1, 1] :
     memref<12x4xf32> to
     memref<12x4xf32, offset: ?, strides:[4, 1]>
@@ -960,8 +960,8 @@ func @memref_cast_folding_subview(%arg0: memref<4x5xf32>, %i: index) -> (memref<
 
 // -----
 
-// CHECK-DAG: #[[map0:.*]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
-// CHECK-DAG: #[[map1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
+// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 
 // CHECK-LABEL: func @memref_cast_folding_subview_static(
 func @memref_cast_folding_subview_static(%V: memref<16x16xf32>, %a: index, %b: index)
@@ -970,8 +970,8 @@ func @memref_cast_folding_subview_static(%V: memref<16x16xf32>, %a: index, %b: i
   %0 = memref_cast %V : memref<16x16xf32> to memref<?x?xf32>
   %1 = subview %0[0, 0][3, 4][1, 1] : memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
 
-  // CHECK:  subview{{.*}}: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
-  // CHECK:  memref_cast{{.*}}: memref<3x4xf32, #[[map0]]> to memref<3x4xf32, #[[map1]]>
+  // CHECK:  subview{{.*}}: memref<16x16xf32> to memref<3x4xf32, #[[$map0]]>
+  // CHECK:  memref_cast{{.*}}: memref<3x4xf32, #[[$map0]]> to memref<3x4xf32, #[[$map1]]>
   return %1: memref<3x4xf32, offset:?, strides:[?, 1]>
 }
 

diff --git a/mlir/test/Transforms/location-snapshot.mlir b/mlir/test/Transforms/location-snapshot.mlir
index cde2018ba939..8638efd3ccca 100644
--- a/mlir/test/Transforms/location-snapshot.mlir
+++ b/mlir/test/Transforms/location-snapshot.mlir
@@ -1,12 +1,12 @@
 // RUN: mlir-opt -allow-unregistered-dialect -snapshot-op-locations='filename=%/t' -mlir-print-debuginfo %s | FileCheck %s -DFILE=%/t
 // RUN: mlir-opt -allow-unregistered-dialect -snapshot-op-locations='filename=%/t tag='tagged'' -mlir-print-debuginfo %s | FileCheck %s --check-prefix=TAG -DFILE=%/t
 
-// CHECK-LABEL: func @function
+// CHECK: func @function(
 // CHECK-NEXT: loc("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})
 // CHECK-NEXT: loc("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})
 // CHECK-NEXT: } loc("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})
 
-// TAG-LABEL: func @function
+// TAG: func @function(
 // TAG-NEXT: loc(fused["original", "tagged"("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})])
 // TAG-NEXT: loc(fused["original", "tagged"("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})])
 // TAG-NEXT: } loc(fused["original", "tagged"("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})])

diff  --git a/mlir/test/Transforms/loop-fusion.mlir b/mlir/test/Transforms/loop-fusion.mlir
index 4d63b7347e60..0e79e0b67ba2 100644
--- a/mlir/test/Transforms/loop-fusion.mlir
+++ b/mlir/test/Transforms/loop-fusion.mlir
@@ -70,9 +70,9 @@ func @should_fuse_reduction_to_pointwise() {
 
 // -----
 
-// CHECK-DAG: [[MAP_SHIFT_MINUS_ONE_R1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
-// CHECK-DAG: [[MAP_SHIFT_D0_BY_ONE:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
-// CHECK-DAG: [[MAP_SHIFT_D1_BY_ONE:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + 1)>
+// CHECK-DAG: [[$MAP_SHIFT_MINUS_ONE_R1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
+// CHECK-DAG: [[$MAP_SHIFT_D0_BY_ONE:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + 1)>
+// CHECK-DAG: [[$MAP_SHIFT_D1_BY_ONE:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + 1)>
 
 // CHECK-LABEL: func @should_fuse_loop_nests_with_shifts() {
 func @should_fuse_loop_nests_with_shifts() {
@@ -100,10 +100,10 @@ func @should_fuse_loop_nests_with_shifts() {
   // NOTE: Should create a private memref with reduced shape 9x9xf32.
   // CHECK:      affine.for %{{.*}} = 1 to 10 {
   // CHECK-NEXT:   affine.for %{{.*}} = 1 to 10 {
-  // CHECK-NEXT:     %[[I:.*]] = affine.apply [[MAP_SHIFT_MINUS_ONE_R1]](%{{.*}})
-  // CHECK-NEXT:     %[[J:.*]] = affine.apply [[MAP_SHIFT_MINUS_ONE_R1]](%{{.*}})
-  // CHECK-NEXT:     affine.apply [[MAP_SHIFT_D0_BY_ONE]](%[[I]], %[[J]])
-  // CHECK-NEXT:     affine.apply [[MAP_SHIFT_D1_BY_ONE]](%[[I]], %[[J]])
+  // CHECK-NEXT:     %[[I:.*]] = affine.apply [[$MAP_SHIFT_MINUS_ONE_R1]](%{{.*}})
+  // CHECK-NEXT:     %[[J:.*]] = affine.apply [[$MAP_SHIFT_MINUS_ONE_R1]](%{{.*}})
+  // CHECK-NEXT:     affine.apply [[$MAP_SHIFT_D0_BY_ONE]](%[[I]], %[[J]])
+  // CHECK-NEXT:     affine.apply [[$MAP_SHIFT_D1_BY_ONE]](%[[I]], %[[J]])
   // CHECK-NEXT:     affine.store %{{.*}}, %{{.*}}[0, 0] : memref<1x1xf32>
   // CHECK-NEXT:     affine.load %{{.*}}[0, 0] : memref<1x1xf32>
   // CHECK-NEXT:   }
@@ -507,9 +507,9 @@ func @permute_and_fuse() {
 
 // -----
 
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
+// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
+// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
+// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
 
 // Reshape from a 64 x f32 to 16 x 4 x f32.
 // CHECK-LABEL: func @fuse_reshape_64_16_4
@@ -537,9 +537,9 @@ func @fuse_reshape_64_16_4(%in : memref<64xf32>) {
 }
 
 // -----
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
+// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 4)>
+// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 4)>
+// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)>
 
 // Reshape a 16x4xf32 to 64xf32.
 // CHECK-LABEL: func @fuse_reshape_16_4_64
@@ -559,10 +559,10 @@ func @fuse_reshape_16_4_64() {
     "foo"(%w) : (f32) -> ()
   }
 // CHECK:       affine.for %{{.*}} = 0 to 64 {
-// CHECK-NEXT:    affine.apply [[MAP0]](%{{.*}})
-// CHECK-NEXT:    affine.apply [[MAP1]](%{{.*}})
+// CHECK-NEXT:    affine.apply [[$MAP0]](%{{.*}})
+// CHECK-NEXT:    affine.apply [[$MAP1]](%{{.*}})
 // CHECK-NEXT:    affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<16x4xf32>
-// CHECK-NEXT:    affine.apply [[MAP2]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:    affine.apply [[$MAP2]](%{{.*}}, %{{.*}})
 // CHECK-NEXT:    affine.store %{{.*}}, %{{.*}}[0] : memref<1xf32>
 // CHECK-NEXT:    affine.load %{{.*}}[0] : memref<1xf32>
 // CHECK-NEXT:    "foo"(%{{.*}}) : (f32) -> ()
@@ -624,18 +624,18 @@ func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
 // Everything above is fused to a single 2-d loop nest, and the 6-d tensor %in
 // is eliminated if -memref-dataflow-opt is also supplied.
 //
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 9 + d1) floordiv 288)>
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 288) floordiv 144)>
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> ((((d0 * 9 + d1) mod 288) mod 144) floordiv 48)>
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) floordiv 16)>
-// CHECK-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) mod 16)>
-// CHECK-DAG: [[MAP11:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 9 + d1)>
-// CHECK-DAG: [[MAP12:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 288)>
-// CHECK-DAG: [[MAP13:#map[0-9]+]] = affine_map<(d0) -> ((d0 mod 288) floordiv 144)>
-// CHECK-DAG: [[MAP14:#map[0-9]+]] = affine_map<(d0) -> (((d0 mod 288) mod 144) floordiv 48)>
-// CHECK-DAG: [[MAP15:#map[0-9]+]] = affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv 16)>
-// CHECK-DAG: [[MAP16:#map[0-9]+]] = affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)>
-// CHECK-DAG: [[MAP17:#map[0-9]+]] = affine_map<(d0) -> (0)>
+// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 9 + d1) floordiv 288)>
+// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 9 + d1) mod 288) floordiv 144)>
+// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> ((((d0 * 9 + d1) mod 288) mod 144) floordiv 48)>
+// CHECK-DAG: [[$MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) floordiv 16)>
+// CHECK-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) mod 16)>
+// CHECK-DAG: [[$MAP11:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 9 + d1)>
+// CHECK-DAG: [[$MAP12:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 288)>
+// CHECK-DAG: [[$MAP13:#map[0-9]+]] = affine_map<(d0) -> ((d0 mod 288) floordiv 144)>
+// CHECK-DAG: [[$MAP14:#map[0-9]+]] = affine_map<(d0) -> (((d0 mod 288) mod 144) floordiv 48)>
+// CHECK-DAG: [[$MAP15:#map[0-9]+]] = affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv 16)>
+// CHECK-DAG: [[$MAP16:#map[0-9]+]] = affine_map<(d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)>
+// CHECK-DAG: [[$MAP17:#map[0-9]+]] = affine_map<(d0) -> (0)>
 
 //
 // CHECK-LABEL: func @R6_to_R2_reshape
@@ -644,20 +644,20 @@ func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
 // CHECK:       alloc() : memref<64x9xi32>
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to 64 {
 // CHECK-NEXT:    affine.for %{{.*}} = 0 to 9 {
-// CHECK-NEXT:      affine.apply [[MAP0]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP1]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP2]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP3]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP4]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP0]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP1]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP2]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP3]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP4]](%{{.*}}, %{{.*}})
 // CHECK-NEXT:      "foo"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (index, index, index, index, index, index) -> i32
 // CHECK-NEXT:      affine.store %{{.*}}, %{{.*}}[0, ((%{{.*}} * 9 + %{{.*}}) mod 288) floordiv 144, (((%{{.*}} * 9 + %{{.*}}) mod 288) mod 144) floordiv 48, ((((%{{.*}} * 9 + %{{.*}}) mod 288) mod 144) mod 48) floordiv 16, ((((%{{.*}} * 9 + %{{.*}}) mod 288) mod 144) mod 48) mod 16, 0] : memref<1x2x3x3x16x1xi32>
-// CHECK-NEXT:      affine.apply [[MAP11]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP12]](%{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP13]](%{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP14]](%{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP15]](%{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP16]](%{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP17]](%{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP11]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP12]](%{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP13]](%{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP14]](%{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP15]](%{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP16]](%{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP17]](%{{.*}})
 // CHECK-NEXT:      affine.load %{{.*}}[0, ((%{{.*}} * 9 + %{{.*}}) mod 288) floordiv 144, (((%{{.*}} * 9 + %{{.*}}) mod 288) mod 144) floordiv 48, ((((%{{.*}} * 9 + %{{.*}}) mod 288) mod 144) mod 48) floordiv 16, ((((%{{.*}} * 9 + %{{.*}}) mod 288) mod 144) mod 48) mod 16, 0] : memref<1x2x3x3x16x1xi32>
 // CHECK-NEXT:      affine.store %{{.*}}, %{{.*}}[0, 0] : memref<1x1xi32>
 // CHECK-NEXT:      affine.load %{{.*}}[0, 0] : memref<1x1xi32>
@@ -785,7 +785,7 @@ func @should_fuse_at_src_depth1_and_dst_depth1() {
 }
 
 // -----
-// CHECK: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 10 + d1)>
+// CHECK: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 10 + d1)>
 
 // CHECK-LABEL: func @should_fuse_src_depth1_at_dst_depth2
 func @should_fuse_src_depth1_at_dst_depth2() {
@@ -806,9 +806,9 @@ func @should_fuse_src_depth1_at_dst_depth2() {
   // loop IVs, so we should slice at depth 1 and insert the slice at depth 2.
   // CHECK:       affine.for %{{.*}} = 0 to 10 {
   // CHECK-NEXT:    affine.for %{{.*}} = 0 to 10 {
-  // CHECK-NEXT:      affine.apply [[MAP0]](%{{.*}}, %{{.*}})
+  // CHECK-NEXT:      affine.apply [[$MAP0]](%{{.*}}, %{{.*}})
   // CHECK-NEXT:      affine.store %{{.*}}, %{{.*}}[0] : memref<1xf32>
-  // CHECK-NEXT:      affine.apply [[MAP0]](%{{.*}}, %{{.*}})
+  // CHECK-NEXT:      affine.apply [[$MAP0]](%{{.*}}, %{{.*}})
   // CHECK-NEXT:      affine.load %{{.*}}[0] : memref<1xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
@@ -1210,19 +1210,19 @@ func @R3_to_R2_reshape() {
   }
   return
 }
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 3 + d1) floordiv 48)>
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 3 + d1)>
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 48)>
+// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 3 + d1) floordiv 48)>
+// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 3 + d1)>
+// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0) -> (d0 floordiv 48)>
 
 // CHECK-LABEL: func @R3_to_R2_reshape()
 // CHECK-DAG:    alloc() : memref<1x1x1xi32>
 // CHECK:        affine.for %{{.*}} = 0 to 32 {
 // CHECK-NEXT:     affine.for %{{.*}} = 0 to 3 {
-// CHECK-NEXT:      affine.apply [[MAP0]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP0]](%{{.*}}, %{{.*}})
 // CHECK-NEXT:      "foo"(%{{.*}}, %{{.*}}, %{{.*}}) : (index, index, index) -> i32
 // CHECK-NEXT:      affine.store %{{.*}}, %{{.*}}[0, 0, 0] : memref<1x1x1xi32>
-// CHECK-NEXT:      affine.apply [[MAP1]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      affine.apply [[MAP2]](%{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP1]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      affine.apply [[$MAP2]](%{{.*}})
 // CHECK-NEXT:      affine.load %{{.*}}[0, 0, 0] : memref<1x1x1xi32>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
@@ -1652,8 +1652,8 @@ func @should_fuse_live_out_writer(%arg0 : memref<10xf32>) -> memref<10xf32> {
 
 // The fused slice has 16 iterations from along %i0.
 
-// CHECK-DAG: [[MAP_LB:#map[0-9]+]] = affine_map<(d0) -> (d0 * 16)>
-// CHECK-DAG: [[MAP_UB:#map[0-9]+]] = affine_map<(d0) -> (d0 * 16 + 16)>
+// CHECK-DAG: [[$MAP_LB:#map[0-9]+]] = affine_map<(d0) -> (d0 * 16)>
+// CHECK-DAG: [[$MAP_UB:#map[0-9]+]] = affine_map<(d0) -> (d0 * 16 + 16)>
 
 // CHECK-LABEL: slice_tile
 func @slice_tile(%arg0: memref<128x8xf32>, %arg1: memref<32x8xf32>, %0 : f32) -> memref<32x8xf32> {
@@ -1681,7 +1681,7 @@ func @slice_tile(%arg0: memref<128x8xf32>, %arg1: memref<32x8xf32>, %0 : f32) ->
 }
 // CHECK:       affine.for %{{.*}} = 0 to 2 {
 // CHECK-NEXT:    affine.for %{{.*}} = 0 to 8 {
-// CHECK-NEXT:      affine.for %{{.*}} = [[MAP_LB]](%{{.*}}) to [[MAP_UB]](%{{.*}}) {
+// CHECK-NEXT:      affine.for %{{.*}} = [[$MAP_LB]](%{{.*}}) to [[$MAP_UB]](%{{.*}}) {
 // CHECK-NEXT:        affine.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<32x8xf32>
 // CHECK-NEXT:      }
 // CHECK-NEXT:      affine.for %{{.*}} = 0 to 8 {
@@ -2001,13 +2001,13 @@ func @fuse_across_varying_dims_complex(%arg0: f32) {
   }
   return
 }
-// MAXIMAL-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 72 + d1) floordiv 2304)>
-// MAXIMAL-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 72 + d1) mod 2304) floordiv 1152)>
-// MAXIMAL-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)>
-// MAXIMAL-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)>
-// MAXIMAL-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) mod 3)>
-// MAXIMAL-DAG: [[MAP7:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
-// MAXIMAL-DAG: [[MAP8:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 - d1 + 15)>
+// MAXIMAL-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> ((d0 * 72 + d1) floordiv 2304)>
+// MAXIMAL-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (((d0 * 72 + d1) mod 2304) floordiv 1152)>
+// MAXIMAL-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)>
+// MAXIMAL-DAG: [[$MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)>
+// MAXIMAL-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) mod 3)>
+// MAXIMAL-DAG: [[$MAP7:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
+// MAXIMAL-DAG: [[$MAP8:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 16 - d1 + 15)>
 // MAXIMAL-LABEL: func @fuse_across_varying_dims_complex
 // MAXIMAL-NEXT:  alloc() : memref<64x1xf32>
 // MAXIMAL-NEXT:  constant 0 : index
@@ -2018,25 +2018,25 @@ func @fuse_across_varying_dims_complex(%arg0: f32) {
 // MAXIMAL-NEXT:      affine.for %{{.*}} = 0 to 4 {
 // MAXIMAL-NEXT:        affine.for %{{.*}} = 0 to 16 {
 // MAXIMAL-NEXT:          affine.for %{{.*}} = 0 to 64 {
-// MAXIMAL-NEXT:            affine.apply [[MAP0]](%{{.*}}, %{{.*}})
-// MAXIMAL-NEXT:            affine.apply [[MAP1]](%{{.*}}, %{{.*}})
-// MAXIMAL-NEXT:            affine.apply [[MAP2]](%{{.*}}, %{{.*}})
-// MAXIMAL-NEXT:            affine.apply [[MAP3]](%{{.*}}, %{{.*}})
-// MAXIMAL-NEXT:            affine.apply [[MAP4]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:            affine.apply [[$MAP0]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:            affine.apply [[$MAP1]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:            affine.apply [[$MAP2]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:            affine.apply [[$MAP3]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:            affine.apply [[$MAP4]](%{{.*}}, %{{.*}})
 // MAXIMAL-NEXT:            affine.load %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] : memref<2x2x3x3x16x1xf32>
 // MAXIMAL-NEXT:            affine.store %{{.*}}, %{{.*}}[%{{.*}}, 0] : memref<64x1xf32>
 // MAXIMAL-NEXT:          }
 // MAXIMAL-NEXT:          affine.for %{{.*}} = 0 to 4 {
 // MAXIMAL-NEXT:            affine.for %{{.*}} = 0 to 16 {
-// MAXIMAL-NEXT:              affine.apply [[MAP7]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:              affine.apply [[$MAP7]](%{{.*}}, %{{.*}})
 // MAXIMAL-NEXT:              affine.load %{{.*}}[%{{.*}} * 16 + %{{.*}}, 0] : memref<64x1xf32>
 // MAXIMAL-NEXT:            }
 // MAXIMAL-NEXT:            affine.for %{{.*}} = 0 to 16 {
-// MAXIMAL-NEXT:              affine.apply [[MAP7]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:              affine.apply [[$MAP7]](%{{.*}}, %{{.*}})
 // MAXIMAL-NEXT:              affine.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<144x4xf32>
 // MAXIMAL-NEXT:            }
 // MAXIMAL-NEXT:          }
-// MAXIMAL-NEXT:          affine.apply [[MAP8]](%{{.*}}, %{{.*}})
+// MAXIMAL-NEXT:          affine.apply [[$MAP8]](%{{.*}}, %{{.*}})
 // MAXIMAL-NEXT:          affine.load %{{.*}}[%{{.*}} * 16 - %{{.*}} + 15, 0] : memref<64x1xf32>
 // MAXIMAL-NEXT:        }
 // MAXIMAL-NEXT:      }

diff  --git a/mlir/test/Transforms/memref-dataflow-opt.mlir b/mlir/test/Transforms/memref-dataflow-opt.mlir
index deb193e02bda..6d5288c775c7 100644
--- a/mlir/test/Transforms/memref-dataflow-opt.mlir
+++ b/mlir/test/Transforms/memref-dataflow-opt.mlir
@@ -1,10 +1,10 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -memref-dataflow-opt | FileCheck %s
 
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + 1)>
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0)>
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d1)>
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 - 1)>
-// CHECK-DAG: [[MAP4:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
+// CHECK-DAG: [[$MAP0:#map[0-9]+]] = affine_map<(d0, d1) -> (d1 + 1)>
+// CHECK-DAG: [[$MAP1:#map[0-9]+]] = affine_map<(d0, d1) -> (d0)>
+// CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: [[$MAP3:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 - 1)>
+// CHECK-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
 
 // CHECK-LABEL: func @simple_store_load() {
 func @simple_store_load() {
@@ -77,10 +77,10 @@ func @store_load_affine_apply() -> memref<10x10xf32> {
 // CHECK-NEXT:  %{{.*}} = alloc() : memref<10x10xf32>
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to 10 {
 // CHECK-NEXT:    affine.for %{{.*}} = 0 to 10 {
-// CHECK-NEXT:      %{{.*}} = affine.apply [[MAP0]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      %{{.*}} = affine.apply [[MAP1]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      %{{.*}} = affine.apply [[MAP2]](%{{.*}}, %{{.*}})
-// CHECK-NEXT:      %{{.*}} = affine.apply [[MAP3]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      %{{.*}} = affine.apply [[$MAP0]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      %{{.*}} = affine.apply [[$MAP1]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      %{{.*}} = affine.apply [[$MAP2]](%{{.*}}, %{{.*}})
+// CHECK-NEXT:      %{{.*}} = affine.apply [[$MAP3]](%{{.*}}, %{{.*}})
 // CHECK-NEXT:      affine.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<10x10xf32>
 // CHECK-NEXT:      %{{.*}} = addf %{{.*}}, %{{.*}} : f32
 // CHECK-NEXT:    }
@@ -240,7 +240,7 @@ func @store_load_store_nested_fwd(%N : index) -> f32 {
 // CHECK-NEXT:    affine.store %{{.*}}, %{{.*}}[%{{.*}}] : memref<10xf32>
 // CHECK-NEXT:    affine.for %{{.*}} = 0 to %{{.*}} {
 // CHECK-NEXT:      %{{.*}} = addf %{{.*}}, %{{.*}} : f32
-// CHECK-NEXT:      %{{.*}} = affine.apply [[MAP4]](%{{.*}})
+// CHECK-NEXT:      %{{.*}} = affine.apply [[$MAP4]](%{{.*}})
 // CHECK-NEXT:      affine.store %{{.*}}, %{{.*}}[%{{.*}}] : memref<10xf32>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }

diff  --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
index aab4f4a618c9..80256a17d0a0 100644
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -2,8 +2,8 @@
 
 // -----
 
-// CHECK-DAG: [[MOD_2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 2)>
-// CHECK-DAG: [[MAP_MINUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
+// CHECK-DAG: [[$MOD_2:#map[0-9]+]] = affine_map<(d0) -> (d0 mod 2)>
+// CHECK-DAG: [[$MAP_MINUS_1:#map[0-9]+]] = affine_map<(d0) -> (d0 - 1)>
 
 // CHECK-LABEL: func @loop_nest_dma() {
 func @loop_nest_dma() {
@@ -36,9 +36,9 @@ func @loop_nest_dma() {
 // CHECK-NEXT:  affine.dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}} mod 2, %{{.*}}], %{{.*}}[%{{.*}} mod 2, 0], %{{.*}} : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
 // CHECK-NEXT:  affine.for %{{.*}} = 1 to 8 {
 // CHECK-NEXT:    affine.dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}} mod 2, %{{.*}}], %{{.*}}[%{{.*}} mod 2, 0], %{{.*}} : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
-// CHECK-NEXT:    affine.apply [[MAP_MINUS_1]](%{{.*}})
-// CHECK-NEXT:    affine.apply [[MOD_2]](%{{.*}})
-// CHECK-NEXT:    affine.apply [[MOD_2]](%{{.*}})
+// CHECK-NEXT:    affine.apply [[$MAP_MINUS_1]](%{{.*}})
+// CHECK-NEXT:    affine.apply [[$MOD_2]](%{{.*}})
+// CHECK-NEXT:    affine.apply [[$MOD_2]](%{{.*}})
 // CHECK-NEXT:    affine.dma_wait %{{.*}}[%{{.*}} mod 2, 0], %{{.*}} : memref<2x1xf32>
 // CHECK-NEXT:    affine.load %{{.*}}[%{{.*}} mod 2, %{{.*}}] : memref<2x32xf32, 1>
 // CHECK-NEXT:    "compute"(%{{.*}}) : (f32) -> f32
@@ -47,9 +47,9 @@ func @loop_nest_dma() {
 // CHECK-NEXT:      "do_more_compute"(%{{.*}}, %{{.*}}) : (index, index) -> ()
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
-// CHECK-NEXT:  affine.apply [[MAP_MINUS_1]](%{{.*}})
-// CHECK-NEXT:  affine.apply [[MOD_2]](%{{.*}})
-// CHECK-NEXT:  affine.apply [[MOD_2]](%{{.*}})
+// CHECK-NEXT:  affine.apply [[$MAP_MINUS_1]](%{{.*}})
+// CHECK-NEXT:  affine.apply [[$MOD_2]](%{{.*}})
+// CHECK-NEXT:  affine.apply [[$MOD_2]](%{{.*}})
 // CHECK-NEXT:  affine.dma_wait %{{.*}}[%{{.*}} mod 2, 0], %{{.*}} : memref<2x1xf32>
 // CHECK-NEXT:  affine.load %{{.*}}[%{{.*}} mod 2, %{{.*}}] : memref<2x32xf32, 1>
 // CHECK-NEXT:  "compute"(%{{.*}}) : (f32) -> f32
@@ -64,8 +64,8 @@ func @loop_nest_dma() {
 
 // -----
 
-// CHECK-DAG: [[FLOOR_MOD_2:#map[0-9]+]] = affine_map<(d0) -> ((d0 floordiv 4) mod 2)>
-// CHECK-DAG: [[REMAP_SHIFT_MINUS_4:#map[0-9]+]] = affine_map<(d0) -> (d0 - 4)>
+// CHECK-DAG: [[$FLOOR_MOD_2:#map[0-9]+]] = affine_map<(d0) -> ((d0 floordiv 4) mod 2)>
+// CHECK-DAG: [[$REMAP_SHIFT_MINUS_4:#map[0-9]+]] = affine_map<(d0) -> (d0 - 4)>
 
 // CHECK-LABEL: @loop_step
 func @loop_step(%arg0: memref<512xf32>,
@@ -89,13 +89,13 @@ func @loop_step(%arg0: memref<512xf32>,
 // CHECK-NEXT:   affine.dma_start %{{.*}}[%{{.*}}], %{{.*}}[(%{{.*}} floordiv 4) mod 2, 0], [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
 // CHECK-NEXT:   affine.for %{{.*}} = 4 to 512 step 4 {
 // CHECK-NEXT:     affine.dma_start %{{.*}}[%{{.*}}], %{{.*}}[(%{{.*}} floordiv 4) mod 2, 0], [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
-// CHECK-NEXT:     affine.apply [[REMAP_SHIFT_MINUS_4]](%{{.*}})
-// CHECK-NEXT:     affine.apply [[FLOOR_MOD_2]](%{{.*}})
+// CHECK-NEXT:     affine.apply [[$REMAP_SHIFT_MINUS_4]](%{{.*}})
+// CHECK-NEXT:     affine.apply [[$FLOOR_MOD_2]](%{{.*}})
 // CHECK:          affine.dma_wait [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<2x1xi32>
 // CHECK-NEXT:     "compute"(%{{.*}}) : (index) -> ()
 // CHECK-NEXT:   }
-// CHECK-NEXT:   [[SHIFTED:%[0-9]+]] = affine.apply [[REMAP_SHIFT_MINUS_4]](%{{.*}})
-// CHECK-NEXT:   %{{.*}} = affine.apply [[FLOOR_MOD_2]]([[SHIFTED]])
+// CHECK-NEXT:   [[SHIFTED:%[0-9]+]] = affine.apply [[$REMAP_SHIFT_MINUS_4]](%{{.*}})
+// CHECK-NEXT:   %{{.*}} = affine.apply [[$FLOOR_MOD_2]]([[SHIFTED]])
 // CHECK:        affine.dma_wait [[TAG]][(%{{.*}} floordiv 4) mod 2, 0], %{{.*}} : memref<2x1xi32>
 // CHECK-NEXT:   "compute"(%{{.*}}) : (index) -> ()
 // CHECK-NEXT:   dealloc [[TAG]] : memref<2x1xi32>

diff  --git a/mlir/test/lit.cfg.py b/mlir/test/lit.cfg.py
index 7e8778fc4e83..8c556e2db535 100644
--- a/mlir/test/lit.cfg.py
+++ b/mlir/test/lit.cfg.py
@@ -75,3 +75,9 @@
 ])
 
 llvm_config.add_tool_substitutions(tools, tool_dirs)
+
+
+# FileCheck -enable-var-scope is enabled by default in MLIR tests.
+# This option avoids accidentally reusing a variable across a -LABEL match;
+# cross-label reuse can be explicitly opted into by prefixing the variable name with $.
+config.environment['FILECHECK_OPTS'] = "-enable-var-scope"

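For readers unfamiliar with the FileCheck option, here is a minimal sketch of what the new default means for a test file. It is not part of the commit; the function names and the affine map are hypothetical. With -enable-var-scope, a local variable such as [[MAP:#map[0-9]+]] captured before one CHECK-LABEL is discarded once the next label matches, so a later use of [[MAP]] would fail; prefixing the name with $ marks it as global and keeps it valid across labels, which is the mechanical change applied throughout this diff:

    // RUN: mlir-opt %s | FileCheck %s

    // A map captured with a global ($-prefixed) variable stays visible
    // after every subsequent CHECK-LABEL match.
    // CHECK-DAG: [[$MAP_PLUS_ONE:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>

    // CHECK-LABEL: func @first
    func @first(%i : index) {
      // CHECK: affine.apply [[$MAP_PLUS_ONE]](%{{.*}})
      %0 = affine.apply affine_map<(d0) -> (d0 + 1)>(%i)
      return
    }

    // CHECK-LABEL: func @second
    func @second(%i : index) {
      // A non-$ variable defined before @first would be out of scope here
      // under -enable-var-scope; the $ prefix opts out of that reset.
      // CHECK: affine.apply [[$MAP_PLUS_ONE]](%{{.*}})
      %1 = affine.apply affine_map<(d0) -> (d0 + 1)>(%i)
      return
    }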