[Mlir-commits] [mlir] 904f91d - [MLIR][Standard] Make the `dim` operation index an operand.

Frederik Gossen llvmlistbot at llvm.org
Wed Jun 10 06:55:15 PDT 2020


Author: Frederik Gossen
Date: 2020-06-10T13:54:47Z
New Revision: 904f91db5fcd74f493811df0787a1ddea651d03c

URL: https://github.com/llvm/llvm-project/commit/904f91db5fcd74f493811df0787a1ddea651d03c
DIFF: https://github.com/llvm/llvm-project/commit/904f91db5fcd74f493811df0787a1ddea651d03c.diff

LOG: [MLIR][Standard] Make the `dim` operation index an operand.

Allow for dynamic indices in the `dim` operation.
Rather than an attribute, the index is now an operand of type `index`.
This allows the operation to be applied to dynamically ranked tensors.
The correct lowering of dynamic indices remains to be implemented.

Differential Revision: https://reviews.llvm.org/D81551

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
    mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
    mlir/lib/Dialect/Affine/IR/AffineOps.cpp
    mlir/lib/Dialect/StandardOps/IR/Ops.cpp
    mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
    mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
    mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
    mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
    mlir/test/Dialect/Affine/dma-generate.mlir
    mlir/test/Dialect/Affine/invalid.mlir
    mlir/test/Dialect/Affine/loop-tiling.mlir
    mlir/test/Dialect/Affine/ops.mlir
    mlir/test/Dialect/GPU/outlining.mlir
    mlir/test/Dialect/Linalg/affine.mlir
    mlir/test/Dialect/Linalg/fusion-2-level.mlir
    mlir/test/Dialect/Linalg/fusion.mlir
    mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
    mlir/test/Dialect/Linalg/loops.mlir
    mlir/test/Dialect/Linalg/parallel_loops.mlir
    mlir/test/Dialect/Linalg/promote.mlir
    mlir/test/Dialect/Linalg/tile.mlir
    mlir/test/Dialect/Linalg/tile_conv.mlir
    mlir/test/Dialect/Linalg/tile_conv_padding.mlir
    mlir/test/Dialect/Linalg/tile_parallel.mlir
    mlir/test/Dialect/Linalg/transform-patterns.mlir
    mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
    mlir/test/EDSC/builder-api-test.cpp
    mlir/test/IR/core-ops.mlir
    mlir/test/IR/invalid-ops.mlir
    mlir/test/Transforms/canonicalize.mlir
    mlir/test/Transforms/constant-fold.mlir
    mlir/test/Transforms/pipeline-data-transfer.mlir
    mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir
    mlir/test/mlir-cuda-runner/all-reduce-op.mlir
    mlir/test/mlir-cuda-runner/all-reduce-region.mlir
    mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir
    mlir/test/mlir-cuda-runner/shuffle.mlir
    mlir/test/mlir-cuda-runner/two-modules.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
index eae71b0263c1..35bc0cd43a67 100644
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -1372,51 +1372,46 @@ def DeallocOp : Std_Op<"dealloc", [MemoryEffects<[MemFree]>]> {
 def DimOp : Std_Op<"dim", [NoSideEffect]> {
   let summary = "dimension index operation";
   let description = [{
-    Syntax:
-
-    ```
-    operation ::= ssa-id `=` `std.dim` ssa-id `,` integer-literal `:` type
-    ```
+    The `dim` operation takes a memref/tensor and a dimension operand of type
+    `index`.
+    It returns the size of the requested dimension of the given memref/tensor.
 
-    The `dim` operation takes a memref or tensor operand and a dimension index,
-    and returns an [`index`](../LangRef.md#index-type) that is the size of that
-    dimension.
-
-    The `dim` operation is represented with a single integer attribute named
-    `index`, and the type specifies the type of the memref or tensor operand.
+    The specified memref or tensor type is that of the first operand.
 
     Example:
 
     ```mlir
     // Always returns 4, can be constant folded:
-    %x = dim %A, 0 : tensor<4 x ? x f32>
+    %c0 = constant 0 : index
+    %x = dim %A, %c0 : tensor<4 x ? x f32>
 
     // Returns the dynamic dimension of %A.
-    %y = dim %A, 1 : tensor<4 x ? x f32>
+    %c1 = constant 1 : index
+    %y = dim %A, %c1 : tensor<4 x ? x f32>
 
     // Equivalent generic form:
-    %x = "std.dim"(%A) {index = 0 : i64} : (tensor<4 x ? x f32>) -> index
-    %y = "std.dim"(%A) {index = 1 : i64} : (tensor<4 x ? x f32>) -> index
+    %x = "std.dim"(%A, %c0) : (tensor<4 x ? x f32>, index) -> index
+    %y = "std.dim"(%A, %c1) : (tensor<4 x ? x f32>, index) -> index
     ```
   }];
 
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor],
                                  "any tensor or memref type">:$memrefOrTensor,
-                       APIntAttr:$index);
-  let results = (outs Index);
+                       Index:$index);
+  let results = (outs Index:$result);
 
-  let builders = [OpBuilder<
-    "OpBuilder &builder, OperationState &result, Value memrefOrTensor,"
-    "unsigned index", [{
-      auto indexType = builder.getIndexType();
-      auto indexAttr = builder.getIntegerAttr(indexType, index);
-      build(builder, result, indexType, memrefOrTensor, indexAttr);
-    }]>];
+  let assemblyFormat = [{
+    attr-dict $memrefOrTensor `,` $index `:` type($memrefOrTensor)
+  }];
+
+  let builders = [
+    OpBuilder<"OpBuilder &builder, OperationState &result, "
+              "Value memrefOrTensor, int64_t index">
+  ];
 
   let extraClassDeclaration = [{
-    unsigned getIndex() {
-      return getAttrOfType<IntegerAttr>("index").getValue().getZExtValue();
-    }
+    /// Helper function to get the index as a simple integer if it is constant.
+    Optional<int64_t> getConstantIndex();
   }];
 
   let hasFolder = 1;

diff  --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index 5d3984d8ac90..b388cb213fd4 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -583,7 +583,7 @@ void MemRefDescriptor::setConstantSize(OpBuilder &builder, Location loc,
           createIndexAttrConstant(builder, loc, indexType, size));
 }
 
-/// Builds IR extracting the pos-th size from the descriptor.
+/// Builds IR extracting the pos-th stride from the descriptor.
 Value MemRefDescriptor::stride(OpBuilder &builder, Location loc, unsigned pos) {
   return builder.create<LLVM::ExtractValueOp>(
       loc, indexType, value,
@@ -2114,17 +2114,24 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<DimOp> {
                   ConversionPatternRewriter &rewriter) const override {
     auto dimOp = cast<DimOp>(op);
     OperandAdaptor<DimOp> transformed(operands);
-    MemRefType type = dimOp.getOperand().getType().cast<MemRefType>();
+    MemRefType type = dimOp.memrefOrTensor().getType().cast<MemRefType>();
 
-    int64_t index = dimOp.getIndex();
+    Optional<int64_t> index = dimOp.getConstantIndex();
+    if (!index.hasValue()) {
+      // TODO(frgossen): Implement this lowering.
+      return failure();
+    }
+
+    int64_t i = index.getValue();
     // Extract dynamic size from the memref descriptor.
-    if (type.isDynamicDim(index))
+    if (type.isDynamicDim(i))
       rewriter.replaceOp(op, {MemRefDescriptor(transformed.memrefOrTensor())
-                                  .size(rewriter, op->getLoc(), index)});
+                                  .size(rewriter, op->getLoc(), i)});
     else
       // Use constant for static size.
-      rewriter.replaceOp(op, createIndexConstant(rewriter, op->getLoc(),
-                                                 type.getDimSize(index)));
+      rewriter.replaceOp(
+          op, createIndexConstant(rewriter, op->getLoc(), type.getDimSize(i)));
+
     return success();
   }
 };

diff  --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 93378dddca8b..138d6a4ddcac 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -173,7 +173,7 @@ bool mlir::isValidDim(Value value, Region *region) {
   // The dim op is okay if its operand memref/tensor is defined at the top
   // level.
   if (auto dimOp = dyn_cast<DimOp>(op))
-    return isTopLevelValue(dimOp.getOperand());
+    return isTopLevelValue(dimOp.memrefOrTensor());
   return false;
 }
 
@@ -197,18 +197,22 @@ static bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
 static bool isDimOpValidSymbol(DimOp dimOp, Region *region) {
   // The dim op is okay if its operand memref/tensor is defined at the top
   // level.
-  if (isTopLevelValue(dimOp.getOperand()))
+  if (isTopLevelValue(dimOp.memrefOrTensor()))
     return true;
 
   // The dim op is also okay if its operand memref/tensor is a view/subview
   // whose corresponding size is a valid symbol.
-  unsigned index = dimOp.getIndex();
-  if (auto viewOp = dyn_cast<ViewOp>(dimOp.getOperand().getDefiningOp()))
-    return isMemRefSizeValidSymbol<ViewOp>(viewOp, index, region);
-  if (auto subViewOp = dyn_cast<SubViewOp>(dimOp.getOperand().getDefiningOp()))
-    return isMemRefSizeValidSymbol<SubViewOp>(subViewOp, index, region);
-  if (auto allocOp = dyn_cast<AllocOp>(dimOp.getOperand().getDefiningOp()))
-    return isMemRefSizeValidSymbol<AllocOp>(allocOp, index, region);
+  Optional<int64_t> index = dimOp.getConstantIndex();
+  assert(index.hasValue() &&
+         "expect only `dim` operations with a constant index");
+  int64_t i = index.getValue();
+  if (auto viewOp = dyn_cast<ViewOp>(dimOp.memrefOrTensor().getDefiningOp()))
+    return isMemRefSizeValidSymbol<ViewOp>(viewOp, i, region);
+  if (auto subViewOp =
+          dyn_cast<SubViewOp>(dimOp.memrefOrTensor().getDefiningOp()))
+    return isMemRefSizeValidSymbol<SubViewOp>(subViewOp, i, region);
+  if (auto allocOp = dyn_cast<AllocOp>(dimOp.memrefOrTensor().getDefiningOp()))
+    return isMemRefSizeValidSymbol<AllocOp>(allocOp, i, region);
   return false;
 }
 

diff  --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 2738c2aefce4..0177dfca5460 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -1264,81 +1264,85 @@ LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
 // DimOp
 //===----------------------------------------------------------------------===//
 
-static void print(OpAsmPrinter &p, DimOp op) {
-  p << "dim " << op.getOperand() << ", " << op.getIndex();
-  p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"index"});
-  p << " : " << op.getOperand().getType();
-}
-
-static ParseResult parseDimOp(OpAsmParser &parser, OperationState &result) {
-  OpAsmParser::OperandType operandInfo;
-  IntegerAttr indexAttr;
-  Type type;
-  Type indexType = parser.getBuilder().getIndexType();
-
-  return failure(
-      parser.parseOperand(operandInfo) || parser.parseComma() ||
-      parser.parseAttribute(indexAttr, indexType, "index", result.attributes) ||
-      parser.parseOptionalAttrDict(result.attributes) ||
-      parser.parseColonType(type) ||
-      parser.resolveOperand(operandInfo, type, result.operands) ||
-      parser.addTypeToList(indexType, result.types));
+void DimOp::build(OpBuilder &builder, OperationState &result,
+                  Value memrefOrTensor, int64_t index) {
+  auto loc = result.location;
+  Value indexValue = builder.create<ConstantIndexOp>(loc, index);
+  auto indexTy = builder.getIndexType();
+  build(builder, result, indexTy, memrefOrTensor, indexValue);
+}
+
+Optional<int64_t> DimOp::getConstantIndex() {
+  auto constantOp = index().getDefiningOp<ConstantOp>();
+  if (constantOp) {
+    return constantOp.getValue().cast<IntegerAttr>().getInt();
+  }
+  return {};
 }
 
 static LogicalResult verify(DimOp op) {
-  // Check that we have an integer index operand.
-  auto indexAttr = op.getAttrOfType<IntegerAttr>("index");
-  if (!indexAttr)
-    return op.emitOpError("requires an integer attribute named 'index'");
-  int64_t index = indexAttr.getInt();
 
-  auto type = op.getOperand().getType();
+  // Assume unknown index to be in range.
+  Optional<int64_t> index = op.getConstantIndex();
+  if (!index.hasValue())
+    return success();
+
+  // Check that constant index is not knowingly out of range.
+  auto type = op.memrefOrTensor().getType();
   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
-    if (index >= tensorType.getRank())
+    if (index.getValue() >= tensorType.getRank())
       return op.emitOpError("index is out of range");
   } else if (auto memrefType = type.dyn_cast<MemRefType>()) {
-    if (index >= memrefType.getRank())
+    if (index.getValue() >= memrefType.getRank())
       return op.emitOpError("index is out of range");
-
   } else if (type.isa<UnrankedTensorType>()) {
-    // ok, assumed to be in-range.
+    // Assume index to be in range.
   } else {
-    return op.emitOpError("requires an operand with tensor or memref type");
+    llvm_unreachable("expected operand with tensor or memref type");
   }
 
   return success();
 }
 
 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
-  // Constant fold dim when the size along the index referred to is a constant.
-  auto opType = memrefOrTensor().getType();
-  if (auto shapedType = opType.dyn_cast<ShapedType>())
-    if (!shapedType.isDynamicDim(getIndex()))
-      return IntegerAttr::get(IndexType::get(getContext()),
-                              shapedType.getShape()[getIndex()]);
-
-  // Fold dim to the size argument for an AllocOp/ViewOp/SubViewOp.
-  auto memrefType = opType.dyn_cast<MemRefType>();
+  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
+
+  // All forms of folding require a known index.
+  if (!index)
+    return {};
+
+  // Fold if the shape extent along the given index is known.
+  auto argTy = memrefOrTensor().getType();
+  if (auto shapedTy = argTy.dyn_cast<ShapedType>()) {
+    if (!shapedTy.isDynamicDim(index.getInt())) {
+      Builder builder(getContext());
+      return builder.getIndexAttr(shapedTy.getShape()[index.getInt()]);
+    }
+  }
+
+  // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
+  auto memrefType = argTy.dyn_cast<MemRefType>();
   if (!memrefType)
     return {};
 
-  // The size at getIndex() is now known to be a dynamic size of a memref.
+  // The size at the given index is now known to be a dynamic size of a memref.
   auto memref = memrefOrTensor().getDefiningOp();
+  unsigned unsignedIndex = index.getValue().getZExtValue();
   if (auto alloc = dyn_cast_or_null<AllocOp>(memref))
     return *(alloc.getDynamicSizes().begin() +
-             memrefType.getDynamicDimIndex(getIndex()));
+             memrefType.getDynamicDimIndex(unsignedIndex));
 
   if (auto view = dyn_cast_or_null<ViewOp>(memref))
     return *(view.getDynamicSizes().begin() +
-             memrefType.getDynamicDimIndex(getIndex()));
+             memrefType.getDynamicDimIndex(unsignedIndex));
 
   if (auto subview = dyn_cast_or_null<SubViewOp>(memref)) {
-    assert(subview.isDynamicSize(getIndex()) &&
+    assert(subview.isDynamicSize(unsignedIndex) &&
            "Expected dynamic subview size");
-    return subview.getDynamicSize(getIndex());
+    return subview.getDynamicSize(unsignedIndex);
   }
 
-  /// dim(memrefcast) -> dim
+  // dim(memrefcast) -> dim
   if (succeeded(foldMemRefCast(*this)))
     return getResult();
 

diff  --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
index de19331ce911..791ba6f20ac0 100644
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -208,23 +208,23 @@ module {
     %c0 = constant 0 : index
     %c3 = constant 3 : index
     %c2 = constant 2 : index
-    %0 = dim %arg0, 0 : memref<?x?xf32, #map0>
-    %1 = dim %arg0, 1 : memref<?x?xf32, #map0>
+    %0 = dim %arg0, %c0 : memref<?x?xf32, #map0>
+    %1 = dim %arg0, %c1 : memref<?x?xf32, #map0>
     scf.parallel (%arg3, %arg4) = (%c0, %c0) to (%0, %1) step (%c2, %c3) {
-      %2 = dim %arg0, 0 : memref<?x?xf32, #map0>
+      %2 = dim %arg0, %c0 : memref<?x?xf32, #map0>
       %3 = affine.min #map1(%arg3)[%2]
       %squared_min = muli %3, %3 : index
-      %4 = dim %arg0, 1 : memref<?x?xf32, #map0>
+      %4 = dim %arg0, %c1 : memref<?x?xf32, #map0>
       %5 = affine.min #map2(%arg4)[%4]
       %6 = std.subview %arg0[%arg3, %arg4][%squared_min, %5][%c1, %c1] : memref<?x?xf32, #map0> to memref<?x?xf32, #map3>
-      %7 = dim %arg1, 0 : memref<?x?xf32, #map0>
+      %7 = dim %arg1, %c0 : memref<?x?xf32, #map0>
       %8 = affine.min #map1(%arg3)[%7]
-      %9 = dim %arg1, 1 : memref<?x?xf32, #map0>
+      %9 = dim %arg1, %c1 : memref<?x?xf32, #map0>
       %10 = affine.min #map2(%arg4)[%9]
       %11 = std.subview %arg1[%arg3, %arg4][%8, %10][%c1, %c1] : memref<?x?xf32, #map0> to memref<?x?xf32, #map3>
-      %12 = dim %arg2, 0 : memref<?x?xf32, #map0>
+      %12 = dim %arg2, %c0 : memref<?x?xf32, #map0>
       %13 = affine.min #map1(%arg3)[%12]
-      %14 = dim %arg2, 1 : memref<?x?xf32, #map0>
+      %14 = dim %arg2, %c1 : memref<?x?xf32, #map0>
       %15 = affine.min #map2(%arg4)[%14]
       %16 = std.subview %arg2[%arg3, %arg4][%13, %15][%c1, %c1] : memref<?x?xf32, #map0> to memref<?x?xf32, #map3>
       scf.parallel (%arg5, %arg6) = (%c0, %c0) to (%squared_min, %5) step (%c1, %c1) {
@@ -251,42 +251,42 @@ module {
 // CHECK:       module {
 // CHECK-LABEL:   func @sum(
 // CHECK-SAME:              [[VAL_0:%.*]]: memref<?x?xf32, #[[MAP0]]>, [[VAL_1:%.*]]: memref<?x?xf32, #[[MAP0]]>, [[VAL_2:%.*]]: memref<?x?xf32, #[[MAP0]]>) {
-// CHECK:           [[VAL_3:%.*]] = constant 1 : index
-// CHECK:           [[VAL_4:%.*]] = constant 0 : index
-// CHECK:           [[VAL_5:%.*]] = constant 3 : index
-// CHECK:           [[VAL_6:%.*]] = constant 2 : index
-// CHECK:           [[VAL_7:%.*]] = dim [[VAL_0]], 0 : memref<?x?xf32, #[[MAP0]]>
-// CHECK:           [[VAL_8:%.*]] = dim [[VAL_0]], 1 : memref<?x?xf32, #[[MAP0]]>
+// CHECK:           %[[C1:.*]] = constant 1 : index
+// CHECK:           %[[C0:.*]] = constant 0 : index
+// CHECK:           %[[C3:.*]] = constant 3 : index
+// CHECK:           %[[C2:.*]] = constant 2 : index
+// CHECK:           [[VAL_7:%.*]] = dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
+// CHECK:           [[VAL_8:%.*]] = dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
 // CHECK:           [[VAL_9:%.*]] = constant 1 : index
-// CHECK:           [[VAL_10:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_7]], [[VAL_4]], [[VAL_6]]]
-// CHECK:           [[VAL_11:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_8]], [[VAL_4]], [[VAL_5]]]
+// CHECK:           [[VAL_10:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_7]], %[[C0]], %[[C2]]]
+// CHECK:           [[VAL_11:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_8]], %[[C0]], %[[C3]]]
 // CHECK:           [[VAL_12:%.*]] = constant 4 : index
-// CHECK:           [[VAL_13:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_12]], [[VAL_4]], [[VAL_3]]]
+// CHECK:           [[VAL_13:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_12]], %[[C0]], %[[C1]]]
 // CHECK:           [[VAL_14:%.*]] = constant 3 : index
-// CHECK:           [[VAL_15:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_14]], [[VAL_4]], [[VAL_3]]]
+// CHECK:           [[VAL_15:%.*]] = affine.apply #[[MAP1]](){{\[}}[[VAL_14]], %[[C0]], %[[C1]]]
 // CHECK:           gpu.launch blocks([[VAL_16:%.*]], [[VAL_17:%.*]], [[VAL_18:%.*]]) in ([[VAL_19:%.*]] = [[VAL_10]], [[VAL_20:%.*]] = [[VAL_11]], [[VAL_21:%.*]] = [[VAL_9]]) threads([[VAL_22:%.*]], [[VAL_23:%.*]], [[VAL_24:%.*]]) in ([[VAL_25:%.*]] = [[VAL_13]], [[VAL_26:%.*]] = [[VAL_15]], [[VAL_27:%.*]] = [[VAL_9]]) {
-// CHECK:             [[VAL_28:%.*]] = affine.apply #[[MAP2]]([[VAL_16]]){{\[}}[[VAL_6]], [[VAL_4]]]
-// CHECK:             [[VAL_29:%.*]] = affine.apply #[[MAP2]]([[VAL_17]]){{\[}}[[VAL_5]], [[VAL_4]]]
-// CHECK:             [[VAL_30:%.*]] = dim [[VAL_0]], 0 : memref<?x?xf32, #[[MAP0]]>
+// CHECK:             [[VAL_28:%.*]] = affine.apply #[[MAP2]]([[VAL_16]]){{\[}}%[[C2]], %[[C0]]]
+// CHECK:             [[VAL_29:%.*]] = affine.apply #[[MAP2]]([[VAL_17]]){{\[}}%[[C3]], %[[C0]]]
+// CHECK:             [[VAL_30:%.*]] = dim [[VAL_0]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
 // CHECK:             [[VAL_31:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_30]]]
 // CHECK:             [[VAL_31_SQUARED:%.*]] = muli [[VAL_31]], [[VAL_31]] : index
-// CHECK:             [[VAL_32:%.*]] = dim [[VAL_0]], 1 : memref<?x?xf32, #[[MAP0]]>
+// CHECK:             [[VAL_32:%.*]] = dim [[VAL_0]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
 // CHECK:             [[VAL_33:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_32]]]
-// CHECK:             [[VAL_34:%.*]] = subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
-// CHECK:             [[VAL_35:%.*]] = dim [[VAL_1]], 0 : memref<?x?xf32, #[[MAP0]]>
+// CHECK:             [[VAL_34:%.*]] = subview [[VAL_0]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_31_SQUARED]], [[VAL_33]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
+// CHECK:             [[VAL_35:%.*]] = dim [[VAL_1]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
 // CHECK:             [[VAL_36:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_35]]]
-// CHECK:             [[VAL_37:%.*]] = dim [[VAL_1]], 1 : memref<?x?xf32, #[[MAP0]]>
+// CHECK:             [[VAL_37:%.*]] = dim [[VAL_1]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
 // CHECK:             [[VAL_38:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_37]]]
-// CHECK:             [[VAL_39:%.*]] = subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
-// CHECK:             [[VAL_40:%.*]] = dim [[VAL_2]], 0 : memref<?x?xf32, #[[MAP0]]>
+// CHECK:             [[VAL_39:%.*]] = subview [[VAL_1]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_36]], [[VAL_38]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
+// CHECK:             [[VAL_40:%.*]] = dim [[VAL_2]], %[[C0]] : memref<?x?xf32, #[[MAP0]]>
 // CHECK:             [[VAL_41:%.*]] = affine.min #[[MAP3]]([[VAL_28]]){{\[}}[[VAL_40]]]
-// CHECK:             [[VAL_42:%.*]] = dim [[VAL_2]], 1 : memref<?x?xf32, #[[MAP0]]>
+// CHECK:             [[VAL_42:%.*]] = dim [[VAL_2]], %[[C1]] : memref<?x?xf32, #[[MAP0]]>
 // CHECK:             [[VAL_43:%.*]] = affine.min #[[MAP4]]([[VAL_29]]){{\[}}[[VAL_42]]]
-// CHECK:             [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}[[VAL_3]], [[VAL_3]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
-// CHECK:             [[VAL_45:%.*]] = affine.apply #[[MAP2]]([[VAL_22]]){{\[}}[[VAL_3]], [[VAL_4]]]
+// CHECK:             [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[MAP0]]> to memref<?x?xf32, #[[MAP5]]>
+// CHECK:             [[VAL_45:%.*]] = affine.apply #[[MAP2]]([[VAL_22]]){{\[}}%[[C1]], %[[C0]]]
 // CHECK:             [[VAL_46:%.*]] = cmpi "slt", [[VAL_45]], [[VAL_31_SQUARED]] : index
 // CHECK:             scf.if [[VAL_46]] {
-// CHECK:               [[VAL_47:%.*]] = affine.apply #[[MAP2]]([[VAL_23]]){{\[}}[[VAL_3]], [[VAL_4]]]
+// CHECK:               [[VAL_47:%.*]] = affine.apply #[[MAP2]]([[VAL_23]]){{\[}}%[[C1]], %[[C0]]]
 // CHECK:               [[VAL_48:%.*]] = cmpi "slt", [[VAL_47]], [[VAL_33]] : index
 // CHECK:               scf.if [[VAL_48]] {
 // CHECK:                 [[VAL_49:%.*]] = load [[VAL_34]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[MAP5]]>

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
index 5069172db926..f8b1067b42fe 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-std-to-llvm %s | FileCheck %s
+// RUN: mlir-opt -convert-std-to-llvm %s | FileCheck %s --dump-input-on-failure
 // RUN: mlir-opt -convert-std-to-llvm='use-aligned-alloc=1' %s | FileCheck %s --check-prefix=ALIGNED-ALLOC
 
 // CHECK-LABEL: func @check_strided_memref_arguments(
@@ -355,15 +355,20 @@ func @memref_cast_unranked_to_ranked(%arg : memref<*xf32>) {
 
 // CHECK-LABEL: func @mixed_memref_dim
 func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
-//       CHECK:  llvm.mlir.constant(42 : index) : !llvm.i64
-  %0 = dim %mixed, 0 : memref<42x?x?x13x?xf32>
-//  CHECK-NEXT:  llvm.extractvalue %[[ld:.*]][3, 1] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }">
-  %1 = dim %mixed, 1 : memref<42x?x?x13x?xf32>
-//  CHECK-NEXT:  llvm.extractvalue %[[ld]][3, 2] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }">
-  %2 = dim %mixed, 2 : memref<42x?x?x13x?xf32>
-//  CHECK-NEXT:  llvm.mlir.constant(13 : index) : !llvm.i64
-  %3 = dim %mixed, 3 : memref<42x?x?x13x?xf32>
-//  CHECK-NEXT:  llvm.extractvalue %[[ld]][3, 4] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }">
-  %4 = dim %mixed, 4 : memref<42x?x?x13x?xf32>
+// CHECK: llvm.mlir.constant(42 : index) : !llvm.i64
+  %c0 = constant 0 : index
+  %0 = dim %mixed, %c0 : memref<42x?x?x13x?xf32>
+// CHECK: llvm.extractvalue %[[ld:.*]][3, 1] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }">
+  %c1 = constant 1 : index
+  %1 = dim %mixed, %c1 : memref<42x?x?x13x?xf32>
+// CHECK: llvm.extractvalue %[[ld]][3, 2] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }">
+  %c2 = constant 2 : index
+  %2 = dim %mixed, %c2 : memref<42x?x?x13x?xf32>
+// CHECK: llvm.mlir.constant(13 : index) : !llvm.i64
+  %c3 = constant 3 : index
+  %3 = dim %mixed, %c3 : memref<42x?x?x13x?xf32>
+// CHECK: llvm.extractvalue %[[ld]][3, 4] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }">
+  %c4 = constant 4 : index
+  %4 = dim %mixed, %c4 : memref<42x?x?x13x?xf32>
   return
 }

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
index d81e2d4c057b..06e9e93b6253 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
@@ -351,19 +351,24 @@ func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f
 func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) {
 // CHECK:        llvm.mlir.constant(42 : index) : !llvm.i64
 // BAREPTR:      llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm<"{ float*, float*, i64, [5 x i64], [5 x i64] }">
-// BAREPTR-NEXT: llvm.mlir.constant(42 : index) : !llvm.i64
-  %0 = dim %static, 0 : memref<42x32x15x13x27xf32>
-// CHECK-NEXT:  llvm.mlir.constant(32 : index) : !llvm.i64
-// BAREPTR-NEXT:  llvm.mlir.constant(32 : index) : !llvm.i64
-  %1 = dim %static, 1 : memref<42x32x15x13x27xf32>
-// CHECK-NEXT:  llvm.mlir.constant(15 : index) : !llvm.i64
-// BAREPTR-NEXT:  llvm.mlir.constant(15 : index) : !llvm.i64
-  %2 = dim %static, 2 : memref<42x32x15x13x27xf32>
-// CHECK-NEXT:  llvm.mlir.constant(13 : index) : !llvm.i64
-// BAREPTR-NEXT:  llvm.mlir.constant(13 : index) : !llvm.i64
-  %3 = dim %static, 3 : memref<42x32x15x13x27xf32>
-// CHECK-NEXT:  llvm.mlir.constant(27 : index) : !llvm.i64
-// BAREPTR-NEXT:  llvm.mlir.constant(27 : index) : !llvm.i64
-  %4 = dim %static, 4 : memref<42x32x15x13x27xf32>
+// BAREPTR: llvm.mlir.constant(42 : index) : !llvm.i64
+  %c0 = constant 0 : index
+  %0 = dim %static, %c0 : memref<42x32x15x13x27xf32>
+// CHECK:  llvm.mlir.constant(32 : index) : !llvm.i64
+// BAREPTR:  llvm.mlir.constant(32 : index) : !llvm.i64
+  %c1 = constant 1 : index
+  %1 = dim %static, %c1 : memref<42x32x15x13x27xf32>
+// CHECK:  llvm.mlir.constant(15 : index) : !llvm.i64
+// BAREPTR:  llvm.mlir.constant(15 : index) : !llvm.i64
+  %c2 = constant 2 : index
+  %2 = dim %static, %c2 : memref<42x32x15x13x27xf32>
+// CHECK:  llvm.mlir.constant(13 : index) : !llvm.i64
+// BAREPTR:  llvm.mlir.constant(13 : index) : !llvm.i64
+  %c3 = constant 3 : index
+  %3 = dim %static, %c3 : memref<42x32x15x13x27xf32>
+// CHECK:  llvm.mlir.constant(27 : index) : !llvm.i64
+// BAREPTR:  llvm.mlir.constant(27 : index) : !llvm.i64
+  %c4 = constant 4 : index
+  %4 = dim %static, %c4 : memref<42x32x15x13x27xf32>
   return
 }

diff  --git a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
index 7391ebf81a39..152a110d82dd 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
@@ -45,8 +45,8 @@ func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %d
     }
   }
   // CHECK: %[[tensor:[0-9]+]] = alloc
-  // CHECK-NOT: {{.*}} dim %[[tensor]], 0
-  // CHECK-NOT: {{.*}} dim %[[tensor]], 3
+  // CHECK-NOT: {{.*}} dim %[[tensor]], %c0
+  // CHECK-NOT: {{.*}} dim %[[tensor]], %c3
   return
 }
 
@@ -233,7 +233,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
 
   // CHECK-DAG: %[[splat:.*]] = constant dense<7.000000e+00> : vector<15xf32>
   // CHECK-DAG: %[[alloc:.*]] = alloca() {alignment = 128 : i64} : memref<3xvector<15xf32>>
-  // CHECK-DAG: %[[dim:.*]] = dim %[[A]], 0 : memref<?x?xf32>
+  // CHECK-DAG: %[[dim:.*]] = dim %[[A]], %c0 : memref<?x?xf32>
   // CHECK: affine.for %[[I:.*]] = 0 to 3 {
   // CHECK:   %[[add:.*]] = affine.apply #[[MAP0]](%[[I]])[%[[base]]]
   // CHECK:   %[[cond1:.*]] = cmpi "slt", %[[add]], %[[dim]] : index
@@ -249,7 +249,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
   // FULL-UNROLL: %[[pad:.*]] = constant 7.000000e+00 : f32
   // FULL-UNROLL: %[[VEC0:.*]] = constant dense<7.000000e+00> : vector<3x15xf32>
   // FULL-UNROLL: %[[SPLAT:.*]] = constant dense<7.000000e+00> : vector<15xf32>
-  // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], 0 : memref<?x?xf32>
+  // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], %c0 : memref<?x?xf32>
   // FULL-UNROLL: cmpi "slt", %[[base]], %[[DIM]] : index
   // FULL-UNROLL: %[[VEC1:.*]] = scf.if %{{.*}} -> (vector<3x15xf32>) {
   // FULL-UNROLL:   vector.transfer_read %[[A]][%[[base]], %[[base]]], %[[pad]] : memref<?x?xf32>, vector<15xf32>
@@ -307,7 +307,7 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
   // CHECK: %[[alloc:.*]] = alloca() {alignment = 128 : i64} : memref<3xvector<15xf32>>
   // CHECK: %[[vmemref:.*]] = vector.type_cast %[[alloc]] : memref<3xvector<15xf32>> to memref<vector<3x15xf32>>
   // CHECK: store %[[vec]], %[[vmemref]][] : memref<vector<3x15xf32>>
-  // CHECK: %[[dim:.*]] = dim %[[A]], 0 : memref<?x?xf32>
+  // CHECK: %[[dim:.*]] = dim %[[A]], %c0 : memref<?x?xf32>
   // CHECK: affine.for %[[I:.*]] = 0 to 3 {
   // CHECK:   %[[add:.*]] = affine.apply #[[MAP0]](%[[I]])[%[[base]]]
   // CHECK:   %[[cmp:.*]] = cmpi "slt", %[[add]], %[[dim]] : index
@@ -316,7 +316,7 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
   // CHECK:     vector.transfer_write %[[vec_1d]], %[[A]][%[[add]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
   // CHECK:   }
 
-  // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], 0 : memref<?x?xf32>
+  // FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], %c0 : memref<?x?xf32>
   // FULL-UNROLL: %[[CMP0:.*]] = cmpi "slt", %[[base]], %[[DIM]] : index
   // FULL-UNROLL: scf.if %[[CMP0]] {
   // FULL-UNROLL:   %[[V0:.*]] = vector.extract %[[vec]][0] : vector<3x15xf32>

diff  --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
index 10bf5009d5f6..ed0a39c967a3 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
@@ -12,14 +12,18 @@
 // Maps introduced to vectorize fastest varying memory index.
 // CHECK-LABEL: func @vec1d_1
 func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
 
 // CHECK: for {{.*}} step 128
 // CHECK-NEXT: %{{.*}} = affine.apply #map0(%[[C0]])
@@ -27,42 +31,50 @@ func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-NEXT: %{{.*}} = constant 0.0{{.*}}: f32
 // CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i0 = 0 to %M { // vectorized due to scalar -> vector
-     %a0 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
+     %a0 = affine.load %A[%c0, %c0] : memref<?x?xf32>
    }
    return
 }
 
 // CHECK-LABEL: func @vec1d_2
 func @vec1d_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK:for [[IV3:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
 // CHECK-NEXT: %[[CST:.*]] = constant 0.0{{.*}}: f32
 // CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %[[CST]] : memref<?x?xf32>, vector<128xf32>
    affine.for %i3 = 0 to %M { // vectorized
-     %a3 = affine.load %A[%cst0, %i3] : memref<?x?xf32>
+     %a3 = affine.load %A[%c0, %i3] : memref<?x?xf32>
    }
    return
 }
 
 // CHECK-LABEL: func @vec1d_3
 func @vec1d_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %arg0, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %arg0, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %arg1, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %arg0, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %arg0, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %arg1, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK:for [[IV8:%[arg0-9]+]] = 0 to [[ARG_M]] step 128
 // CHECK-NEXT:   for [[IV9:%[arg0-9]*]] = 0 to [[ARG_N]] {
 // CHECK-NEXT:   %[[APP9_0:[0-9]+]] = affine.apply {{.*}}([[IV9]], [[IV8]])
@@ -131,15 +143,19 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
 
 // CHECK-LABEL: func @vec_rejected_1
 func @vec_rejected_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK:for {{.*}} [[ARG_M]] {
    affine.for %i1 = 0 to %M { // not vectorized
      %a1 = affine.load %A[%i1, %i1] : memref<?x?xf32>
@@ -149,33 +165,41 @@ func @vec_rejected_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_2
 func @vec_rejected_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK:   affine.for %{{.*}}{{[0-9]*}} = 0 to [[ARG_M]] {
    affine.for %i2 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
-     %a2 = affine.load %A[%i2, %cst0] : memref<?x?xf32>
+     %a2 = affine.load %A[%i2, %c0] : memref<?x?xf32>
    }
    return
 }
 
 // CHECK-LABEL: func @vec_rejected_3
 func @vec_rejected_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK:for [[IV4:%[arg0-9]+]] = 0 to [[ARG_M]] step 128 {
 // CHECK-NEXT:   for [[IV5:%[arg0-9]*]] = 0 to [[ARG_N]] {
 // CHECK-NEXT:     %{{.*}} = constant 0.0{{.*}}: f32
@@ -190,15 +214,19 @@ func @vec_rejected_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_4
 func @vec_rejected_4(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK: for [[IV6:%[arg0-9]*]] = 0 to [[ARG_M]] {
 // CHECK-NEXT:   for [[IV7:%[arg0-9]*]] = 0 to [[ARG_N]] {
    affine.for %i6 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
@@ -211,15 +239,19 @@ func @vec_rejected_4(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_5
 func @vec_rejected_5(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK: for [[IV10:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
 // CHECK:   for [[IV11:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
    affine.for %i10 = 0 to %M { // not vectorized, need per load transposes
@@ -233,15 +265,19 @@ func @vec_rejected_5(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_6
 func @vec_rejected_6(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK: for [[IV12:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
 // CHECK:   for [[IV13:%[arg0-9]*]] = 0 to %{{[0-9]*}} {
 // CHECK:     for [[IV14:%[arg0-9]+]] = 0 to [[ARG_P]] step 128
@@ -257,15 +293,19 @@ func @vec_rejected_6(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_7
 func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK:  affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
    affine.for %i16 = 0 to %M { // not vectorized, can't vectorize a vector load
      %a16 = alloc(%M) : memref<?xvector<2xf32>>
@@ -276,15 +316,19 @@ func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_8
 func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
 // CHECK:   for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
 // CHECK:     %{{.*}} = affine.apply #map0(%{{.*}})
@@ -293,7 +337,7 @@ func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK:     {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %{{.*}} in DFS post-order prevents vectorizing %{{.*}}
      affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector
-       %a18 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
+       %a18 = affine.load %A[%c0, %c0] : memref<?x?xf32>
      }
    }
    return
@@ -301,15 +345,19 @@ func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_9
 func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: %[[C0:[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK: affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
 // CHECK:   for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
 // CHECK:      %{{.*}} = affine.apply #map0(%{{.*}})
@@ -318,7 +366,7 @@ func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-NEXT: {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %i18 in DFS post-order prevents vectorizing %{{.*}}
      affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector
-       %a18 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
+       %a18 = affine.load %A[%c0, %c0] : memref<?x?xf32>
      }
    }
    return
@@ -326,19 +374,23 @@ func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // CHECK-LABEL: func @vec_rejected_10
 func @vec_rejected_10(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
-// CHECK-DAG: [[C0:%[a-z0-9_]+]] = constant 0 : index
-// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, 0 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, 1 : memref<?x?xf32>
-// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, 2 : memref<?x?x?xf32>
-   %M = dim %A, 0 : memref<?x?xf32>
-   %N = dim %A, 1 : memref<?x?xf32>
-   %P = dim %B, 2 : memref<?x?x?xf32>
-   %cst0 = constant 0 : index
-//
+// CHECK-DAG: %[[C0:.*]] = constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = constant 1 : index
+// CHECK-DAG: %[[C2:.*]] = constant 2 : index
+// CHECK-DAG: [[ARG_M:%[0-9]+]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_N:%[0-9]+]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+// CHECK-DAG: [[ARG_P:%[0-9]+]] = dim %{{.*}}, %[[C2]] : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?xf32>
+   %N = dim %A, %c1 : memref<?x?xf32>
+   %P = dim %B, %c2 : memref<?x?x?xf32>
+
 // CHECK:  affine.for %{{.*}}{{[0-9]*}} = 0 to %{{[0-9]*}} {
    affine.for %i15 = 0 to %M { // not vectorized due to condition below
      affine.if #set0(%i15) {
-       %a15 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
+       %a15 = affine.load %A[%c0, %c0] : memref<?x?xf32>
      }
    }
    return
@@ -347,7 +399,8 @@ func @vec_rejected_10(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // This should not vectorize and should not crash.
 // CHECK-LABEL: @vec_rejected_11
 func @vec_rejected_11(%A : memref<?x?xf32>, %C : memref<?x?xf32>) {
-  %N = dim %A, 0 : memref<?x?xf32>
+  %c0 = constant 0 : index
+  %N = dim %A, %c0 : memref<?x?xf32>
   affine.for %i = 0 to %N {
 // CHECK-NOT: vector
     %a = affine.load %A[%i, %i] : memref<?x?xf32> // not vectorized
@@ -365,7 +418,8 @@ func @vec_rejected_11(%A : memref<?x?xf32>, %C : memref<?x?xf32>) {
 // This should not vectorize due to the sequential dependence in the scf.
 // CHECK-LABEL: @vec_rejected_sequential
 func @vec_rejected_sequential(%A : memref<?xf32>) {
-  %N = dim %A, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %N = dim %A, %c0 : memref<?xf32>
   affine.for %i = 0 to %N {
     // CHECK-NOT: vector
     %a = affine.load %A[%i] : memref<?xf32>

diff  --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
index 3352644da63d..de757a4ad88b 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
@@ -12,9 +12,12 @@
 // VECT-DAG: #[[map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
 
 func @vec2d(%A : memref<?x?x?xf32>) {
-   %M = dim %A, 0 : memref<?x?x?xf32>
-   %N = dim %A, 1 : memref<?x?x?xf32>
-   %P = dim %A, 2 : memref<?x?x?xf32>
+   %c0 = constant 0 : index
+   %c1 = constant 1 : index
+   %c2 = constant 2 : index
+   %M = dim %A, %c0 : memref<?x?x?xf32>
+   %N = dim %A, %c1 : memref<?x?x?xf32>
+   %P = dim %A, %c2 : memref<?x?x?xf32>
    // CHECK: for  {{.*}} = 0 to %{{.*}} {
    // CHECK:   for {{.*}} = 0 to %{{.*}} step 32
    // CHECK:     for {{.*}} = 0 to %{{.*}} step 256
@@ -100,13 +103,15 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
 // VECT-LABEL: func @vectorize_matmul
 func @vectorize_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %c0 = constant 0 : index
-  %M = dim %arg0, 0 : memref<?x?xf32>
-  %K = dim %arg0, 1 : memref<?x?xf32>
-  %N = dim %arg2, 1 : memref<?x?xf32>
+  %c1 = constant 1 : index
+  %M = dim %arg0, %c0 : memref<?x?xf32>
+  %K = dim %arg0, %c1 : memref<?x?xf32>
+  %N = dim %arg2, %c1 : memref<?x?xf32>
   //      VECT: %[[C0:.*]] = constant 0 : index
-  // VECT-NEXT: %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32>
-  // VECT-NEXT: %[[K:.*]] = dim %{{.*}}, 1 : memref<?x?xf32>
-  // VECT-NEXT: %[[N:.*]] = dim %{{.*}}, 1 : memref<?x?xf32>
+  // VECT-NEXT: %[[C1:.*]] = constant 1 : index
+  // VECT-NEXT: %[[M:.*]] = dim %{{.*}}, %[[C0]] : memref<?x?xf32>
+  // VECT-NEXT: %[[K:.*]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
+  // VECT-NEXT: %[[N:.*]] = dim %{{.*}}, %[[C1]] : memref<?x?xf32>
   //      VECT: {{.*}} #[[map_id1]](%[[M]]) step 4 {
   // VECT-NEXT:   {{.*}} #[[map_id1]](%[[N]]) step 8 {
   //      VECT:     %[[VC0:.*]] = constant dense<0.000000e+00> : vector<4x8xf32>

diff  --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
index 5b6517ea390e..b438887f060b 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
@@ -4,25 +4,28 @@
 // CHECK: #[[map_proj_d0d1d2_d0d1d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 
 func @vec3d(%A : memref<?x?x?xf32>) {
-   %0 = dim %A, 0 : memref<?x?x?xf32>
-   %1 = dim %A, 1 : memref<?x?x?xf32>
-   %2 = dim %A, 2 : memref<?x?x?xf32>
-   // CHECK: affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 32 {
-   // CHECK:       affine.for %{{.*}} = 0 to %{{.*}} step 64 {
-   // CHECK:         affine.for %{{.*}} = 0 to %{{.*}} step 256 {
-   // CHECK:           %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} : memref<?x?x?xf32>, vector<32x64x256xf32>
-   affine.for %t0 = 0 to %0 {
-     affine.for %t1 = 0 to %0 {
-       affine.for %i0 = 0 to %0 {
-         affine.for %i1 = 0 to %1 {
-           affine.for %i2 = 0 to %2 {
-             %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
-           }
-         }
-       }
-     }
-   }
-   return
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %0 = dim %A, %c0 : memref<?x?x?xf32>
+  %1 = dim %A, %c1 : memref<?x?x?xf32>
+  %2 = dim %A, %c2 : memref<?x?x?xf32>
+  // CHECK: affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 32 {
+  // CHECK:       affine.for %{{.*}} = 0 to %{{.*}} step 64 {
+  // CHECK:         affine.for %{{.*}} = 0 to %{{.*}} step 256 {
+  // CHECK:           %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} : memref<?x?x?xf32>, vector<32x64x256xf32>
+  affine.for %t0 = 0 to %0 {
+    affine.for %t1 = 0 to %0 {
+      affine.for %i0 = 0 to %0 {
+        affine.for %i1 = 0 to %1 {
+          affine.for %i2 = 0 to %2 {
+            %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+          }
+        }
+      }
+    }
+  }
+  return
 }

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
index c41c14d42390..adaacddef1f7 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
@@ -4,31 +4,34 @@
 // CHECK: #[[map_proj_d0d1d2_d0d2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
 
 func @vec2d(%A : memref<?x?x?xf32>) {
-   %M = dim %A, 0 : memref<?x?x?xf32>
-   %N = dim %A, 1 : memref<?x?x?xf32>
-   %P = dim %A, 2 : memref<?x?x?xf32>
-   // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256
-   // CHECK:       {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d0d2]]} : memref<?x?x?xf32>,  vector<32x256xf32>
-   affine.for %i0 = 0 to %M {
-     affine.for %i1 = 0 to %N {
-       affine.for %i2 = 0 to %P {
-         %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
-       }
-     }
-   }
-   // CHECK: for  {{.*}} = 0 to %{{.*}} {
-   // CHECK:   for  {{.*}} = 0 to %{{.*}} {
-   // CHECK:     for  {{.*}} = 0 to %{{.*}} {
-   // For the case: --test-fastest-varying=2 --test-fastest-varying=0 no
-   // vectorization happens because of loop nesting order
-   affine.for %i3 = 0 to %M {
-     affine.for %i4 = 0 to %N {
-       affine.for %i5 = 0 to %P {
-         %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
-       }
-     }
-   }
-   return
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %M = dim %A, %c0 : memref<?x?x?xf32>
+  %N = dim %A, %c1 : memref<?x?x?xf32>
+  %P = dim %A, %c2 : memref<?x?x?xf32>
+  // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256
+  // CHECK:       {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d0d2]]} : memref<?x?x?xf32>,  vector<32x256xf32>
+  affine.for %i0 = 0 to %M {
+    affine.for %i1 = 0 to %N {
+      affine.for %i2 = 0 to %P {
+        %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+      }
+    }
+  }
+  // CHECK: for  {{.*}} = 0 to %{{.*}} {
+  // CHECK:   for  {{.*}} = 0 to %{{.*}} {
+  // CHECK:     for  {{.*}} = 0 to %{{.*}} {
+  // For the case: --test-fastest-varying=2 --test-fastest-varying=0 no
+  // vectorization happens because of loop nesting order
+  affine.for %i3 = 0 to %M {
+    affine.for %i4 = 0 to %N {
+      affine.for %i5 = 0 to %P {
+        %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
+      }
+    }
+  }
+  return
 }

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
index 3dc3e69e6678..9bd56df2c8a9 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
@@ -4,62 +4,68 @@
 // CHECK: #[[map_proj_d0d1d2_d2d0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d0)>
 
 func @vec2d(%A : memref<?x?x?xf32>) {
-   %M = dim %A, 0 : memref<?x?x?xf32>
-   %N = dim %A, 1 : memref<?x?x?xf32>
-   %P = dim %A, 2 : memref<?x?x?xf32>
-   // CHECK: for  {{.*}} = 0 to %{{.*}} {
-   // CHECK:   for  {{.*}} = 0 to %{{.*}} {
-   // CHECK:     for  {{.*}} = 0 to %{{.*}} {
-   // For the case: --test-fastest-varying=0 --test-fastest-varying=2 no
-   // vectorization happens because of loop nesting order.
-   affine.for %i0 = 0 to %M {
-     affine.for %i1 = 0 to %N {
-       affine.for %i2 = 0 to %P {
-         %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
-       }
-     }
-   }
-   // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} step 256
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:       {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   affine.for %i3 = 0 to %M {
-     affine.for %i4 = 0 to %N {
-       affine.for %i5 = 0 to %P {
-         %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
-       }
-     }
-   }
-   return
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %M = dim %A, %c0 : memref<?x?x?xf32>
+  %N = dim %A, %c1 : memref<?x?x?xf32>
+  %P = dim %A, %c2 : memref<?x?x?xf32>
+  // CHECK: for  {{.*}} = 0 to %{{.*}} {
+  // CHECK:   for  {{.*}} = 0 to %{{.*}} {
+  // CHECK:     for  {{.*}} = 0 to %{{.*}} {
+  // For the case: --test-fastest-varying=0 --test-fastest-varying=2 no
+  // vectorization happens because of loop nesting order.
+  affine.for %i0 = 0 to %M {
+    affine.for %i1 = 0 to %N {
+      affine.for %i2 = 0 to %P {
+        %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+      }
+    }
+  }
+  // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} step 256
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:       {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  affine.for %i3 = 0 to %M {
+    affine.for %i4 = 0 to %N {
+      affine.for %i5 = 0 to %P {
+        %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
+      }
+    }
+  }
+  return
 }
 
 func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
-   %0 = dim %A, 0 : memref<?x?x?xf32>
-   %1 = dim %A, 1 : memref<?x?x?xf32>
-   %2 = dim %A, 2 : memref<?x?x?xf32>
-   // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 {
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256 {
-   // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} step 256 {
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   affine.for %i0 = 0 to %0 {
-     affine.for %i1 = 0 to %1 {
-       affine.for %i2 = 0 to %2 {
-         %a2 = affine.load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
-       }
-     }
-     affine.for %i3 = 0 to %1 {
-       affine.for %i4 = 0 to %2 {
-         %a4 = affine.load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
-       }
-       affine.for %i5 = 0 to %2 {
-         %a5 = affine.load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
-       }
-     }
-   }
-   return
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %0 = dim %A, %c0 : memref<?x?x?xf32>
+  %1 = dim %A, %c1 : memref<?x?x?xf32>
+  %2 = dim %A, %c2 : memref<?x?x?xf32>
+  // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 {
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256 {
+  // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} step 256 {
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d0]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  affine.for %i0 = 0 to %0 {
+    affine.for %i1 = 0 to %1 {
+      affine.for %i2 = 0 to %2 {
+        %a2 = affine.load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
+      }
+    }
+    affine.for %i3 = 0 to %1 {
+      affine.for %i4 = 0 to %2 {
+        %a4 = affine.load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
+      }
+      affine.for %i5 = 0 to %2 {
+        %a5 = affine.load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
+      }
+    }
+  }
+  return
 }

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
index 893352a40db4..cf6a54a6ea51 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
@@ -4,63 +4,69 @@
 // CHECK-DAG: #[[map_proj_d0d1d2_d2d1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
 
 func @vec2d(%A : memref<?x?x?xf32>) {
-   %M = dim %A, 0 : memref<?x?x?xf32>
-   %N = dim %A, 1 : memref<?x?x?xf32>
-   %P = dim %A, 2 : memref<?x?x?xf32>
-   // CHECK: for  {{.*}} = 0 to %{{.*}} {
-   // CHECK:   for  {{.*}} = 0 to %{{.*}} {
-   // CHECK:     for  {{.*}} = 0 to %{{.*}} {
-   // For the case: --test-fastest-varying=0 --test-fastest-varying=1 no
-   // vectorization happens because of loop nesting order.
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %M = dim %A, %c0 : memref<?x?x?xf32>
+  %N = dim %A, %c1 : memref<?x?x?xf32>
+  %P = dim %A, %c2 : memref<?x?x?xf32>
+  // CHECK: for  {{.*}} = 0 to %{{.*}} {
+  // CHECK:   for  {{.*}} = 0 to %{{.*}} {
+  // CHECK:     for  {{.*}} = 0 to %{{.*}} {
+  // For the case: --test-fastest-varying=0 --test-fastest-varying=1 no
+  // vectorization happens because of loop nesting order.
   affine.for %i0 = 0 to %M {
-     affine.for %i1 = 0 to %N {
-       affine.for %i2 = 0 to %P {
-         %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
-       }
-     }
-   }
-   // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256
-   // CHECK:       {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   affine.for %i3 = 0 to %M {
-     affine.for %i4 = 0 to %N {
-       affine.for %i5 = 0 to %P {
-         %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
-       }
-     }
-   }
-   return
+    affine.for %i1 = 0 to %N {
+      affine.for %i2 = 0 to %P {
+        %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+      }
+    }
+  }
+  // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256
+  // CHECK:       {{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  affine.for %i3 = 0 to %M {
+    affine.for %i4 = 0 to %N {
+      affine.for %i5 = 0 to %P {
+        %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
+      }
+    }
+  }
+  return
 }
 
 func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
-   %0 = dim %A, 0 : memref<?x?x?xf32>
-   %1 = dim %A, 1 : memref<?x?x?xf32>
-   %2 = dim %A, 2 : memref<?x?x?xf32>
-   // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 {
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} step 256 {
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256 {
-   // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256 {
-   // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
-   affine.for %i0 = 0 to %0 {
-     affine.for %i1 = 0 to %1 {
-       affine.for %i2 = 0 to %2 {
-         %a2 = affine.load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
-       }
-     }
-     affine.for %i3 = 0 to %1 {
-       affine.for %i4 = 0 to %2 {
-         %a4 = affine.load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
-       }
-       affine.for %i5 = 0 to %2 {
-         %a5 = affine.load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
-       }
-     }
-   }
-   return
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %0 = dim %A, %c0 : memref<?x?x?xf32>
+  %1 = dim %A, %c1 : memref<?x?x?xf32>
+  %2 = dim %A, %c2 : memref<?x?x?xf32>
+  // CHECK: affine.for %{{.*}} = 0 to %{{.*}} step 32 {
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} step 256 {
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  // CHECK:   affine.for %{{.*}} = 0 to %{{.*}} {
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256 {
+  // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  // CHECK:     affine.for %{{.*}} = 0 to %{{.*}} step 256 {
+  // CHECK:       %{{.*}} = vector.transfer_read %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}], %{{.*}} {permutation_map = #[[map_proj_d0d1d2_d2d1]]} : memref<?x?x?xf32>, vector<32x256xf32>
+  affine.for %i0 = 0 to %0 {
+    affine.for %i1 = 0 to %1 {
+      affine.for %i2 = 0 to %2 {
+        %a2 = affine.load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
+      }
+    }
+    affine.for %i3 = 0 to %1 {
+      affine.for %i4 = 0 to %2 {
+        %a4 = affine.load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
+      }
+      affine.for %i5 = 0 to %2 {
+        %a5 = affine.load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
+      }
+    }
+  }
+  return
 }
 

diff --git a/mlir/test/Dialect/Affine/dma-generate.mlir b/mlir/test/Dialect/Affine/dma-generate.mlir
index 9995c1908d5b..1232d4d7c419 100644
--- a/mlir/test/Dialect/Affine/dma-generate.mlir
+++ b/mlir/test/Dialect/Affine/dma-generate.mlir
@@ -271,8 +271,9 @@ func @dma_with_symbolic_loop_bounds(%A : memref<100x100xf32>, %M : index, %N: in
 
 // CHECK-LABEL: func @dma_unknown_size
 func @dma_unknown_size(%arg0: memref<?x?xf32>) {
-  %M = dim %arg0, 0 : memref<? x ? x f32>
-  %N = dim %arg0, 0 : memref<? x ? x f32>
+  %c0 = constant 0 : index
+  %M = dim %arg0, %c0 : memref<? x ? x f32>
+  %N = dim %arg0, %c0 : memref<? x ? x f32>
   affine.for %i = 0 to %M {
     affine.for %j = 0 to %N {
       // If this loop nest isn't tiled, the access requires a non-constant DMA

diff --git a/mlir/test/Dialect/Affine/invalid.mlir b/mlir/test/Dialect/Affine/invalid.mlir
index 102dd394f93c..e338851a6800 100644
--- a/mlir/test/Dialect/Affine/invalid.mlir
+++ b/mlir/test/Dialect/Affine/invalid.mlir
@@ -122,7 +122,8 @@ func @affine_if_invalid_sym() {
 func @affine_if_invalid_dimop_dim(%arg0: index, %arg1: index, %arg2: index, %arg3: index) {
   affine.for %n0 = 0 to 7 {
     %0 = alloc(%arg0, %arg1, %arg2, %arg3) : memref<?x?x?x?xf32>
-    %dim = dim %0, 0 : memref<?x?x?x?xf32>
+    %c0 = constant 0 : index
+    %dim = dim %0, %c0 : memref<?x?x?x?xf32>
 
     // expected-error at +1 {{operand cannot be used as a symbol}}
     affine.if #set0(%dim)[%n0] {}

diff --git a/mlir/test/Dialect/Affine/loop-tiling.mlir b/mlir/test/Dialect/Affine/loop-tiling.mlir
index 1aee50f15839..7deb8772f6d0 100644
--- a/mlir/test/Dialect/Affine/loop-tiling.mlir
+++ b/mlir/test/Dialect/Affine/loop-tiling.mlir
@@ -66,7 +66,8 @@ func @loop_tiling() {
 #ub = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
 // CHECK-LABEL: func @loop_max_min_bound(%{{.*}}: memref<?xi32>, %{{.*}}: index, %{{.*}}: index) {
 func @loop_max_min_bound(%A : memref<? x i32>, %L : index, %U : index) {
-  %M = dim %A, 0 : memref<? x i32>
+  %c0 = constant 0 : index
+  %M = dim %A, %c0 : memref<? x i32>
   affine.for %i = max #lb()[%L] to min #ub()[%M, %U] {
     addi %i, %i : index
   }
@@ -111,7 +112,8 @@ func @simple_matmul(%arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256
 
 func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %cst = constant 0.000000e+00 : f32
-  %0 = dim %arg0, 0 : memref<?x?xf32>
+  %c0 = constant 0 : index
+  %0 = dim %arg0, %c0 : memref<?x?xf32>
   affine.for %i0 = 0 to %0 {
     affine.for %i1 = 0 to %0 {
       affine.store %cst, %arg2[%i0, %i1] : memref<?x?xf32>
@@ -128,7 +130,7 @@ func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref
   return
 }
 
-// CHECK:       dim %{{.*}}, 0 : memref<?x?xf32>
+// CHECK:       dim %{{.*}}, %c0 : memref<?x?xf32>
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to %{{.*}} step 32 {
 // CHECK-NEXT:    affine.for %{{.*}} = 0 to %{{.*}} step 32 {
 // CHECK-NEXT:      affine.for %{{.*}} = #map3(%{{.*}}) to min [[UBMAP]](%{{.*}})[%{{.*}}] {
@@ -155,14 +157,15 @@ func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref
 // CHECK-DAG: [[UBMAP:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0 + s1)>
 
 func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: index) {
-  %dim0 = dim %arg0, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %dim0 = dim %arg0, %c0 : memref<?xf32>
   affine.for %i0 = 0 to affine_map<()[s0, s1] -> (s0 + s1)> ()[%dim0, %limit] {
     %v0 = affine.load %arg0[%i0] : memref<?xf32>
   }
   return
 }
 
-// CHECK:       dim %{{.*}}, 0 : memref<?xf32>
+// CHECK:       dim %{{.*}}, %c0 : memref<?xf32>
 // CHECK-NEXT:  affine.for %{{.*}} = 0 to [[MAP1]]()[%{{.*}}, %{{.*}}] step 32 {
 // CHECK-NEXT:    affine.for %{{.*}} = [[MAP0]](%{{.*}}) to min [[UBMAP]](%{{.*}})[%{{.*}}, %{{.*}}] {
 // CHECK-NEXT:      affine.load

diff --git a/mlir/test/Dialect/Affine/ops.mlir b/mlir/test/Dialect/Affine/ops.mlir
index 5ca6de5023eb..832c0aa477a4 100644
--- a/mlir/test/Dialect/Affine/ops.mlir
+++ b/mlir/test/Dialect/Affine/ops.mlir
@@ -95,16 +95,16 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 // -----
 
 func @valid_symbols(%arg0: index, %arg1: index, %arg2: index) {
-  %c0 = constant 1 : index
-  %c1 = constant 0 : index
+  %c1 = constant 1 : index
+  %c0 = constant 0 : index
   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
   affine.for %arg3 = 0 to %arg2 step 768 {
-    %13 = dim %0, 1 : memref<?x?xf32>
+    %13 = dim %0, %c1 : memref<?x?xf32>
     affine.for %arg4 = 0 to %13 step 264 {
-      %18 = dim %0, 0 : memref<?x?xf32>
+      %18 = dim %0, %c0 : memref<?x?xf32>
       %20 = std.subview %0[%c0, %c0][%18,%arg4][%c1,%c1] : memref<?x?xf32>
                           to memref<?x?xf32, offset : ?, strides : [?, ?]>
-      %24 = dim %20, 0 : memref<?x?xf32, offset : ?, strides : [?, ?]>
+      %24 = dim %20, %c0 : memref<?x?xf32, offset : ?, strides : [?, ?]>
       affine.for %arg5 = 0 to %24 step 768 {
         "foo"() : () -> ()
       }

diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir
index 51394ab61525..9a3206f31345 100644
--- a/mlir/test/Dialect/GPU/outlining.mlir
+++ b/mlir/test/Dialect/GPU/outlining.mlir
@@ -92,8 +92,9 @@ func @extra_constants(%arg0 : memref<?xf32>) {
   // CHECK: %[[CST:.*]] = constant 8 : index
   %cst = constant 8 : index
   %cst2 = constant 2 : index
-  %cst3 = dim %arg0, 0 : memref<?xf32>
-  // CHECK: "gpu.launch_func"(%[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %{{.*}}) {kernel = @extra_constants_kernel::@extra_constants_kernel} : (index, index, index, index, index, index, memref<?xf32>) -> ()
+  %c0 = constant 0 : index
+  %cst3 = dim %arg0, %c0 : memref<?xf32>
+  // CHECK: "gpu.launch_func"(%[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %[[CST]], %{{.*}}, %{{.*}}) {kernel = @extra_constants_kernel::@extra_constants_kernel} : (index, index, index, index, index, index, memref<?xf32>, index) -> ()
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst,
                                        %grid_z = %cst)
              threads(%tx, %ty, %tz) in (%block_x = %cst, %block_y = %cst,
@@ -104,7 +105,7 @@ func @extra_constants(%arg0 : memref<?xf32>) {
   return
 }
 
-// CHECK-LABEL: func @extra_constants_kernel(%{{.*}}: memref<?xf32>)
+// CHECK-LABEL: func @extra_constants_kernel(%{{.*}}: memref<?xf32>, %{{.*}}: index)
 // CHECK: constant
 // CHECK: constant
 
@@ -122,7 +123,7 @@ func @multiple_uses(%arg0 : memref<?xf32>) {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1,
                                        %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %c1, %block_y = %c1,
-	                                %block_z = %c1) {
+                                        %block_z = %c1) {
     "use1"(%c2, %c2) : (index, index) -> ()
     "use2"(%c2) : (index) -> ()
     gpu.terminator

diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir
index 279a4bde29aa..4e5fc43f5611 100644
--- a/mlir/test/Dialect/Linalg/affine.mlir
+++ b/mlir/test/Dialect/Linalg/affine.mlir
@@ -43,11 +43,11 @@ func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
 
 // CHECK-LABEL: func @conv_view3(
 //  CHECK: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
-//       CHECK:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[Q:.*]] = dim %arg0, 1 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[K:.*]] = dim %arg0, 2 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[B:.*]] = dim %arg1, 0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECK:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECK:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[strided3D]]>
 //       CHECK:   affine.for %{{.*}} = 0 to %[[B]] {
 //       CHECK:     affine.for %{{.*}} = 0 to %[[X0]] {
 //       CHECK:       affine.for %{{.*}} = 0 to %[[K]] {
@@ -70,13 +70,13 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 // CHECK-LABEL: func @conv_padding
 //       CHECK: %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>) {
 //       CHECK:   %[[ZERO:.*]] = constant 0.000000e+00 : f32
-//       CHECK:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?x?xf32>
-//       CHECK:   %[[Z1:.*]] = dim %arg0, 1 : memref<?x?x?x?xf32>
-//       CHECK:   %[[Q:.*]] =  dim %arg0, 2 : memref<?x?x?x?xf32>
-//       CHECK:   %[[K:.*]] =  dim %arg0, 3 : memref<?x?x?x?xf32>
-//       CHECK:   %[[B:.*]] =  dim %arg1, 0 : memref<?x?x?x?xf32>
-//       CHECK:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?x?xf32>
-//       CHECK:   %[[X1:.*]] = dim %arg2, 2 : memref<?x?x?x?xf32>
+//       CHECK:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32>
+//       CHECK:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32>
+//       CHECK:   %[[Q:.*]] =  dim %arg0, %c2 : memref<?x?x?x?xf32>
+//       CHECK:   %[[K:.*]] =  dim %arg0, %c3 : memref<?x?x?x?xf32>
+//       CHECK:   %[[B:.*]] =  dim %arg1, %c0 : memref<?x?x?x?xf32>
+//       CHECK:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32>
+//       CHECK:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32>
 //       CHECK:   affine.for %{{.*}} = 0 to %[[B]] {
 //       CHECK:     affine.for %{{.*}} = 0 to %[[X0]] {
 //       CHECK:       affine.for %{{.*}} = 0 to %[[X1]] {
@@ -109,10 +109,10 @@ func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memre
 //  CHECK-SAME: %[[mA:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
 //  CHECK-SAME: %[[mB:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
 //  CHECK-SAME: %[[mC:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
-//       CHECK: %[[B:.*]] = dim %[[mA]], 0 : memref<?x?x?xf32>
-//       CHECK: %[[M:.*]] = dim %[[mA]], 1 : memref<?x?x?xf32>
-//       CHECK: %[[K:.*]] = dim %[[mA]], 2 : memref<?x?x?xf32>
-//       CHECK: %[[N:.*]] = dim %[[mB]], 2 : memref<?x?x?xf32>
+//       CHECK: %[[B:.*]] = dim %[[mA]], %c0 : memref<?x?x?xf32>
+//       CHECK: %[[M:.*]] = dim %[[mA]], %c1 : memref<?x?x?xf32>
+//       CHECK: %[[K:.*]] = dim %[[mA]], %c2 : memref<?x?x?xf32>
+//       CHECK: %[[N:.*]] = dim %[[mB]], %c2 : memref<?x?x?xf32>
 //       CHECK: affine.for %[[b:.*]] = 0 to %[[B]] {
 //       CHECK:   affine.for %[[m:.*]] = 0 to %[[M]] {
 //       CHECK:     affine.for %[[n:.*]] = 0 to %[[N]] {

diff --git a/mlir/test/Dialect/Linalg/fusion-2-level.mlir b/mlir/test/Dialect/Linalg/fusion-2-level.mlir
index f4864945f5d1..7be54f45b473 100644
--- a/mlir/test/Dialect/Linalg/fusion-2-level.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-2-level.mlir
@@ -9,9 +9,9 @@ func @f1(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>, %B: memref<?x?xf32, of
   %c40 = constant 40 : index
   %c30 = constant 30 : index
   %c20 = constant 20 : index
-  %0 = dim %C, 0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
-  %1 = dim %C, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
-  %2 = dim %D, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %0 = dim %C, %c0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %1 = dim %C, %c1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %2 = dim %D, %c1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
   linalg.matmul(%A, %B, %C) : memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>
   scf.for %arg5 = %c0 to %0 step %c20 {
     scf.for %arg6 = %c0 to %2 step %c30 {
@@ -19,9 +19,9 @@ func @f1(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>, %B: memref<?x?xf32, of
         %5 = std.subview %C[%arg5, %arg7][%c20, %c40][%c1, %c1] : memref<?x?xf32, offset: ?, strides: [?, 1]> to memref<?x?xf32, offset: ?, strides: [?, ?]>
         %7 = std.subview %D[%arg7, %arg6][%c40, %c30][%c1, %c1]: memref<?x?xf32, offset: ?, strides: [?, 1]> to memref<?x?xf32, offset: ?, strides: [?, ?]>
         %8 = std.subview %E[%arg5, %arg6][%c20, %c40][%c1, %c1] : memref<?x?xf32, offset: ?, strides: [?, 1]> to memref<?x?xf32, offset: ?, strides: [?, ?]>
-        %9 = dim %5, 0 : memref<?x?xf32, offset: ?, strides: [?, ?]>
-        %10 = dim %5, 1 : memref<?x?xf32, offset: ?, strides: [?, ?]>
-        %11 = dim %7, 1 : memref<?x?xf32, offset: ?, strides: [?, ?]>
+        %9 = dim %5, %c0 : memref<?x?xf32, offset: ?, strides: [?, ?]>
+        %10 = dim %5, %c1 : memref<?x?xf32, offset: ?, strides: [?, ?]>
+        %11 = dim %7, %c1 : memref<?x?xf32, offset: ?, strides: [?, ?]>
         scf.for %arg8 = %c0 to %9 step %c2 {
           scf.for %arg9 = %c0 to %11 step %c3 {
             scf.for %arg10 = %c0 to %10 step %c4 {

diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
index db47e8eea616..b6020df5fb77 100644
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -10,14 +10,14 @@ func @f1(%A: memref<?x?xf32, offset: 0, strides: [?, 1]>,
   %c4 = constant 4 : index
   %c3 = constant 3 : index
   %c2 = constant 2 : index
-  %0 = dim %A, 0 : memref<?x?xf32, offset: 0, strides: [?, 1]>
-  %1 = dim %A, 1 : memref<?x?xf32, offset: 0, strides: [?, 1]>
-  %2 = dim %B, 1 : memref<?x?xf32, offset: 0, strides: [?, 1]>
+  %c1 = constant 1 : index
+  %0 = dim %A, %c0 : memref<?x?xf32, offset: 0, strides: [?, 1]>
+  %1 = dim %A, %c1 : memref<?x?xf32, offset: 0, strides: [?, 1]>
+  %2 = dim %B, %c1 : memref<?x?xf32, offset: 0, strides: [?, 1]>
   linalg.matmul(%A, %B, %C) :
     memref<?x?xf32, offset: 0, strides: [?, 1]>,
     memref<?x?xf32, offset: 0, strides: [?, 1]>,
     memref<?x?xf32, offset: 0, strides: [?, 1]>
-  %c1 = constant 1 : index
   scf.for %arg5 = %c0 to %0 step %c2 {
     scf.for %arg6 = %c0 to %2 step %c3 {
       scf.for %arg7 = %c0 to %1 step %c4 {
@@ -65,9 +65,9 @@ func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %0 = dim %C, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %C, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %2 = dim %D, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %C, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %C, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %2 = dim %D, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   scf.for %arg5 = %c0 to %0 step %c2 {
     scf.for %arg6 = %c0 to %2 step %c3 {
       scf.for %arg7 = %c0 to %1 step %c4 {
@@ -91,9 +91,9 @@ func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f2
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK-DAG:  %[[C_0:.*]] = dim %[[C]], 0 : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[C_1:.*]] = dim %[[C]], 1 : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], 1 : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[C_0:.*]] = dim %[[C]], %c0{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[C_1:.*]] = dim %[[C]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[C_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
 // CHECK:      scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
@@ -117,9 +117,9 @@ func @f3(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %0 = dim %D, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %D, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %2 = dim %C, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %D, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %D, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %2 = dim %C, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   scf.for %arg5 = %c0 to %0 step %c2 {
     scf.for %arg6 = %c0 to %2 step %c3 {
       scf.for %arg7 = %c0 to %1 step %c4 {
@@ -143,9 +143,9 @@ func @f3(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f3
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK:  %[[D_0:.*]] = dim %[[D]], 0 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[D_1:.*]] = dim %[[D]], 1 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_1:.*]] = dim %[[C]], 1 : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[D_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
 // CHECK:      scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
@@ -173,9 +173,9 @@ func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %0 = dim %C, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %C, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %2 = dim %D, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %C, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %C, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %2 = dim %D, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   scf.for %arg5 = %c0 to %0 step %c2 {
     scf.for %arg6 = %c0 to %2 step %c3 {
       scf.for %arg7 = %c0 to %1 step %c4 {
@@ -199,9 +199,9 @@ func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f4
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK:  %[[C_0:.*]] = dim %[[C]], 0 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_1:.*]] = dim %[[C]], 1 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[D_1:.*]] = dim %[[D]], 1 : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[C_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
 // CHECK:      scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
@@ -224,9 +224,9 @@ func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
   %c4 = constant 4 : index
   %c3 = constant 3 : index
   %c2 = constant 2 : index
-  %0 = dim %B, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %D, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %2 = dim %D, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %B, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %D, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %2 = dim %D, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   linalg.matmul(%A, %B, %C) :
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -258,9 +258,9 @@ func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f5
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK-DAG:  %[[B_1:.*]] = dim %[[B]], 1 : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[D_0:.*]] = dim %[[D]], 0 : memref<?x?xf32, #[[strided2D]]>
-// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], 1 : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[B_1:.*]] = dim %[[B]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK-DAG:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
 // CHECK:  scf.for %[[I:.*]] = %{{.*}} to %[[D_0]] step %{{.*}} {
 // CHECK:    scf.for %[[J:.*]] = %{{.*}} to %[[B_1]] step %{{.*}} {
 // CHECK:      scf.for %[[K:.*]] = %{{.*}} to %[[D_1]] step %{{.*}} {
@@ -296,7 +296,7 @@ func @f6(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
   %c4 = constant 4 : index
   %c3 = constant 3 : index
   %c2 = constant 2 : index
-  %0 = dim %C, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %C, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   linalg.matmul(%A, %B, %C) :
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -305,8 +305,8 @@ func @f6(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %C, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %2 = dim %D, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %C, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %2 = dim %D, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   scf.for %arg5 = %c0 to %1 step %c2 {
     scf.for %arg6 = %c0 to %2 step %c3 {
       scf.for %arg7 = %c0 to %0 step %c4 {
@@ -354,11 +354,11 @@ func @f7(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
   %c4 = constant 4 : index
   %c3 = constant 3 : index
   %c2 = constant 2 : index
-  %0 = dim %A, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %A, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %2 = dim %C, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %3 = dim %C, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %4 = dim %D, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %A, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %A, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %2 = dim %C, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %3 = dim %C, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %4 = dim %D, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   linalg.matmul(%A, %C, %E) :
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -409,11 +409,11 @@ func @f7(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 }
 // CHECK-LABEL: func @f7
 // CHECK:  (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
-// CHECK:  %[[A_0:.*]] = dim %[[A]], 0 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[A_1:.*]] = dim %[[A]], 1 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_1:.*]] = dim %[[C]], 1 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[C_0:.*]] = dim %[[C]], 0 : memref<?x?xf32, #[[strided2D]]>
-// CHECK:  %[[D_1:.*]] = dim %[[D]], 1 : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[A_0:.*]] = dim %[[A]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[A_1:.*]] = dim %[[A]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
+// CHECK:  %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
 // CHECK:  linalg.matmul(%[[A]], %[[C]], %[[E]])
 // CHECK:  scf.for %{{.*}} = %{{.*}} to %[[A_0]] step %{{.*}} {
 // CHECK:    scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
@@ -443,8 +443,8 @@ func @f8(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
   %c4 = constant 4 : index
   %c3 = constant 3 : index
   %c2 = constant 2 : index
-  %0 = dim %A, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %A, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %A, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %A, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   linalg.matmul(%A, %C, %D) :
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -453,7 +453,7 @@ func @f8(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>,
     memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %2 = dim %D, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %2 = dim %D, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   scf.for %arg5 = %c0 to %0 step %c2 {
     scf.for %arg6 = %c0 to %2 step %c3 {
       scf.for %arg7 = %c0 to %1 step %c4 {
@@ -512,8 +512,8 @@ func @pointwise(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
   }: memref<?x?xf32, offset: 0, strides: [?, ?]>,
      memref<?x?xf32, offset: 0, strides: [?, ?]>,
      memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %0 = dim %B, 0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
-  %1 = dim %B, 1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %0 = dim %B, %c0 : memref<?x?xf32, offset: 0, strides: [?, ?]>
+  %1 = dim %B, %c1 : memref<?x?xf32, offset: 0, strides: [?, ?]>
   scf.for %arg4 = %c0 to %0 step %c2 {
     scf.for %arg5 = %c0 to %1 step %c3 {
       %4 = std.subview %B[%arg4, %arg5][%c2, %c3][%c1, %c1] :
@@ -571,8 +571,8 @@ func @pointwise_no_view(%M: index, %N: index) {
   }: memref<?x?xf32>,
      memref<?x?xf32>,
      memref<?x?xf32>
-  %0 = dim %B, 0 : memref<?x?xf32>
-  %1 = dim %B, 1 : memref<?x?xf32>
+  %0 = dim %B, %c0 : memref<?x?xf32>
+  %1 = dim %B, %c1 : memref<?x?xf32>
   scf.for %arg4 = %c0 to %0 step %c2 {
     scf.for %arg5 = %c0 to %1 step %c3 {
       %4 = std.subview %B[%arg4, %arg5][%c2, %c3][%c1, %c1] :
@@ -638,10 +638,10 @@ func @fusion_of_three(%arg0: memref<100x10xf32>,
         linalg.yield %2 : f32
       }: memref<100x10xf32>, memref<100x10xf32>, memref<100x10xf32>
   dealloc %0 : memref<100x10xf32>
-  %2 = dim %1, 0 : memref<100x10xf32>
-  %3 = dim %1, 1 : memref<100x10xf32>
-  %4 = dim %arg2, 0 : memref<100x10xf32>
-  %5 = dim %arg2, 1 : memref<100x10xf32>
+  %2 = dim %1, %c0 : memref<100x10xf32>
+  %3 = dim %1, %c1 : memref<100x10xf32>
+  %4 = dim %arg2, %c0 : memref<100x10xf32>
+  %5 = dim %arg2, %c1 : memref<100x10xf32>
   scf.for %i = %c0 to %2 step %c1 {
     scf.for %j = %c0 to %3 step %c1 {
       %6 = std.subview %1[%i, %j][%c1, %c1][%c1, %c1] :
@@ -693,15 +693,15 @@ func @fill_and_conv(%arg0: memref<1x4x5x1xf32>, %arg1: memref<2x3x1x1xf32>, %arg
   %c0 = constant 0 : index
   %c2 = constant 2 : index
   %c3 = constant 3 : index
-  %4 = dim %arg1, 0 : memref<2x3x1x1xf32>
-  %5 = dim %arg1, 1 : memref<2x3x1x1xf32>
-  %6 = dim %arg0, 0 : memref<1x4x5x1xf32>
-  %7 = dim %arg0, 1 : memref<1x4x5x1xf32>
-  %8 = dim %arg0, 3 : memref<1x4x5x1xf32>
-  %9 = dim %arg2, 0 : memref<1x4x5x1xf32>
-  %10 = dim %arg2, 1 : memref<1x4x5x1xf32>
-  %11 = dim %arg2, 2 : memref<1x4x5x1xf32>
-  %12 = dim %arg2, 3 : memref<1x4x5x1xf32>
+  %4 = dim %arg1, %c0 : memref<2x3x1x1xf32>
+  %5 = dim %arg1, %c1 : memref<2x3x1x1xf32>
+  %6 = dim %arg0, %c0 : memref<1x4x5x1xf32>
+  %7 = dim %arg0, %c1 : memref<1x4x5x1xf32>
+  %8 = dim %arg0, %c3 : memref<1x4x5x1xf32>
+  %9 = dim %arg2, %c0 : memref<1x4x5x1xf32>
+  %10 = dim %arg2, %c1 : memref<1x4x5x1xf32>
+  %11 = dim %arg2, %c2 : memref<1x4x5x1xf32>
+  %12 = dim %arg2, %c3 : memref<1x4x5x1xf32>
   %13 = linalg.range %c0 : %6 : %c2 : !linalg.range
   %14 = linalg.range %c0 : %10 : %c3 : !linalg.range
   scf.for %arg3 = %c0 to %6 step %c2 {
@@ -709,13 +709,13 @@ func @fill_and_conv(%arg0: memref<1x4x5x1xf32>, %arg1: memref<2x3x1x1xf32>, %arg
       %15 = affine.min #map0(%c2, %c1, %arg3)
       %16 = affine.apply #map2()[%7]
       %17 = affine.min #map0(%16, %c4, %arg4)
-      %18 = dim %arg0, 2 : memref<1x4x5x1xf32>
-      %19 = dim %arg0, 3 : memref<1x4x5x1xf32>
+      %18 = dim %arg0, %c2 : memref<1x4x5x1xf32>
+      %19 = dim %arg0, %c3 : memref<1x4x5x1xf32>
       %20 = subview %arg0[%arg3, %arg4, %c0, %c0] [%15, %17, %18, %19] [%c1, %c1, %c1, %c1] : memref<1x4x5x1xf32> to memref<?x?x?x?xf32, #map1>
       %21 = affine.min #map0(%c2, %c1, %arg3)
       %22 = affine.min #map0(%c3, %c4, %arg4)
-      %23 = dim %arg2, 2 : memref<1x4x5x1xf32>
-      %24 = dim %arg2, 3 : memref<1x4x5x1xf32>
+      %23 = dim %arg2, %c2 : memref<1x4x5x1xf32>
+      %24 = dim %arg2, %c3 : memref<1x4x5x1xf32>
       %25 = subview %arg2[%arg3, %arg4, %c0, %c0] [%21, %22, %23, %24] [%c1, %c1, %c1, %c1] : memref<1x4x5x1xf32> to memref<?x?x?x?xf32, #map1>
       linalg.conv(%arg1, %20, %25) {dilations = [1, 1], strides = [1, 1]} : memref<2x3x1x1xf32>, memref<?x?x?x?xf32, #map1>, memref<?x?x?x?xf32, #map1>
     }

diff --git a/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
index c14db3bed1c4..984da632973e 100644
--- a/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
+++ b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
@@ -21,10 +21,10 @@ func @fuse_indexed_generic_consumer(%A: memref<?x?xf32>,
   %c0 = constant 0 : index
   %c25 = constant 25 : index
   %c10 = constant 10 : index
-  %0 = dim %C, 0 : memref<?x?xf32>
-  %1 = dim %C, 1 : memref<?x?xf32>
-  %2 = dim %D, 0 : memref<?x?xf32>
-  %3 = dim %D, 1 : memref<?x?xf32>
+  %0 = dim %C, %c0 : memref<?x?xf32>
+  %1 = dim %C, %c1 : memref<?x?xf32>
+  %2 = dim %D, %c0 : memref<?x?xf32>
+  %3 = dim %D, %c1 : memref<?x?xf32>
   scf.for %arg2 = %c0 to %0 step %c10 {
     scf.for %arg3 = %c0 to %1 step %c25 {
       %4 = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
@@ -87,10 +87,10 @@ func @fuse_indexed_generic_producer(%A: memref<?x?xf32>,
       %out = addf %ab, %i_float : f32
       linalg.yield %out : f32
   }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
-  %C_X = dim %C, 0 : memref<?x?xf32>
-  %C_Y = dim %C, 1 : memref<?x?xf32>
-  %D_X = dim %D, 0 : memref<?x?xf32>
-  %D_Y = dim %D, 1 : memref<?x?xf32>
+  %C_X = dim %C, %c0 : memref<?x?xf32>
+  %C_Y = dim %C, %c1 : memref<?x?xf32>
+  %D_X = dim %D, %c0 : memref<?x?xf32>
+  %D_Y = dim %D, %c1 : memref<?x?xf32>
   scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%C_X, %C_Y) step (%c10, %c25) {
     %C_view = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
         memref<?x?xf32> to memref<?x?xf32, #map>
@@ -145,10 +145,10 @@ func @fuse_indexed_generic_producer_tile_second_dim_only(%A: memref<?x?xf32>,
       %out = addf %ab, %j_float : f32
       linalg.yield %out : f32
   }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
-  %C_X = dim %C, 0 : memref<?x?xf32>
-  %C_Y = dim %C, 1 : memref<?x?xf32>
-  %D_X = dim %D, 0 : memref<?x?xf32>
-  %D_Y = dim %D, 1 : memref<?x?xf32>
+  %C_X = dim %C, %c0 : memref<?x?xf32>
+  %C_Y = dim %C, %c1 : memref<?x?xf32>
+  %D_X = dim %D, %c0 : memref<?x?xf32>
+  %D_Y = dim %D, %c1 : memref<?x?xf32>
   %3 = linalg.range %c0 : %C_Y : %c3 : !linalg.range
   scf.parallel (%j) = (%c0) to (%C_Y) step (%c3) {
     %0 = affine.min affine_map<(d0, d1, d2) -> (d0, d1 - d2)>(%c3, %C_Y, %j)

diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index 72457bc6034e..a78a2c1c934d 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -153,7 +153,7 @@ func @dot_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf3
 }
 // CHECKLOOP-LABEL: func @dot_view(
 //       CHECKLOOP:   %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<f32>) {
-//       CHECKLOOP: %[[K:.*]] = dim %arg0, 0 : memref<?xf32, #[[strided1D]]>
+//       CHECKLOOP: %[[K:.*]] = dim %arg0, %c0 : memref<?xf32, #[[strided1D]]>
 //       CHECKLOOP: scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} {
 //   CHECKLOOP-DAG:   %[[a:.*]] = load %arg0[%{{.*}}] : memref<?xf32, #[[strided1D]]>
 //   CHECKLOOP-DAG:   %[[b:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
@@ -164,7 +164,7 @@ func @dot_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf3
 
 // CHECKPARALLEL-LABEL: func @dot_view(
 //       CHECKPARALLEL:   %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<?xf32, #[[strided1D]]>, %{{.*}}: memref<f32>) {
-//       CHECKPARALLEL: %[[K:.*]] = dim %arg0, 0 : memref<?xf32, #[[strided1D]]>
+//       CHECKPARALLEL: %[[K:.*]] = dim %arg0, %c0 : memref<?xf32, #[[strided1D]]>
 //       CHECKPARALLEL: scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} {
 //   CHECKPARALLEL-DAG:   %[[a:.*]] = load %arg0[%{{.*}}] : memref<?xf32, #[[strided1D]]>
 //   CHECKPARALLEL-DAG:   %[[b:.*]] = load %{{.*}}[%{{.*}}] : memref<?xf32, #[[strided1D]]>
@@ -267,11 +267,11 @@ func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
 }
 // CHECKLOOP-LABEL: func @conv_view3(
 //       CHECKLOOP: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
-//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, 1 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[K:.*]] = dim %arg0, 2 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[B:.*]] = dim %arg1, 0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[strided3D]]>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} {
@@ -287,11 +287,11 @@ func @conv_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
 
 // CHECKPARALLEL-LABEL: func @conv_view3(
 //       CHECKPARALLEL: %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>, %{{.*}}: memref<?x?x?xf32, #[[strided3D]]>) {
-//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, 1 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, 2 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, 0 : memref<?x?x?xf32, #[[strided3D]]>
-//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, %c1 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, %c2 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?xf32, #[[strided3D]]>
+//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?xf32, #[[strided3D]]>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} {
@@ -309,13 +309,13 @@ func @conv_view4(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %
 }
 // CHECKLOOP-LABEL: func @conv_view4(
 //       CHECKLOOP: %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>) {
-//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[Z1:.*]] = dim %arg0, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[K:.*]] = dim %arg0, 3 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[B:.*]] = dim %arg1, 0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKLOOP:   %[[X1:.*]] = dim %arg2, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:   %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:   %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKLOOP:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[X1]] step %{{.*}} {
@@ -334,13 +334,13 @@ func @conv_view4(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %
 
 // CHECKPARALLEL-LABEL: func @conv_view4(
 //       CHECKPARALLEL: %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>) {
-//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[Z1:.*]] = dim %arg0, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, 3 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, 0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       CHECKPARALLEL:   %[[X1:.*]] = dim %arg2, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:   %[[Q:.*]] = dim %arg0, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:   %[[K:.*]] = dim %arg0, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:   %[[B:.*]] = dim %arg1, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       CHECKPARALLEL:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[X1]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} {
@@ -366,13 +366,13 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 // CHECKLOOP-LABEL: func @conv_padding
 //       CHECKLOOP: %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>) {
 //       CHECKLOOP:   %[[ZERO:.*]] = constant 0.000000e+00 : f32
-//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?x?xf32>
-//       CHECKLOOP:   %[[Z1:.*]] = dim %arg0, 1 : memref<?x?x?x?xf32>
-//       CHECKLOOP:   %[[Q:.*]] =  dim %arg0, 2 : memref<?x?x?x?xf32>
-//       CHECKLOOP:   %[[K:.*]] =  dim %arg0, 3 : memref<?x?x?x?xf32>
-//       CHECKLOOP:   %[[B:.*]] =  dim %arg1, 0 : memref<?x?x?x?xf32>
-//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?x?xf32>
-//       CHECKLOOP:   %[[X1:.*]] = dim %arg2, 2 : memref<?x?x?x?xf32>
+//       CHECKLOOP:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32>
+//       CHECKLOOP:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32>
+//       CHECKLOOP:   %[[Q:.*]] =  dim %arg0, %c2 : memref<?x?x?x?xf32>
+//       CHECKLOOP:   %[[K:.*]] =  dim %arg0, %c3 : memref<?x?x?x?xf32>
+//       CHECKLOOP:   %[[B:.*]] =  dim %arg1, %c0 : memref<?x?x?x?xf32>
+//       CHECKLOOP:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32>
+//       CHECKLOOP:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[B]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[X0]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[X1]] step %{{.*}} {
@@ -395,13 +395,13 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32>,
 // CHECKPARALLEL-LABEL: func @conv_padding
 //       CHECKPARALLEL: %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>, %{{.*}}: memref<?x?x?x?xf32>) {
 //       CHECKPARALLEL:   %[[ZERO:.*]] = constant 0.000000e+00 : f32
-//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, 0 : memref<?x?x?x?xf32>
-//       CHECKPARALLEL:   %[[Z1:.*]] = dim %arg0, 1 : memref<?x?x?x?xf32>
-//       CHECKPARALLEL:   %[[Q:.*]] =  dim %arg0, 2 : memref<?x?x?x?xf32>
-//       CHECKPARALLEL:   %[[K:.*]] =  dim %arg0, 3 : memref<?x?x?x?xf32>
-//       CHECKPARALLEL:   %[[B:.*]] =  dim %arg1, 0 : memref<?x?x?x?xf32>
-//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, 1 : memref<?x?x?x?xf32>
-//       CHECKPARALLEL:   %[[X1:.*]] = dim %arg2, 2 : memref<?x?x?x?xf32>
+//       CHECKPARALLEL:   %[[Z0:.*]] = dim %arg0, %c0 : memref<?x?x?x?xf32>
+//       CHECKPARALLEL:   %[[Z1:.*]] = dim %arg0, %c1 : memref<?x?x?x?xf32>
+//       CHECKPARALLEL:   %[[Q:.*]] =  dim %arg0, %c2 : memref<?x?x?x?xf32>
+//       CHECKPARALLEL:   %[[K:.*]] =  dim %arg0, %c3 : memref<?x?x?x?xf32>
+//       CHECKPARALLEL:   %[[B:.*]] =  dim %arg1, %c0 : memref<?x?x?x?xf32>
+//       CHECKPARALLEL:   %[[X0:.*]] = dim %arg2, %c1 : memref<?x?x?x?xf32>
+//       CHECKPARALLEL:   %[[X1:.*]] = dim %arg2, %c2 : memref<?x?x?x?xf32>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) to (%[[B]], %[[X0]], %[[X1]], %[[K]]) step (%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[Q]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[Z0]] step %{{.*}} {
@@ -426,10 +426,10 @@ func @pooling_max(%arg0: memref<?x?xf32>,
   return
 }
 // CHECKLOOP-LABEL: func @pooling_max
-//       CHECKLOOP:   %[[WX:.*]] = dim %arg1, 0 : memref<?x?xi32>
-//       CHECKLOOP:   %[[WY:.*]] = dim %arg1, 1 : memref<?x?xi32>
-//       CHECKLOOP:   %[[OX:.*]] = dim %arg2, 0 : memref<?x?xf32>
-//       CHECKLOOP:   %[[OY:.*]] = dim %arg2, 1 : memref<?x?xf32>
+//       CHECKLOOP:   %[[WX:.*]] = dim %arg1, %c0 : memref<?x?xi32>
+//       CHECKLOOP:   %[[WY:.*]] = dim %arg1, %c1 : memref<?x?xi32>
+//       CHECKLOOP:   %[[OX:.*]] = dim %arg2, %c0 : memref<?x?xf32>
+//       CHECKLOOP:   %[[OY:.*]] = dim %arg2, %c1 : memref<?x?xf32>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[OX]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
@@ -442,10 +442,10 @@ func @pooling_max(%arg0: memref<?x?xf32>,
 //       CHECKLOOP:           store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 
 // CHECKPARALLEL-LABEL: func @pooling_max
-//       CHECKPARALLEL:   %[[WX:.*]] = dim %arg1, 0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = dim %arg1, 1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = dim %arg2, 0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = dim %arg2, 1 : memref<?x?xf32>
+//       CHECKPARALLEL:   %[[WX:.*]] = dim %arg1, %c0 : memref<?x?xi32>
+//       CHECKPARALLEL:   %[[WY:.*]] = dim %arg1, %c1 : memref<?x?xi32>
+//       CHECKPARALLEL:   %[[OX:.*]] = dim %arg2, %c0 : memref<?x?xf32>
+//       CHECKPARALLEL:   %[[OY:.*]] = dim %arg2, %c1 : memref<?x?xf32>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
@@ -464,10 +464,10 @@ func @pooling_min(%arg0: memref<?x?xf32>,
   return
 }
 // CHECKLOOP-LABEL: func @pooling_min
-//       CHECKLOOP:   %[[WX:.*]] = dim %arg1, 0 : memref<?x?xi32>
-//       CHECKLOOP:   %[[WY:.*]] = dim %arg1, 1 : memref<?x?xi32>
-//       CHECKLOOP:   %[[OX:.*]] = dim %arg2, 0 : memref<?x?xf32>
-//       CHECKLOOP:   %[[OY:.*]] = dim %arg2, 1 : memref<?x?xf32>
+//       CHECKLOOP:   %[[WX:.*]] = dim %arg1, %c0 : memref<?x?xi32>
+//       CHECKLOOP:   %[[WY:.*]] = dim %arg1, %c1 : memref<?x?xi32>
+//       CHECKLOOP:   %[[OX:.*]] = dim %arg2, %c0 : memref<?x?xf32>
+//       CHECKLOOP:   %[[OY:.*]] = dim %arg2, %c1 : memref<?x?xf32>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[OX]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
@@ -480,10 +480,10 @@ func @pooling_min(%arg0: memref<?x?xf32>,
 //       CHECKLOOP:           store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 
 // CHECKPARALLEL-LABEL: func @pooling_min
-//       CHECKPARALLEL:   %[[WX:.*]] = dim %arg1, 0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = dim %arg1, 1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = dim %arg2, 0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = dim %arg2, 1 : memref<?x?xf32>
+//       CHECKPARALLEL:   %[[WX:.*]] = dim %arg1, %c0 : memref<?x?xi32>
+//       CHECKPARALLEL:   %[[WY:.*]] = dim %arg1, %c1 : memref<?x?xi32>
+//       CHECKPARALLEL:   %[[OX:.*]] = dim %arg2, %c0 : memref<?x?xf32>
+//       CHECKPARALLEL:   %[[OY:.*]] = dim %arg2, %c1 : memref<?x?xf32>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
@@ -502,10 +502,10 @@ func @pooling_sum(%arg0: memref<?x?xf32>,
   return
 }
 // CHECKLOOP-LABEL: func @pooling_sum
-//       CHECKLOOP:   %[[WX:.*]] = dim %arg1, 0 : memref<?x?xi32>
-//       CHECKLOOP:   %[[WY:.*]] = dim %arg1, 1 : memref<?x?xi32>
-//       CHECKLOOP:   %[[OX:.*]] = dim %arg2, 0 : memref<?x?xf32>
-//       CHECKLOOP:   %[[OY:.*]] = dim %arg2, 1 : memref<?x?xf32>
+//       CHECKLOOP:   %[[WX:.*]] = dim %arg1, %c0 : memref<?x?xi32>
+//       CHECKLOOP:   %[[WY:.*]] = dim %arg1, %c1 : memref<?x?xi32>
+//       CHECKLOOP:   %[[OX:.*]] = dim %arg2, %c0 : memref<?x?xf32>
+//       CHECKLOOP:   %[[OY:.*]] = dim %arg2, %c1 : memref<?x?xf32>
 //       CHECKLOOP:   scf.for %{{.*}} = %{{.*}} to %[[OX]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %{{.*}} = %{{.*}} to %[[OY]] step %{{.*}} {
 //       CHECKLOOP:       scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
@@ -518,10 +518,10 @@ func @pooling_sum(%arg0: memref<?x?xf32>,
 //       CHECKLOOP:           store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
 
 // CHECKPARALLEL-LABEL: func @pooling_sum
-//       CHECKPARALLEL:   %[[WX:.*]] = dim %arg1, 0 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[WY:.*]] = dim %arg1, 1 : memref<?x?xi32>
-//       CHECKPARALLEL:   %[[OX:.*]] = dim %arg2, 0 : memref<?x?xf32>
-//       CHECKPARALLEL:   %[[OY:.*]] = dim %arg2, 1 : memref<?x?xf32>
+//       CHECKPARALLEL:   %[[WX:.*]] = dim %arg1, %c0 : memref<?x?xi32>
+//       CHECKPARALLEL:   %[[WY:.*]] = dim %arg1, %c1 : memref<?x?xi32>
+//       CHECKPARALLEL:   %[[OX:.*]] = dim %arg2, %c0 : memref<?x?xf32>
+//       CHECKPARALLEL:   %[[OY:.*]] = dim %arg2, %c1 : memref<?x?xf32>
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[OX]], %[[OY]]) step (%{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     scf.for %{{.*}} = %{{.*}} to %[[WX]] step %{{.*}} {
 //       CHECKPARALLEL:       scf.for %{{.*}} = %{{.*}} to %[[WY]] step %{{.*}} {
@@ -879,10 +879,10 @@ func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memre
 //  CHECKLOOP-SAME: %[[mA:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
 //  CHECKLOOP-SAME: %[[mB:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
 //  CHECKLOOP-SAME: %[[mC:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
-//       CHECKLOOP: %[[B:.*]] = dim %[[mA]], 0 : memref<?x?x?xf32>
-//       CHECKLOOP: %[[M:.*]] = dim %[[mA]], 1 : memref<?x?x?xf32>
-//       CHECKLOOP: %[[K:.*]] = dim %[[mA]], 2 : memref<?x?x?xf32>
-//       CHECKLOOP: %[[N:.*]] = dim %[[mB]], 2 : memref<?x?x?xf32>
+//       CHECKLOOP: %[[B:.*]] = dim %[[mA]], %c0 : memref<?x?x?xf32>
+//       CHECKLOOP: %[[M:.*]] = dim %[[mA]], %c1 : memref<?x?x?xf32>
+//       CHECKLOOP: %[[K:.*]] = dim %[[mA]], %c2 : memref<?x?x?xf32>
+//       CHECKLOOP: %[[N:.*]] = dim %[[mB]], %c2 : memref<?x?x?xf32>
 //       CHECKLOOP: scf.for %[[b:.*]] = %{{.*}} to %[[B]] step %{{.*}} {
 //       CHECKLOOP:   scf.for %[[m:.*]] = %{{.*}} to %[[M]] step %{{.*}} {
 //       CHECKLOOP:     scf.for %[[n:.*]] = %{{.*}} to %[[N]] step %{{.*}} {
@@ -898,10 +898,10 @@ func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memre
 //  CHECKPARALLEL-SAME: %[[mA:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
 //  CHECKPARALLEL-SAME: %[[mB:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
 //  CHECKPARALLEL-SAME: %[[mC:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
-//       CHECKPARALLEL: %[[B:.*]] = dim %[[mA]], 0 : memref<?x?x?xf32>
-//       CHECKPARALLEL: %[[M:.*]] = dim %[[mA]], 1 : memref<?x?x?xf32>
-//       CHECKPARALLEL: %[[K:.*]] = dim %[[mA]], 2 : memref<?x?x?xf32>
-//       CHECKPARALLEL: %[[N:.*]] = dim %[[mB]], 2 : memref<?x?x?xf32>
+//       CHECKPARALLEL: %[[B:.*]] = dim %[[mA]], %c0 : memref<?x?x?xf32>
+//       CHECKPARALLEL: %[[M:.*]] = dim %[[mA]], %c1 : memref<?x?x?xf32>
+//       CHECKPARALLEL: %[[K:.*]] = dim %[[mA]], %c2 : memref<?x?x?xf32>
+//       CHECKPARALLEL: %[[N:.*]] = dim %[[mB]], %c2 : memref<?x?x?xf32>
 //       CHECKPARALLEL: scf.parallel (%[[b:.*]], %[[m:.*]], %[[n:.*]]) = ({{.*}}) to (%[[B]], %[[M]], %[[N]]) step ({{.*}}) {
 //       CHECKPARALLEL:   scf.for %[[k:.*]] = %{{.*}} to %[[K]] step %{{.*}} {
 //       CHECKPARALLEL:       %[[va:.*]] = load %[[mA]][%[[b]], %[[m]], %[[k]]] : memref<?x?x?xf32>

diff --git a/mlir/test/Dialect/Linalg/parallel_loops.mlir b/mlir/test/Dialect/Linalg/parallel_loops.mlir
index 597990eac264..6c500ec3ded7 100644
--- a/mlir/test/Dialect/Linalg/parallel_loops.mlir
+++ b/mlir/test/Dialect/Linalg/parallel_loops.mlir
@@ -51,10 +51,10 @@ func @lower_outer_parallel(%A: memref<?x?x?x?xf32>, %B: memref<?x?x?xf32>) {
 // CHECK-LABEL: @lower_outer_parallel
 //   CHECK-DAG: %[[C0:.*]] = constant 0
 //   CHECK-DAG: %[[C1:.*]] = constant 1
-//   CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, 0
-//   CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, 1
-//   CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, 2
-//   CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, 3
+//   CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, %c0
+//   CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, %c1
+//   CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, %c2
+//   CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, %c3
 //       CHECK: scf.parallel (%[[IV0:.*]], %[[IV1:.*]]) = (%[[C0]], %[[C0]]) to (%[[D0]], %[[D1]]) step (%[[C1]], %[[C1]])
 //       CHECK:   scf.for %[[IV2:.*]] = %[[C0]] to %[[D2]] step %[[C1]]
 //       CHECK:     scf.parallel (%[[IV3:.*]]) = (%[[C0]]) to (%[[D3]]) step (%[[C1]])
@@ -84,12 +84,12 @@ func @lower_mixed_parallel(%A: memref<?x?x?x?x?x?xf32>, %B: memref<?x?x?x?xf32>)
 // CHECK-LABEL: @lower_mixed_parallel
 //   CHECK-DAG: %[[C0:.*]] = constant 0
 //   CHECK-DAG: %[[C1:.*]] = constant 1
-//   CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, 0
-//   CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, 1
-//   CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, 2
-//   CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, 3
-//   CHECK-DAG: %[[D4:.*]] = dim %{{.*}}, 4
-//   CHECK-DAG: %[[D5:.*]] = dim %{{.*}}, 5
+//   CHECK-DAG: %[[D0:.*]] = dim %{{.*}}, %c0
+//   CHECK-DAG: %[[D1:.*]] = dim %{{.*}}, %c1
+//   CHECK-DAG: %[[D2:.*]] = dim %{{.*}}, %c2
+//   CHECK-DAG: %[[D3:.*]] = dim %{{.*}}, %c3
+//   CHECK-DAG: %[[D4:.*]] = dim %{{.*}}, %c4
+//   CHECK-DAG: %[[D5:.*]] = dim %{{.*}}, %c5
 //       CHECK: scf.parallel (%[[IV0:.*]], %[[IV1:.*]]) = (%[[C0]], %[[C0]]) to (%[[D0]], %[[D1]]) step (%[[C1]], %[[C1]])
 //       CHECK:   scf.for %[[IV2:.*]] = %[[C0]] to %[[D2]] step %[[C1]]
 //       CHECK:     scf.parallel (%[[IV3:.*]], %[[IV4:.*]]) = (%[[C0]], %[[C0]]) to (%[[D3]], %[[D4]]) step (%[[C1]], %[[C1]])

diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 27364b05f3bd..b8c52e43504b 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -17,9 +17,9 @@ func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
   %3 = view %A[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32>
   %4 = view %A[%c0][%K, %N] : memref<?xi8> to memref<?x?xf32>
   %5 = view %A[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32>
-  %6 = dim %3, 0 : memref<?x?xf32>
-  %7 = dim %3, 1 : memref<?x?xf32>
-  %8 = dim %4, 1 : memref<?x?xf32>
+  %6 = dim %3, %c0 : memref<?x?xf32>
+  %7 = dim %3, %c1 : memref<?x?xf32>
+  %8 = dim %4, %c1 : memref<?x?xf32>
   scf.for %arg4 = %c0 to %6 step %c2 {
     scf.for %arg5 = %c0 to %8 step %c3 {
       scf.for %arg6 = %c0 to %7 step %c4 {
@@ -79,9 +79,9 @@ func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
   %3 = view %A[%c0][%M, %K] : memref<?xi8> to memref<?x?xf64>
   %4 = view %A[%c0][%K, %N] : memref<?xi8> to memref<?x?xf64>
   %5 = view %A[%c0][%M, %N] : memref<?xi8> to memref<?x?xf64>
-  %6 = dim %3, 0 : memref<?x?xf64>
-  %7 = dim %3, 1 : memref<?x?xf64>
-  %8 = dim %4, 1 : memref<?x?xf64>
+  %6 = dim %3, %c0 : memref<?x?xf64>
+  %7 = dim %3, %c1 : memref<?x?xf64>
+  %8 = dim %4, %c1 : memref<?x?xf64>
   scf.for %arg4 = %c0 to %6 step %c2 {
     scf.for %arg5 = %c0 to %8 step %c3 {
       scf.for %arg6 = %c0 to %7 step %c4 {
@@ -141,9 +141,9 @@ func @matmul_i32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
   %3 = view %A[%c0][%M, %K] : memref<?xi8> to memref<?x?xi32>
   %4 = view %A[%c0][%K, %N] : memref<?xi8> to memref<?x?xi32>
   %5 = view %A[%c0][%M, %N] : memref<?xi8> to memref<?x?xi32>
-  %6 = dim %3, 0 : memref<?x?xi32>
-  %7 = dim %3, 1 : memref<?x?xi32>
-  %8 = dim %4, 1 : memref<?x?xi32>
+  %6 = dim %3, %c0 : memref<?x?xi32>
+  %7 = dim %3, %c1 : memref<?x?xi32>
+  %8 = dim %4, %c1 : memref<?x?xi32>
   scf.for %arg4 = %c0 to %6 step %c2 {
     scf.for %arg5 = %c0 to %8 step %c3 {
       scf.for %arg6 = %c0 to %7 step %c4 {

diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index 41dd7796c7ab..b17a3f3e5efd 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -31,29 +31,29 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 // TILE-2-LABEL: func @matmul(
 //       TILE-2-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-2-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-2: %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
-//       TILE-2:   %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-2:   %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
-//       TILE-2:   %[[K:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-2:   %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[K]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-2:   %[[localK:.*]] = dim %{{.*}}, 0
+//       TILE-2:   %[[localK:.*]] = dim %{{.*}}, %c0
 //       TILE-2:   %[[szK:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localK]]]
-//       TILE-2:   %[[N:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-2:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
 //       TILE-2:   linalg.matmul(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
 
 // TILE-02-LABEL: func @matmul(
 //       TILE-02-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-02-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-02: %[[N:.*]] = dim %arg1, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02: %[[N:.*]] = dim %arg1, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-02: scf.for %[[J:.*]] = %{{.*}} to %[[N]] step %{{.*}} {
-//       TILE-02:   %[[K:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-02:   %[[localN:.*]] = dim %{{.*}}, 1
+//       TILE-02:   %[[K:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[localN:.*]] = dim %{{.*}}, %c1
 //       TILE-02:   %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]]
 //       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[K]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-02:   %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-02:   %[[localK:.*]] = dim %{{.*}}, 1
+//       TILE-02:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[localK:.*]] = dim %{{.*}}, %c1
 //       TILE-02:   %[[szK:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localK]]]
 //       TILE-02:   %[[sCj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
 //       TILE-02:   linalg.matmul(%{{.*}}, %[[sBj]], %[[sCj]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
@@ -61,15 +61,15 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 // TILE-002-LABEL: func @matmul(
 //       TILE-002-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-002-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-002: %[[ubK:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-002: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-002: scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} {
-//       TILE-002:   %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-002:   %[[localK:.*]] = dim %{{.*}}, 1
+//       TILE-002:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-002:   %[[localK:.*]] = dim %{{.*}}, %c1
 //       TILE-002:   %[[szK:.*]] = affine.min #[[bound_map]](%[[K]])[%[[localK]]]
 //       TILE-002:   %[[sAj:.*]] = subview %{{.*}}[0, %[[K]]] [%[[M]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-002:   %[[localK:.*]] = dim %{{.*}}, 0
+//       TILE-002:   %[[localK:.*]] = dim %{{.*}}, %c0
 //       TILE-002:   %[[szK:.*]] = affine.min #[[bound_map]](%[[K]])[%[[localK]]]
-//       TILE-002:   %[[N:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-002:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-002:   %[[sBj:.*]] = subview %{{.*}}[%[[K]], 0] [%[[szK]], %[[N]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
 //       TILE-002:   linalg.matmul(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D]]>
 
@@ -78,25 +78,25 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 //       TILE-234-DAG: %[[C2:.*]] = constant 2 : index
 //       TILE-234-DAG: %[[C3:.*]] = constant 3 : index
 //       TILE-234-DAG: %[[C4:.*]] = constant 4 : index
-//       TILE-234: %[[ubM:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-234: %[[ubK:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-234: %[[ubN:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-234: %[[ubM:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-234: %[[ubK:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-234: %[[ubN:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-234:  scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[ubM]] step %{{.*}} {
 //       TILE-234:    scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[ubN]] step %{{.*}} {
 //       TILE-234:      scf.for %[[K:.*]] = %{{.*}}{{.*}} to %[[ubK]] step %{{.*}} {
-//       TILE-234:        %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-234:        %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-234:        %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
-//       TILE-234:        %[[localK:.*]] = dim %{{.*}}, 1
+//       TILE-234:        %[[localK:.*]] = dim %{{.*}}, %c1
 //       TILE-234:        %[[szK:.*]] = affine.min #[[bound_map_4]](%[[K]])[%[[localK]]]
 //       TILE-234:        %[[sAik:.*]] = subview %{{.*}}[%[[I]], %[[K]]] [%[[szM]], %[[szK]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-234:        %[[localK:.*]] = dim %{{.*}}, 0
+//       TILE-234:        %[[localK:.*]] = dim %{{.*}}, %c0
 //       TILE-234:        %[[szK:.*]] = affine.min #[[bound_map_4]](%[[K]])[%[[localK]]]
-//       TILE-234:        %[[localN:.*]] = dim %{{.*}}, 1
+//       TILE-234:        %[[localN:.*]] = dim %{{.*}}, %c1
 //       TILE-234:        %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
 //       TILE-234:        %[[sBkj:.*]] = subview %{{.*}}[%[[K]], %[[J]]] [%[[szK]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-234:        %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-234:        %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-234:        %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
-//       TILE-234:        %[[localN:.*]] = dim %{{.*}}, 1
+//       TILE-234:        %[[localN:.*]] = dim %{{.*}}, %c1
 //       TILE-234:        %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
 //       TILE-234:        %[[sCij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
 //
@@ -173,13 +173,13 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 //  TILE-2-SAME: %[[ARG2:[0-9a-zA-Z]*]]: memref
 //       TILE-2-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-2-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-2: %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
-//       TILE-2:   %[[localM:.*]] = dim %[[ARG0]], 0
+//       TILE-2:   %[[localM:.*]] = dim %[[ARG0]], %c0
 //       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
-//       TILE-2:   %[[N:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-2:   %[[N:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]], 0] [%[[szM]], %[[N]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-2:   %[[localN:.*]] = dim %{{.*}}, 0
+//       TILE-2:   %[[localN:.*]] = dim %{{.*}}, %c0
 //       TILE-2:   %[[szN:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localN]]]
 //       TILE-2:   %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szN]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
 //       TILE-2:   linalg.matvec(%[[sAi]], %{{.*}}, %[[sCi]]) : memref<?x?xf32, #[[strided2D]]>, memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>
@@ -190,13 +190,13 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 // TILE-02-SAME: %[[ARG2:[0-9a-zA-Z]*]]: memref
 //       TILE-02-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-02-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-02: %[[K:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02: %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-02: scf.for %[[J]] = %{{.*}}{{.*}} to %[[K]] step %{{.*}} {
-//       TILE-02:   %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-02:   %[[localN:.*]] = dim %{{.*}}, 1
+//       TILE-02:   %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-02:   %[[localN:.*]] = dim %{{.*}}, %c1
 //       TILE-02:   %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]]
 //       TILE-02:   %[[sAj:.*]] = subview %{{.*}}[0, %[[J]]] [%[[M]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-02:   %[[localN:.*]] = dim %{{.*}}, 0
+//       TILE-02:   %[[localN:.*]] = dim %{{.*}}, %c0
 //       TILE-02:   %[[szN:.*]] = affine.min #[[bound_map]](%[[J]])[%[[localN]]]
 //       TILE-02:   %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
 //       TILE-02:   linalg.matvec(%[[sAj]], %[[sBj]], %{{.*}}) : memref<?x?xf32, #[[strided2D]]>, memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>
@@ -214,19 +214,19 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 //       TILE-234-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-234-DAG: %[[C2:.*]] = constant 2 : index
 //       TILE-234-DAG: %[[C3:.*]] = constant 3 : index
-//       TILE-234: %[[M:.*]] = dim %{{.*}}, 0 : memref<?x?xf32, #[[strided2D]]>
-//       TILE-234: %[[K:.*]] = dim %{{.*}}, 1 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-234: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?x?xf32, #[[strided2D]]>
+//       TILE-234: %[[K:.*]] = dim %{{.*}}, %c1 : memref<?x?xf32, #[[strided2D]]>
 //       TILE-234:  scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
 //       TILE-234:    scf.for %[[J:.*]] = %{{.*}}{{.*}} to %[[K]] step %{{.*}} {
-//       TILE-234:      %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-234:      %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-234:      %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
-//       TILE-234:      %[[localN:.*]] = dim %{{.*}}, 1
+//       TILE-234:      %[[localN:.*]] = dim %{{.*}}, %c1
 //       TILE-234:      %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
 //       TILE-234:      %[[sAij:.*]] = subview %{{.*}}[%[[I]], %[[J]]] [%[[szM]], %[[szN]]] [1, 1] : memref<?x?xf32, #[[strided2D]]> to memref<?x?xf32, #[[strided2D]]>
-//       TILE-234:      %[[localN:.*]] = dim %{{.*}}, 0
+//       TILE-234:      %[[localN:.*]] = dim %{{.*}}, %c0
 //       TILE-234:      %[[szN:.*]] = affine.min #[[bound_map_3]](%[[J]])[%[[localN]]]
 //       TILE-234:      %[[sBj:.*]] = subview %{{.*}}[%[[J]]] [%[[szN]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
-//       TILE-234:      %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-234:      %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-234:      %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
 //       TILE-234:      %[[sCi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
 //
@@ -239,12 +239,12 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
 // TILE-2-LABEL: func @dot(
 //       TILE-2-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-2-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-2: %[[M:.*]] = dim %{{.*}}, 0 : memref<?xf32, #[[strided1D]]>
+//       TILE-2: %[[M:.*]] = dim %{{.*}}, %c0 : memref<?xf32, #[[strided1D]]>
 //       TILE-2: scf.for %[[I:.*]] = %{{.*}}{{.*}} to %[[M]] step %{{.*}} {
-//       TILE-2:   %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-2:   %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
 //       TILE-2:   %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
-//       TILE-2:   %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-2:   %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-2:   %[[szM:.*]] = affine.min #[[bound_map]](%[[I]])[%[[localM]]]
 //       TILE-2:   %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
 //       TILE-2:   linalg.dot(%[[sAi]], %[[sBi]], {{.*}}) : memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>, memref<f32>
@@ -258,12 +258,12 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
 // TILE-234-LABEL: func @dot(
 //       TILE-234-DAG: %[[C0:.*]] = constant 0 : index
 //       TILE-234-DAG: %[[C2:.*]] = constant 2 : index
-//       TILE-234:  %[[ubK:.*]] = dim %{{.*}}, 0 : memref<?xf32, #[[strided1D]]>
+//       TILE-234:  %[[ubK:.*]] = dim %{{.*}}, %c0 : memref<?xf32, #[[strided1D]]>
 //       TILE-234:  scf.for %[[I:.*]] = %{{.*}} to %[[ubK]] step %{{.*}} {
-//       TILE-234:    %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-234:    %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-234:    %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
 //       TILE-234:    %[[sAi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
-//       TILE-234:    %[[localM:.*]] = dim %{{.*}}, 0
+//       TILE-234:    %[[localM:.*]] = dim %{{.*}}, %c0
 //       TILE-234:    %[[szM:.*]] = affine.min #[[bound_map_2]](%[[I]])[%[[localM]]]
 //       TILE-234:    %[[sBi:.*]] = subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[strided1D]]> to memref<?xf32, #[[strided1D]]>
 //       TILE-234:    linalg.dot(%[[sAi]], %[[sBi]], %{{.*}}) : memref<?xf32, #[[strided1D]]>, memref<?xf32, #[[strided1D]]>, memref<f32>

diff --git a/mlir/test/Dialect/Linalg/tile_conv.mlir b/mlir/test/Dialect/Linalg/tile_conv.mlir
index 4b7698cb9b5a..86300f87938e 100644
--- a/mlir/test/Dialect/Linalg/tile_conv.mlir
+++ b/mlir/test/Dialect/Linalg/tile_conv.mlir
@@ -15,30 +15,30 @@ func @conv(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1:
 //       TILE-23004-DAG: %[[C2:.*]] = constant 2 : index
 //       TILE-23004-DAG: %[[C3:.*]] = constant 3 : index
 //       TILE-23004-DAG: %[[C4:.*]] = constant 4 : index
-//       TILE-23004:   %[[Q:.*]] = dim %{{.*}}, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:   %[[B:.*]] = dim %{{.*}}, 0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:   %[[PaddedInput0:.*]] = dim %{{.*}}, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:   %[[X0:.*]] = dim %{{.*}}, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:   %[[Q:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:   %[[B:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:   %[[PaddedInput0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:   %[[X0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
 //       TILE-23004:   scf.for %[[ivI:.*]] = %{{.*}} to %[[B]] step %{{.*}} {
 //       TILE-23004:     scf.for %[[ivJ:.*]] = %{{.*}} to %[[X0]] step %{{.*}} {
 //       TILE-23004:       scf.for %[[ivK:.*]] = %{{.*}} to %[[Q]] step %{{.*}} {
-//       TILE-23004:         %[[Z0:.*]] = dim %{{.*}}, 0 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[Z1:.*]] = dim %{{.*}}, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[Z2:.*]] = dim %{{.*}}, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[Z0:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[Z1:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[Z2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
 //       TILE-23004:         %[[szK:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[Z2]]]
-//       TILE-23004:         %[[K:.*]] = dim %{{.*}}, 3 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[K:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
 //       TILE-23004:         %[[FilterView:.*]] = subview %{{.*}}[0, 0, %[[ivK]], 0] [%[[Z0]], %[[Z1]], %[[szK]], %[[K]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
 //
 //       TILE-23004:         %[[J1:.*]] = affine.apply #[[D0x30pS0x10]](%[[ivJ]])
-//       TILE-23004:         %[[PaddedInput0b:.*]] = dim %{{.*}}, 1 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[PaddedInput0b:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
 //       TILE-23004:         %[[I1pStep:.*]] = affine.min #[[S0x10p90D0x30pS1]](%[[ivJ]])[%[[PaddedInput0]], %[[PaddedInput0b]]]
-//       TILE-23004:         %[[SZ2:.*]] = dim %{{.*}}, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[dim3:.*]] = dim %{{.*}}, 3
+//       TILE-23004:         %[[SZ2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[dim3:.*]] = dim %{{.*}}, %c3
 //       TILE-23004:         %[[sz3:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[dim3]]]
 //       TILE-23004:         %[[InputView:.*]] = subview %{{.*}}[%[[ivI]], %[[J1]], 0, %[[ivK]]] [%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
 //
-//       TILE-23004:         %[[X0:.*]] = dim %{{.*}}, 2 : memref<?x?x?x?xf32, #[[strided4D]]>
-//       TILE-23004:         %[[X1:.*]] = dim %{{.*}}, 3 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[X0:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
+//       TILE-23004:         %[[X1:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
 //       TILE-23004:         %[[OutputView:.*]] = subview %{{.*}}[%[[ivI]], %[[ivJ]], 0, 0] [%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
 //
 //       TILE-23004:         linalg.conv(%[[FilterView]], %[[InputView]], %[[OutputView]]) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, #[[strided4D]]>, memref<?x?x?x?xf32, #[[strided4D]]>, memref<?x?x?x?xf32, #[[strided4D]]>

diff --git a/mlir/test/Dialect/Linalg/tile_conv_padding.mlir b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
index 273f64913159..7bcaa25f210b 100644
--- a/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
+++ b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
@@ -21,18 +21,18 @@ func @conv_padding(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>,
 //  TILE-20000-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>)
 //   TILE-20000-DAG:   %[[C0:.*]] = constant 0 : index
 //   TILE-20000-DAG:   %[[C2:.*]] = constant 2 : index
-//       TILE-20000:   %[[B:.*]] = dim %[[ARG1]], 0
+//       TILE-20000:   %[[B:.*]] = dim %[[ARG1]], %c0
 //       TILE-20000:   scf.for %[[ivI:.*]] = %[[C0]] to %[[B]] step %[[C2]] {
-//       TILE-20000:     %[[DIM10:.*]] = dim %[[ARG1]], 0
+//       TILE-20000:     %[[DIM10:.*]] = dim %[[ARG1]], %c0
 //       TILE-20000:     %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM10]]]
-//       TILE-20000:     %[[DIM11:.*]] = dim %[[ARG1]], 1
-//       TILE-20000:     %[[DIM12:.*]] = dim %[[ARG1]], 2
-//       TILE-20000:     %[[DIM13:.*]] = dim %[[ARG1]], 3
+//       TILE-20000:     %[[DIM11:.*]] = dim %[[ARG1]], %c1
+//       TILE-20000:     %[[DIM12:.*]] = dim %[[ARG1]], %c2
+//       TILE-20000:     %[[DIM13:.*]] = dim %[[ARG1]], %c3
 //       TILE-20000:     %[[SUBVIEW1:.*]] = subview %[[ARG1]][%[[ivI]], 0, 0, 0] [%[[EXTENT]], %[[DIM11]], %[[DIM12]], %[[DIM13]]]
-//       TILE-20000:     %[[DIM20:.*]] = dim %[[ARG2]], 0
+//       TILE-20000:     %[[DIM20:.*]] = dim %[[ARG2]], %c0
 //       TILE-20000:     %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM20]]]
-//       TILE-20000:     %[[DIM21:.*]] = dim %[[ARG2]], 1
-//       TILE-20000:     %[[DIM22:.*]] = dim %[[ARG2]], 2
-//       TILE-20000:     %[[DIM23:.*]] = dim %[[ARG2]], 3
+//       TILE-20000:     %[[DIM21:.*]] = dim %[[ARG2]], %c1
+//       TILE-20000:     %[[DIM22:.*]] = dim %[[ARG2]], %c2
+//       TILE-20000:     %[[DIM23:.*]] = dim %[[ARG2]], %c3
 //       TILE-20000:     %[[SUBVIEW2:.*]] = subview %[[ARG2]][%[[ivI]], 0, 0, 0] [%[[EXTENT]], %[[DIM21]], %[[DIM22]], %[[DIM23]]]
 //       TILE-20000:     linalg.conv(%[[ARG0]], %[[SUBVIEW1]], %[[SUBVIEW2]])

diff --git a/mlir/test/Dialect/Linalg/tile_parallel.mlir b/mlir/test/Dialect/Linalg/tile_parallel.mlir
index 18d9d2016b1d..ad38095efc46 100644
--- a/mlir/test/Dialect/Linalg/tile_parallel.mlir
+++ b/mlir/test/Dialect/Linalg/tile_parallel.mlir
@@ -27,7 +27,7 @@ func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // TILE-2-SAME:    [[LHS:%.*]]: {{.*}}, [[RHS:%.*]]: {{.*}}, [[SUM:%.*]]: {{.*}}) {
 // TILE-2-DAG: [[C0:%.*]] = constant 0 : index
 // TILE-2-DAG: [[C2:%.*]] = constant 2 : index
-// TILE-2: [[LHS_ROWS:%.*]] = dim [[LHS]], 0
+// TILE-2: [[LHS_ROWS:%.*]] = dim [[LHS]], %c0
 // TILE-2: scf.parallel ([[I:%.*]]) = ([[C0]]) to ([[LHS_ROWS]]) step ([[C2]]) {
 // TILE-2-NO: scf.parallel
 // TILE-2:   [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
@@ -39,7 +39,7 @@ func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // TILE-02-SAME:    [[LHS:%.*]]: {{.*}}, [[RHS:%.*]]: {{.*}}, [[SUM:%.*]]: {{.*}}) {
 // TILE-02-DAG: [[C0:%.*]] = constant 0 : index
 // TILE-02-DAG: [[C2:%.*]] = constant 2 : index
-// TILE-02: [[LHS_COLS:%.*]] = dim [[LHS]], 1
+// TILE-02: [[LHS_COLS:%.*]] = dim [[LHS]], %c1
 // TILE-02: scf.parallel ([[I:%.*]]) = ([[C0]]) to ([[LHS_COLS]]) step ([[C2]]) {
 // TILE-02-NO: scf.parallel
 // TILE-02:   [[LHS_SUBVIEW:%.*]] = subview [[LHS]]
@@ -57,8 +57,8 @@ func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // TILE-234-DAG: [[C0:%.*]] = constant 0 : index
 // TILE-234-DAG: [[C2:%.*]] = constant 2 : index
 // TILE-234-DAG: [[C3:%.*]] = constant 3 : index
-// TILE-234: [[LHS_ROWS:%.*]] = dim [[LHS]], 0
-// TILE-234: [[LHS_COLS:%.*]] = dim [[LHS]], 1
+// TILE-234: [[LHS_ROWS:%.*]] = dim [[LHS]], %c0
+// TILE-234: [[LHS_COLS:%.*]] = dim [[LHS]], %c1
 // TILE-234: scf.parallel ([[I:%.*]], [[J:%.*]]) = ([[C0]], [[C0]]) to ([[LHS_ROWS]], [[LHS_COLS]]) step ([[C2]], [[C3]]) {
 // TILE-234-NO: scf.parallel
 // TILE-234:   [[LHS_SUBVIEW:%.*]] = subview [[LHS]]

diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index 9a022082b3be..a70421f35831 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -252,9 +252,9 @@ func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
   %c4000 = constant 4000 : index
   %c0 = constant 0 : index
   %c1 = constant 1 : index
-  %0 = dim %arg0, 0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
-  %1 = dim %arg0, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
-  %2 = dim %arg1, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %0 = dim %arg0, %c0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %1 = dim %arg0, %c1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %2 = dim %arg1, %c1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
   scf.for %arg3 = %c0 to %0 step %c2000 {
     scf.for %arg4 = %c0 to %2 step %c3000 {
       scf.for %arg5 = %c0 to %1 step %c4000 {
@@ -302,9 +302,9 @@ func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?
   %c4000 = constant 4000 : index
   %c0 = constant 0 : index
   %c1 = constant 1 : index
-  %0 = dim %arg0, 0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
-  %1 = dim %arg0, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
-  %2 = dim %arg1, 1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %0 = dim %arg0, %c0 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %1 = dim %arg0, %c1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
+  %2 = dim %arg1, %c1 : memref<?x?xf32, offset: ?, strides: [?, 1]>
   scf.for %arg3 = %c0 to %0 step %c2000 {
     scf.for %arg4 = %c0 to %2 step %c3000 {
       scf.for %arg5 = %c0 to %1 step %c4000 {
@@ -381,9 +381,9 @@ func @tile_permute_parallel_loop(%arg0: memref<?x?xf32>,
 //   CHECK-DAG:   %[[C8:.*]] = constant 8 : index
 //   CHECK-DAG:   %[[C4:.*]] = constant 4 : index
 //   CHECK-DAG:   %[[C0:.*]] = constant 0 : index
-//   CHECK-DAG:   %[[D0:.*]] = dim %[[ARG0]], 0
-//   CHECK-DAG:   %[[D1:.*]] = dim %[[ARG0]], 1
-//   CHECK-DAG:   %[[D2:.*]] = dim %[[ARG1]], 1
+//   CHECK-DAG:   %[[D0:.*]] = dim %[[ARG0]], %c0
+//   CHECK-DAG:   %[[D1:.*]] = dim %[[ARG0]], %c1
+//   CHECK-DAG:   %[[D2:.*]] = dim %[[ARG1]], %c1
 //       CHECK:   scf.parallel (%{{.*}}) = (%[[C0]]) to (%[[D2]]) step (%[[C8]])
 //       CHECK:     scf.for %{{.*}} = %[[C0]] to %[[D1]] step %[[C4]]
 //       CHECK:       scf.parallel (%{{.*}}) = (%[[C0]]) to (%[[D0]]) step (%[[C16]])

diff --git a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
index d7c0f1d3074e..d11b4d0625e3 100644
--- a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
+++ b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
@@ -7,8 +7,8 @@ func @parallel_loop(%outer_i0: index, %outer_i1: index, %A: memref<?x?xf32>, %B:
                     %C: memref<?x?xf32>, %result: memref<?x?xf32>) {
   %c0 = constant 0 : index
   %c1 = constant 1 : index
-  %d0 = dim %A, 0 : memref<?x?xf32>
-  %d1 = dim %A, 1 : memref<?x?xf32>
+  %d0 = dim %A, %c0 : memref<?x?xf32>
+  %d1 = dim %A, %c1 : memref<?x?xf32>
   %b0 = affine.min #map0()[%d0, %outer_i0]
   %b1 = affine.min #map1()[%d1, %outer_i1]
   scf.parallel (%i0, %i1) = (%c0, %c0) to (%b0, %b1) step (%c1, %c1) {
@@ -24,8 +24,8 @@ func @parallel_loop(%outer_i0: index, %outer_i1: index, %A: memref<?x?xf32>, %B:
 // CHECK-SAME:                        [[VAL_0:%.*]]: index, [[VAL_1:%.*]]: index, [[VAL_2:%.*]]: memref<?x?xf32>, [[VAL_3:%.*]]: memref<?x?xf32>, [[VAL_4:%.*]]: memref<?x?xf32>, [[VAL_5:%.*]]: memref<?x?xf32>) {
 // CHECK:           [[VAL_6:%.*]] = constant 0 : index
 // CHECK:           [[VAL_7:%.*]] = constant 1 : index
-// CHECK:           [[VAL_8:%.*]] = dim [[VAL_2]], 0 : memref<?x?xf32>
-// CHECK:           [[VAL_9:%.*]] = dim [[VAL_2]], 1 : memref<?x?xf32>
+// CHECK:           [[VAL_8:%.*]] = dim [[VAL_2]], [[VAL_6]] : memref<?x?xf32>
+// CHECK:           [[VAL_9:%.*]] = dim [[VAL_2]], [[VAL_7]] : memref<?x?xf32>
 // CHECK:           [[VAL_10:%.*]] = affine.min #map0(){{\[}}[[VAL_8]], [[VAL_0]]]
 // CHECK:           [[VAL_11:%.*]] = affine.min #map1(){{\[}}[[VAL_9]], [[VAL_1]]]
 // CHECK:           [[VAL_12:%.*]] = constant 1024 : index

diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 4d0888e55312..4b01f7110532 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -663,9 +663,9 @@ TEST_FUNC(tile_2d) {
   // clang-format off
   // CHECK-LABEL: func @tile_2d
   //       CHECK: %[[ZERO:.*]] = constant 0 : index
-  //       CHECK: %[[M:[0-9]+]] = dim %arg2, 0 : memref<?x?x?xf32>
-  //  CHECK-NEXT: %[[N:[0-9]+]] = dim %arg2, 1 : memref<?x?x?xf32>
-  //  CHECK-NEXT: %[[P:[0-9]+]] = dim %arg2, 2 : memref<?x?x?xf32>
+  //       CHECK: %[[M:[0-9]+]] = dim %arg2, %c0{{[_0-9]*}} : memref<?x?x?xf32>
+  //       CHECK: %[[N:[0-9]+]] = dim %arg2, %c1{{[_0-9]*}} : memref<?x?x?xf32>
+  //       CHECK: %[[P:[0-9]+]] = dim %arg2, %c2{{[_0-9]*}} : memref<?x?x?xf32>
   //       CHECK:   affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[M]]) step 512 {
   //  CHECK-NEXT:     affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[N]]) step 1024 {
   //  CHECK-NEXT:       affine.for %{{.*}} = affine_map<(d0) -> (d0)>(%[[ZERO]]) to affine_map<(d0) -> (d0)>(%[[P]]) {

diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 7727fa5e0363..e524461f4b0c 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -20,68 +20,70 @@
 // CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 // CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
 
-// CHECK-LABEL: func @func_with_ops(%arg0: f32) {
+// CHECK-LABEL: func @func_with_ops
+// CHECK-SAME: %[[ARG:.*]]: f32
 func @func_with_ops(f32) {
 ^bb0(%a : f32):
-  // CHECK: %0 = "getTensor"() : () -> tensor<4x4x?xf32>
+  // CHECK: %[[T:.*]] = "getTensor"() : () -> tensor<4x4x?xf32>
   %t = "getTensor"() : () -> tensor<4x4x?xf32>
 
-  // CHECK: %1 = dim %0, 2 : tensor<4x4x?xf32>
-  %t2 = "std.dim"(%t){index = 2} : (tensor<4x4x?xf32>) -> index
+  // CHECK: %[[C2:.*]] = constant 2 : index
+  // CHECK-NEXT: %{{.*}} = dim %[[T]], %[[C2]] : tensor<4x4x?xf32>
+  %c2 = constant 2 : index
+  %t2 = "std.dim"(%t, %c2) : (tensor<4x4x?xf32>, index) -> index
 
-  // CHECK: %2 = addf %arg0, %arg0 : f32
+  // CHECK: %{{.*}} = addf %[[ARG]], %[[ARG]] : f32
   %x = "std.addf"(%a, %a) : (f32,f32) -> (f32)
 
-  // CHECK:   return
+  // CHECK: return
   return
 }
 
 // CHECK-LABEL: func @standard_instrs(%arg0: tensor<4x4x?xf32>, %arg1: f32, %arg2: i32, %arg3: index, %arg4: i64, %arg5: f16) {
 func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
 ^bb42(%t: tensor<4x4x?xf32>, %f: f32, %i: i32, %idx : index, %j: i64, %half: f16):
-  // CHECK: %0 = dim %arg0, 2 : tensor<4x4x?xf32>
-  %a = "std.dim"(%t){index = 2} : (tensor<4x4x?xf32>) -> index
+  // CHECK: %[[C2:.*]] = constant 2 : index
+  // CHECK: %[[A2:.*]] = dim %arg0, %[[C2]] : tensor<4x4x?xf32>
+  %c2 = constant 2 : index
+  %a2 = dim %t, %c2 : tensor<4x4x?xf32>
 
-  // CHECK: %1 = dim %arg0, 2 : tensor<4x4x?xf32>
-  %a2 = dim %t, 2 : tensor<4x4x?xf32>
-
-  // CHECK: %2 = addf %arg1, %arg1 : f32
+  // CHECK: %[[F2:.*]] = addf %arg1, %arg1 : f32
   %f2 = "std.addf"(%f, %f) : (f32,f32) -> f32
 
-  // CHECK: %3 = addf %2, %2 : f32
+  // CHECK: %[[F3:.*]] = addf %[[F2]], %[[F2]] : f32
   %f3 = addf %f2, %f2 : f32
 
-  // CHECK: %4 = addi %arg2, %arg2 : i32
+  // CHECK: %[[I2:.*]] = addi %arg2, %arg2 : i32
   %i2 = "std.addi"(%i, %i) : (i32,i32) -> i32
 
-  // CHECK: %5 = addi %4, %4 : i32
+  // CHECK: %[[I3:.*]] = addi %[[I2]], %[[I2]] : i32
   %i3 = addi %i2, %i2 : i32
 
-  // CHECK: %{{[0-9]+}} = addi %arg3, %arg3 : index
+  // CHECK: %[[IDX1:.*]] = addi %arg3, %arg3 : index
   %idx1 = addi %idx, %idx : index
 
-  // CHECK: %{{[0-9]+}} = addi %arg3, %{{[0-9]+}} : index
+  // CHECK: %[[IDX2:.*]] = addi %arg3, %[[IDX1]] : index
   %idx2 = "std.addi"(%idx, %idx1) : (index, index) -> index
 
-  // CHECK: %8 = subf %arg1, %arg1 : f32
+  // CHECK: %[[F4:.*]] = subf %arg1, %arg1 : f32
   %f4 = "std.subf"(%f, %f) : (f32,f32) -> f32
 
-  // CHECK: %9 = subf %8, %8 : f32
+  // CHECK: %[[F5:.*]] = subf %[[F4]], %[[F4]] : f32
   %f5 = subf %f4, %f4 : f32
 
-  // CHECK: %10 = subi %arg2, %arg2 : i32
+  // CHECK: %[[I4:.*]] = subi %arg2, %arg2 : i32
   %i4 = "std.subi"(%i, %i) : (i32,i32) -> i32
 
-  // CHECK: %11 = subi %10, %10 : i32
+  // CHECK: %[[I5:.*]] = subi %[[I4]], %[[I4]] : i32
   %i5 = subi %i4, %i4 : i32
 
-  // CHECK: %12 = mulf %2, %2 : f32
+  // CHECK: %[[F6:.*]] = mulf %[[F2]], %[[F2]] : f32
   %f6 = mulf %f2, %f2 : f32
 
-  // CHECK: %13 = muli %4, %4 : i32
+  // CHECK: %[[I6:.*]] = muli %[[I2]], %[[I2]] : i32
   %i6 = muli %i2, %i2 : i32
 
-  // CHECK: %[[C0:.*]] = create_complex %[[F2:.*]], %[[F2]] : complex<f32>
+  // CHECK: %[[C0:.*]] = create_complex %[[F2]], %[[F2]] : complex<f32>
   %c0 = "std.create_complex"(%f2, %f2) : (f32, f32) -> complex<f32>
 
   // CHECK: %[[C1:.*]] = create_complex %[[F2]], %[[F2]] : complex<f32>
@@ -465,7 +467,7 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
   // CHECK: %{{[0-9]+}} = shift_left %arg2, %arg2 : i32
   %124 = "std.shift_left"(%i, %i) : (i32, i32) -> i32
 
-  // CHECK:%{{[0-9]+}} = shift_left %4, %4 : i32
+  // CHECK:%{{[0-9]+}} = shift_left %[[I2]], %[[I2]] : i32
   %125 = shift_left %i2, %i2 : i32
 
   // CHECK: %{{[0-9]+}} = shift_left %arg3, %arg3 : index
@@ -480,7 +482,7 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
   // CHECK: %{{[0-9]+}} = shift_right_signed %arg2, %arg2 : i32
   %129 = "std.shift_right_signed"(%i, %i) : (i32, i32) -> i32
 
-  // CHECK:%{{[0-9]+}} = shift_right_signed %4, %4 : i32
+  // CHECK:%{{[0-9]+}} = shift_right_signed %[[I2]], %[[I2]] : i32
   %130 = shift_right_signed %i2, %i2 : i32
 
   // CHECK: %{{[0-9]+}} = shift_right_signed %arg3, %arg3 : index
@@ -495,7 +497,7 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
   // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg2, %arg2 : i32
   %134 = "std.shift_right_unsigned"(%i, %i) : (i32, i32) -> i32
 
-  // CHECK:%{{[0-9]+}} = shift_right_unsigned %4, %4 : i32
+  // CHECK:%{{[0-9]+}} = shift_right_unsigned %[[I2]], %[[I2]] : i32
   %135 = shift_right_unsigned %i2, %i2 : i32
 
   // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg3, %arg3 : index
@@ -778,10 +780,13 @@ func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {
   return
 }
 
-// CHECK-LABEL: func @test_dimop(%arg0
+// CHECK-LABEL: func @test_dimop
+// CHECK-SAME: %[[ARG:.*]]: tensor<4x4x?xf32>
 func @test_dimop(%arg0: tensor<4x4x?xf32>) {
-  // CHECK: %0 = dim %arg0, 2 : tensor<4x4x?xf32>
-  %0 = dim %arg0, 2 : tensor<4x4x?xf32>
+  // CHECK: %[[C2:.*]] = constant 2 : index
+  // CHECK: %{{.*}} = dim %[[ARG]], %[[C2]] : tensor<4x4x?xf32>
+  %c2 = constant 2 : index
+  %0 = dim %arg0, %c2 : tensor<4x4x?xf32>
   // use dim as an index to ensure type correctness
   %1 = affine.apply affine_map<(d0) -> (d0)>(%0)
   return

diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index 1ccf322ee8b5..c8908a0fded6 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -1,24 +1,8 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics
 
-func @dim(tensor<1xf32>) {
-^bb(%0: tensor<1xf32>):
-  "std.dim"(%0){index = "xyz"} : (tensor<1xf32>)->index // expected-error {{attribute 'index' failed to satisfy constraint: arbitrary integer attribute}}
-  return
-}
-
-// -----
-
-func @dim2(tensor<1xf32>) {
-^bb(%0: tensor<1xf32>):
-  "std.dim"(){index = "xyz"} : ()->index // expected-error {{'std.dim' op requires a single operand}}
-  return
-}
-
-// -----
-
-func @dim3(tensor<1xf32>) {
-^bb(%0: tensor<1xf32>):
-  "std.dim"(%0){index = 1} : (tensor<1xf32>)->index // expected-error {{'std.dim' op index is out of range}}
+func @dim(%arg : tensor<1x?xf32>) {
+  %c2 = constant 2 : index
+  dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'std.dim' op index is out of range}}
   return
 }
 

diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index f1ad305d5c87..dc5c9a7253b3 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -28,7 +28,8 @@ func @test_subi_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
 func @dim(%arg0: tensor<8x4xf32>) -> index {
 
   // CHECK: %c4 = constant 4 : index
-  %0 = dim %arg0, 1 : tensor<8x4xf32>
+  %c1 = constant 1 : index
+  %0 = dim %arg0, %c1 : tensor<8x4xf32>
 
   // CHECK-NEXT: return %c4
   return %0 : index
@@ -51,7 +52,8 @@ func @test_commutative(%arg0: i32) -> (i32, i32) {
 
 // CHECK-LABEL: func @trivial_dce
 func @trivial_dce(%arg0: tensor<8x4xf32>) {
-  %0 = dim %arg0, 1 : tensor<8x4xf32>
+  %c1 = constant 1 : index
+  %0 = dim %arg0, %c1 : tensor<8x4xf32>
   // CHECK-NEXT: return
   return
 }
@@ -314,7 +316,7 @@ func @memref_cast_folding(%arg0: memref<4 x f32>, %arg1: f32) -> (f32, f32) {
   %0 = memref_cast %arg0 : memref<4xf32> to memref<?xf32>
   // CHECK-NEXT: %c0 = constant 0 : index
   %c0 = constant 0 : index
-  %dim = dim %0, 0 : memref<? x f32>
+  %dim = dim %0, %c0 : memref<? x f32>
 
   // CHECK-NEXT: affine.load %arg0[3]
   %1 = affine.load %0[%dim - 1] : memref<?xf32>
@@ -442,24 +444,25 @@ func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>,
 // CHECK-SAME: [[K:arg[0-9]+]]: index
   %c0 = constant 0 : index
   %c1 = constant 1 : index
+  %c2 = constant 2 : index
   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
   %1 = alloc(%arg1, %arg2) : memref<?x8x?xf32>
-  %2 = dim %1, 2 : memref<?x8x?xf32>
+  %2 = dim %1, %c2 : memref<?x8x?xf32>
   affine.for %arg3 = 0 to %2 {
     %3 = alloc(%arg0) : memref<?xi8>
-    %ub = dim %3, 0 : memref<?xi8>
+    %ub = dim %3, %c0 : memref<?xi8>
     affine.for %arg4 = 0 to %ub {
-      %s = dim %0, 0 : memref<?x?xf32>
+      %s = dim %0, %c0 : memref<?x?xf32>
       %v = std.view %3[%c0][%arg4, %s] : memref<?xi8> to memref<?x?xf32>
       %sv = subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref<?x?xf32> to memref<?x?xf32, #map1>
-      %l = dim %v, 1 : memref<?x?xf32>
-      %u = dim %sv, 0 : memref<?x?xf32, #map1>
+      %l = dim %v, %c1 : memref<?x?xf32>
+      %u = dim %sv, %c0 : memref<?x?xf32, #map1>
       affine.for %arg5 = %l to %u {
         "foo"() : () -> ()
       }
       %sv2 = subview %0[0, 0][17, %arg4][1, 1] : memref<?x?xf32> to memref<17x?xf32, #map3>
-      %l2 = dim %v, 1 : memref<?x?xf32>
-      %u2 = dim %sv2, 1 : memref<17x?xf32, #map3>
+      %l2 = dim %v, %c1 : memref<?x?xf32>
+      %u2 = dim %sv2, %c1 : memref<17x?xf32, #map3>
       scf.for %arg5 = %l2 to %u2 step %c1 {
         "foo"() : () -> ()
       }
@@ -480,9 +483,9 @@ func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>,
   %B = view %BUF[%c0][%K, %N] : memref<?xi8> to memref<?x?xf32>
   %C = view %BUF[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32>
 
-  %M_ = dim %A, 0 : memref<?x?xf32>
-  %K_ = dim %A, 1 : memref<?x?xf32>
-  %N_ = dim %C, 1 : memref<?x?xf32>
+  %M_ = dim %A, %c0 : memref<?x?xf32>
+  %K_ = dim %A, %c1 : memref<?x?xf32>
+  %N_ = dim %C, %c1 : memref<?x?xf32>
   scf.for %i = %c0 to %M_ step %c1 {
     scf.for %j = %c0 to %N_ step %c1 {
       scf.for %k = %c0 to %K_ step %c1 {
@@ -855,8 +858,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
   store %v0, %20[%arg1, %arg1] : memref<12x4xf32, offset: ?, strides:[4, 1]>
 
   // Test: dim on subview is rewritten to size operand.
-  %7 = dim %4, 0 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
-  %8 = dim %4, 1 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
+  %7 = dim %4, %c0 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
+  %8 = dim %4, %c1 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
 
   // CHECK: return %[[C7]], %[[C11]]
   return %7, %8 : index, index

diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
index 801d59185b35..5562450a6955 100644
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -382,7 +382,8 @@ func @muli_splat_vector() -> vector<4xi32> {
 func @dim(%x : tensor<8x4xf32>) -> index {
 
   // CHECK:[[C4:%.+]] = constant 4 : index
-  %0 = dim %x, 1 : tensor<8x4xf32>
+  %c1 = constant 1 : index
+  %0 = dim %x, %c1 : tensor<8x4xf32>
 
   // CHECK-NEXT: return [[C4]]
   return %0 : index

diff --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
index 3c93cc8ced68..aab4f4a618c9 100644
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -330,8 +330,10 @@ func @dynamic_shape_dma_buffer(%arg0: memref<512 x 32 x f32>) {
 
 // Double buffering for dynamic shaped buffer.
 // CHECK:       alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32, 2>
-// CHECK-NEXT:  dim %{{.*}}, 0 : memref<?x?xf32, 2>
-// CHECK-NEXT:  dim %{{.*}}, 1 : memref<?x?xf32, 2>
+// CHECK-NEXT:  %[[C0:.*]] = constant 0 : index
+// CHECK-NEXT:  dim %{{.*}}, %[[C0]] : memref<?x?xf32, 2>
+// CHECK-NEXT:  %[[C1:.*]] = constant 1 : index
+// CHECK-NEXT:  dim %{{.*}}, %[[C1]] : memref<?x?xf32, 2>
 // CHECK-NEXT:  alloc(%{{.*}}, %{{.*}}) : memref<2x?x?xf32, 2>
 // CHECK:       affine.dma_start %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}} mod 2, 0, 0], %{{.*}}[%{{.*}} mod 2, 0], %{{.*}}
   affine.for %kTT = 0 to 16 {

diff --git a/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir b/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir
index 92149c722166..c6eab3d9b984 100644
--- a/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir
+++ b/mlir/test/mlir-cpu-runner/sgemm_naive_codegen.mlir
@@ -23,15 +23,18 @@ func @main() {
   %pC = memref_cast %C : memref<16x16xf32> to memref<*xf32>
   call @print_memref_f32(%pC) : (memref<*xf32>) -> ()
 
-  %M = dim %C, 0 : memref<16x16xf32>
-  %N = dim %C, 1 : memref<16x16xf32>
-  %K = dim %A, 1 : memref<16x16xf32>
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+
+  %M = dim %C, %c0 : memref<16x16xf32>
+  %N = dim %C, %c1 : memref<16x16xf32>
+  %K = dim %A, %c1 : memref<16x16xf32>
 
   %f1 = muli %M, %N : index
   %f2 = muli %f1, %K : index
 
   // 2*M*N*K.
-  %c2 = constant 2 : index
   %f3 = muli %c2, %f2 : index
   %num_flops = muli %reps, %f3 : index
   %num_flops_i = index_cast %num_flops : index to i16

diff --git a/mlir/test/mlir-cuda-runner/all-reduce-op.mlir b/mlir/test/mlir-cuda-runner/all-reduce-op.mlir
index eb522d2910a6..67c4f96d36f4 100644
--- a/mlir/test/mlir-cuda-runner/all-reduce-op.mlir
+++ b/mlir/test/mlir-cuda-runner/all-reduce-op.mlir
@@ -4,13 +4,15 @@
 func @main() {
   %arg = alloc() : memref<2x4x13xf32>
   %dst = memref_cast %arg : memref<2x4x13xf32> to memref<?x?x?xf32>
-  %one = constant 1 : index
-  %sx = dim %dst, 2 : memref<?x?x?xf32>
-  %sy = dim %dst, 1 : memref<?x?x?xf32>
-  %sz = dim %dst, 0 : memref<?x?x?xf32>
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %sx = dim %dst, %c2 : memref<?x?x?xf32>
+  %sy = dim %dst, %c1 : memref<?x?x?xf32>
+  %sz = dim %dst, %c0 : memref<?x?x?xf32>
   %cast_dst = memref_cast %dst : memref<?x?x?xf32> to memref<*xf32>
   call @mcuMemHostRegisterFloat(%cast_dst) : (memref<*xf32>) -> ()
-  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)
+  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %sx, %block_y = %sy, %block_z = %sz) {
     %t0 = muli %tz, %block_y : index
     %t1 = addi %ty, %t0 : index

diff --git a/mlir/test/mlir-cuda-runner/all-reduce-region.mlir b/mlir/test/mlir-cuda-runner/all-reduce-region.mlir
index 69499215707e..afd3d7cb038a 100644
--- a/mlir/test/mlir-cuda-runner/all-reduce-region.mlir
+++ b/mlir/test/mlir-cuda-runner/all-reduce-region.mlir
@@ -5,7 +5,8 @@ func @main() {
   %arg = alloc() : memref<35xf32>
   %dst = memref_cast %arg : memref<35xf32> to memref<?xf32>
   %one = constant 1 : index
-  %sx = dim %dst, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %sx = dim %dst, %c0 : memref<?xf32>
   %cast_dst = memref_cast %dst : memref<?xf32> to memref<*xf32>
   call @mcuMemHostRegisterFloat(%cast_dst) : (memref<*xf32>) -> ()
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)

diff --git a/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir b/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir
index 242cc9c28c00..0ef33ea6112a 100644
--- a/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir
+++ b/mlir/test/mlir-cuda-runner/gpu-to-cubin.mlir
@@ -2,7 +2,8 @@
 
 func @other_func(%arg0 : f32, %arg1 : memref<?xf32>) {
   %cst = constant 1 : index
-  %cst2 = dim %arg1, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %cst2 = dim %arg1, %c0 : memref<?xf32>
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst, %grid_z = %cst)
              threads(%tx, %ty, %tz) in (%block_x = %cst2, %block_y = %cst, %block_z = %cst) {
     store %arg0, %arg1[%tx] : memref<?xf32>

diff --git a/mlir/test/mlir-cuda-runner/shuffle.mlir b/mlir/test/mlir-cuda-runner/shuffle.mlir
index 09fbef0095d8..0f8cdca3a8eb 100644
--- a/mlir/test/mlir-cuda-runner/shuffle.mlir
+++ b/mlir/test/mlir-cuda-runner/shuffle.mlir
@@ -5,7 +5,8 @@ func @main() {
   %arg = alloc() : memref<13xf32>
   %dst = memref_cast %arg : memref<13xf32> to memref<?xf32>
   %one = constant 1 : index
-  %sx = dim %dst, 0 : memref<?xf32>
+  %c0 = constant 0 : index
+  %sx = dim %dst, %c0 : memref<?xf32>
   %cast_dest = memref_cast %dst : memref<?xf32> to memref<*xf32>
   call @mcuMemHostRegisterFloat(%cast_dest) : (memref<*xf32>) -> ()
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)

diff --git a/mlir/test/mlir-cuda-runner/two-modules.mlir b/mlir/test/mlir-cuda-runner/two-modules.mlir
index 68c936596315..3229879d2fb5 100644
--- a/mlir/test/mlir-cuda-runner/two-modules.mlir
+++ b/mlir/test/mlir-cuda-runner/two-modules.mlir
@@ -5,7 +5,8 @@ func @main() {
   %arg = alloc() : memref<13xi32>
   %dst = memref_cast %arg : memref<13xi32> to memref<?xi32>
   %one = constant 1 : index
-  %sx = dim %dst, 0 : memref<?xi32>
+  %c0 = constant 0 : index
+  %sx = dim %dst, %c0 : memref<?xi32>
   %cast_dst = memref_cast %dst : memref<?xi32> to memref<*xi32>
   call @mcuMemHostRegisterInt32(%cast_dst) : (memref<*xi32>) -> ()
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)
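
For readers skimming these test updates: the mechanical pattern throughout
is that the integer literal that used to trail `dim` becomes a separate
`constant ... : index` SSA value passed as the second operand. A minimal
before/after sketch (the value names here are illustrative only, mirroring
the test changes above):

  // Before: dimension given as a trailing integer literal.
  %d0 = dim %A, 0 : memref<?x?xf32>

  // After: dimension given as an SSA operand of type `index`.
  %c0 = constant 0 : index
  %d0 = dim %A, %c0 : memref<?x?xf32>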


        

