[Mlir-commits] [mlir] 5661647 - [mlir][sparse] build proper insertion chain

Aart Bik llvmlistbot at llvm.org
Fri Oct 28 15:59:01 PDT 2022


Author: Aart Bik
Date: 2022-10-28T15:58:51-07:00
New Revision: 5661647e8564121287203f268524e8c41377d475

URL: https://github.com/llvm/llvm-project/commit/5661647e8564121287203f268524e8c41377d475
DIFF: https://github.com/llvm/llvm-project/commit/5661647e8564121287203f268524e8c41377d475.diff

LOG: [mlir][sparse] build proper insertion chain

The alloc->insert/compress->load chain now needs to be
represented as a proper SSA chain through loops and if
statements, so that the IR reflects the modifying behavior
(the runtime support library is forgiving about breaking
this chain, but the new codegen is not).
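
For illustration, a minimal hand-written sketch of the intended IR shape
(not taken from this patch; the #SV encoding, types, and value names are
made up): the allocated tensor is threaded through the loop as an
iter_args value, each insertion yields a fresh SSA value, and the final
load consumes the value produced by the loop rather than the original
allocation.

  %0 = bufferization.alloc_tensor() : tensor<32xf64, #SV>
  %1 = scf.for %i = %c0 to %n step %c1
      iter_args(%chain = %0) -> (tensor<32xf64, #SV>) {
    %v = memref.load %values[%i] : memref<?xf64>
    // Each insert consumes the incoming chain and yields a new value.
    %2 = sparse_tensor.insert %v into %chain[%i] : tensor<32xf64, #SV>
    scf.yield %2 : tensor<32xf64, #SV>
  }
  // Rematerialize from the end of the chain, not from %0.
  %r = sparse_tensor.load %1 hasInserts : tensor<32xf64, #SV>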

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D136966

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
    mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
    mlir/test/Dialect/SparseTensor/sparse_expand.mlir
    mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
    mlir/test/Dialect/SparseTensor/sparse_index.mlir
    mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
    mlir/test/Dialect/SparseTensor/sparse_out.mlir
    mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
    mlir/test/Dialect/SparseTensor/sparse_transpose.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index b4d986f7b9e8..1e9cadd13e15 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -929,3 +929,13 @@ void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder,
   for (unsigned i = 0; i < rank; i++)
     sizes.push_back(linalg::createOrFoldDimOp(builder, loc, src, i));
 }
+
+Operation *mlir::sparse_tensor::getTop(Operation *op) {
+  for (; isa<scf::ForOp>(op->getParentOp()) ||
+         isa<scf::WhileOp>(op->getParentOp()) ||
+         isa<scf::ParallelOp>(op->getParentOp()) ||
+         isa<scf::IfOp>(op->getParentOp());
+       op = op->getParentOp())
+    ;
+  return op;
+}
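
As a hypothetical illustration of what getTop computes (the nest below is
invented, not part of the patch): starting from an op lowered inside an
scf.if within two scf.for loops, the walk keeps moving to the parent as
long as it is an scf.for, scf.while, scf.parallel, or scf.if, so it
returns the outermost scf.for, after which the scratch buffers can be
deallocated.

  scf.for %i = %c0 to %n step %c1 {      // <- getTop returns this loop
    scf.for %j = %c0 to %m step %c1 {
      scf.if %cond {
        // ... compress lowered here; the walk starts at this op ...
      }
    }
  }
  memref.dealloc %values : memref<?xf64> // emitted after the full nest
  memref.dealloc %filled : memref<?xi1>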

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index 0559eedb1777..3228eb4c79cb 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -177,6 +177,9 @@ void genDenseTensorOrSparseConstantIterLoop(
 void sizesFromSrc(OpBuilder &builder, SmallVector<Value, 4> &sizes,
                   Location loc, Value src);
 
+/// Scans to the top of the enclosing generated loop nest.
+Operation *getTop(Operation *op);
+
 //===----------------------------------------------------------------------===//
 // Inlined constant generators.
 //

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 3c2aa637abaf..85f4c4e073ad 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -31,6 +31,9 @@ using namespace mlir::sparse_tensor;
 
 namespace {
 
+// TODO: start using these when insertions are implemented
+// static constexpr uint64_t DimSizesIdx = 0;
+// static constexpr uint64_t DimCursorIdx = 1;
 static constexpr uint64_t MemSizesIdx = 2;
 static constexpr uint64_t FieldsIdx = 3;
 
@@ -632,13 +635,7 @@ class SparseCompressConverter : public OpConversionPattern<CompressOp> {
                                      filled, index);
     rewriter.create<scf::YieldOp>(loc, fields);
     // Deallocate the buffers on exit of the full loop nest.
-    Operation *parent = op;
-    for (; isa<scf::ForOp>(parent->getParentOp()) ||
-           isa<scf::WhileOp>(parent->getParentOp()) ||
-           isa<scf::ParallelOp>(parent->getParentOp()) ||
-           isa<scf::IfOp>(parent->getParentOp());
-         parent = parent->getParentOp())
-      ;
+    Operation *parent = getTop(op);
     rewriter.setInsertionPointAfter(parent);
     rewriter.create<memref::DeallocOp>(loc, values);
     rewriter.create<memref::DeallocOp>(loc, filled);

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index c7c81767a404..f41a5798e18d 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -1060,13 +1060,7 @@ class SparseTensorCompressConverter : public OpConversionPattern<CompressOp> {
                    EmitCInterface::On);
     rewriter.replaceOp(op, adaptor.getTensor());
     // Deallocate the buffers on exit of the loop nest.
-    Operation *parent = op;
-    for (; isa<scf::ForOp>(parent->getParentOp()) ||
-           isa<scf::WhileOp>(parent->getParentOp()) ||
-           isa<scf::ParallelOp>(parent->getParentOp()) ||
-           isa<scf::IfOp>(parent->getParentOp());
-         parent = parent->getParentOp())
-      ;
+    Operation *parent = getTop(op);
     rewriter.setInsertionPointAfter(parent);
     rewriter.create<memref::DeallocOp>(loc, values);
     rewriter.create<memref::DeallocOp>(loc, filled);

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index f77fdb5534b8..82125e34d5df 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -58,7 +58,10 @@ struct CodeGen {
           std::vector<unsigned> &ts)
       : options(o), loopEmitter(tensors, /*isLastOutput=*/true,
                                 /*isSparseOut=*/op != nullptr),
-        sparseOut(op), outerParNest(nest), topSort(ts) {}
+        sparseOut(op), outerParNest(nest), topSort(ts) {
+    if (op)
+      insChain = op->get();
+  }
   /// Sparsification options.
   SparsificationOptions options;
   /// Loop emitter helper class.
@@ -74,6 +77,7 @@ struct CodeGen {
   // in the innermost loop nest (`expValues` through `expCount`).
   OpOperand *sparseOut;
   unsigned outerParNest;
+  Value insChain; // bookkeeping for insertion chain
   Value expValues;
   Value expFilled;
   Value expAdded;
@@ -560,7 +564,8 @@ static void genInsertionStore(CodeGen &codegen, OpBuilder &builder,
       assert(codegen.loopEmitter.getLoopIV(i));
       indices.push_back(codegen.loopEmitter.getLoopIV(i));
     }
-    builder.create<InsertOp>(loc, rhs, t->get(), indices);
+    codegen.insChain =
+        builder.create<InsertOp>(loc, rhs, codegen.insChain, indices);
     return;
   }
   // Generates insertion code along expanded access pattern.
@@ -633,13 +638,26 @@ static void genTensorStore(Merger &merger, CodeGen &codegen, OpBuilder &builder,
       // to indicate missing output.
       assert(merger.exp(exp).kind == kUnary || merger.exp(exp).kind == kBinary);
     } else if (merger.exp(exp).kind == kSelect) {
-      scf::IfOp ifOp = builder.create<scf::IfOp>(loc, rhs);
+      // Select operation insertion.
+      Value insChain = codegen.insChain;
+      assert(insChain);
+      SmallVector<Type, 1> types;
+      types.push_back(codegen.insChain.getType());
+      scf::IfOp ifOp =
+          builder.create<scf::IfOp>(loc, types, rhs, /*else=*/true);
       builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
       // Existing value was preserved to be used here.
       assert(merger.exp(exp).val);
       Value v0 = merger.exp(exp).val;
       genInsertionStore(codegen, builder, op, t, v0);
       merger.exp(exp).val = Value();
+      // Yield modified insertion chain along true branch.
+      builder.create<scf::YieldOp>(op.getLoc(), codegen.insChain);
+      // Yield original insertion chain along false branch.
+      builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
+      builder.create<scf::YieldOp>(loc, insChain);
+      // Done with if statement.
+      codegen.insChain = ifOp->getResult(0);
       builder.setInsertionPointAfter(ifOp);
     } else {
       genInsertionStore(codegen, builder, op, t, rhs);
@@ -811,7 +829,11 @@ static void genExpansion(Merger &merger, CodeGen &codegen, OpBuilder &builder,
       at != codegen.outerParNest)
     return; // not needed at this level
   assert(codegen.redVal == nullptr);
-  // Generate start or end of an expanded access pattern.
+  // Generate start or end of an expanded access pattern. Note that because
+// an expansion does not rely on the ongoing contents of the sparse storage
+  // scheme, we can use the original tensor as incoming SSA value (which
+  // simplifies codegen a bit). If expansion on the actual contents is ever
+  // needed, we will need to use the SSA value in the insertion chain instead.
   Value tensor = lhs->get();
   Location loc = op.getLoc();
   if (atStart) {
@@ -836,9 +858,9 @@ static void genExpansion(Merger &merger, CodeGen &codegen, OpBuilder &builder,
       assert(codegen.loopEmitter.getLoopIV(i));
       indices.push_back(codegen.loopEmitter.getLoopIV(i));
     }
-    builder.create<CompressOp>(loc, codegen.expValues, codegen.expFilled,
-                               codegen.expAdded, codegen.expCount, tensor,
-                               indices);
+    codegen.insChain = builder.create<CompressOp>(
+        loc, codegen.expValues, codegen.expFilled, codegen.expAdded,
+        codegen.expCount, codegen.insChain, indices);
     codegen.expValues = codegen.expFilled = codegen.expAdded =
         codegen.expCount = Value();
   }
@@ -882,21 +904,26 @@ static Operation *genFor(Merger &merger, CodeGen &codegen, OpBuilder &builder,
   bool isParallel = isParallelFor(codegen, isOuter, isReduction, isSparse);
   assert(!isParallel);
 
-  // Emit a sequential or vector loop.
+  // Emit a sequential for loop.
   SmallVector<Value, 4> operands;
   if (codegen.redVal)
     operands.push_back(codegen.redVal);
   if (codegen.expValues)
     operands.push_back(codegen.expCount);
+  if (codegen.insChain)
+    operands.push_back(codegen.insChain);
 
   Operation *loop = codegen.loopEmitter.enterLoopOverTensorAtDim(
       builder, loc, tid, dim, operands, isParallel, extraTids, extraDims);
 
-  // The operands should be updated by loop emitter already.
+  unsigned o = 0;
   if (codegen.redVal)
-    updateReduc(merger, codegen, operands.front());
+    updateReduc(merger, codegen, operands[o++]);
   if (codegen.expValues)
-    codegen.expCount = operands.back();
+    codegen.expCount = operands[o++];
+  if (codegen.insChain)
+    codegen.insChain = operands[o++];
+  assert(o == operands.size());
 
   return loop;
 }
@@ -907,7 +934,6 @@ static Operation *genWhile(Merger &merger, CodeGen &codegen, OpBuilder &builder,
                            ArrayRef<size_t> condTids, ArrayRef<size_t> condDims,
                            ArrayRef<size_t> extraTids,
                            ArrayRef<size_t> extraDims) {
-
   SmallVector<Value, 4> operands;
 
   // Construct the while-loop with a parameter for each index.
@@ -915,15 +941,21 @@ static Operation *genWhile(Merger &merger, CodeGen &codegen, OpBuilder &builder,
     operands.push_back(codegen.redVal);
   if (codegen.expValues)
     operands.push_back(codegen.expCount);
+  if (codegen.insChain)
+    operands.push_back(codegen.insChain);
 
   Operation *loop = codegen.loopEmitter.enterCoIterationOverTensorsAtDims(
       builder, op.getLoc(), condTids, condDims, needsUniv, operands, extraTids,
       extraDims);
 
+  unsigned o = 0;
   if (codegen.redVal)
-    updateReduc(merger, codegen, operands.front());
+    updateReduc(merger, codegen, operands[o++]);
   if (codegen.expValues)
-    codegen.expCount = operands.back();
+    codegen.expCount = operands[o++];
+  if (codegen.insChain)
+    codegen.insChain = operands[o++];
+  assert(o == operands.size());
 
   return loop;
 }
@@ -955,7 +987,7 @@ static void finalizeWhileOp(Merger &merger, CodeGen &codegen,
                             scf::WhileOp whileOp) {
   Location loc = op.getLoc();
   // Finalize each else branch of all if statements.
-  if (codegen.redVal || codegen.expValues) {
+  if (codegen.redVal || codegen.expValues || codegen.insChain) {
     while (auto ifOp = dyn_cast_or_null<scf::IfOp>(
                builder.getInsertionBlock()->getParentOp())) {
       unsigned y = 0;
@@ -968,6 +1000,10 @@ static void finalizeWhileOp(Merger &merger, CodeGen &codegen,
         yields.push_back(codegen.expCount);
         codegen.expCount = ifOp->getResult(y++);
       }
+      if (codegen.insChain) {
+        yields.push_back(codegen.insChain);
+        codegen.insChain = ifOp->getResult(y++);
+      }
       assert(y == yields.size());
       builder.create<scf::YieldOp>(loc, yields);
       builder.setInsertionPointAfter(ifOp);
@@ -1007,6 +1043,8 @@ static scf::IfOp genIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
     types.push_back(codegen.redVal.getType());
   if (codegen.expValues)
     types.push_back(builder.getIndexType());
+  if (codegen.insChain)
+    types.push_back(codegen.insChain.getType());
   scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
   return ifOp;
@@ -1015,7 +1053,7 @@ static scf::IfOp genIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
 /// Generates end of true branch of if-statement within a while-loop.
 static void endIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
                   linalg::GenericOp op, scf::IfOp ifOp, Operation *loop,
-                  Value redInput, Value cntInput) {
+                  Value redInput, Value cntInput, Value insInput) {
   SmallVector<Value, 4> operands;
   if (codegen.redVal) {
     operands.push_back(codegen.redVal);
@@ -1025,6 +1063,10 @@ static void endIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
     operands.push_back(codegen.expCount);
     codegen.expCount = cntInput;
   }
+  if (codegen.insChain) {
+    operands.push_back(codegen.insChain);
+    codegen.insChain = insInput;
+  }
   if (!operands.empty())
     builder.create<scf::YieldOp>(op.getLoc(), operands);
   builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
@@ -1160,15 +1202,21 @@ static bool endLoop(Merger &merger, CodeGen &codegen, OpBuilder &builder,
     reduc.push_back(codegen.redVal);
   if (codegen.expValues)
     reduc.push_back(codegen.expCount);
+  if (codegen.insChain)
+    reduc.push_back(codegen.insChain);
 
   auto loopRet =
       codegen.loopEmitter.exitCurrentLoop(builder, op.getLoc(), reduc);
   assert(reduc.size() == loopRet.size());
 
+  unsigned o = 0;
   if (codegen.redVal)
-    updateReduc(merger, codegen, loopRet.front());
+    updateReduc(merger, codegen, loopRet[o++]);
   if (codegen.expValues)
-    codegen.expCount = loopRet.back();
+    codegen.expCount = loopRet[o++];
+  if (codegen.insChain)
+    codegen.insChain = loopRet[o++];
+  assert(o == loopRet.size());
 
   return needsUniv;
 }
@@ -1203,6 +1251,9 @@ static void genStmt(Merger &merger, CodeGen &codegen, RewriterBase &rewriter,
   unsigned ldx = at == 0 ? -1u : codegen.topSort[at - 1];
   unsigned lts = merger.optimizeSet(merger.buildLattices(exp, idx));
 
+  // TODO: sort
+  // TODO: dedup
+
   // Start a loop sequence.
   bool needsUniv =
       startLoopSeq(merger, codegen, rewriter, op, exp, at, idx, ldx, lts);
@@ -1219,6 +1270,7 @@ static void genStmt(Merger &merger, CodeGen &codegen, RewriterBase &rewriter,
     // loop-body, possibly with if statements for coiteration.
     Value redInput = codegen.redVal;
     Value cntInput = codegen.expCount;
+    Value insInput = codegen.insChain;
     bool isWhile = dyn_cast<scf::WhileOp>(loop) != nullptr;
     for (unsigned j = 0; j < lsize; j++) {
       unsigned lj = merger.set(lts)[j];
@@ -1229,7 +1281,8 @@ static void genStmt(Merger &merger, CodeGen &codegen, RewriterBase &rewriter,
           scf::IfOp ifOp =
               genIf(merger, codegen, rewriter, op, idx, merger.lat(lj).simple);
           genStmt(merger, codegen, rewriter, op, ej, at + 1);
-          endIf(merger, codegen, rewriter, op, ifOp, loop, redInput, cntInput);
+          endIf(merger, codegen, rewriter, op, ifOp, loop, redInput, cntInput,
+                insInput);
         } else {
           genStmt(merger, codegen, rewriter, op, ej, at + 1);
         }
@@ -1249,12 +1302,16 @@ static void genStmt(Merger &merger, CodeGen &codegen, RewriterBase &rewriter,
 static void genResult(Merger &merger, CodeGen &codegen, RewriterBase &rewriter,
                       linalg::GenericOp op) {
   OpOperand *lhs = op.getDpsInitOperand(0);
-  Type resType = lhs->get().getType();
+  Value tensor = lhs->get();
+  Type resType = tensor.getType();
   if (getSparseTensorEncoding(resType)) {
     // The sparse tensor rematerializes from the original sparse tensor's
-    // underlying sparse storage format.
-    rewriter.replaceOpWithNewOp<LoadOp>(op, resType, lhs->get(),
-                                        codegen.sparseOut == lhs);
+    // underlying sparse storage format. For an insertion chain, the
+    // tensor materializes from the chain with 'hasInserts' enabled.
+    bool hasInserts = codegen.sparseOut == lhs;
+    if (hasInserts)
+      tensor = codegen.insChain;
+    rewriter.replaceOpWithNewOp<LoadOp>(op, resType, tensor, hasInserts);
   } else {
    // To rematerialize a non-annotated tensor, simply load it
     // from the bufferized value.
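
For the select lowering above, the generated scf.if now returns the
insertion chain: the true branch yields the chain as modified by the
insertion, the false branch yields it unchanged. A minimal sketch of the
intended output (types and names illustrative):

  %next = scf.if %cond -> (tensor<32xf64, #SV>) {
    %ins = sparse_tensor.insert %v into %chain[%i] : tensor<32xf64, #SV>
    scf.yield %ins : tensor<32xf64, #SV>    // modified chain
  } else {
    scf.yield %chain : tensor<32xf64, #SV>  // original chain
  }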

diff --git a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
index 425b9847f54d..fd30b3fa5240 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
@@ -12,7 +12,7 @@
 }
 
 // CHECK-LABEL:  @main(
-//  CHECK-SAME:    %[[TMP_arg0:.*]]: tensor<4x5xi32, 
+//  CHECK-SAME:  %[[TMP_arg0:.*]]: tensor<4x5xi32,
 //   CHECK-DAG:  %[[TMP_c3:.*]] = arith.constant 3 : index
 //   CHECK-DAG:  %[[TMP_c0:.*]] = arith.constant 0 : index
 //   CHECK-DAG:  %[[TMP_c1:.*]] = arith.constant 1 : index
@@ -24,21 +24,24 @@
 //       CHECK:  %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]]
 //       CHECK:  %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref<?xindex>
 //       CHECK:  %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref<?xindex>
-//       CHECK:  scf.for %[[TMP_arg1:.*]] = %[[TMP_6]] to %[[TMP_7]] step %[[TMP_c1]] {
+//       CHECK:  %[[T:.*]] = scf.for %[[TMP_arg1:.*]] = %[[TMP_6]] to %[[TMP_7]] step %[[TMP_c1]] {{.*}} {
 //       CHECK:    %[[TMP_9:.*]] = memref.load %[[TMP_2]][%[[TMP_arg1]]] : memref<?xindex>
-//       CHECK:    scf.for %[[TMP_arg2:.*]] = %[[TMP_c0]] to %[[TMP_c3]] step %[[TMP_c1]] {
+//       CHECK:    %[[L1:.*]] = scf.for %[[TMP_arg2:.*]] = %[[TMP_c0]] to %[[TMP_c3]] step %[[TMP_c1]] {{.*}} {
 //       CHECK:      %[[TMP_10:.*]] = memref.load %[[TMP_3]][%[[TMP_arg1]]] : memref<?xindex>
 //       CHECK:      %[[TMP_11:.*]] = arith.addi %[[TMP_arg1]], %[[TMP_c1]] : index
 //       CHECK:      %[[TMP_12:.*]] = memref.load %[[TMP_3]][%[[TMP_11]]] : memref<?xindex>
-//       CHECK:      scf.for %[[TMP_arg3:.*]] = %[[TMP_10]] to %[[TMP_12]] step %[[TMP_c1]] {
+//       CHECK:      %[[L2:.*]] = scf.for %[[TMP_arg3:.*]] = %[[TMP_10]] to %[[TMP_12]] step %[[TMP_c1]] {{.*}} {
 //       CHECK:        %[[TMP_13:.*]] = memref.load %[[TMP_4]][%[[TMP_arg3]]] : memref<?xindex>
 //       CHECK:        %[[TMP_14:.*]] = memref.load %[[TMP_5]][%[[TMP_arg3]]] : memref<?xi32>
-//       CHECK:        %[[TMP_15:.*]] = sparse_tensor.insert %[[TMP_14]] into %[[TMP_0]][%[[TMP_9]], %[[TMP_arg2]], %[[TMP_13]]]
+//       CHECK:        %[[Y:.*]] = sparse_tensor.insert %[[TMP_14]] into %{{.*}}[%[[TMP_9]], %[[TMP_arg2]], %[[TMP_13]]]
+//       CHECK:        scf.yield %[[Y]]
 //       CHECK:      }
+//       CHECK:      scf.yield %[[L2]]
 //       CHECK:    }
+//       CHECK:    scf.yield %[[L1]]
 //       CHECK:  }
-//       CHECK:  %[[TMP_8:.*]] = sparse_tensor.load %[[TMP_0]] hasInserts
-//       CHECK:  return %[[TMP_8]] 
+//       CHECK:  %[[TMP_8:.*]] = sparse_tensor.load %[[T]] hasInserts
+//       CHECK:  return %[[TMP_8]]
 module @func_sparse {
   func.func public @main(%arg0: tensor<4x5xi32, #DCSR>) -> tensor<4x3x5xi32, #SparseTensor> {
     %0 = bufferization.alloc_tensor() : tensor<4x3x5xi32, #SparseTensor>

diff --git a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
index 96c8b00e4de2..4983eded28bb 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
@@ -84,7 +84,7 @@ func.func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
 // CHECK-SPARSE-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-SPARSE-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-SPARSE-DAG: %[[C8:.*]] = arith.constant 8 : index
-// CHECK-SPARSE: scf.for %{{.*}} = %[[C0]] to %[[C8]] step %[[C1]] {
+// CHECK-SPARSE: %[[T:.*]] = scf.for %{{.*}} = %[[C0]] to %[[C8]] step %[[C1]] {{.*}} {
 // CHECK-SPARSE:   %[[A:.*]], %[[B:.*]], %[[C:.*]], %{{.*}} = sparse_tensor.expand
 // CHECK-SPARSE:   %[[COUNT:.*]] = scf.for {{.*}} {
 // CHECK-SPARSE:     scf.for {{.*}} {
@@ -92,7 +92,7 @@ func.func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
 // CHECK-SPARSE:   }
 // CHECK-SPARSE:   sparse_tensor.compress %[[A]], %[[B]], %[[C]], %[[COUNT]] into
 // CHECK-SPARSE: }
-// CHECK-SPARSE: %[[RET:.*]] = sparse_tensor.load %{{.*}} hasInserts
+// CHECK-SPARSE: %[[RET:.*]] = sparse_tensor.load %[[T]] hasInserts
 // CHECK-SPARSE: return %[[RET]]
 //
 // CHECK-CONVERT-LABEL: func @matmul1(
@@ -106,7 +106,7 @@ func.func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
 // CHECK-CONVERT: %[[C:.*]] = memref.alloc(%[[C4]]) : memref<?xindex>
 // CHECK-CONVERT: linalg.fill ins(%{{.*}} : f64) outs(%[[A]] : memref<?xf64>)
 // CHECK-CONVERT: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
-// CHECK-CONVERT: scf.for %{{.*}} = %[[C0]] to %[[C8]] step %[[C1]] {
+// CHECK-CONVERT: scf.for %{{.*}} = %[[C0]] to %[[C8]] step %[[C1]] {{.*}} {
 // CHECK-CONVERT:   scf.for {{.*}} {
 // CHECK-CONVERT:     scf.for {{.*}} {
 // CHECK-CONVERT:     }
@@ -132,7 +132,7 @@ func.func @matmul1(%A: tensor<8x2xf64, #CSR>,
 // CHECK-SPARSE-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-SPARSE-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-SPARSE-DAG: %[[C4:.*]] = arith.constant 4 : index
-// CHECK-SPARSE: scf.for %{{.*}} = %[[C0]] to %[[C4]] step %[[C1]] {
+// CHECK-SPARSE: %[[T:.*]] = scf.for %{{.*}} = %[[C0]] to %[[C4]] step %[[C1]] {{.*}} {
 // CHECK-SPARSE:   %[[A:.*]], %[[B:.*]], %[[C:.*]], %{{.*}} = sparse_tensor.expand
 // CHECK-SPARSE:   %[[COUNT:.*]] = scf.for {{.*}} {
 // CHECK-SPARSE:     scf.for {{.*}} {
@@ -140,7 +140,7 @@ func.func @matmul1(%A: tensor<8x2xf64, #CSR>,
 // CHECK-SPARSE:   }
 // CHECK-SPARSE:   sparse_tensor.compress %[[A]], %[[B]], %[[C]], %[[COUNT]]
 // CHECK-SPARSE: }
-// CHECK-SPARSE: %[[RET:.*]] = sparse_tensor.load %{{.*}} hasInserts
+// CHECK-SPARSE: %[[RET:.*]] = sparse_tensor.load %[[T]] hasInserts
 // CHECK-SPARSE: return %[[RET]]
 //
 // CHECK-CONVERT-LABEL: func @matmul2(
@@ -154,7 +154,7 @@ func.func @matmul1(%A: tensor<8x2xf64, #CSR>,
 // CHECK-CONVERT: %[[C:.*]] = memref.alloc(%[[C8]]) : memref<?xindex>
 // CHECK-CONVERT: linalg.fill ins(%{{.*}} : f64) outs(%[[A]] : memref<?xf64>)
 // CHECK-CONVERT: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
-// CHECK-CONVERT: scf.for %{{.*}} = %[[C0]] to %[[C4]] step %[[C1]] {
+// CHECK-CONVERT: scf.for %{{.*}} = %[[C0]] to %[[C4]] step %[[C1]] {{.*}} {
 // CHECK-CONVERT:   scf.for {{.*}} {
 // CHECK-CONVERT:     scf.for {{.*}} {
 // CHECK-CONVERT:     }

diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
index 6d363cfe47bd..975f160ad73f 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -360,7 +360,7 @@ func.func @divbyc(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf64>
 // CHECK:           %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref<?xindex>
 // CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_9:.*]] = %[[VAL_7]] to %[[VAL_8]] step %[[VAL_2]] {
+// CHECK:           %[[T:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_7]] to %[[VAL_8]] step %[[VAL_2]] {{.*}} {
 // CHECK:             %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_9]]] : memref<?xindex>
 // CHECK:             %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<?xf64>
 // CHECK:             %[[VAL_12:.*]] = math.absf %[[VAL_11]] : f64
@@ -371,9 +371,10 @@ func.func @divbyc(%arga: tensor<32xf64, #SV>,
 // CHECK:             %[[VAL_17:.*]] = math.log1p %[[VAL_16]] : f64
 // CHECK:             %[[VAL_18:.*]] = math.sin %[[VAL_17]] : f64
 // CHECK:             %[[VAL_19:.*]] = math.tanh %[[VAL_18]] : f64
-// CHECK:             sparse_tensor.insert %[[VAL_19]] into %[[VAL_3]]{{\[}}%[[VAL_10]]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
+// CHECK:             %[[Y:.*]] = sparse_tensor.insert %[[VAL_19]] into %{{.*}}[%[[VAL_10]]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
+// CHECK:             scf.yield %[[Y]]
 // CHECK:           }
-// CHECK:           %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_3]] hasInserts : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
+// CHECK:           %[[VAL_20:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
 // CHECK:           return %[[VAL_20]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
 // CHECK:         }
 func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #SV> {
@@ -407,13 +408,14 @@ func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #S
 // CHECK:           %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xcomplex<f64>>
 // CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
 // CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_2]] {
+// CHECK:           %[[T:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_2]] {{.*}} {
 // CHECK:             %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xindex>
 // CHECK:             %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_10]]] : memref<?xcomplex<f64>>
 // CHECK:             %[[VAL_13:.*]] = complex.div %[[VAL_12]], %[[VAL_3]] : complex<f64>
-// CHECK:             sparse_tensor.insert %[[VAL_13]] into %[[VAL_4]]{{\[}}%[[VAL_11]]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
+// CHECK:             %[[Y:.*]] = sparse_tensor.insert %[[VAL_13]] into %{{.*}}[%[[VAL_11]]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
+// CHECK:             scf.yield %[[Y]]
 // CHECK:           }
-// CHECK:           %[[VAL_14:.*]] = sparse_tensor.load %[[VAL_4]] hasInserts : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
+// CHECK:           %[[VAL_14:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
 // CHECK:           return %[[VAL_14]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
 // CHECK:         }
 func.func @complex_divbyc(%arg0: tensor<32xcomplex<f64>, #SV>) -> tensor<32xcomplex<f64>, #SV> {

diff --git a/mlir/test/Dialect/SparseTensor/sparse_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_index.mlir
index 4884cbf7ad0f..5cfa3ac9b6a4 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_index.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_index.mlir
@@ -83,22 +83,24 @@ func.func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
 // CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_1]]] : memref<?xindex>
 // CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_2]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_2]] {
+// CHECK:           %[[T:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_2]] {{.*}} {
 // CHECK:             %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
 // CHECK:             %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xindex>
 // CHECK:             %[[VAL_16:.*]] = arith.addi %[[VAL_13]], %[[VAL_2]] : index
 // CHECK:             %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
-// CHECK:             scf.for %[[VAL_18:.*]] = %[[VAL_15]] to %[[VAL_17]] step %[[VAL_2]] {
+// CHECK:             %[[L:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_15]] to %[[VAL_17]] step %[[VAL_2]] {{.*}} {
 // CHECK:               %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
 // CHECK:               %[[VAL_20:.*]] = arith.index_cast %[[VAL_19]] : index to i64
 // CHECK:               %[[VAL_21:.*]] = arith.index_cast %[[VAL_14]] : index to i64
 // CHECK:               %[[VAL_22:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref<?xi64>
 // CHECK:               %[[VAL_23:.*]] = arith.muli %[[VAL_21]], %[[VAL_22]] : i64
 // CHECK:               %[[VAL_24:.*]] = arith.muli %[[VAL_20]], %[[VAL_23]] : i64
-// CHECK:               sparse_tensor.insert %[[VAL_24]] into %[[VAL_5]]{{\[}}%[[VAL_14]], %[[VAL_19]]] : tensor<?x?xi64, #sparse_tensor.encoding
+// CHECK:               %[[Y:.*]] = sparse_tensor.insert %[[VAL_24]] into %{{.*}}[%[[VAL_14]], %[[VAL_19]]] : tensor<?x?xi64, #sparse_tensor.encoding
+// CHECK:               scf.yield %[[Y]]
 // CHECK:             }
+// CHECK:             scf.yield %[[L]]
 // CHECK:           }
-// CHECK:           %[[VAL_25:.*]] = sparse_tensor.load %[[VAL_5]] hasInserts : tensor<?x?xi64, #sparse_tensor.encoding
+// CHECK:           %[[VAL_25:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK:           return %[[VAL_25]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK:         }
 func.func @sparse_index(%arga: tensor<?x?xi64, #SparseMatrix>)

diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
index 0f629eaa2319..8ff61b5c1dac 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
@@ -13,11 +13,11 @@
 // CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 30 : index
 // CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf32>
+// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf32>
 // CHECK:           %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20x30xf32>
 // CHECK:           %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x30xf32>
 // CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -61,81 +61,82 @@ func.func @matmul1(%a: tensor<10x20xf32, #DCSR>,
 // CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
 // CHECK-DAG:       %[[VAL_4:.*]] = arith.constant false
 // CHECK-DAG:       %[[VAL_5:.*]] = arith.constant true
-// CHECK:           %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>
-// CHECK:           %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>
+// CHECK-DAG:       %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>
+// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>
 // CHECK:           %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_2]]] : memref<?xindex>
 // CHECK:           %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_19:.*]] = %[[VAL_17]] to %[[VAL_18]] step %[[VAL_3]] {
-// CHECK:             %[[VAL_20:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
-// CHECK:             %[[VAL_21:.*]], %[[VAL_22:.*]], %[[VAL_23:.*]], %[[VAL_24:.*]] = sparse_tensor.expand %[[VAL_6]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>, memref<?xi1>, memref<?xindex>
-// CHECK:             %[[VAL_25:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
-// CHECK:             %[[VAL_26:.*]] = arith.addi %[[VAL_19]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_27:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_26]]] : memref<?xindex>
-// CHECK:             %[[VAL_28:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_2]]] : memref<?xindex>
-// CHECK:             %[[VAL_29:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK:             %[[VAL_30:.*]]:3 = scf.while (%[[VAL_31:.*]] = %[[VAL_25]], %[[VAL_32:.*]] = %[[VAL_28]], %[[VAL_33:.*]] = %[[VAL_24]]) : (index, index, index) -> (index, index, index) {
-// CHECK:               %[[VAL_34:.*]] = arith.cmpi ult, %[[VAL_31]], %[[VAL_27]] : index
-// CHECK:               %[[VAL_35:.*]] = arith.cmpi ult, %[[VAL_32]], %[[VAL_29]] : index
-// CHECK:               %[[VAL_36:.*]] = arith.andi %[[VAL_34]], %[[VAL_35]] : i1
-// CHECK:               scf.condition(%[[VAL_36]]) %[[VAL_31]], %[[VAL_32]], %[[VAL_33]] : index, index, index
+// CHECK:           %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_18]] step %[[VAL_3]] iter_args(%[[VAL_21:.*]] = %[[VAL_6]]) -> (tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:             %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK:             %[[VAL_23:.*]], %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]] = sparse_tensor.expand %[[VAL_6]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>, memref<?xi1>, memref<?xindex>
+// CHECK:             %[[VAL_27:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK:             %[[VAL_28:.*]] = arith.addi %[[VAL_20]], %[[VAL_3]] : index
+// CHECK:             %[[VAL_29:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_28]]] : memref<?xindex>
+// CHECK:             %[[VAL_30:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK:             %[[VAL_31:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK:             %[[VAL_32:.*]]:4 = scf.while (%[[VAL_33:.*]] = %[[VAL_27]], %[[VAL_34:.*]] = %[[VAL_30]], %[[VAL_35:.*]] = %[[VAL_26]], %[[VAL_36:.*]] = %[[VAL_21]]) : (index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> (index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:               %[[VAL_37:.*]] = arith.cmpi ult, %[[VAL_33]], %[[VAL_29]] : index
+// CHECK:               %[[VAL_38:.*]] = arith.cmpi ult, %[[VAL_34]], %[[VAL_31]] : index
+// CHECK:               %[[VAL_39:.*]] = arith.andi %[[VAL_37]], %[[VAL_38]] : i1
+// CHECK:               scf.condition(%[[VAL_39]]) %[[VAL_33]], %[[VAL_34]], %[[VAL_35]], %[[VAL_36]] : index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             } do {
-// CHECK:             ^bb0(%[[VAL_37:.*]]: index, %[[VAL_38:.*]]: index, %[[VAL_39:.*]]: index):
-// CHECK:               %[[VAL_40:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_37]]] : memref<?xindex>
-// CHECK:               %[[VAL_41:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_38]]] : memref<?xindex>
-// CHECK:               %[[VAL_42:.*]] = arith.cmpi ult, %[[VAL_41]], %[[VAL_40]] : index
-// CHECK:               %[[VAL_43:.*]] = arith.select %[[VAL_42]], %[[VAL_41]], %[[VAL_40]] : index
-// CHECK:               %[[VAL_44:.*]] = arith.cmpi eq, %[[VAL_40]], %[[VAL_43]] : index
-// CHECK:               %[[VAL_45:.*]] = arith.cmpi eq, %[[VAL_41]], %[[VAL_43]] : index
-// CHECK:               %[[VAL_46:.*]] = arith.andi %[[VAL_44]], %[[VAL_45]] : i1
-// CHECK:               %[[VAL_47:.*]] = scf.if %[[VAL_46]] -> (index) {
-// CHECK:                 %[[VAL_48:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_37]]] : memref<?xf64>
-// CHECK:                 %[[VAL_49:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_38]]] : memref<?xindex>
-// CHECK:                 %[[VAL_50:.*]] = arith.addi %[[VAL_38]], %[[VAL_3]] : index
-// CHECK:                 %[[VAL_51:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_50]]] : memref<?xindex>
-// CHECK:                 %[[VAL_52:.*]] = scf.for %[[VAL_53:.*]] = %[[VAL_49]] to %[[VAL_51]] step %[[VAL_3]] iter_args(%[[VAL_54:.*]] = %[[VAL_39]]) -> (index) {
-// CHECK:                   %[[VAL_55:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_53]]] : memref<?xindex>
-// CHECK:                   %[[VAL_56:.*]] = memref.load %[[VAL_21]]{{\[}}%[[VAL_55]]] : memref<?xf64>
-// CHECK:                   %[[VAL_57:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_53]]] : memref<?xf64>
-// CHECK:                   %[[VAL_58:.*]] = arith.mulf %[[VAL_48]], %[[VAL_57]] : f64
-// CHECK:                   %[[VAL_59:.*]] = arith.addf %[[VAL_56]], %[[VAL_58]] : f64
-// CHECK:                   %[[VAL_60:.*]] = memref.load %[[VAL_22]]{{\[}}%[[VAL_55]]] : memref<?xi1>
-// CHECK:                   %[[VAL_61:.*]] = arith.cmpi eq, %[[VAL_60]], %[[VAL_4]] : i1
-// CHECK:                   %[[VAL_62:.*]] = scf.if %[[VAL_61]] -> (index) {
-// CHECK:                     memref.store %[[VAL_5]], %[[VAL_22]]{{\[}}%[[VAL_55]]] : memref<?xi1>
-// CHECK:                     memref.store %[[VAL_55]], %[[VAL_23]]{{\[}}%[[VAL_54]]] : memref<?xindex>
-// CHECK:                     %[[VAL_63:.*]] = arith.addi %[[VAL_54]], %[[VAL_3]] : index
-// CHECK:                     scf.yield %[[VAL_63]] : index
+// CHECK:             ^bb0(%[[VAL_40:.*]]: index, %[[VAL_41:.*]]: index, %[[VAL_42:.*]]: index, %[[VAL_43:.*]]: tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>):
+// CHECK:               %[[VAL_44:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_40]]] : memref<?xindex>
+// CHECK:               %[[VAL_45:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_41]]] : memref<?xindex>
+// CHECK:               %[[VAL_46:.*]] = arith.cmpi ult, %[[VAL_45]], %[[VAL_44]] : index
+// CHECK:               %[[VAL_47:.*]] = arith.select %[[VAL_46]], %[[VAL_45]], %[[VAL_44]] : index
+// CHECK:               %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index
+// CHECK:               %[[VAL_49:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_47]] : index
+// CHECK:               %[[VAL_50:.*]] = arith.andi %[[VAL_48]], %[[VAL_49]] : i1
+// CHECK:               %[[VAL_51:.*]]:2 = scf.if %[[VAL_50]] -> (index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:                 %[[VAL_52:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_40]]] : memref<?xf64>
+// CHECK:                 %[[VAL_53:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_41]]] : memref<?xindex>
+// CHECK:                 %[[VAL_54:.*]] = arith.addi %[[VAL_41]], %[[VAL_3]] : index
+// CHECK:                 %[[VAL_55:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_54]]] : memref<?xindex>
+// CHECK:                 %[[VAL_56:.*]] = scf.for %[[VAL_57:.*]] = %[[VAL_53]] to %[[VAL_55]] step %[[VAL_3]] iter_args(%[[VAL_58:.*]] = %[[VAL_42]]) -> (index) {
+// CHECK:                   %[[VAL_59:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_57]]] : memref<?xindex>
+// CHECK:                   %[[VAL_60:.*]] = memref.load %[[VAL_23]]{{\[}}%[[VAL_59]]] : memref<?xf64>
+// CHECK:                   %[[VAL_61:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_57]]] : memref<?xf64>
+// CHECK:                   %[[VAL_62:.*]] = arith.mulf %[[VAL_52]], %[[VAL_61]] : f64
+// CHECK:                   %[[VAL_63:.*]] = arith.addf %[[VAL_60]], %[[VAL_62]] : f64
+// CHECK:                   %[[VAL_64:.*]] = memref.load %[[VAL_24]]{{\[}}%[[VAL_59]]] : memref<?xi1>
+// CHECK:                   %[[VAL_65:.*]] = arith.cmpi eq, %[[VAL_64]], %[[VAL_4]] : i1
+// CHECK:                   %[[VAL_66:.*]] = scf.if %[[VAL_65]] -> (index) {
+// CHECK:                     memref.store %[[VAL_5]], %[[VAL_24]]{{\[}}%[[VAL_59]]] : memref<?xi1>
+// CHECK:                     memref.store %[[VAL_59]], %[[VAL_25]]{{\[}}%[[VAL_58]]] : memref<?xindex>
+// CHECK:                     %[[VAL_67:.*]] = arith.addi %[[VAL_58]], %[[VAL_3]] : index
+// CHECK:                     scf.yield %[[VAL_67]] : index
 // CHECK:                   } else {
-// CHECK:                     scf.yield %[[VAL_54]] : index
+// CHECK:                     scf.yield %[[VAL_58]] : index
 // CHECK:                   }
-// CHECK:                   memref.store %[[VAL_59]], %[[VAL_21]]{{\[}}%[[VAL_55]]] : memref<?xf64>
-// CHECK:                   scf.yield %[[VAL_64:.*]] : index
+// CHECK:                   memref.store %[[VAL_63]], %[[VAL_23]]{{\[}}%[[VAL_59]]] : memref<?xf64>
+// CHECK:                   scf.yield %[[VAL_68:.*]] : index
 // CHECK:                 }
-// CHECK:                 scf.yield %[[VAL_65:.*]] : index
+// CHECK:                 scf.yield %[[VAL_69:.*]], %[[VAL_43]] : index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:               } else {
-// CHECK:                 scf.yield %[[VAL_39]] : index
+// CHECK:                 scf.yield %[[VAL_42]], %[[VAL_43]] : index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:               }
-// CHECK:               %[[VAL_66:.*]] = arith.cmpi eq, %[[VAL_40]], %[[VAL_43]] : index
-// CHECK:               %[[VAL_67:.*]] = arith.addi %[[VAL_37]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_68:.*]] = arith.select %[[VAL_66]], %[[VAL_67]], %[[VAL_37]] : index
-// CHECK:               %[[VAL_69:.*]] = arith.cmpi eq, %[[VAL_41]], %[[VAL_43]] : index
-// CHECK:               %[[VAL_70:.*]] = arith.addi %[[VAL_38]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_71:.*]] = arith.select %[[VAL_69]], %[[VAL_70]], %[[VAL_38]] : index
-// CHECK:               scf.yield %[[VAL_68]], %[[VAL_71]], %[[VAL_72:.*]] : index, index, index
+// CHECK:               %[[VAL_70:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index
+// CHECK:               %[[VAL_71:.*]] = arith.addi %[[VAL_40]], %[[VAL_3]] : index
+// CHECK:               %[[VAL_72:.*]] = arith.select %[[VAL_70]], %[[VAL_71]], %[[VAL_40]] : index
+// CHECK:               %[[VAL_73:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_47]] : index
+// CHECK:               %[[VAL_74:.*]] = arith.addi %[[VAL_41]], %[[VAL_3]] : index
+// CHECK:               %[[VAL_75:.*]] = arith.select %[[VAL_73]], %[[VAL_74]], %[[VAL_41]] : index
+// CHECK:               scf.yield %[[VAL_72]], %[[VAL_75]], %[[VAL_76:.*]]#0, %[[VAL_76]]#1 : index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             }
-// CHECK:             sparse_tensor.compress %[[VAL_21]], %[[VAL_22]], %[[VAL_23]], %[[VAL_73:.*]]#2 into %[[VAL_6]]{{\[}}%[[VAL_20]]] : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:             %[[VAL_77:.*]] = sparse_tensor.compress %[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %[[VAL_78:.*]]#2 into %[[VAL_78]]#3{{\[}}%[[VAL_22]]] : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:             scf.yield %[[VAL_77]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           }
-// CHECK:           %[[VAL_74:.*]] = sparse_tensor.load %[[VAL_6]] hasInserts : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           return %[[VAL_74]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           %[[VAL_79:.*]] = sparse_tensor.load %[[VAL_80:.*]] hasInserts : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_79]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @matmul2(%A: tensor<4x8xf64, #DCSR>,
               %B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
@@ -154,12 +155,12 @@ func.func @matmul2(%A: tensor<4x8xf64, #DCSR>,
 // CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 6 : index
 // CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xi32>
-// CHECK:           %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xi32>
+// CHECK-DAG:       %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xi32>
+// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xi32>
 // CHECK:           %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<6x6xi32>
 // CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
 // CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -204,12 +205,12 @@ func.func @conv2d(%input:  tensor<8x8xi32>,
 // CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
 // CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 2 : i64
-// CHECK:           %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<5x3xi8>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xi8>
+// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<5x3xi8>
+// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xi8>
 // CHECK:           %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<5x6xi64>
 // CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
 // CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -253,12 +254,12 @@ func.func @quantized_matmul(%input1: tensor<5x3xi8>,
 // CHECK-SAME:      %[[VAL_2:.*2]]: tensor<f32>) -> tensor<f32> {
 // CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
 // CHECK:           %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<f32>
 // CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_11]][] : memref<f32>
 // CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
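
[The hunks above only relax FileCheck ordering: the buffer-extraction ops in
the kernel prologue (sparse_tensor.pointers/indices/values and
bufferization.to_memref) have no def-use dependences among themselves, so the
new codegen is free to emit them in any order, and the tests must use
CHECK-DAG rather than order-sensitive CHECK. A minimal sketch of the pattern,
with illustrative capture names (%[[A]], %[[P]], %[[V]], %[[C0]]) that are
not taken from any test above:

  // CHECK-DAG: %[[P:.*]] = sparse_tensor.pointers %[[A]] {dimension = 0 : index}
  // CHECK-DAG: %[[V:.*]] = sparse_tensor.values %[[A]]
  // CHECK:     %{{.*}} = memref.load %[[P]]{{\[}}%[[C0]]] : memref<?xindex>

Adjacent CHECK-DAG directives match in any order within the region delimited
by the surrounding plain CHECK lines, which still anchor the first op that
actually consumes the extracted buffers.]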

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
index d52a81f8d6ff..99b9dffeb990 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
@@ -23,27 +23,27 @@
 }
 
 // CHECK-LABEL:   func.func @sparse_simply_dynamic1(
-// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> {
-// CHECK-DAG:       %[[VAL_1:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 0 : index
-// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
-// CHECK:           %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xf32>
-// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
-// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_3]] {
-// CHECK:             %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
-// CHECK:             %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
-// CHECK:             scf.for %[[VAL_15:.*]] = %[[VAL_12]] to %[[VAL_14]] step %[[VAL_3]] {
-// CHECK:               %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xf32>
-// CHECK:               %[[VAL_17:.*]] = arith.mulf %[[VAL_16]], %[[VAL_1]] : f32
-// CHECK:               memref.store %[[VAL_17]], %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xf32>
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> {
+// CHECK-DAG:       %[[VAL_1:.*]] = arith.constant 0 : index
+// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
+// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f32
+// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf32>
+// CHECK:           %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref<?xindex>
+// CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK:           scf.for %[[VAL_9:.*]] = %[[VAL_7]] to %[[VAL_8]] step %[[VAL_2]] {
+// CHECK:             %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_9]]] : memref<?xindex>
+// CHECK:             %[[VAL_11:.*]] = arith.addi %[[VAL_9]], %[[VAL_2]] : index
+// CHECK:             %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xindex>
+// CHECK:             scf.for %[[VAL_13:.*]] = %[[VAL_10]] to %[[VAL_12]] step %[[VAL_2]] {
+// CHECK:               %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xf32>
+// CHECK:               %[[VAL_15:.*]] = arith.mulf %[[VAL_14]], %[[VAL_3]] : f32
+// CHECK:               memref.store %[[VAL_15]], %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xf32>
 // CHECK:             }
 // CHECK:           }
-// CHECK:           %[[VAL_18:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
-// CHECK:           return %[[VAL_18]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK:           %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR>) -> tensor<32x16xf32, #DCSR> {
   %c = arith.constant 2.0 : f32
@@ -57,12 +57,12 @@ func.func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR>) -> tensor<32x
 }
 
 // CHECK-LABEL:   func.func @sparse_simply_dynamic2(
-// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> {
 // CHECK-DAG:       %[[VAL_1:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_3:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
-// CHECK:           %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
-// CHECK:           %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK-DAG:       %[[VAL_3:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf32>
 // CHECK:           %[[VAL_6:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<?xindex>
 // CHECK:           %[[VAL_7:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_2]]] : memref<?xindex>
 // CHECK:           scf.for %[[VAL_8:.*]] = %[[VAL_6]] to %[[VAL_7]] step %[[VAL_2]] {
@@ -76,8 +76,8 @@ func.func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR>) -> tensor<32x
 // CHECK:               memref.store %[[VAL_15]], %[[VAL_5]]{{\[}}%[[VAL_12]]] : memref<?xf32>
 // CHECK:             }
 // CHECK:           }
-// CHECK:           %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
-// CHECK:           return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK:           %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR>) -> tensor<32x16xf32, #DCSR> {
   %0 = linalg.generic #trait_scale_inpl
@@ -104,23 +104,25 @@ func.func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR>) -> tensor<32x
 // CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
 // CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK:           %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref<?xf32>
-// CHECK:           scf.for %[[VAL_9:.*]] = %[[VAL_2]] to %[[VAL_1]] step %[[VAL_3]] {
-// CHECK:             %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<?xindex>
-// CHECK:             %[[VAL_11:.*]] = arith.addi %[[VAL_9]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
-// CHECK:             scf.for %[[VAL_13:.*]] = %[[VAL_10]] to %[[VAL_12]] step %[[VAL_3]] {
-// CHECK:               %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
-// CHECK:               %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xf32>
-// CHECK:               %[[VAL_16:.*]] = arith.mulf %[[VAL_15]], %[[VAL_4]] : f32
-// CHECK:               sparse_tensor.insert %[[VAL_16]] into %[[VAL_5]]{{\[}}%[[VAL_9]], %[[VAL_14]]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK-DAG:       %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref<?xf32>
+// CHECK:           %[[VAL_9:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_2]] to %[[VAL_1]] step %[[VAL_3]] iter_args(%[[VAL_11:.*]] = %[[VAL_5]]) -> (tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:             %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xindex>
+// CHECK:             %[[VAL_13:.*]] = arith.addi %[[VAL_10]], %[[VAL_3]] : index
+// CHECK:             %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK:             %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_12]] to %[[VAL_14]] step %[[VAL_3]] iter_args(%[[VAL_17:.*]] = %[[VAL_11]]) -> (tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:               %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
+// CHECK:               %[[VAL_19:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xf32>
+// CHECK:               %[[VAL_20:.*]] = arith.mulf %[[VAL_19]], %[[VAL_4]] : f32
+// CHECK:               %[[VAL_21:.*]] = sparse_tensor.insert %[[VAL_20]] into %[[VAL_17]]{{\[}}%[[VAL_10]], %[[VAL_18]]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:               scf.yield %[[VAL_21]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             }
+// CHECK:             scf.yield %[[VAL_22:.*]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           }
-// CHECK:           %[[VAL_17:.*]] = sparse_tensor.load %[[VAL_5]] hasInserts : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           return %[[VAL_17]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           %[[VAL_23:.*]] = sparse_tensor.load %[[VAL_24:.*]] hasInserts : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_23]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32, #DCSR> {
   %s = arith.constant 2.0 : f32
@@ -172,102 +174,106 @@ func.func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20x
 // CHECK:           %[[VAL_23:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref<?xindex>
 // CHECK:           %[[VAL_24:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_2]]] : memref<?xindex>
 // CHECK:           %[[VAL_25:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK:           %[[VAL_26:.*]]:2 = scf.while (%[[VAL_27:.*]] = %[[VAL_22]], %[[VAL_28:.*]] = %[[VAL_24]]) : (index, index) -> (index, index) {
-// CHECK:             %[[VAL_29:.*]] = arith.cmpi ult, %[[VAL_27]], %[[VAL_23]] : index
-// CHECK:             %[[VAL_30:.*]] = arith.cmpi ult, %[[VAL_28]], %[[VAL_25]] : index
-// CHECK:             %[[VAL_31:.*]] = arith.andi %[[VAL_29]], %[[VAL_30]] : i1
-// CHECK:             scf.condition(%[[VAL_31]]) %[[VAL_27]], %[[VAL_28]] : index, index
+// CHECK:           %[[VAL_26:.*]]:3 = scf.while (%[[VAL_27:.*]] = %[[VAL_22]], %[[VAL_28:.*]] = %[[VAL_24]], %[[VAL_29:.*]] = %[[VAL_7]]) : (index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> (index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:             %[[VAL_30:.*]] = arith.cmpi ult, %[[VAL_27]], %[[VAL_23]] : index
+// CHECK:             %[[VAL_31:.*]] = arith.cmpi ult, %[[VAL_28]], %[[VAL_25]] : index
+// CHECK:             %[[VAL_32:.*]] = arith.andi %[[VAL_30]], %[[VAL_31]] : i1
+// CHECK:             scf.condition(%[[VAL_32]]) %[[VAL_27]], %[[VAL_28]], %[[VAL_29]] : index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           } do {
-// CHECK:           ^bb0(%[[VAL_32:.*]]: index, %[[VAL_33:.*]]: index):
-// CHECK:             %[[VAL_34:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_32]]] : memref<?xindex>
-// CHECK:             %[[VAL_35:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_33]]] : memref<?xindex>
-// CHECK:             %[[VAL_36:.*]] = arith.cmpi ult, %[[VAL_35]], %[[VAL_34]] : index
-// CHECK:             %[[VAL_37:.*]] = arith.select %[[VAL_36]], %[[VAL_35]], %[[VAL_34]] : index
-// CHECK:             %[[VAL_38:.*]] = arith.cmpi eq, %[[VAL_34]], %[[VAL_37]] : index
-// CHECK:             %[[VAL_39:.*]] = arith.cmpi eq, %[[VAL_35]], %[[VAL_37]] : index
-// CHECK:             %[[VAL_40:.*]] = arith.andi %[[VAL_38]], %[[VAL_39]] : i1
-// CHECK:             scf.if %[[VAL_40]] {
-// CHECK:               %[[VAL_41:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_32]]] : memref<?xindex>
-// CHECK:               %[[VAL_42:.*]] = arith.addi %[[VAL_32]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_43:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_42]]] : memref<?xindex>
-// CHECK:               %[[VAL_44:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK:           ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>):
+// CHECK:             %[[VAL_36:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK:             %[[VAL_37:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_34]]] : memref<?xindex>
+// CHECK:             %[[VAL_38:.*]] = arith.cmpi ult, %[[VAL_37]], %[[VAL_36]] : index
+// CHECK:             %[[VAL_39:.*]] = arith.select %[[VAL_38]], %[[VAL_37]], %[[VAL_36]] : index
+// CHECK:             %[[VAL_40:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
+// CHECK:             %[[VAL_41:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
+// CHECK:             %[[VAL_42:.*]] = arith.andi %[[VAL_40]], %[[VAL_41]] : i1
+// CHECK:             %[[VAL_43:.*]] = scf.if %[[VAL_42]] -> (tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:               %[[VAL_44:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_33]]] : memref<?xindex>
 // CHECK:               %[[VAL_45:.*]] = arith.addi %[[VAL_33]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_46:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_45]]] : memref<?xindex>
-// CHECK:               %[[VAL_47:.*]]:2 = scf.while (%[[VAL_48:.*]] = %[[VAL_41]], %[[VAL_49:.*]] = %[[VAL_44]]) : (index, index) -> (index, index) {
-// CHECK:                 %[[VAL_50:.*]] = arith.cmpi ult, %[[VAL_48]], %[[VAL_43]] : index
-// CHECK:                 %[[VAL_51:.*]] = arith.cmpi ult, %[[VAL_49]], %[[VAL_46]] : index
-// CHECK:                 %[[VAL_52:.*]] = arith.andi %[[VAL_50]], %[[VAL_51]] : i1
-// CHECK:                 scf.condition(%[[VAL_52]]) %[[VAL_48]], %[[VAL_49]] : index, index
+// CHECK:               %[[VAL_46:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_45]]] : memref<?xindex>
+// CHECK:               %[[VAL_47:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_34]]] : memref<?xindex>
+// CHECK:               %[[VAL_48:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index
+// CHECK:               %[[VAL_49:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_48]]] : memref<?xindex>
+// CHECK:               %[[VAL_50:.*]]:3 = scf.while (%[[VAL_51:.*]] = %[[VAL_44]], %[[VAL_52:.*]] = %[[VAL_47]], %[[VAL_53:.*]] = %[[VAL_35]]) : (index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> (index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:                 %[[VAL_54:.*]] = arith.cmpi ult, %[[VAL_51]], %[[VAL_46]] : index
+// CHECK:                 %[[VAL_55:.*]] = arith.cmpi ult, %[[VAL_52]], %[[VAL_49]] : index
+// CHECK:                 %[[VAL_56:.*]] = arith.andi %[[VAL_54]], %[[VAL_55]] : i1
+// CHECK:                 scf.condition(%[[VAL_56]]) %[[VAL_51]], %[[VAL_52]], %[[VAL_53]] : index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:               } do {
-// CHECK:               ^bb0(%[[VAL_53:.*]]: index, %[[VAL_54:.*]]: index):
-// CHECK:                 %[[VAL_55:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_53]]] : memref<?xindex>
-// CHECK:                 %[[VAL_56:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_54]]] : memref<?xindex>
-// CHECK:                 %[[VAL_57:.*]] = arith.cmpi ult, %[[VAL_56]], %[[VAL_55]] : index
-// CHECK:                 %[[VAL_58:.*]] = arith.select %[[VAL_57]], %[[VAL_56]], %[[VAL_55]] : index
-// CHECK:                 %[[VAL_59:.*]] = arith.cmpi eq, %[[VAL_55]], %[[VAL_58]] : index
-// CHECK:                 %[[VAL_60:.*]] = arith.cmpi eq, %[[VAL_56]], %[[VAL_58]] : index
-// CHECK:                 %[[VAL_61:.*]] = arith.andi %[[VAL_59]], %[[VAL_60]] : i1
-// CHECK:                 scf.if %[[VAL_61]] {
-// CHECK:                   %[[VAL_62:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_53]]] : memref<?xindex>
-// CHECK:                   %[[VAL_63:.*]] = arith.addi %[[VAL_53]], %[[VAL_3]] : index
-// CHECK:                   %[[VAL_64:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_63]]] : memref<?xindex>
-// CHECK:                   %[[VAL_65:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_54]]] : memref<?xindex>
-// CHECK:                   %[[VAL_66:.*]] = arith.addi %[[VAL_54]], %[[VAL_3]] : index
-// CHECK:                   %[[VAL_67:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_66]]] : memref<?xindex>
-// CHECK:                   %[[VAL_68:.*]]:3 = scf.while (%[[VAL_69:.*]] = %[[VAL_62]], %[[VAL_70:.*]] = %[[VAL_65]], %[[VAL_71:.*]] = %[[VAL_4]]) : (index, index, i32) -> (index, index, i32) {
-// CHECK:                     %[[VAL_72:.*]] = arith.cmpi ult, %[[VAL_69]], %[[VAL_64]] : index
-// CHECK:                     %[[VAL_73:.*]] = arith.cmpi ult, %[[VAL_70]], %[[VAL_67]] : index
-// CHECK:                     %[[VAL_74:.*]] = arith.andi %[[VAL_72]], %[[VAL_73]] : i1
-// CHECK:                     scf.condition(%[[VAL_74]]) %[[VAL_69]], %[[VAL_70]], %[[VAL_71]] : index, index, i32
+// CHECK:               ^bb0(%[[VAL_57:.*]]: index, %[[VAL_58:.*]]: index, %[[VAL_59:.*]]: tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>):
+// CHECK:                 %[[VAL_60:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_57]]] : memref<?xindex>
+// CHECK:                 %[[VAL_61:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_58]]] : memref<?xindex>
+// CHECK:                 %[[VAL_62:.*]] = arith.cmpi ult, %[[VAL_61]], %[[VAL_60]] : index
+// CHECK:                 %[[VAL_63:.*]] = arith.select %[[VAL_62]], %[[VAL_61]], %[[VAL_60]] : index
+// CHECK:                 %[[VAL_64:.*]] = arith.cmpi eq, %[[VAL_60]], %[[VAL_63]] : index
+// CHECK:                 %[[VAL_65:.*]] = arith.cmpi eq, %[[VAL_61]], %[[VAL_63]] : index
+// CHECK:                 %[[VAL_66:.*]] = arith.andi %[[VAL_64]], %[[VAL_65]] : i1
+// CHECK:                 %[[VAL_67:.*]] = scf.if %[[VAL_66]] -> (tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:                   %[[VAL_68:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_57]]] : memref<?xindex>
+// CHECK:                   %[[VAL_69:.*]] = arith.addi %[[VAL_57]], %[[VAL_3]] : index
+// CHECK:                   %[[VAL_70:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_69]]] : memref<?xindex>
+// CHECK:                   %[[VAL_71:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_58]]] : memref<?xindex>
+// CHECK:                   %[[VAL_72:.*]] = arith.addi %[[VAL_58]], %[[VAL_3]] : index
+// CHECK:                   %[[VAL_73:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_72]]] : memref<?xindex>
+// CHECK:                   %[[VAL_74:.*]]:4 = scf.while (%[[VAL_75:.*]] = %[[VAL_68]], %[[VAL_76:.*]] = %[[VAL_71]], %[[VAL_77:.*]] = %[[VAL_4]], %[[VAL_78:.*]] = %[[VAL_59]]) : (index, index, i32, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> (index, index, i32, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:                     %[[VAL_79:.*]] = arith.cmpi ult, %[[VAL_75]], %[[VAL_70]] : index
+// CHECK:                     %[[VAL_80:.*]] = arith.cmpi ult, %[[VAL_76]], %[[VAL_73]] : index
+// CHECK:                     %[[VAL_81:.*]] = arith.andi %[[VAL_79]], %[[VAL_80]] : i1
+// CHECK:                     scf.condition(%[[VAL_81]]) %[[VAL_75]], %[[VAL_76]], %[[VAL_77]], %[[VAL_78]] : index, index, i32, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:                   } do {
-// CHECK:                   ^bb0(%[[VAL_75:.*]]: index, %[[VAL_76:.*]]: index, %[[VAL_77:.*]]: i32):
-// CHECK:                     %[[VAL_78:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_75]]] : memref<?xindex>
-// CHECK:                     %[[VAL_79:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_76]]] : memref<?xindex>
-// CHECK:                     %[[VAL_80:.*]] = arith.cmpi ult, %[[VAL_79]], %[[VAL_78]] : index
-// CHECK:                     %[[VAL_81:.*]] = arith.select %[[VAL_80]], %[[VAL_79]], %[[VAL_78]] : index
-// CHECK:                     %[[VAL_82:.*]] = arith.cmpi eq, %[[VAL_78]], %[[VAL_81]] : index
-// CHECK:                     %[[VAL_83:.*]] = arith.cmpi eq, %[[VAL_79]], %[[VAL_81]] : index
-// CHECK:                     %[[VAL_84:.*]] = arith.andi %[[VAL_82]], %[[VAL_83]] : i1
-// CHECK:                     %[[VAL_85:.*]] = scf.if %[[VAL_84]] -> (i32) {
-// CHECK:                       %[[VAL_86:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_75]]] : memref<?xi32>
-// CHECK:                       %[[VAL_87:.*]] = memref.load %[[VAL_21]]{{\[}}%[[VAL_76]]] : memref<?xi32>
-// CHECK:                       %[[VAL_88:.*]] = arith.muli %[[VAL_86]], %[[VAL_87]] : i32
-// CHECK:                       %[[VAL_89:.*]] = arith.addi %[[VAL_77]], %[[VAL_88]] : i32
-// CHECK:                       scf.yield %[[VAL_89]] : i32
+// CHECK:                   ^bb0(%[[VAL_82:.*]]: index, %[[VAL_83:.*]]: index, %[[VAL_84:.*]]: i32, %[[VAL_85:.*]]: tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>):
+// CHECK:                     %[[VAL_86:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_82]]] : memref<?xindex>
+// CHECK:                     %[[VAL_87:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_83]]] : memref<?xindex>
+// CHECK:                     %[[VAL_88:.*]] = arith.cmpi ult, %[[VAL_87]], %[[VAL_86]] : index
+// CHECK:                     %[[VAL_89:.*]] = arith.select %[[VAL_88]], %[[VAL_87]], %[[VAL_86]] : index
+// CHECK:                     %[[VAL_90:.*]] = arith.cmpi eq, %[[VAL_86]], %[[VAL_89]] : index
+// CHECK:                     %[[VAL_91:.*]] = arith.cmpi eq, %[[VAL_87]], %[[VAL_89]] : index
+// CHECK:                     %[[VAL_92:.*]] = arith.andi %[[VAL_90]], %[[VAL_91]] : i1
+// CHECK:                     %[[VAL_93:.*]]:2 = scf.if %[[VAL_92]] -> (i32, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:                       %[[VAL_94:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_82]]] : memref<?xi32>
+// CHECK:                       %[[VAL_95:.*]] = memref.load %[[VAL_21]]{{\[}}%[[VAL_83]]] : memref<?xi32>
+// CHECK:                       %[[VAL_96:.*]] = arith.muli %[[VAL_94]], %[[VAL_95]] : i32
+// CHECK:                       %[[VAL_97:.*]] = arith.addi %[[VAL_84]], %[[VAL_96]] : i32
+// CHECK:                       scf.yield %[[VAL_97]], %[[VAL_85]] : i32, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:                     } else {
-// CHECK:                       scf.yield %[[VAL_77]] : i32
+// CHECK:                       scf.yield %[[VAL_84]], %[[VAL_85]] : i32, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:                     }
-// CHECK:                     %[[VAL_90:.*]] = arith.cmpi eq, %[[VAL_78]], %[[VAL_81]] : index
-// CHECK:                     %[[VAL_91:.*]] = arith.addi %[[VAL_75]], %[[VAL_3]] : index
-// CHECK:                     %[[VAL_92:.*]] = arith.select %[[VAL_90]], %[[VAL_91]], %[[VAL_75]] : index
-// CHECK:                     %[[VAL_93:.*]] = arith.cmpi eq, %[[VAL_79]], %[[VAL_81]] : index
-// CHECK:                     %[[VAL_94:.*]] = arith.addi %[[VAL_76]], %[[VAL_3]] : index
-// CHECK:                     %[[VAL_95:.*]] = arith.select %[[VAL_93]], %[[VAL_94]], %[[VAL_76]] : index
-// CHECK:                     scf.yield %[[VAL_92]], %[[VAL_95]], %[[VAL_96:.*]] : index, index, i32
+// CHECK:                     %[[VAL_98:.*]] = arith.cmpi eq, %[[VAL_86]], %[[VAL_89]] : index
+// CHECK:                     %[[VAL_99:.*]] = arith.addi %[[VAL_82]], %[[VAL_3]] : index
+// CHECK:                     %[[VAL_100:.*]] = arith.select %[[VAL_98]], %[[VAL_99]], %[[VAL_82]] : index
+// CHECK:                     %[[VAL_101:.*]] = arith.cmpi eq, %[[VAL_87]], %[[VAL_89]] : index
+// CHECK:                     %[[VAL_102:.*]] = arith.addi %[[VAL_83]], %[[VAL_3]] : index
+// CHECK:                     %[[VAL_103:.*]] = arith.select %[[VAL_101]], %[[VAL_102]], %[[VAL_83]] : index
+// CHECK:                     scf.yield %[[VAL_100]], %[[VAL_103]], %[[VAL_104:.*]]#0, %[[VAL_104]]#1 : index, index, i32, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:                   }
-// CHECK:                   sparse_tensor.insert %[[VAL_97:.*]]#2 into %[[VAL_7]]{{\[}}%[[VAL_37]], %[[VAL_58]]] : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:                   %[[VAL_105:.*]] = sparse_tensor.insert %[[VAL_106:.*]]#2 into %[[VAL_106]]#3{{\[}}%[[VAL_39]], %[[VAL_63]]] : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:                   scf.yield %[[VAL_105]] : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:                 } else {
+// CHECK:                   scf.yield %[[VAL_59]] : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:                 }
-// CHECK:                 %[[VAL_98:.*]] = arith.cmpi eq, %[[VAL_55]], %[[VAL_58]] : index
-// CHECK:                 %[[VAL_99:.*]] = arith.addi %[[VAL_53]], %[[VAL_3]] : index
-// CHECK:                 %[[VAL_100:.*]] = arith.select %[[VAL_98]], %[[VAL_99]], %[[VAL_53]] : index
-// CHECK:                 %[[VAL_101:.*]] = arith.cmpi eq, %[[VAL_56]], %[[VAL_58]] : index
-// CHECK:                 %[[VAL_102:.*]] = arith.addi %[[VAL_54]], %[[VAL_3]] : index
-// CHECK:                 %[[VAL_103:.*]] = arith.select %[[VAL_101]], %[[VAL_102]], %[[VAL_54]] : index
-// CHECK:                 scf.yield %[[VAL_100]], %[[VAL_103]] : index, index
+// CHECK:                 %[[VAL_107:.*]] = arith.cmpi eq, %[[VAL_60]], %[[VAL_63]] : index
+// CHECK:                 %[[VAL_108:.*]] = arith.addi %[[VAL_57]], %[[VAL_3]] : index
+// CHECK:                 %[[VAL_109:.*]] = arith.select %[[VAL_107]], %[[VAL_108]], %[[VAL_57]] : index
+// CHECK:                 %[[VAL_110:.*]] = arith.cmpi eq, %[[VAL_61]], %[[VAL_63]] : index
+// CHECK:                 %[[VAL_111:.*]] = arith.addi %[[VAL_58]], %[[VAL_3]] : index
+// CHECK:                 %[[VAL_112:.*]] = arith.select %[[VAL_110]], %[[VAL_111]], %[[VAL_58]] : index
+// CHECK:                 scf.yield %[[VAL_109]], %[[VAL_112]], %[[VAL_113:.*]] : index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:               }
+// CHECK:               scf.yield %[[VAL_114:.*]]#2 : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             } else {
+// CHECK:               scf.yield %[[VAL_35]] : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             }
-// CHECK:             %[[VAL_104:.*]] = arith.cmpi eq, %[[VAL_34]], %[[VAL_37]] : index
-// CHECK:             %[[VAL_105:.*]] = arith.addi %[[VAL_32]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_106:.*]] = arith.select %[[VAL_104]], %[[VAL_105]], %[[VAL_32]] : index
-// CHECK:             %[[VAL_107:.*]] = arith.cmpi eq, %[[VAL_35]], %[[VAL_37]] : index
-// CHECK:             %[[VAL_108:.*]] = arith.addi %[[VAL_33]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_109:.*]] = arith.select %[[VAL_107]], %[[VAL_108]], %[[VAL_33]] : index
-// CHECK:             scf.yield %[[VAL_106]], %[[VAL_109]] : index, index
+// CHECK:             %[[VAL_115:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
+// CHECK:             %[[VAL_116:.*]] = arith.addi %[[VAL_33]], %[[VAL_3]] : index
+// CHECK:             %[[VAL_117:.*]] = arith.select %[[VAL_115]], %[[VAL_116]], %[[VAL_33]] : index
+// CHECK:             %[[VAL_118:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
+// CHECK:             %[[VAL_119:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index
+// CHECK:             %[[VAL_120:.*]] = arith.select %[[VAL_118]], %[[VAL_119]], %[[VAL_34]] : index
+// CHECK:             scf.yield %[[VAL_117]], %[[VAL_120]], %[[VAL_121:.*]] : index, index, tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           }
-// CHECK:           %[[VAL_110:.*]] = sparse_tensor.load %[[VAL_7]] hasInserts : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           return %[[VAL_110]] : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           %[[VAL_122:.*]] = sparse_tensor.load %[[VAL_123:.*]]#2 hasInserts : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_122]] : tensor<?x?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
              %argb: tensor<?x?x?xi32, #SparseTensor>) -> tensor<?x?xi32, #DCSR> {
@@ -299,12 +305,12 @@ func.func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
 }
 
 // CHECK-LABEL:   func.func @matmat(
-// CHECK-SAME:                      %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>,
-// CHECK-SAME:                      %[[VAL_1:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> {
-// CHECK:           %[[VAL_2:.*]] = arith.constant 0 : index
-// CHECK:           %[[VAL_3:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_4:.*]] = arith.constant false
-// CHECK:           %[[VAL_5:.*]] = arith.constant true
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>,
+// CHECK-SAME:      %[[VAL_1:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> {
+// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant false
+// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant true
 // CHECK:           %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           %[[VAL_8:.*]] = bufferization.alloc_tensor(%[[VAL_6]], %[[VAL_7]]) : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
@@ -320,68 +326,69 @@ func.func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
 // CHECK:           %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf32>
 // CHECK:           %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref<?xindex>
 // CHECK:           %[[VAL_20:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_21:.*]] = %[[VAL_19]] to %[[VAL_20]] step %[[VAL_3]] {
-// CHECK:             %[[VAL_22:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xindex>
-// CHECK:             %[[VAL_23:.*]], %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]] = sparse_tensor.expand %[[VAL_8]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf32>, memref<?xi1>, memref<?xindex>
-// CHECK:             %[[VAL_27:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_21]]] : memref<?xindex>
-// CHECK:             %[[VAL_28:.*]] = arith.addi %[[VAL_21]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_29:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK:             %[[VAL_30:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_2]]] : memref<?xindex>
-// CHECK:             %[[VAL_31:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK:             %[[VAL_32:.*]]:3 = scf.while (%[[VAL_33:.*]] = %[[VAL_27]], %[[VAL_34:.*]] = %[[VAL_30]], %[[VAL_35:.*]] = %[[VAL_26]]) : (index, index, index) -> (index, index, index) {
-// CHECK:               %[[VAL_36:.*]] = arith.cmpi ult, %[[VAL_33]], %[[VAL_29]] : index
-// CHECK:               %[[VAL_37:.*]] = arith.cmpi ult, %[[VAL_34]], %[[VAL_31]] : index
-// CHECK:               %[[VAL_38:.*]] = arith.andi %[[VAL_36]], %[[VAL_37]] : i1
-// CHECK:               scf.condition(%[[VAL_38]]) %[[VAL_33]], %[[VAL_34]], %[[VAL_35]] : index, index, index
+// CHECK:           %[[VAL_21:.*]] = scf.for %[[VAL_22:.*]] = %[[VAL_19]] to %[[VAL_20]] step %[[VAL_3]] iter_args(%[[VAL_23:.*]] = %[[VAL_8]]) -> (tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:             %[[VAL_24:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
+// CHECK:             %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]], %[[VAL_28:.*]] = sparse_tensor.expand %[[VAL_8]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf32>, memref<?xi1>, memref<?xindex>
+// CHECK:             %[[VAL_29:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_22]]] : memref<?xindex>
+// CHECK:             %[[VAL_30:.*]] = arith.addi %[[VAL_22]], %[[VAL_3]] : index
+// CHECK:             %[[VAL_31:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref<?xindex>
+// CHECK:             %[[VAL_32:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK:             %[[VAL_33:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK:             %[[VAL_34:.*]]:4 = scf.while (%[[VAL_35:.*]] = %[[VAL_29]], %[[VAL_36:.*]] = %[[VAL_32]], %[[VAL_37:.*]] = %[[VAL_28]], %[[VAL_38:.*]] = %[[VAL_23]]) : (index, index, index, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> (index, index, index, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:               %[[VAL_39:.*]] = arith.cmpi ult, %[[VAL_35]], %[[VAL_31]] : index
+// CHECK:               %[[VAL_40:.*]] = arith.cmpi ult, %[[VAL_36]], %[[VAL_33]] : index
+// CHECK:               %[[VAL_41:.*]] = arith.andi %[[VAL_39]], %[[VAL_40]] : i1
+// CHECK:               scf.condition(%[[VAL_41]]) %[[VAL_35]], %[[VAL_36]], %[[VAL_37]], %[[VAL_38]] : index, index, index, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             } do {
-// CHECK:             ^bb0(%[[VAL_39:.*]]: index, %[[VAL_40:.*]]: index, %[[VAL_41:.*]]: index):
-// CHECK:               %[[VAL_42:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_39]]] : memref<?xindex>
-// CHECK:               %[[VAL_43:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_40]]] : memref<?xindex>
-// CHECK:               %[[VAL_44:.*]] = arith.cmpi ult, %[[VAL_43]], %[[VAL_42]] : index
-// CHECK:               %[[VAL_45:.*]] = arith.select %[[VAL_44]], %[[VAL_43]], %[[VAL_42]] : index
-// CHECK:               %[[VAL_46:.*]] = arith.cmpi eq, %[[VAL_42]], %[[VAL_45]] : index
-// CHECK:               %[[VAL_47:.*]] = arith.cmpi eq, %[[VAL_43]], %[[VAL_45]] : index
-// CHECK:               %[[VAL_48:.*]] = arith.andi %[[VAL_46]], %[[VAL_47]] : i1
-// CHECK:               %[[VAL_49:.*]] = scf.if %[[VAL_48]] -> (index) {
-// CHECK:                 %[[VAL_50:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_39]]] : memref<?xf32>
-// CHECK:                 %[[VAL_51:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_40]]] : memref<?xindex>
-// CHECK:                 %[[VAL_52:.*]] = arith.addi %[[VAL_40]], %[[VAL_3]] : index
-// CHECK:                 %[[VAL_53:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_52]]] : memref<?xindex>
-// CHECK:                 %[[VAL_54:.*]] = scf.for %[[VAL_55:.*]] = %[[VAL_51]] to %[[VAL_53]] step %[[VAL_3]] iter_args(%[[VAL_56:.*]] = %[[VAL_41]]) -> (index) {
-// CHECK:                   %[[VAL_57:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_55]]] : memref<?xindex>
-// CHECK:                   %[[VAL_58:.*]] = memref.load %[[VAL_23]]{{\[}}%[[VAL_57]]] : memref<?xf32>
-// CHECK:                   %[[VAL_59:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_55]]] : memref<?xf32>
-// CHECK:                   %[[VAL_60:.*]] = arith.mulf %[[VAL_50]], %[[VAL_59]] : f32
-// CHECK:                   %[[VAL_61:.*]] = arith.addf %[[VAL_58]], %[[VAL_60]] : f32
-// CHECK:                   %[[VAL_62:.*]] = memref.load %[[VAL_24]]{{\[}}%[[VAL_57]]] : memref<?xi1>
-// CHECK:                   %[[VAL_63:.*]] = arith.cmpi eq, %[[VAL_62]], %[[VAL_4]] : i1
-// CHECK:                   %[[VAL_64:.*]] = scf.if %[[VAL_63]] -> (index) {
-// CHECK:                     memref.store %[[VAL_5]], %[[VAL_24]]{{\[}}%[[VAL_57]]] : memref<?xi1>
-// CHECK:                     memref.store %[[VAL_57]], %[[VAL_25]]{{\[}}%[[VAL_56]]] : memref<?xindex>
-// CHECK:                     %[[VAL_65:.*]] = arith.addi %[[VAL_56]], %[[VAL_3]] : index
-// CHECK:                     scf.yield %[[VAL_65]] : index
+// CHECK:             ^bb0(%[[VAL_42:.*]]: index, %[[VAL_43:.*]]: index, %[[VAL_44:.*]]: index, %[[VAL_45:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>):
+// CHECK:               %[[VAL_46:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_42]]] : memref<?xindex>
+// CHECK:               %[[VAL_47:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_43]]] : memref<?xindex>
+// CHECK:               %[[VAL_48:.*]] = arith.cmpi ult, %[[VAL_47]], %[[VAL_46]] : index
+// CHECK:               %[[VAL_49:.*]] = arith.select %[[VAL_48]], %[[VAL_47]], %[[VAL_46]] : index
+// CHECK:               %[[VAL_50:.*]] = arith.cmpi eq, %[[VAL_46]], %[[VAL_49]] : index
+// CHECK:               %[[VAL_51:.*]] = arith.cmpi eq, %[[VAL_47]], %[[VAL_49]] : index
+// CHECK:               %[[VAL_52:.*]] = arith.andi %[[VAL_50]], %[[VAL_51]] : i1
+// CHECK:               %[[VAL_53:.*]]:2 = scf.if %[[VAL_52]] -> (index, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:                 %[[VAL_54:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_42]]] : memref<?xf32>
+// CHECK:                 %[[VAL_55:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_43]]] : memref<?xindex>
+// CHECK:                 %[[VAL_56:.*]] = arith.addi %[[VAL_43]], %[[VAL_3]] : index
+// CHECK:                 %[[VAL_57:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_56]]] : memref<?xindex>
+// CHECK:                 %[[VAL_58:.*]] = scf.for %[[VAL_59:.*]] = %[[VAL_55]] to %[[VAL_57]] step %[[VAL_3]] iter_args(%[[VAL_60:.*]] = %[[VAL_44]]) -> (index) {
+// CHECK:                   %[[VAL_61:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_59]]] : memref<?xindex>
+// CHECK:                   %[[VAL_62:.*]] = memref.load %[[VAL_25]]{{\[}}%[[VAL_61]]] : memref<?xf32>
+// CHECK:                   %[[VAL_63:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_59]]] : memref<?xf32>
+// CHECK:                   %[[VAL_64:.*]] = arith.mulf %[[VAL_54]], %[[VAL_63]] : f32
+// CHECK:                   %[[VAL_65:.*]] = arith.addf %[[VAL_62]], %[[VAL_64]] : f32
+// CHECK:                   %[[VAL_66:.*]] = memref.load %[[VAL_26]]{{\[}}%[[VAL_61]]] : memref<?xi1>
+// CHECK:                   %[[VAL_67:.*]] = arith.cmpi eq, %[[VAL_66]], %[[VAL_4]] : i1
+// CHECK:                   %[[VAL_68:.*]] = scf.if %[[VAL_67]] -> (index) {
+// CHECK:                     memref.store %[[VAL_5]], %[[VAL_26]]{{\[}}%[[VAL_61]]] : memref<?xi1>
+// CHECK:                     memref.store %[[VAL_61]], %[[VAL_27]]{{\[}}%[[VAL_60]]] : memref<?xindex>
+// CHECK:                     %[[VAL_69:.*]] = arith.addi %[[VAL_60]], %[[VAL_3]] : index
+// CHECK:                     scf.yield %[[VAL_69]] : index
 // CHECK:                   } else {
-// CHECK:                     scf.yield %[[VAL_56]] : index
+// CHECK:                     scf.yield %[[VAL_60]] : index
 // CHECK:                   }
-// CHECK:                   memref.store %[[VAL_61]], %[[VAL_23]]{{\[}}%[[VAL_57]]] : memref<?xf32>
-// CHECK:                   scf.yield %[[VAL_66:.*]] : index
+// CHECK:                   memref.store %[[VAL_65]], %[[VAL_25]]{{\[}}%[[VAL_61]]] : memref<?xf32>
+// CHECK:                   scf.yield %[[VAL_70:.*]] : index
 // CHECK:                 }
-// CHECK:                 scf.yield %[[VAL_67:.*]] : index
+// CHECK:                 scf.yield %[[VAL_71:.*]], %[[VAL_45]] : index, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:               } else {
-// CHECK:                 scf.yield %[[VAL_41]] : index
+// CHECK:                 scf.yield %[[VAL_44]], %[[VAL_45]] : index, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:               }
-// CHECK:               %[[VAL_68:.*]] = arith.cmpi eq, %[[VAL_42]], %[[VAL_45]] : index
-// CHECK:               %[[VAL_69:.*]] = arith.addi %[[VAL_39]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_70:.*]] = arith.select %[[VAL_68]], %[[VAL_69]], %[[VAL_39]] : index
-// CHECK:               %[[VAL_71:.*]] = arith.cmpi eq, %[[VAL_43]], %[[VAL_45]] : index
-// CHECK:               %[[VAL_72:.*]] = arith.addi %[[VAL_40]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_73:.*]] = arith.select %[[VAL_71]], %[[VAL_72]], %[[VAL_40]] : index
-// CHECK:               scf.yield %[[VAL_70]], %[[VAL_73]], %[[VAL_74:.*]] : index, index, index
+// CHECK:               %[[VAL_72:.*]] = arith.cmpi eq, %[[VAL_46]], %[[VAL_49]] : index
+// CHECK:               %[[VAL_73:.*]] = arith.addi %[[VAL_42]], %[[VAL_3]] : index
+// CHECK:               %[[VAL_74:.*]] = arith.select %[[VAL_72]], %[[VAL_73]], %[[VAL_42]] : index
+// CHECK:               %[[VAL_75:.*]] = arith.cmpi eq, %[[VAL_47]], %[[VAL_49]] : index
+// CHECK:               %[[VAL_76:.*]] = arith.addi %[[VAL_43]], %[[VAL_3]] : index
+// CHECK:               %[[VAL_77:.*]] = arith.select %[[VAL_75]], %[[VAL_76]], %[[VAL_43]] : index
+// CHECK:               scf.yield %[[VAL_74]], %[[VAL_77]], %[[VAL_78:.*]]#0, %[[VAL_78]]#1 : index, index, index, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             }
-// CHECK:             sparse_tensor.compress %[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %[[VAL_75:.*]]#2 into %[[VAL_8]]{{\[}}%[[VAL_22]]] : memref<?xf32>, memref<?xi1>, memref<?xindex>, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:             %[[VAL_79:.*]] = sparse_tensor.compress %[[VAL_25]], %[[VAL_26]], %[[VAL_27]], %[[VAL_80:.*]]#2 into %[[VAL_80]]#3{{\[}}%[[VAL_24]]] : memref<?xf32>, memref<?xi1>, memref<?xindex>, tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:             scf.yield %[[VAL_79]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           }
-// CHECK:           %[[VAL_76:.*]] = sparse_tensor.load %[[VAL_8]] hasInserts : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           return %[[VAL_76]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           %[[VAL_81:.*]] = sparse_tensor.load %[[VAL_82:.*]] hasInserts : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_81]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @matmat(%arga: tensor<?x?xf32, #DCSR>,
              %argb: tensor<?x?xf32, #DCSR>) -> tensor<?x?xf32, #DCSR> {
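
[As the sumred and matmat tests above show, the rewritten sparsification now
threads the output tensor through every enclosing scf.for, scf.while, and
scf.if as an iter_args/condition/yield operand, so the
alloc->insert/compress->load chain becomes a genuine SSA chain instead of an
implicit in-place mutation that only the runtime support library tolerated.
A minimal sketch of the change on a 1-D kernel, assuming #SV is a compressed
vector encoding and %v, %lo, %hi, %c1 are defined in the surrounding function
(none of these names come from the tests above):

  // Old style: %t0 is mutated behind the scenes; the chain is invisible
  // to SSA-based analyses.
  %t0 = bufferization.alloc_tensor() : tensor<10xf32, #SV>
  scf.for %i = %lo to %hi step %c1 {
    sparse_tensor.insert %v into %t0[%i] : tensor<10xf32, #SV>
  }
  %r0 = sparse_tensor.load %t0 hasInserts : tensor<10xf32, #SV>

  // New style: each insert yields a fresh tensor value, the loop carries
  // it in iter_args, and the final load consumes the loop result.
  %t1 = bufferization.alloc_tensor() : tensor<10xf32, #SV>
  %f = scf.for %i = %lo to %hi step %c1
      iter_args(%acc = %t1) -> (tensor<10xf32, #SV>) {
    %acc2 = sparse_tensor.insert %v into %acc[%i] : tensor<10xf32, #SV>
    scf.yield %acc2 : tensor<10xf32, #SV>
  }
  %r1 = sparse_tensor.load %f hasInserts : tensor<10xf32, #SV>

scf.while and scf.if are handled analogously, by appending the tensor to the
condition and yield operand lists, as in the sumred hunks above; for expanded
access patterns it is sparse_tensor.compress that yields the new tensor
value, as in the matmat hunks.]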

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
index ad1ad1d524be..d2ec5cafd59d 100755
--- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
@@ -133,52 +133,53 @@ func.func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
 // CHECK-DAG:       %[[VAL_6:.*]] = arith.constant false
 // CHECK-DAG:       %[[VAL_7:.*]] = arith.constant true
 // CHECK-DAG:       %[[VAL_8:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
-// CHECK:           %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) {bufferization.escape = [false]} : tensor<8x8xf64>
-// CHECK:           %[[VAL_10:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
-// CHECK:           %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
-// CHECK:           %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_15:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_16:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
-// CHECK:           %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>
+// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) {bufferization.escape = [false]} : tensor<8x8xf64>
+// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
+// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
+// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_15:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_16:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>
 // CHECK:           %[[VAL_18:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_4]]] : memref<?xindex>
 // CHECK:           %[[VAL_19:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_20:.*]] = %[[VAL_18]] to %[[VAL_19]] step %[[VAL_5]] {
-// CHECK:             %[[VAL_21:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK:             %[[VAL_22:.*]], %[[VAL_23:.*]], %[[VAL_24:.*]], %[[VAL_25:.*]] = sparse_tensor.expand %[[VAL_10]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>, memref<?xi1>, memref<?xindex>
-// CHECK:             %[[VAL_26:.*]] = scf.for %[[VAL_27:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] iter_args(%[[VAL_28:.*]] = %[[VAL_25]]) -> (index) {
-// CHECK:               %[[VAL_29:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_21]], %[[VAL_27]]] : memref<8x8xf64>
-// CHECK:               %[[VAL_30:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK:               %[[VAL_31:.*]] = arith.addi %[[VAL_20]], %[[VAL_5]] : index
-// CHECK:               %[[VAL_32:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_31]]] : memref<?xindex>
-// CHECK:               %[[VAL_33:.*]] = scf.for %[[VAL_34:.*]] = %[[VAL_30]] to %[[VAL_32]] step %[[VAL_5]] iter_args(%[[VAL_35:.*]] = %[[VAL_28]]) -> (index) {
-// CHECK:                 %[[VAL_36:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_34]]] : memref<?xindex>
-// CHECK:                 %[[VAL_37:.*]] = memref.load %[[VAL_22]]{{\[}}%[[VAL_36]]] : memref<?xf64>
-// CHECK:                 %[[VAL_38:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_27]], %[[VAL_36]]] : memref<8x8xf64>
-// CHECK:                 %[[VAL_39:.*]] = arith.mulf %[[VAL_29]], %[[VAL_38]] : f64
-// CHECK:                 %[[VAL_40:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_34]]] : memref<?xf64>
-// CHECK:                 %[[VAL_41:.*]] = arith.mulf %[[VAL_39]], %[[VAL_40]] : f64
-// CHECK:                 %[[VAL_42:.*]] = arith.addf %[[VAL_37]], %[[VAL_41]] : f64
-// CHECK:                 %[[VAL_43:.*]] = memref.load %[[VAL_23]]{{\[}}%[[VAL_36]]] : memref<?xi1>
-// CHECK:                 %[[VAL_44:.*]] = arith.cmpi eq, %[[VAL_43]], %[[VAL_6]] : i1
-// CHECK:                 %[[VAL_45:.*]] = scf.if %[[VAL_44]] -> (index) {
-// CHECK:                   memref.store %[[VAL_7]], %[[VAL_23]]{{\[}}%[[VAL_36]]] : memref<?xi1>
-// CHECK:                   memref.store %[[VAL_36]], %[[VAL_24]]{{\[}}%[[VAL_35]]] : memref<?xindex>
-// CHECK:                   %[[VAL_46:.*]] = arith.addi %[[VAL_35]], %[[VAL_5]] : index
-// CHECK:                   scf.yield %[[VAL_46]] : index
+// CHECK:           %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_19]] step %[[VAL_5]] iter_args(%[[VAL_22:.*]] = %[[VAL_10]]) -> (tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:             %[[VAL_23:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK:             %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]] = sparse_tensor.expand %[[VAL_10]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref<?xf64>, memref<?xi1>, memref<?xindex>
+// CHECK:             %[[VAL_28:.*]] = scf.for %[[VAL_29:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] iter_args(%[[VAL_30:.*]] = %[[VAL_27]]) -> (index) {
+// CHECK:               %[[VAL_31:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_23]], %[[VAL_29]]] : memref<8x8xf64>
+// CHECK:               %[[VAL_32:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_21]]] : memref<?xindex>
+// CHECK:               %[[VAL_33:.*]] = arith.addi %[[VAL_21]], %[[VAL_5]] : index
+// CHECK:               %[[VAL_34:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_33]]] : memref<?xindex>
+// CHECK:               %[[VAL_35:.*]] = scf.for %[[VAL_36:.*]] = %[[VAL_32]] to %[[VAL_34]] step %[[VAL_5]] iter_args(%[[VAL_37:.*]] = %[[VAL_30]]) -> (index) {
+// CHECK:                 %[[VAL_38:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_36]]] : memref<?xindex>
+// CHECK:                 %[[VAL_39:.*]] = memref.load %[[VAL_24]]{{\[}}%[[VAL_38]]] : memref<?xf64>
+// CHECK:                 %[[VAL_40:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_29]], %[[VAL_38]]] : memref<8x8xf64>
+// CHECK:                 %[[VAL_41:.*]] = arith.mulf %[[VAL_31]], %[[VAL_40]] : f64
+// CHECK:                 %[[VAL_42:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_36]]] : memref<?xf64>
+// CHECK:                 %[[VAL_43:.*]] = arith.mulf %[[VAL_41]], %[[VAL_42]] : f64
+// CHECK:                 %[[VAL_44:.*]] = arith.addf %[[VAL_39]], %[[VAL_43]] : f64
+// CHECK:                 %[[VAL_45:.*]] = memref.load %[[VAL_25]]{{\[}}%[[VAL_38]]] : memref<?xi1>
+// CHECK:                 %[[VAL_46:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_6]] : i1
+// CHECK:                 %[[VAL_47:.*]] = scf.if %[[VAL_46]] -> (index) {
+// CHECK:                   memref.store %[[VAL_7]], %[[VAL_25]]{{\[}}%[[VAL_38]]] : memref<?xi1>
+// CHECK:                   memref.store %[[VAL_38]], %[[VAL_26]]{{\[}}%[[VAL_37]]] : memref<?xindex>
+// CHECK:                   %[[VAL_48:.*]] = arith.addi %[[VAL_37]], %[[VAL_5]] : index
+// CHECK:                   scf.yield %[[VAL_48]] : index
 // CHECK:                 } else {
-// CHECK:                   scf.yield %[[VAL_35]] : index
+// CHECK:                   scf.yield %[[VAL_37]] : index
 // CHECK:                 }
-// CHECK:                 memref.store %[[VAL_42]], %[[VAL_22]]{{\[}}%[[VAL_36]]] : memref<?xf64>
-// CHECK:                 scf.yield %[[VAL_47:.*]] : index
+// CHECK:                 memref.store %[[VAL_44]], %[[VAL_24]]{{\[}}%[[VAL_38]]] : memref<?xf64>
+// CHECK:                 scf.yield %[[VAL_49:.*]] : index
 // CHECK:               }
-// CHECK:               scf.yield %[[VAL_48:.*]] : index
+// CHECK:               scf.yield %[[VAL_50:.*]] : index
 // CHECK:             }
-// CHECK:             sparse_tensor.compress %[[VAL_22]], %[[VAL_23]], %[[VAL_24]], %[[VAL_49:.*]] into %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:             %[[VAL_51:.*]] = sparse_tensor.compress %[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_52:.*]] into %[[VAL_22]]{{\[}}%[[VAL_23]]] : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:             scf.yield %[[VAL_51]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           }
-// CHECK:           %[[VAL_50:.*]] = sparse_tensor.load %[[VAL_10]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           return %[[VAL_50]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           %[[VAL_53:.*]] = sparse_tensor.load %[[VAL_54:.*]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_53]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @sparse_sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
                                      %arga: tensor<8x8xf64>,

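In the updated CHECK lines above, the sparse_tensor.compress at the end of the
outer loop now produces a result tensor that is yielded back through iter_args,
rather than mutating the allocation in place. A minimal sketch of the SSA shape
being verified, using shortened hypothetical value names (the test itself uses
the literal %[[VAL_*]] captures):

    %t = scf.for %i = %lo to %hi step %c1
        iter_args(%m = %alloc) -> (tensor<8x8xf64, #SM>) {
      // Expand an access pattern for the current row.
      %vals, %filled, %added, %cnt = sparse_tensor.expand %alloc
          : tensor<8x8xf64, #SM> to memref<?xf64>, memref<?xi1>, memref<?xindex>
      // ... inner loops update %vals/%filled/%added and carry the count ...
      // Compress into the loop-carried tensor and yield the new value,
      // so the next iteration (and the final load) see the insertions.
      %new = sparse_tensor.compress %vals, %filled, %added, %cnt2
          into %m[%row]
          : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64, #SM>
      scf.yield %new : tensor<8x8xf64, #SM>
    }
    %result = sparse_tensor.load %t hasInserts : tensor<8x8xf64, #SM>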
diff  --git a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir
index 8b8b1c00e176..e909e097aa1c 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir
@@ -19,29 +19,31 @@
 // CHECK-SAME:      %[[VAL_0:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> {
 // CHECK-DAG:       %[[VAL_1:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
-// CHECK:           %[[VAL_4:.*]] = sparse_tensor.convert %[[VAL_0]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>>
-// CHECK:           %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_4]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
-// CHECK:           %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_4]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
-// CHECK:           %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_4]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
-// CHECK:           %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_4]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
-// CHECK:           %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xf64>
+// CHECK-DAG:       %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.convert %[[VAL_0]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>>
+// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_4]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_4]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_4]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_4]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xf64>
 // CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
 // CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
-// CHECK:           scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] {
-// CHECK:             %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
-// CHECK:             %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xindex>
-// CHECK:             %[[VAL_15:.*]] = arith.addi %[[VAL_12]], %[[VAL_2]] : index
-// CHECK:             %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
-// CHECK:             scf.for %[[VAL_17:.*]] = %[[VAL_14]] to %[[VAL_16]] step %[[VAL_2]] {
-// CHECK:               %[[VAL_18:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>
-// CHECK:               %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_17]]] : memref<?xf64>
-// CHECK:               sparse_tensor.insert %[[VAL_19]] into %[[VAL_3]]{{\[}}%[[VAL_13]], %[[VAL_18]]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_3]]) -> (tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:             %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK:             %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
+// CHECK:             %[[VAL_17:.*]] = arith.addi %[[VAL_13]], %[[VAL_2]] : index
+// CHECK:             %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_17]]] : memref<?xindex>
+// CHECK:             %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_16]] to %[[VAL_18]] step %[[VAL_2]] iter_args(%[[VAL_21:.*]] = %[[VAL_14]]) -> (tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) {
+// CHECK:               %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<?xindex>
+// CHECK:               %[[VAL_23:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xf64>
+// CHECK:               %[[VAL_24:.*]] = sparse_tensor.insert %[[VAL_23]] into %[[VAL_21]]{{\[}}%[[VAL_15]], %[[VAL_22]]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:               scf.yield %[[VAL_24]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:             }
+// CHECK:             scf.yield %[[VAL_25:.*]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           }
-// CHECK:           %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_3]] hasInserts : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           %[[VAL_26:.*]] = sparse_tensor.load %[[VAL_27:.*]] hasInserts : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:           bufferization.dealloc_tensor %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>>
-// CHECK:           return %[[VAL_20]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:           return %[[VAL_26]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
 // CHECK:         }
 func.func @sparse_transpose_auto(%arga: tensor<3x4xf64, #DCSR>)
                                      -> tensor<4x3xf64, #DCSR> {
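The transpose test checks the same chaining for scalar insertions:
sparse_tensor.insert now returns the updated tensor, and each enclosing scf.for
threads it through iter_args so the final sparse_tensor.load observes every
insertion. A rough sketch of that chain, again with hypothetical shortened
names rather than the test's %[[VAL_*]] captures:

    %outer = scf.for %i = %plo to %phi step %c1
        iter_args(%t0 = %alloc) -> (tensor<4x3xf64, #DCSR>) {
      %inner = scf.for %j = %jlo to %jhi step %c1
          iter_args(%t1 = %t0) -> (tensor<4x3xf64, #DCSR>) {
        // Each insert consumes the carried tensor and yields the new one.
        %t2 = sparse_tensor.insert %v into %t1[%row, %col]
            : tensor<4x3xf64, #DCSR>
        scf.yield %t2 : tensor<4x3xf64, #DCSR>
      }
      scf.yield %inner : tensor<4x3xf64, #DCSR>
    }
    %res = sparse_tensor.load %outer hasInserts : tensor<4x3xf64, #DCSR>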
