[Mlir-commits] [mlir] [mlir][sparse] unify lib/codegen rewriting rules for sparse tensor re… (PR #68049)

Peiming Liu llvmlistbot at llvm.org
Mon Oct 2 16:27:20 PDT 2023


https://github.com/PeimingLiu updated https://github.com/llvm/llvm-project/pull/68049

From 19c24d4a7aace77ccf44fc304ec2b888760cf90f Mon Sep 17 00:00:00 2001
From: Peiming Liu <peiming at google.com>
Date: Mon, 2 Oct 2023 23:23:17 +0000
Subject: [PATCH 1/2] [mlir][sparse] unify lib/codegen rewriting rules for
 sparse tensor reshaping operations.

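Previously, a sparse-to-sparse reshape could be lowered along two separate
paths: a runtime-library conversion rule (SparseReshapeConverter in
SparseTensorConversion.cpp) and a codegen rewrite rule
(Sparse2SparseReshapeRewriter in SparseTensorRewriting.cpp). This patch
registers the codegen rewrite unconditionally in
populatePostSparsificationRewriting, so the same loop nest is emitted
whether or not the runtime library is enabled, and the now-dead conversion
rule can be removed in the follow-up commit.

The heart of the rewrite is plain coordinate arithmetic: a source
coordinate is delinearized for expand_shape (arith.divui/arith.remui) and
re-linearized for collapse_shape (arith.muli/arith.addi). A minimal C++
sketch of that arithmetic for the static 100 <-> 10x10 cases in the tests
(illustrative only, not the actual MLIR builder code):

  #include <cassert>
  #include <cstdint>

  // Expand: split linear coordinate x into (d0, d1) for a 10x10
  // destination, mirroring the emitted divui/remui pair.
  static void expandCoord(uint64_t x, uint64_t d[2]) {
    d[0] = x / 10; // arith.divui
    d[1] = x % 10; // arith.remui
  }

  // Collapse: fuse (i0, i1) of a 10x10 source back into one linear
  // coordinate, mirroring the emitted muli/addi pair.
  static uint64_t collapseCoord(uint64_t i0, uint64_t i1) {
    return i0 * 10 + i1; // arith.muli + arith.addi
  }

  int main() {
    uint64_t d[2];
    expandCoord(57, d);
    assert(d[0] == 5 && d[1] == 7);          // 57 -> (5, 7)
    assert(collapseCoord(d[0], d[1]) == 57); // round-trips
    return 0;
  }
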
---
 .../Transforms/SparseTensorConversion.cpp     |   2 -
 .../Transforms/SparseTensorRewriting.cpp      |  10 +-
 .../Dialect/SparseTensor/post_rewriting.mlir  |  99 -----
 .../Dialect/SparseTensor/sparse_reshape.mlir  | 356 ++++++------------
 4 files changed, 125 insertions(+), 342 deletions(-)
 delete mode 100644 mlir/test/Dialect/SparseTensor/post_rewriting.mlir

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 37f6971cf4df1a2..75045d2378dec00 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -1538,8 +1538,6 @@ void mlir::populateSparseTensorConversionPatterns(
   patterns
       .add<SparseReturnConverter, SparseTensorToDimSizeConverter,
            SparseCastConverter, SparseTensorNewConverter,
-           SparseReshapeConverter<tensor::ExpandShapeOp>,
-           SparseReshapeConverter<tensor::CollapseShapeOp>,
            SparseTensorConcatConverter, SparseTensorAllocConverter,
            SparseTensorEmptyConverter, SparseTensorDeallocConverter,
            SparseTensorToPositionsConverter, SparseTensorToCoordinatesConverter,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 277903dc55b7432..55db34f7050d3f3 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -1475,15 +1475,15 @@ void mlir::populatePostSparsificationRewriting(RewritePatternSet &patterns,
                                                bool enableForeach,
                                                bool enableConvert) {
   patterns.add<ReshapeRewriter<tensor::ExpandShapeOp>,
-               ReshapeRewriter<tensor::CollapseShapeOp>, TensorReshapeRewriter>(
-      patterns.getContext());
+               ReshapeRewriter<tensor::CollapseShapeOp>,
+               Sparse2SparseReshapeRewriter<tensor::ExpandShapeOp>,
+               Sparse2SparseReshapeRewriter<tensor::CollapseShapeOp>,
+               TensorReshapeRewriter>(patterns.getContext());
   if (enableForeach)
     patterns.add<ForeachRewriter>(patterns.getContext());
   // TODO: If RT not enabled, rewrite concatenate ops, etc here.
   if (!enableRT) {
-    patterns.add<ConcatenateRewriter, NewRewriter, OutRewriter,
-                 Sparse2SparseReshapeRewriter<tensor::ExpandShapeOp>,
-                 Sparse2SparseReshapeRewriter<tensor::CollapseShapeOp>>(
+    patterns.add<ConcatenateRewriter, NewRewriter, OutRewriter>(
         patterns.getContext());
     if (enableConvert)
       patterns.add<ConvertRewriter>(patterns.getContext());
diff --git a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir b/mlir/test/Dialect/SparseTensor/post_rewriting.mlir
deleted file mode 100644
index 93fc610b64b3359..000000000000000
--- a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir
+++ /dev/null
@@ -1,99 +0,0 @@
-// RUN: mlir-opt %s -post-sparsification-rewrite | FileCheck %s
-
-#SparseVector = #sparse_tensor.encoding<{
-  map = (d0) -> (d0 : compressed)
-}>
-
-#SparseMatrix = #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d0 : compressed, d1 : compressed)
-}>
-
-// CHECK-LABEL: func.func @expand_dense(
-// CHECK-SAME:    %[[A:.*]]: tensor<12xf64>) -> tensor<3x4xf64> {
-// CHECK:         %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64> into tensor<3x4xf64>
-// CHECK:         return %[[E]] : tensor<3x4xf64>
-// CHECK:       }
-func.func @expand_dense(%arg0: tensor<12xf64>) -> tensor<3x4xf64> {
-  %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64>
-  return %0 : tensor<3x4xf64>
-}
-
-// CHECK-LABEL: func.func @expand_from_sparse(
-// CHECK-SAME:    %[[A:.*]]: tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<3x4xf64> {
-// CHECK:         %[[C:.*]] = sparse_tensor.convert %[[A]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> to tensor<12xf64>
-// CHECK:         %[[E:.*]] = tensor.expand_shape %[[C]] {{.*}} : tensor<12xf64> into tensor<3x4xf64>
-// CHECK:         return %[[E]] : tensor<3x4xf64>
-// CHECK:       }
-func.func @expand_from_sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64> {
-  %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64>
-  return %0 : tensor<3x4xf64>
-}
-
-// CHECK-LABEL: func.func @expand_to_sparse(
-// CHECK-SAME:    %[[A:.*]]: tensor<12xf64>) -> tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK:         %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64> into tensor<3x4xf64>
-// CHECK:         %[[C:.*]] = sparse_tensor.convert %[[E]] : tensor<3x4xf64> to tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:         return %[[C]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:       }
-func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix> {
-  %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64, #SparseMatrix>
-  return %0 : tensor<3x4xf64, #SparseMatrix>
-}
-
-//
-// Not rewritten, needs conversion.
-//
-// CHECK-LABEL:   func.func @expand_sparse2sparse(
-// CHECK-SAME:    %[[A:.*]]: tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK:         %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:         return %[[E]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:       }
-func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
-  %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
-  return %0 : tensor<3x4xf64, #SparseMatrix>
-}
-
-// CHECK-LABEL: func.func @collapse_dense(
-// CHECK-SAME:    %[[A:.*]]: tensor<3x4xf64>) -> tensor<12xf64> {
-// CHECK:         %[[R:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64> into tensor<12xf64>
-// CHECK:         return %[[R]] : tensor<12xf64>
-// CHECK:       }
-func.func @collapse_dense(%arg0: tensor<3x4xf64>) -> tensor<12xf64> {
-  %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64>
-  return %0 : tensor<12xf64>
-}
-
-// CHECK-LABEL: func.func @collapse_from_sparse(
-// CHECK-SAME:    %[[A:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<12xf64> {
-// CHECK:         %[[C:.*]] = sparse_tensor.convert %[[A]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> to tensor<3x4xf64>
-// CHECK:         %[[R:.*]] = tensor.collapse_shape %[[C]] {{.*}} : tensor<3x4xf64> into tensor<12xf64>
-// CHECK:         return %[[R]] : tensor<12xf64>
-// CHECK:       }
-func.func @collapse_from_sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64> {
-  %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64>
-  return %0 : tensor<12xf64>
-}
-
-// CHECK-LABEL: func.func @collapse_to_sparse(
-// CHECK-SAME:    %[[A:.*]]: tensor<3x4xf64>) -> tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK:         %[[R:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64> into tensor<12xf64>
-// CHECK:         %[[C:.*]] = sparse_tensor.convert %[[R]] : tensor<12xf64> to tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:         return %[[C]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:       }
-func.func @collapse_to_sparse(%arg0: tensor<3x4xf64>) -> tensor<12xf64, #SparseVector> {
-  %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64, #SparseVector>
-  return %0 : tensor<12xf64, #SparseVector>
-}
-
-//
-// Not rewritten, needs conversion.
-//
-// CHECK-LABEL:   func.func @collapse_sparse2sparse(
-// CHECK-SAME:    %[[A:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK:         %[[C:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:         return %[[C]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK:       }
-func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
-  %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector>
-  return %0 : tensor<12xf64, #SparseVector>
-}
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
index 5ecc92231068c1d..7f8edac15302616 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
@@ -1,7 +1,8 @@
 // RUN: mlir-opt %s | mlir-opt | FileCheck %s --check-prefix=CHECK-ROUND
-// RUN: mlir-opt %s --sparse-tensor-conversion --cse --canonicalize | FileCheck %s --check-prefix=CHECK-CONV
+// RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=true enable-convert=false" \
+// RUN: --cse --canonicalize  | FileCheck %s
 // RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" \
-// RUN: --cse --canonicalize  | FileCheck %s --check-prefix=CHECK-RWT
+// RUN: --cse --canonicalize  | FileCheck %s
 
 #SparseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
 #SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
@@ -14,55 +15,28 @@
 //      CHECK-ROUND:  %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //      CHECK-ROUND:  return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
-// conversion:
-//
-// CHECK-CONV-LABEL: func.func @sparse_expand(
-// CHECK-CONV-DAG:  %[[C0:.*]] = arith.constant 0 : index
-// CHECK-CONV-DAG:  %[[C1:.*]] = arith.constant 1 : index
-// CHECK-CONV-DAG:  %[[C10:.*]] = arith.constant 10 : index
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV:      scf.while : () -> () {
-// CHECK-CONV:        call @getNextF64
-// CHECK-CONV:        scf.condition
-// CHECK-CONV:      } do {
-// CHECK-CONV:        %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<1xindex>
-// CHECK-CONV:        %[[D:.*]] = arith.divui %[[X]], %[[C10]] : index
-// CHECK-CONV:        %[[R:.*]] = arith.remui %[[X]], %[[C10]] : index
-// CHECK-CONV:        memref.store %[[D]], %{{.*}}[%[[C0]]] : memref<2xindex>
-// CHECK-CONV:        memref.store %[[R]], %{{.*}}[%[[C1]]] : memref<2xindex>
-// CHECK-CONV:        call @addEltF64
-// CHECK-CONV:        scf.yield
-// CHECK-CONV:      }
-// CHECK-CONV:      %[[N:.*]] = call @newSparseTensor
-// CHECK-CONV:      call @delSparseTensorCOOF64
-// CHECK-CONV:      call @delSparseTensorIteratorF64
-// CHECK-CONV:      return %[[N]] : !llvm.ptr<i8>
-//
-// rewrite for codegen:
-//
-// CHECK-RWT-LABEL:   func.func @sparse_expand(
-// CHECK-RWT-SAME:    %[[S:.*]]:
-// CHECK-RWT-DAG:     %[[C10:.*]] = arith.constant 10 : index
-// CHECK-RWT-DAG:     %[[C0:.*]] = arith.constant 0 : index
-// CHECK-RWT-DAG:     %[[C1:.*]] = arith.constant 1 : index
-// CHECK-RWT:         %[[B:.*]] = bufferization.alloc_tensor()
-// CHECK-RWT:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[V:.*]] = sparse_tensor.values %[[S]]
-// CHECK-RWT:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
-// CHECK-RWT:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
-// CHECK-RWT:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R:.*]] = %[[B]])
-// CHECK-RWT:           %[[SI:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT:           %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[I]]] : memref<?xf64>
-// CHECK-RWT:           %[[DI0:.*]] = arith.divui %[[SI]], %[[C10]] : index
-// CHECK-RWT:           %[[DI1:.*]] = arith.remui %[[SI]], %[[C10]] : index
-// CHECK-RWT:           %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R]]{{\[}}%[[DI0]], %[[DI1]]]
-// CHECK-RWT:           scf.yield %[[NT:.*]]
-// CHECK-RWT:         }
-// CHECK-RWT:         %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
-// CHECK-RWT-NOT:     sparse_tensor.convert
-// CHECK-RWT:         return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-LABEL:   func.func @sparse_expand(
+// CHECK-SAME:    %[[S:.*]]:
+// CHECK-DAG:     %[[C10:.*]] = arith.constant 10 : index
+// CHECK-DAG:     %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG:     %[[C1:.*]] = arith.constant 1 : index
+// CHECK:         %[[B:.*]] = bufferization.alloc_tensor()
+// CHECK:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
+// CHECK:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
+// CHECK:         %[[V:.*]] = sparse_tensor.values %[[S]]
+// CHECK:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
+// CHECK:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
+// CHECK:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R:.*]] = %[[B]])
+// CHECK:           %[[SI:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK:           %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[I]]] : memref<?xf64>
+// CHECK:           %[[DI0:.*]] = arith.divui %[[SI]], %[[C10]] : index
+// CHECK:           %[[DI1:.*]] = arith.remui %[[SI]], %[[C10]] : index
+// CHECK:           %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R]]{{\[}}%[[DI0]], %[[DI1]]]
+// CHECK:           scf.yield %[[NT:.*]]
+// CHECK:         }
+// CHECK:         %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
+// CHECK-NOT:     sparse_tensor.convert
+// CHECK:         return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10xf64, #SparseMatrix> {
   %0 = tensor.expand_shape %arg0 [[0, 1]] :
@@ -78,64 +52,37 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
 //      CHECK-ROUND:  %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //      CHECK-ROUND:  return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
-// conversion:
-//
-// CHECK-CONV-LABEL: func.func @sparse_collapse(
-// CHECK-CONV-DAG:  %[[C0:.*]] = arith.constant 0 : index
-// CHECK-CONV-DAG:  %[[C1:.*]] = arith.constant 1 : index
-// CHECK-CONV-DAG:  %[[C10:.*]] = arith.constant 10 : index
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV:      scf.while : () -> () {
-// CHECK-CONV:        call @getNextF64
-// CHECK-CONV:        scf.condition
-// CHECK-CONV:      } do {
-// CHECK-CONV:        %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<2xindex>
-// CHECK-CONV:        %[[Y:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<2xindex>
-// CHECK-CONV:        %[[M:.*]] = arith.muli %[[X]], %[[C10]] : index
-// CHECK-CONV:        %[[A:.*]] = arith.addi %[[M]], %[[Y]] : index
-// CHECK-CONV:        memref.store %[[A]], %{{.*}}[%[[C0]]] : memref<1xindex>
-// CHECK-CONV:        call @addEltF64
-// CHECK-CONV:        scf.yield
-// CHECK-CONV:      }
-// CHECK-CONV:      %[[N:.*]] = call @newSparseTensor
-// CHECK-CONV:      call @delSparseTensorCOOF64
-// CHECK-CONV:      call @delSparseTensorIteratorF64
-// CHECK-CONV:      return %[[N]] : !llvm.ptr<i8>
-//
-// rewrite for codegen:
-//
-// CHECK-RWT-LABEL:   func.func @sparse_collapse(
-// CHECK-RWT-SAME:    %[[S:.*]]:
-// CHECK-RWT-DAG:     %[[C10:.*]] = arith.constant 10 : index
-// CHECK-RWT-DAG:     %[[C0:.*]] = arith.constant 0 : index
-// CHECK-RWT-DAG:     %[[C1:.*]] = arith.constant 1 : index
-// CHECK-RWT:         %[[B:.*]] = bufferization.alloc_tensor()
-// CHECK-RWT:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index}
-// CHECK-RWT:         %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
-// CHECK-RWT:         %[[V:.*]] = sparse_tensor.values %[[S]]
-// CHECK-RWT:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
-// CHECK-RWT:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
-// CHECK-RWT:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[A0:.*]] = %[[B]])
-// CHECK-RWT:           %[[SI0:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT-DAG:       %[[S1:.*]] = memref.load %[[P1]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT-DAG:       %[[PE1:.*]] = arith.addi %[[I]], %[[C1]] : index
-// CHECK-RWT:           %[[E1:.*]] = memref.load %[[P1]]{{\[}}%[[PE1]]] : memref<?xindex>
-// CHECK-RWT:           %[[RET_1:.*]] = scf.for %[[J:.*]] = %[[S1]] to %[[E1]] step %[[C1]] iter_args(%[[A1:.*]] = %[[A0]])
-// CHECK-RWT:             %[[SI1:.*]] = memref.load %[[I1]]{{\[}}%[[J]]] : memref<?xindex>
-// CHECK-RWT:             %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[J]]] : memref<?xf64>
-// CHECK-RWT:             %[[T:.*]] = arith.muli %[[SI0]], %[[C10]] : index
-// CHECK-RWT:             %[[DI:.*]] = arith.addi %[[T]], %[[SI1]] : index
-// CHECK-RWT:             %[[R1:.*]] = sparse_tensor.insert %[[SV]] into %[[A1]]{{\[}}%[[DI]]]
-// CHECK-RWT              scf.yield %[[R1]]
-// CHECK-RWT            }
-// CHECK-RWT            scf.yield %[[RET_1]]
-// CHECK-RWT:         }
-// CHECK-RWT:        %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
-// CHECK-RWT-NOT:    sparse_tensor.convert
-// CHECK-RWT:        return %[[NT1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-LABEL:   func.func @sparse_collapse(
+// CHECK-SAME:    %[[S:.*]]:
+// CHECK-DAG:     %[[C10:.*]] = arith.constant 10 : index
+// CHECK-DAG:     %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG:     %[[C1:.*]] = arith.constant 1 : index
+// CHECK:         %[[B:.*]] = bufferization.alloc_tensor()
+// CHECK:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
+// CHECK:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
+// CHECK:         %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index}
+// CHECK:         %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
+// CHECK:         %[[V:.*]] = sparse_tensor.values %[[S]]
+// CHECK:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
+// CHECK:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
+// CHECK:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[A0:.*]] = %[[B]])
+// CHECK:           %[[SI0:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK-DAG:       %[[S1:.*]] = memref.load %[[P1]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK-DAG:       %[[PE1:.*]] = arith.addi %[[I]], %[[C1]] : index
+// CHECK:           %[[E1:.*]] = memref.load %[[P1]]{{\[}}%[[PE1]]] : memref<?xindex>
+// CHECK:           %[[RET_1:.*]] = scf.for %[[J:.*]] = %[[S1]] to %[[E1]] step %[[C1]] iter_args(%[[A1:.*]] = %[[A0]])
+// CHECK:             %[[SI1:.*]] = memref.load %[[I1]]{{\[}}%[[J]]] : memref<?xindex>
+// CHECK:             %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[J]]] : memref<?xf64>
+// CHECK:             %[[T:.*]] = arith.muli %[[SI0]], %[[C10]] : index
+// CHECK:             %[[DI:.*]] = arith.addi %[[T]], %[[SI1]] : index
+// CHECK:             %[[R1:.*]] = sparse_tensor.insert %[[SV]] into %[[A1]]{{\[}}%[[DI]]]
+// CHECK:             scf.yield %[[R1]]
+// CHECK:           }
+// CHECK:           scf.yield %[[RET_1]]
+// CHECK:         }
+// CHECK:        %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
+// CHECK-NOT:    sparse_tensor.convert
+// CHECK:        return %[[NT1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<100xf64, #SparseVector> {
   %0 = tensor.collapse_shape %arg0 [[0, 1]] :
@@ -151,66 +98,34 @@ func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<10
 //      CHECK-ROUND:  %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //      CHECK-ROUND:  return %[[E]] : tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
-// conversion:
-//
-// CHECK-CONV-LABEL: func.func @dynamic_sparse_expand(
-// CHECK-CONV-DAG:  %[[C0:.*]] = arith.constant 0 : index
-// CHECK-CONV-DAG:  %[[C1:.*]] = arith.constant 1 : index
-// CHECK-CONV-DAG:  %[[C10:.*]] = arith.constant 10 : index
-// CHECK-CONV-DAG:  %[[D1:.*]] = arith.divui %{{.*}}, %[[C10]] : index
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV:      scf.while : () -> () {
-// CHECK-CONV:        call @getNextF64
-// CHECK-CONV:        scf.condition
-// CHECK-CONV:      } do {
-// CHECK-CONV:        %[[L:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<1xindex>
-// CHECK-CONV:        %[[M:.*]] = arith.muli %[[D1]], %[[C10]] : index
-// CHECK-CONV:        %[[D2:.*]] = arith.divui %[[M]], %[[D1]] : index
-// CHECK-CONV:        %[[D3:.*]] = arith.divui %[[L]], %[[D2]] : index
-// CHECK-CONV:        %[[R:.*]] = arith.remui %[[L]], %[[D2]] : index
-// CHECK-CONV:        %[[D4:.*]] = arith.divui %[[D2]], %[[C10]] : index
-// CHECK-CONV:        %[[D5:.*]] = arith.divui %[[R]], %[[D4]] : index
-// CHECK-CONV:        memref.store %[[D3]], %{{.*}}[%[[C0]]] : memref<2xindex>
-// CHECK-CONV:        memref.store %[[D5]], %{{.*}}[%[[C1]]] : memref<2xindex>
-// CHECK-CONV:        call @addEltF64
-// CHECK-CONV:        scf.yield
-// CHECK-CONV:      }
-// CHECK-CONV:      %[[N:.*]] = call @newSparseTensor
-// CHECK-CONV:      call @delSparseTensorCOOF64
-// CHECK-CONV:      call @delSparseTensorIteratorF64
-// CHECK-CONV:      return %[[N]] : !llvm.ptr<i8>
-//
-// rewrite for codegen:
-//
-// CHECK-RWT-LABEL:   func.func @dynamic_sparse_expand(
-// CHECK-RWT-SAME:    %[[S:.*]]:
-// CHECK-RWT-DAG:     %[[C10:.*]] = arith.constant 10 : index
-// CHECK-RWT-DAG:     %[[C0:.*]] = arith.constant 0 : index
-// CHECK-RWT-DAG:     %[[C1:.*]] = arith.constant 1 : index
-// CHECK-RWT:         %[[SD:.*]] = tensor.dim %[[S]], %[[C0]]
-// CHECK-RWT:         %[[DD0:.*]] = arith.divui %[[SD]], %[[C10]] : index
-// CHECK-RWT:         %[[B:.*]] = bufferization.alloc_tensor(%[[DD0]])
-// CHECK-RWT:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[V:.*]] = sparse_tensor.values %[[S]]
-// CHECK-RWT:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
-// CHECK-RWT:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
-// CHECK-RWT:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R:.*]] = %[[B]])
-// CHECK-RWT:           %[[SI:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT:           %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[I]]] : memref<?xf64>
-// CHECK-RWT:           %[[T1:.*]] = arith.muli %[[DD0]], %[[C10]] : index
-// CHECK-RWT:           %[[T2:.*]] = arith.divui %[[T1]], %[[DD0]] : index
-// CHECK-RWT:           %[[DI0:.*]] = arith.divui %[[SI]], %[[T2]] : index
-// CHECK-RWT:           %[[T3:.*]] = arith.remui %[[SI]], %[[T2]] : index
-// CHECK-RWT:           %[[T4:.*]] = arith.divui %[[T2]], %[[C10]] : index
-// CHECK-RWT:           %[[DI1:.*]] = arith.divui %[[T3]], %[[T4]] : index
-// CHECK-RWT:           %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R]]{{\[}}%[[DI0]], %[[DI1]]]
-// CHECK-RWT:           scf.yield %[[NT]]
-// CHECK-RWT:         }
-// CHECK-RWT:         %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
-// CHECK-RWT-NOT:     sparse_tensor.convert
-// CHECK-RWT:         return %[[NT1]] : tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-LABEL:   func.func @dynamic_sparse_expand(
+// CHECK-SAME:    %[[S:.*]]:
+// CHECK-DAG:     %[[C10:.*]] = arith.constant 10 : index
+// CHECK-DAG:     %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG:     %[[C1:.*]] = arith.constant 1 : index
+// CHECK:         %[[SD:.*]] = tensor.dim %[[S]], %[[C0]]
+// CHECK:         %[[DD0:.*]] = arith.divui %[[SD]], %[[C10]] : index
+// CHECK:         %[[B:.*]] = bufferization.alloc_tensor(%[[DD0]])
+// CHECK:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
+// CHECK:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
+// CHECK:         %[[V:.*]] = sparse_tensor.values %[[S]]
+// CHECK:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
+// CHECK:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
+// CHECK:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R:.*]] = %[[B]])
+// CHECK:           %[[SI:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK:           %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[I]]] : memref<?xf64>
+// CHECK:           %[[T1:.*]] = arith.muli %[[DD0]], %[[C10]] : index
+// CHECK:           %[[T2:.*]] = arith.divui %[[T1]], %[[DD0]] : index
+// CHECK:           %[[DI0:.*]] = arith.divui %[[SI]], %[[T2]] : index
+// CHECK:           %[[T3:.*]] = arith.remui %[[SI]], %[[T2]] : index
+// CHECK:           %[[T4:.*]] = arith.divui %[[T2]], %[[C10]] : index
+// CHECK:           %[[DI1:.*]] = arith.divui %[[T3]], %[[T4]] : index
+// CHECK:           %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R]]{{\[}}%[[DI0]], %[[DI1]]]
+// CHECK:           scf.yield %[[NT]]
+// CHECK:         }
+// CHECK:         %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
+// CHECK-NOT:     sparse_tensor.convert
+// CHECK:         return %[[NT1]] : tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>) -> tensor<?x10xf64, #SparseMatrix> {
   %0 = tensor.expand_shape %arg0 [[0, 1]] :
@@ -226,73 +141,42 @@ func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>) -> tensor<
 //      CHECK-ROUND:  %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x?xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //      CHECK-ROUND:  return %[[C]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
-// conversion:
-//
-// CHECK-CONV-LABEL: func.func @dynamic_sparse_collapse(
-// CHECK-CONV-DAG:  %[[C0:.*]] = arith.constant 0 : index
-// CHECK-CONV-DAG:  %[[C1:.*]] = arith.constant 1 : index
-// CHECK-CONV-DAG:  %[[C10:.*]] = arith.constant 10 : index
-// CHECK-CONV-DAG:  %[[M1:.*]] = arith.muli %{{.*}}, %[[C10]] : index
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV-DAG:  call @newSparseTensor
-// CHECK-CONV:      scf.while : () -> () {
-// CHECK-CONV:        call @getNextF64
-// CHECK-CONV:        scf.condition
-// CHECK-CONV:      } do {
-// CHECK-CONV:        %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<2xindex>
-// CHECK-CONV:        %[[Y:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<2xindex>
-// CHECK-CONV:        %[[D1:.*]] = arith.divui %[[M1]], %[[C10]] : index
-// CHECK-CONV:        %[[M2:.*]] = arith.muli %[[X]], %[[D1]] : index
-// CHECK-CONV:        %[[D2:.*]] = arith.divui %[[D1]], %{{.*}} : index
-// CHECK-CONV:        %[[M3:.*]] = arith.muli %[[Y]], %[[D2]] : index
-// CHECK-CONV:        %[[A:.*]] = arith.addi %[[M2]], %[[M3]] : index
-// CHECK-CONV:        memref.store %[[A]], %{{.*}}[%[[C0]]] : memref<1xindex>
-// CHECK-CONV:        call @addEltF64
-// CHECK-CONV:        scf.yield
-// CHECK-CONV:      }
-// CHECK-CONV:      %[[N:.*]] = call @newSparseTensor
-// CHECK-CONV:      call @delSparseTensorCOOF64
-// CHECK-CONV:      call @delSparseTensorIteratorF64
-// CHECK-CONV:      return %[[N]] : !llvm.ptr<i8>
-//
-// rewrite for codegen:
-//
-// CHECK-RWT-LABEL:   func.func @dynamic_sparse_collapse(
-// CHECK-RWT-SAME:    %[[S:.*]]:
-// CHECK-RWT-DAG:     %[[C10:.*]] = arith.constant 10 : index
-// CHECK-RWT-DAG:     %[[C0:.*]] = arith.constant 0 : index
-// CHECK-RWT-DAG:     %[[C1:.*]] = arith.constant 1 : index
-// CHECK-RWT:         %[[SD1:.*]] = tensor.dim %[[S]], %[[C1]]
-// CHECK-RWT:         %[[DD0:.*]] = arith.muli %[[SD1]], %[[C10]] : index
-// CHECK-RWT:         %[[B:.*]] = bufferization.alloc_tensor(%[[DD0]])
-// CHECK-RWT:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
-// CHECK-RWT:         %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index}
-// CHECK-RWT:         %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
-// CHECK-RWT:         %[[V:.*]] = sparse_tensor.values %[[S]]
-// CHECK-RWT:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
-// CHECK-RWT:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
-// CHECK-RWT:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R0:.*]] = %[[B]])
-// CHECK-RWT:           %[[SI0:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT-DAG:       %[[S1:.*]] = memref.load %[[P1]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT-DAG:       %[[PE1:.*]] = arith.addi %[[I]], %[[C1]] : index
-// CHECK-RWT:           %[[E1:.*]] = memref.load %[[P1]]{{\[}}%[[PE1]]] : memref<?xindex>
-// CHECK-RWT:           %[[RET_1:.*]] = scf.for %[[J:.*]] = %[[S1]] to %[[E1]] step %[[C1]] iter_args(%[[R1:.*]] = %[[R0]])
-// CHECK-RWT:             %[[SI1:.*]] = memref.load %[[I1]]{{\[}}%[[J]]] : memref<?xindex>
-// CHECK-RWT:             %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[J]]] : memref<?xf64>
-// CHECK-RWT:             %[[T1:.*]] = arith.divui %[[DD0]], %[[C10]] : index
-// CHECK-RWT:             %[[T2:.*]] = arith.muli %[[SI0]], %[[T1]] : index
-// CHECK-RWT:             %[[T3:.*]] = arith.divui %[[T1]], %[[SD1]] : index
-// CHECK-RWT:             %[[T4:.*]] = arith.muli %[[SI1]], %[[T3]] : index
-// CHECK-RWT:             %[[DI:.*]] = arith.addi %[[T2]], %[[T4]] : index
-// CHECK-RWT:             %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R1]]{{\[}}%[[DI]]]
-// CHECK-RWT              scf.yield %[[NT]]
-// CHECK-RWT            }
-// CHECK-RWT            scf.yield %[[RET_1]]
-// CHECK-RWT:        }
-// CHECK-RWT:        %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
-// CHECK-RWT-NOT:    sparse_tensor.convert
-// CHECK-RWT:        return %[[NT1]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-LABEL:   func.func @dynamic_sparse_collapse(
+// CHECK-SAME:    %[[S:.*]]:
+// CHECK-DAG:     %[[C10:.*]] = arith.constant 10 : index
+// CHECK-DAG:     %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG:     %[[C1:.*]] = arith.constant 1 : index
+// CHECK:         %[[SD1:.*]] = tensor.dim %[[S]], %[[C1]]
+// CHECK:         %[[DD0:.*]] = arith.muli %[[SD1]], %[[C10]] : index
+// CHECK:         %[[B:.*]] = bufferization.alloc_tensor(%[[DD0]])
+// CHECK:         %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
+// CHECK:         %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
+// CHECK:         %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index}
+// CHECK:         %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
+// CHECK:         %[[V:.*]] = sparse_tensor.values %[[S]]
+// CHECK:         %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
+// CHECK:         %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
+// CHECK:         %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R0:.*]] = %[[B]])
+// CHECK:           %[[SI0:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK-DAG:       %[[S1:.*]] = memref.load %[[P1]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK-DAG:       %[[PE1:.*]] = arith.addi %[[I]], %[[C1]] : index
+// CHECK:           %[[E1:.*]] = memref.load %[[P1]]{{\[}}%[[PE1]]] : memref<?xindex>
+// CHECK:           %[[RET_1:.*]] = scf.for %[[J:.*]] = %[[S1]] to %[[E1]] step %[[C1]] iter_args(%[[R1:.*]] = %[[R0]])
+// CHECK:             %[[SI1:.*]] = memref.load %[[I1]]{{\[}}%[[J]]] : memref<?xindex>
+// CHECK:             %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[J]]] : memref<?xf64>
+// CHECK:             %[[T1:.*]] = arith.divui %[[DD0]], %[[C10]] : index
+// CHECK:             %[[T2:.*]] = arith.muli %[[SI0]], %[[T1]] : index
+// CHECK:             %[[T3:.*]] = arith.divui %[[T1]], %[[SD1]] : index
+// CHECK:             %[[T4:.*]] = arith.muli %[[SI1]], %[[T3]] : index
+// CHECK:             %[[DI:.*]] = arith.addi %[[T2]], %[[T4]] : index
+// CHECK:             %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R1]]{{\[}}%[[DI]]]
+// CHECK:             scf.yield %[[NT]]
+// CHECK:           }
+// CHECK:           scf.yield %[[RET_1]]
+// CHECK:        }
+// CHECK:        %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
+// CHECK-NOT:    sparse_tensor.convert
+// CHECK:        return %[[NT1]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 func.func @dynamic_sparse_collapse(%arg0: tensor<10x?xf64, #SparseMatrix>) -> tensor<?xf64, #SparseVector> {
   %0 = tensor.collapse_shape %arg0 [[0, 1]] :

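Note on the test change above: both RUN pipelines
(enable-runtime-library=true and =false) now feed the same default CHECK
prefix, which is what allows the old CHECK-CONV and CHECK-RWT variants to
collapse into a single set of checks. A rough C++ model of the loop nest
those checks describe for @sparse_collapse, with a hand-rolled CSR-like
layout and illustrative names (not the actual generated code):

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  int main() {
    // 10x10 input with 3 stored entries: per-level positions and
    // coordinates, plus the values array.
    std::vector<uint64_t> pos0 = {0, 2};    // level-0 segment bounds
    std::vector<uint64_t> crd0 = {1, 4};    // stored rows
    std::vector<uint64_t> pos1 = {0, 2, 3}; // per-row column bounds
    std::vector<uint64_t> crd1 = {3, 9, 0}; // stored columns
    std::vector<double> val = {1.0, 2.0, 3.0};

    // The emitted scf.for nest: walk level 0, then level 1, inserting
    // each value at the linearized coordinate crd0[i] * 10 + crd1[j].
    for (uint64_t i = pos0[0]; i < pos0[1]; ++i)
      for (uint64_t j = pos1[i]; j < pos1[i + 1]; ++j) {
        uint64_t di = crd0[i] * 10 + crd1[j]; // muli + addi
        printf("insert %g at %llu\n", val[j], (unsigned long long)di);
      }
    return 0;
  }
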
From b49cea6fcb2a52352bdf4bdbf8ba69e065351265 Mon Sep 17 00:00:00 2001
From: Peiming Liu <peiming at google.com>
Date: Mon, 2 Oct 2023 23:27:07 +0000
Subject: [PATCH 2/2] remove dead code after cleanup

---
 .../Transforms/SparseTensorConversion.cpp     | 101 ------------------
 1 file changed, 101 deletions(-)

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 75045d2378dec00..987706f2f127ab8 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -467,94 +467,6 @@ static bool canUseDirectConversion(ArrayRef<DimLevelType> dimTypes) {
   return true;
 }
 
-/// Helper method to translate coordinates during a reshaping operation.
-/// TODO: provide as general utility to MLIR at large?
-static void reshapeCoords(Location loc, OpBuilder &builder,
-                          ArrayRef<ReassociationIndices> reassociation,
-                          ValueRange srcSizes, Value srcCoords,
-                          ValueRange dstSizes, Value dstCoords) {
-  const auto srcCvs = loadAll(builder, loc, srcSizes.size(), srcCoords);
-  SmallVector<Value> dstCvs;
-  reshapeCvs(builder, loc, reassociation, srcSizes, srcCvs, dstSizes, dstCvs);
-  assert(dstCvs.size() == dstSizes.size());
-  storeAll(builder, loc, dstCoords, dstCvs);
-}
-
-/// Generate code for a general sparse to sparse reshaping operation.
-/// Note that unlike dense reshaping (which can be done with a "cheap"
-/// change of view), sparse reshaping is currently done with actual
-/// data shuffling.
-///
-/// TODO: proportional to nnz, but still a lot of data movement
-///       https://github.com/llvm/llvm-project/issues/56477
-///
-///   iter = src->toCOO();
-///   coo = newSparseCOO()
-///   while (elem = iter->getNext()) {
-///     coo->add(reshape(elem.coords), elem.value)
-///   }
-///   s = newSparseTensor(coo)
-template <typename ReshapeOp>
-static LogicalResult
-genSparse2SparseReshape(ReshapeOp op, typename ReshapeOp::Adaptor adaptor,
-                        ConversionPatternRewriter &rewriter) {
-  Location loc = op.getLoc();
-  const auto srcTp = getSparseTensorType(op.getSrc());
-  const auto dstTp = getSparseTensorType(op.getResult());
-  if (!srcTp.hasEncoding() || !dstTp.hasEncoding())
-    return failure();
-  Type elemTp = srcTp.getElementType();
-  assert(elemTp == dstTp.getElementType() &&
-         "reshape should not change element type");
-  // Start an iterator over the source tensor (in coordinate order).
-  SmallVector<Value> srcDimSizes =
-      getDimSizes(rewriter, loc, srcTp, adaptor.getSrc());
-  NewCallParams params(rewriter, loc);
-  Value iter = params.genBuffers(srcTp.withoutDimToLvl(), srcDimSizes)
-                   .genNewCall(Action::kToIterator, adaptor.getSrc());
-  // Start a new COO for the destination tensor.
-  SmallVector<Value> dstDimSizes;
-  if (dstTp.hasStaticDimShape())
-    // Static "shapes" are in fact "sizes".
-    fillDimShape(rewriter, loc, dstTp, dstDimSizes);
-  else
-    genReshapeDstShape(rewriter, loc, dstDimSizes, srcDimSizes,
-                       dstTp.getDimShape(), op.getReassociationIndices());
-  const Value coo =
-      params.genBuffers(dstTp, dstDimSizes).genNewCall(Action::kEmptyCOO);
-  const Value dstDimToLvl = params.getDimToLvl();
-  // Construct a while loop over the iterator.
-  const Type iTp = rewriter.getIndexType();
-  const Value srcDimCoords = genAlloca(rewriter, loc, srcTp.getDimRank(), iTp);
-  const Value dstDimCoords = genAlloca(rewriter, loc, dstTp.getDimRank(), iTp);
-  const Value elemPtr = genAllocaScalar(rewriter, loc, elemTp);
-  const SmallVector<Value> noArgs;
-  const SmallVector<Type> noTypes;
-  auto whileOp = rewriter.create<scf::WhileOp>(loc, noTypes, noArgs);
-  Block *before = rewriter.createBlock(&whileOp.getBefore(), {}, noTypes);
-  rewriter.setInsertionPointToEnd(before);
-  Value cond = genGetNextCall(rewriter, loc, iter, srcDimCoords, elemPtr);
-  rewriter.create<scf::ConditionOp>(loc, cond, before->getArguments());
-  // Translate coordinates from source to target and insert. Note that we do
-  // not need to store the value in elemPtr, as the value is still there.
-  Block *after = rewriter.createBlock(&whileOp.getAfter(), {}, noTypes);
-  rewriter.setInsertionPointToStart(after);
-  // We probably don't need these assertions, but better safe than sorry.
-  assert(srcTp.getDimRank() == srcDimSizes.size());
-  assert(dstTp.getDimRank() == dstDimSizes.size());
-  reshapeCoords(loc, rewriter, op.getReassociationIndices(), srcDimSizes,
-                srcDimCoords, dstDimSizes, dstDimCoords);
-  genAddEltCall(rewriter, loc, elemTp, coo, elemPtr, dstDimCoords, dstDimToLvl);
-  rewriter.create<scf::YieldOp>(loc);
-  // Final call to construct sparse tensor storage and free temporary resources.
-  rewriter.setInsertionPointAfter(whileOp);
-  Value dst = params.genNewCall(Action::kFromCOO, coo);
-  genDelCOOCall(rewriter, loc, elemTp, coo);
-  genDelIteratorCall(rewriter, loc, elemTp, iter);
-  rewriter.replaceOp(op, dst);
-  return success();
-}
-
 // Generates a while loop that iterates over the COO list extracted
 // from `t`, using `bodyBuilder` to build the loop body.
 //   while (elem = coo->getNext()) {
@@ -713,19 +625,6 @@ class SparseCastConverter : public OpConversionPattern<tensor::CastOp> {
   }
 };
 
-/// Sparse conversion rule for a reshape operator.
-template <typename ReshapeOp>
-class SparseReshapeConverter : public OpConversionPattern<ReshapeOp> {
-public:
-  using OpAdaptor = typename OpConversionPattern<ReshapeOp>::OpAdaptor;
-  using OpConversionPattern<ReshapeOp>::OpConversionPattern;
-  LogicalResult
-  matchAndRewrite(ReshapeOp op, OpAdaptor adaptor,
-                  ConversionPatternRewriter &rewriter) const override {
-    return genSparse2SparseReshape(op, adaptor, rewriter);
-  }
-};
-
 /// Sparse conversion rule for the new operator.
 class SparseTensorNewConverter : public OpConversionPattern<NewOp> {
 public:


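For context on what the second patch deletes: genSparse2SparseReshape went
through the runtime library by iterating the source as COO, remapping each
coordinate, and rebuilding the destination from the new COO, exactly as the
pseudocode in its removed doc comment describes. A toy C++ model of that
data movement, with a std::map standing in for the runtime's COO and
illustrative coordinates (a 3x4 -> 12 collapse):

  #include <cassert>
  #include <cstdint>
  #include <map>
  #include <utility>

  int main() {
    // Source: sparse 3x4 matrix as COO, keyed by (row, col).
    std::map<std::pair<uint64_t, uint64_t>, double> src = {
        {{0, 1}, 1.0}, {{2, 3}, 2.0}};

    // "while (elem = iter->getNext())": visit every stored element,
    // translate its coordinates, and add it to the destination COO.
    std::map<uint64_t, double> dst; // collapsed 12-vector
    for (const auto &[coords, v] : src)
      dst[coords.first * 4 + coords.second] = v; // reshapeCoords
    assert(dst.at(1) == 1.0 && dst.at(11) == 2.0);
    return 0;
  }

Unlike a dense collapse_shape, which is only a change of view, this moves
every stored entry; the unified codegen path is still proportional to nnz
but avoids the runtime COO round-trip entirely.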
