[Mlir-commits] [mlir] [mlir][sparse] unify lib/codegen rewriting rules for sparse tensor re… (PR #68049)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Mon Oct 2 16:26:10 PDT 2023
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-mlir-sparse
<details>
<summary>Changes</summary>
…shaping operations.
---
Patch is 32.84 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/68049.diff
4 Files Affected:
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp (-2)
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp (+5-5)
- (removed) mlir/test/Dialect/SparseTensor/post_rewriting.mlir (-99)
- (modified) mlir/test/Dialect/SparseTensor/sparse_reshape.mlir (+120-236)
``````````diff
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 37f6971cf4df1a2..75045d2378dec00 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -1538,8 +1538,6 @@ void mlir::populateSparseTensorConversionPatterns(
patterns
.add<SparseReturnConverter, SparseTensorToDimSizeConverter,
SparseCastConverter, SparseTensorNewConverter,
- SparseReshapeConverter<tensor::ExpandShapeOp>,
- SparseReshapeConverter<tensor::CollapseShapeOp>,
SparseTensorConcatConverter, SparseTensorAllocConverter,
SparseTensorEmptyConverter, SparseTensorDeallocConverter,
SparseTensorToPositionsConverter, SparseTensorToCoordinatesConverter,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 277903dc55b7432..55db34f7050d3f3 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -1475,15 +1475,15 @@ void mlir::populatePostSparsificationRewriting(RewritePatternSet &patterns,
bool enableForeach,
bool enableConvert) {
patterns.add<ReshapeRewriter<tensor::ExpandShapeOp>,
- ReshapeRewriter<tensor::CollapseShapeOp>, TensorReshapeRewriter>(
- patterns.getContext());
+ ReshapeRewriter<tensor::CollapseShapeOp>,
+ Sparse2SparseReshapeRewriter<tensor::ExpandShapeOp>,
+ Sparse2SparseReshapeRewriter<tensor::CollapseShapeOp>,
+ TensorReshapeRewriter>(patterns.getContext());
if (enableForeach)
patterns.add<ForeachRewriter>(patterns.getContext());
// TODO: If RT not enabled, rewrite concatenate ops, etc here.
if (!enableRT) {
- patterns.add<ConcatenateRewriter, NewRewriter, OutRewriter,
- Sparse2SparseReshapeRewriter<tensor::ExpandShapeOp>,
- Sparse2SparseReshapeRewriter<tensor::CollapseShapeOp>>(
+ patterns.add<ConcatenateRewriter, NewRewriter, OutRewriter>(
patterns.getContext());
if (enableConvert)
patterns.add<ConvertRewriter>(patterns.getContext());
diff --git a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir b/mlir/test/Dialect/SparseTensor/post_rewriting.mlir
deleted file mode 100644
index 93fc610b64b3359..000000000000000
--- a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir
+++ /dev/null
@@ -1,99 +0,0 @@
-// RUN: mlir-opt %s -post-sparsification-rewrite | FileCheck %s
-
-#SparseVector = #sparse_tensor.encoding<{
- map = (d0) -> (d0 : compressed)
-}>
-
-#SparseMatrix = #sparse_tensor.encoding<{
- map = (d0, d1) -> (d0 : compressed, d1 : compressed)
-}>
-
-// CHECK-LABEL: func.func @expand_dense(
-// CHECK-SAME: %[[A:.*]]: tensor<12xf64>) -> tensor<3x4xf64> {
-// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64> into tensor<3x4xf64>
-// CHECK: return %[[E]] : tensor<3x4xf64>
-// CHECK: }
-func.func @expand_dense(%arg0: tensor<12xf64>) -> tensor<3x4xf64> {
- %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64>
- return %0 : tensor<3x4xf64>
-}
-
-// CHECK-LABEL: func.func @expand_from_sparse(
-// CHECK-SAME: %[[A:.*]]: tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<3x4xf64> {
-// CHECK: %[[C:.*]] = sparse_tensor.convert %[[A]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> to tensor<12xf64>
-// CHECK: %[[E:.*]] = tensor.expand_shape %[[C]] {{.*}} : tensor<12xf64> into tensor<3x4xf64>
-// CHECK: return %[[E]] : tensor<3x4xf64>
-// CHECK: }
-func.func @expand_from_sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64> {
- %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64>
- return %0 : tensor<3x4xf64>
-}
-
-// CHECK-LABEL: func.func @expand_to_sparse(
-// CHECK-SAME: %[[A:.*]]: tensor<12xf64>) -> tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64> into tensor<3x4xf64>
-// CHECK: %[[C:.*]] = sparse_tensor.convert %[[E]] : tensor<3x4xf64> to tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[C]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: }
-func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix> {
- %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64, #SparseMatrix>
- return %0 : tensor<3x4xf64, #SparseMatrix>
-}
-
-//
-// Not rewritten, needs conversion.
-//
-// CHECK-LABEL: func.func @expand_sparse2sparse(
-// CHECK-SAME: %[[A:.*]]: tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[E]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: }
-func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
- %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
- return %0 : tensor<3x4xf64, #SparseMatrix>
-}
-
-// CHECK-LABEL: func.func @collapse_dense(
-// CHECK-SAME: %[[A:.*]]: tensor<3x4xf64>) -> tensor<12xf64> {
-// CHECK: %[[R:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64> into tensor<12xf64>
-// CHECK: return %[[R]] : tensor<12xf64>
-// CHECK: }
-func.func @collapse_dense(%arg0: tensor<3x4xf64>) -> tensor<12xf64> {
- %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64>
- return %0 : tensor<12xf64>
-}
-
-// CHECK-LABEL: func.func @collapse_from_sparse(
-// CHECK-SAME: %[[A:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<12xf64> {
-// CHECK: %[[C:.*]] = sparse_tensor.convert %[[A]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> to tensor<3x4xf64>
-// CHECK: %[[R:.*]] = tensor.collapse_shape %[[C]] {{.*}} : tensor<3x4xf64> into tensor<12xf64>
-// CHECK: return %[[R]] : tensor<12xf64>
-// CHECK: }
-func.func @collapse_from_sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64> {
- %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64>
- return %0 : tensor<12xf64>
-}
-
-// CHECK-LABEL: func.func @collapse_to_sparse(
-// CHECK-SAME: %[[A:.*]]: tensor<3x4xf64>) -> tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK: %[[R:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64> into tensor<12xf64>
-// CHECK: %[[C:.*]] = sparse_tensor.convert %[[R]] : tensor<12xf64> to tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[C]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: }
-func.func @collapse_to_sparse(%arg0: tensor<3x4xf64>) -> tensor<12xf64, #SparseVector> {
- %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64, #SparseVector>
- return %0 : tensor<12xf64, #SparseVector>
-}
-
-//
-// Not rewritten, needs conversion.
-//
-// CHECK-LABEL: func.func @collapse_sparse2sparse(
-// CHECK-SAME: %[[A:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK: %[[C:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[C]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: }
-func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
- %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector>
- return %0 : tensor<12xf64, #SparseVector>
-}
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
index 5ecc92231068c1d..7f8edac15302616 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
@@ -1,7 +1,8 @@
// RUN: mlir-opt %s | mlir-opt | FileCheck %s --check-prefix=CHECK-ROUND
-// RUN: mlir-opt %s --sparse-tensor-conversion --cse --canonicalize | FileCheck %s --check-prefix=CHECK-CONV
+// RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=true enable-convert=false" \
+// RUN: --cse --canonicalize | FileCheck %s
// RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" \
-// RUN: --cse --canonicalize | FileCheck %s --check-prefix=CHECK-RWT
+// RUN: --cse --canonicalize | FileCheck %s
#SparseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
#SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
@@ -14,55 +15,28 @@
// CHECK-ROUND: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
-// conversion:
-//
-// CHECK-CONV-LABEL: func.func @sparse_expand(
-// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index
-// CHECK-CONV-DAG: call @newSparseTensor
-// CHECK-CONV-DAG: call @newSparseTensor
-// CHECK-CONV: scf.while : () -> () {
-// CHECK-CONV: call @getNextF64
-// CHECK-CONV: scf.condition
-// CHECK-CONV: } do {
-// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<1xindex>
-// CHECK-CONV: %[[D:.*]] = arith.divui %[[X]], %[[C10]] : index
-// CHECK-CONV: %[[R:.*]] = arith.remui %[[X]], %[[C10]] : index
-// CHECK-CONV: memref.store %[[D]], %{{.*}}[%[[C0]]] : memref<2xindex>
-// CHECK-CONV: memref.store %[[R]], %{{.*}}[%[[C1]]] : memref<2xindex>
-// CHECK-CONV: call @addEltF64
-// CHECK-CONV: scf.yield
-// CHECK-CONV: }
-// CHECK-CONV: %[[N:.*]] = call @newSparseTensor
-// CHECK-CONV: call @delSparseTensorCOOF64
-// CHECK-CONV: call @delSparseTensorIteratorF64
-// CHECK-CONV: return %[[N]] : !llvm.ptr<i8>
-//
-// rewrite for codegen:
-//
-// CHECK-RWT-LABEL: func.func @sparse_expand(
-// CHECK-RWT-SAME: %[[S:.*]]:
-// CHECK-RWT-DAG: %[[C10:.*]] = arith.constant 10 : index
-// CHECK-RWT-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-RWT-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK-RWT: %[[B:.*]] = bufferization.alloc_tensor()
-// CHECK-RWT: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
-// CHECK-RWT: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
-// CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[S]]
-// CHECK-RWT: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
-// CHECK-RWT: %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
-// CHECK-RWT: %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R:.*]] = %[[B]])
-// CHECK-RWT: %[[SI:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT: %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[I]]] : memref<?xf64>
-// CHECK-RWT: %[[DI0:.*]] = arith.divui %[[SI]], %[[C10]] : index
-// CHECK-RWT: %[[DI1:.*]] = arith.remui %[[SI]], %[[C10]] : index
-// CHECK-RWT: %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R]]{{\[}}%[[DI0]], %[[DI1]]]
-// CHECK-RWT: scf.yield %[[NT:.*]]
-// CHECK-RWT: }
-// CHECK-RWT: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
-// CHECK-RWT-NOT: sparse_tensor.convert
-// CHECK-RWT: return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-LABEL: func.func @sparse_expand(
+// CHECK-SAME: %[[S:.*]]:
+// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK: %[[B:.*]] = bufferization.alloc_tensor()
+// CHECK: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
+// CHECK: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
+// CHECK: %[[V:.*]] = sparse_tensor.values %[[S]]
+// CHECK: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
+// CHECK: %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
+// CHECK: %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[R:.*]] = %[[B]])
+// CHECK: %[[SI:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK: %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[I]]] : memref<?xf64>
+// CHECK: %[[DI0:.*]] = arith.divui %[[SI]], %[[C10]] : index
+// CHECK: %[[DI1:.*]] = arith.remui %[[SI]], %[[C10]] : index
+// CHECK: %[[NT:.*]] = sparse_tensor.insert %[[SV]] into %[[R]]{{\[}}%[[DI0]], %[[DI1]]]
+// CHECK: scf.yield %[[NT:.*]]
+// CHECK: }
+// CHECK: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
+// CHECK-NOT: sparse_tensor.convert
+// CHECK: return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10xf64, #SparseMatrix> {
%0 = tensor.expand_shape %arg0 [[0, 1]] :
@@ -78,64 +52,37 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
// CHECK-ROUND: %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
-// conversion:
-//
-// CHECK-CONV-LABEL: func.func @sparse_collapse(
-// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index
-// CHECK-CONV-DAG: call @newSparseTensor
-// CHECK-CONV-DAG: call @newSparseTensor
-// CHECK-CONV: scf.while : () -> () {
-// CHECK-CONV: call @getNextF64
-// CHECK-CONV: scf.condition
-// CHECK-CONV: } do {
-// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<2xindex>
-// CHECK-CONV: %[[Y:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<2xindex>
-// CHECK-CONV: %[[M:.*]] = arith.muli %[[X]], %[[C10]] : index
-// CHECK-CONV: %[[A:.*]] = arith.addi %[[M]], %[[Y]] : index
-// CHECK-CONV: memref.store %[[A]], %{{.*}}[%[[C0]]] : memref<1xindex>
-// CHECK-CONV: call @addEltF64
-// CHECK-CONV: scf.yield
-// CHECK-CONV: }
-// CHECK-CONV: %[[N:.*]] = call @newSparseTensor
-// CHECK-CONV: call @delSparseTensorCOOF64
-// CHECK-CONV: call @delSparseTensorIteratorF64
-// CHECK-CONV: return %[[N]] : !llvm.ptr<i8>
-//
-// rewrite for codegen:
-//
-// CHECK-RWT-LABEL: func.func @sparse_collapse(
-// CHECK-RWT-SAME: %[[S:.*]]:
-// CHECK-RWT-DAG: %[[C10:.*]] = arith.constant 10 : index
-// CHECK-RWT-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-RWT-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK-RWT: %[[B:.*]] = bufferization.alloc_tensor()
-// CHECK-RWT: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
-// CHECK-RWT: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
-// CHECK-RWT: %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index}
-// CHECK-RWT: %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
-// CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[S]]
-// CHECK-RWT: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
-// CHECK-RWT: %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
-// CHECK-RWT: %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[A0:.*]] = %[[B]])
-// CHECK-RWT: %[[SI0:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT-DAG: %[[S1:.*]] = memref.load %[[P1]]{{\[}}%[[I]]] : memref<?xindex>
-// CHECK-RWT-DAG: %[[PE1:.*]] = arith.addi %[[I]], %[[C1]] : index
-// CHECK-RWT: %[[E1:.*]] = memref.load %[[P1]]{{\[}}%[[PE1]]] : memref<?xindex>
-// CHECK-RWT: %[[RET_1:.*]] = scf.for %[[J:.*]] = %[[S1]] to %[[E1]] step %[[C1]] iter_args(%[[A1:.*]] = %[[A0]])
-// CHECK-RWT: %[[SI1:.*]] = memref.load %[[I1]]{{\[}}%[[J]]] : memref<?xindex>
-// CHECK-RWT: %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[J]]] : memref<?xf64>
-// CHECK-RWT: %[[T:.*]] = arith.muli %[[SI0]], %[[C10]] : index
-// CHECK-RWT: %[[DI:.*]] = arith.addi %[[T]], %[[SI1]] : index
-// CHECK-RWT: %[[R1:.*]] = sparse_tensor.insert %[[SV]] into %[[A1]]{{\[}}%[[DI]]]
-// CHECK-RWT scf.yield %[[R1]]
-// CHECK-RWT }
-// CHECK-RWT scf.yield %[[RET_1]]
-// CHECK-RWT: }
-// CHECK-RWT: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
-// CHECK-RWT-NOT: sparse_tensor.convert
-// CHECK-RWT: return %[[NT1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-LABEL: func.func @sparse_collapse(
+// CHECK-SAME: %[[S:.*]]:
+// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK: %[[B:.*]] = bufferization.alloc_tensor()
+// CHECK: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index}
+// CHECK: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
+// CHECK: %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index}
+// CHECK: %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
+// CHECK: %[[V:.*]] = sparse_tensor.values %[[S]]
+// CHECK: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref<?xindex>
+// CHECK: %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref<?xindex>
+// CHECK: %[[RET:.*]] = scf.for %[[I:.*]] = %[[S0]] to %[[E0]] step %[[C1]] iter_args(%[[A0:.*]] = %[[B]])
+// CHECK: %[[SI0:.*]] = memref.load %[[I0]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK-DAG: %[[S1:.*]] = memref.load %[[P1]]{{\[}}%[[I]]] : memref<?xindex>
+// CHECK-DAG: %[[PE1:.*]] = arith.addi %[[I]], %[[C1]] : index
+// CHECK: %[[E1:.*]] = memref.load %[[P1]]{{\[}}%[[PE1]]] : memref<?xindex>
+// CHECK: %[[RET_1:.*]] = scf.for %[[J:.*]] = %[[S1]] to %[[E1]] step %[[C1]] iter_args(%[[A1:.*]] = %[[A0]])
+// CHECK: %[[SI1:.*]] = memref.load %[[I1]]{{\[}}%[[J]]] : memref<?xindex>
+// CHECK: %[[SV:.*]] = memref.load %[[V]]{{\[}}%[[J]]] : memref<?xf64>
+// CHECK: %[[T:.*]] = arith.muli %[[SI0]], %[[C10]] : index
+// CHECK: %[[DI:.*]] = arith.addi %[[T]], %[[SI1]] : index
+// CHECK: %[[R1:.*]] = sparse_tensor.insert %[[SV]] into %[[A1]]{{\[}}%[[DI]]]
+// CHECK scf.yield %[[R1]]
+// CHECK }
+// CHECK scf.yield %[[RET_1]]
+// CHECK: }
+// CHECK: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
+// CHECK-NOT: sparse_tensor.convert
+// CHECK: return %[[NT1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<100xf64, #SparseVector> {
%0 = tensor.collapse_shape %arg0 [[0, 1]] :
@@ -151,66 +98,34 @@ func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<10
// CHECK-ROUND: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<?x...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/68049
More information about the Mlir-commits mailing list