[Mlir-commits] [mlir] [mlir][sparse] Change sparse_tensor.print format (PR #91528)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Wed May 8 13:10:14 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-mlir

Author: Yinying Li (yinying-lisa-li)

<details>
<summary>Changes</summary>

1. Remove the trailing comma after the last element of a printed memref and emit a closing parenthesis instead, e.g. `values : ( 1, 2, 3,` becomes `values : ( 1, 2, 3 )` (see the sketch below).
2. Update the integration tests to match the new format.
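
For illustration, a minimal standalone C++ sketch of the idea behind the change follows. It is not the MLIR rewriter code from the patch (`printValues` is a hypothetical helper); it only mirrors the high-level logic of the new `printEnding`: detect the last element by comparing the next index against the bound, suppress the comma there, and close with a parenthesis after the loop.

```cpp
// Illustrative only: plain C++ analogue of the "no separator after the last
// element" logic that printEnding lowers to in the rewriter.
#include <cstdio>
#include <vector>

static void printValues(const std::vector<double> &vals) {
  std::printf("values : ( ");
  for (size_t i = 0, e = vals.size(); i < e; ++i) {
    // Counterpart of the `iv + step == upperBound` comparison in the patch.
    bool isLast = (i + 1 == e);
    std::printf(isLast ? "%g" : "%g, ", vals[i]);
  }
  std::printf(" )\n");
}

int main() {
  // Old output ended with a trailing comma and no ')': values : ( 1, 2, 0,
  // New output:                                        values : ( 1, 2, 0 )
  printValues({1, 2, 0});
  return 0;
}
```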

---

Patch is 215.66 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/91528.diff


73 Files Affected:

- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp (+30-3) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir (+9-9) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir (+1-1) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir (+1-1) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir (+1-1) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir (+14-14) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir (+6-6) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir (+53-53) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block3d.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir (+26-26) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir (+6-6) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir (+6-6) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir (+21-21) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir (+5-5) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir (+19-19) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir (+19-19) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir (+20-20) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir (+18-18) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir (+84-84) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_block.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir (+30-30) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir (+28-28) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir (+4-4) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir (+6-6) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_empty.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir (+3-3) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir (+38-38) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir (+5-5) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir (+37-37) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir (+6-6) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir (+16-16) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir (+18-18) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir (+6-3) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir (+56-56) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir (+17-17) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir (+30-30) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir (+5-5) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir (+5-5) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir (+5-5) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir (+13-13) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir (+9-9) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir (+51-51) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print_3d.mlir (+5-5) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir (+6-6) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir (+15-15) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir (+3-3) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir (+14-14) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir (+5-5) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir (+3-3) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir (+26-26) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir (+23-23) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir (+3-3) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir (+7-7) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir (+12-12) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir (+10-10) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir (+8-8) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir (+22-22) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir (+19-19) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir (+3-3) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir (+6-6) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir (+6-6) 


``````````diff
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 025fd3331ba89..d2cdca41cf4a1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -830,10 +830,12 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
                                          vector::PrintPunctuation::Comma);
         rewriter.create<vector::PrintOp>(loc, imag,
                                          vector::PrintPunctuation::Close);
-        rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
+        printEnding(rewriter, loc, forOp.getInductionVar(), step,
+                    forOp.getUpperBound(), /*isComplex*/ true, val);
+
       } else {
-        rewriter.create<vector::PrintOp>(loc, val,
-                                         vector::PrintPunctuation::Comma);
+        printEnding(rewriter, loc, forOp.getInductionVar(), step,
+                    forOp.getUpperBound(), /*isComplex*/ false, val);
       }
     }
     idxs.pop_back();
@@ -842,6 +844,31 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
     rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Close);
   }
 
+  // Helper method to print the ending of memref (no punctuation after the last
+  // element).
+  static void printEnding(PatternRewriter &rewriter, Location loc, Value indVar,
+                          Value step, Value upperBound, bool isComplex,
+                          Value val) {
+    auto bound = rewriter.create<arith::AddIOp>(loc, indVar, step);
+    Value cond = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
+                                                bound, upperBound);
+    scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, cond, /*else*/ true);
+    if (isComplex) {
+      rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
+      rewriter.create<vector::PrintOp>(loc,
+                                       vector::PrintPunctuation::NoPunctuation);
+      rewriter.setInsertionPointToStart(&ifOp.getElseRegion().front());
+      rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
+    } else {
+      rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
+      rewriter.create<vector::PrintOp>(loc, val,
+                                       vector::PrintPunctuation::NoPunctuation);
+      rewriter.setInsertionPointToStart(&ifOp.getElseRegion().front());
+      rewriter.create<vector::PrintOp>(loc, val,
+                                       vector::PrintPunctuation::Comma);
+    }
+  }
+
   // Helper method to print run-time lvl/dim sizes.
   static void printSizes(PatternRewriter &rewriter, Location loc, Value tensor,
                          unsigned size, bool isDim) {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
index f79e7e68f382d..ab4fd0e30d654 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
@@ -93,9 +93,9 @@ module {
     // CHECK-NEXT: nse = 12
     // CHECK-NEXT: dim = ( 4, 6 )
     // CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
-    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1 )
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
     // CHECK-NEXT: ----
     sparse_tensor.print %A : tensor<?x?xf64, #BSR>
 
@@ -103,9 +103,9 @@ module {
     // CHECK-NEXT: nse = 12
     // CHECK-NEXT: dim = ( 2, 3, 2, 2 )
     // CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1
-    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1 )
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
     // CHECK-NEXT: ----
     %t1 = sparse_tensor.reinterpret_map %A : tensor<?x?xf64, #BSR>
                                           to tensor<?x?x2x2xf64, #DSDD>
@@ -115,9 +115,9 @@ module {
     // CHECK-NEXT: nse = 12
     // CHECK-NEXT: dim = ( 4, 6 )
     // CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
-    // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1 )
+    // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0 )
     // CHECK-NEXT: ----
     %As = call @scale(%A) : (tensor<?x?xf64, #BSR>) -> (tensor<?x?xf64, #BSR>)
     sparse_tensor.print %As : tensor<?x?xf64, #BSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
index 3534e7d15207c..caa0d6a71ed37 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
@@ -108,9 +108,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 2, 4, 3, 4 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 2,
-  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 2 )
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo1() {
@@ -134,9 +134,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 2, 4, 4, 3 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 2,
-  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 2 )
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo2() {
@@ -160,9 +160,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 4, 2, 3, 4 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 1,
-  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 1 )
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo3() {
@@ -186,9 +186,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 4, 2, 4, 3 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 1,
-  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 1 )
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo4() {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
index 6a4902057362f..7edb76cc8045d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir
@@ -111,11 +111,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9,
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
-    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %0 = call @concat_sparse_sparse(%sm24cc, %sm34cd, %sm44dc)
@@ -142,11 +142,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9,
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
-    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
index 9c9b0e3330c9c..d17e110e2c2d9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir
@@ -144,11 +144,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5
-    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1 )
     // CHECK-NEXT: ----
     //
     %4 = call @concat_sparse_sparse_perm(%sm24ccp, %sm34cd, %sm44dc)
@@ -173,11 +173,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1
-    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %6 = call @concat_mix_sparse_perm(%m24, %sm34cdp, %sm44dc)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
index ae067bf18527b..c2a4e95e7922e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir
@@ -116,11 +116,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
-    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %8 = call @concat_sparse_sparse_dim1(%sm42cc, %sm43cd, %sm44dc)
@@ -140,11 +140,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
-    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %10 = call @concat_mix_sparse_dim1(%m42, %sm43cd, %sm44dc)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
index ce746f27c4d88..8fe7e08a66d38 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir
@@ -130,11 +130,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18
-    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0
-    // CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0 )
+    // CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1 )
     // CHECK-NEXT: ----
     //
     %12 = call @concat_sparse_sparse_perm_dim1(%sm42ccp, %sm43cd, %sm44dc)
@@ -154,11 +154,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
-    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %14 = call @concat_mix_sparse_perm_dim1(%m42, %sm43cdp, %sm44dc)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index b2bbc64f1688e..d00d4c87f9bd4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -108,7 +108,7 @@ module {
     // CHECK-NEXT: nse = 25
     // CHECK-NEXT: dim = ( 5, 5 )
     // CHECK-NEXT: lvl = ( 5, 5 )
-    // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10,
+    // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10 )
     // CHECK-NEXT: ----
     //
     sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
index ca9df03c69eed..49f182ddb1d44 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
@@ -95,7 +95,7 @@ module {
     // CHECK-NEXT: nse = 32
     // CHECK-NEXT: dim = ( 32 )
     // CHECK-NEXT: lvl = ( 32 )
-    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
     // CHECK-NEXT: ----
     //
     sparse_tensor.print %0 : tensor<?xbf16, #DenseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
index 4f5e6ddd48d8e..cc2a3733c8633 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -96,7 +96,7 @@ module {
     // CHECK-NEXT: nse = 32
     // CHECK-NEXT: dim = ( 32 )
     // CHECK-NEXT: lvl = ( 32 )
-    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
     // CHECK-NEXT: ----
     //
     sparse_tensor.print %0 : tensor<?xf16, #DenseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
index c645ca6567209..f33a3abc7a5f7 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
@@ -161,11 +161,11 @@ module {
     // CHECK-NEXT: nse = 36
     // CHECK-NEXT: dim = ( 6, 6 )
     // CHECK-NEXT: lvl = ( 6, 6 )
-    // CHECK-NEXT: pos[0] : ( 0, 6
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5
-    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
-    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
-    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: pos[0] : ( 0, 6 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5 )
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36 )
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5 )
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0 )
     // CHECK-NEXT: ----
     //
     sparse_tensor.print %2 : tensor<6x6xi32, #DCSR>
@@ -177,9 +177,9 @@ module {
     // CHECK-NEXT: nse = 36
     // CHECK-NEXT: dim = ( 6, 6 )
     // CHECK-NEXT: lvl = ( 6, 6 )
-    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36
-    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5
-    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, 1, 0, 0, -1, 1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -6, 2, -1, 3, 0, -3, 0
+    // CHECK-NEXT: pos[1] : ( 0, 6, 12, 18, 24, 30, 36 )
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5 )
+    // CHECK-NEXT: values : ( 0, 0, -1, -6, -1, 6, -1, 0, 1, 0, ...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/91528


More information about the Mlir-commits mailing list