[Mlir-commits] [mlir] 0cbaff8 - [mlir][sparse] cleanup conversion test (#70356)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Thu Oct 26 10:48:33 PDT 2023


Author: Aart Bik
Date: 2023-10-26T10:48:29-07:00
New Revision: 0cbaff815cf2083b956af037c2efbdce722ed560

URL: https://github.com/llvm/llvm-project/commit/0cbaff815cf2083b956af037c2efbdce722ed560
DIFF: https://github.com/llvm/llvm-project/commit/0cbaff815cf2083b956af037c2efbdce722ed560.diff

LOG: [mlir][sparse] cleanup conversion test (#70356)

Various TODOs had been added that effectively removed the actual test.
This puts the CHECK tests back and removes the TODOs that have no
immediate plans.

Added: 
    

Modified: 
    mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index 658e8aa40022eb2..0280e27b4e312a0 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -41,25 +41,24 @@ func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64
 }
 
 // CHECK-LABEL:   func.func @sparse_hidden_nop_cast
-// TODO: The following convert should be a cast instead.
-// CHECK:           sparse_tensor.convert
-// CHECK:           return
+// CHECK-NEXT:      sparse_tensor.convert
+// CHECK-NEXT:      return
 func.func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
   return %0 : tensor<?xf32, #SparseVector>
 }
 
 // CHECK-LABEL:   func.func @sparse_convert_1d_ss(
-// TODO: libgen path need to support efficient format conversion (e.g., 32 bit pos -> 64 bit pos).
-// Maybe we should use a different operator as well to be clear.
+// CHECK-NEXT:      sparse_tensor.convert
+// CHECK-NEXT:      return
 func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
   return %0 : tensor<?xf32, #SparseVector32>
 }
 
 // CHECK-LABEL:   func.func @sparse_convert(
-// TODO: libgen path need to support efficient format conversion (e.g., 32 bit pos -> 64 bit pos).
-// Maybe we should use a different operator as well to be clear.
+// CHECK-NEXT:      sparse_tensor.convert
+// CHECK-NEXT:      return
 func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
   return %0 : tensor<?xf32, #SparseVector32>
@@ -73,6 +72,7 @@ func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32
 // CHECK:           sparse_tensor.foreach
 // CHECK:             sparse_tensor.insert
 // CHECK:           sparse_tensor.load
+// CHECK:           return
 func.func @sparse_convert_permuted(%arg0: tensor<?x?x?xf32, #SortedCOO3D>) -> tensor<?x?x?xf32, #TsssPermuted> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf32, #SortedCOO3D> to tensor<?x?x?xf32, #TsssPermuted>
   return %0 : tensor<?x?x?xf32, #TsssPermuted>
@@ -83,6 +83,7 @@ func.func @sparse_convert_permuted(%arg0: tensor<?x?x?xf32, #SortedCOO3D>) -> te
 // CHECK:             sparse_tensor.insert
 // CHECK:           sparse_tensor.load
 // CHECK-NOT:       sparse_tensor.reorder_coo
+// CHECK:           return
 func.func @sparse_convert_slice(%arg0: tensor<2x13xi32, #COOSlice>) -> (tensor<2x13xi32, #SortedCOO2D>)  {
   %0 = sparse_tensor.convert %arg0 : tensor<2x13xi32, #COOSlice> to tensor<2x13xi32, #SortedCOO2D>
   return %0 : tensor<2x13xi32, #SortedCOO2D>


        


More information about the Mlir-commits mailing list