[Mlir-commits] [mlir] 260dbb4 - [mlir][sparse] add COO to python tests (#70090)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Tue Oct 24 12:48:50 PDT 2023


Author: Aart Bik
Date: 2023-10-24T12:48:46-07:00
New Revision: 260dbb45ac2482695a6fb0c55dcf4b693636f0bf

URL: https://github.com/llvm/llvm-project/commit/260dbb45ac2482695a6fb0c55dcf4b693636f0bf
DIFF: https://github.com/llvm/llvm-project/commit/260dbb45ac2482695a6fb0c55dcf4b693636f0bf.diff

LOG: [mlir][sparse] add COO to python tests (#70090)

also typo fix

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
    mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
    mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index e8bb0f727a7ce9c..dbf65bc8a3edd83 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -132,13 +132,15 @@ def main():
 
     # CHECK-LABEL: TEST: testSDDMMM
     print("\nTEST: testSDDMMM")
+    count = 0
     with ir.Context() as ctx, ir.Location.unknown():
-        count = 0
         # Loop over various ways to compile and annotate the SDDMM kernel with
         # a *single* sparse tensor. Note that we deliberate do not exhaustively
         # search the full state space to reduce runtime of the test. It is
         # straightforward to adapt the code below to explore more combinations.
+        # For these simple orderings, dim2lvl and lvl2dim are the same.
         levels = [
+            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
             [st.DimLevelType.dense, st.DimLevelType.dense],
             [st.DimLevelType.dense, st.DimLevelType.compressed],
             [st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -154,7 +156,7 @@ def main():
                     for iwidth in [32]:
                         for e in [True]:
                             attr = st.EncodingAttr.get(
-                                level, ordering, None, pwidth, iwidth
+                                level, ordering, ordering, pwidth, iwidth
                             )
                             opt = f"parallelization-strategy=none"
                             compiler = sparse_compiler.SparseCompiler(
@@ -162,7 +164,7 @@ def main():
                             )
                             build_compile_and_run_SDDMMM(attr, compiler)
                             count = count + 1
-    # CHECK: Passed 8 tests
+    # CHECK: Passed 10 tests
     print("Passed ", count, "tests")
 
 

diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 7ca1fdd8e5d0ed9..7a101b815310828 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -115,17 +115,18 @@ def main():
 
     # CHECK-LABEL: TEST: testSpMM
     print("\nTEST: testSpMM")
+    count = 0
     with ir.Context() as ctx, ir.Location.unknown():
-        count = 0
         # Loop over various ways to compile and annotate the SpMM kernel with
         # a *single* sparse tensor. Note that we deliberate do not exhaustively
         # search the full state space to reduce runtime of the test. It is
         # straightforward to adapt the code below to explore more combinations.
-
+        # For these simple orderings, dim2lvl and lvl2dim are the same.
         vl = 1
         e = False
         opt = f"parallelization-strategy=none"
         levels = [
+            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
             [st.DimLevelType.dense, st.DimLevelType.dense],
             [st.DimLevelType.dense, st.DimLevelType.compressed],
             [st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -144,11 +145,11 @@ def main():
                 for pwidth in bitwidths:
                     for iwidth in bitwidths:
                         attr = st.EncodingAttr.get(
-                            level, ordering, None, pwidth, iwidth
+                            level, ordering, ordering, pwidth, iwidth
                         )
                         build_compile_and_run_SpMM(attr, compiler)
                         count = count + 1
-        # CHECK: Passed 8 tests
+        # CHECK: Passed 10 tests
         print("Passed ", count, "tests")
 
 

diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
index d549a9a0954c6cb..3d1af3017abe35b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
@@ -24,7 +24,7 @@ def __call__(self, module: ir.Module):
         self.compile(module)
 
     def compile(self, module: ir.Module):
-        """Compiles the module by invoking the sparse copmiler pipeline."""
+        """Compiles the module by invoking the sparse compiler pipeline."""
         passmanager.PassManager.parse(self.pipeline).run(module.operation)
 
     def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine:


        


More information about the Mlir-commits mailing list