[Mlir-commits] [mlir] [mlir][sparse] add COO to python tests (PR #70090)

Aart Bik llvmlistbot at llvm.org
Tue Oct 24 11:37:41 PDT 2023


https://github.com/aartbik created https://github.com/llvm/llvm-project/pull/70090

also typo fix

>From c379ea808ee08b5c97837a700ef153bc1e1e395e Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Tue, 24 Oct 2023 11:35:43 -0700
Subject: [PATCH] [mlir][sparse] add COO to python tests

also typo fix
---
 .../Dialect/SparseTensor/python/test_SDDMM.py            | 8 +++++---
 .../Integration/Dialect/SparseTensor/python/test_SpMM.py | 9 +++++----
 .../Dialect/SparseTensor/python/tools/sparse_compiler.py | 2 +-
 3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index e8bb0f727a7ce9c..dbf65bc8a3edd83 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -132,13 +132,15 @@ def main():
 
     # CHECK-LABEL: TEST: testSDDMMM
     print("\nTEST: testSDDMMM")
+    count = 0
     with ir.Context() as ctx, ir.Location.unknown():
-        count = 0
         # Loop over various ways to compile and annotate the SDDMM kernel with
         # a *single* sparse tensor. Note that we deliberately do not exhaustively
         # search the full state space to reduce runtime of the test. It is
         # straightforward to adapt the code below to explore more combinations.
+        # For these simple orderings, dim2lvl and lvl2dim are the same.
         levels = [
+            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
             [st.DimLevelType.dense, st.DimLevelType.dense],
             [st.DimLevelType.dense, st.DimLevelType.compressed],
             [st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -154,7 +156,7 @@ def main():
                     for iwidth in [32]:
                         for e in [True]:
                             attr = st.EncodingAttr.get(
-                                level, ordering, None, pwidth, iwidth
+                                level, ordering, ordering, pwidth, iwidth
                             )
                             opt = f"parallelization-strategy=none"
                             compiler = sparse_compiler.SparseCompiler(
@@ -162,7 +164,7 @@ def main():
                             )
                             build_compile_and_run_SDDMMM(attr, compiler)
                             count = count + 1
-    # CHECK: Passed 8 tests
+    # CHECK: Passed 10 tests
     print("Passed ", count, "tests")
 
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 7ca1fdd8e5d0ed9..7a101b815310828 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -115,17 +115,18 @@ def main():
 
     # CHECK-LABEL: TEST: testSpMM
     print("\nTEST: testSpMM")
+    count = 0
     with ir.Context() as ctx, ir.Location.unknown():
-        count = 0
         # Loop over various ways to compile and annotate the SpMM kernel with
         # a *single* sparse tensor. Note that we deliberately do not exhaustively
         # search the full state space to reduce runtime of the test. It is
         # straightforward to adapt the code below to explore more combinations.
-
+        # For these simple orderings, dim2lvl and lvl2dim are the same.
         vl = 1
         e = False
         opt = f"parallelization-strategy=none"
         levels = [
+            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
             [st.DimLevelType.dense, st.DimLevelType.dense],
             [st.DimLevelType.dense, st.DimLevelType.compressed],
             [st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -144,11 +145,11 @@ def main():
                 for pwidth in bitwidths:
                     for iwidth in bitwidths:
                         attr = st.EncodingAttr.get(
-                            level, ordering, None, pwidth, iwidth
+                            level, ordering, ordering, pwidth, iwidth
                         )
                         build_compile_and_run_SpMM(attr, compiler)
                         count = count + 1
-        # CHECK: Passed 8 tests
+        # CHECK: Passed 10 tests
         print("Passed ", count, "tests")
 
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
index d549a9a0954c6cb..3d1af3017abe35b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
@@ -24,7 +24,7 @@ def __call__(self, module: ir.Module):
         self.compile(module)
 
     def compile(self, module: ir.Module):
-        """Compiles the module by invoking the sparse copmiler pipeline."""
+        """Compiles the module by invoking the sparse compiler pipeline."""
         passmanager.PassManager.parse(self.pipeline).run(module.operation)
 
     def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine:



More information about the Mlir-commits mailing list