[Mlir-commits] [mlir] [mlir][sparse] add COO to python tests (PR #70090)
llvmlistbot at llvm.org
Tue Oct 24 11:38:55 PDT 2023
llvmbot wrote:
@llvm/pr-subscribers-mlir-sparse
Author: Aart Bik (aartbik)
Changes:
Also fixes a typo.
---
Full diff: https://github.com/llvm/llvm-project/pull/70090.diff
3 Files Affected:
- (modified) mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py (+5-3)
- (modified) mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py (+5-4)
- (modified) mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py (+1-1)
``````````diff
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index e8bb0f727a7ce9c..dbf65bc8a3edd83 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -132,13 +132,15 @@ def main():
# CHECK-LABEL: TEST: testSDDMMM
print("\nTEST: testSDDMMM")
+ count = 0
with ir.Context() as ctx, ir.Location.unknown():
- count = 0
# Loop over various ways to compile and annotate the SDDMM kernel with
# a *single* sparse tensor. Note that we deliberate do not exhaustively
# search the full state space to reduce runtime of the test. It is
# straightforward to adapt the code below to explore more combinations.
+ # For these simple orderings, dim2lvl and lvl2dim are the same.
levels = [
+ [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
[st.DimLevelType.dense, st.DimLevelType.dense],
[st.DimLevelType.dense, st.DimLevelType.compressed],
[st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -154,7 +156,7 @@ def main():
for iwidth in [32]:
for e in [True]:
attr = st.EncodingAttr.get(
- level, ordering, None, pwidth, iwidth
+ level, ordering, ordering, pwidth, iwidth
)
opt = f"parallelization-strategy=none"
compiler = sparse_compiler.SparseCompiler(
@@ -162,7 +164,7 @@ def main():
)
build_compile_and_run_SDDMMM(attr, compiler)
count = count + 1
- # CHECK: Passed 8 tests
+ # CHECK: Passed 10 tests
print("Passed ", count, "tests")
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 7ca1fdd8e5d0ed9..7a101b815310828 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -115,17 +115,18 @@ def main():
# CHECK-LABEL: TEST: testSpMM
print("\nTEST: testSpMM")
+ count = 0
with ir.Context() as ctx, ir.Location.unknown():
- count = 0
# Loop over various ways to compile and annotate the SpMM kernel with
# a *single* sparse tensor. Note that we deliberate do not exhaustively
# search the full state space to reduce runtime of the test. It is
# straightforward to adapt the code below to explore more combinations.
-
+ # For these simple orderings, dim2lvl and lvl2dim are the same.
vl = 1
e = False
opt = f"parallelization-strategy=none"
levels = [
+ [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
[st.DimLevelType.dense, st.DimLevelType.dense],
[st.DimLevelType.dense, st.DimLevelType.compressed],
[st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -144,11 +145,11 @@ def main():
for pwidth in bitwidths:
for iwidth in bitwidths:
attr = st.EncodingAttr.get(
- level, ordering, None, pwidth, iwidth
+ level, ordering, ordering, pwidth, iwidth
)
build_compile_and_run_SpMM(attr, compiler)
count = count + 1
- # CHECK: Passed 8 tests
+ # CHECK: Passed 10 tests
print("Passed ", count, "tests")
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
index d549a9a0954c6cb..3d1af3017abe35b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py
@@ -24,7 +24,7 @@ def __call__(self, module: ir.Module):
self.compile(module)
def compile(self, module: ir.Module):
- """Compiles the module by invoking the sparse copmiler pipeline."""
+ """Compiles the module by invoking the sparse compiler pipeline."""
passmanager.PassManager.parse(self.pipeline).run(module.operation)
def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine:
``````````
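For readers skimming the diff, here is a minimal sketch of constructing the 2-D COO encoding these tests now cover, using the same Python bindings the tests import (mlir.ir and mlir.dialects.sparse_tensor as st). The identity ordering and zero bit widths below are illustrative assumptions, not values taken from the PR; only the level types and the five-argument EncodingAttr.get call mirror the diff above.

```python
# Hedged sketch: build the COO encoding added by this PR. Assumes the same
# Python bindings used by the tests; the identity ordering and zero bit
# widths are illustrative choices, not taken from the PR.
from mlir import ir
from mlir.dialects import sparse_tensor as st

with ir.Context(), ir.Location.unknown():
    # COO storage: outer level compressed but non-unique, inner level singleton.
    level = [st.DimLevelType.compressed_nu, st.DimLevelType.singleton]
    # Identity dimension ordering; for such simple orderings dim2lvl and
    # lvl2dim coincide, which is why the diff passes `ordering` twice.
    ordering = ir.AffineMap.get_permutation([0, 1])
    attr = st.EncodingAttr.get(level, ordering, ordering, 0, 0)
    print(attr)  # prints the #sparse_tensor.encoding<...> attribute
```

Passing `ordering` for both the dim2lvl and lvl2dim maps (instead of `None` for the latter) matches the change in test_SDDMM.py and test_SpMM.py.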
https://github.com/llvm/llvm-project/pull/70090