[Mlir-commits] [mlir] [mlir][sparse] extend sparse output test (PR #69986)
Aart Bik
llvmlistbot at llvm.org
Mon Oct 23 17:13:20 PDT 2023
https://github.com/aartbik updated https://github.com/llvm/llvm-project/pull/69986
From ffdb701205064209640b47b5f55525f3da16160c Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Mon, 23 Oct 2023 16:55:34 -0700
Subject: [PATCH 1/2] [mlir][sparse] extend sparse output test
This adds COO and loose compressed formats to the output test.
It also prepares BSR for output testing, but that still requires
the conversion to work first. Also cleans up stale TODOs.
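
For context, here is a minimal sketch (my own illustration, not part of the patch)
of how the updated CHECK count of 33 in test_output.py falls out of the test matrix:
four level combinations, two orderings, and four bitwidths, plus one BSR case whose
counter is still incremented even though its run is commented out for now.

  # Hypothetical names; only the arithmetic matters here.
  num_levels = 4     # COO, CSR, loose CSR, DCSR level combinations
  num_orderings = 2  # identity and transposed permutations
  num_bitwidths = 4  # position/coordinate bitwidths 8, 16, 32, 64
  count = num_levels * num_orderings * num_bitwidths + 1  # +1 for the BSR case
  assert count == 33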
---
.../Dialect/SparseTensor/python/test_SDDMM.py | 1 -
.../Dialect/SparseTensor/python/test_SpMM.py | 1 -
.../SparseTensor/python/test_output.py | 38 +++++++++++++++----
.../SparseTensor/python/test_stress.py | 6 ---
4 files changed, 30 insertions(+), 16 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index 1f9b6360383180c..e8bb0f727a7ce9c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -108,7 +108,6 @@ def build_compile_and_run_SDDMMM(attr: st.EncodingAttr, compiler):
# Invoke the kernel and get numpy output.
# Built-in bufferization uses in-out buffers.
- # TODO: replace with inplace comprehensive bufferization.
engine.invoke("main", mem_out, mem_a, mem_b, mem_c)
# Sanity check on computed result. Only a few elements
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 69f6cdcea967fae..7ca1fdd8e5d0ed9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -96,7 +96,6 @@ def build_compile_and_run_SpMM(attr: st.EncodingAttr, compiler):
# Invoke the kernel and get numpy output.
# Built-in bufferization uses in-out buffers.
- # TODO: replace with inplace comprehensive bufferization.
engine.invoke("main", mem_out, mem_a, mem_b, mem_c)
# Sanity check on computed result.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
index 7d7749008020515..924828b93292216 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -16,7 +16,7 @@
sys.path.append(_SCRIPT_PATH)
from tools import sparse_compiler
-# TODO: move more into actual IR building.
+
def boilerplate(attr: st.EncodingAttr):
"""Returns boilerplate main method."""
return f"""
@@ -48,7 +48,7 @@ def expected():
"""
-def build_compile_and_run_output(attr: st.EncodingAttr, compiler):
+def build_compile_and_run_output(attr: st.EncodingAttr, compiler, expected):
# Build and Compile.
module = ir.Module.parse(boilerplate(attr))
engine = compiler.compile_and_jit(module)
@@ -59,9 +59,8 @@ def build_compile_and_run_output(attr: st.EncodingAttr, compiler):
buf = out.encode("utf-8")
mem_a = ctypes.pointer(ctypes.pointer(ctypes.create_string_buffer(buf)))
engine.invoke("main", mem_a)
-
actual = open(out).read()
- if actual != expected():
+ if actual != expected:
quit("FAILURE")
@@ -75,9 +74,13 @@ def main():
print("\nTEST: test_output")
count = 0
with ir.Context() as ctx, ir.Location.unknown():
- # Loop over various sparse types: CSR, DCSR, CSC, DCSC.
+ # Loop over various sparse types (COO, CSR, DCSR, CSC, DCSC) with
+ # regular and loose compression and various metadata bitwidths.
+ # For these simple orderings, dim2lvl and lvl2dim are the same.
levels = [
+ [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
[st.DimLevelType.dense, st.DimLevelType.compressed],
+ [st.DimLevelType.dense, st.DimLevelType.loose_compressed],
[st.DimLevelType.compressed, st.DimLevelType.compressed],
]
orderings = [
@@ -91,11 +94,30 @@ def main():
for level in levels:
for ordering in orderings:
for bwidth in bitwidths:
- attr = st.EncodingAttr.get(level, ordering, None, bwidth, bwidth)
- build_compile_and_run_output(attr, compiler)
+ attr = st.EncodingAttr.get(level, ordering, ordering, bwidth, bwidth)
+ build_compile_and_run_output(attr, compiler, expected())
count = count + 1
- # CHECK: Passed 16 tests
+ # Now do the same for BSR.
+ level = [st.DimLevelType.dense, st.DimLevelType.compressed, st.DimLevelType.dense, st.DimLevelType.dense]
+ d0 = ir.AffineDimExpr.get(0)
+ d1 = ir.AffineDimExpr.get(1)
+ c2 = ir.AffineConstantExpr.get(2)
+ dim2lvl = ir.AffineMap.get(2, 0, [ir.AffineExpr.get_floor_div(d0, c2),
+ ir.AffineExpr.get_floor_div(d1, c2),
+ ir.AffineExpr.get_mod(d0, c2),
+ ir.AffineExpr.get_mod(d1, c2)])
+ l0 = ir.AffineDimExpr.get(0)
+ l1 = ir.AffineDimExpr.get(1)
+ l2 = ir.AffineDimExpr.get(2)
+ l3 = ir.AffineDimExpr.get(3)
+ lvl2dim = ir.AffineMap.get(4, 0, [2 * l0 + l2, 2 * l1 + l3])
+ attr = st.EncodingAttr.get(level, dim2lvl, lvl2dim, 0, 0)
+ # TODO: enable this once conversion on BSR is working
+ # build_compile_and_run_output(attr, compiler, block_expected())
+ count = count + 1
+
+ # CHECK: Passed 33 tests
print("Passed", count, "tests")
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
index 841b02bc10c8bec..c5c247d8b9e611b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
@@ -25,8 +25,6 @@
# ===----------------------------------------------------------------------=== #
-# TODO: move this boilerplate to its own module, so it can be used by
-# other tests and programs.
class TypeConverter:
"""Converter between NumPy types and MLIR types."""
@@ -78,7 +76,6 @@ def get_RankedTensorType_of_nparray(
) -> ir.RankedTensorType:
"""Returns the ir.RankedTensorType of a NumPy array. Note that NumPy
arrays can only be converted to/from dense tensors, not sparse tensors."""
- # TODO: handle strides as well?
return ir.RankedTensorType.get(
nparray.shape, self.dtype_to_irtype(nparray.dtype)
)
@@ -112,7 +109,6 @@ def build(self, types: List[ir.Type]):
with ir.InsertionPoint(self._module.body):
tp0 = types.pop(0)
self._roundtripTp = tp0
- # TODO: assert dense? assert element type is recognised by the TypeConverter?
types.append(tp0)
funcTp = ir.FunctionType.get(inputs=[tp0], results=[tp0])
funcOp = func.FuncOp(name="main", type=funcTp)
@@ -206,8 +202,6 @@ def main():
shape = range(2, 3)
rank = len(shape)
# All combinations.
- # TODO: add singleton here too; which requires updating how `np_arg0`
- # is initialized below.
levels = list(
itertools.product(
*itertools.repeat(
From 3fecaeb73e5de5b2230faa8cc0a33968ff5edc3e Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Mon, 23 Oct 2023 17:12:43 -0700
Subject: [PATCH 2/2] format
---
.../SparseTensor/python/test_output.py | 161 ++++++++++--------
1 file changed, 87 insertions(+), 74 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
index 924828b93292216..607bf8856c08874 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -18,8 +18,8 @@
def boilerplate(attr: st.EncodingAttr):
- """Returns boilerplate main method."""
- return f"""
+ """Returns boilerplate main method."""
+ return f"""
func.func @main(%p : !llvm.ptr<i8>) -> () attributes {{ llvm.emit_c_interface }} {{
%d = arith.constant sparse<[[0, 0], [1, 1], [0, 9], [9, 0], [4, 4]],
[1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<10x10xf64>
@@ -31,13 +31,13 @@ def boilerplate(attr: st.EncodingAttr):
def expected():
- """Returns expected contents of output.
+ """Returns expected contents of output.
- Regardless of the dimension ordering, compression, and bitwidths that are
- used in the sparse tensor, the output is always lexicographically sorted
- by natural index order.
- """
- return f"""; extended FROSTT format
+ Regardless of the dimension ordering, compression, and bitwidths that are
+ used in the sparse tensor, the output is always lexicographically sorted
+ by natural index order.
+ """
+ return f"""; extended FROSTT format
2 5
10 10
1 1 1
@@ -49,76 +49,89 @@ def expected():
def build_compile_and_run_output(attr: st.EncodingAttr, compiler, expected):
- # Build and Compile.
- module = ir.Module.parse(boilerplate(attr))
- engine = compiler.compile_and_jit(module)
+ # Build and Compile.
+ module = ir.Module.parse(boilerplate(attr))
+ engine = compiler.compile_and_jit(module)
- # Invoke the kernel and compare output.
- with tempfile.TemporaryDirectory() as test_dir:
- out = os.path.join(test_dir, "out.tns")
- buf = out.encode("utf-8")
- mem_a = ctypes.pointer(ctypes.pointer(ctypes.create_string_buffer(buf)))
- engine.invoke("main", mem_a)
- actual = open(out).read()
- if actual != expected:
- quit("FAILURE")
+ # Invoke the kernel and compare output.
+ with tempfile.TemporaryDirectory() as test_dir:
+ out = os.path.join(test_dir, "out.tns")
+ buf = out.encode("utf-8")
+ mem_a = ctypes.pointer(ctypes.pointer(ctypes.create_string_buffer(buf)))
+ engine.invoke("main", mem_a)
+ actual = open(out).read()
+ if actual != expected:
+ quit("FAILURE")
def main():
- support_lib = os.getenv("SUPPORT_LIB")
- assert support_lib is not None, "SUPPORT_LIB is undefined"
- if not os.path.exists(support_lib):
- raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), support_lib)
-
- # CHECK-LABEL: TEST: test_output
- print("\nTEST: test_output")
- count = 0
- with ir.Context() as ctx, ir.Location.unknown():
- # Loop over various sparse types (COO, CSR, DCSR, CSC, DCSC) with
- # regular and loose compression and various metadata bitwidths.
- # For these simple orderings, dim2lvl and lvl2dim are the same.
- levels = [
- [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
- [st.DimLevelType.dense, st.DimLevelType.compressed],
- [st.DimLevelType.dense, st.DimLevelType.loose_compressed],
- [st.DimLevelType.compressed, st.DimLevelType.compressed],
- ]
- orderings = [
- ir.AffineMap.get_permutation([0, 1]),
- ir.AffineMap.get_permutation([1, 0]),
- ]
- bitwidths = [8, 16, 32, 64]
- compiler = sparse_compiler.SparseCompiler(
- options="", opt_level=2, shared_libs=[support_lib]
- )
- for level in levels:
- for ordering in orderings:
- for bwidth in bitwidths:
- attr = st.EncodingAttr.get(level, ordering, ordering, bwidth, bwidth)
- build_compile_and_run_output(attr, compiler, expected())
- count = count + 1
-
- # Now do the same for BSR.
- level = [st.DimLevelType.dense, st.DimLevelType.compressed, st.DimLevelType.dense, st.DimLevelType.dense]
- d0 = ir.AffineDimExpr.get(0)
- d1 = ir.AffineDimExpr.get(1)
- c2 = ir.AffineConstantExpr.get(2)
- dim2lvl = ir.AffineMap.get(2, 0, [ir.AffineExpr.get_floor_div(d0, c2),
- ir.AffineExpr.get_floor_div(d1, c2),
- ir.AffineExpr.get_mod(d0, c2),
- ir.AffineExpr.get_mod(d1, c2)])
- l0 = ir.AffineDimExpr.get(0)
- l1 = ir.AffineDimExpr.get(1)
- l2 = ir.AffineDimExpr.get(2)
- l3 = ir.AffineDimExpr.get(3)
- lvl2dim = ir.AffineMap.get(4, 0, [2 * l0 + l2, 2 * l1 + l3])
- attr = st.EncodingAttr.get(level, dim2lvl, lvl2dim, 0, 0)
- # TODO: enable this once conversion on BSR is working
- # build_compile_and_run_output(attr, compiler, block_expected())
- count = count + 1
-
- # CHECK: Passed 33 tests
- print("Passed", count, "tests")
+ support_lib = os.getenv("SUPPORT_LIB")
+ assert support_lib is not None, "SUPPORT_LIB is undefined"
+ if not os.path.exists(support_lib):
+ raise FileNotFoundError(
+ errno.ENOENT, os.strerror(errno.ENOENT), support_lib
+ )
+
+ # CHECK-LABEL: TEST: test_output
+ print("\nTEST: test_output")
+ count = 0
+ with ir.Context() as ctx, ir.Location.unknown():
+ # Loop over various sparse types (COO, CSR, DCSR, CSC, DCSC) with
+ # regular and loose compression and various metadata bitwidths.
+ # For these simple orderings, dim2lvl and lvl2dim are the same.
+ levels = [
+ [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
+ [st.DimLevelType.dense, st.DimLevelType.compressed],
+ [st.DimLevelType.dense, st.DimLevelType.loose_compressed],
+ [st.DimLevelType.compressed, st.DimLevelType.compressed],
+ ]
+ orderings = [
+ ir.AffineMap.get_permutation([0, 1]),
+ ir.AffineMap.get_permutation([1, 0]),
+ ]
+ bitwidths = [8, 16, 32, 64]
+ compiler = sparse_compiler.SparseCompiler(
+ options="", opt_level=2, shared_libs=[support_lib]
+ )
+ for level in levels:
+ for ordering in orderings:
+ for bwidth in bitwidths:
+ attr = st.EncodingAttr.get(level, ordering, ordering, bwidth, bwidth)
+ build_compile_and_run_output(attr, compiler, expected())
+ count = count + 1
+
+ # Now do the same for BSR.
+ level = [
+ st.DimLevelType.dense,
+ st.DimLevelType.compressed,
+ st.DimLevelType.dense,
+ st.DimLevelType.dense,
+ ]
+ d0 = ir.AffineDimExpr.get(0)
+ d1 = ir.AffineDimExpr.get(1)
+ c2 = ir.AffineConstantExpr.get(2)
+ dim2lvl = ir.AffineMap.get(
+ 2,
+ 0,
+ [
+ ir.AffineExpr.get_floor_div(d0, c2),
+ ir.AffineExpr.get_floor_div(d1, c2),
+ ir.AffineExpr.get_mod(d0, c2),
+ ir.AffineExpr.get_mod(d1, c2),
+ ],
+ )
+ l0 = ir.AffineDimExpr.get(0)
+ l1 = ir.AffineDimExpr.get(1)
+ l2 = ir.AffineDimExpr.get(2)
+ l3 = ir.AffineDimExpr.get(3)
+ lvl2dim = ir.AffineMap.get(4, 0, [2 * l0 + l2, 2 * l1 + l3])
+ attr = st.EncodingAttr.get(level, dim2lvl, lvl2dim, 0, 0)
+ # TODO: enable this once conversion on BSR is working
+ # build_compile_and_run_output(attr, compiler, block_expected())
+ count = count + 1
+
+ # CHECK: Passed 33 tests
+ print("Passed", count, "tests")
if __name__ == "__main__":
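
For readers unfamiliar with the file contents that expected() above checks, here is a
small reader sketch; it is my own assumption based on the extended FROSTT layout shown
in the docstring (a comment line, then rank and number of nonzeros, then the dimension
sizes, then one 1-based coordinate/value line per nonzero), not code from this patch.

  # Hypothetical helper, not part of the patch: parse an extended FROSTT file.
  def read_extended_frostt(path):
      with open(path) as f:
          lines = [l for l in f.read().splitlines() if l and not l.startswith(";")]
      rank, nnz = map(int, lines[0].split())
      dims = list(map(int, lines[1].split()))
      entries = []
      for line in lines[2 : 2 + nnz]:
          *coords, value = line.split()
          entries.append((tuple(map(int, coords)), float(value)))
      return rank, dims, entries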