[Mlir-commits] [mlir] [mlir][sparse] remove sparse output python example (PR #66298)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Wed Sep 13 15:02:57 PDT 2023


llvmbot wrote:



@llvm/pr-subscribers-mlir-core
<details>
<summary>Changes</summary>
Rationale:
This was essentially a pure "string based" test with very little
actual Python usage: the whole kernel was parsed from an MLIR
string, and the output sparse tensor was handled via the
deprecated convertFromMLIRSparseTensor method.
--
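
For context, this is the pattern the removed test followed, condensed from the
diff below; _KERNEL_STR stands in for the MLIR kernel text shown in that diff,
and tools.sparse_compiler is the test-local helper under tools/ (not touched by
this PR). The whole kernel is parsed from a string and driven through raw
ctypes memref descriptors, so the Python bindings themselves are barely
exercised:

<pre>
import ctypes
import os

import numpy as np

from mlir import ir
from mlir import runtime as rt
from tools import sparse_compiler

_KERNEL_STR = "..."  # the #DCSR elementwise-add kernel text from the diff below

support_lib = os.getenv("SUPPORT_LIB")  # path to the MLIR C runner utils library

with ir.Context(), ir.Location.unknown():
    # Parse the textual kernel and JIT it; no IR is built via the bindings.
    module = ir.Module.parse(_KERNEL_STR)
    engine = sparse_compiler.SparseCompiler(
        options="", opt_level=2, shared_libs=[support_lib]
    ).compile_and_jit(module)

    # Dense 3x4 inputs (the real test used specific nonzero values).
    a = np.zeros((3, 4), np.float64)
    b = np.zeros((3, 4), np.float64)
    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))
    # The sparse result comes back as an opaque pointer-to-pointer of char.
    mem_out = ctypes.pointer(ctypes.pointer(ctypes.c_char(0)))
    engine.invoke("main", mem_a, mem_b, mem_out)
</pre>
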
Full diff: https://github.com/llvm/llvm-project/pull/66298.diff

2 Files Affected:

- (removed) mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py (-108) 
- (removed) mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py (-81) 
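
The second removed file is the helper that decoded that opaque output through
the deprecated runtime entry point. A condensed sketch of what it did,
specialized to f64 (the function name here is mine; the full original is in
the diff below):

<pre>
import ctypes

import numpy as np


def sparse_tensor_to_coo_f64(support_lib, sparse_ptr):
    # Load the runtime support library and the deprecated conversion routine.
    c_lib = ctypes.CDLL(support_lib)
    c_lib.convertFromMLIRSparseTensorF64.restype = ctypes.c_void_p

    rank = ctypes.c_ulonglong(0)
    nse = ctypes.c_ulonglong(0)
    shape = ctypes.POINTER(ctypes.c_ulonglong)()
    values = ctypes.POINTER(ctypes.c_double)()
    indices = ctypes.POINTER(ctypes.c_ulonglong)()
    c_lib.convertFromMLIRSparseTensorF64(
        sparse_ptr,
        ctypes.byref(rank),
        ctypes.byref(nse),
        ctypes.byref(shape),
        ctypes.byref(values),
        ctypes.byref(indices),
    )
    # Expose the returned buffers as numpy arrays in COO form.
    shape = np.ctypeslib.as_array(shape, shape=[rank.value])
    values = np.ctypeslib.as_array(values, shape=[nse.value])
    indices = np.ctypeslib.as_array(indices, shape=[nse.value, rank.value])
    return shape, values, indices
</pre>
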


<pre>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
deleted file mode 100644
index a41bde1ee2d34b7..000000000000000
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# RUN: env SUPPORT_LIB=%mlir_c_runner_utils %PYTHON %s | FileCheck %s
-
-import ctypes
-import numpy as np
-import os
-import sys
-
-from mlir import ir
-from mlir import runtime as rt
-from mlir.dialects import sparse_tensor as st
-from mlir.dialects import builtin
-from mlir.dialects.linalg.opdsl import lang as dsl
-
-_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
-sys.path.append(_SCRIPT_PATH)
-from tools import np_to_sparse_tensor as test_tools
-from tools import sparse_compiler
-
-# TODO: Use linalg_structured_op to generate the kernel after making it to
-# handle sparse tensor outputs.
-_KERNEL_STR = """
-#DCSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ]
-}>
-
-#trait_add_elt = {
-  indexing_maps = [
-    affine_map<(i,j) -> (i,j)>,  // A
-    affine_map<(i,j) -> (i,j)>,  // B
-    affine_map<(i,j) -> (i,j)>   // X (out)
-  ],
-  iterator_types = ["parallel", "parallel"],
-  doc = "X(i,j) = A(i,j) + B(i,j)"
-}
-
-func.func @sparse_add_elt(
-    %arga: tensor<3x4xf64, #DCSR>, %argb: tensor<3x4xf64, #DCSR>) -> tensor<3x4xf64, #DCSR> {
-  %argx = bufferization.alloc_tensor() : tensor<3x4xf64, #DCSR>
-  %0 = linalg.generic #trait_add_elt
-    ins(%arga, %argb: tensor<3x4xf64, #DCSR>, tensor<3x4xf64, #DCSR>)
-    outs(%argx: tensor<3x4xf64, #DCSR>) {
-      ^bb(%a: f64, %b: f64, %x: f64):
-        %1 = arith.addf %a, %b : f64
-        linalg.yield %1 : f64
-  } -> tensor<3x4xf64, #DCSR>
-  return %0 : tensor<3x4xf64, #DCSR>
-}
-
-func.func @main(%ad: tensor<3x4xf64>, %bd: tensor<3x4xf64>) -> tensor<3x4xf64, #DCSR>
-  attributes { llvm.emit_c_interface } {
-  %a = sparse_tensor.convert %ad : tensor<3x4xf64> to tensor<3x4xf64, #DCSR>
-  %b = sparse_tensor.convert %bd : tensor<3x4xf64> to tensor<3x4xf64, #DCSR>
-  %0 = call @sparse_add_elt(%a, %b) : (tensor<3x4xf64, #DCSR>, tensor<3x4xf64, #DCSR>) -> tensor<3x4xf64, #DCSR>
-  return %0 : tensor<3x4xf64, #DCSR>
-}
-"""
-
-
-def _run_test(support_lib, kernel):
-    """Compiles, runs and checks results."""
-    compiler = sparse_compiler.SparseCompiler(
-        options="", opt_level=2, shared_libs=[support_lib]
-    )
-    module = ir.Module.parse(kernel)
-    engine = compiler.compile_and_jit(module)
-
-    # Set up numpy inputs and buffer for output.
-    a = np.array(
-        [[1.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 6.6, 0.0]], np.float64
-    )
-    b = np.array(
-        [[1.1, 0.0, 0.0, 2.8], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], np.float64
-    )
-
-    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
-    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))
-
-    # The sparse tensor output is a pointer to pointer of char.
-    out = ctypes.c_char(0)
-    mem_out = ctypes.pointer(ctypes.pointer(out))
-
-    # Invoke the kernel.
-    engine.invoke("main", mem_a, mem_b, mem_out)
-
-    # Retrieve and check the result.
-    rank, nse, shape, values, indices = test_tools.sparse_tensor_to_coo_tensor(
-        support_lib, mem_out[0], np.float64
-    )
-
-    # CHECK: PASSED
-    if np.allclose(values, [2.2, 2.8, 6.6]) and np.allclose(
-        indices, [[0, 0], [0, 3], [2, 2]]
-    ):
-        print("PASSED")
-    else:
-        quit("FAILURE")
-
-
-def test_elementwise_add():
-    # Obtain path to runtime support library.
-    support_lib = os.getenv("SUPPORT_LIB")
-    assert support_lib is not None, "SUPPORT_LIB is undefined"
-    assert os.path.exists(support_lib), f"{support_lib} does not exist"
-    with ir.Context() as ctx, ir.Location.unknown():
-        _run_test(support_lib, _KERNEL_STR)
-
-
-test_elementwise_add()
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py b/mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py
deleted file mode 100644
index 785d42cadbbe9f1..000000000000000
--- a/mlir/test/Integration/Dialect/SparseTensor/python/tools/np_to_sparse_tensor.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#  Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-#  See https://llvm.org/LICENSE.txt for license information.
-#  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#  This file contains functions to process sparse tensor outputs.
-
-import ctypes
-import functools
-import numpy as np
-
-
-@functools.lru_cache()
-def _get_c_shared_lib(lib_name: str):
-    """Loads and returns the requested C shared library.
-
-    Args:
-      lib_name: A string representing the C shared library.
-
-    Returns:
-      The C shared library.
-
-    Raises:
-      OSError: If there is any problem in loading the shared library.
-      ValueError:  If the shared library doesn't contain the needed routine.
-    """
-    # This raises OSError exception if there is any problem in loading the shared
-    # library.
-    c_lib = ctypes.CDLL(lib_name)
-
-    try:
-        c_lib.convertFromMLIRSparseTensorF64.restype = ctypes.c_void_p
-    except Exception as e:
-        raise ValueError(
-            "Missing function convertFromMLIRSparseTensorF64 from "
-            f"the C shared library: {e} "
-        ) from e
-
-    return c_lib
-
-
-def sparse_tensor_to_coo_tensor(support_lib, sparse, dtype):
-    """Converts a sparse tensor to COO-flavored format.
-
-    Args:
-       support_lib: A string for the supporting C shared library.
-       sparse: A ctypes.pointer to the sparse tensor descriptor.
-       dtype: The numpy data type for the tensor elements.
-
-    Returns:
-      A tuple that contains the following values:
-      rank: An integer for the rank of the tensor.
-      nse: An integer for the number of non-zero values in the tensor.
-      shape: A 1D numpy array of integers, for the shape of the tensor.
-      values: A 1D numpy array, for the non-zero values in the tensor.
-      indices: A 2D numpy array of integers, representing the indices for the
-        non-zero values in the tensor.
-
-    Raises:
-      OSError: If there is any problem in loading the shared library.
-      ValueError:  If the shared library doesn't contain the needed routine.
-    """
-    c_lib = _get_c_shared_lib(support_lib)
-
-    rank = ctypes.c_ulonglong(0)
-    nse = ctypes.c_ulonglong(0)
-    shape = ctypes.POINTER(ctypes.c_ulonglong)()
-    values = ctypes.POINTER(np.ctypeslib.as_ctypes_type(dtype))()
-    indices = ctypes.POINTER(ctypes.c_ulonglong)()
-    c_lib.convertFromMLIRSparseTensorF64(
-        sparse,
-        ctypes.byref(rank),
-        ctypes.byref(nse),
-        ctypes.byref(shape),
-        ctypes.byref(values),
-        ctypes.byref(indices),
-    )
-    # Convert the returned values to the corresponding numpy types.
-    shape = np.ctypeslib.as_array(shape, shape=[rank.value])
-    values = np.ctypeslib.as_array(values, shape=[nse.value])
-    indices = np.ctypeslib.as_array(indices, shape=[nse.value, rank.value])
-    return rank, nse, shape, values, indices
</pre>
</details>


https://github.com/llvm/llvm-project/pull/66298


More information about the Mlir-commits mailing list