[Mlir-commits] [mlir] 110f927 - [mlir][sparse] refactor sparse tensor traits (#73726)

llvmlistbot at llvm.org
Tue Nov 28 16:58:47 PST 2023


Author: Aart Bik
Date: 2023-11-28T16:58:43-08:00
New Revision: 110f927f937102b1233291ba70dd5f873f4b1724

URL: https://github.com/llvm/llvm-project/commit/110f927f937102b1233291ba70dd5f873f4b1724
DIFF: https://github.com/llvm/llvm-project/commit/110f927f937102b1233291ba70dd5f873f4b1724.diff

LOG: [mlir][sparse] refactor sparse tensor traits (#73726)

Slightly refactors the order and usage of the sparse tensor
traits. Also makes minor doc improvements.

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td

Removed: 
    


################################################################################
diff  --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index 7fcd1bc2a384a58..1e62d9935d63c32 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -475,9 +475,6 @@ def IsSparseTensorSlicePred
   : CPred<"!!::mlir::sparse_tensor::getSparseTensorEncoding($_self) && "
           "  ::mlir::sparse_tensor::getSparseTensorEncoding($_self).isSlice()">;
 
-// The following four follow the same idiom as `TensorOf`, `AnyTensor`,
-// `RankedTensorOf`, `AnyRankedTensor`.
-
 class SparseTensorOf<list<Type> allowedTypes>
   : TensorOf<allowedTypes, [IsSparseTensorPred], "sparse tensor">;
 
@@ -487,17 +484,17 @@ class COOSparseTensorOf<list<Type> allowedTypes>
 class SparseTensorSliceOf<list<Type> allowedTypes>
   : TensorOf<allowedTypes, [IsSparseTensorSlicePred], "sparse tensor slice">;
 
-def AnySparseTensor : SparseTensorOf<[AnyType]>;
-def AnyCOOSparseTensor : COOSparseTensorOf<[AnyType]>;
-def AnySparseTensorSlice : SparseTensorSliceOf<[AnyType]>;
-
 class RankedSparseTensorOf<list<Type> allowedTypes>
   : RankedTensorOf<allowedTypes, [IsSparseTensorPred], "ranked sparse tensor">;
 
-def AnyRankedSparseTensor : RankedSparseTensorOf<[AnyType]>;
-
 class ScalarLikeOf<list<Type> allowedTypes>
-  : AnyTypeOf<[0DTensorOf<allowedTypes>, AnyTypeOf<allowedTypes>]>;
+  : AnyTypeOf<[0DTensorOf<allowedTypes>, AnyTypeOf<allowedTypes>], "scalar like">;
+
+def AnySparseTensor : SparseTensorOf<[AnyType]>;
+def AnyCOOSparseTensor : COOSparseTensorOf<[AnyType]>;
+def AnySparseTensorSlice : SparseTensorSliceOf<[AnyType]>;
+def AnyRankedSparseTensor : RankedSparseTensorOf<[AnyType]>;
+def AnyIndexingScalarLike : ScalarLikeOf<[AnySignlessIntegerOrIndex]>;
 
 //===----------------------------------------------------------------------===//
 // Sparse Tensor Sorting Algorithm Attribute.
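The consolidated `AnySparseTensor`, `AnyCOOSparseTensor`, `AnySparseTensorSlice`, `AnyRankedSparseTensor`, and `AnyIndexingScalarLike` defs are intended to be used directly as operand and result type constraints in ODS op declarations, as the ops file below does. A minimal sketch of such a use, not part of this commit; the op name, operand names, and summary are purely illustrative:

```tablegen
// Hypothetical op: only the constraint names (AnySparseTensor,
// AnyIndexingScalarLike) and the SparseTensor_Op/Pure scaffolding come from
// the actual sources; everything else is made up for illustration.
def SparseTensor_ExampleOp : SparseTensor_Op<"example", [Pure]>,
    Arguments<(ins AnySparseTensor:$tensor)>,
    Results<(outs AnyIndexingScalarLike:$len)> {
  let summary = "Illustrative op using the consolidated type constraints";
}
```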

diff  --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index c5cb0ac155d6828..78031f28949a9e5 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -58,7 +58,7 @@ def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]>,
     Arguments<(ins TensorOf<[AnyType]>:$values,
                    Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$levels)>,
     Results<(outs AnySparseTensor: $result)> {
-  let summary = "Returns a sparse tensor from the given values, levels";
+  let summary = "Returns a sparse tensor assembled from the given values and levels";
 
   let description = [{
    Assembles the values and per-level coordinate or position arrays into a sparse tensor.
@@ -71,7 +71,7 @@ def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]>,
      each supplies the coordinate scheme used in the sparse tensor for
      the corresponding level as specified by `sparse_tensor::StorageLayout`.
 
-    This operation can be used to materialize a sparse tensor from external
+    This operation can be used to assemble a sparse tensor from external
     sources; e.g., when passing two numpy arrays from Python.
 
    Disclaimer: It is the user's responsibility to provide input that can be
@@ -82,7 +82,6 @@ def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]>,
     dimOrdering/higherOrdering mappings.  However, the current implementation
     does not yet support them.
 
-
     Example:
 
     ```mlir
@@ -109,16 +108,17 @@ def SparseTensor_DisassembleOp : SparseTensor_Op<"disassemble", [Pure, SameVaria
                    Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$out_levels)>,
     Results<(outs TensorOf<[AnyType]>:$ret_values,
                   Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$ret_levels,
-                  ScalarLikeOf<[AnySignlessIntegerOrIndex]>:$val_len,
-                  Variadic<ScalarLikeOf<[AnySignlessIntegerOrIndex]>>:$lvl_lens)> {
-  let summary = "Returns the (values, coordinates) pair unpacked from the input tensor";
+                  AnyIndexingScalarLike:$val_len,
+                  Variadic<AnyIndexingScalarLike>:$lvl_lens)> {
+  let summary = "Returns the (values, coordinates) pair disassembled from the input tensor";
 
   let description = [{
-    The disassemble operation is the inverse of `sparse_tensor::assemble`.  It returns
-    the values and per-level position and coordinate array to the user
-    from the sparse tensor along with the actual length of the memory used in
-    each returned buffer. This operation can be used for returning an
-    unpacked MLIR sparse tensor to frontend; e.g., returning two numpy arrays to Python.
+    The disassemble operation is the inverse of `sparse_tensor::assemble`.
+    It returns the values and per-level position and coordinate array to the
+    user from the sparse tensor along with the actual length of the memory used
+    in each returned buffer. This operation can be used for returning a
+    disassembled MLIR sparse tensor to the frontend; e.g., returning two numpy arrays
+    to Python.
 
    Disclaimer: It is the user's responsibility to allocate large enough buffers
    to hold the sparse tensor. The sparsifier simply copies each field
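As a reading aid for the result types above: `AnyIndexingScalarLike`, defined in the attrs file earlier in this diff, is a `ScalarLikeOf<[AnySignlessIntegerOrIndex]>`, so `$val_len` and each entry of `$lvl_lens` may be typed either as a 0-D tensor or as a bare scalar of signless integer or index type. Restated as a sketch, with an explanatory comment that is not in the source:

```tablegen
// Accepts both 0-D tensors (e.g. tensor<index>, tensor<i64>) and bare
// scalars (e.g. index, i64) for the returned length values.
def AnyIndexingScalarLike : ScalarLikeOf<[AnySignlessIntegerOrIndex]>;
```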


        

