[Mlir-commits] [mlir] [mlir][sparse] minor edits to support lib files (PR #68137)
Aart Bik
llvmlistbot at llvm.org
Tue Oct 3 11:22:18 PDT 2023
https://github.com/aartbik updated https://github.com/llvm/llvm-project/pull/68137
From 44b857d7f47b5f43d97cd08d21e5faf96d7e5234 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Tue, 3 Oct 2023 10:45:49 -0700
Subject: [PATCH 1/3] [mlir][sparse] minor edits to support lib files
---
.../mlir/ExecutionEngine/SparseTensor/COO.h | 63 ++++------------
.../mlir/ExecutionEngine/SparseTensor/File.h | 29 ++------
.../ExecutionEngine/SparseTensor/Storage.h | 72 ++++++-------------
.../ExecutionEngine/SparseTensorRuntime.h | 11 ++-
4 files changed, 44 insertions(+), 131 deletions(-)
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
index 40c68177bbea821..c8a9ee28f39e58b 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
@@ -23,20 +23,19 @@ namespace mlir {
namespace sparse_tensor {
/// An element of a sparse tensor in coordinate-scheme representation
-/// (i.e., a pair of coordinates and value). For example, a rank-1
+/// (i.e., a pair of coordinates and value). For example, a rank-1
/// vector element would look like
/// ({i}, a[i])
/// and a rank-5 tensor element would look like
/// ({i,j,k,l,m}, a[i,j,k,l,m])
///
-/// The coordinates are represented as a (non-owning) pointer into
-/// a shared pool of coordinates, rather than being stored directly in
-/// this object. This significantly improves performance because it:
-/// (1) reduces the per-element memory footprint, and (2) centralizes
-/// the memory management for coordinates. The only downside is that
-/// the coordinates themselves cannot be retrieved without knowing the
-/// rank of the tensor to which this element belongs (and that rank is
-/// not stored in this object).
+/// The coordinates are represented as a (non-owning) pointer into a
+/// shared pool of coordinates, rather than being stored directly in this
+/// object. This significantly improves performance because it reduces the
+/// per-element memory footprint and centralizes the memory management for
+/// coordinates. The only downside is that the coordinates themselves cannot
+/// be retrieved without knowing the rank of the tensor to which this element
+/// belongs (and that rank is not stored in this object).
template <typename V>
struct Element final {
Element(const uint64_t *coords, V val) : coords(coords), value(val){};
@@ -48,10 +47,6 @@ struct Element final {
template <typename V>
struct ElementLT final {
ElementLT(uint64_t rank) : rank(rank) {}
-
- /// Compares two elements a la `operator<`.
- ///
- /// Precondition: the elements must both be valid for `rank`.
bool operator()(const Element<V> &e1, const Element<V> &e2) const {
for (uint64_t d = 0; d < rank; ++d) {
if (e1.coords[d] == e2.coords[d])
@@ -60,13 +55,10 @@ struct ElementLT final {
}
return false;
}
-
const uint64_t rank;
};
-/// The type of callback functions which receive an element. We avoid
-/// packaging the coordinates and value together as an `Element` object
-/// because this helps keep code somewhat cleaner.
+/// The type of callback functions which receive an element.
template <typename V>
using ElementConsumer =
const std::function<void(const std::vector<uint64_t> &, V)> &;
@@ -89,27 +81,13 @@ class SparseTensorCOO final {
using size_type = typename vector_type::size_type;
/// Constructs a new coordinate-scheme sparse tensor with the given
- /// sizes and initial storage capacity.
- ///
- /// Asserts:
- /// * `dimSizes` has nonzero size.
- /// * the elements of `dimSizes` are nonzero.
+ /// sizes and an optional initial storage capacity.
explicit SparseTensorCOO(const std::vector<uint64_t> &dimSizes,
uint64_t capacity = 0)
: SparseTensorCOO(dimSizes.size(), dimSizes.data(), capacity) {}
- // TODO: make a class for capturing known-valid sizes (a la PermutationRef),
- // so that `SparseTensorStorage::toCOO` can avoid redoing these assertions.
- // Also so that we can enforce the asserts *before* copying into `dimSizes`.
- //
/// Constructs a new coordinate-scheme sparse tensor with the given
- /// sizes and initial storage capacity.
- ///
- /// Precondition: `dimSizes` must be valid for `dimRank`.
- ///
- /// Asserts:
- /// * `dimRank` is nonzero.
- /// * the elements of `dimSizes` are nonzero.
+ /// sizes and an optional initial storage capacity.
explicit SparseTensorCOO(uint64_t dimRank, const uint64_t *dimSizes,
uint64_t capacity = 0)
: dimSizes(dimSizes, dimSizes + dimRank), isSorted(true) {
@@ -134,16 +112,7 @@ class SparseTensorCOO final {
/// Returns the `operator<` closure object for the COO's element type.
ElementLT<V> getElementLT() const { return ElementLT<V>(getRank()); }
- /// Adds an element to the tensor. This method does not check whether
- /// `dimCoords` is already associated with a value, it adds it regardless.
- /// Resolving such conflicts is left up to clients of the iterator
- /// interface.
- ///
- /// This method invalidates all iterators.
- ///
- /// Asserts:
- /// * the `dimCoords` is valid for `getRank`.
- /// * the components of `dimCoords` are valid for `getDimSizes`.
+ /// Adds an element to the tensor. This method invalidates all iterators.
void add(const std::vector<uint64_t> &dimCoords, V val) {
const uint64_t *base = coordinates.data();
const uint64_t size = coordinates.size();
@@ -154,7 +123,7 @@ class SparseTensorCOO final {
"Coordinate is too large for the dimension");
coordinates.push_back(dimCoords[d]);
}
- // This base only changes if `coordinates` was reallocated. In which
+ // This base only changes if `coordinates` was reallocated. In which
// case, we need to correct all previous pointers into the vector.
// Note that this only happens if we did not set the initial capacity
// right, and then only for every internal vector reallocation (which
@@ -175,11 +144,9 @@ class SparseTensorCOO final {
const_iterator begin() const { return elements.cbegin(); }
const_iterator end() const { return elements.cend(); }
- /// Sorts elements lexicographically by coordinates. If a coordinate
+ /// Sorts elements lexicographically by coordinates. If a coordinate
/// is mapped to multiple values, then the relative order of those
- /// values is unspecified.
- ///
- /// This method invalidates all iterators.
+ /// values is unspecified. This method invalidates all iterators.
void sort() {
if (isSorted)
return;
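
[For context on the trimmed COO.h comments: a minimal usage sketch of the SparseTensorCOO<V> API documented above, i.e. construction with dimension sizes and a capacity hint, add(), sort(), and iteration. The names come from the header shown in this diff; the driver code around them is illustrative only.]

#include "mlir/ExecutionEngine/SparseTensor/COO.h"

#include <cstdint>
#include <cstdio>
#include <vector>

using mlir::sparse_tensor::SparseTensorCOO;

int main() {
  // A 3x4 rank-2 tensor with an initial capacity hint of 2 elements.
  std::vector<uint64_t> dimSizes = {3, 4};
  SparseTensorCOO<double> coo(dimSizes, /*capacity=*/2);

  // add() appends an element regardless of duplicates; the coordinates are
  // copied into the shared pool, so each Element stores only a pointer.
  coo.add({2, 1}, 3.5);
  coo.add({0, 3}, 1.0);

  // sort() orders the elements lexicographically by coordinates and, like
  // add(), invalidates all iterators.
  coo.sort();

  for (const auto &e : coo)
    std::printf("a[%llu,%llu] = %g\n", (unsigned long long)e.coords[0],
                (unsigned long long)e.coords[1], e.value);
  return 0;
}
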
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
index 1b6736b0cdd1d12..be1c4f72e1200da 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements parsing and printing of files in one of the
-// following external formats:
+// This file implements reading and writing files in one of the following
+// external formats:
//
// (1) Matrix Market Exchange (MME): *.mtx
// https://math.nist.gov/MatrixMarket/formats.html
@@ -197,31 +197,14 @@ class SparseTensorReader final {
/// Allocates a new COO object for `lvlSizes`, initializes it by reading
/// all the elements from the file and applying `dim2lvl` to their
- /// dim-coordinates, and then closes the file.
- ///
- /// Preconditions:
- /// * `lvlSizes` must be valid for `lvlRank`.
- /// * `dim2lvl` must be valid for `getRank()`.
- /// * `dim2lvl` maps `getDimSizes()`-coordinates to `lvlSizes`-coordinates.
- /// * the file's actual value type can be read as `V`.
- ///
- /// Asserts:
- /// * `isValid()`
- /// * `dim2lvl` is a permutation, and therefore also `lvlRank == getRank()`.
- /// (This requirement will be lifted once we functionalize `dim2lvl`.)
- //
- // NOTE: This method is factored out of `readSparseTensor` primarily to
- // reduce code bloat (since the bulk of the code doesn't care about the
- // `<P,I>` type template parameters). But we leave it public since it's
- // perfectly reasonable for clients to use.
+ /// dim-coordinates, and then closes the file. Templated on V only.
template <typename V>
SparseTensorCOO<V> *readCOO(uint64_t lvlRank, const uint64_t *lvlSizes,
const uint64_t *dim2lvl);
/// Allocates a new sparse-tensor storage object with the given encoding,
/// initializes it by reading all the elements from the file, and then
- /// closes the file. Preconditions/assertions are as per `readCOO`
- /// and `SparseTensorStorage::newFromCOO`.
+ /// closes the file. Templated on P, I, and V.
template <typename P, typename I, typename V>
SparseTensorStorage<P, I, V> *
readSparseTensor(uint64_t lvlRank, const uint64_t *lvlSizes,
@@ -312,10 +295,6 @@ SparseTensorCOO<V> *SparseTensorReader::readCOO(uint64_t lvlRank,
const uint64_t *lvlSizes,
const uint64_t *dim2lvl) {
assert(isValid() && "Attempt to readCOO() before readHeader()");
- // Construct a `PermutationRef` for the `pushforward` below.
- // TODO: This specific implementation does not generalize to arbitrary
- // mappings, but once we functionalize the `dim2lvl` argument we can
- // simply use that function instead.
const uint64_t dimRank = getRank();
assert(lvlRank == dimRank && "Rank mismatch");
detail::PermutationRef d2l(dimRank, dim2lvl);
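
[Likewise, a small sketch of calling the simplified readCOO() documented above with an identity dim2lvl mapping. How the reader is created and readHeader() invoked is assumed to happen elsewhere and is not shown.]

#include "mlir/ExecutionEngine/SparseTensor/File.h"

#include <cstdint>

using namespace mlir::sparse_tensor;

// Reads a rank-2 tensor into a COO, mapping dims to levels with the
// identity permutation. The reader is assumed to be valid (readHeader()
// already called); rows/cols would normally come from getDimSizes().
SparseTensorCOO<double> *readAsCOO(SparseTensorReader &reader, uint64_t rows,
                                   uint64_t cols) {
  const uint64_t lvlSizes[] = {rows, cols};
  const uint64_t dim2lvl[] = {0, 1}; // identity mapping
  return reader.readCOO<double>(/*lvlRank=*/2, lvlSizes, dim2lvl);
}
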
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 3c6d6c5bf5c998e..2cab0c01d134285 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -25,9 +25,28 @@
#include "mlir/ExecutionEngine/SparseTensor/COO.h"
#include "mlir/ExecutionEngine/SparseTensor/ErrorHandling.h"
+#define ASSERT_VALID_DIM(d) \
+ assert(d < getDimRank() && "Dimension is out of bounds");
+#define ASSERT_VALID_LVL(l) \
+ assert(l < getLvlRank() && "Level is out of bounds");
+#define ASSERT_COMPRESSED_LVL(l) \
+ assert(isCompressedLvl(l) && "Level is not compressed");
+#define ASSERT_COMPRESSED_OR_SINGLETON_LVL(l) \
+ do { \
+ const DimLevelType dlt = getLvlType(l); \
+ (void)dlt; \
+ assert((isCompressedDLT(dlt) || isSingletonDLT(dlt)) && \
+ "Level is neither compressed nor singleton"); \
+ } while (false)
+#define ASSERT_DENSE_DLT(dlt) assert(isDenseDLT(dlt) && "Level is not dense");
+
namespace mlir {
namespace sparse_tensor {
+// Forward references.
+template <typename V> class SparseTensorEnumeratorBase;
+template <typename P, typename C, typename V> class SparseTensorEnumerator;
+
namespace detail {
/// Checks whether the `perm` array is a permutation of `[0 .. size)`.
@@ -125,33 +144,6 @@ class PermutationRef final {
} // namespace detail
-//===----------------------------------------------------------------------===//
-// This forward decl is sufficient to split `SparseTensorStorageBase` into
-// its own header, but isn't sufficient for `SparseTensorStorage` to join it.
-template <typename V>
-class SparseTensorEnumeratorBase;
-
-// These macros ensure consistent error messages, without risk of incuring
-// an additional method call to do so.
-#define ASSERT_VALID_DIM(d) \
- assert(d < getDimRank() && "Dimension is out of bounds");
-#define ASSERT_VALID_LVL(l) \
- assert(l < getLvlRank() && "Level is out of bounds");
-#define ASSERT_COMPRESSED_LVL(l) \
- assert(isCompressedLvl(l) && "Level is not compressed");
-#define ASSERT_COMPRESSED_OR_SINGLETON_LVL(l) \
- do { \
- const DimLevelType dlt = getLvlType(l); \
- (void)dlt; \
- assert((isCompressedDLT(dlt) || isSingletonDLT(dlt)) && \
- "Level is neither compressed nor singleton"); \
- } while (false)
-// Because the `SparseTensorStorageBase` ctor uses `MLIR_SPARSETENSOR_FATAL`
-// (rather than `assert`) when validating level-types, all the uses of
-// `ASSERT_DENSE_DLT` are technically unnecessary. However, they are
-// retained for the sake of future-proofing.
-#define ASSERT_DENSE_DLT(dlt) assert(isDenseDLT(dlt) && "Level is not dense");
-
/// Abstract base class for `SparseTensorStorage<P,C,V>`. This class
/// takes responsibility for all the `<P,C,V>`-independent aspects
/// of the tensor (e.g., shape, sparsity, permutation). In addition,
@@ -185,23 +177,9 @@ class SparseTensorEnumeratorBase;
/// specified. Thus, dynamic cardinalities always have an "immutable but
/// unknown" value; so the term "dynamic" should not be taken to indicate
/// run-time mutability.
-//
-// TODO: we'd like to factor out a class akin to `PermutationRef` for
-// capturing known-valid sizes to avoid redundant validity assertions.
-// But calling that class "SizesRef" would be a terrible name (and
-// "ValidSizesRef" isn't much better). Whereas, calling it "ShapeRef"
-// would be a lot nicer, but then that conflicts with the terminology
-// introduced above. So we need to come up with some new terminology
-// for distinguishing things, which allows a reasonable class name too.
class SparseTensorStorageBase {
protected:
- // Since this class is virtual, we must disallow public copying in
- // order to avoid "slicing". Since this class has data members,
- // that means making copying protected.
- // <https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#Rc-copy-virtual>
SparseTensorStorageBase(const SparseTensorStorageBase &) = default;
- // Copy-assignment would be implicitly deleted (because our fields
- // are const), so we explicitly delete it for clarity.
SparseTensorStorageBase &operator=(const SparseTensorStorageBase &) = delete;
public:
@@ -313,10 +291,8 @@ class SparseTensorStorageBase {
MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES)
#undef DECL_GETVALUES
- /// Element-wise insertion in lexicographic coordinate order. The first
+ /// Element-wise insertion in lexicographic coordinate order. The first
/// argument is the level-coordinates for the value being inserted.
- // TODO: For better safety, this should take a parameter for the
- // length of `lvlCoords` and check that against `getLvlRank()`.
#define DECL_LEXINSERT(VNAME, V) virtual void lexInsert(const uint64_t *, V);
MLIR_SPARSETENSOR_FOREVERY_V(DECL_LEXINSERT)
#undef DECL_LEXINSERT
@@ -348,12 +324,6 @@ class SparseTensorStorageBase {
const std::vector<uint64_t> lvl2dim;
};
-//===----------------------------------------------------------------------===//
-// This forward decl is necessary for defining `SparseTensorStorage`,
-// but isn't sufficient for splitting it off.
-template <typename P, typename C, typename V>
-class SparseTensorEnumerator;
-
/// A memory-resident sparse tensor using a storage scheme based on
/// per-level sparse/dense annotations. This data structure provides
/// a bufferized form of a sparse tensor type. In contrast to generating
@@ -612,7 +582,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
[&coo](const auto &trgCoords, V val) { coo->add(trgCoords, val); });
// TODO: This assertion assumes there are no stored zeros,
// or if there are then that we don't filter them out.
- // Cf., <https://github.com/llvm/llvm-project/issues/54179>
+ // <https://github.com/llvm/llvm-project/issues/54179>
assert(coo->getElements().size() == values.size());
return coo;
}
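
[To show what the hoisted assertion macros look like at a use site, here is a representative accessor shape from inside a SparseTensorStorageBase-style class; this is a sketch of the pattern, not a verbatim quote of the header.]

// ASSERT_VALID_DIM(d) expands to:
//   assert(d < getDimRank() && "Dimension is out of bounds");
uint64_t getDimSize(uint64_t d) const {
  ASSERT_VALID_DIM(d);
  return dimSizes[d];
}
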
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index 14f89bee05bf550..ebf6c5cd235b980 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -6,9 +6,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This header file provides the enums and functions which comprise the
-// public API of the `ExecutionEngine/SparseTensorRuntime.cpp` runtime
-// support library for the SparseTensor dialect.
+// This header file provides the functions which comprise the public API of the
+// sparse tensor runtime support library for the SparseTensor dialect.
//
//===----------------------------------------------------------------------===//
@@ -153,8 +152,7 @@ MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT)
#undef DECL_GETNEXT
/// Reads the sparse tensor, stores the coordinates and values to the given
-/// memrefs. Returns a boolean value to indicate whether the COO elements are
-/// sorted.
+/// memrefs. Returns a boolean to indicate whether the COO elements are sorted.
#define DECL_GETNEXT(VNAME, V, CNAME, C) \
MLIR_CRUNNERUTILS_EXPORT bool \
_mlir_ciface_getSparseTensorReaderReadToBuffers##CNAME##VNAME( \
@@ -240,8 +238,7 @@ MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderNSE(void *p);
MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderDimSize(void *p,
index_type d);
-/// Releases the SparseTensorReader. This also closes the file associated with
-/// the reader.
+/// Releases the SparseTensorReaderand closes the associated file.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensorReader(void *p);
/// Creates a SparseTensorWriter for outputting a sparse tensor to a file
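
[And on the runtime side, a quick sketch of driving the reader entry points touched above. The createSparseTensorReader() call used to obtain the opaque handle is assumed from the rest of this header (it is not part of this diff), and error handling is omitted.]

#include "mlir/ExecutionEngine/SparseTensorRuntime.h"

#include <cstdio>

int main() {
  char filename[] = "matrix.mtx";
  void *p = createSparseTensorReader(filename); // assumed creation entry point
  auto nse = getSparseTensorReaderNSE(p);
  auto rows = getSparseTensorReaderDimSize(p, 0);
  auto cols = getSparseTensorReaderDimSize(p, 1);
  std::printf("%llu x %llu matrix with %llu stored entries\n",
              (unsigned long long)rows, (unsigned long long)cols,
              (unsigned long long)nse);
  delSparseTensorReader(p); // also closes the associated file
  return 0;
}
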
From bfc7d00e5edcb2c59455161804388e73deac38a5 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Tue, 3 Oct 2023 10:56:14 -0700
Subject: [PATCH 2/3] minor edit
---
mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index ebf6c5cd235b980..8f320f04f23fc84 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -238,7 +238,7 @@ MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderNSE(void *p);
MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderDimSize(void *p,
index_type d);
-/// Releases the SparseTensorReaderand closes the associated file.
+/// Releases the SparseTensorReader and closes the associated file.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensorReader(void *p);
/// Creates a SparseTensorWriter for outputting a sparse tensor to a file
From fab1ee6cc3fb49fbb80bd07b811bae4540b83472 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Tue, 3 Oct 2023 11:21:42 -0700
Subject: [PATCH 3/3] format
---
mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 2cab0c01d134285..28c28c28109c3c7 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -44,8 +44,10 @@ namespace mlir {
namespace sparse_tensor {
// Forward references.
-template <typename V> class SparseTensorEnumeratorBase;
-template <typename P, typename C, typename V> class SparseTensorEnumerator;
+template <typename V>
+class SparseTensorEnumeratorBase;
+template <typename P, typename C, typename V>
+class SparseTensorEnumerator;
namespace detail {