[Mlir-commits] [mlir] b3c5da7 - [mlir][sparse] Address style nit in documentation

wren romano llvmlistbot at llvm.org
Fri Sep 30 15:22:55 PDT 2022


Author: wren romano
Date: 2022-09-30T15:22:43-07:00
New Revision: b3c5da73b71908c59b12ef0feadfde56bad222b5

URL: https://github.com/llvm/llvm-project/commit/b3c5da73b71908c59b12ef0feadfde56bad222b5
DIFF: https://github.com/llvm/llvm-project/commit/b3c5da73b71908c59b12ef0feadfde56bad222b5.diff

LOG: [mlir][sparse] Address style nit in documentation

Reviewed By: aartbik, Peiming

Differential Revision: https://reviews.llvm.org/D134986

Added: 
    

Modified: 
    mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
    mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
    mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
index 1d45ea2112c0..824183fa50c2 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
@@ -53,7 +53,7 @@ template <typename V>
 struct ElementLT final {
   ElementLT(uint64_t rank) : rank(rank) {}
 
-  /// Compare two elements a la `operator<`.
+  /// Compares two elements a la `operator<`.
   ///
   /// Precondition: the elements must both be valid for `rank`.
   bool operator()(const Element<V> &e1, const Element<V> &e2) const {
@@ -112,13 +112,13 @@ class SparseTensorCOO final {
     return new SparseTensorCOO<V>(permsz, capacity);
   }
 
-  /// Get the rank of the tensor.
+  /// Gets the rank of the tensor.
   uint64_t getRank() const { return dimSizes.size(); }
 
-  /// Get the dimension-sizes array.
+  /// Gets the dimension-sizes array.
   const std::vector<uint64_t> &getDimSizes() const { return dimSizes; }
 
-  /// Get the elements array.
+  /// Gets the elements array.
   const std::vector<Element<V>> &getElements() const { return elements; }
 
   /// Returns the `operator<` closure object for the COO's element type.
@@ -173,14 +173,14 @@ class SparseTensorCOO final {
     isSorted = true;
   }
 
-  /// Switch into iterator mode.  If already in iterator mode, then
+  /// Switches into iterator mode.  If already in iterator mode, then
   /// resets the position to the first element.
   void startIterator() {
     iteratorLocked = true;
     iteratorPos = 0;
   }
 
-  /// Get the next element.  If there are no remaining elements, then
+  /// Gets the next element.  If there are no remaining elements, then
   /// returns nullptr and switches out of iterator mode.
   ///
   /// Asserts: is in iterator mode.

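The COO.h comments above document two small pieces of API: the ElementLT comparison closure and the container's iterator mode (startIterator() positions at the first element; subsequent calls hand back elements until a nullptr signals exhaustion). As a rough sketch of how a caller might drive that mode -- assuming a getNext() accessor returning const Element<V> * and an Element carrying an indices pointer plus a value field, neither of which is spelled out in the hunks above:

  // Illustrative sketch only; getNext() and Element's fields are assumed,
  // not taken from this patch.
  #include <cstdint>
  #include <iostream>
  #include "mlir/ExecutionEngine/SparseTensor/COO.h"

  template <typename V>
  void dumpCOO(mlir::sparse_tensor::SparseTensorCOO<V> &coo) {
    const uint64_t rank = coo.getRank();
    coo.startIterator();                        // enter iterator mode at element 0
    while (const auto *elem = coo.getNext()) {  // nullptr => exhausted, mode exited
      for (uint64_t d = 0; d < rank; ++d)
        std::cout << elem->indices[d] << (d + 1 == rank ? " : " : ", ");
      std::cout << elem->value << "\n";
    }
  }

Note that, per the comment above, running off the end also switches the COO back out of iterator mode, so a second pass requires another startIterator() call.
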
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
index 920246cb1001..589e0dbfa2b0 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
@@ -133,10 +133,10 @@ class SparseTensorFile final {
   void assertMatchesShape(uint64_t rank, const uint64_t *shape) const;
 
 private:
-  /// Read the MME header of a general sparse matrix of type real.
+  /// Reads the MME header of a general sparse matrix of type real.
   void readMMEHeader();
 
-  /// Read the "extended" FROSTT header. Although not part of the
+  /// Reads the "extended" FROSTT header. Although not part of the
   /// documented format, we assume that the file starts with optional
   /// comments followed by two lines that define the rank, the number of
   /// nonzeros, and the dimensions sizes (one per rank) of the sparse tensor.

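The "extended" FROSTT header described above is explicitly not part of the documented FROSTT format, so a concrete illustration may help. Assuming the first non-comment line carries the rank and the number of nonzeros while the second carries the dimension sizes, one per rank (only the two-line structure is stated in the comment; the ordering within each line is an assumption here), the header of a 3-dimensional 2x3x4 tensor holding 5 nonzeros would look like:

  # optional comment lines
  3 5
  2 3 4

with the usual FROSTT coordinate/value lines following.
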
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 2f3d1a8aa7c6..975f4435c73e 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -83,70 +83,70 @@ class SparseTensorStorageBase {
 
   virtual ~SparseTensorStorageBase() = default;
 
-  /// Get the rank of the tensor.
+  /// Gets the rank of the tensor.
   uint64_t getRank() const { return dimSizes.size(); }
 
-  /// Get the dimension-sizes array, in storage-order.
+  /// Gets the dimension-sizes array, in storage-order.
   const std::vector<uint64_t> &getDimSizes() const { return dimSizes; }
 
-  /// Safely lookup the size of the given (storage-order) dimension.
+  /// Safely looks up the size of the given (storage-order) dimension.
   uint64_t getDimSize(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     return dimSizes[d];
   }
 
-  /// Get the "reverse" permutation, which maps this object's
+  /// Gets the "reverse" permutation, which maps this object's
   /// storage-order to the tensor's semantic-order.
   const std::vector<uint64_t> &getRev() const { return rev; }
 
-  /// Get the dimension-types array, in storage-order.
+  /// Gets the dimension-types array, in storage-order.
   const std::vector<DimLevelType> &getDimTypes() const { return dimTypes; }
 
-  /// Safely lookup the level-type of the given (storage-order) dimension.
+  /// Safely looks up the level-type of the given (storage-order) dimension.
   DimLevelType getDimType(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     return dimTypes[d];
   }
 
-  /// Safely check if the (storage-order) dimension uses dense storage.
+  /// Safely checks if the (storage-order) dimension uses dense storage.
   bool isDenseDim(uint64_t d) const { return isDenseDLT(getDimType(d)); }
 
-  /// Safely check if the (storage-order) dimension uses compressed storage.
+  /// Safely checks if the (storage-order) dimension uses compressed storage.
   bool isCompressedDim(uint64_t d) const {
     return isCompressedDLT(getDimType(d));
   }
 
-  /// Safely check if the (storage-order) dimension uses singleton storage.
+  /// Safely checks if the (storage-order) dimension uses singleton storage.
   bool isSingletonDim(uint64_t d) const {
     return isSingletonDLT(getDimType(d));
   }
 
-  /// Safely check if the (storage-order) dimension is ordered.
+  /// Safely checks if the (storage-order) dimension is ordered.
   bool isOrderedDim(uint64_t d) const { return isOrderedDLT(getDimType(d)); }
 
-  /// Safely check if the (storage-order) dimension is unique.
+  /// Safely checks if the (storage-order) dimension is unique.
   bool isUniqueDim(uint64_t d) const { return isUniqueDLT(getDimType(d)); }
 
-  /// Allocate a new enumerator.
+  /// Allocates a new enumerator.
 #define DECL_NEWENUMERATOR(VNAME, V)                                           \
   virtual void newEnumerator(SparseTensorEnumeratorBase<V> **, uint64_t,       \
                              const uint64_t *) const;
   MLIR_SPARSETENSOR_FOREVERY_V(DECL_NEWENUMERATOR)
 #undef DECL_NEWENUMERATOR
 
-  /// Pointers-overhead storage.
+  /// Gets pointers-overhead storage.
 #define DECL_GETPOINTERS(PNAME, P)                                             \
   virtual void getPointers(std::vector<P> **, uint64_t);
   MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETPOINTERS)
 #undef DECL_GETPOINTERS
 
-  /// Indices-overhead storage.
+  /// Gets indices-overhead storage.
 #define DECL_GETINDICES(INAME, I)                                              \
   virtual void getIndices(std::vector<I> **, uint64_t);
   MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETINDICES)
 #undef DECL_GETINDICES
 
-  /// Primary storage.
+  /// Gets primary storage.
 #define DECL_GETVALUES(VNAME, V) virtual void getValues(std::vector<V> **);
   MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES)
 #undef DECL_GETVALUES
@@ -305,7 +305,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
       endPath(0);
   }
 
-  /// Allocate a new enumerator for this classes `<P,I,V>` types and
+  /// Allocates a new enumerator for this classes `<P,I,V>` types and
   /// erase the `<P,I>` parts from the type.  Callers must make sure to
   /// delete the enumerator when they're done with it.
   void newEnumerator(SparseTensorEnumeratorBase<V> **out, uint64_t rank,
@@ -439,7 +439,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
     finalizeSegment(d, full);
   }
 
-  /// Finalize the sparse pointer structure at this dimension.
+  /// Finalizes the sparse pointer structure at this dimension.
   void finalizeSegment(uint64_t d, uint64_t full = 0, uint64_t count = 1) {
     if (count == 0)
       return; // Short-circuit, since it'll be a nop.
@@ -661,7 +661,7 @@ class SparseTensorEnumerator final : public SparseTensorEnumeratorBase<V> {
 /// those parameters.
 class SparseTensorNNZ final {
 public:
-  /// Allocate the statistics structure for the desired sizes and
+  /// Allocates the statistics structure for the desired sizes and
   /// sparsity (in the target tensor's storage-order).  This constructor
   /// does not actually populate the statistics, however; for that see
   /// `initialize`.
@@ -677,7 +677,7 @@ class SparseTensorNNZ final {
   /// Returns the rank of the target tensor.
   uint64_t getRank() const { return dimSizes.size(); }
 
-  /// Enumerate the source tensor to fill in the statistics.  The
+  /// Enumerates the source tensor to fill in the statistics.  The
   /// enumerator should already incorporate the permutation (from
   /// semantic-order to the target storage-order).
   template <typename V>

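All of the Storage.h edits above touch simple accessors on SparseTensorStorageBase. For orientation, here is a sketch that exercises only the queries whose comments change in this patch, walking the storage-order dimensions of a tensor and reporting each one's size and level properties (the free function and its name are illustrative, not part of the header):

  // Illustrative sketch only: prints, for each storage-order dimension, its
  // size and the level-type properties exposed by SparseTensorStorageBase.
  #include <cstdint>
  #include <iostream>
  #include "mlir/ExecutionEngine/SparseTensor/Storage.h"

  inline void
  summarizeStorage(const mlir::sparse_tensor::SparseTensorStorageBase &tensor) {
    std::cout << std::boolalpha;
    for (uint64_t d = 0, rank = tensor.getRank(); d < rank; ++d) {
      std::cout << "dim " << d << ": size=" << tensor.getDimSize(d)
                << " dense=" << tensor.isDenseDim(d)
                << " compressed=" << tensor.isCompressedDim(d)
                << " singleton=" << tensor.isSingletonDim(d)
                << " ordered=" << tensor.isOrderedDim(d)
                << " unique=" << tensor.isUniqueDim(d) << "\n";
    }
  }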
