[Mlir-commits] [mlir] 289f84a - [mlir][sparse] Rename add{Pointer, Index} to append{Pointer, Index}
wren romano
llvmlistbot at llvm.org
Fri Mar 4 12:03:31 PST 2022
Author: wren romano
Date: 2022-03-04T12:03:24-08:00
New Revision: 289f84a4a2cc0f97575bde7e39845c67a6e8c79d
URL: https://github.com/llvm/llvm-project/commit/289f84a4a2cc0f97575bde7e39845c67a6e8c79d
DIFF: https://github.com/llvm/llvm-project/commit/289f84a4a2cc0f97575bde7e39845c67a6e8c79d.diff
LOG: [mlir][sparse] Rename add{Pointer,Index} to append{Pointer,Index}
This clarifies that these methods only work in append mode, not for general insertions. This is a prospective change towards https://github.com/llvm/llvm-project/issues/51652 which also performs random-access insertions, so we want to avoid confusion.
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D120929
Added:
Modified:
mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
Removed:
################################################################################
diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
index fd32326715f05..80c84d990c86b 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -264,7 +264,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
indices[r].reserve(sz);
sz = 1;
allDense = false;
- // Prepare the pointer structure. We cannot use `addPointer`
+ // Prepare the pointer structure. We cannot use `appendPointer`
// here, because `isCompressedDim` won't work until after this
// preparation has been done.
pointers[r].push_back(0);
@@ -412,7 +412,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
/// Appends the next free position of `indices[d]` to `pointers[d]`.
/// Thus, when called after inserting the last element of a segment,
/// it will append the position where the next segment begins.
- inline void addPointer(uint64_t d) {
+ inline void appendPointer(uint64_t d) {
assert(isCompressedDim(d)); // Entails `d < getRank()`.
uint64_t p = indices[d].size();
assert(p <= std::numeric_limits<P>::max() &&
@@ -421,7 +421,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
}
/// Appends the given index to `indices[d]`.
- inline void addIndex(uint64_t d, uint64_t i) {
+ inline void appendIndex(uint64_t d, uint64_t i) {
assert(isCompressedDim(d)); // Entails `d < getRank()`.
assert(i <= std::numeric_limits<I>::max() &&
"Index value is too large for the I-type");
@@ -455,7 +455,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
seg++;
// Handle segment in interval for sparse or dense dimension.
if (isCompressedDim(d)) {
- addIndex(d, i);
+ appendIndex(d, i);
} else {
// For dense storage we must fill in all the zero values between
// the previous element (when last we ran this for-loop) and the
@@ -470,7 +470,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
}
// Finalize the sparse pointer structure at this dimension.
if (isCompressedDim(d)) {
- addPointer(d);
+ appendPointer(d);
} else {
// For dense storage we must fill in all the zero values after
// the last element.
@@ -508,7 +508,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
if (d == getRank()) {
values.push_back(0);
} else if (isCompressedDim(d)) {
- addPointer(d);
+ appendPointer(d);
} else {
for (uint64_t full = 0, sz = sizes[d]; full < sz; full++)
endDim(d + 1);
@@ -522,7 +522,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
for (uint64_t i = 0; i < rank - diff; i++) {
uint64_t d = rank - i - 1;
if (isCompressedDim(d)) {
- addPointer(d);
+ appendPointer(d);
} else {
for (uint64_t full = idx[d] + 1, sz = sizes[d]; full < sz; full++)
endDim(d + 1);
@@ -537,7 +537,7 @@ class SparseTensorStorage : public SparseTensorStorageBase {
for (uint64_t d = diff; d < rank; d++) {
uint64_t i = cursor[d];
if (isCompressedDim(d)) {
- addIndex(d, i);
+ appendIndex(d, i);
} else {
for (uint64_t full = top; full < i; full++)
endDim(d + 1);
More information about the Mlir-commits
mailing list