[Mlir-commits] [mlir] a3e4888 - [mlir][sparse] Macros to clean up StridedMemRefType in the SparseTensorRuntime
wren romano
llvmlistbot at llvm.org
Wed Nov 16 16:40:21 PST 2022
Author: wren romano
Date: 2022-11-16T16:40:13-08:00
New Revision: a3e4888350169d1f61b3de4df3b0960f865ca416
URL: https://github.com/llvm/llvm-project/commit/a3e4888350169d1f61b3de4df3b0960f865ca416
DIFF: https://github.com/llvm/llvm-project/commit/a3e4888350169d1f61b3de4df3b0960f865ca416.diff
LOG: [mlir][sparse] Macros to clean up StridedMemRefType in the SparseTensorRuntime
In particular, this silences warnings from [-Wsign-compare].
This is a revised version of D137735, which was reverted due to a sign-comparison warning raised on LLVM's Windows buildbot (but not on MLIR's Windows buildbot). Differences from the previous differential:
* `vectorToMemref` now uses `detail::checkOverflowCast` to silence the warning that caused the previous differential to be reverted (see the sketch below).
* `MEMREF_GET_USIZE` now uses `detail::checkOverflowCast` rather than `static_cast`.
* `ASSERT_USIZE_EQ` was added to abbreviate another common idiom and to ensure that we use `detail::safelyEQ` everywhere (silencing a few other warnings).
* A couple of for-loops now use `index_type` for the induction variable, since their upper bounds use that typedef too. (Namely `_mlir_ciface_getSparseTensorReaderDimSizes` and `_mlir_ciface_outSparseTensorWriterNext`.)
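To make the warning concrete, here is a minimal sketch (illustrative only, not part of the commit): it shows the signed/unsigned comparison pattern that [-Wsign-compare] flags, using a hypothetical stand-in for `detail::checkOverflowCast` (the real helper lives in ArithmeticUtils.h; its exact signature is assumed here).

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for detail::checkOverflowCast<uint64_t>:
// assert that a signed memref size is non-negative before widening it.
static inline uint64_t toUnsignedSize(int64_t size) {
  assert(size >= 0 && "memref sizes should never be negative");
  return static_cast<uint64_t>(size);
}

int main() {
  const int64_t memrefSize = 3; // e.g. StridedMemRefType::sizes[0]
  const uint64_t rank = 3;
  // Comparing memrefSize (signed) against rank (unsigned) directly is
  // the pattern [-Wsign-compare] flags; the checked cast avoids it.
  assert(toUnsignedSize(memrefSize) == rank);
  return 0;
}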
Depends on D138149
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D137998
Added:
Modified:
mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
Removed:
################################################################################
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index bca4a2805d8a3..e5ea01420293c 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -50,6 +50,7 @@
#ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
+#include "mlir/ExecutionEngine/SparseTensor/ArithmeticUtils.h"
#include "mlir/ExecutionEngine/SparseTensor/COO.h"
#include "mlir/ExecutionEngine/SparseTensor/ErrorHandling.h"
#include "mlir/ExecutionEngine/SparseTensor/File.h"
@@ -213,6 +214,47 @@ fromMLIRSparseTensor(const SparseTensorStorage<uint64_t, uint64_t, V> *tensor,
*pIndices = indices;
}
+//===----------------------------------------------------------------------===//
+//
+// Utilities for manipulating `StridedMemRefType`.
+//
+//===----------------------------------------------------------------------===//
+
+// We shouldn't need to use `detail::safelyEQ` here since the `1` is a literal.
+#define ASSERT_NO_STRIDE(MEMREF) \
+ do { \
+ assert((MEMREF) && "Memref is nullptr"); \
+ assert(((MEMREF)->strides[0] == 1) && "Memref has non-trivial stride"); \
+ } while (false)
+
+// All our functions use `uint64_t` for ranks, but `StridedMemRefType::sizes`
+// uses `int64_t` on some platforms. So we explicitly cast this lookup to
+// ensure we get a consistent type, and we use `checkOverflowCast` rather
+// than `static_cast` just to be extremely sure that the casting can't
+// go awry. (The cast should always be safe since (1) sizes should never
+// be negative, and (2) the maximum `int64_t` is smaller than the maximum
+// `uint64_t`. But it's better to be safe than sorry.)
+#define MEMREF_GET_USIZE(MEMREF) \
+ detail::checkOverflowCast<uint64_t>((MEMREF)->sizes[0])
+
+#define ASSERT_USIZE_EQ(MEMREF, SZ) \
+ assert(detail::safelyEQ(MEMREF_GET_USIZE(MEMREF), (SZ)) && \
+ "Memref size mismatch")
+
+#define MEMREF_GET_PAYLOAD(MEMREF) ((MEMREF)->data + (MEMREF)->offset)
+
+// We make this a function rather than a macro mainly for type safety
+// reasons. This function does not modify the vector, but it cannot
+// be marked `const` because it is stored into the non-`const` memref.
+template <typename T>
+static void vectorToMemref(std::vector<T> &v, StridedMemRefType<T, 1> &ref) {
+ ref.basePtr = ref.data = v.data();
+ ref.offset = 0;
+ using SizeT = typename std::remove_reference_t<decltype(ref.sizes[0])>;
+ ref.sizes[0] = detail::checkOverflowCast<SizeT>(v.size());
+ ref.strides[0] = 1;
+}
+
} // anonymous namespace
extern "C" {
@@ -286,21 +328,21 @@ void *_mlir_ciface_newSparseTensor( // NOLINT
StridedMemRefType<index_type, 1> *lvl2dimRef,
StridedMemRefType<index_type, 1> *dim2lvlRef, OverheadType ptrTp,
OverheadType indTp, PrimaryType valTp, Action action, void *ptr) {
- assert(dimSizesRef && dimSizesRef->strides[0] == 1);
- assert(lvlSizesRef && lvlSizesRef->strides[0] == 1);
- assert(lvlTypesRef && lvlTypesRef->strides[0] == 1);
- assert(lvl2dimRef && lvl2dimRef->strides[0] == 1);
- assert(dim2lvlRef && dim2lvlRef->strides[0] == 1);
- const uint64_t dimRank = dimSizesRef->sizes[0];
- const uint64_t lvlRank = lvlSizesRef->sizes[0];
- assert(dim2lvlRef->sizes[0] == (int64_t)dimRank);
- assert(lvlTypesRef->sizes[0] == (int64_t)lvlRank &&
- lvl2dimRef->sizes[0] == (int64_t)lvlRank);
- const index_type *dimSizes = dimSizesRef->data + dimSizesRef->offset;
- const index_type *lvlSizes = lvlSizesRef->data + lvlSizesRef->offset;
- const DimLevelType *lvlTypes = lvlTypesRef->data + lvlTypesRef->offset;
- const index_type *lvl2dim = lvl2dimRef->data + lvl2dimRef->offset;
- const index_type *dim2lvl = dim2lvlRef->data + dim2lvlRef->offset;
+ ASSERT_NO_STRIDE(dimSizesRef);
+ ASSERT_NO_STRIDE(lvlSizesRef);
+ ASSERT_NO_STRIDE(lvlTypesRef);
+ ASSERT_NO_STRIDE(lvl2dimRef);
+ ASSERT_NO_STRIDE(dim2lvlRef);
+ const uint64_t dimRank = MEMREF_GET_USIZE(dimSizesRef);
+ const uint64_t lvlRank = MEMREF_GET_USIZE(lvlSizesRef);
+ ASSERT_USIZE_EQ(dim2lvlRef, dimRank);
+ ASSERT_USIZE_EQ(lvlTypesRef, lvlRank);
+ ASSERT_USIZE_EQ(lvl2dimRef, lvlRank);
+ const index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef);
+ const index_type *lvlSizes = MEMREF_GET_PAYLOAD(lvlSizesRef);
+ const DimLevelType *lvlTypes = MEMREF_GET_PAYLOAD(lvlTypesRef);
+ const index_type *lvl2dim = MEMREF_GET_PAYLOAD(lvl2dimRef);
+ const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef);
// Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
// This is safe because of the static_assert above.
@@ -425,10 +467,8 @@ void *_mlir_ciface_newSparseTensor( // NOLINT
assert(ref &&tensor); \
std::vector<V> *v; \
static_cast<SparseTensorStorageBase *>(tensor)->getValues(&v); \
- ref->basePtr = ref->data = v->data(); \
- ref->offset = 0; \
- ref->sizes[0] = v->size(); \
- ref->strides[0] = 1; \
+ assert(v); \
+ vectorToMemref(*v, *ref); \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_SPARSEVALUES)
#undef IMPL_SPARSEVALUES
@@ -439,10 +479,8 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_SPARSEVALUES)
assert(ref &&tensor); \
std::vector<TYPE> *v; \
static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d); \
- ref->basePtr = ref->data = v->data(); \
- ref->offset = 0; \
- ref->sizes[0] = v->size(); \
- ref->strides[0] = 1; \
+ assert(v); \
+ vectorToMemref(*v, *ref); \
}
#define IMPL_SPARSEPOINTERS(PNAME, P) \
IMPL_GETOVERHEAD(sparsePointers##PNAME, P, getPointers)
@@ -463,16 +501,17 @@ MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEINDICES)
void *lvlCOO, StridedMemRefType<V, 0> *vref, \
StridedMemRefType<index_type, 1> *dimIndRef, \
StridedMemRefType<index_type, 1> *dim2lvlRef) { \
- assert(lvlCOO &&vref &&dimIndRef &&dim2lvlRef); \
- assert(dimIndRef->strides[0] == 1 && dim2lvlRef->strides[0] == 1); \
- const uint64_t rank = dimIndRef->sizes[0]; \
- assert(dim2lvlRef->sizes[0] == (int64_t)rank); \
- const index_type *dimInd = dimIndRef->data + dimIndRef->offset; \
- const index_type *dim2lvl = dim2lvlRef->data + dim2lvlRef->offset; \
+ assert(lvlCOO &&vref); \
+ ASSERT_NO_STRIDE(dimIndRef); \
+ ASSERT_NO_STRIDE(dim2lvlRef); \
+ const uint64_t rank = MEMREF_GET_USIZE(dimIndRef); \
+ ASSERT_USIZE_EQ(dim2lvlRef, rank); \
+ const index_type *dimInd = MEMREF_GET_PAYLOAD(dimIndRef); \
+ const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef); \
std::vector<index_type> lvlInd(rank); \
for (uint64_t d = 0; d < rank; ++d) \
lvlInd[dim2lvl[d]] = dimInd[d]; \
- V *value = vref->data + vref->offset; \
+ V *value = MEMREF_GET_PAYLOAD(vref); \
static_cast<SparseTensorCOO<V> *>(lvlCOO)->add(lvlInd, *value); \
return lvlCOO; \
}
@@ -483,11 +522,11 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_ADDELT)
bool _mlir_ciface_getNext##VNAME(void *iter, \
StridedMemRefType<index_type, 1> *iref, \
StridedMemRefType<V, 0> *vref) { \
- assert(iter &&iref &&vref); \
- assert(iref->strides[0] == 1); \
- index_type *indx = iref->data + iref->offset; \
- V *value = vref->data + vref->offset; \
- const uint64_t isize = iref->sizes[0]; \
+ assert(iter &&vref); \
+ ASSERT_NO_STRIDE(iref); \
+ index_type *indx = MEMREF_GET_PAYLOAD(iref); \
+ V *value = MEMREF_GET_PAYLOAD(vref); \
+ const uint64_t isize = MEMREF_GET_USIZE(iref); \
const Element<V> *elem = \
static_cast<SparseTensorIterator<V> *>(iter)->getNext(); \
if (elem == nullptr) \
@@ -504,11 +543,11 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
void _mlir_ciface_lexInsert##VNAME(void *tensor, \
StridedMemRefType<index_type, 1> *cref, \
StridedMemRefType<V, 0> *vref) { \
- assert(tensor &&cref &&vref); \
- assert(cref->strides[0] == 1); \
- index_type *cursor = cref->data + cref->offset; \
+ assert(tensor &&vref); \
+ ASSERT_NO_STRIDE(cref); \
+ index_type *cursor = MEMREF_GET_PAYLOAD(cref); \
assert(cursor); \
- V *value = vref->data + vref->offset; \
+ V *value = MEMREF_GET_PAYLOAD(vref); \
static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, *value); \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_LEXINSERT)
@@ -519,16 +558,16 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_LEXINSERT)
void *tensor, StridedMemRefType<index_type, 1> *cref, \
StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref, \
StridedMemRefType<index_type, 1> *aref, index_type count) { \
- assert(tensor &&cref &&vref &&fref &&aref); \
- assert(cref->strides[0] == 1); \
- assert(vref->strides[0] == 1); \
- assert(fref->strides[0] == 1); \
- assert(aref->strides[0] == 1); \
- assert(vref->sizes[0] == fref->sizes[0]); \
- index_type *cursor = cref->data + cref->offset; \
- V *values = vref->data + vref->offset; \
- bool *filled = fref->data + fref->offset; \
- index_type *added = aref->data + aref->offset; \
+ assert(tensor); \
+ ASSERT_NO_STRIDE(cref); \
+ ASSERT_NO_STRIDE(vref); \
+ ASSERT_NO_STRIDE(fref); \
+ ASSERT_NO_STRIDE(aref); \
+ ASSERT_USIZE_EQ(vref, MEMREF_GET_USIZE(fref)); \
+ index_type *cursor = MEMREF_GET_PAYLOAD(cref); \
+ V *values = MEMREF_GET_PAYLOAD(vref); \
+ bool *filled = MEMREF_GET_PAYLOAD(fref); \
+ index_type *added = MEMREF_GET_PAYLOAD(aref); \
static_cast<SparseTensorStorageBase *>(tensor)->expInsert( \
cursor, values, filled, added, count); \
}
@@ -537,13 +576,13 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_EXPINSERT)
void _mlir_ciface_getSparseTensorReaderDimSizes(
void *p, StridedMemRefType<index_type, 1> *dref) {
- assert(p && dref);
- assert(dref->strides[0] == 1);
- index_type *dimSizes = dref->data + dref->offset;
+ assert(p);
+ ASSERT_NO_STRIDE(dref);
+ index_type *dimSizes = MEMREF_GET_PAYLOAD(dref);
SparseTensorReader &file = *static_cast<SparseTensorReader *>(p);
const index_type *sizes = file.getDimSizes();
index_type rank = file.getRank();
- for (uint64_t r = 0; r < rank; ++r)
+ for (index_type r = 0; r < rank; ++r)
dimSizes[r] = sizes[r];
}
@@ -551,12 +590,12 @@ void _mlir_ciface_getSparseTensorReaderDimSizes(
void _mlir_ciface_getSparseTensorReaderNext##VNAME( \
void *p, StridedMemRefType<index_type, 1> *iref, \
StridedMemRefType<V, 0> *vref) { \
- assert(p &&iref &&vref); \
- assert(iref->strides[0] == 1); \
- index_type *indices = iref->data + iref->offset; \
+ assert(p &&vref); \
+ ASSERT_NO_STRIDE(iref); \
+ index_type *indices = MEMREF_GET_PAYLOAD(iref); \
SparseTensorReader *stfile = static_cast<SparseTensorReader *>(p); \
index_type rank = stfile->getRank(); \
- V *value = vref->data + vref->offset; \
+ V *value = MEMREF_GET_PAYLOAD(vref); \
*value = stfile->readCOOElement<V>(rank, indices); \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
@@ -565,10 +604,10 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
void _mlir_ciface_outSparseTensorWriterMetaData(
void *p, index_type rank, index_type nnz,
StridedMemRefType<index_type, 1> *dref) {
- assert(p && dref);
- assert(dref->strides[0] == 1);
+ assert(p);
+ ASSERT_NO_STRIDE(dref);
assert(rank != 0);
- index_type *dimSizes = dref->data + dref->offset;
+ index_type *dimSizes = MEMREF_GET_PAYLOAD(dref);
SparseTensorWriter &file = *static_cast<SparseTensorWriter *>(p);
file << rank << " " << nnz << std::endl;
for (index_type r = 0; r < rank - 1; ++r)
@@ -580,13 +619,13 @@ void _mlir_ciface_outSparseTensorWriterMetaData(
void _mlir_ciface_outSparseTensorWriterNext##VNAME( \
void *p, index_type rank, StridedMemRefType<index_type, 1> *iref, \
StridedMemRefType<V, 0> *vref) { \
- assert(p &&iref &&vref); \
- assert(iref->strides[0] == 1); \
- index_type *indices = iref->data + iref->offset; \
+ assert(p &&vref); \
+ ASSERT_NO_STRIDE(iref); \
+ index_type *indices = MEMREF_GET_PAYLOAD(iref); \
SparseTensorWriter &file = *static_cast<SparseTensorWriter *>(p); \
- for (uint64_t r = 0; r < rank; ++r) \
+ for (index_type r = 0; r < rank; ++r) \
file << (indices[r] + 1) << " "; \
- V *value = vref->data + vref->offset; \
+ V *value = MEMREF_GET_PAYLOAD(vref); \
file << *value << std::endl; \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_OUTNEXT)
@@ -723,4 +762,9 @@ void delSparseTensorWriter(void *p) {
} // extern "C"
+#undef MEMREF_GET_PAYLOAD
+#undef ASSERT_USIZE_EQ
+#undef MEMREF_GET_USIZE
+#undef ASSERT_NO_STRIDE
+
#endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
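As a closing illustration (again, not part of the commit), a small standalone harness along these lines can exercise the shape of the new `vectorToMemref` helper; this version re-declares a simplified copy that uses a plain `static_cast` where the patch uses `detail::checkOverflowCast`:

#include <cassert>
#include <cstdint>
#include <vector>
#include "mlir/ExecutionEngine/CRunnerUtils.h" // defines StridedMemRefType

// Simplified copy of the patch's vectorToMemref: wrap a std::vector's
// storage in a rank-1 StridedMemRefType without copying the elements.
template <typename T>
static void vectorToMemref(std::vector<T> &v, StridedMemRefType<T, 1> &ref) {
  ref.basePtr = ref.data = v.data();
  ref.offset = 0;
  ref.sizes[0] = static_cast<int64_t>(v.size()); // patch: checkOverflowCast
  ref.strides[0] = 1;
}

int main() {
  std::vector<double> values = {1.0, 2.0, 3.0};
  StridedMemRefType<double, 1> ref;
  vectorToMemref(values, ref);
  assert(ref.sizes[0] == 3 && ref.strides[0] == 1);
  assert(ref.data[ref.offset] == 1.0); // MEMREF_GET_PAYLOAD = data + offset
  return 0;
}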