[Mlir-commits] [mlir] 7e1eac5 - [mlir][sparse] add initialize() API to LoopEmitter to support post-constructor initialization
Peiming Liu
llvmlistbot at llvm.org
Tue Dec 20 17:18:46 PST 2022
Author: Peiming Liu
Date: 2022-12-21T01:18:42Z
New Revision: 7e1eac511658bde4b83c7655f788d4c1e2d58cc5
URL: https://github.com/llvm/llvm-project/commit/7e1eac511658bde4b83c7655f788d4c1e2d58cc5
DIFF: https://github.com/llvm/llvm-project/commit/7e1eac511658bde4b83c7655f788d4c1e2d58cc5.diff
LOG: [mlir][sparse] add initialize() API to LoopEmitter to support post-constructor initialization
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D140444
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index b5c82bd6db79..d26e36464913 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -208,13 +208,28 @@ SparseTensorLoopEmitter::SparseTensorLoopEmitter(ValueRange tensors,
StringAttr loopTag,
bool hasOutput,
bool isSparseOut,
- ArrayRef<unsigned> topSort)
- : loopTag(loopTag), hasOutput(hasOutput), isSparseOut(isSparseOut),
- tensors(tensors.begin(), tensors.end()), dimTypes(tensors.size()),
- pidxs(tensors.size()), coord(tensors.size()), highs(tensors.size()),
- ptrBuffer(tensors.size()), idxBuffer(tensors.size()),
- valBuffer(tensors.size()), loopStack(),
- sparsiferLoopLvlMap(topSort.size(), 0) {
+ ArrayRef<unsigned> topSort) {
+ initialize(tensors, loopTag, hasOutput, isSparseOut, topSort);
+}
+
+void SparseTensorLoopEmitter::initialize(ValueRange tensors, StringAttr loopTag,
+ bool hasOutput, bool isSparseOut,
+ ArrayRef<unsigned> topSort) {
+ // First, initialize all the fields.
+ this->loopTag = loopTag;
+ this->hasOutput = hasOutput;
+ this->isSparseOut = isSparseOut;
+ this->tensors.assign(tensors.begin(), tensors.end());
+ this->dimTypes.assign(tensors.size(), std::vector<DimLevelType>());
+ this->pidxs.assign(tensors.size(), std::vector<Value>());
+ this->coord.assign(tensors.size(), std::vector<Value>());
+ this->highs.assign(tensors.size(), std::vector<Value>());
+ this->ptrBuffer.assign(tensors.size(), std::vector<Value>());
+ this->idxBuffer.assign(tensors.size(), std::vector<Value>());
+ this->valBuffer.assign(tensors.size(), nullptr);
+ this->loopStack.reserve(topSort.size());
+ this->sparsiferLoopLvlMap.assign(topSort.size(), 0);
+
for (size_t tid = 0, e = tensors.size(); tid < e; tid++) {
auto t = tensors[tid];
// a scalar or 0-dimension tensors
@@ -239,6 +254,7 @@ SparseTensorLoopEmitter::SparseTensorLoopEmitter(ValueRange tensors,
idxBuffer[tid].assign(rank, Value());
}
+ // FIXME: This map should be maintained outside loop emitter.
for (unsigned i = 0, e = topSort.size(); i < e; i++) {
// This is an inverse map of the topologically sorted loop index from
// sparsifier. This is needed to map the AffineDimExpr back to the loopStack
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index a121522d0190..7fd126f2364a 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -562,15 +562,21 @@ class SparseTensorLoopEmitter {
using OutputUpdater = function_ref<Value(OpBuilder &builder, Location loc,
Value memref, Value tensor)>;
- /// Constructor: take an array of tensors inputs, on which the generated
- /// loops will iterate on. The index of the tensor in the array is also the
- /// tensor id (tid) used in related functions.
- /// If isSparseOut is set, loop emitter assume that the sparse output tensor
- /// is empty, and will always generate loops on it based on the dim sizes.
- /// An optional array could be provided (by sparsification) to indicate the
- /// loop id sequence that will be generated. It is used to establish the
- /// mapping between affineDimExpr to the corresponding loop index in the
- /// loop stack that are maintained by the loop emitter.
+ SparseTensorLoopEmitter() = default;
+
+ /// Takes an array of tensor inputs, on which the generated loops will
+ /// iterate. The index of a tensor in the array is also the tensor id
+ /// (tid) used in related functions. If isSparseOut is set, the loop emitter
+ /// assumes that the sparse output tensor is empty, and will always generate
+ /// loops on it based on the dim sizes. An optional array could be provided
+ /// (by sparsification) to indicate the loop id sequence that will be
+ /// generated. It is used to establish the mapping between each AffineDimExpr
+ /// and the corresponding loop index in the loop stack that is maintained by
+ /// the loop emitter.
+ void initialize(ValueRange tensors, StringAttr loopTag = nullptr,
+ bool hasOutput = false, bool isSparseOut = false,
+ ArrayRef<unsigned> topSort = {});
+
explicit SparseTensorLoopEmitter(ValueRange tensors,
StringAttr loopTag = nullptr,
bool hasOutput = false,
More information about the Mlir-commits
mailing list