[Mlir-commits] [mlir] 35fae04 - [mlir][sparse] using non-static field to avoid data races. (#81165)
llvmlistbot at llvm.org
Thu Feb 8 10:12:28 PST 2024
Author: Peiming Liu
Date: 2024-02-08T10:12:24-08:00
New Revision: 35fae044c5faf8ddb9be7b47bb7573e839f77472
URL: https://github.com/llvm/llvm-project/commit/35fae044c5faf8ddb9be7b47bb7573e839f77472
DIFF: https://github.com/llvm/llvm-project/commit/35fae044c5faf8ddb9be7b47bb7573e839f77472.diff
LOG: [mlir][sparse] using non-static field to avoid data races. (#81165)
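Editor's note: the change below replaces the static `SparseIterator::emitStrategy` member, which was shared by every iterator across all compilation threads, with a per-instance field that `LoopEmitter` stores and the factory helpers now receive explicitly. A minimal, self-contained C++ sketch of that thread-safety pattern follows; the class and helper names here are simplified stand-ins for illustration, not the actual MLIR API.

// Minimal sketch (simplified stand-ins, not the real MLIR classes):
// configuration moves from a mutable static member, which races when several
// pass instances run concurrently, to an instance field threaded through the
// factory helpers, as the diff below does for the sparse iterators.
#include <memory>

enum class SparseEmitStrategy { kFunctional, kDebugInterface };

class Iterator {
public:
  // Previously: `static SparseEmitStrategy emitStrategy;` shared process-wide.
  void setSparseEmitStrategy(SparseEmitStrategy strategy) {
    emitStrategy = strategy;
  }

private:
  SparseEmitStrategy emitStrategy = SparseEmitStrategy::kFunctional;
};

// Factories take the strategy explicitly, mirroring makeSimpleIterator and
// the other helpers in the diff below.
std::unique_ptr<Iterator> makeIterator(SparseEmitStrategy strategy) {
  auto it = std::make_unique<Iterator>();
  it->setSparseEmitStrategy(strategy);
  return it;
}

int main() {
  // Each emitter owns its own setting; no global mutable state to race on.
  auto a = makeIterator(SparseEmitStrategy::kFunctional);
  auto b = makeIterator(SparseEmitStrategy::kDebugInterface);
}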
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.h
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
index 1c2857d868a604..0ead135c90d305 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
@@ -94,7 +94,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
this->loopTag = loopTag;
this->hasOutput = hasOutput;
this->isSparseOut = isSparseOut;
- SparseIterator::setSparseEmitStrategy(emitStrategy);
+ this->emitStrategy = emitStrategy;
const unsigned numManifestTensors = ts.size();
const unsigned synTensorId = numManifestTensors;
@@ -166,13 +166,13 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
std::unique_ptr<SparseIterator>
LoopEmitter::makeLevelIterator(OpBuilder &builder, Location loc, TensorId t,
Level l) {
- auto it = makeSimpleIterator(*lvls[t][l]);
+ auto it = makeSimpleIterator(*lvls[t][l], emitStrategy);
auto stt = getSparseTensorType(tensors[t]);
if (stt.hasEncoding() && stt.getEncoding().isSlice()) {
Value offset = genSliceOffset(builder, loc, tensors[t], l);
Value stride = genSliceStride(builder, loc, tensors[t], l);
- auto slicedIt = makeSlicedLevelIterator(std::move(it), offset, stride,
- lvls[t][l]->getSize());
+ auto slicedIt = makeSlicedLevelIterator(
+ std::move(it), offset, stride, lvls[t][l]->getSize(), emitStrategy);
return slicedIt;
}
return it;
@@ -186,7 +186,7 @@ void LoopEmitter::initializeLoopEmit(
TensorId synId = getSynTensorId();
for (unsigned i = 0, e = loopHighs.size(); i < e; i++) {
Value sz = loopHighs[i] = synSetter(builder, loc, i);
- auto [stl, it] = makeSynLevelAndIterator(sz, synId, i);
+ auto [stl, it] = makeSynLevelAndIterator(sz, synId, i, emitStrategy);
lvls[synId][i] = std::move(stl);
iters[synId][i].emplace_back(std::move(it));
}
@@ -317,12 +317,13 @@ void LoopEmitter::initSubSectIterator(OpBuilder &builder, Location loc) {
size = ADDI(size, ADDI(MULI(idxMax, C_IDX(stride)), C_IDX(1)));
}
it = makeNonEmptySubSectIterator(builder, loc, parent, loopHighs[loop],
- std::move(lvlIt), size, curDep.second);
+ std::move(lvlIt), size, curDep.second,
+ emitStrategy);
} else {
const SparseIterator &subSectIter = *iters[t][lvl].back();
it = makeTraverseSubSectIterator(builder, loc, subSectIter, *parent,
std::move(lvlIt), loopHighs[loop],
- curDep.second);
+ curDep.second, emitStrategy);
}
lastIter[t] = it.get();
iters[t][lvl].emplace_back(std::move(it));
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
index 5bab2c6a86081f..7bfe713cdd9f74 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
@@ -380,6 +380,7 @@ class LoopEmitter {
/// tensor.
bool hasOutput;
bool isSparseOut;
+ SparseEmitStrategy emitStrategy;
//
// Fields which have `numTensor` many entries.
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
index 04b49c320f07a5..4ba9ecbe03c72d 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
@@ -773,9 +773,6 @@ class SubSectIterator : public SparseIterator {
// SparseIterator derived classes implementation.
//===----------------------------------------------------------------------===//
-SparseEmitStrategy SparseIterator::emitStrategy =
- SparseEmitStrategy::kFunctional;
-
void SparseIterator::genInit(OpBuilder &b, Location l,
const SparseIterator *p) {
if (emitStrategy == SparseEmitStrategy::kDebugInterface) {
@@ -1303,27 +1300,38 @@ sparse_tensor::makeSparseTensorLevel(OpBuilder &b, Location l, Value t,
}
std::pair<std::unique_ptr<SparseTensorLevel>, std::unique_ptr<SparseIterator>>
-sparse_tensor::makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl) {
+sparse_tensor::makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl,
+ SparseEmitStrategy strategy) {
auto stl = std::make_unique<DenseLevel>(tid, lvl, sz, /*encoded=*/false);
auto it = std::make_unique<TrivialIterator>(*stl);
+ it->setSparseEmitStrategy(strategy);
return std::make_pair(std::move(stl), std::move(it));
}
std::unique_ptr<SparseIterator>
-sparse_tensor::makeSimpleIterator(const SparseTensorLevel &stl) {
+sparse_tensor::makeSimpleIterator(const SparseTensorLevel &stl,
+ SparseEmitStrategy strategy) {
+ std::unique_ptr<SparseIterator> ret;
if (!isUniqueLT(stl.getLT())) {
// We always deduplicate the non-unique level, but we should optimize it away
// if possible.
- return std::make_unique<DedupIterator>(stl);
+ ret = std::make_unique<DedupIterator>(stl);
+ } else {
+ ret = std::make_unique<TrivialIterator>(stl);
}
- return std::make_unique<TrivialIterator>(stl);
+ ret->setSparseEmitStrategy(strategy);
+ return ret;
}
std::unique_ptr<SparseIterator>
sparse_tensor::makeSlicedLevelIterator(std::unique_ptr<SparseIterator> &&sit,
- Value offset, Value stride, Value size) {
+ Value offset, Value stride, Value size,
+ SparseEmitStrategy strategy) {
- return std::make_unique<FilterIterator>(std::move(sit), offset, stride, size);
+ auto ret =
+ std::make_unique<FilterIterator>(std::move(sit), offset, stride, size);
+ ret->setSparseEmitStrategy(strategy);
+ return ret;
}
static const SparseIterator *tryUnwrapFilter(const SparseIterator *it) {
@@ -1335,38 +1343,42 @@ static const SparseIterator *tryUnwrapFilter(const SparseIterator *it) {
std::unique_ptr<SparseIterator> sparse_tensor::makeNonEmptySubSectIterator(
OpBuilder &b, Location l, const SparseIterator *parent, Value loopBound,
- std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride) {
+ std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride,
+ SparseEmitStrategy strategy) {
// Try to unwrap the NonEmptySubSectIterator from a filter parent.
parent = tryUnwrapFilter(parent);
- auto it = std::make_unique<NonEmptySubSectIterator>(
- b, l, parent, std::move(delegate), size);
+ std::unique_ptr<SparseIterator> it =
+ std::make_unique<NonEmptySubSectIterator>(b, l, parent,
+ std::move(delegate), size);
if (stride != 1) {
// TODO: We can safely skip bound checking on sparse levels, but for dense
// iteration space, we need the bound to infer the dense loop range.
- return std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
- C_IDX(stride), /*size=*/loopBound);
+ it = std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
+ C_IDX(stride), /*size=*/loopBound);
}
+ it->setSparseEmitStrategy(strategy);
return it;
}
std::unique_ptr<SparseIterator> sparse_tensor::makeTraverseSubSectIterator(
OpBuilder &b, Location l, const SparseIterator &subSectIter,
const SparseIterator &parent, std::unique_ptr<SparseIterator> &&wrap,
- Value loopBound, unsigned stride) {
+ Value loopBound, unsigned stride, SparseEmitStrategy strategy) {
// This must be a subsection iterator or a filtered subsection iterator.
auto &subSect =
llvm::cast<NonEmptySubSectIterator>(*tryUnwrapFilter(&subSectIter));
- auto it = std::make_unique<SubSectIterator>(
+ std::unique_ptr<SparseIterator> it = std::make_unique<SubSectIterator>(
subSect, *tryUnwrapFilter(&parent), std::move(wrap));
if (stride != 1) {
- return std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
- C_IDX(stride), /*size=*/loopBound);
+ it = std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
+ C_IDX(stride), /*size=*/loopBound);
}
+ it->setSparseEmitStrategy(strategy);
return it;
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.h
index fc2d9de66cfe72..d1e94b790bea6b 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.h
@@ -111,8 +111,8 @@ class SparseIterator {
public:
virtual ~SparseIterator() = default;
- static void setSparseEmitStrategy(SparseEmitStrategy strategy) {
- SparseIterator::emitStrategy = strategy;
+ void setSparseEmitStrategy(SparseEmitStrategy strategy) {
+ emitStrategy = strategy;
}
virtual std::string getDebugInterfacePrefix() const = 0;
@@ -248,7 +248,7 @@ class SparseIterator {
return ref.take_front(cursorValsCnt);
}
- static SparseEmitStrategy emitStrategy;
+ SparseEmitStrategy emitStrategy;
public:
const IterKind kind; // For LLVM-style RTTI.
@@ -277,32 +277,34 @@ std::unique_ptr<SparseTensorLevel> makeSparseTensorLevel(OpBuilder &builder,
/// Helper function to create a simple SparseIterator object that iterates over
/// the SparseTensorLevel.
-std::unique_ptr<SparseIterator>
-makeSimpleIterator(const SparseTensorLevel &stl);
+std::unique_ptr<SparseIterator> makeSimpleIterator(const SparseTensorLevel &stl,
+ SparseEmitStrategy strategy);
/// Helper function to create a synthetic SparseIterator object that iterates
/// over a dense space specified by [0,`sz`).
std::pair<std::unique_ptr<SparseTensorLevel>, std::unique_ptr<SparseIterator>>
-makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl);
+makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl,
+ SparseEmitStrategy strategy);
/// Helper function to create a SparseIterator object that iterates over a
/// sliced space; the original space (before slicing) is traversed by `sit`.
std::unique_ptr<SparseIterator>
makeSlicedLevelIterator(std::unique_ptr<SparseIterator> &&sit, Value offset,
- Value stride, Value size);
+ Value stride, Value size, SparseEmitStrategy strategy);
/// Helper function to create a SparseIterator object that iterates over the
/// non-empty subsections set.
std::unique_ptr<SparseIterator> makeNonEmptySubSectIterator(
OpBuilder &b, Location l, const SparseIterator *parent, Value loopBound,
- std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride);
+ std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride,
+ SparseEmitStrategy strategy);
/// Helper function to create a SparseIterator object that iterates over a
/// non-empty subsection created by NonEmptySubSectIterator.
std::unique_ptr<SparseIterator> makeTraverseSubSectIterator(
OpBuilder &b, Location l, const SparseIterator &subsectIter,
const SparseIterator &parent, std::unique_ptr<SparseIterator> &&wrap,
- Value loopBound, unsigned stride);
+ Value loopBound, unsigned stride, SparseEmitStrategy strategy);
} // namespace sparse_tensor
} // namespace mlir