[Mlir-commits] [mlir] 91e7b9e - [mlir][sparse] annotate loops that are generated by loop emitter.
Peiming Liu
llvmlistbot at llvm.org
Wed Nov 16 16:09:39 PST 2022
Author: Peiming Liu
Date: 2022-11-17T00:09:33Z
New Revision: 91e7b9e5253f17e5583148215aa52f592ac1133a
URL: https://github.com/llvm/llvm-project/commit/91e7b9e5253f17e5583148215aa52f592ac1133a
DIFF: https://github.com/llvm/llvm-project/commit/91e7b9e5253f17e5583148215aa52f592ac1133a.diff
LOG: [mlir][sparse] annotate loops that are generated by loop emitter.
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D138155
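Summary of the change: SparseTensorLoopEmitter now takes an optional StringAttr loopTag, and every loop the emitter creates is annotated with an "Emitted from" attribute whose value names the operation the loop was generated from. A minimal sketch of the resulting IR, assuming sparsification of a linalg.generic (bounds and body are illustrative; only the trailing attribute is what this patch adds):

  // A loop produced by the sparse loop emitter; the discardable
  // attribute records the op it was emitted from.
  scf.for %i = %c0 to %n step %c1 {
    // loop body elided
  } {"Emitted from" = "linalg.generic"}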
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 5692af5004c0f..0db86dd2e8c16 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -95,9 +95,10 @@ static Value genIndexAndValueForDense(OpBuilder &builder, Location loc,
//===----------------------------------------------------------------------===//
SparseTensorLoopEmitter::SparseTensorLoopEmitter(ValueRange tensors,
+ StringAttr loopTag,
bool hasOutput,
bool isSparseOut)
- : hasOutput(hasOutput), isSparseOut(isSparseOut),
+ : loopTag(loopTag), hasOutput(hasOutput), isSparseOut(isSparseOut),
tensors(tensors.begin(), tensors.end()), dimTypes(tensors.size()),
pidxs(tensors.size()), coord(tensors.size()), highs(tensors.size()),
ptrBuffer(tensors.size()), idxBuffer(tensors.size()),
@@ -284,7 +285,7 @@ Operation *SparseTensorLoopEmitter::enterLoopOverTensorAtDim(
// NOTE: we could also prepare for the next dim here in advance
// Push the loop into stack
loopStack.emplace_back(ArrayRef<size_t>(tid), ArrayRef<size_t>(dim), loop,
- coord[tid][dim]);
+ coord[tid][dim], loopTag);
// Emit extra locals.
emitExtraLocalsForTensorsAtDenseDims(builder, loc, extraTids, extraDims);
@@ -386,7 +387,7 @@ Operation *SparseTensorLoopEmitter::enterCoIterationOverTensorsAtDims(
// NOTE: we could also prepare for the next dim here in advance
}
// Sets up the loop stack.
- loopStack.emplace_back(tids, dims, whileOp, min);
+ loopStack.emplace_back(tids, dims, whileOp, min, loopTag);
assert(loopStack.size() == loopSeqStack.size());
// Emits extra locals
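Note that both entry points tag their loops: enterLoopOverTensorAtDim (for/parallel loops) and enterCoIterationOverTensorsAtDims (while loops) now pass loopTag onto the loop stack. A hedged sketch of constructing an emitter with a custom tag, following the updated constructor signature (the op name "my.custom_op" is a placeholder, not something from this patch):

  // Tag every generated loop with the name of the op being lowered.
  SparseTensorLoopEmitter emitter(
      tensors, StringAttr::get(ctx, "my.custom_op"),
      /*hasOutput=*/false, /*isSparseOut=*/false);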
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index ebb82d025c6f8..ecd4135be8e67 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -331,7 +331,9 @@ class SparseTensorLoopEmitter {
/// tensor id (tid) used in related functions.
/// If isSparseOut is set, the loop emitter assumes that the sparse output
/// tensor
/// is empty, and will always generate loops on it based on the dim sizes.
- explicit SparseTensorLoopEmitter(ValueRange tensors, bool hasOutput = false,
+ explicit SparseTensorLoopEmitter(ValueRange tensors,
+ StringAttr loopTag = nullptr,
+ bool hasOutput = false,
bool isSparseOut = false);
/// Starts a loop emitting session by generating all the buffers needed to
@@ -413,11 +415,20 @@ class SparseTensorLoopEmitter {
};
const std::vector<Value> &getValBuffer() const { return valBuffer; };
+ constexpr static llvm::StringLiteral getLoopEmitterLoopAttrName() {
+ return llvm::StringLiteral("Emitted from");
+ }
+
private:
struct LoopLevelInfo {
LoopLevelInfo(ArrayRef<size_t> tids, ArrayRef<size_t> dims, Operation *loop,
- Value iv)
- : tids(tids), dims(dims), loop(loop), iv(iv) {}
+ Value iv, StringAttr loopTag)
+ : tids(tids), dims(dims), loop(loop), iv(iv) {
+ // Attach a special tag to loops generated by the loop emitter.
+ if (loopTag)
+ loop->setAttr(SparseTensorLoopEmitter::getLoopEmitterLoopAttrName(),
+ loopTag);
+ }
// TODO: maybe use a vector<pair> for tid and dim?
// The set of tensors that the loop is operating on
const llvm::SmallVector<size_t> tids;
@@ -485,8 +496,12 @@ class SparseTensorLoopEmitter {
void exitCoIterationLoop(OpBuilder &builder, Location loc,
MutableArrayRef<Value> reduc);
- // Whether the loop emitter needs to treat the last tensor as the output
- // tensor.
+ /// An optional string attribute that, when set, is attached to the loops
+ /// generated by the loop emitter; it can help later passes identify loops
+ /// that operate on sparse tensors more easily.
+ StringAttr loopTag;
+ /// Whether the loop emitter needs to treat the last tensor as the output
+ /// tensor.
bool hasOutput;
bool isSparseOut;
/// Input and (optional) output tensors.
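With the tag in place, a later pass can cheaply recognize emitter-generated loops. A sketch of a possible consumer (not part of this patch):

  // Check whether `loopOp` was produced by the sparse loop emitter,
  // and if so, which operation it was emitted from.
  if (auto tag = loopOp->getAttrOfType<StringAttr>(
          SparseTensorLoopEmitter::getLoopEmitterLoopAttrName())) {
    if (tag.getValue() == linalg::GenericOp::getOperationName()) {
      // Loop originates from sparsifying a linalg.generic.
    }
  }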
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 2d2f9031f6f10..6f38796719662 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -789,7 +789,9 @@ struct ForeachRewriter : public OpRewritePattern<ForeachOp> {
auto enc = getSparseTensorEncoding(rtp);
// 1. Generates the loop for the sparse input.
- SparseTensorLoopEmitter loopEmitter(ValueRange{input});
+ SparseTensorLoopEmitter loopEmitter(
+ ValueRange{input},
+ StringAttr::get(getContext(), ForeachOp::getOperationName()));
loopEmitter.initializeLoopEmit(rewriter, loc);
for (int64_t i = 0; i < rank; i++) {
// TODO: provide utility function for loop sequences that only contains
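Here the tag value is ForeachOp::getOperationName(), i.e. "sparse_tensor.foreach". Illustrative IR for a loop generated from a sparse_tensor.foreach (the loop shape is made up; the attribute is the point):

  scf.for %i = %c0 to %nnz step %c1 {
    // loop body elided
  } {"Emitted from" = "sparse_tensor.foreach"}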
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index e9bf5aa85610d..7cbdf1da798e3 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -53,11 +53,15 @@ enum Reduction { kNoReduc, kSum, kProduct, kAnd, kOr, kXor, kCustom };
// Code generation.
struct CodeGen {
- CodeGen(SparsificationOptions o, ValueRange tensors, unsigned numTensors,
- unsigned numLoops, OpOperand *op, unsigned nest,
+ CodeGen(SparsificationOptions o, MLIRContext *context, ValueRange tensors,
+ unsigned numTensors, unsigned numLoops, OpOperand *op, unsigned nest,
std::vector<unsigned> &ts)
- : options(o), loopEmitter(tensors, /*hasOutput=*/true,
- /*isSparseOut=*/op != nullptr),
+ : options(o),
+ loopEmitter(
+ tensors,
+ StringAttr::get(context, linalg::GenericOp::getOperationName()),
+ /*hasOutput=*/true,
+ /*isSparseOut=*/op != nullptr),
sparseOut(op), outerParNest(nest), topSort(ts) {
if (op)
insChain = op->get();
@@ -670,8 +674,8 @@ static void genTensorStore(Merger &merger, CodeGen &codegen, OpBuilder &builder,
// Select operation insertion.
Value insChain = codegen.insChain;
assert(insChain);
- scf::IfOp ifOp = builder.create<scf::IfOp>(
- loc, insChain.getType(), rhs, /*else=*/true);
+ scf::IfOp ifOp = builder.create<scf::IfOp>(loc, insChain.getType(), rhs,
+ /*else=*/true);
builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
// Existing value was preserved to be used here.
assert(merger.exp(exp).val);
@@ -1372,8 +1376,8 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
tensors.push_back(t.get());
// Recursively generates code if admissible.
- CodeGen codegen(options, tensors, numTensors, numLoops, sparseOut,
- outerParNest, topSort);
+ CodeGen codegen(options, op.getContext(), tensors, numTensors, numLoops,
+ sparseOut, outerParNest, topSort);
genBuffers(merger, codegen, rewriter, op);
genStmt(merger, codegen, rewriter, op, exp, 0);
genResult(merger, codegen, rewriter, op);
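CodeGen now takes an MLIRContext solely so it can build the tag attribute; GenericOpSparsifier passes op.getContext(), so loops emitted during sparsification end up tagged "linalg.generic". A downstream pass that no longer needs the annotation could drop it again; a hypothetical sketch (not in this patch):

  // Strip the loop emitter annotation once it has served its purpose.
  loopOp->removeAttr(
      SparseTensorLoopEmitter::getLoopEmitterLoopAttrName());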