[Mlir-commits] [mlir] f231821 - [mlir][sparse] provide convenience methods for toOrig/toStoredDim
Aart Bik
llvmlistbot at llvm.org
Wed Sep 28 16:32:15 PDT 2022
Author: Aart Bik
Date: 2022-09-28T16:32:03-07:00
New Revision: f231821e6e9b6837e190314f45a92899baf6dc14
URL: https://github.com/llvm/llvm-project/commit/f231821e6e9b6837e190314f45a92899baf6dc14
DIFF: https://github.com/llvm/llvm-project/commit/f231821e6e9b6837e190314f45a92899baf6dc14.diff
LOG: [mlir][sparse] provide convenience methods for toOrig/toStoredDim
Reviewed By: Peiming
Differential Revision: https://reviews.llvm.org/D134833
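In brief: the duplicated static helpers toOrig/toStored in SparseTensorCodegen.cpp and perm in Sparsification.cpp are replaced by two shared functions, toOrigDim (stored dimension back to original dimension) and toStoredDim (original dimension to its position in the reordered storage), each overloaded for a SparseTensorEncodingAttr and, as a convenience, for a RankedTensorType. A minimal standalone sketch of the intended semantics (plain C++, no MLIR dependencies; the vector `order` stands in for the encoding's dimOrdering affine map, with order[stored] = orig):

#include <cassert>
#include <cstdint>
#include <vector>

// Stored dimension -> original dimension (models AffineMap::getDimPosition).
static uint64_t toOrigDim(const std::vector<uint64_t> &order, uint64_t d) {
  return order[d];
}

// Original dimension -> stored dimension (models AffineMap::getPermutedPosition,
// i.e. the inverse of the permutation above).
static uint64_t toStoredDim(const std::vector<uint64_t> &order, uint64_t d) {
  for (uint64_t i = 0, e = order.size(); i < e; ++i)
    if (order[i] == d)
      return i;
  assert(false && "not a permutation");
  return d;
}

int main() {
  // Models dimOrdering = affine_map<(d0, d1, d2) -> (d2, d0, d1)>.
  std::vector<uint64_t> order = {2, 0, 1};
  assert(toOrigDim(order, 0) == 2);   // stored dim 0 holds original dim d2
  assert(toStoredDim(order, 2) == 0); // and d2 is stored at position 0
  // The two translations are mutually inverse.
  for (uint64_t d = 0; d < order.size(); ++d)
    assert(toStoredDim(order, toOrigDim(order, d)) == d);
  return 0;
}

Without an encoding, or with an encoding that has no explicit dimOrdering, both translations reduce to the identity, which is exactly the fallback the new implementations below provide.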
Added:
Modified:
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
index 575e5c5bcc8f2..a0c4485707a8c 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -64,6 +64,21 @@ bool isOrderedDim(RankedTensorType type, uint64_t d);
/// given dimension (0 <= d < rank).
bool isUniqueDim(RankedTensorType type, uint64_t d);
+//
+// Reordering.
+//
+
+/// Convenience method to translate the given stored dimension
+/// to the original dimension (0 <= d < rank) under the given encoding.
+uint64_t toOrigDim(const SparseTensorEncodingAttr &enc, uint64_t d);
+
+/// Convenience method to translate the given original dimension
+/// to the stored dimension (0 <= d < rank) under the given encoding.
+uint64_t toStoredDim(const SparseTensorEncodingAttr &enc, uint64_t d);
+
+/// Convenience method to translate the given stored dimension
+/// to the original dimension (0 <= d < rank).
+uint64_t toOrigDim(RankedTensorType type, uint64_t d);
+
+/// Convenience method to translate the given original dimension
+/// to the stored dimension (0 <= d < rank).
+uint64_t toStoredDim(RankedTensorType type, uint64_t d);
+
} // namespace sparse_tensor
} // namespace mlir
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 3997f4f2cf9a9..8ed65d0813821 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -319,6 +319,40 @@ bool mlir::sparse_tensor::isUniqueDim(RankedTensorType type, uint64_t d) {
  return true; // unannotated tensor is dense (and thus unique)
}
+uint64_t mlir::sparse_tensor::toOrigDim(const SparseTensorEncodingAttr &enc,
+                                        uint64_t d) {
+  if (enc) {
+    auto order = enc.getDimOrdering();
+    if (order) {
+      assert(order.isPermutation());
+      return order.getDimPosition(d);
+    }
+  }
+  return d;
+}
+
+uint64_t mlir::sparse_tensor::toStoredDim(const SparseTensorEncodingAttr &enc,
+                                          uint64_t d) {
+  if (enc) {
+    auto order = enc.getDimOrdering();
+    if (order) {
+      assert(order.isPermutation());
+      return order.getPermutedPosition(d);
+    }
+  }
+  return d;
+}
+
+uint64_t mlir::sparse_tensor::toOrigDim(RankedTensorType type, uint64_t d) {
+  assert(d < static_cast<uint64_t>(type.getRank()));
+  return toOrigDim(getSparseTensorEncoding(type), d);
+}
+
+uint64_t mlir::sparse_tensor::toStoredDim(RankedTensorType type, uint64_t d) {
+  assert(d < static_cast<uint64_t>(type.getRank()));
+  return toStoredDim(getSparseTensorEncoding(type), d);
+}
+
//===----------------------------------------------------------------------===//
// TensorDialect Operations.
//===----------------------------------------------------------------------===//
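The implementations above pair two AffineMap primitives that apply the same permutation in opposite directions: getDimPosition reads the permutation forward, getPermutedPosition inverts it. A hedged sketch of that pairing on a concrete permutation map (illustration only, not a test from the tree; compiles only against an MLIR build):

#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include <cassert>

int main() {
  mlir::MLIRContext ctx;
  // Builds affine_map<(d0, d1, d2) -> (d2, d0, d1)>.
  auto order = mlir::AffineMap::getPermutationMap({2, 0, 1}, &ctx);
  assert(order.isPermutation());
  // Forward direction: which original dim sits at stored position 0?
  assert(order.getDimPosition(0) == 2);      // toOrigDim(enc, 0) == 2
  // Inverse direction: where is original dim d2 stored?
  assert(order.getPermutedPosition(2) == 0); // toStoredDim(enc, 2) == 0
  return 0;
}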
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 0dc3e9ed3f3dd..d8e5d20baaaf3 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -35,26 +35,6 @@ namespace {
// Helper methods.
//===----------------------------------------------------------------------===//
-/// Reorders stored dimension to original dimension.
-static unsigned toOrig(const SparseTensorEncodingAttr &enc, unsigned i) {
-  auto order = enc.getDimOrdering();
-  if (order) {
-    assert(order.isPermutation());
-    return order.getDimPosition(i);
-  }
-  return i;
-}
-
-/// Reorders original dimension to stored dimension.
-static unsigned toStored(const SparseTensorEncodingAttr &enc, unsigned i) {
-  auto order = enc.getDimOrdering();
-  if (order) {
-    assert(order.isPermutation());
-    return order.getPermutedPosition(i);
-  }
-  return i;
-}
-
/// Flatten a list of operands that may contain sparse tensors.
static void flattenOperands(ValueRange operands,
                            SmallVectorImpl<Value> &flattened) {
@@ -79,7 +59,7 @@ static void flattenOperands(ValueRange operands,
/// Gets the dimension size for the given sparse tensor at the given dim.
/// Returns None if no sparse encoding is attached to the tensor type.
static Optional<Value> sizeFromTensorAtDim(OpBuilder &rewriter, Location loc,
-                                           ShapedType tensorTp,
+                                           RankedTensorType tensorTp,
                                            Value adaptedValue, unsigned dim) {
  auto enc = getSparseTensorEncoding(tensorTp);
  if (!enc)
@@ -95,9 +75,8 @@ static Optional<Value> sizeFromTensorAtDim(OpBuilder &rewriter, Location loc,
  // accounting for the reordering applied to the sparse storage.
  auto tuple =
      llvm::cast<UnrealizedConversionCastOp>(adaptedValue.getDefiningOp());
-  return rewriter
-      .create<memref::LoadOp>(loc, tuple.getInputs().front(),
-                              constantIndex(rewriter, loc, toStored(enc, dim)))
+  Value idx = constantIndex(rewriter, loc, toStoredDim(tensorTp, dim));
+  return rewriter.create<memref::LoadOp>(loc, tuple.getInputs().front(), idx)
      .getResult();
}
@@ -243,7 +222,7 @@ static void createAllocFields(OpBuilder &builder, Location loc, Type type,
  // Per-dimension storage.
  for (unsigned r = 0; r < rank; r++) {
    // Get the original dimension (ro) for the current stored dimension.
-    unsigned ro = toOrig(enc, r);
+    unsigned ro = toOrigDim(rType, r);
    builder.create<memref::StoreOp>(loc, sizes[ro], dimSizes,
                                    constantIndex(builder, loc, r));
    linear = builder.create<arith::MulIOp>(loc, linear, sizes[ro]);
@@ -490,10 +469,7 @@ class SparseExpandConverter : public OpConversionPattern<ExpandOp> {
    // Determine the size for access expansion (always the innermost stored
    // dimension size, translated back to original dimension). Note that we
    // recursively rewrite the new DimOp on the **original** tensor.
-    auto enc = getSparseTensorEncoding(srcType);
-    unsigned innerDim = srcType.getRank() - 1;
-    if (AffineMap p = enc.getDimOrdering())
-      innerDim = p.getDimPosition(innerDim);
+    unsigned innerDim = toOrigDim(srcType, srcType.getRank() - 1);
    auto sz = sizeFromTensorAtDim(rewriter, loc, srcType, adaptor.getTensor(),
                                  innerDim);
    assert(sz); // This for sure is a sparse tensor
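Both ExpandOp rewrites (this one and its conversion-path twin below) size the access-expansion buffers by the innermost stored dimension, translated back to the original dimension. A standalone worked example of that computation for a CSC-like ordering (plain C++; order[stored] = orig as in the earlier sketch):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // CSC-like matrix: dimOrdering = affine_map<(d0, d1) -> (d1, d0)>,
  // i.e. columns (d1) are stored outermost and rows (d0) innermost.
  std::vector<uint64_t> order = {1, 0};
  std::vector<uint64_t> dimSizes = {100, 8}; // original shape: 100 x 8
  uint64_t rank = order.size();
  uint64_t innerDim = order[rank - 1]; // toOrigDim(srcType, rank - 1)
  assert(innerDim == 0);               // innermost stored dim is the rows
  assert(dimSizes[innerDim] == 100);   // expansion buffers hold 100 entries
  return 0;
}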
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 43de81da1fda8..4d6c304ef4f92 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -106,13 +106,11 @@ static func::CallOp replaceOpWithFuncCall(RewriterBase &rewriter, Operation *op,
/// Generates dimension size call.
static Value genDimSizeCall(OpBuilder &builder, Location loc,
                            SparseTensorEncodingAttr &enc, Value src,
-                            int64_t idx) {
-  // Permute the index according to an optional dimension ordering.
-  if (AffineMap p = enc.getDimOrdering())
-    idx = p.getPermutedPosition(idx);
+                            uint64_t idx) {
  // Generate the call.
  StringRef name = "sparseDimSize";
-  SmallVector<Value, 2> params{src, constantIndex(builder, loc, idx)};
+  SmallVector<Value, 2> params{
+      src, constantIndex(builder, loc, toStoredDim(enc, idx))};
  Type iTp = builder.getIndexType();
  return createFuncCall(builder, loc, name, iTp, params, EmitCInterface::Off)
      .getResult(0);
@@ -266,13 +264,8 @@ static void newParams(OpBuilder &builder, SmallVector<Value, 8> &params,
  // default, or otherwise the "reverse" permutation of a given ordering, so
  // that indices can be mapped quickly to the right position.
  SmallVector<Value, 4> rev(sz);
-  if (AffineMap p = enc.getDimOrdering()) {
-    for (unsigned i = 0; i < sz; i++)
-      rev[p.getDimPosition(i)] = constantIndex(builder, loc, i);
-  } else {
-    for (unsigned i = 0; i < sz; i++)
-      rev[i] = constantIndex(builder, loc, i);
-  }
+  for (unsigned i = 0; i < sz; i++)
+    rev[toOrigDim(enc, i)] = constantIndex(builder, loc, i);
  params.push_back(genBuffer(builder, loc, rev));
  // Secondary and primary types encoding.
  Type elemTp = stp.getElementType();
@@ -1230,7 +1223,8 @@ class SparseTensorExpandConverter : public OpConversionPattern<ExpandOp> {
  matchAndRewrite(ExpandOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Location loc = op->getLoc();
-    ShapedType srcType = op.getTensor().getType().cast<ShapedType>();
+    RankedTensorType srcType =
+        op.getTensor().getType().cast<RankedTensorType>();
    Type eltType = srcType.getElementType();
    Type boolType = rewriter.getIntegerType(1);
    Type idxType = rewriter.getIndexType();
@@ -1239,9 +1233,7 @@ class SparseTensorExpandConverter : public OpConversionPattern<ExpandOp> {
    // Determine the size for access expansion (always the innermost stored
    // dimension size, translated back to original dimension).
    auto enc = getSparseTensorEncoding(srcType);
-    unsigned innerDim = srcType.getRank() - 1;
-    if (AffineMap p = enc.getDimOrdering())
-      innerDim = p.getDimPosition(innerDim);
+    unsigned innerDim = toOrigDim(srcType, srcType.getRank() - 1);
    auto sz = sizeFromPtrAtDim(rewriter, loc, enc, srcType, adaptor.getTensor(),
                               innerDim);
    // Allocate temporary buffers for values, filled-switch, and indices.
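The simplified newParams loop builds the "reverse" permutation in one pass by scattering through toOrigDim; the dense and unpermuted cases fall out for free because toOrigDim(enc, i) == i there. A standalone model of the resulting buffer (plain C++, same order[stored] = orig convention):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // dimOrdering = affine_map<(d0, d1, d2) -> (d2, d0, d1)>.
  std::vector<uint64_t> order = {2, 0, 1};
  uint64_t sz = order.size();
  std::vector<uint64_t> rev(sz);
  for (uint64_t i = 0; i < sz; ++i)
    rev[order[i]] = i; // mirrors rev[toOrigDim(enc, i)] = i
  // rev is the inverse permutation: rev[orig] = stored position.
  assert(rev[2] == 0 && rev[0] == 1 && rev[1] == 2);
  return 0;
}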
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 35845677082dd..ea54c7bfaef31 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -128,18 +128,6 @@ static AffineMap permute(MLIRContext *context, AffineMap m,
  return AffineMap::getPermutationMap(perm, context);
}

-/// Helper method to apply dimension ordering permutation.
-static unsigned perm(const SparseTensorEncodingAttr &enc, unsigned d) {
-  if (enc) {
-    auto order = enc.getDimOrdering();
-    if (order) {
-      assert(order.isPermutation());
-      return order.getDimPosition(d);
-    }
-  }
-  return d;
-}
-
/// Helper method to obtain the dimension level format from the encoding.
//
// TODO: note that we store, but currently completely *ignore* the properties
@@ -214,7 +202,7 @@ static bool findSparseAnnotations(Merger &merger, linalg::GenericOp op) {
    assert(map.getNumResults() == op.getRank(t));
    for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) {
      unsigned tensor = t->getOperandNumber();
-      AffineExpr a = map.getResult(perm(enc, d));
+      AffineExpr a = map.getResult(toOrigDim(enc, d));
      if (!findAffine(merger, tensor, a, toDimLevelFormat(enc, d)))
        return false; // inadmissible affine expression
    }
@@ -319,8 +307,8 @@ static bool computeIterationGraph(Merger &merger, linalg::GenericOp op,
    // example, the tensor expression A_ijk forces the ordering i < j < k
    // on the loop indices if no explicit dimension ordering is given.
    for (unsigned d = 1, rank = map.getNumResults(); d < rank; d++) {
-      AffineExpr f = map.getResult(perm(enc, d - 1));
-      AffineExpr t = map.getResult(perm(enc, d));
+      AffineExpr f = map.getResult(toOrigDim(enc, d - 1));
+      AffineExpr t = map.getResult(toOrigDim(enc, d));
      addAffineOrderings(adjM, inDegree, f, t, 0);
    }
    // Push unrelated loops into sparse iteration space, so these
@@ -359,7 +347,7 @@ static bool isMaterializing(Value val) {
/// whether the out tensor in the tensor expression codegen is admissible.
/// Sets `sparseOut` to the tensor and `outerParNest` to the outer injective
/// nesting depth when a "truly dynamic" sparse tensor output occurs.
-static bool isAdmissableTensorExp(Merger &merger, linalg::GenericOp op,
+static bool isAdmissibleTensorExp(Merger &merger, linalg::GenericOp op,
                                  std::vector<unsigned> &topSort, unsigned exp,
                                  OpOperand **sparseOut,
                                  unsigned &outerParNest) {
@@ -367,7 +355,7 @@ static bool isAdmissableTensorExp(Merger &merger, linalg::GenericOp op,
  unsigned tensor = lhs->getOperandNumber();
  auto enc = getSparseTensorEncoding(lhs->get().getType());
  // A non-annotated output tensor is assumed dense, and becomes a random
-  // access n-dim memref. Admissable since insertions cannot occur.
+  // access n-dim memref. Admissible since insertions cannot occur.
  if (!enc)
    return true;
  // An all-dense annotated "sparse" output tensor becomes a linearized random
@@ -559,7 +547,7 @@ static void genBuffers(Merger &merger, CodeGen &codegen, OpBuilder &builder,
    // Scan all dimensions of current tensor.
    args.clear();
    for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) {
-      AffineExpr a = map.getResult(perm(enc, d));
+      AffineExpr a = map.getResult(toOrigDim(enc, d));
      if (a.getKind() != AffineExprKind::DimId)
        continue; // compound
      unsigned idx = a.cast<AffineDimExpr>().getPosition();
@@ -587,7 +575,7 @@ static void genBuffers(Merger &merger, CodeGen &codegen, OpBuilder &builder,
        assert(merger.isDimLevelType(tensor, idx, DimLvlType::kDense));
      }
      // Find upper bound in current dimension.
-      unsigned p = perm(enc, d);
+      unsigned p = toOrigDim(enc, d);
      Value up = linalg::createOrFoldDimOp(builder, loc, t->get(), p);
      if (ShapedType::isDynamic(shape[p]))
        args.push_back(up);
@@ -735,7 +723,7 @@ static Value genAffine(CodeGen &codegen, OpBuilder &builder, AffineExpr a,
static Value genIndex(CodeGen &codegen, linalg::GenericOp op, OpOperand *t) {
  auto map = op.getTiedIndexingMap(t);
  auto enc = getSparseTensorEncoding(t->get().getType());
-  AffineExpr a = map.getResult(perm(enc, map.getNumResults() - 1));
+  AffineExpr a = map.getResult(toOrigDim(enc, map.getNumResults() - 1));
  assert(a.getKind() == AffineExprKind::DimId);
  unsigned idx = a.cast<AffineDimExpr>().getPosition();
  return codegen.loops[idx];
@@ -752,14 +740,14 @@ static Value genSubscript(CodeGen &codegen, OpBuilder &builder,
  if (enc) {
    // Note that currently, all sparse subscripts are simple.
    // TODO: accept affine too?
-    AffineExpr a = map.getResult(perm(enc, rank - 1));
+    AffineExpr a = map.getResult(toOrigDim(enc, rank - 1));
    assert(a.getKind() == AffineExprKind::DimId);
    unsigned idx = a.cast<AffineDimExpr>().getPosition();
    assert(codegen.pidxs[tensor][idx] != nullptr);
    args.push_back(codegen.pidxs[tensor][idx]); // position index
  } else {
    for (unsigned d = 0; d < rank; d++) {
-      AffineExpr a = map.getResult(perm(enc, d));
+      AffineExpr a = map.getResult(d);
      args.push_back(genAffine(codegen, builder, a, op.getLoc()));
    }
  }
@@ -1094,7 +1082,7 @@ static void genInvariants(Merger &merger, CodeGen &codegen, OpBuilder &builder,
    auto map = op.getTiedIndexingMap(t);
    auto enc = getSparseTensorEncoding(t->get().getType());
    for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) {
-      AffineExpr a = map.getResult(perm(enc, d));
+      AffineExpr a = map.getResult(toOrigDim(enc, d));
      if (!isInvariantAffine(codegen, a, ldx, atLevel))
        return; // still in play
    }
@@ -1882,7 +1870,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
    for (auto mask : allMask)
      if (computeIterationGraph(merger, op, topSort, mask)) {
        hasCycle = false;
-        if (isAdmissableTensorExp(merger, op, topSort, exp, &sparseOut,
+        if (isAdmissibleTensorExp(merger, op, topSort, exp, &sparseOut,
                                  outerParNest)) {
          isAdmissible = true;
          break;
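Finally, in computeIterationGraph each pair of consecutive stored dimensions now contributes an ordering edge between the loops of the corresponding original dimensions via toOrigDim. A standalone model of the edge generation (plain C++; assumes an identity indexing map so original dimension d is driven by loop index d):

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

int main() {
  // CSC-like matrix: dimOrdering = affine_map<(d0, d1) -> (d1, d0)>.
  std::vector<uint64_t> order = {1, 0};
  std::vector<std::pair<uint64_t, uint64_t>> edges;
  for (uint64_t d = 1; d < order.size(); ++d)
    // f = toOrigDim(enc, d - 1) must iterate before t = toOrigDim(enc, d).
    edges.emplace_back(order[d - 1], order[d]);
  // Single edge d1 -> d0: the column loop runs outside the row loop.
  assert(edges.size() == 1 && edges[0].first == 1 && edges[0].second == 0);
  return 0;
}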