[Mlir-commits] [mlir] [mlir][sparse] Update verifier for block sparsity and singleton (PR #69389)
Yinying Li
llvmlistbot at llvm.org
Wed Oct 18 08:57:32 PDT 2023
https://github.com/yinying-lisa-li updated https://github.com/llvm/llvm-project/pull/69389
From 69a2ee629f8ca7cbd1c727ba3efecb94752c6140 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Tue, 17 Oct 2023 22:05:00 +0000
Subject: [PATCH 1/3] [mlir][sparse] Verification of block sparsity
---
.../Dialect/SparseTensor/IR/SparseTensor.h | 8 ++
.../SparseTensor/IR/Detail/DimLvlMap.cpp | 14 ++-
.../SparseTensor/IR/SparseTensorDialect.cpp | 89 +++++++++++++++----
.../SparseTensor/invalid_encoding.mlir | 23 ++++-
4 files changed, 112 insertions(+), 22 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
index 6e834426b441764..181f6f4062e1177 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -173,6 +173,14 @@ AffineMap inferLvlToDim(AffineMap dimToLvl, MLIRContext *context);
/// Asserts on failure (so only use when known to succeed).
AffineMap inverseBlockSparsity(AffineMap dimToLvl, MLIRContext *context);
+/// Given the dimToLvl map, returns the block size in vector.
+/// For instance, a 2x3 block will return [2, 3].
+/// Only valid block sparsity will be accepted.
+SmallVector<unsigned> getBlockSize(AffineMap dimToLvl);
+
+/// Given the dimToLvl map, returns if it's block sparsity.
+bool isBlockSparsity(AffineMap dimToLvl);
+
//
// Reordering.
//
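For reference, a minimal usage sketch of the two new helpers, assuming a 2x3 BSR-style dimToLvl map built with the standard AffineExpr builders (the function name blockSizeSketch and the include set are illustrative, not part of the patch):

// Hypothetical sketch: builds (d0, d1) -> (d0 floordiv 2, d1 floordiv 3,
// d0 mod 2, d1 mod 3) and queries the helpers declared above.
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"

using namespace mlir;

static void blockSizeSketch() {
  MLIRContext ctx;
  AffineExpr d0 = getAffineDimExpr(0, &ctx);
  AffineExpr d1 = getAffineDimExpr(1, &ctx);
  // Level expressions: block coordinates first, then in-block coordinates.
  SmallVector<AffineExpr> lvlExprs = {d0.floorDiv(2), d1.floorDiv(3),
                                      d0 % 2, d1 % 3};
  AffineMap dimToLvl =
      AffineMap::get(/*dimCount=*/2, /*symbolCount=*/0, lvlExprs, &ctx);
  if (sparse_tensor::isBlockSparsity(dimToLvl)) {
    // Per the documentation above, this is expected to return [2, 3].
    SmallVector<unsigned> blockSize = sparse_tensor::getBlockSize(dimToLvl);
    (void)blockSize;
  }
}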
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
index 05fce96043826f1..c64390e23d9eace 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
@@ -356,10 +356,18 @@ AffineMap DimLvlMap::getDimToLvlMap(MLIRContext *context) const {
AffineMap DimLvlMap::getLvlToDimMap(MLIRContext *context) const {
SmallVector<AffineExpr> dimAffines;
dimAffines.reserve(getDimRank());
- for (const auto &dimSpec : dimSpecs)
- dimAffines.push_back(dimSpec.getExpr().getAffineExpr());
+ for (const auto &dimSpec : dimSpecs) {
+ auto expr = dimSpec.getExpr().getAffineExpr();
+ if (expr) {
+ auto exprTuple = dimSpec.getExpr().unpackBinop();
+ if (std::get<1>(exprTuple) == AffineExprKind::FloorDiv ||
+ std::get<1>(exprTuple) == AffineExprKind::Mod) {
+ dimAffines.push_back(expr);
+ }
+ }
+ }
auto map = AffineMap::get(getLvlRank(), getSymRank(), dimAffines, context);
- if (map.isIdentity()) return AffineMap();
+ if (dimAffines.empty() || map.isIdentity()) return AffineMap();
return map;
}
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index fd87bbfa905ed69..75b1700e8017fcd 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -455,6 +455,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
SmallVector<DimLevelType> lvlTypes;
SmallVector<SparseTensorDimSliceAttr> dimSlices;
AffineMap dimToLvl = {};
+ AffineMap lvlToDim = {};
unsigned posWidth = 0;
unsigned crdWidth = 0;
StringRef attrName;
@@ -568,6 +569,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
ERROR_IF(dimToLvl, "Cannot mix `dimToLvl` with `map`")
dimToLvl = dlm.getDimToLvlMap(parser.getContext());
+ lvlToDim = dlm.getLvlToDimMap(parser.getContext());
break;
}
} // switch
@@ -582,8 +584,9 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
#undef RETURN_ON_FAIL
// Construct struct-like storage for attribute.
- // TODO: Fetch lvlToDim if user provides one
- AffineMap lvlToDim = inferLvlToDim(dimToLvl, parser.getContext());
+ if (!lvlToDim || lvlToDim.isEmpty()) {
+ lvlToDim = inferLvlToDim(dimToLvl, parser.getContext());
+ }
return parser.getChecked<SparseTensorEncodingAttr>(
parser.getContext(), lvlTypes, dimToLvl, lvlToDim, posWidth, crdWidth,
dimSlices);
@@ -663,6 +666,17 @@ SparseTensorEncodingAttr::verify(function_ref<InFlightDiagnostic()> emitError,
return emitError() << "unexpected position bitwidth: " << posWidth;
if (!acceptBitWidth(crdWidth))
return emitError() << "unexpected coordinate bitwidth: " << crdWidth;
+ if (auto it = std::find_if(lvlTypes.begin(), lvlTypes.end(), isSingletonDLT);
+ it != std::end(lvlTypes)) {
+ if (it == lvlTypes.begin() ||
+ (!isCompressedDLT(*(it - 1)) && !isLooseCompressedDLT(*(it - 1))))
+ return emitError() << "expected compressed or loose_compressed level "
+ "before singleton level";
+ if (!std::all_of(it, lvlTypes.end(),
+ [](DimLevelType i) { return isSingletonDLT(i); }))
+ return emitError() << "expected all singleton lvlTypes "
+ "following a singleton level";
+ }
// Before we can check that the level-rank is consistent/coherent
// across all fields, we need to define it. The source-of-truth for
// the `getLvlRank` method is the length of the level-types array,
@@ -678,19 +692,14 @@ SparseTensorEncodingAttr::verify(function_ref<InFlightDiagnostic()> emitError,
return emitError()
<< "level-rank mismatch between dimToLvl and lvlTypes: "
<< dimToLvl.getNumResults() << " != " << lvlRank;
- // TODO: The following is attempting to match the old error-conditions
- // from prior to merging dimOrdering and higherOrdering into dimToLvl.
- // That is, we currently require `dimToLvl` to be either a permutation
- // (as when higherOrdering is the identity) or expansive (as per the
- // constraints on higherOrdering). However, those constraints do
- // not match the intended semantics of `dimToLvl`. As we improve the
- // compiler to actually handle non-permutations, we need to update these
- // checks to match what is actually supported. In particular, this is
- // where we'll have to check that when `lvlToDim` is provided then it
- // is indeed an inverse of `dimToLvl`, and when it isn't provided then
- // it can be automatically inferred.
- if (dimRank == lvlRank && !dimToLvl.isPermutation())
- return emitError() << "expected a permutation affine map for dimToLvl";
+ auto inferRes = inferLvlToDim(dimToLvl, dimToLvl.getContext());
+ // Symbols can't be inferred but are acceptable.
+ if (!inferRes && dimToLvl.getNumSymbols() == 0) {
+ return emitError() << "failed to infer lvlToDim from dimToLvl";
+ }
+ if (lvlToDim && (inferRes != lvlToDim)) {
+ return emitError() << "expected lvlToDim to be an inverse of dimToLvl";
+ }
if (dimRank > lvlRank)
return emitError() << "unexpected dimToLvl mapping from " << dimRank
<< " to " << lvlRank;
@@ -758,8 +767,7 @@ AffineMap mlir::sparse_tensor::inferLvlToDim(AffineMap dimToLvl,
lvlToDim = AffineMap();
} else if (map.isPermutation()) {
lvlToDim = inversePermutation(map);
- } else {
- // TODO: check if it's block sparsity
+ } else if (isBlockSparsity(map)) {
lvlToDim = inverseBlockSparsity(map, context);
}
return lvlToDim;
@@ -818,6 +826,53 @@ AffineMap mlir::sparse_tensor::inverseBlockSparsity(AffineMap dimToLvl,
return dimToLvl.get(dimToLvl.getNumResults(), 0, lvlExprs, context);
}
+SmallVector<unsigned> mlir::sparse_tensor::getBlockSize(AffineMap dimToLvl) {
+ assert(isBlockSparsity(dimToLvl) &&
+ "expected dimToLvl to be block sparsity for calling getBlockSize");
+ SmallVector<unsigned> blockSize;
+ for (auto result : dimToLvl.getResults()) {
+ if (auto binOp = result.dyn_cast<AffineBinaryOpExpr>()) {
+ if (result.getKind() == AffineExprKind::Mod) {
+ blockSize.push_back(
+ binOp.getRHS().dyn_cast<AffineConstantExpr>().getValue());
+ }
+ } else {
+ blockSize.push_back(1);
+ }
+ }
+ return blockSize;
+}
+
+bool mlir::sparse_tensor::isBlockSparsity(AffineMap dimToLvl) {
+ if (!dimToLvl)
+ return false;
+ std::map<unsigned, int64_t> coeffientMap;
+ for (auto result : dimToLvl.getResults()) {
+ if (auto binOp = result.dyn_cast<AffineBinaryOpExpr>()) {
+ auto pos = binOp.getLHS().dyn_cast<AffineDimExpr>().getPosition();
+ if (result.getKind() == AffineExprKind::FloorDiv) {
+ // Expect only one floordiv for each dimension.
+ if (coeffientMap.find(pos) != coeffientMap.end())
+ return false;
+ coeffientMap[pos] =
+ binOp.getRHS().dyn_cast<AffineConstantExpr>().getValue();
+ } else if (result.getKind() == AffineExprKind::Mod) {
+ // Expect floordiv before mod.
+ if (coeffientMap.find(pos) == coeffientMap.end())
+ return false;
+ // Expect mod to have the same coefficient as floordiv.
+ if (binOp.getRHS().dyn_cast<AffineConstantExpr>().getValue() !=
+ coeffientMap[pos]) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+ }
+ return !coeffientMap.empty();
+}
+
bool mlir::sparse_tensor::isCOOType(SparseTensorEncodingAttr enc,
Level startLvl, bool isUnique) {
if (!enc ||
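A sketch of what the new inverse check in the verifier amounts to, assuming inferLvlToDim recovers (il, jl, ii, jj) -> (il * 2 + ii, jl * 3 + jj) for the 2x3 block map above (the helper name lvlToDimMatches is illustrative, not part of the patch):

// Hypothetical sketch mirroring the verifier logic: an explicitly supplied
// lvlToDim is accepted only when it equals the inverse inferred from
// dimToLvl, so mismatched multipliers (e.g. il * 3 + ii, as in the new test
// below) are rejected.
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/AffineMap.h"

using namespace mlir;

static bool lvlToDimMatches(AffineMap dimToLvl, AffineMap providedLvlToDim) {
  AffineMap inferred =
      sparse_tensor::inferLvlToDim(dimToLvl, dimToLvl.getContext());
  // No inverse could be inferred and no symbols are involved: reject.
  if (!inferred && dimToLvl.getNumSymbols() == 0)
    return false;
  // Nothing was supplied, or the supplied map is exactly the inferred inverse.
  return !providedLvlToDim || providedLvlToDim == inferred;
}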
diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
index ef1dd3ee41f8576..6514391bae92d9b 100644
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -60,7 +60,7 @@ func.func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
// -----
-// expected-error at +1 {{unexpected dimToLvl mapping from 2 to 1}}
+// expected-error at +1 {{failed to infer lvlToDim from dimToLvl}}
#a = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense)}>
func.func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
@@ -119,7 +119,7 @@ func.func private @tensor_dimtolvl_mismatch(%arg0: tensor<8xi32, #a>) -> ()
// -----
-// expected-error at +1 {{expected a permutation affine map for dimToLvl}}
+// expected-error at +1 {{failed to infer lvlToDim from dimToLvl}}
#a = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d0 : compressed)}>
func.func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()
@@ -251,3 +251,22 @@ func.func private @too_few_lvl_decl(%arg0: tensor<?x?xf64, #TooFewLvlDecl>) {
func.func private @wrong_order_lvl_decl(%arg0: tensor<?x?xf64, #WrongOrderLvlDecl>) {
return
}
+
+// -----
+
+// expected-error at +1 {{expected lvlToDim to be an inverse of dimToLvl}}
+#BSR_explicit = #sparse_tensor.encoding<{
+ map =
+ {il, jl, ii, jj}
+ ( i = il * 3 + ii,
+ j = jl * 2 + jj
+ ) ->
+ ( il = i floordiv 2 : dense,
+ jl = j floordiv 3 : compressed,
+ ii = i mod 2 : dense,
+ jj = j mod 3 : dense
+ )
+}>
+func.func private @BSR_explicit(%arg0: tensor<?x?xf64, #BSR_explicit>) {
+ return
+}
From 9291f369ee352a67886acfc3be8a83f044fb8b44 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Tue, 17 Oct 2023 22:08:38 +0000
Subject: [PATCH 2/3] fix getLvlToDimMap
---
mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h | 2 +-
mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp | 9 +++------
2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
index 181f6f4062e1177..273aa8858f5da3d 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -173,7 +173,7 @@ AffineMap inferLvlToDim(AffineMap dimToLvl, MLIRContext *context);
/// Asserts on failure (so only use when known to succeed).
AffineMap inverseBlockSparsity(AffineMap dimToLvl, MLIRContext *context);
-/// Given the dimToLvl map, returns the block size in vector.
+/// Given the dimToLvl map, returns the block size in a vector.
/// For instance, a 2x3 block will return [2, 3].
/// Only valid block sparsity will be accepted.
SmallVector<unsigned> getBlockSize(AffineMap dimToLvl);
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
index c64390e23d9eace..5f947b67c6d848e 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
@@ -359,15 +359,12 @@ AffineMap DimLvlMap::getLvlToDimMap(MLIRContext *context) const {
for (const auto &dimSpec : dimSpecs) {
auto expr = dimSpec.getExpr().getAffineExpr();
if (expr) {
- auto exprTuple = dimSpec.getExpr().unpackBinop();
- if (std::get<1>(exprTuple) == AffineExprKind::FloorDiv ||
- std::get<1>(exprTuple) == AffineExprKind::Mod) {
- dimAffines.push_back(expr);
- }
+ dimAffines.push_back(expr);
}
}
auto map = AffineMap::get(getLvlRank(), getSymRank(), dimAffines, context);
- if (dimAffines.empty() || map.isIdentity()) return AffineMap();
+ if (dimAffines.empty() || map.isIdentity())
+ return AffineMap();
return map;
}
From ac841fcae1f95096a8e91e7111d7bae54bc76949 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Wed, 18 Oct 2023 15:56:56 +0000
Subject: [PATCH 3/3] address comments
---
mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 75b1700e8017fcd..78820029ae139d6 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -694,12 +694,10 @@ SparseTensorEncodingAttr::verify(function_ref<InFlightDiagnostic()> emitError,
<< dimToLvl.getNumResults() << " != " << lvlRank;
auto inferRes = inferLvlToDim(dimToLvl, dimToLvl.getContext());
// Symbols can't be inferred but are acceptable.
- if (!inferRes && dimToLvl.getNumSymbols() == 0) {
+ if (!inferRes && dimToLvl.getNumSymbols() == 0)
return emitError() << "failed to infer lvlToDim from dimToLvl";
- }
- if (lvlToDim && (inferRes != lvlToDim)) {
+ if (lvlToDim && (inferRes != lvlToDim))
return emitError() << "expected lvlToDim to be an inverse of dimToLvl";
- }
if (dimRank > lvlRank)
return emitError() << "unexpected dimToLvl mapping from " << dimRank
<< " to " << lvlRank;