[Mlir-commits] [mlir] 3b00f44 - [mlir][sparse] Marking off todos and updating commentary
wren romano
llvmlistbot at llvm.org
Mon Jul 31 14:35:46 PDT 2023
Author: wren romano
Date: 2023-07-31T14:35:39-07:00
New Revision: 3b00f4481101c2ab071736d052482e692c6ebb56
URL: https://github.com/llvm/llvm-project/commit/3b00f4481101c2ab071736d052482e692c6ebb56
DIFF: https://github.com/llvm/llvm-project/commit/3b00f4481101c2ab071736d052482e692c6ebb56.diff
LOG: [mlir][sparse] Marking off todos and updating commentary
Depends On D155999
Reviewed By: Peiming
Differential Revision: https://reviews.llvm.org/D156001
Added:
Modified:
mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h
mlir/lib/Dialect/SparseTensor/IR/Detail/Var.cpp
mlir/lib/Dialect/SparseTensor/IR/Detail/Var.h
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
index ca26a83dcd0cdd..3de88aa085635b 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp
@@ -150,8 +150,6 @@ void DimLvlExpr::printAffineExprInternal(
rhs.printStrong(os);
} else {
// Combination of all the special rules for addition/subtraction.
- // TODO(wrengr): despite being succinct, this is prolly too confusing for
- // readers.
lhs.printWeak(os);
const auto rx = matchNeg(rhs);
os << (rx ? " - " : " + ");
@@ -180,8 +178,9 @@ DimSpec::DimSpec(DimVar var, DimExpr expr, SparseTensorDimSliceAttr slice)
: var(var), expr(expr), slice(slice) {}
bool DimSpec::isValid(Ranks const &ranks) const {
- return ranks.isValid(var) && ranks.isValid(expr);
- // TODO(wrengr): is there anything in `slice` that needs validation?
+ // Nothing in `slice` needs additional validation.
+ // We explicitly consider null-expr to be vacuously valid.
+ return ranks.isValid(var) && (!expr || ranks.isValid(expr));
}
bool DimSpec::isFunctionOf(VarSet const &vars) const {
@@ -220,8 +219,8 @@ LvlSpec::LvlSpec(LvlVar var, LvlExpr expr, DimLevelType type)
}
bool LvlSpec::isValid(Ranks const &ranks) const {
+ // Nothing in `type` needs additional validation.
return ranks.isValid(var) && ranks.isValid(expr);
- // TODO(wrengr): is there anything in `type` that needs validation?
}
bool LvlSpec::isFunctionOf(VarSet const &vars) const {
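For illustration only (this sketch is not part of the commit), the "null expr is vacuously valid" check added to DimSpec::isValid above boils down to the short-circuit pattern below; `Expr` and `Ranks` here are hypothetical stand-ins rather than the real sparse-tensor classes:

#include <cstdint>
#include <optional>

struct Ranks { int64_t dimRank; };

struct Expr {
  std::optional<int64_t> dimPos;             // empty => "null" expression
  explicit operator bool() const { return dimPos.has_value(); }
  bool isValidFor(Ranks const &r) const { return *dimPos < r.dimRank; }
};

// Mirrors `ranks.isValid(var) && (!expr || ranks.isValid(expr))`: a null
// expression short-circuits to true instead of being rejected.
bool isValid(Expr const &expr, Ranks const &ranks) {
  return !expr || expr.isValidFor(ranks);
}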
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h
index 9278d7c8d33ff9..a58ddcd4677414 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.h
@@ -220,8 +220,10 @@ class DimSpec final {
void setElideExpr(bool b) { elideExpr = b; }
constexpr SparseTensorDimSliceAttr getSlice() const { return slice; }
- /// Checks whether the variables bound/used by this spec are valid
- /// with respect to the given ranks.
+ /// Checks whether the variables bound/used by this spec are valid with
+ /// respect to the given ranks. Note that null `DimExpr` is considered
+ /// to be vacuously valid, and therefore calling `setExpr` invalidates
+ /// the result of this predicate.
bool isValid(Ranks const &ranks) const;
// TODO(wrengr): Use it or lose it.
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.cpp
index a30058d512d44f..7e848d325fb743 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.cpp
@@ -33,13 +33,10 @@ void Var::dump() const {
//===----------------------------------------------------------------------===//
bool Ranks::isValid(DimLvlExpr expr) const {
- // FIXME(wrengr): we have cases without affine expr at an early point
- if (!expr.getAffineExpr())
- return true;
- // Each `DimLvlExpr` only allows one kind of non-symbol variable.
+ assert(expr);
+ // Compute the maximum identifiers for symbol-vars and dim/lvl-vars
+ // (each `DimLvlExpr` only allows one kind of non-symbol variable).
int64_t maxSym = -1, maxVar = -1;
- // TODO(wrengr): If we run into ASan issues, that may be due to the
- // "`{{...}}`" syntax; so we may want to try using local-variables instead.
mlir::getMaxDimAndSymbol<ArrayRef<AffineExpr>>({{expr.getAffineExpr()}},
maxVar, maxSym);
// TODO(wrengr): We may want to add a call to `LLVM_DEBUG` like
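For context (again not part of the commit), the generic MLIR helper getMaxDimAndSymbol computes the largest dimension and symbol positions appearing in a list of affine expressions. A minimal standalone use for a single expression, mirroring the call above, might look like the following; the exact header locations are my assumption:

#include <cstdint>

#include "llvm/ADT/ArrayRef.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"

int main() {
  mlir::MLIRContext ctx;
  // d2 + s0: the maximum dim position is 2, the maximum symbol position is 0.
  mlir::AffineExpr e =
      mlir::getAffineDimExpr(2, &ctx) + mlir::getAffineSymbolExpr(0, &ctx);
  int64_t maxDim = -1, maxSym = -1;
  // The `{{e}}` spelling builds a one-element list-of-lists so the generic
  // helper (which takes a list of expression containers) can be applied to a
  // single expression.
  mlir::getMaxDimAndSymbol<llvm::ArrayRef<mlir::AffineExpr>>({{e}}, maxDim,
                                                             maxSym);
  return (maxDim == 2 && maxSym == 0) ? 0 : 1;
}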
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.h b/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.h
index 313972b3ca79b6..cf5bc6d753de4f 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.h
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/Var.h
@@ -179,7 +179,10 @@ class Var {
public:
constexpr Var(VarKind vk, Num n) : impl(Impl(vk, n)) {}
Var(AffineSymbolExpr sym) : Var(VarKind::Symbol, sym.getPosition()) {}
- Var(VarKind vk, AffineDimExpr var) : Var(vk, var.getPosition()) {}
+ // TODO(wrengr): Should make the first argument an `ExprKind` instead...?
+ Var(VarKind vk, AffineDimExpr var) : Var(vk, var.getPosition()) {
+ assert(vk != VarKind::Symbol);
+ }
constexpr bool operator==(Var other) const { return impl == other.impl; }
constexpr bool operator!=(Var other) const { return !(*this == other); }
@@ -345,13 +348,8 @@ class VarInfo final {
enum class ID : unsigned {};
private:
- // FUTURE_CL(wrengr): We could use the high-bit of `Var::Impl` to
- // store the `std::optional` bit, therefore allowing us to bitbash the
- // `num` and `kind` fields together.
- //
StringRef name; // The bare-id used in the MLIR source.
llvm::SMLoc loc; // The location of the first occurrence.
- // TODO(wrengr): See the above `LocatedVar` note.
ID id; // The unique `VarInfo`-identifier.
std::optional<Var::Num> num; // The unique `Var`-identifier (if resolved).
VarKind kind; // The kind of variable.
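Finally, a rough sketch (hypothetical types, not the real Var class) of the precondition added to the Var(VarKind, AffineDimExpr) constructor above: symbol variables are funneled through their own constructor, so the dim-position overload asserts that the requested kind is never Symbol.

#include <cassert>
#include <cstdint>

enum class VarKind { Symbol, Dimension, Level };

class Var {
  VarKind kind;
  uint64_t num;

public:
  // Symbols are only ever constructed from a symbol position.
  explicit Var(uint64_t symPos) : kind(VarKind::Symbol), num(symPos) {}

  // Dimension/level variables are constructed from a dim-style position;
  // requesting Symbol here would bypass the dedicated constructor above.
  Var(VarKind vk, uint64_t dimPos) : kind(vk), num(dimPos) {
    assert(vk != VarKind::Symbol && "use the symbol constructor instead");
  }
};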