[Mlir-commits] [mlir] [mlir][sparse] Update Enum name for CompressedWithHigh (PR #67845)
Yinying Li
llvmlistbot at llvm.org
Fri Sep 29 12:52:06 PDT 2023
https://github.com/yinying-lisa-li updated https://github.com/llvm/llvm-project/pull/67845
>From ab46cf0f76aeaa78f2eb8865d647400f810e35ec Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Fri, 29 Sep 2023 19:09:21 +0000
Subject: [PATCH] [mlir][sparse] Update Enum name for CompressedWithHigh
Change CompressedWithHigh to LooseCompressed.
---
mlir/include/mlir-c/Dialect/SparseTensor.h | 28 ++---
.../mlir/Dialect/SparseTensor/IR/Enums.h | 106 +++++++++---------
.../Dialect/SparseTensor/IR/SparseTensor.h | 4 +-
.../SparseTensor/IR/SparseTensorAttrDefs.td | 2 +-
.../Bindings/Python/DialectSparseTensor.cpp | 14 +--
.../SparseTensor/IR/Detail/LvlTypeParser.cpp | 2 +-
.../SparseTensor/IR/SparseTensorDialect.cpp | 10 +-
.../SparseTensor/Transforms/LoopEmitter.cpp | 16 +--
.../Transforms/SparseTensorCodegen.cpp | 8 +-
.../Transforms/Sparsification.cpp | 6 +-
.../lib/Dialect/SparseTensor/Utils/Merger.cpp | 4 +-
.../SparseTensor/roundtrip_encoding.mlir | 2 +-
.../SparseTensor/CPU/sparse_pack_libgen.mlir | 2 +-
13 files changed, 102 insertions(+), 102 deletions(-)
diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h
index fecbeaf6b0f9d6c..7e47e54e7361d54 100644
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -26,20 +26,20 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor);
/// If updating, keep them in sync and update the static_assert in the impl
/// file.
enum MlirSparseTensorDimLevelType {
- MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE = 4, // 0b00001_00
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED = 8, // 0b00010_00
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU = 9, // 0b00010_01
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO = 10, // 0b00010_10
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO = 11, // 0b00010_11
- MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON = 16, // 0b00100_00
- MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU = 17, // 0b00100_01
- MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO = 18, // 0b00100_10
- MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO = 19, // 0b00100_11
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI = 32, // 0b01000_00
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU = 33, // 0b01000_01
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NO = 34, // 0b01000_10
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU_NO = 35, // 0b01000_11
- MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR = 64, // 0b10000_00
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE = 4, // 0b00001_00
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED = 8, // 0b00010_00
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU = 9, // 0b00010_01
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO = 10, // 0b00010_10
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO = 11, // 0b00010_11
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON = 16, // 0b00100_00
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU = 17, // 0b00100_01
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO = 18, // 0b00100_10
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO = 19, // 0b00100_11
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED = 32, // 0b01000_00
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU = 33, // 0b01000_01
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NO = 34, // 0b01000_10
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU_NO = 35, // 0b01000_11
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR = 64, // 0b10000_00
};
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 7a1aed509c2a360..bc351ec52c0946b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -170,33 +170,33 @@ enum class Action : uint32_t {
// TODO: We should generalize TwoOutOfFour to N out of M and use property to
// encode the value of N and M.
// TODO: Update DimLevelType to use lower 8 bits for storage formats and the
-// higher 4 bits to store level properties. Consider CompressedWithHi and
+// higher 4 bits to store level properties. Consider LooseCompressed and
// TwoOutOfFour as properties instead of formats.
enum class DimLevelType : uint8_t {
- Undef = 0, // 0b00000_00
- Dense = 4, // 0b00001_00
- Compressed = 8, // 0b00010_00
- CompressedNu = 9, // 0b00010_01
- CompressedNo = 10, // 0b00010_10
- CompressedNuNo = 11, // 0b00010_11
- Singleton = 16, // 0b00100_00
- SingletonNu = 17, // 0b00100_01
- SingletonNo = 18, // 0b00100_10
- SingletonNuNo = 19, // 0b00100_11
- CompressedWithHi = 32, // 0b01000_00
- CompressedWithHiNu = 33, // 0b01000_01
- CompressedWithHiNo = 34, // 0b01000_10
- CompressedWithHiNuNo = 35, // 0b01000_11
- TwoOutOfFour = 64, // 0b10000_00
+ Undef = 0, // 0b00000_00
+ Dense = 4, // 0b00001_00
+ Compressed = 8, // 0b00010_00
+ CompressedNu = 9, // 0b00010_01
+ CompressedNo = 10, // 0b00010_10
+ CompressedNuNo = 11, // 0b00010_11
+ Singleton = 16, // 0b00100_00
+ SingletonNu = 17, // 0b00100_01
+ SingletonNo = 18, // 0b00100_10
+ SingletonNuNo = 19, // 0b00100_11
+ LooseCompressed = 32, // 0b01000_00
+ LooseCompressedNu = 33, // 0b01000_01
+ LooseCompressedNo = 34, // 0b01000_10
+ LooseCompressedNuNo = 35, // 0b01000_11
+ TwoOutOfFour = 64, // 0b10000_00
};
/// This enum defines all supported storage format without the level properties.
enum class LevelFormat : uint8_t {
- Dense = 4, // 0b00001_00
- Compressed = 8, // 0b00010_00
- Singleton = 16, // 0b00100_00
- CompressedWithHi = 32, // 0b01000_00
- TwoOutOfFour = 64, // 0b10000_00
+ Dense = 4, // 0b00001_00
+ Compressed = 8, // 0b00010_00
+ Singleton = 16, // 0b00100_00
+ LooseCompressed = 32, // 0b01000_00
+ TwoOutOfFour = 64, // 0b10000_00
};
/// This enum defines all the nondefault properties for storage formats.
@@ -228,14 +228,14 @@ constexpr const char *toMLIRString(DimLevelType dlt) {
return "singleton_no";
case DimLevelType::SingletonNuNo:
return "singleton_nu_no";
- case DimLevelType::CompressedWithHi:
- return "compressed_hi";
- case DimLevelType::CompressedWithHiNu:
- return "compressed_hi_nu";
- case DimLevelType::CompressedWithHiNo:
- return "compressed_hi_no";
- case DimLevelType::CompressedWithHiNuNo:
- return "compressed_hi_nu_no";
+ case DimLevelType::LooseCompressed:
+ return "loose_compressed";
+ case DimLevelType::LooseCompressedNu:
+ return "loose_compressed_nu";
+ case DimLevelType::LooseCompressedNo:
+ return "loose_compressed_no";
+ case DimLevelType::LooseCompressedNuNo:
+ return "loose_compressed_nu_no";
case DimLevelType::TwoOutOfFour:
return "compressed24";
}
@@ -279,9 +279,9 @@ constexpr bool isCompressedDLT(DimLevelType dlt) {
}
/// Check if the `DimLevelType` is loose compressed (regardless of properties).
-constexpr bool isCompressedWithHiDLT(DimLevelType dlt) {
+constexpr bool isLooseCompressedDLT(DimLevelType dlt) {
return (static_cast<uint8_t>(dlt) & ~3) ==
- static_cast<uint8_t>(DimLevelType::CompressedWithHi);
+ static_cast<uint8_t>(DimLevelType::LooseCompressed);
}
/// Check if the `DimLevelType` is singleton (regardless of properties).
@@ -373,10 +373,10 @@ static_assert((isValidDLT(DimLevelType::Undef) &&
isValidDLT(DimLevelType::SingletonNu) &&
isValidDLT(DimLevelType::SingletonNo) &&
isValidDLT(DimLevelType::SingletonNuNo) &&
- isValidDLT(DimLevelType::CompressedWithHi) &&
- isValidDLT(DimLevelType::CompressedWithHiNu) &&
- isValidDLT(DimLevelType::CompressedWithHiNo) &&
- isValidDLT(DimLevelType::CompressedWithHiNuNo) &&
+ isValidDLT(DimLevelType::LooseCompressed) &&
+ isValidDLT(DimLevelType::LooseCompressedNu) &&
+ isValidDLT(DimLevelType::LooseCompressedNo) &&
+ isValidDLT(DimLevelType::LooseCompressedNuNo) &&
isValidDLT(DimLevelType::TwoOutOfFour)),
"isValidDLT definition is broken");
@@ -391,16 +391,16 @@ static_assert((!isCompressedDLT(DimLevelType::Dense) &&
!isCompressedDLT(DimLevelType::SingletonNuNo)),
"isCompressedDLT definition is broken");
-static_assert((!isCompressedWithHiDLT(DimLevelType::Dense) &&
- isCompressedWithHiDLT(DimLevelType::CompressedWithHi) &&
- isCompressedWithHiDLT(DimLevelType::CompressedWithHiNu) &&
- isCompressedWithHiDLT(DimLevelType::CompressedWithHiNo) &&
- isCompressedWithHiDLT(DimLevelType::CompressedWithHiNuNo) &&
- !isCompressedWithHiDLT(DimLevelType::Singleton) &&
- !isCompressedWithHiDLT(DimLevelType::SingletonNu) &&
- !isCompressedWithHiDLT(DimLevelType::SingletonNo) &&
- !isCompressedWithHiDLT(DimLevelType::SingletonNuNo)),
- "isCompressedWithHiDLT definition is broken");
+static_assert((!isLooseCompressedDLT(DimLevelType::Dense) &&
+ isLooseCompressedDLT(DimLevelType::LooseCompressed) &&
+ isLooseCompressedDLT(DimLevelType::LooseCompressedNu) &&
+ isLooseCompressedDLT(DimLevelType::LooseCompressedNo) &&
+ isLooseCompressedDLT(DimLevelType::LooseCompressedNuNo) &&
+ !isLooseCompressedDLT(DimLevelType::Singleton) &&
+ !isLooseCompressedDLT(DimLevelType::SingletonNu) &&
+ !isLooseCompressedDLT(DimLevelType::SingletonNo) &&
+ !isLooseCompressedDLT(DimLevelType::SingletonNuNo)),
+ "isLooseCompressedDLT definition is broken");
static_assert((!isSingletonDLT(DimLevelType::Dense) &&
!isSingletonDLT(DimLevelType::Compressed) &&
@@ -423,10 +423,10 @@ static_assert((isOrderedDLT(DimLevelType::Dense) &&
isOrderedDLT(DimLevelType::SingletonNu) &&
!isOrderedDLT(DimLevelType::SingletonNo) &&
!isOrderedDLT(DimLevelType::SingletonNuNo) &&
- isOrderedDLT(DimLevelType::CompressedWithHi) &&
- isOrderedDLT(DimLevelType::CompressedWithHiNu) &&
- !isOrderedDLT(DimLevelType::CompressedWithHiNo) &&
- !isOrderedDLT(DimLevelType::CompressedWithHiNuNo)),
+ isOrderedDLT(DimLevelType::LooseCompressed) &&
+ isOrderedDLT(DimLevelType::LooseCompressedNu) &&
+ !isOrderedDLT(DimLevelType::LooseCompressedNo) &&
+ !isOrderedDLT(DimLevelType::LooseCompressedNuNo)),
"isOrderedDLT definition is broken");
static_assert((isUniqueDLT(DimLevelType::Dense) &&
@@ -439,10 +439,10 @@ static_assert((isUniqueDLT(DimLevelType::Dense) &&
!isUniqueDLT(DimLevelType::SingletonNu) &&
isUniqueDLT(DimLevelType::SingletonNo) &&
!isUniqueDLT(DimLevelType::SingletonNuNo) &&
- isUniqueDLT(DimLevelType::CompressedWithHi) &&
- !isUniqueDLT(DimLevelType::CompressedWithHiNu) &&
- isUniqueDLT(DimLevelType::CompressedWithHiNo) &&
- !isUniqueDLT(DimLevelType::CompressedWithHiNuNo)),
+ isUniqueDLT(DimLevelType::LooseCompressed) &&
+ !isUniqueDLT(DimLevelType::LooseCompressedNu) &&
+ isUniqueDLT(DimLevelType::LooseCompressedNo) &&
+ !isUniqueDLT(DimLevelType::LooseCompressedNuNo)),
"isUniqueDLT definition is broken");
} // namespace sparse_tensor
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
index 9cab6b6a027cdd4..3eb9ce010cb006f 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -114,10 +114,10 @@ SparseTensorEncodingAttr getSparseTensorEncoding(Type type);
/// Convenience method to query whether a given DLT needs both position and
/// coordinates array or only coordinates array.
constexpr inline bool isDLTWithPos(DimLevelType dlt) {
- return isCompressedWithHiDLT(dlt) || isCompressedDLT(dlt);
+ return isLooseCompressedDLT(dlt) || isCompressedDLT(dlt);
}
constexpr inline bool isDLTWithCrd(DimLevelType dlt) {
- return isSingletonDLT(dlt) || isCompressedWithHiDLT(dlt) ||
+ return isSingletonDLT(dlt) || isLooseCompressedDLT(dlt) ||
isCompressedDLT(dlt);
}
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index d311fe7801cc18f..4e38f314a27391d 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -367,7 +367,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
bool isDenseLvl(::mlir::sparse_tensor::Level l) const { return isDenseDLT(getLvlType(l)); }
bool isTwoOutOfFourLvl(::mlir::sparse_tensor::Level l) const { return isTwoOutOfFourDLT(getLvlType(l)); }
bool isCompressedLvl(::mlir::sparse_tensor::Level l) const { return isCompressedDLT(getLvlType(l)); }
- bool isCompressedWithHiLvl(::mlir::sparse_tensor::Level l) const { return isCompressedWithHiDLT(getLvlType(l)); }
+ bool isLooseCompressedLvl(::mlir::sparse_tensor::Level l) const { return isLooseCompressedDLT(getLvlType(l)); }
bool isSingletonLvl(::mlir::sparse_tensor::Level l) const { return isSingletonDLT(getLvlType(l)); }
bool isOrderedLvl(::mlir::sparse_tensor::Level l) const { return isOrderedDLT(getLvlType(l)); }
bool isUniqueLvl(::mlir::sparse_tensor::Level l) const { return isUniqueDLT(getLvlType(l)); }
diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
index 3061e042c851d97..8e9e0b6baf76c20 100644
--- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
+++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -28,13 +28,13 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
.value("singleton_nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU)
.value("singleton_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO)
.value("singleton_nu_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO)
- .value("compressed_hi", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI)
- .value("compressed_hi_nu",
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU)
- .value("compressed_hi_no",
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NO)
- .value("compressed_hi_nu_no",
- MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU_NO);
+ .value("loose_compressed", MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED)
+ .value("loose_compressed_nu",
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU)
+ .value("loose_compressed_no",
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NO)
+ .value("loose_compressed_nu_no",
+ MLIR_SPARSE_TENSOR_DIM_LEVEL_LOOSE_COMPRESSED_NU_NO);
mlir_attribute_subclass(m, "EncodingAttr",
mlirAttributeIsASparseTensorEncodingAttr)
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
index 6938a7ad783ba81..053e067fff64ddb 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -68,7 +68,7 @@ FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
} else if (base.compare("block2_4") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::TwoOutOfFour);
} else if (base.compare("loose_compressed") == 0) {
- properties |= static_cast<uint8_t>(LevelFormat::CompressedWithHi);
+ properties |= static_cast<uint8_t>(LevelFormat::LooseCompressed);
} else if (base.compare("singleton") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::Singleton);
} else {
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index b962dda20cfe64a..3897e1b9ea3597c 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -422,10 +422,10 @@ const static DimLevelType validDLTs[] = {DimLevelType::Dense,
DimLevelType::SingletonNu,
DimLevelType::SingletonNo,
DimLevelType::SingletonNuNo,
- DimLevelType::CompressedWithHi,
- DimLevelType::CompressedWithHiNu,
- DimLevelType::CompressedWithHiNo,
- DimLevelType::CompressedWithHiNuNo};
+ DimLevelType::LooseCompressed,
+ DimLevelType::LooseCompressedNu,
+ DimLevelType::LooseCompressedNo,
+ DimLevelType::LooseCompressedNuNo};
static std::optional<DimLevelType> parseDLT(StringRef str) {
for (DimLevelType dlt : validDLTs)
@@ -712,7 +712,7 @@ mlir::sparse_tensor::getSparseTensorEncoding(Type type) {
bool mlir::sparse_tensor::isCOOType(SparseTensorEncodingAttr enc,
Level startLvl, bool isUnique) {
if (!enc ||
- !(enc.isCompressedLvl(startLvl) || enc.isCompressedWithHiLvl(startLvl)))
+ !(enc.isCompressedLvl(startLvl) || enc.isLooseCompressedLvl(startLvl)))
return false;
const Level lvlRank = enc.getLvlRank();
for (Level l = startLvl + 1; l < lvlRank; ++l)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
index 9feaceac2f51bdd..96eea0d0658290f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -433,7 +433,7 @@ void LoopEmitter::initializeLoopEmit(
!highs[t][l]);
const auto lvlTp = lvlTypes[t][l];
// Handle sparse storage schemes.
- if (isCompressedDLT(lvlTp) || isCompressedWithHiDLT(lvlTp)) {
+ if (isCompressedDLT(lvlTp) || isLooseCompressedDLT(lvlTp)) {
// Generate sparse primitives to obtain positions and coordinates.
positionsBuffers[t][l] = genToPositions(builder, loc, tensor, l);
coordinatesBuffers[t][l] =
@@ -534,7 +534,7 @@ void LoopEmitter::categorizeLoopCondition(
auto lvlType = lvlTypes[t][l];
// Must be a recognizable DLT.
assert(isDenseDLT(lvlType) || isCompressedDLT(lvlType) ||
- isCompressedWithHiDLT(lvlType) || isSingletonDLT(lvlType));
+ isLooseCompressedDLT(lvlType) || isSingletonDLT(lvlType));
bool isSparse = !isDenseDLT(lvlType);
bool isSlice = isSparseSlices[t];
@@ -630,7 +630,7 @@ std::pair<Operation *, Value> LoopEmitter::emitForLoopOverTensorAtLvl(
OpBuilder &builder, Location loc, TensorId tid, Level lvl, Value lo,
Value hi, MutableArrayRef<Value> reduc, bool isParallel) {
bool isSparseCond = isCompressedDLT(lvlTypes[tid][lvl]) ||
- isCompressedWithHiDLT(lvlTypes[tid][lvl]) ||
+ isLooseCompressedDLT(lvlTypes[tid][lvl]) ||
isSingletonDLT(lvlTypes[tid][lvl]);
// TODO: support dynamic slices.
// Uses the first dimension here to build the loop bound (which is also the
@@ -893,7 +893,7 @@ std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(
// Dense levels are handled by the shared universal index.
assert(!isDenseCond(cKind));
// Must be a recognizable sparse level.
- assert(isCompressedDLT(lvlTp) || isCompressedWithHiDLT(lvlTp) ||
+ assert(isCompressedDLT(lvlTp) || isLooseCompressedDLT(lvlTp) ||
isSingletonDLT(lvlTp));
(void)lvlTp;
@@ -1012,7 +1012,7 @@ std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(
for (auto [tid, lvl] : unpackTensorLevelFromCondRange(spConds)) {
const auto lvlTp = lvlTypes[tid][lvl];
if (isCompressedDLT(lvlTp) || isSingletonDLT(lvlTp) ||
- isCompressedWithHiDLT(lvlTp)) {
+ isLooseCompressedDLT(lvlTp)) {
const auto crd = coords[tid][lvl];
if (min) {
Value cmp = CMPI(ult, coords[tid][lvl], min);
@@ -1237,11 +1237,11 @@ void LoopEmitter::prepareLoopOverTensorAtLvl(OpBuilder &builder, Location loc,
// Either the first level, or the previous level has been set.
/// FIXME: See the [CLARIFY_POSITS_LVL] note in the header.
assert(lvl == 0 || posits[tid][lvl - 1]);
- if (isCompressedDLT(lvlTp) || isCompressedWithHiDLT(lvlTp)) {
+ if (isCompressedDLT(lvlTp) || isLooseCompressedDLT(lvlTp)) {
const Value mem = positionsBuffers[tid][lvl];
Value pLo = lvl == 0 ? c0 : posits[tid][lvl - 1];
- if (isCompressedWithHiDLT(lvlTp))
+ if (isLooseCompressedDLT(lvlTp))
pLo = builder.create<arith::MulIOp>(loc, pLo, C_IDX(2));
posits[tid][lvl] = genIndexLoad(builder, loc, mem, pLo);
@@ -1538,7 +1538,7 @@ void LoopEmitter::exitWhileLoop(OpBuilder &builder, Location loc,
for (auto [tid, lvl] : unpackTensorLevelRange(loopInfo.trivialTidLvls)) {
const auto lvlTp = lvlTypes[tid][lvl];
if (isCompressedDLT(lvlTp) || isSingletonDLT(lvlTp) ||
- isCompressedWithHiDLT(lvlTp)) {
+ isLooseCompressedDLT(lvlTp)) {
const Value crd = coords[tid][lvl];
const Value pos = posits[tid][lvl];
Value cmp = CMPI(eq, crd, iv);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 3a3ea311c49d988..f02276fba0d526b 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -498,7 +498,7 @@ static void genEndInsert(OpBuilder &builder, Location loc,
const Level lvlRank = stt.getLvlRank();
for (Level l = 0; l < lvlRank; l++) {
const auto dlt = stt.getLvlType(l);
- if (isCompressedWithHiDLT(dlt))
+ if (isLooseCompressedDLT(dlt))
llvm_unreachable("TODO: Not yet implemented");
if (isCompressedDLT(dlt)) {
// Compressed dimensions need a position cleanup for all entries
@@ -1237,7 +1237,7 @@ class SparseNumberOfEntriesConverter
ConversionPatternRewriter &rewriter) const override {
// Query memSizes for the actually stored values.
// FIXME: the nse value computed in this way might be wrong when there is
- // any "compressed_hi" level.
+ // any "loose_compressed" level.
rewriter.replaceOp(
op, genValMemSize(rewriter, op.getLoc(), adaptor.getTensor()));
return success();
@@ -1316,8 +1316,8 @@ struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
}
if (isDLTWithPos(dlt)) {
- assert(isCompressedDLT(dlt) || isCompressedWithHiDLT(dlt));
- if (isCompressedWithHiDLT(dlt)) {
+ assert(isCompressedDLT(dlt) || isLooseCompressedDLT(dlt));
+ if (isLooseCompressedDLT(dlt)) {
memSize = rewriter.create<arith::MulIOp>(loc, memSize, c2);
posBack = rewriter.create<arith::SubIOp>(loc, memSize, c1);
} else {
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index fee32a5717f62ae..0a6cc32253d26d1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -815,7 +815,7 @@ static bool computeIterationGraph(CodegenEnv &env, SortMask mask,
const TensorId tid = env.makeTensorId(t.getOperandNumber());
for (LoopId i = 0; i < numLoops; i++) {
const auto dltI = env.dlt(tid, i);
- if (isCompressedDLT(dltI) || isCompressedWithHiDLT(dltI) ||
+ if (isCompressedDLT(dltI) || isLooseCompressedDLT(dltI) ||
isSingletonDLT(dltI)) {
for (LoopId j = 0; j < numLoops; j++)
if (isUndefDLT(env.dlt(tid, j))) {
@@ -1508,7 +1508,7 @@ static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId ldx,
assert(ldx == env.merger().loop(b));
Value clause;
if (isCompressedDLT(dlt) || isSingletonDLT(dlt) ||
- isCompressedWithHiDLT(dlt)) {
+ isLooseCompressedDLT(dlt)) {
assert(lvl.has_value());
const Value crd = env.emitter().getCoords()[tid][*lvl];
const Value lvar = env.getLoopVar(ldx);
@@ -1593,7 +1593,7 @@ static bool startLoopSeq(CodegenEnv &env, OpBuilder &builder, ExprId exp,
needsUniv = true;
}
if (isCompressedDLT(dlt) || isSingletonDLT(dlt) ||
- isCompressedWithHiDLT(dlt) || isIdxReduc) {
+ isLooseCompressedDLT(dlt) || isIdxReduc) {
// Only when this is an index reduction loop can the dlt be undefined.
assert(!isUndefDLT(dlt) || isIdxReduc);
// sparse/singleton levels, or a dense/sparse index reduction loop.
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index 4143efbd0ab28e0..b6970b8b96d4659 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -490,7 +490,7 @@ BitVector Merger::simplifyCond(LatSetId s0, LatPointId p0) {
if (simple[b] && !isSparseLvlWithNonTrivialIdxExp(b)) {
const auto dlt = getLvlType(b);
if (!isCompressedDLT(dlt) && !isSingletonDLT(dlt) &&
- !isCompressedWithHiDLT(dlt)) {
+ !isLooseCompressedDLT(dlt)) {
if (reset)
simple.reset(b);
reset = true;
@@ -671,7 +671,7 @@ bool Merger::hasAnySparse(const BitVector &bits) const {
for (TensorLoopId b : bits.set_bits()) {
const auto dlt = getLvlType(b);
if (isCompressedDLT(dlt) || isSingletonDLT(dlt) ||
- isCompressedWithHiDLT(dlt))
+ isLooseCompressedDLT(dlt))
return true;
}
return hasSparseIdxReduction(bits);
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index d82c59a714d14ac..39e3ef102423524 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -69,7 +69,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
}>
// CHECK-LABEL: func private @sparse_bcoo(
-// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "loose_compressed_nu", "singleton" ] }>>)
func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
// -----
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
index 05c4900fb58f78a..6540c950ab675b0 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
@@ -42,7 +42,7 @@
crdWidth = 32
}>
-// TODO: "compressed_hi" is not supported by libgen path.
+// TODO: "loose_compressed" is not supported by libgen path.
// #BCOO = #sparse_tensor.encoding<{
// map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
//}>
More information about the Mlir-commits
mailing list