[Mlir-commits] [mlir] [mlir][sparse] Revert back to treating high and 2OutOf4 as level formats (PR #67203)
llvmlistbot at llvm.org
Fri Sep 22 15:49:49 PDT 2023
llvmbot wrote:
@llvm/pr-subscribers-mlir
Changes:
In the new syntax, **loose_compressed** is parsed as the **CompressedWithHi** level format and **block2_4** as the **TwoOutOfFour** level format. Currently, the supported level properties are **nonunique** and **nonordered**.
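As a small illustration, based on the encodings updated in this PR's test cases, the two affected spellings now look as follows:

```mlir
// Block-compressed COO: loose_compressed replaces the former
// compressed(high) spelling; nonunique remains a level property.
#BCOO = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
}>

// 2:4 structured sparsity: block2_4 is a level format of its own
// rather than a property of compressed.
#NV_24 = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : block2_4)
}>
```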
---
Full diff: https://github.com/llvm/llvm-project/pull/67203.diff
6 Files Affected:
- (modified) mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h (-2)
- (modified) mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp (+5-11)
- (modified) mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir (+3-3)
- (modified) mlir/test/Dialect/SparseTensor/sparse_2d.mlir (+1-1)
- (modified) mlir/test/Dialect/SparseTensor/sparse_foreach.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir (+1-1)
``````````diff
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 675c15347791921..b27cc1eff934f7b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -204,8 +204,6 @@ enum class LevelFormat : uint8_t {
enum class LevelNondefaultProperty : uint8_t {
Nonunique = 1, // 0b00000_01
Nonordered = 2, // 0b00000_10
- High = 32, // 0b01000_00
- Block2_4 = 64 // 0b10000_00
};
/// Returns string representation of the given dimension level type.
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
index b8483f5db130dcf..a4985fc9e8a8b92 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -63,13 +63,11 @@ FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
if (base.compare("dense") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::Dense);
} else if (base.compare("compressed") == 0) {
- // TODO: Remove this condition once dimLvlType enum is refactored. Current
- // enum treats High and TwoOutOfFour as formats instead of properties.
- if (!(properties & static_cast<uint8_t>(LevelNondefaultProperty::High) ||
- properties &
- static_cast<uint8_t>(LevelNondefaultProperty::Block2_4))) {
- properties |= static_cast<uint8_t>(LevelFormat::Compressed);
- }
+ properties |= static_cast<uint8_t>(LevelFormat::Compressed);
+ } else if (base.compare("block2_4") == 0) {
+ properties |= static_cast<uint8_t>(LevelFormat::TwoOutOfFour);
+ } else if (base.compare("loose_compressed") == 0) {
+ properties |= static_cast<uint8_t>(LevelFormat::CompressedWithHi);
} else if (base.compare("singleton") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::Singleton);
} else {
@@ -90,10 +88,6 @@ ParseResult LvlTypeParser::parseProperty(AsmParser &parser,
*properties |= static_cast<uint8_t>(LevelNondefaultProperty::Nonunique);
} else if (strVal.compare("nonordered") == 0) {
*properties |= static_cast<uint8_t>(LevelNondefaultProperty::Nonordered);
- } else if (strVal.compare("high") == 0) {
- *properties |= static_cast<uint8_t>(LevelNondefaultProperty::High);
- } else if (strVal.compare("block2_4") == 0) {
- *properties |= static_cast<uint8_t>(LevelNondefaultProperty::Block2_4);
} else {
parser.emitError(parser.getCurrentLocation(), "unknown level property");
return failure();
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 60367b43a6ee0e4..0e77889242925c9 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -65,7 +65,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
// -----
#BCOO = #sparse_tensor.encoding<{
- map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
}>
// CHECK-LABEL: func private @sparse_bcoo(
@@ -148,7 +148,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
// below) to encode a 2D matrix, but it would require dim2lvl mapping which is not ready yet.
// So we take the simple path for now.
#NV_24= #sparse_tensor.encoding<{
- map = (d0, d1) -> (d0 : dense, d1 : compressed(block2_4))
+ map = (d0, d1) -> (d0 : dense, d1 : block2_4)
}>
// CHECK-LABEL: func private @sparse_2_out_of_4(
@@ -199,7 +199,7 @@ func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
map = ( i, j ) ->
( i : dense,
j floordiv 4 : dense,
- j mod 4 : compressed(block2_4)
+ j mod 4 : block2_4
)
}>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 56f966e903912ba..9ba47bdf6d10845 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -1050,7 +1050,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
}
#BatchedVector = #sparse_tensor.encoding<{
- map = (d0, d1) -> (d0 : dense, d1 : compressed(high))
+ map = (d0, d1) -> (d0 : dense, d1 : loose_compressed)
}>
// CHECK-LABEL: func.func @sub_ss_batched(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>,
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
index 822cfb0148f249a..d05d3d5a49cfa27 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -141,7 +141,7 @@ func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) {
}
#BCOO = #sparse_tensor.encoding<{
- map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
}>
// CHECK-LABEL: func.func @foreach_bcoo(
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index c464d01bf2ab373..a34d62e1369470d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -43,7 +43,7 @@
}>
#BCOO = #sparse_tensor.encoding<{
- map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
}>
module {
``````````
https://github.com/llvm/llvm-project/pull/67203