[Mlir-commits] [mlir] [mlir][sparse] Revert back to treating high and 2OutOf4 as level formats (PR #67203)

Yinying Li llvmlistbot at llvm.org
Fri Sep 22 15:48:41 PDT 2023


https://github.com/yinying-lisa-li created https://github.com/llvm/llvm-project/pull/67203

In the new syntax, **loose_compressed** is parsed as the **CompressedWithHi** level format and **block2_4** as the **TwoOutOfFour** level format. Currently, the supported level properties are **nonunique** and **nonordered**.
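For illustration, a minimal sketch of encodings written in the new syntax, taken from the tests updated in this patch:

```mlir
// Loose-compressed level format, with nonunique as a level property.
#BCOO = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
}>

// 2:4 structured sparsity expressed as a level format rather than a property.
#NV_24 = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : block2_4)
}>
```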

From 208a4b2dd956072f3e10982c0d75fc57324d584c Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Fri, 22 Sep 2023 22:37:52 +0000
Subject: [PATCH] [mlir][sparse] Revert back to treating high and 2OutOf4 as
 level formats

---
 .../include/mlir/Dialect/SparseTensor/IR/Enums.h |  2 --
 .../SparseTensor/IR/Detail/LvlTypeParser.cpp     | 16 +++++-----------
 .../Dialect/SparseTensor/roundtrip_encoding.mlir |  6 +++---
 mlir/test/Dialect/SparseTensor/sparse_2d.mlir    |  2 +-
 .../Dialect/SparseTensor/sparse_foreach.mlir     |  2 +-
 .../Dialect/SparseTensor/CPU/sparse_pack.mlir    |  2 +-
 6 files changed, 11 insertions(+), 19 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 675c15347791921..b27cc1eff934f7b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -204,8 +204,6 @@ enum class LevelFormat : uint8_t {
 enum class LevelNondefaultProperty : uint8_t {
   Nonunique = 1,  // 0b00000_01
   Nonordered = 2, // 0b00000_10
-  High = 32,      // 0b01000_00
-  Block2_4 = 64   // 0b10000_00
 };
 
 /// Returns string representation of the given dimension level type.
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
index b8483f5db130dcf..a4985fc9e8a8b92 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -63,13 +63,11 @@ FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
   if (base.compare("dense") == 0) {
     properties |= static_cast<uint8_t>(LevelFormat::Dense);
   } else if (base.compare("compressed") == 0) {
-    // TODO: Remove this condition once dimLvlType enum is refactored. Current
-    // enum treats High and TwoOutOfFour as formats instead of properties.
-    if (!(properties & static_cast<uint8_t>(LevelNondefaultProperty::High) ||
-          properties &
-              static_cast<uint8_t>(LevelNondefaultProperty::Block2_4))) {
-      properties |= static_cast<uint8_t>(LevelFormat::Compressed);
-    }
+    properties |= static_cast<uint8_t>(LevelFormat::Compressed);
+  } else if (base.compare("block2_4") == 0) {
+    properties |= static_cast<uint8_t>(LevelFormat::TwoOutOfFour);
+  } else if (base.compare("loose_compressed") == 0) {
+    properties |= static_cast<uint8_t>(LevelFormat::CompressedWithHi);
   } else if (base.compare("singleton") == 0) {
     properties |= static_cast<uint8_t>(LevelFormat::Singleton);
   } else {
@@ -90,10 +88,6 @@ ParseResult LvlTypeParser::parseProperty(AsmParser &parser,
     *properties |= static_cast<uint8_t>(LevelNondefaultProperty::Nonunique);
   } else if (strVal.compare("nonordered") == 0) {
     *properties |= static_cast<uint8_t>(LevelNondefaultProperty::Nonordered);
-  } else if (strVal.compare("high") == 0) {
-    *properties |= static_cast<uint8_t>(LevelNondefaultProperty::High);
-  } else if (strVal.compare("block2_4") == 0) {
-    *properties |= static_cast<uint8_t>(LevelNondefaultProperty::Block2_4);
   } else {
     parser.emitError(parser.getCurrentLocation(), "unknown level property");
     return failure();
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 60367b43a6ee0e4..0e77889242925c9 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -65,7 +65,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
 // -----
 
 #BCOO = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
 }>
 
 // CHECK-LABEL: func private @sparse_bcoo(
@@ -148,7 +148,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 // below) to encode a 2D matrix, but it would require dim2lvl mapping which is not ready yet.
 // So we take the simple path for now.
 #NV_24= #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d0 : dense, d1 : compressed(block2_4))
+  map = (d0, d1) -> (d0 : dense, d1 : block2_4)
 }>
 
 // CHECK-LABEL: func private @sparse_2_out_of_4(
@@ -199,7 +199,7 @@ func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
   map = ( i, j ) ->
   ( i            : dense,
     j floordiv 4 : dense,
-    j mod 4      : compressed(block2_4)
+    j mod 4      : block2_4
   )
 }>
 
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 56f966e903912ba..9ba47bdf6d10845 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -1050,7 +1050,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
 }
 
 #BatchedVector = #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d0 : dense, d1 : compressed(high))
+  map = (d0, d1) -> (d0 : dense, d1 : loose_compressed)
 }>
 // CHECK-LABEL:   func.func @sub_ss_batched(
 // CHECK-SAME:      %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>,
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
index 822cfb0148f249a..d05d3d5a49cfa27 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -141,7 +141,7 @@ func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) {
 }
 
 #BCOO = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
 }>
 
 // CHECK-LABEL:   func.func @foreach_bcoo(
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index c464d01bf2ab373..a34d62e1369470d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -43,7 +43,7 @@
 }>
 
 #BCOO = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
 }>
 
 module {


