[libc-commits] [libc] [mlir][sparse] Change tests to use new syntax for ELL and slice (PR #67569)

Yinying Li via libc-commits libc-commits at lists.llvm.org
Wed Sep 27 13:23:26 PDT 2023


https://github.com/yinying-lisa-li updated https://github.com/llvm/llvm-project/pull/67569

From 0f1db4000d971192e0b2b027746cdd37083bf87f Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Wed, 27 Sep 2023 14:51:34 +0000
Subject: [PATCH 1/2] [mlir][sparse] Change tests to use new syntax for ELL and
 slice

Examples:

#ELL = #sparse_tensor.encoding<{
  lvlTypes = [ "dense", "dense", "compressed" ],
  dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)>
}>
to
#ELL = #sparse_tensor.encoding<{
  map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
}>
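
For reference, the new ELL form attaches to a tensor type like any other
encoding. A minimal usage sketch (the function name is illustrative; the
encoding is the one round-tripped by the tests in this patch):

func.func private @sparse_ell_use(tensor<?x?xf64, #ELL>)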

#CSR_SLICE = #sparse_tensor.encoding<{
  lvlTypes = [ "dense", "compressed" ],
  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
to
#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
}>
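
Likewise for the slice form, a minimal usage sketch (illustrative function
name; the encoding matches the roundtrip tests in this patch):

func.func private @sparse_slice_use(tensor<?x?xf64, #CSR_SLICE>)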
---
 .../SparseTensor/IR/SparseTensorAttrDefs.td   |  5 ++--
 mlir/test/CAPI/sparse_tensor.c                |  3 +--
 .../SparseTensor/convert_sparse2sparse.mlir   |  3 +--
 mlir/test/Dialect/SparseTensor/invalid.mlir   |  9 +++----
 .../SparseTensor/invalid_encoding.mlir        |  3 +--
 .../Dialect/SparseTensor/pre_rewriting.mlir   |  3 +--
 mlir/test/Dialect/SparseTensor/roundtrip.mlir | 15 ++++--------
 .../SparseTensor/roundtrip_encoding.mlir      | 20 +++-------------
 .../SparseTensor/sparse_extract_slice.mlir    |  3 +--
 .../Dialect/SparseTensor/sparse_foreach.mlir  |  6 ++---
 .../CPU/sparse_foreach_slices.mlir            | 12 ++++------
 .../SparseTensor/CPU/sparse_matmul_slice.mlir | 24 +++++++------------
 12 files changed, 33 insertions(+), 73 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index e6577aed063ca7f..58e09f0d5e1803b 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -228,8 +228,9 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
     // offset = 0, size = 8, and a dynamic stride on the second dimension).
     #CSR_SLICE = #sparse_tensor.encoding<{
-      lvlTypes = [ "dense", "compressed" ],
-      dimSlices = [ (0, 4, 1), (0, 8, ?) ]
+      map = (d0 : #sparse_tensor<slice(0, 4, 1)>,
+             d1 : #sparse_tensor<slice(0, 8, ?)>) ->
+            (d0 : dense, d1 : compressed)
     }>
     ... tensor<?x?xf64, #CSR_SLICE> ...
 
diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c
index 30ef1557e73302f..33ee8e784096a18 100644
--- a/mlir/test/CAPI/sparse_tensor.c
+++ b/mlir/test/CAPI/sparse_tensor.c
@@ -25,8 +25,7 @@ static int testRoundtripEncoding(MlirContext ctx) {
   // clang-format off
   const char *originalAsm =
     "#sparse_tensor.encoding<{ "
-    "lvlTypes = [ \"dense\", \"compressed\", \"compressed\"], "
-    "dimToLvl = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, "
+    "map = [s0](d0, d1) -> (s0 : dense, d0 : compressed, d1 : compressed), "
     "posWidth = 32, crdWidth = 64 }>";
   // clang-format on
   MlirAttribute originalAttr =
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index 54cdfc690952d9a..2a2619daf493654 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -39,8 +39,7 @@
 }>
 
 #COOSlice = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (2, 2, 1), (12, 13, 1) ]
+  map = (d0 : #sparse_tensor<slice(2, 2, 1)>, d1 : #sparse_tensor<slice(12, 13, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 // CHECK-LABEL: func @sparse_nop_convert(
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index c0e813dcde7c57e..2a13f208fa225d3 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -201,8 +201,7 @@ func.func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -214,8 +213,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -400,8 +398,7 @@ func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
 // -----
 
 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 func.func @sparse_convert_to_slice(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
index 8adf981d00051c5..ef1dd3ee41f8576 100644
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -218,8 +218,7 @@ func.func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
+  map = (d0 : #sparse_tensor<slice(-1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed) // expected-error{{expect positive value or ? for slice offset/size/stride}}
 }>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 
diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
index ac20ec1b950c258..c2cab30edec9180 100644
--- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
+++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
@@ -13,8 +13,7 @@
 }>
 
 #Slice = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (?, 1, 1), (?, 3, 1) ]
+  map = (d0 : #sparse_tensor<slice(?, 1, 1)>, d1 : #sparse_tensor<slice(?, 3, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 #sel_trait = {
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index d252fa559a1543f..33471497cc69767 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -143,8 +143,7 @@ func.func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64>
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 // CHECK-LABEL: func @sparse_slice_offset(
@@ -159,8 +158,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 // CHECK-LABEL: func @sparse_slice_stride(
@@ -188,8 +186,7 @@ func.func @sparse_metadata_init() -> !sparse_tensor.storage_specifier<#SparseVec
 
 #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
 #SparseVector_Slice = #sparse_tensor.encoding<{
-  lvlTypes = ["compressed"],
-  dimSlices = [ (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
 }>
 
 // CHECK-LABEL: func @sparse_metadata_init(
@@ -220,8 +217,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)
 // -----
 
 #SparseVector_Slice = #sparse_tensor.encoding<{
-  lvlTypes = ["compressed"],
-  dimSlices = [ (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
 }>
 
 // CHECK-LABEL: func @sparse_get_md(
@@ -237,8 +233,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector_S
 // -----
 
 #SparseVector = #sparse_tensor.encoding<{
-  lvlTypes = ["compressed"],
-  dimSlices = [ (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
 }>
 
 // CHECK-LABEL: func @sparse_get_md(
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 60367b43a6ee0e4..e7b2524e77298e2 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -101,8 +101,7 @@ func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
 // -----
 
 #ELL = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)>
+  map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
 }>
 
 // CHECK-LABEL: func private @sparse_ell(
@@ -112,8 +111,7 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
@@ -123,19 +121,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
-}>
-
-// CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
-func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
-
-// -----
-
-#CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, ?, 1), (?, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
diff --git a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
index d14d3638a9c2641..828c36d75b9c7aa 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
@@ -5,8 +5,7 @@
 }>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(0, 8, 1)>) -> (d0 : dense, d1 : compressed)
 }>
 
 // CHECK-LABEL:   func.func @sparse_slice(
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
index 822cfb0148f249a..e5b9d2a180ce8af 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -29,13 +29,11 @@ func.func @sparse_foreach_constant() -> () {
 }
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (0, 4, 1), (2, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(2, 4, 1)>) -> (d0 : compressed, d1 : compressed)
 }>
 
 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>, d1 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed, d1 : compressed)
 }>
 
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index d10ae8aee8141f4..e0dd31b2ca8671c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -28,13 +28,11 @@
 }>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>
 
 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>, d1 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : dense, d1 : compressed)
 }>
 
 #COO = #sparse_tensor.encoding<{
@@ -42,13 +40,11 @@
 }>
 
 #COO_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 #COO_SLICE_DYN = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>, d1 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
index c04d9b82e811270..21934fd72f018e9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -27,8 +27,7 @@
 }>
 
 #DCSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(0, 8, 1)>) -> (d0 : compressed, d1 : compressed)
 }>
 
 #CSR = #sparse_tensor.encoding<{
@@ -36,8 +35,7 @@
 }>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(0, 8, 1)>) -> (d0 : dense, d1 : compressed)
 }>
 
 #COO = #sparse_tensor.encoding<{
@@ -45,33 +43,27 @@
 }>
 
 #CSR_SLICE_1 = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(0, 4, 1)>) -> (d0 : dense, d1 : compressed)
 }>
 
 #DCSR_SLICE_1 = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(1, 4, 1)>) -> (d0 : compressed, d1 : compressed)
 }>
 
 #COO_SLICE_1 = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(0, 4, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 #COO_SLICE_2 = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(1, 4, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>
 
 #CSR_SLICE_dyn = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, 4, ?)>, d1 : #sparse_tensor<slice(?, 4, ?)>) -> (d0 : dense, d1 : compressed)
 }>
 
 #DCSR_SLICE_dyn = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, 4, ?)>, d1 : #sparse_tensor<slice(?, 4, ?)>) -> (d0 : compressed, d1 : compressed)
 }>
 
 module {

From 80e188bf680fc0a6e6f197dc8ff6f1dc3cb5a69c Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Wed, 27 Sep 2023 20:23:09 +0000
Subject: [PATCH 2/2] Update sparse tensor attr description

---
 .../SparseTensor/IR/SparseTensorAttrDefs.td   | 27 ++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index a1e682c106ac177..d311fe7801cc18f 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -237,12 +237,33 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     }>
     ... tensor<20x30xf32, #BSR_explicit> ...
 
+    // ELL format.
+    // In the simple ELL format for a matrix, one array stores values and
+    // another array stores column indices. The arrays have the same number
+    // of rows as the original matrix, but only as many columns as the
+    // maximum number of nonzeros in any row of the original matrix.
+    // There are many variants of ELL, such as the jagged diagonal scheme.
+    // To implement ELL, the map provides a notion of "counting a
+    // dimension", where every stored element with the same coordinate
+    // is mapped to a new slice. For instance, ELL storage of a 2-d
+    // tensor can be defined with the mapping (i, j) -> (#i, i, j)
+    // using the notation of [Chou20]. Lacking the # symbol in MLIR's
+    // affine maps, we use a free symbol c to define such counting,
+    // together with a constant that denotes the number of resulting
+    // slices. For example, the mapping [c](i, j) -> (c * 3 * i, i, j)
+    // with the level types ["dense", "dense", "compressed"] denotes ELL
+    // storage with three jagged diagonals that count the dimension i.
+    #ELL = #sparse_tensor.encoding<{
+      map = [c](i, j) -> (c * 3 * i : dense, i : dense, j : compressed)
+    }>
+    ... tensor<?x?xf64, #ELL> ...
+
     // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
     // offset = 0, size = 8, and a dynamic stride on the second dimension).
     #CSR_SLICE = #sparse_tensor.encoding<{
-      map = (d0 : #sparse_tensor<slice(0, 4, 1)>,
-             d1 : #sparse_tensor<slice(0, 8, ?)>) ->
-            (d0 : dense, d1 : compressed)
+      map = (i : #sparse_tensor<slice(0, 4, 1)>,
+             j : #sparse_tensor<slice(0, 8, ?)>) ->
+            (i : dense, j : compressed)
     }>
     ... tensor<?x?xf64, #CSR_SLICE> ...
 

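The slice encodings above are produced by tensor.extract_slice. A minimal
sketch of the pattern exercised in sparse_extract_slice.mlir (the static
shapes and the function name here are illustrative):

#CSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>
#CSR_SLICE = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(0, 8, 1)>) -> (d0 : dense, d1 : compressed)
}>
func.func @extract(%t : tensor<8x8xf64, #CSR>) -> tensor<4x8xf64, #CSR_SLICE> {
  // Offsets (0, 0), sizes (4, 8), strides (1, 1) match the slice attributes.
  %0 = tensor.extract_slice %t[0, 0][4, 8][1, 1] : tensor<8x8xf64, #CSR> to tensor<4x8xf64, #CSR_SLICE>
  return %0 : tensor<4x8xf64, #CSR_SLICE>
}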

