[Mlir-commits] [mlir] 540d5e0 - [mlir][sparse] Updating STEA parser/printer to use the name "dimSlices"

wren romano llvmlistbot at llvm.org
Tue May 30 15:50:15 PDT 2023


Author: wren romano
Date: 2023-05-30T15:50:07-07:00
New Revision: 540d5e0ce66cefb072ab8f22df62468357c9ed0f

URL: https://github.com/llvm/llvm-project/commit/540d5e0ce66cefb072ab8f22df62468357c9ed0f
DIFF: https://github.com/llvm/llvm-project/commit/540d5e0ce66cefb072ab8f22df62468357c9ed0f.diff

LOG: [mlir][sparse] Updating STEA parser/printer to use the name "dimSlices"

Depends On D151505

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D151513
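
The SparseTensorEncodingAttr (STEA) parameter has been named `dimSlices`
all along, but the parser/printer used the key `slice` instead (see the
FIXME removed below). This change renames the textual key to match the
parameter name. For example, a sliced CSR encoding (as in the
documentation hunk of this diff) is now written as:

    #CSR_SLICE = #sparse_tensor.encoding<{
      lvlTypes = [ "dense", "compressed" ],
      dimSlices = [ (0, 4, 1), (0, 8, ?) ]
    }>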

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
    mlir/test/Dialect/SparseTensor/invalid.mlir
    mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
    mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
    mlir/test/Dialect/SparseTensor/roundtrip.mlir
    mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
    mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
    mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index e49d7be36620c..f0a502e5dcd9c 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -244,7 +244,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     // offset = 0, size = 8, and a dynamic stride on the second dimension).
     #CSR_SLICE = #sparse_tensor.encoding<{
       lvlTypes = [ "dense", "compressed" ],
-      slice = [ (0, 4, 1), (0, 8, ?) ]
+      dimSlices = [ (0, 4, 1), (0, 8, ?) ]
     }>
     ... tensor<?x?xf64, #CSR_SLICE> ...
 
@@ -266,9 +266,6 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     // The required bitwidth for coordinate storage.
     "unsigned":$crdWidth,
     // A slice attribute for each dimension of the tensor type.
-    // FIXME: The name used here is `dimSlices`, however the
-    // parser/printer uses the name `slice` instead.  Therefore
-    // the parser/printer need to be updated to match.
     ArrayRefParameter<
       "::mlir::sparse_tensor::SparseTensorDimSliceAttr",
       "per dimension slice metadata"

diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index ae4198f5dce69..962e0ac21c637 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -408,7 +408,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
 
   // Process the data from the parsed dictionary value into struct-like data.
   SmallVector<DimLevelType> lvlTypes;
-  SmallVector<SparseTensorDimSliceAttr> slices;
+  SmallVector<SparseTensorDimSliceAttr> dimSlices;
   AffineMap dimToLvl = {};
   unsigned posWidth = 0;
   unsigned crdWidth = 0;
@@ -416,7 +416,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
   StringRef attrName;
   // Exactly 5 keys.
   SmallVector<StringRef, 6> keys = {"lvlTypes", "dimToLvl", "posWidth",
-                                    "crdWidth", "slice"};
+                                    "crdWidth", "dimSlices"};
   while (succeeded(parser.parseOptionalKeyword(&attrName))) {
     if (!llvm::is_contained(keys, attrName)) {
       parser.emitError(parser.getNameLoc(), "unexpected key: ") << attrName;
@@ -464,13 +464,13 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
       auto intAttr = llvm::dyn_cast<IntegerAttr>(attr);
       ERROR_IF(!intAttr, "expected an integral index bitwidth")
       crdWidth = intAttr.getInt();
-    } else if (attrName == "slice") {
+    } else if (attrName == "dimSlices") {
       RETURN_ON_FAIL(parser.parseLSquare())
       // Dispatches to DimSliceAttr to skip mnemonic
       bool finished = false;
       while (auto attr = SparseTensorDimSliceAttr::parse(parser, nullptr)) {
         auto sliceAttr = llvm::cast<SparseTensorDimSliceAttr>(attr);
-        slices.push_back(sliceAttr);
+        dimSlices.push_back(sliceAttr);
         if (parser.parseOptionalComma().failed()) {
           finished = true;
           break;
@@ -494,7 +494,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
 
   // Construct struct-like storage for attribute.
   return parser.getChecked<SparseTensorEncodingAttr>(
-      parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, slices);
+      parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, dimSlices);
 }
 
 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
@@ -512,7 +512,7 @@ void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
   if (getCrdWidth())
     printer << ", crdWidth = " << getCrdWidth();
   if (!getDimSlices().empty()) {
-    printer << ", slice = [ ";
+    printer << ", dimSlices = [ ";
     llvm::interleaveComma(getDimSlices(), printer,
                           [&](SparseTensorDimSliceAttr attr) {
                             // Calls SparseTensorDimSliceAttr::print directly to

diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
index 26f41e142b8b7..fd612d5f597d5 100644
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -41,7 +41,7 @@
 
 #COOSlice = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (2, 2, 1), (12, 13, 1) ]
+  dimSlices = [ (2, 2, 1), (12, 13, 1) ]
 }>
 
 // CHECK-LABEL: func @sparse_nop_convert(

diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index c1e8afd9206ba..7a6c4824aabed 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -202,7 +202,7 @@ func.func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -215,7 +215,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
@@ -401,7 +401,7 @@ func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
 
 #CSR = #sparse_tensor.encoding<{
   lvlTypes = ["dense", "compressed"],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 func.func @sparse_convert_to_slice(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {

diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
index 91c3ef7b6d62d..e76df6551c2e1 100644
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -66,6 +66,6 @@ func.func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
+  dimSlices = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
 }>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
index d35296b924739..8aed1d6d205bd 100644
--- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
+++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
@@ -10,7 +10,7 @@
 
 #Slice = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (?, 1, 1), (?, 3, 1) ]
+  dimSlices = [ (?, 1, 1), (?, 3, 1) ]
 }>
 
 // CHECK-LABEL: func @sparse_nop_cast(

diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index 57dff1e53edc3..43429f454e122 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -144,7 +144,7 @@ func.func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64>
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func @sparse_slice_offset(
@@ -160,7 +160,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func @sparse_slice_stride(
@@ -189,7 +189,7 @@ func.func @sparse_metadata_init() -> !sparse_tensor.storage_specifier<#SparseVec
 #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
 #SparseVector_Slice = #sparse_tensor.encoding<{
   lvlTypes = ["compressed"],
-  slice = [ (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?) ]
 }>
 
 // CHECK-LABEL: func @sparse_metadata_init(
@@ -221,7 +221,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)
 
 #SparseVector_Slice = #sparse_tensor.encoding<{
   lvlTypes = ["compressed"],
-  slice = [ (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?) ]
 }>
 
 // CHECK-LABEL: func @sparse_get_md(
@@ -238,7 +238,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector_S
 
 #SparseVector = #sparse_tensor.encoding<{
   lvlTypes = ["compressed"],
-  slice = [ (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?) ]
 }>
 
 // CHECK-LABEL: func @sparse_get_md(

diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 4a7cd76ac489f..75f8d071fcfc0 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -100,31 +100,31 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 
 // -----
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, ?, 1), (?, 4, 2) ]
+  dimSlices = [ (1, ?, 1), (?, 4, 2) ]
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, ?, 1), (?, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, ?, 1), (?, 4, 2) ] }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

diff --git a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
index 8cf8c6c89b63c..efb920b7af13c 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
@@ -6,7 +6,7 @@
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (0, 4, 1), (0, 8, 1) ]
+  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
 }>
 
 // CHECK-LABEL:   func.func @sparse_slice(

diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
index 8038e141662e5..339c94d0f78e1 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -30,12 +30,12 @@ func.func @sparse_foreach_constant() -> () {
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (0, 4, 1), (2, 4, 1) ]
+  dimSlices = [ (0, 4, 1), (2, 4, 1) ]
 }>
 
 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (?, ?, ?), (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
 

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index 43b75f8aa2fe2..fc259b255c456 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -16,12 +16,12 @@
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (?, ?, ?), (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
 #COO = #sparse_tensor.encoding<{
@@ -30,12 +30,12 @@
 
 #COO_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (1, 4, 1), (1, 4, 2) ]
+  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
 }>
 
 #COO_SLICE_DYN = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (?, ?, ?), (?, ?, ?) ]
+  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
 

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
index c5d6032db0e65..c9723070dd18d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -16,7 +16,7 @@
 
 #DCSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (0, 4, 1), (0, 8, 1) ]
+  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
 }>
 
 #CSR = #sparse_tensor.encoding<{
@@ -25,7 +25,7 @@
 
 #CSR_SLICE = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (0, 4, 1), (0, 8, 1) ]
+  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
 }>
 
 #COO = #sparse_tensor.encoding<{
@@ -34,32 +34,32 @@
 
 #CSR_SLICE_1 = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (0, 4, 2), (0, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
 }>
 
 #DCSR_SLICE_1 = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (0, 4, 2), (1, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
 }>
 
 #COO_SLICE_1 = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (0, 4, 2), (0, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
 }>
 
 #COO_SLICE_2 = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed-nu", "singleton" ],
-  slice = [ (0, 4, 2), (1, 4, 1) ]
+  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
 }>
 
 #CSR_SLICE_dyn = #sparse_tensor.encoding<{
   lvlTypes = [ "dense", "compressed" ],
-  slice = [ (?, 4, ?), (?, 4, ?) ]
+  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
 }>
 
 #DCSR_SLICE_dyn = #sparse_tensor.encoding<{
   lvlTypes = [ "compressed", "compressed" ],
-  slice = [ (?, 4, ?), (?, 4, ?) ]
+  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
 }>
 
 module {


        

