[Mlir-commits] [mlir] 0a29219 - [mlir][sparse] sparse tensor type encoding migration (new home, new builders)

Aart Bik llvmlistbot at llvm.org
Fri Apr 30 19:30:53 PDT 2021


Author: Aart Bik
Date: 2021-04-30T19:30:38-07:00
New Revision: 0a2921993199fbf8af5078dce0dd933d6a50e25b

URL: https://github.com/llvm/llvm-project/commit/0a2921993199fbf8af5078dce0dd933d6a50e25b
DIFF: https://github.com/llvm/llvm-project/commit/0a2921993199fbf8af5078dce0dd933d6a50e25b.diff

LOG: [mlir][sparse] sparse tensor type encoding migration (new home, new builders)

(1) migrates the encoding from TensorDialect into the new SparseTensorDialect
(2) replaces dictionary-based storage and builders with struct-like data

Reviewed By: mehdi_amini

Differential Revision: https://reviews.llvm.org/D101669
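
For context on (2), the new attribute round-trips through the generic attribute parser and exposes typed accessors instead of a DictionaryAttr. A minimal C++ sketch, illustrative only and not part of this patch (the helper name is made up; it assumes the mlir::parseAttribute entry point and the accessors that mlir-tblgen generates from the parameter list in the new .td file):

  #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
  #include "mlir/IR/MLIRContext.h"
  #include "mlir/Parser.h"
  #include <cassert>

  using namespace mlir;

  static void inspectEncoding(MLIRContext &context) {
    context.loadDialect<sparse_tensor::SparseTensorDialect>();
    // New dialect-scoped syntax: #sparse_tensor.encoding<{...}>
    // (previously #tensor.sparse<{...}> with "sparse"-prefixed keys).
    Attribute attr = parseAttribute(
        "#sparse_tensor.encoding<{ dimLevelType = [ \"dense\", \"compressed\" ], "
        "dimOrdering = affine_map<(i,j) -> (i,j)>, "
        "pointerBitWidth = 64, indexBitWidth = 64 }>",
        &context);
    assert(attr && "failed to parse the encoding attribute");
    auto enc = attr.cast<sparse_tensor::SparseTensorEncodingAttr>();
    // Struct-like accessors replace the old dictionary lookups.
    assert(enc.getDimLevelType().size() == 2);
    assert(enc.getDimOrdering().isPermutation());
    assert(enc.getPointerBitWidth() == 64 && enc.getIndexBitWidth() == 64);
  }

The same textual form is exercised by the new roundtrip_encoding.mlir test below.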

Added: 
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
    mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
    mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir

Modified: 
    mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
    mlir/include/mlir/Dialect/Tensor/IR/CMakeLists.txt
    mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
    mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
    mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/Tensor/IR/CMakeLists.txt
    mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp

Removed: 
    mlir/include/mlir/Dialect/Tensor/IR/TensorAttrDefs.td
    mlir/test/Dialect/Tensor/invalid_sparse_tensor.mlir
    mlir/test/Dialect/Tensor/valid_sparse.mlir


################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
index bb13cb9edb70f..e7a1381393e15 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
@@ -1,2 +1,7 @@
 add_mlir_dialect(SparseTensorOps sparse_tensor)
 add_mlir_doc(SparseTensorOps SparseTensorOps Dialects/ -gen-dialect-doc)
+
+set(LLVM_TARGET_DEFINITIONS SparseTensorAttrDefs.td)
+mlir_tablegen(SparseTensorAttrDefs.h.inc -gen-attrdef-decls)
+mlir_tablegen(SparseTensorAttrDefs.cpp.inc -gen-attrdef-defs)
+add_public_tablegen_target(MLIRSparseTensorAttrDefsIncGen)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
index c5a466e9d7dbd..7a8a249a7a959 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -13,8 +13,12 @@
 #include "mlir/IR/Dialect.h"
 #include "mlir/IR/OpDefinition.h"
 #include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/TensorEncoding.h"
 #include "mlir/Interfaces/SideEffectInterfaces.h"
 
+#define GET_ATTRDEF_CLASSES
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.h.inc"
+
 #define GET_OP_CLASSES
 #include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.h.inc"
 

diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
similarity index 54%
rename from mlir/include/mlir/Dialect/Tensor/IR/TensorAttrDefs.td
rename to mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index 8103878407d13..5c71fb43bfa34 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -1,4 +1,4 @@
-//===-- TensorAttrDefs.td - Tensor Attributes Definitions --*- tablegen -*-===//
+//===-- SparseTensorAttrDefs.td - attributes definitions ---*- tablegen -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,77 +6,75 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef TENSOR_ATTRDEFS
-#define TENSOR_ATTRDEFS
+#ifndef SPARSETENSOR_ATTRDEFS
+#define SPARSETENSOR_ATTRDEFS
 
-include "mlir/Dialect/Tensor/IR/TensorBase.td"
+include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
 include "mlir/IR/TensorEncoding.td"
 
 // All of the Tensor attributes will extend this class.
-class Tensor_Attr<string name,
-                  list<Trait> traits = []> : AttrDef<Tensor_Dialect, name, traits>;
+class SparseTensor_Attr<string name,
+                        list<Trait> traits = []>
+	: AttrDef<SparseTensor_Dialect, name, traits>;
 
 // Sparse tensor encoding attribute.
-def SparseTensorEncodingAttr : Tensor_Attr<"SparseTensorEncoding",
+def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
          [ DeclareAttrInterfaceMethods<VerifiableTensorEncoding> ] > {
-  let mnemonic = "sparse";
+  let mnemonic = "encoding";
 
   let description = [{
     An attribute to encode "TACO"-style information (see tensor-compiler.org)
-    on the sparsity of tensors. The semantics are defined by means of the
-    methods getDimLevelType(), getDimOrdering(), getPointerType(), and
-    getIndexType(), documented below. The encoding is eventually used by
-    a `sparse compiler` pass to generate sparse code fully automatically
+    on sparsity properties of tensors. The encoding is eventually used by a
+    `sparse compiler` pass to generate sparse code fully automatically
     for all tensor expressions that involve tensors with a sparse encoding.
     Compiler passes that run before this sparse compiler pass need to be
     aware of the semantics of tensor types with such an encoding.
   }];
 
-  // All data is stored in a dictionary, interpreted by the methods below.
+  // Data in sparse tensor encoding.
   let parameters = (
     ins
-    "DictionaryAttr":$dict
-  );
-
-  let extraClassDeclaration = [{
-    // Dimension level types that define sparse tensors:
-    //   Dense      - dimension is dense, every entry is stored
-    //   Compressed - dimension is sparse, only nonzeros are stored
-    //   Singleton  - dimension contains single coordinate, no siblings
-    enum class DimLevelType {
-      Dense, Compressed, Singleton
-    };
-
-    // Returns the dimension level type in the given dimension `dim`
-    // of this tensor type. The choices, defined by the `DimLevelType`
-    // enum, are `dense` (the dimension should be stored in its entirety),
+    // A dimension level type for each dimension of a tensor type.
+    // The choices are `dense` (dimension should be stored in its entirety),
     // `compressed` (only non-zero regions or elements should be stored),
     // or `singleton` (no sibling elements for parent).
-    DimLevelType getDimLevelType(unsigned dim) const;
-
-    // Returns the dimension order of this tensor type as an AffineMap.
+    ArrayRefParameter<
+      "SparseTensorEncodingAttr::DimLevelType",
+      "Per-dimension level type"
+      >: $dimLevelType,
+    // A dimension order on the indices of this tensor type.
     // Unlike dense storage, most sparse storage schemes do not provide
     // fast random access. This affine map specifies the order of
     // dimensions that should be supported by the sparse storage scheme
     // (e.g. (i,j) -> (i,j) requests 2-d row-wise and (i,j) -> (j,i)
     // requests 2-d column-wise storage).
     // TODO: block structure with higher-dim inputs
-    AffineMap getDimOrdering() const;
-
-    // Returns the required bit width for pointer storage. A narrow width
-    // reduces the memory footprint of overhead storage, as long as the
-    // width suffices to define the total required range (viz. the maximum
+    "AffineMap":$dimOrdering,
+    // The required bit width for pointer storage. A narrow width reduces
+    // the memory footprint of overhead storage, as long as the width
+    // suffices to define the total required range (viz. the maximum
     // number of stored entries over all indirection dimensions). The choices
     // are `8`, `16`, `32`, `64`, or `0` for a native width.
-    unsigned getPointerBitWidth() const;
-
-    // Returns the required bit width for index storage. A narrow width
-    // reduces the memory footprint of overhead storage, as long as the
-    // width suffices to define the total required range (viz. the maximum
+    "unsigned":$pointerBitWidth,
+    // The required bit width for index storage. A narrow width reduces
+    // the memory footprint of overhead storage, as long as the width
+    // suffices to define the total required range (viz. the maximum
     // value of each tensor index over all dimensions). The choices are `8`,
     // `16`, `32`, `64`, or `0` for a native width.
-    unsigned getIndexBitWidth() const;
+    "unsigned":$indexBitWidth
+  );
+
+  let genVerifyDecl = 1;
+
+  let extraClassDeclaration = [{
+    // Dimension level types that define sparse tensors:
+    //   Dense      - dimension is dense, every entry is stored
+    //   Compressed - dimension is sparse, only nonzeros are stored
+    //   Singleton  - dimension contains single coordinate, no siblings
+    enum class DimLevelType {
+      Dense, Compressed, Singleton
+    };
   }];
 }
 
-#endif // LLVMIR_ATTRDEFS
+#endif // SPARSETENSOR_ATTRDEFS
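
Since the dictionary parameter is replaced by the typed parameter list above, mlir-tblgen now derives a struct-like builder and one accessor per parameter. A minimal construction sketch (illustrative, not part of this patch; it assumes the generated builder follows the declared parameter order, and the helper name is hypothetical):

  #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
  #include "mlir/IR/AffineMap.h"
  #include "mlir/IR/MLIRContext.h"
  #include "llvm/ADT/SmallVector.h"

  using namespace mlir;
  using namespace mlir::sparse_tensor;

  static SparseTensorEncodingAttr buildCSREncoding(MLIRContext *context) {
    using DLT = SparseTensorEncodingAttr::DimLevelType;
    // Per-dimension level types: outer dense, inner compressed (CSR-style).
    SmallVector<DLT, 2> dlt{DLT::Dense, DLT::Compressed};
    // Identity dimension ordering (i, j) -> (i, j), i.e. row-wise storage.
    AffineMap order = AffineMap::getMultiDimIdentityMap(2, context);
    return SparseTensorEncodingAttr::get(context, dlt, order,
                                         /*pointerBitWidth=*/64,
                                         /*indexBitWidth=*/64);
  }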

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 5a2b1b129df17..db8506416f208 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -9,6 +9,7 @@
 #ifndef SPARSETENSOR_OPS
 #define SPARSETENSOR_OPS
 
+include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td"
 include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 

diff --git a/mlir/include/mlir/Dialect/Tensor/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Tensor/IR/CMakeLists.txt
index 2f373aaab643b..cd14fe5c04561 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/Tensor/IR/CMakeLists.txt
@@ -1,7 +1,2 @@
 add_mlir_dialect(TensorOps tensor)
 add_mlir_doc(TensorOps TensorOps Dialects/ -gen-dialect-doc)
-
-set(LLVM_TARGET_DEFINITIONS TensorAttrDefs.td)
-mlir_tablegen(TensorAttrDefs.h.inc -gen-attrdef-decls)
-mlir_tablegen(TensorAttrDefs.cpp.inc -gen-attrdef-defs)
-add_public_tablegen_target(MLIRTensorAttrDefsIncGen)

diff --git a/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h b/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
index 8fa9a79feacfc..830b682c602b6 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
+++ b/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
@@ -13,7 +13,6 @@
 #include "mlir/IR/Dialect.h"
 #include "mlir/IR/OpDefinition.h"
 #include "mlir/IR/OpImplementation.h"
-#include "mlir/IR/TensorEncoding.h"
 #include "mlir/Interfaces/CastInterfaces.h"
 #include "mlir/Interfaces/ControlFlowInterfaces.h"
 #include "mlir/Interfaces/SideEffectInterfaces.h"
@@ -24,13 +23,6 @@
 
 #include "mlir/Dialect/Tensor/IR/TensorOpsDialect.h.inc"
 
-//===----------------------------------------------------------------------===//
-// Tensor Dialect Attributes
-//===----------------------------------------------------------------------===//
-
-#define GET_ATTRDEF_CLASSES
-#include "mlir/Dialect/Tensor/IR/TensorAttrDefs.h.inc"
-
 //===----------------------------------------------------------------------===//
 // Tensor Dialect Operations
 //===----------------------------------------------------------------------===//

diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
index bf0890fea49be..a0e473873d27a 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -10,7 +10,6 @@
 #define TENSOR_OPS
 
 include "mlir/Dialect/Tensor/IR/TensorBase.td"
-include "mlir/Dialect/Tensor/IR/TensorAttrDefs.td"
 include "mlir/Interfaces/CastInterfaces.td"
 include "mlir/Interfaces/ControlFlowInterfaces.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"

diff --git a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
index 4cc81a3e3b39c..6b94ee010b7cd 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
@@ -5,9 +5,11 @@ add_mlir_dialect_library(MLIRSparseTensor
   ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor
 
   DEPENDS
+  MLIRSparseTensorAttrDefsIncGen
   MLIRSparseTensorOpsIncGen
 
   LINK_LIBS PUBLIC
   MLIRDialect
   MLIRIR
+  MLIRSupport
   )

diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 90db6b2d4b5f1..a41ed2ecd2416 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -9,12 +9,184 @@
 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 
 #include "mlir/IR/Builders.h"
+#include "mlir/IR/DialectImplementation.h"
 #include "mlir/IR/OpImplementation.h"
+#include "llvm/ADT/TypeSwitch.h"
 
 using namespace mlir;
 using namespace mlir::sparse_tensor;
 
+//===----------------------------------------------------------------------===//
+// SparseTensorDialect Attribute Methods
+//===----------------------------------------------------------------------===//
+
+#define GET_ATTRDEF_CLASSES
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc"
+
+static bool acceptBitWidth(unsigned bitWidth) {
+  switch (bitWidth) {
+  case 0:
+  case 8:
+  case 16:
+  case 32:
+  case 64:
+    return true;
+  default:
+    return false;
+  }
+}
+
+Attribute SparseTensorEncodingAttr::parse(MLIRContext *context,
+                                          DialectAsmParser &parser, Type type) {
+  if (failed(parser.parseLess()))
+    return {};
+  // Parse the data as a dictionary.
+  DictionaryAttr dict;
+  if (failed(parser.parseAttribute(dict)))
+    return {};
+  if (failed(parser.parseGreater()))
+    return {};
+  // Process the data from the parsed dictionary value into struct-like data.
+  SmallVector<SparseTensorEncodingAttr::DimLevelType, 4> dlt;
+  AffineMap map = {};
+  unsigned ptr = 0;
+  unsigned ind = 0;
+  for (const NamedAttribute &attr : dict) {
+    if (attr.first == "dimLevelType") {
+      auto arrayAttr = attr.second.dyn_cast<ArrayAttr>();
+      if (!arrayAttr) {
+        parser.emitError(parser.getNameLoc(),
+                         "expected an array for dimension level types");
+        return {};
+      }
+      for (unsigned i = 0, e = arrayAttr.size(); i < e; i++) {
+        auto strAttr = arrayAttr[i].dyn_cast<StringAttr>();
+        if (!strAttr) {
+          parser.emitError(parser.getNameLoc(),
+                           "expected a string value in dimension level types");
+          return {};
+        }
+        auto strVal = strAttr.getValue();
+        if (strVal == "dense") {
+          dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Dense);
+        } else if (strVal == "compressed") {
+          dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Compressed);
+        } else if (strVal == "singleton") {
+          dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Singleton);
+        } else {
+          parser.emitError(parser.getNameLoc(),
+                           "unexpected dimension level type: ")
+              << strVal;
+          return {};
+        }
+      }
+    } else if (attr.first == "dimOrdering") {
+      auto affineAttr = attr.second.dyn_cast<AffineMapAttr>();
+      if (!affineAttr) {
+        parser.emitError(parser.getNameLoc(),
+                         "expected an affine map for dimension ordering");
+        return {};
+      }
+      map = affineAttr.getValue();
+    } else if (attr.first == "pointerBitWidth") {
+      auto intAttr = attr.second.dyn_cast<IntegerAttr>();
+      if (!intAttr) {
+        parser.emitError(parser.getNameLoc(),
+                         "expected an integral pointer bitwidth");
+        return {};
+      }
+      ptr = intAttr.getInt();
+    } else if (attr.first == "indexBitWidth") {
+      auto intAttr = attr.second.dyn_cast<IntegerAttr>();
+      if (!intAttr) {
+        parser.emitError(parser.getNameLoc(),
+                         "expected an integral index bitwidth");
+        return {};
+      }
+      ind = intAttr.getInt();
+    } else {
+      parser.emitError(parser.getNameLoc(), "unexpected key: ")
+          << attr.first.str();
+      return {};
+    }
+  }
+  // Construct struct-like storage for attribute.
+  return parser.getChecked<SparseTensorEncodingAttr>(context, dlt, map, ptr,
+                                                     ind);
+}
+
+void SparseTensorEncodingAttr::print(DialectAsmPrinter &printer) const {
+  // Print the struct-like storage in dictionary fashion.
+  printer << "encoding<{ dimLevelType = [ ";
+  for (unsigned i = 0, e = getDimLevelType().size(); i < e; i++) {
+    switch (getDimLevelType()[i]) {
+    case DimLevelType::Dense:
+      printer << "\"dense\"";
+      break;
+    case DimLevelType::Compressed:
+      printer << "\"compressed\"";
+      break;
+    case DimLevelType::Singleton:
+      printer << "\"singleton\"";
+      break;
+    }
+    if (i != e - 1)
+      printer << ", ";
+  }
+  printer << " ]";
+  if (getDimOrdering())
+    printer << ", dimOrdering = affine_map<" << getDimOrdering() << ">";
+  printer << ", pointerBitWidth = " << getPointerBitWidth()
+          << ", indexBitWidth = " << getIndexBitWidth() << " }>";
+}
+
+LogicalResult SparseTensorEncodingAttr::verify(
+    function_ref<InFlightDiagnostic()> emitError,
+    ArrayRef<DimLevelType> dimLevelType, AffineMap dimOrdering,
+    unsigned pointerBitWidth, unsigned indexBitWidth) {
+  if (!acceptBitWidth(pointerBitWidth))
+    return emitError() << "unexpected pointer bitwidth: " << pointerBitWidth;
+  if (!acceptBitWidth(indexBitWidth))
+    return emitError() << "unexpected index bitwidth: " << indexBitWidth;
+  if (dimOrdering) {
+    if (!dimOrdering.isPermutation())
+      return emitError()
+             << "expected a permutation affine map for dimension ordering";
+    if (dimOrdering.getNumResults() != dimLevelType.size())
+      return emitError() << "unexpected mismatch in ordering and dimension "
+                            "level types size";
+  }
+  return success();
+}
+
+LogicalResult SparseTensorEncodingAttr::verifyEncoding(
+    ArrayRef<int64_t> shape, Type elementType,
+    function_ref<InFlightDiagnostic()> emitError) const {
+  // Check structural integrity.
+  if (failed(verify(emitError, getDimLevelType(), getDimOrdering(),
+                    getPointerBitWidth(), getIndexBitWidth())))
+    return failure();
+  // Check integrity with tensor type specifics. Dimension ordering is optional,
+  // but we always should have dimension level types for the full rank.
+  unsigned size = shape.size();
+  if (getDimOrdering() && getDimOrdering().getNumResults() != size)
+    return emitError() << "expected an affine map of size " << size
+                       << " for dimension ordering";
+  if (getDimLevelType().size() != size)
+    return emitError() << "expected an array of size " << size
+                       << " for dimension level types";
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
+// SparseTensorDialect Methods
+//===----------------------------------------------------------------------===//
+
 void SparseTensorDialect::initialize() {
+  addAttributes<
+#define GET_ATTRDEF_LIST
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc"
+      >();
   addOperations<
 #define GET_OP_LIST
 #include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"
@@ -23,3 +195,23 @@ void SparseTensorDialect::initialize() {
 
 #define GET_OP_CLASSES
 #include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"
+
+Attribute SparseTensorDialect::parseAttribute(DialectAsmParser &parser,
+                                              Type type) const {
+  StringRef attrTag;
+  if (failed(parser.parseKeyword(&attrTag)))
+    return Attribute();
+  Attribute attr;
+  auto parseResult =
+      generatedAttributeParser(getContext(), parser, attrTag, type, attr);
+  if (parseResult.hasValue())
+    return attr;
+  parser.emitError(parser.getNameLoc(), "unknown sparse tensor attribute");
+  return Attribute();
+}
+
+void SparseTensorDialect::printAttribute(Attribute attr,
+                                         DialectAsmPrinter &printer) const {
+  if (succeeded(generatedAttributePrinter(attr, printer)))
+    return;
+}
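
The generated verifier hook can also be invoked directly. A small illustrative sketch, not part of this patch, that trips the same "unexpected pointer bitwidth" diagnostic exercised by the new invalid_encoding.mlir test below; it relies on the static verify() declared above and uses an UnknownLoc merely to anchor the diagnostic:

  #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
  #include "mlir/IR/AffineMap.h"
  #include "mlir/IR/Diagnostics.h"
  #include "mlir/IR/Location.h"
  #include "mlir/IR/MLIRContext.h"

  using namespace mlir;
  using namespace mlir::sparse_tensor;

  static LogicalResult checkBitWidths(MLIRContext *context) {
    using DLT = SparseTensorEncodingAttr::DimLevelType;
    DLT dlt[] = {DLT::Dense, DLT::Compressed};
    auto diag = [&]() { return emitError(UnknownLoc::get(context)); };
    // Fails with "unexpected pointer bitwidth: 42"; only 0/8/16/32/64 pass.
    return SparseTensorEncodingAttr::verify(diag, dlt, AffineMap(),
                                            /*pointerBitWidth=*/42,
                                            /*indexBitWidth=*/0);
  }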

diff --git a/mlir/lib/Dialect/Tensor/IR/CMakeLists.txt b/mlir/lib/Dialect/Tensor/IR/CMakeLists.txt
index 108b7f2470cb2..de650995ebb60 100644
--- a/mlir/lib/Dialect/Tensor/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Tensor/IR/CMakeLists.txt
@@ -7,7 +7,6 @@ add_mlir_dialect_library(MLIRTensor
 
   DEPENDS
   MLIRTensorOpsIncGen
-  MLIRTensorAttrDefsIncGen
 
   LINK_COMPONENTS
   Core

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp b/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp
index bdc2fe345b9e5..46a348bca8f9a 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp
@@ -7,142 +7,11 @@
 //===----------------------------------------------------------------------===//
 
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
-#include "mlir/IR/DialectImplementation.h"
 #include "mlir/Transforms/InliningUtils.h"
-#include "llvm/ADT/TypeSwitch.h"
 
 using namespace mlir;
 using namespace mlir::tensor;
 
-//===----------------------------------------------------------------------===//
-// TableGen'd Attributes Methods
-//===----------------------------------------------------------------------===//
-
-#define GET_ATTRDEF_CLASSES
-#include "mlir/Dialect/Tensor/IR/TensorAttrDefs.cpp.inc"
-
-// Dictionary keys.
-static constexpr StringRef getSparseDimLevelTypeAttrName() {
-  return "sparseDimLevelType";
-}
-static constexpr StringRef getSparseDimOrderingAttrName() {
-  return "sparseDimOrdering";
-}
-static constexpr StringRef getSparsePointerBitWidthAttrName() {
-  return "sparsePointerBitWidth";
-}
-static constexpr StringRef getSparseIndexBitWidthAttrName() {
-  return "sparseIndexBitWidth";
-}
-
-// Dictionary values.
-static constexpr StringRef getDenseDimLevelTypeVal() { return "dense"; }
-static constexpr StringRef getCompressedDimLevelTypeVal() {
-  return "compressed";
-}
-static constexpr StringRef getSingletonDimLevelTypeVal() { return "singleton"; }
-
-Attribute SparseTensorEncodingAttr::parse(MLIRContext *context,
-                                          DialectAsmParser &parser, Type type) {
-  if (failed(parser.parseLess()))
-    return {};
-  DictionaryAttr dict;
-  if (failed(parser.parseAttribute(dict)))
-    return {};
-  if (failed(parser.parseGreater()))
-    return {};
-  return SparseTensorEncodingAttr::get(context, dict);
-}
-
-void SparseTensorEncodingAttr::print(DialectAsmPrinter &printer) const {
-  printer << "sparse<" << getDict() << ">";
-}
-
-LogicalResult SparseTensorEncodingAttr::verifyEncoding(
-    llvm::ArrayRef<int64_t> shape, Type elementType,
-    llvm::function_ref<mlir::InFlightDiagnostic()> emitError) const {
-  unsigned size = shape.size();
-  for (const NamedAttribute &attr : getDict()) {
-    if (attr.first == getSparseDimLevelTypeAttrName()) {
-      // Dimension level type verification.
-      auto arrayAttr = attr.second.dyn_cast<ArrayAttr>();
-      if (!arrayAttr || size != static_cast<int64_t>(arrayAttr.size()))
-        return emitError() << "expected an array of size " << size
-                           << " for dimension level types";
-      for (unsigned i = 0; i < size; i++) {
-        auto strAttr = arrayAttr[i].dyn_cast<StringAttr>();
-        if (!strAttr)
-          return emitError()
-                 << "expected string value in dimension level types";
-        auto strVal = strAttr.getValue();
-        if (strVal != getDenseDimLevelTypeVal() &&
-            strVal != getCompressedDimLevelTypeVal() &&
-            strVal != getSingletonDimLevelTypeVal())
-          return emitError() << "unexpected dimension level type: " << strAttr;
-      }
-    } else if (attr.first == getSparseDimOrderingAttrName()) {
-      // Dimension order verification.
-      auto affineAttr = attr.second.dyn_cast<AffineMapAttr>();
-      if (!affineAttr)
-        return emitError() << "expected an affine map for dimension ordering";
-      AffineMap map = affineAttr.getValue();
-      if (size != map.getNumResults() || !map.isPermutation())
-        return emitError() << "expected a permutation affine map of size "
-                           << size << " for dimension ordering";
-    } else if (attr.first == getSparsePointerBitWidthAttrName() ||
-               attr.first == getSparseIndexBitWidthAttrName()) {
-      // Pointer or index bitwidth verification.
-      auto intAttr = attr.second.dyn_cast<IntegerAttr>();
-      if (!intAttr)
-        return emitError() << "expected an integral bitwidth";
-      switch (intAttr.getInt()) {
-      case 0:
-      case 8:
-      case 16:
-      case 32:
-      case 64:
-        continue;
-      default:
-        return emitError() << "unexpected bitwidth: " << intAttr.getInt();
-      }
-    } else {
-      return emitError() << "unexpected key: " << attr.first.str();
-    }
-  }
-  return success();
-}
-
-SparseTensorEncodingAttr::DimLevelType
-SparseTensorEncodingAttr::getDimLevelType(unsigned dim) const {
-  if (auto value = getDict().get(getSparseDimLevelTypeAttrName())) {
-    auto strVal =
-        value.dyn_cast<ArrayAttr>()[dim].cast<StringAttr>().getValue();
-    if (strVal == getCompressedDimLevelTypeVal())
-      return DimLevelType::Compressed;
-    if (strVal == getSingletonDimLevelTypeVal())
-      return DimLevelType::Singleton;
-  }
-  return DimLevelType::Dense;
-}
-
-AffineMap SparseTensorEncodingAttr::getDimOrdering() const {
-  if (auto value = getDict().get(getSparseDimOrderingAttrName()))
-    return value.cast<AffineMapAttr>().getValue();
-  return {};
-}
-
-unsigned SparseTensorEncodingAttr::getPointerBitWidth() const {
-  if (auto value = getDict().get(getSparsePointerBitWidthAttrName()))
-    return value.cast<IntegerAttr>().getInt();
-  return 0;
-}
-
-unsigned SparseTensorEncodingAttr::getIndexBitWidth() const {
-  if (auto value = getDict().get(getSparseIndexBitWidthAttrName()))
-    return value.cast<IntegerAttr>().getInt();
-  return 0;
-}
-
 //===----------------------------------------------------------------------===//
 // TensorDialect Dialect Interfaces
 //===----------------------------------------------------------------------===//
@@ -166,33 +35,9 @@ struct TensorInlinerInterface : public DialectInlinerInterface {
 //===----------------------------------------------------------------------===//
 
 void TensorDialect::initialize() {
-  addAttributes<
-#define GET_ATTRDEF_LIST
-#include "mlir/Dialect/Tensor/IR/TensorAttrDefs.cpp.inc"
-      >();
   addOperations<
 #define GET_OP_LIST
 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
       >();
   addInterfaces<TensorInlinerInterface>();
 }
-
-Attribute TensorDialect::parseAttribute(DialectAsmParser &parser,
-                                        Type type) const {
-  StringRef attrTag;
-  if (failed(parser.parseKeyword(&attrTag)))
-    return Attribute();
-  Attribute attr;
-  auto parseResult =
-      generatedAttributeParser(getContext(), parser, attrTag, type, attr);
-  if (parseResult.hasValue())
-    return attr;
-  parser.emitError(parser.getNameLoc(), "unknown tensor attribute");
-  return Attribute();
-}
-
-void TensorDialect::printAttribute(::mlir::Attribute attr,
-                                   ::mlir::DialectAsmPrinter &printer) const {
-  if (succeeded(generatedAttributePrinter(attr, printer)))
-    return;
-}

diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
new file mode 100644
index 0000000000000..af2368b86c956
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -0,0 +1,56 @@
+// RUN: mlir-opt <%s -split-input-file -verify-diagnostics
+
+// -----
+
+#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
+func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
+
+// -----
+
+#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"], dimOrdering = affine_map<(i) -> (i)>}> // expected-error {{unexpected mismatch in ordering and dimension level types size}}
+func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{dimLevelType = [1]}> // expected-error {{expected a string value in dimension level types}}
+func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{dimLevelType = ["strange"]}> // expected-error {{unexpected dimension level type: strange}}
+func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{dimOrdering = "wrong"}> // expected-error {{expected an affine map for dimension ordering}}
+func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{dimOrdering = affine_map<(i,j) -> (i,i)>}> // expected-error {{expected a permutation affine map for dimension ordering}}
+func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{pointerBitWidth = "x"}> // expected-error {{expected an integral pointer bitwidth}}
+func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{pointerBitWidth = 42}> // expected-error {{unexpected pointer bitwidth: 42}}
+func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{indexBitWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
+func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{indexBitWidth = 128}> // expected-error {{unexpected index bitwidth: 128}}
+func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{key = 1}> // expected-error {{unexpected key: key}}
+func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()

diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
new file mode 100644
index 0000000000000..70a5ef36b3497
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -0,0 +1,16 @@
+// RUN: mlir-opt <%s | mlir-opt | FileCheck %s
+
+// CHECK-LABEL: func private @sparse_1d_tensor(
+// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 0, indexBitWidth = 0 }>>)
+func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>>)
+
+#CSR = #sparse_tensor.encoding<{
+  dimLevelType = [ "dense", "compressed" ],
+  dimOrdering = affine_map<(i,j) -> (i,j)>,
+  pointerBitWidth = 64,
+  indexBitWidth = 64
+}>
+
+// CHECK-LABEL: func private @sparse_2d_tensor(
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>)
+func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)
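
The encoding reaches verifyEncoding() through the enclosing tensor type, as the CHECK lines above show. A construction sketch in C++ (illustrative only; it assumes the RankedTensorType::get overload that accepts the optional encoding attribute introduced with the tensor-encoding support, and the helper name is made up):

  #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
  #include "mlir/IR/AffineMap.h"
  #include "mlir/IR/BuiltinTypes.h"
  #include "mlir/IR/MLIRContext.h"

  using namespace mlir;
  using namespace mlir::sparse_tensor;

  static RankedTensorType makeSparseVectorType(MLIRContext *context) {
    using DLT = SparseTensorEncodingAttr::DimLevelType;
    DLT dlt[] = {DLT::Compressed};
    auto enc = SparseTensorEncodingAttr::get(context, dlt, AffineMap(),
                                             /*pointerBitWidth=*/0,
                                             /*indexBitWidth=*/0);
    // Prints as tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType =
    // [ "compressed" ], pointerBitWidth = 0, indexBitWidth = 0 }>>.
    return RankedTensorType::get({32}, FloatType::getF64(context), enc);
  }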

diff --git a/mlir/test/Dialect/Tensor/invalid_sparse_tensor.mlir b/mlir/test/Dialect/Tensor/invalid_sparse_tensor.mlir
deleted file mode 100644
index b317c3b684e99..0000000000000
--- a/mlir/test/Dialect/Tensor/invalid_sparse_tensor.mlir
+++ /dev/null
@@ -1,46 +0,0 @@
-// RUN: mlir-opt <%s -split-input-file -verify-diagnostics
-
-// -----
-
-#a = #tensor.sparse<{sparseDimLevelType = [1,2]}>
-func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
-
-// -----
-
-#a = #tensor.sparse<{sparseDimLevelType = [1]}>
-func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected string value in dimension level types}}
-
-// -----
-
-#a = #tensor.sparse<{sparseDimLevelType = ["strange"]}>
-func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{unexpected dimension level type: "strange"}}
-
-// -----
-
-#a = #tensor.sparse<{sparseDimOrdering = "wrong"}>
-func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an affine map for dimension ordering}}
-
-// -----
-
-#a = #tensor.sparse<{sparseDimOrdering = affine_map<(i,j) -> (i,i)>}>
-func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{expected a permutation affine map of size 2 for dimension ordering}}
-
-// -----
-
-#a = #tensor.sparse<{sparsePointerBitWidth = 42}>
-func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{unexpected bitwidth: 42}}
-
-// -----
-
-#a = #tensor.sparse<{sparseIndexBitWidth = "not really"}>
-func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{expected an integral bitwidth}}
-
-// -----
-
-#a = #tensor.sparse<{sparseIndexBitWidth = 128}>
-func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{unexpected bitwidth: 128}}
-
-// -----
-
-#a = #tensor.sparse<{key = 1}>
-func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{unexpected key: key}}

diff --git a/mlir/test/Dialect/Tensor/valid_sparse.mlir b/mlir/test/Dialect/Tensor/valid_sparse.mlir
deleted file mode 100644
index 0f010e5ad9b70..0000000000000
--- a/mlir/test/Dialect/Tensor/valid_sparse.mlir
+++ /dev/null
@@ -1,14 +0,0 @@
-// RUN: mlir-opt <%s | mlir-opt | FileCheck %s
-
-// CHECK: func private @sparse_1d_tensor(tensor<32xf64, #tensor.sparse<{sparseDimLevelType = ["compressed"]}>>)
-func private @sparse_1d_tensor(tensor<32xf64, #tensor.sparse<{sparseDimLevelType = ["compressed"]}>>)
-
-#CSR = #tensor.sparse<{
-  sparseDimLevelType = [ "dense", "compressed" ],
-  sparseDimOrdering = affine_map<(i,j) -> (i,j)>,
-  sparseIndexBitWidth = 64,
-  sparsePointerBitWidth = 64
-}>
-
-// CHECK: func private @sparse_2d_tensor(tensor<?x?xf32, #tensor.sparse<{sparseDimLevelType = ["dense", "compressed"], sparseDimOrdering = affine_map<(d0, d1) -> (d0, d1)>, sparseIndexBitWidth = 64 : i64, sparsePointerBitWidth = 64 : i64}>>)
-func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)


        

