[Mlir-commits] [mlir] 7714b40 - [mlir] introduce "encoding" attribute to tensor type

Aart Bik llvmlistbot at llvm.org
Mon Apr 12 10:37:36 PDT 2021


Author: Aart Bik
Date: 2021-04-12T10:37:15-07:00
New Revision: 7714b405a0de47e461c77fa8dbd2c21f0d34bbf2

URL: https://github.com/llvm/llvm-project/commit/7714b405a0de47e461c77fa8dbd2c21f0d34bbf2
DIFF: https://github.com/llvm/llvm-project/commit/7714b405a0de47e461c77fa8dbd2c21f0d34bbf2.diff

LOG: [mlir] introduce "encoding" attribute to tensor type

This CL introduces a generic attribute (called "encoding") on ranked tensor types.
The attribute currently does not carry any concrete information, but the type
system already correctly distinguishes types that differ only in their encoding,
e.g. tensor<8xi1, 123> != tensor<8xi1, 321>. The attribute will be given meaning
through an attribute interface in subsequent CLs.
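
As a quick illustration of the uniquing claim above, here is a minimal,
hypothetical C++ sketch (not part of this commit); the shape, element type,
and integer encodings are placeholders that mirror the example:

```
#include <cassert>

#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

using namespace mlir;

int main() {
  MLIRContext ctx;
  Builder b(&ctx);
  // Two tensor types that differ only in their encoding attribute.
  RankedTensorType t1 =
      RankedTensorType::get({8}, b.getI1Type(), b.getI64IntegerAttr(123));
  RankedTensorType t2 =
      RankedTensorType::get({8}, b.getI1Type(), b.getI64IntegerAttr(321));
  // The encoding is part of the type key, so the two types are distinct.
  assert(t1 != t2);
  // Omitting the encoding yields a plain tensor type, exactly as before.
  RankedTensorType t3 = RankedTensorType::get({8}, b.getI1Type());
  assert(!t3.getEncoding());
  return 0;
}
```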

See ongoing discussion on discourse:

[RFC] Introduce a sparse tensor type to core MLIR
https://llvm.discourse.group/t/rfc-introduce-a-sparse-tensor-type-to-core-mlir/2944

A sparse tensor type will look something like this:

```
// named alias with all properties we hold dear:
#CSR = {
  // individual named attributes
}

// actual sparse tensor type:
tensor<?x?xf64, #CSR>
```

I see the following rough five-step plan going forward:

(1) introduce this "encoding" attribute in this CL, currently still empty
(2) introduce an attribute interface that gives it "meaning", focused on sparsity in the first phase
(3) rewrite the sparse compiler to use the new type, removing the linalg interface and "glue"
(4) teach passes to deal with the new attribute, either by rejecting/asserting on a non-empty attribute as the simplest solution, or by doing a meaningful rewrite in the longer run (a sketch of the former follows this list)
(5) add front-end support, document, test, and publicize the new features; extend the "encoding" meaning to other domains if useful
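
For the "reject on a non-empty attribute" fallback mentioned in step (4), a pass
could use a guard roughly like the following hypothetical C++ sketch; the
function name and diagnostic text are made up for illustration:

```
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LogicalResult.h"

using namespace mlir;

// Hypothetical guard: bail out of a pass that has not been taught about
// tensor encodings yet, rather than silently producing wrong code.
static LogicalResult rejectEncodedTensors(Operation *op) {
  for (Type type : op->getOperandTypes()) {
    auto tensorTy = type.dyn_cast<RankedTensorType>();
    if (tensorTy && tensorTy.getEncoding())
      return op->emitOpError("tensor encoding is not supported by this pass");
  }
  return success();
}
```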

Reviewed By: stellaraccident, bondhugula

Differential Revision: https://reviews.llvm.org/D99548

Added: 
    

Modified: 
    mlir/include/mlir-c/BuiltinAttributes.h
    mlir/include/mlir-c/BuiltinTypes.h
    mlir/include/mlir/IR/BuiltinTypes.td
    mlir/lib/Bindings/Python/IRAttributes.cpp
    mlir/lib/Bindings/Python/IRTypes.cpp
    mlir/lib/CAPI/IR/BuiltinAttributes.cpp
    mlir/lib/CAPI/IR/BuiltinTypes.cpp
    mlir/lib/IR/AsmPrinter.cpp
    mlir/lib/IR/BuiltinTypes.cpp
    mlir/lib/Parser/TypeParser.cpp
    mlir/test/CAPI/ir.c
    mlir/test/IR/invalid.mlir
    mlir/test/IR/parser.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir-c/BuiltinAttributes.h b/mlir/include/mlir-c/BuiltinAttributes.h
index 29df9cf60b8aa..c85825c8d91d9 100644
--- a/mlir/include/mlir-c/BuiltinAttributes.h
+++ b/mlir/include/mlir-c/BuiltinAttributes.h
@@ -22,6 +22,9 @@
 extern "C" {
 #endif
 
+/// Returns an empty attribute.
+MLIR_CAPI_EXPORTED MlirAttribute mlirAttributeGetNull();
+
 //===----------------------------------------------------------------------===//
 // Affine map attribute.
 //===----------------------------------------------------------------------===//

diff --git a/mlir/include/mlir-c/BuiltinTypes.h b/mlir/include/mlir-c/BuiltinTypes.h
index b2ec37c9deb64..7d45452af5f69 100644
--- a/mlir/include/mlir-c/BuiltinTypes.h
+++ b/mlir/include/mlir-c/BuiltinTypes.h
@@ -188,17 +188,20 @@ MLIR_CAPI_EXPORTED bool mlirTypeIsARankedTensor(MlirType type);
 /// Checks whether the given type is an unranked tensor type.
 MLIR_CAPI_EXPORTED bool mlirTypeIsAUnrankedTensor(MlirType type);
 
-/// Creates a tensor type of a fixed rank with the given shape and element type
-/// in the same context as the element type. The type is owned by the context.
+/// Creates a tensor type of a fixed rank with the given shape, element type,
+/// and optional encoding in the same context as the element type. The type is
+/// owned by the context. Tensor types without any specific encoding field
+/// should assign mlirAttributeGetNull() to this parameter.
 MLIR_CAPI_EXPORTED MlirType mlirRankedTensorTypeGet(intptr_t rank,
                                                     const int64_t *shape,
-                                                    MlirType elementType);
+                                                    MlirType elementType,
+                                                    MlirAttribute encoding);
 
 /// Same as "mlirRankedTensorTypeGet" but returns a nullptr wrapping MlirType on
 /// illegal arguments, emitting appropriate diagnostics.
-MLIR_CAPI_EXPORTED MlirType
-mlirRankedTensorTypeGetChecked(MlirLocation loc, intptr_t rank,
-                               const int64_t *shape, MlirType elementType);
+MLIR_CAPI_EXPORTED MlirType mlirRankedTensorTypeGetChecked(
+    MlirLocation loc, intptr_t rank, const int64_t *shape, MlirType elementType,
+    MlirAttribute encoding);
 
 /// Creates an unranked tensor type with the given element type in the same
 /// context as the element type. The type is owned by the context.
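
For C API clients, the updated signature would be exercised roughly as in the
hypothetical C++ sketch below (the headers are extern "C", so they can be
consumed from C++ directly); the helper name, shape, and element type are
arbitrary placeholders:

```
#include <cstdint>

#include "mlir-c/BuiltinAttributes.h"
#include "mlir-c/BuiltinTypes.h"
#include "mlir-c/IR.h"

// Hypothetical helper: build a plain ranked tensor type through the C API,
// passing the null attribute since no specific encoding is wanted.
MlirType makePlainTensorType(MlirContext ctx) {
  int64_t shape[] = {8, 16};
  MlirType f32 = mlirF32TypeGet(ctx);
  return mlirRankedTensorTypeGet(/*rank=*/2, shape, f32, mlirAttributeGetNull());
}
```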

diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
index f271c56f41627..f266f61e182ea 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.td
+++ b/mlir/include/mlir/IR/BuiltinTypes.td
@@ -636,9 +636,10 @@ def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "TensorType"> {
     Syntax:
 
     ```
-    tensor-type ::= `tensor` `<` dimension-list type `>`
+    tensor-type ::= `tensor` `<` dimension-list type (`,` encoding)? `>`
     dimension-list ::= (dimension `x`)*
     dimension ::= `?` | decimal-literal
+    encoding ::= attribute-value
     ```
 
     Values with tensor type represents aggregate N-dimensional data values, and
@@ -654,6 +655,14 @@ def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "TensorType"> {
     [`dim` operation](Dialects/Standard.md#dim-operation) returns the size of a
     dimension from a value of tensor type.
 
+    The `encoding` attribute provides additional information on the tensor.
+    An empty attribute denotes a straightforward tensor without any specific
+    structure. But particular properties, like sparsity or other specific
+    characteristics of the data of the tensor can be encoded through this
+    attribute. The semantics are defined by a type and attribute interface
+    and must be respected by all passes that operate on tensor types.
+    TODO: provide this interface, and document it further.
+
     Note: hexadecimal integer literals are not allowed in tensor type
     declarations to avoid confusion between `0xf32` and `0 x f32`. Zero sizes
     are allowed in tensors and treated as other sizes, e.g.,
@@ -681,18 +690,24 @@ def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "TensorType"> {
 
     // Zero-element tensor of f32 type (hexadecimal literals not allowed here).
     tensor<0xf32>
+
+    // Tensor with an encoding attribute (where #ENCODING is a named alias).
+    tensor<?x?xf64, #ENCODING>
     ```
   }];
   let parameters = (ins
     ArrayRefParameter<"int64_t">:$shape,
-    "Type":$elementType
+    "Type":$elementType,
+    "Attribute":$encoding
   );
 
   let builders = [
     TypeBuilderWithInferredContext<(ins
-      "ArrayRef<int64_t>":$shape, "Type":$elementType
+      "ArrayRef<int64_t>":$shape,
+      "Type":$elementType,
+      CArg<"Attribute", "{}">:$encoding
     ), [{
-      return $_get(elementType.getContext(), shape, elementType);
+      return $_get(elementType.getContext(), shape, elementType, encoding);
     }]>
   ];
   let skipDefaultBuilders = 1;

diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp
index 6f9206c1b9121..b5e3c5c9c94b7 100644
--- a/mlir/lib/Bindings/Python/IRAttributes.cpp
+++ b/mlir/lib/Bindings/Python/IRAttributes.cpp
@@ -502,8 +502,9 @@ class PyDenseElementsAttribute
            MlirType mlirElementType, py::buffer_info &arrayInfo) {
     SmallVector<int64_t, 4> shape(arrayInfo.shape.begin(),
                                   arrayInfo.shape.begin() + arrayInfo.ndim);
-    auto shapedType =
-        mlirRankedTensorTypeGet(shape.size(), shape.data(), mlirElementType);
+    MlirAttribute encodingAttr = mlirAttributeGetNull();
+    auto shapedType = mlirRankedTensorTypeGet(shape.size(), shape.data(),
+                                              mlirElementType, encodingAttr);
     intptr_t numElements = arrayInfo.size;
     const ElementTy *contents = static_cast<const ElementTy *>(arrayInfo.ptr);
     return ctor(shapedType, numElements, contents);

diff --git a/mlir/lib/Bindings/Python/IRTypes.cpp b/mlir/lib/Bindings/Python/IRTypes.cpp
index 96f6bf6666c95..421df4dab7ea0 100644
--- a/mlir/lib/Bindings/Python/IRTypes.cpp
+++ b/mlir/lib/Bindings/Python/IRTypes.cpp
@@ -10,6 +10,7 @@
 
 #include "PybindUtils.h"
 
+#include "mlir-c/BuiltinAttributes.h"
 #include "mlir-c/BuiltinTypes.h"
 
 namespace py = pybind11;
@@ -381,8 +382,9 @@ class PyRankedTensorType
         "get",
         [](std::vector<int64_t> shape, PyType &elementType,
            DefaultingPyLocation loc) {
+          MlirAttribute encodingAttr = mlirAttributeGetNull();
           MlirType t = mlirRankedTensorTypeGetChecked(
-              loc, shape.size(), shape.data(), elementType);
+              loc, shape.size(), shape.data(), elementType, encodingAttr);
           // TODO: Rework error reporting once diagnostic engine is exposed
           // in C API.
           if (mlirTypeIsNull(t)) {

diff --git a/mlir/lib/CAPI/IR/BuiltinAttributes.cpp b/mlir/lib/CAPI/IR/BuiltinAttributes.cpp
index a54006db2d131..7580786def865 100644
--- a/mlir/lib/CAPI/IR/BuiltinAttributes.cpp
+++ b/mlir/lib/CAPI/IR/BuiltinAttributes.cpp
@@ -15,6 +15,8 @@
 
 using namespace mlir;
 
+MlirAttribute mlirAttributeGetNull() { return {nullptr}; }
+
 //===----------------------------------------------------------------------===//
 // Affine map attribute.
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
index c84ced1779f94..1e5fa8a32023b 100644
--- a/mlir/lib/CAPI/IR/BuiltinTypes.cpp
+++ b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
@@ -191,18 +191,19 @@ bool mlirTypeIsAUnrankedTensor(MlirType type) {
 }
 
 MlirType mlirRankedTensorTypeGet(intptr_t rank, const int64_t *shape,
-                                 MlirType elementType) {
+                                 MlirType elementType, MlirAttribute encoding) {
   return wrap(RankedTensorType::get(
-      llvm::makeArrayRef(shape, static_cast<size_t>(rank)),
-      unwrap(elementType)));
+      llvm::makeArrayRef(shape, static_cast<size_t>(rank)), unwrap(elementType),
+      unwrap(encoding)));
 }
 
 MlirType mlirRankedTensorTypeGetChecked(MlirLocation loc, intptr_t rank,
                                         const int64_t *shape,
-                                        MlirType elementType) {
+                                        MlirType elementType,
+                                        MlirAttribute encoding) {
   return wrap(RankedTensorType::getChecked(
       unwrap(loc), llvm::makeArrayRef(shape, static_cast<size_t>(rank)),
-      unwrap(elementType)));
+      unwrap(elementType), unwrap(encoding)));
 }
 
 MlirType mlirUnrankedTensorTypeGet(MlirType elementType) {

diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
index bae9c97b1b40f..8a8f3be997597 100644
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -1866,7 +1866,13 @@ void ModulePrinter::printType(Type type) {
             os << dim;
           os << 'x';
         }
-        os << tensorTy.getElementType() << '>';
+        os << tensorTy.getElementType();
+        // Only print the encoding attribute value if set.
+        if (tensorTy.getEncoding()) {
+          os << ", ";
+          printAttribute(tensorTy.getEncoding());
+        }
+        os << '>';
       })
       .Case<UnrankedTensorType>([&](UnrankedTensorType tensorTy) {
         os << "tensor<*x";

diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index da1453367c7ac..4e2e2310ca01e 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -441,10 +441,12 @@ bool TensorType::isValidElementType(Type type) {
 
 LogicalResult
 RankedTensorType::verify(function_ref<InFlightDiagnostic()> emitError,
-                         ArrayRef<int64_t> shape, Type elementType) {
+                         ArrayRef<int64_t> shape, Type elementType,
+                         Attribute encoding) {
   for (int64_t s : shape)
     if (s < -1)
       return emitError() << "invalid tensor dimension size";
+  // TODO: verify contents of encoding attribute.
   return checkTensorElementType(emitError, elementType);
 }
 

diff --git a/mlir/lib/Parser/TypeParser.cpp b/mlir/lib/Parser/TypeParser.cpp
index d81cb53060b18..0ec36c6085da1 100644
--- a/mlir/lib/Parser/TypeParser.cpp
+++ b/mlir/lib/Parser/TypeParser.cpp
@@ -409,14 +409,23 @@ Type Parser::parseTensorType() {
   // Parse the element type.
   auto elementTypeLoc = getToken().getLoc();
   auto elementType = parseType();
+
+  // Parse an optional encoding attribute.
+  Attribute encoding;
+  if (consumeIf(Token::comma))
+    encoding = parseAttribute();
+
   if (!elementType || parseToken(Token::greater, "expected '>' in tensor type"))
     return nullptr;
   if (!TensorType::isValidElementType(elementType))
     return emitError(elementTypeLoc, "invalid tensor element type"), nullptr;
 
-  if (isUnranked)
+  if (isUnranked) {
+    if (encoding)
+      return emitError("cannot apply encoding to unranked tensor"), nullptr;
     return UnrankedTensorType::get(elementType);
-  return RankedTensorType::get(dimensions, elementType);
+  }
+  return RankedTensorType::get(dimensions, elementType, encoding);
 }
 
 /// Parse a tuple type.

diff --git a/mlir/test/CAPI/ir.c b/mlir/test/CAPI/ir.c
index 5ce496c8a0e2b..c5eb174ac2ca1 100644
--- a/mlir/test/CAPI/ir.c
+++ b/mlir/test/CAPI/ir.c
@@ -438,8 +438,8 @@ static void printFirstOfEach(MlirContext ctx, MlirOperation operation) {
   mlirOperationSetAttributeByName(
       operation, mlirStringRefCreateFromCString("elts"),
       mlirDenseElementsAttrInt32Get(
-          mlirRankedTensorTypeGet(1, eltsShape, mlirIntegerTypeGet(ctx, 32)), 4,
-          eltsData));
+          mlirRankedTensorTypeGet(1, eltsShape, mlirIntegerTypeGet(ctx, 32),
+                                  mlirAttributeGetNull()), 4, eltsData));
   MlirOpPrintingFlags flags = mlirOpPrintingFlagsCreate();
   mlirOpPrintingFlagsElideLargeElementsAttrs(flags, 2);
   mlirOpPrintingFlagsPrintGenericOpForm(flags);
@@ -687,8 +687,8 @@ static int printBuiltinTypes(MlirContext ctx) {
   // CHECK: vector<2x3xf32>
 
   // Ranked tensor type.
-  MlirType rankedTensor =
-      mlirRankedTensorTypeGet(sizeof(shape) / sizeof(int64_t), shape, f32);
+  MlirType rankedTensor = mlirRankedTensorTypeGet(
+      sizeof(shape) / sizeof(int64_t), shape, f32, mlirAttributeGetNull());
   if (!mlirTypeIsATensor(rankedTensor) ||
       !mlirTypeIsARankedTensor(rankedTensor))
     return 16;
@@ -889,24 +889,30 @@ int printBuiltinAttributes(MlirContext ctx) {
   int64_t ints64[] = {0, 1};
   float floats[] = {0.0f, 1.0f};
   double doubles[] = {0.0, 1.0};
+  MlirAttribute encoding = mlirAttributeGetNull();
   MlirAttribute boolElements = mlirDenseElementsAttrBoolGet(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 1)), 2, bools);
+      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 1), encoding),
+      2, bools);
   MlirAttribute uint32Elements = mlirDenseElementsAttrUInt32Get(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeUnsignedGet(ctx, 32)), 2,
-      uints32);
+      mlirRankedTensorTypeGet(2, shape,
+                              mlirIntegerTypeUnsignedGet(ctx, 32), encoding),
+      2, uints32);
   MlirAttribute int32Elements = mlirDenseElementsAttrInt32Get(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 32)), 2,
-      ints32);
+      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 32), encoding),
+      2, ints32);
   MlirAttribute uint64Elements = mlirDenseElementsAttrUInt64Get(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeUnsignedGet(ctx, 64)), 2,
-      uints64);
+      mlirRankedTensorTypeGet(2, shape,
+                              mlirIntegerTypeUnsignedGet(ctx, 64), encoding),
+      2, uints64);
   MlirAttribute int64Elements = mlirDenseElementsAttrInt64Get(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 64)), 2,
-      ints64);
+      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 64), encoding),
+      2, ints64);
   MlirAttribute floatElements = mlirDenseElementsAttrFloatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirF32TypeGet(ctx)), 2, floats);
+      mlirRankedTensorTypeGet(2, shape, mlirF32TypeGet(ctx), encoding),
+      2, floats);
   MlirAttribute doubleElements = mlirDenseElementsAttrDoubleGet(
-      mlirRankedTensorTypeGet(2, shape, mlirF64TypeGet(ctx)), 2, doubles);
+      mlirRankedTensorTypeGet(2, shape, mlirF64TypeGet(ctx), encoding),
+      2, doubles);
 
   if (!mlirAttributeIsADenseElements(boolElements) ||
       !mlirAttributeIsADenseElements(uint32Elements) ||
@@ -943,19 +949,24 @@ int printBuiltinAttributes(MlirContext ctx) {
   // CHECK: dense<{{\[}}[0.000000e+00, 1.000000e+00]]> : tensor<1x2xf64>
 
   MlirAttribute splatBool = mlirDenseElementsAttrBoolSplatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 1)), 1);
+      mlirRankedTensorTypeGet(2, shape,
+                              mlirIntegerTypeGet(ctx, 1), encoding), 1);
   MlirAttribute splatUInt32 = mlirDenseElementsAttrUInt32SplatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 32)), 1);
+      mlirRankedTensorTypeGet(2, shape,
+                              mlirIntegerTypeGet(ctx, 32), encoding), 1);
   MlirAttribute splatInt32 = mlirDenseElementsAttrInt32SplatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 32)), 1);
+      mlirRankedTensorTypeGet(2, shape,
+                              mlirIntegerTypeGet(ctx, 32), encoding), 1);
   MlirAttribute splatUInt64 = mlirDenseElementsAttrUInt64SplatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 64)), 1);
+      mlirRankedTensorTypeGet(2, shape,
+                              mlirIntegerTypeGet(ctx, 64), encoding), 1);
   MlirAttribute splatInt64 = mlirDenseElementsAttrInt64SplatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirIntegerTypeGet(ctx, 64)), 1);
+      mlirRankedTensorTypeGet(2, shape,
+                              mlirIntegerTypeGet(ctx, 64), encoding), 1);
   MlirAttribute splatFloat = mlirDenseElementsAttrFloatSplatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirF32TypeGet(ctx)), 1.0f);
+      mlirRankedTensorTypeGet(2, shape, mlirF32TypeGet(ctx), encoding), 1.0f);
   MlirAttribute splatDouble = mlirDenseElementsAttrDoubleSplatGet(
-      mlirRankedTensorTypeGet(2, shape, mlirF64TypeGet(ctx)), 1.0);
+      mlirRankedTensorTypeGet(2, shape, mlirF64TypeGet(ctx), encoding), 1.0);
 
   if (!mlirAttributeIsADenseElements(splatBool) ||
       !mlirDenseElementsAttrIsSplat(splatBool) ||
@@ -1024,13 +1035,14 @@ int printBuiltinAttributes(MlirContext ctx) {
   int64_t indices[] = {4, 7};
   int64_t two = 2;
   MlirAttribute indicesAttr = mlirDenseElementsAttrInt64Get(
-      mlirRankedTensorTypeGet(1, &two, mlirIntegerTypeGet(ctx, 64)), 2,
-      indices);
+      mlirRankedTensorTypeGet(1, &two, mlirIntegerTypeGet(ctx, 64), encoding),
+      2, indices);
   MlirAttribute valuesAttr = mlirDenseElementsAttrFloatGet(
-      mlirRankedTensorTypeGet(1, &two, mlirF32TypeGet(ctx)), 2, floats);
+      mlirRankedTensorTypeGet(1, &two, mlirF32TypeGet(ctx), encoding),
+      2, floats);
   MlirAttribute sparseAttr = mlirSparseElementsAttribute(
-      mlirRankedTensorTypeGet(2, shape, mlirF32TypeGet(ctx)), indicesAttr,
-      valuesAttr);
+      mlirRankedTensorTypeGet(2, shape, mlirF32TypeGet(ctx), encoding),
+      indicesAttr, valuesAttr);
   mlirAttributeDump(sparseAttr);
   // CHECK: sparse<[4, 7], [0.000000e+00, 1.000000e+00]> : tensor<1x2xf32>
 

diff --git a/mlir/test/IR/invalid.mlir b/mlir/test/IR/invalid.mlir
index 220f46d5b344b..24b9dc80cee43 100644
--- a/mlir/test/IR/invalid.mlir
+++ b/mlir/test/IR/invalid.mlir
@@ -100,6 +100,12 @@ func @memref_zero_stride(memref<42x42xi8, offset: ?, strides: [0, ?]>) // expect
 
 // -----
 
+func @tensor_encoding_mismatch(%arg0: tensor<8xi32, "enc">) -> (tensor<8xi32>) { // expected-note {{prior use here}}
+  return %arg0: tensor<8xi32> // expected-error {{use of value '%arg0' expects different type than prior uses: 'tensor<8xi32>' vs 'tensor<8xi32, "enc">'}}
+}
+
+// -----
+
 func @bad_branch() {
 ^bb12:
   br ^missing  // expected-error {{reference to an undefined block}}

diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir
index df6f216108237..7e8810c7479d9 100644
--- a/mlir/test/IR/parser.mlir
+++ b/mlir/test/IR/parser.mlir
@@ -77,6 +77,9 @@ func private @vectors(vector<1 x f32>, vector<2x4xf32>)
 func private @tensors(tensor<* x f32>, tensor<* x vector<2x4xf32>>,
                  tensor<1x?x4x?x?xi32>, tensor<i8>)
 
+// CHECK: func private @tensor_encoding(tensor<16x32xf64, "sparse">)
+func private @tensor_encoding(tensor<16x32xf64, "sparse">)
+
 // CHECK: func private @memrefs(memref<1x?x4x?x?xi32, #map{{[0-9]+}}>, memref<8xi8>)
 func private @memrefs(memref<1x?x4x?x?xi32, #map0>, memref<8xi8, #map1, #map1>)
 


        

