[Mlir-commits] [mlir] e1dbc25 - [mlir][sparse] integrate sparse annotation into generic linalg op

Aart Bik llvmlistbot at llvm.org
Wed Nov 11 17:27:09 PST 2020


Author: Aart Bik
Date: 2020-11-11T17:26:30-08:00
New Revision: e1dbc25ee2311b840f70905cd5f4dfafdeb5233f

URL: https://github.com/llvm/llvm-project/commit/e1dbc25ee2311b840f70905cd5f4dfafdeb5233f
DIFF: https://github.com/llvm/llvm-project/commit/e1dbc25ee2311b840f70905cd5f4dfafdeb5233f.diff

LOG: [mlir][sparse] integrate sparse annotation into generic linalg op

This CL integrates the new sparse annotations (heretofore merely added as fully
transparent attributes) more tightly to the generic linalg op in order to add
verification of the annotations' consistency as well as to make other
passes more aware of their presence (in the long run, rewriting rules must
preserve the integrity of the annotations).

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D91224

Added: 
    mlir/test/Dialect/Linalg/sparse_invalid.mlir

Modified: 
    mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOpsInterface.td
    mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
    mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
    mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp

Removed: 
    


################################################################################
diff  --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 29ce9efc2e98b..2c200fe08b105 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -493,6 +493,8 @@ class GenericOpBase<string mnemonic> : LinalgStructuredBase_Op<mnemonic, [
                        ArrayAttr:$iterator_types,
                        OptionalAttr<StrAttr>:$doc,
                        OptionalAttr<StrAttr>:$library_call,
+                       // ArrayAttr of StrArrayAttr:
+                       OptionalAttr<ArrayAttr>:$sparse,
                        Confined<OptionalAttr<I64Attr>, [IntMinValue<0>]>
                          :$symbol_source);
   let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
@@ -549,6 +551,8 @@ def GenericOp : GenericOpBase<"generic"> {
         Each element of the list represents and iterator of one of the following
         types:
           parallel, reduction, window
+      - sparse: an optional list with per-dimension sparsity annotations (either
+        "D" for dense or "S" for sparse) for each input and output view.
       - symbol_source: index of the operand whose dimensions will be propagated
         as symbols to the indexing maps. When specified the number of symbols
         in each of the indexing maps has to be either 0 or the rank of the

diff  --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOpsInterface.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOpsInterface.td
index 85e0e3c9f56a9..6646964a983e7 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOpsInterface.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOpsInterface.td
@@ -678,6 +678,18 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
                llvm::all_of(this->getOperation()->getResults(), isTensorType);
       }]
     >,
+    InterfaceMethod<
+      /*desc=*/[{
+        Return whether the op has sparse tensor semantics.
+      }],
+      /*retTy=*/"bool",
+      /*methodName=*/"hasSparseSemantics",
+      /*args=*/(ins),
+      /*methodBody=*/"",
+      /*defaultImplementation=*/[{
+        return $_op.getAttr(getSparseAttrName()).template dyn_cast_or_null<ArrayAttr>() != nullptr;
+      }]
+    >,
     InterfaceMethod<
       /*desc=*/[{
         Return the name registered for this op when lowering to an external

diff  --git a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
index 805db03330748..21c311ed6d181 100644
--- a/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
+++ b/mlir/include/mlir/Dialect/Utils/StructuredOpsUtils.h
@@ -66,6 +66,9 @@ constexpr StringRef getDocAttrName() { return "doc"; }
 /// function that implements the structured op.
 constexpr StringRef getLibraryCallAttrName() { return "library_call"; }
 
+/// Attribute name for the ArrayAttr of StrArrayAttr that encodes sparsity.
+constexpr StringRef getSparseAttrName() { return "sparse"; }
+
 /// Attribute name for the StrArrayAttr which encodes the value of strides.
 constexpr StringRef getStridesAttrName() { return "strides"; }
 
@@ -134,6 +137,18 @@ inline StringRef toString(IteratorType t) {
   llvm_unreachable("Unsupported IteratorType");
 }
 
+/// Use to encode a dense or sparse dimension.
+constexpr StringRef getSparseDimName() { return "S"; }
+inline bool isSparseDim(Attribute attr) {
+  auto strAttr = attr.dyn_cast_or_null<StringAttr>();
+  return strAttr && strAttr.getValue() == getSparseDimName();
+}
+constexpr StringRef getDenseDimName() { return "D"; }
+inline bool isDenseDim(Attribute attr) {
+  auto strAttr = attr.dyn_cast_or_null<StringAttr>();
+  return strAttr && strAttr.getValue() == getDenseDimName();
+}
+
 } // end namespace mlir
 
 #endif // MLIR_UTILS_STRUCTUREDOPSUTILS_H

diff  --git a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
index 366aa0fdcc5aa..11ac845f0ec71 100644
--- a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
+++ b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
@@ -69,6 +69,7 @@ Operation *mlir::edsc::makeGenericLinalgOp(
               builder.getStrArrayAttr(iteratorStrTypes),
               StringAttr() /*doc*/,
               StringAttr() /*library_call*/,
+              ArrayAttr() /*sparse*/,
               IntegerAttr() /*symbol_source*/
               /* TODO: other attributes in op */
               )

diff  --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 4e7fef1495513..2cd52029832db 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -110,7 +110,7 @@ void GenericOp::build(
         builder.getStrArrayAttr(iteratorTypes),
         doc.empty() ? StringAttr() : builder.getStringAttr(doc),
         libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
-        symbolSource);
+        ArrayAttr(), symbolSource);
   if (!bodyBuild)
     return;
 
@@ -170,7 +170,7 @@ void IndexedGenericOp::build(
         builder.getStrArrayAttr(iteratorTypes),
         doc.empty() ? StringAttr() : builder.getStringAttr(doc),
         libraryCall.empty() ? StringAttr() : builder.getStringAttr(libraryCall),
-        symbolSource);
+        ArrayAttr(), symbolSource);
   if (!bodyBuild)
     return;
 
@@ -349,6 +349,7 @@ void IndexedGenericOp::getEffects(
 }
 
 namespace {
+
 template <typename GenericOpType>
 struct BlockArgsVerifier {
   static LogicalResult verify(GenericOpType op, Block &block);
@@ -405,6 +406,48 @@ LogicalResult BlockArgsVerifier<IndexedGenericOp>::verify(IndexedGenericOp op,
   }
   return success();
 }
+
+template <typename GenericOpType>
+struct AnnotationsVerifier {
+  static LogicalResult verify(GenericOpType op) { return success(); }
+};
+
+template <>
+LogicalResult AnnotationsVerifier<GenericOp>::verify(GenericOp op) {
+  ArrayAttr sparseAttr = op.sparseAttr();
+  if (!sparseAttr)
+    return success();
+  // Verify consistency of sparse annotations.
+  if (!op.hasTensorSemantics())
+    return op.emitOpError("expected sparse annotations on tensors only");
+  unsigned numTensors = op.getNumInputsAndOutputs();
+  if (sparseAttr.size() != numTensors)
+    return op.emitOpError("expected one sparse annotation for each tensor");
+  for (unsigned t = 0; t < numTensors; t++) {
+    auto dimAttr = sparseAttr[t].dyn_cast_or_null<ArrayAttr>();
+    if (!dimAttr)
+      return op.emitOpError("expected sparse annotation array for tensor ")
+             << t;
+    unsigned rank = op.getShapedType(t).getRank();
+    if (dimAttr.size() != rank)
+      return op.emitOpError("expected sparse annotation with rank ")
+             << rank << " for tensor " << t;
+    // Per-dimension annotations for each tensor consist of only "D" or "S".
+    for (unsigned d = 0; d < rank; d++) {
+      if (isDenseDim(dimAttr[d])) {
+        continue;
+      } else if (isSparseDim(dimAttr[d])) {
+        if (t == numTensors - 1)
+          return op.emitOpError("sparse output tensors not supported (yet)");
+        continue;
+      }
+      return op.emitOpError("expected sparse annotation at position ")
+             << d << " for tensor " << t;
+    }
+  }
+  return success();
+}
+
 } // namespace
 
 template <typename GenericOpType>
@@ -466,6 +509,9 @@ static LogicalResult verifyGenericOp(GenericOpType op) {
     return op.emitOpError("expected the concatenation of maps in indexing_map "
                           "to be invertible");
 
+  if (failed(AnnotationsVerifier<GenericOpType>::verify(op)))
+    return failure();
+
   return success();
 }
 

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
index f0e1de7094f12..3672b80730b8c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
@@ -131,7 +131,8 @@ static void finalizeBufferAllocation(ConversionPatternRewriter &rewriter,
       /*outputBuffers=*/outputs,
       /*initTensors=*/llvm::None, genericOp.indexing_maps(),
       genericOp.iterator_types(), genericOp.docAttr(),
-      genericOp.library_callAttr(), genericOp.symbol_sourceAttr());
+      genericOp.library_callAttr(), genericOp.sparseAttr(),
+      genericOp.symbol_sourceAttr());
 
   // Create a new block in the region of the new Generic Op.
   Block *oldBlock = genericOp.getBody();

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
index 3e3392d849754..7cb9bb5b13bfc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
@@ -227,6 +227,7 @@ fuseTensorOpsImpl(LinalgOp producer, LinalgOp consumer, unsigned consumerIdx,
                                      consumer.iterator_types(),
                                      /*doc=*/nullptr,
                                      /*library_call=*/nullptr,
+                                     /*sparse=*/nullptr,
                                      /*symbol_source=*/nullptr)
                   .getOperation();
   } else {
@@ -241,6 +242,7 @@ fuseTensorOpsImpl(LinalgOp producer, LinalgOp consumer, unsigned consumerIdx,
                                       consumer.iterator_types(),
                                       /*doc=*/nullptr,
                                       /*library_call=*/nullptr,
+                                      /*sparse=*/nullptr,
                                       /*symbol_source=*/nullptr)
             .getOperation();
   }
@@ -820,6 +822,7 @@ struct FoldConsumerReshapeOpByLinearization
         producer.iterator_types(),
         /*doc=*/nullptr,
         /*library_call=*/nullptr,
+        /*sparse=*/nullptr,
         /*symbol_source=*/nullptr);
     auto &fusedRegion = fusedOp.getOperation()->getRegion(0);
     rewriter.cloneRegionBefore(producer.getOperation()->getRegion(0),
@@ -903,6 +906,7 @@ struct FoldSplatConstants : public OpRewritePattern<LinalgOpTy> {
           linalgOp.iterator_types(),
           /*doc=*/nullptr,
           /*library_call=*/nullptr,
+          /*sparse=*/nullptr,
           /*symbol_source=*/nullptr);
 
       // Map the block argument corresponding to the replaced argument with the

diff  --git a/mlir/test/Dialect/Linalg/sparse_invalid.mlir b/mlir/test/Dialect/Linalg/sparse_invalid.mlir
new file mode 100644
index 0000000000000..985667ce433b6
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/sparse_invalid.mlir
@@ -0,0 +1,172 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+#trait_memref = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a
+    affine_map<(i) -> (i)>   // x (out)
+  ],
+  sparse = [
+    [ "S" ],  // a
+    [ "D" ]   // x
+  ],
+  iterator_types = ["parallel"]
+}
+
+func @invalid_memref(%arga: memref<32xf32>, %argb: f32) -> tensor<32xf32> {
+ // expected-error at +1 {{'linalg.generic' op expected sparse annotations on tensors only}}
+  %0 = linalg.generic #trait_memref
+    ins(%arga: memref<32xf32>) {
+      ^bb(%a: f32):
+        %0 = addf %a, %argb  : f32
+        linalg.yield %0 : f32
+  } -> tensor<32xf32>
+  return %0 : tensor<32xf32>
+}
+
+// -----
+
+#trait_too_many = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a
+    affine_map<(i) -> (i)>   // x (out)
+  ],
+  sparse = [
+    [ "S" ],  // a
+    [ "S" ],  // b
+    [ "D" ]   // x
+  ],
+  iterator_types = ["parallel"]
+}
+
+func @invalid_too_many(%arga: tensor<32xf32>, %argb: f32) -> tensor<32xf32> {
+ // expected-error at +1 {{'linalg.generic' op expected one sparse annotation for each tensor}}
+  %0 = linalg.generic #trait_too_many
+    ins(%arga: tensor<32xf32>) {
+      ^bb(%a: f32):
+        %0 = addf %a, %argb  : f32
+        linalg.yield %0 : f32
+  } -> tensor<32xf32>
+  return %0 : tensor<32xf32>
+}
+
+// -----
+
+#trait_no_array = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a
+    affine_map<(i) -> (i)>   // x (out)
+  ],
+  sparse = [ 1, 2 ],
+  iterator_types = ["parallel"]
+}
+
+func @invalid_no_array(%arga: tensor<32xf32>, %argb: f32) -> tensor<32xf32> {
+ // expected-error at +1 {{'linalg.generic' op expected sparse annotation array for tensor 0}}
+  %0 = linalg.generic #trait_no_array
+    ins(%arga: tensor<32xf32>) {
+      ^bb(%a: f32):
+        %0 = addf %a, %argb  : f32
+        linalg.yield %0 : f32
+  } -> tensor<32xf32>
+  return %0 : tensor<32xf32>
+}
+
+// -----
+
+#trait_wrong_rank = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a
+    affine_map<(i) -> (i)>   // x (out)
+  ],
+  sparse = [
+    [ "S" ],
+    [ "D", "D" ]
+  ],
+  iterator_types = ["parallel"]
+}
+
+func @invalid_wrong_rank(%arga: tensor<32xf32>, %argb: f32) -> tensor<32xf32> {
+ // expected-error at +1 {{'linalg.generic' op expected sparse annotation with rank 1 for tensor 1}}
+  %0 = linalg.generic #trait_wrong_rank
+    ins(%arga: tensor<32xf32>) {
+      ^bb(%a: f32):
+        %0 = addf %a, %argb  : f32
+        linalg.yield %0 : f32
+  } -> tensor<32xf32>
+  return %0 : tensor<32xf32>
+}
+
+// -----
+
+#trait_no_string = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>,  // a
+    affine_map<(i,j) -> (i,j)>   // x (out)
+  ],
+  sparse = [
+    [ "S", 1 ],
+    [ "D", "D" ]
+  ],
+  iterator_types = ["parallel","parallel"]
+}
+
+func @invalid_no_string(%arga: tensor<32x16xf32>, %argb: f32) -> tensor<32x16xf32> {
+ // expected-error at +1 {{'linalg.generic' op expected sparse annotation at position 1 for tensor 0}}
+  %0 = linalg.generic #trait_no_string
+    ins(%arga: tensor<32x16xf32>) {
+      ^bb(%a: f32):
+        %0 = addf %a, %argb  : f32
+        linalg.yield %0 : f32
+  } -> tensor<32x16xf32>
+  return %0 : tensor<32x16xf32>
+}
+
+// -----
+
+#trait_wrong_symbol = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>,  // a
+    affine_map<(i,j) -> (i,j)>   // x (out)
+  ],
+  sparse = [
+    [ "S", "S" ],
+    [ "D", "X" ]
+  ],
+  iterator_types = ["parallel","parallel"]
+}
+
+func @invalid_wrong_symbol(%arga: tensor<32x16xf32>, %argb: f32) -> tensor<32x16xf32> {
+  // expected-error at +1 {{'linalg.generic' op expected sparse annotation at position 1 for tensor 1}}
+  %0 = linalg.generic #trait_wrong_symbol
+    ins(%arga: tensor<32x16xf32>) {
+      ^bb(%a: f32):
+        %0 = addf %a, %argb  : f32
+        linalg.yield %0 : f32
+  } -> tensor<32x16xf32>
+  return %0 : tensor<32x16xf32>
+}
+
+// -----
+
+#trait_no_sparse_output = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>,  // a
+    affine_map<(i,j) -> (i,j)>   // x (out)
+  ],
+  sparse = [
+    [ "S", "S" ],
+    [ "D", "S" ]
+  ],
+  iterator_types = ["parallel","parallel"]
+}
+
+func @invalid_no_sparse_output(%arga: tensor<32x16xf32>, %argb: f32) -> tensor<32x16xf32> {
+  // expected-error at +1 {{'linalg.generic' op sparse output tensors not supported (yet)}}
+  %0 = linalg.generic #trait_no_sparse_output
+    ins(%arga: tensor<32x16xf32>) {
+      ^bb(%a: f32):
+        %0 = addf %a, %argb  : f32
+        linalg.yield %0 : f32
+  } -> tensor<32x16xf32>
+  return %0 : tensor<32x16xf32>
+}


        


More information about the Mlir-commits mailing list