[Mlir-commits] [mlir] 26ef386 - [mlir][sparse] Add optional size_hint operand to bufferization.alloc_tensor.

llvmlistbot at llvm.org
Wed Nov 9 16:19:55 PST 2022


Author: bixia1
Date: 2022-11-09T16:19:49-08:00
New Revision: 26ef3868c5ee3a449437cc0e407e958b258d8562

URL: https://github.com/llvm/llvm-project/commit/26ef3868c5ee3a449437cc0e407e958b258d8562
DIFF: https://github.com/llvm/llvm-project/commit/26ef3868c5ee3a449437cc0e407e958b258d8562.diff

LOG: [mlir][sparse] Add optional size_hint operand to bufferization.alloc_tensor.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D137585

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
    mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
    mlir/test/Dialect/Bufferization/ops.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index bfdafafd3764d..b5df91f778fb7 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -51,6 +51,11 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
     If neither `copy` nor `memory_space` is specified, the default memory space
     is used during bufferization.
 
+    The optional `size_hint` operand specifies the number of non-zero elements
+    for sparse tensors. The value of `size_hint` must be at least 1 and must
+    not exceed the linear size of the corresponding dense tensor type. If this
+    requirement is not met, the behavior of the operation is undefined.
+
     Both dense and sparse tensor types are supported. The result of a
     `bufferization.alloc_tensor` is a tensor value that can be used like any
     other tensor value. In practice, it is often used as the "out" operand of
@@ -66,10 +71,16 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
       outs(%c: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>
     return %0 : tensor<?x?xf32, #SparseMatrix>
     ```
+
+    ```mlir
+    %c = bufferization.alloc_tensor(%d1, %d2) size_hint = %noe
+      : tensor<?x?xf32, #SparseMatrix>
+    ```
   }];
 
   let arguments = (ins Variadic<Index>:$dynamic_sizes,
                        Optional<AnyTensor>:$copy,
+                       Optional<Index>:$size_hint,
                        OptionalAttr<UI64Attr>:$memory_space);
 
   let results = (outs AnyTensor:$result);
@@ -129,12 +140,16 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
   }];
 
   let builders = [
-    // Build an op without `copy` or `memory_space`.
+    // Build an op without `copy` or `memory_space` or `size_hint`.
     OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes)>,
 
-    // Build an op without `memory_space`.
+    // Build an op without `memory_space` or `size_hint`.
     OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
                    "Value":$copy)>,
+
+    // Build an op without `size_hint`.
+    OpBuilder<(ins "TensorType":$type, "ValueRange":$dynamicSizes,
+                   "Value":$copy, "IntegerAttr":$memory_space)>,
   ];
 
   let hasCanonicalizer = 1;

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 56cbda574e5d2..a1aa8db0c0ded 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -282,16 +282,24 @@ LogicalResult AllocTensorOp::verify() {
 void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                           RankedTensorType type, ValueRange dynamicSizes) {
   build(builder, result, type, dynamicSizes, /*copy=*/Value(),
+        /*size_hint=*/Value(),
         /*memory_space=*/IntegerAttr());
 }
 
 void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                           RankedTensorType type, ValueRange dynamicSizes,
                           Value copy) {
-  build(builder, result, type, dynamicSizes, copy,
+  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
         /*memory_space=*/IntegerAttr());
 }
 
+void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
+                          TensorType type, ValueRange dynamicSizes, Value copy,
+                          IntegerAttr memorySpace) {
+  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
+        memorySpace);
+}
+
 namespace {
 /// Change the type of the result of a `bufferization.alloc_tensor` by making
 /// the result type statically sized along dimension that in the original
@@ -383,6 +391,11 @@ ParseResult AllocTensorOp::parse(OpAsmParser &parser, OperationState &result) {
     if (parser.parseLParen() || parser.parseOperand(copyOperand) ||
         parser.parseRParen())
       return failure();
+  ParseResult sizeHintKeyword = parser.parseOptionalKeyword("size_hint");
+  OpAsmParser::UnresolvedOperand sizeHintOperand;
+  if (sizeHintKeyword.succeeded())
+    if (parser.parseEqual() || parser.parseOperand(sizeHintOperand))
+      return failure();
   if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon())
     return failure();
 
@@ -397,10 +410,14 @@ ParseResult AllocTensorOp::parse(OpAsmParser &parser, OperationState &result) {
   if (copyKeyword.succeeded())
     if (parser.resolveOperand(copyOperand, type, result.operands))
       return failure();
+  if (sizeHintKeyword.succeeded())
+    if (parser.resolveOperand(sizeHintOperand, indexType, result.operands))
+      return failure();
   result.addAttribute(AllocTensorOp::getOperandSegmentSizeAttr(),
                       parser.getBuilder().getDenseI32ArrayAttr(
                           {static_cast<int32_t>(dynamicSizesOperands.size()),
-                           static_cast<int32_t>(copyKeyword.succeeded())}));
+                           static_cast<int32_t>(copyKeyword.succeeded()),
+                           static_cast<int32_t>(sizeHintKeyword.succeeded())}));
   return success();
 }
 
@@ -408,6 +425,8 @@ void AllocTensorOp::print(OpAsmPrinter &p) {
   p << "(" << getDynamicSizes() << ")";
   if (getCopy())
     p << " copy(" << getCopy() << ")";
+  if (getSizeHint())
+    p << " size_hint=" << getSizeHint();
   p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
                               AllocTensorOp::getOperandSegmentSizeAttr()});
   p << " : ";

diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir
index 72edef3069393..5b707ba4d7c79 100644
--- a/mlir/test/Dialect/Bufferization/ops.mlir
+++ b/mlir/test/Dialect/Bufferization/ops.mlir
@@ -1,6 +1,10 @@
 // RUN: mlir-opt %s | mlir-opt | FileCheck %s
 // RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
 
+#CSR = #sparse_tensor.encoding<{
+  dimLevelType = ["dense", "compressed"]
+}>
+
 // CHECK-LABEL: func @test_clone
 func.func @test_clone(%buf : memref<*xf32>) -> memref<*xf32> {
   %clone = bufferization.clone %buf : memref<*xf32> to memref<*xf32>
@@ -39,6 +43,9 @@ func.func @test_alloc_tensor_op(%t: tensor<?x5xf32>, %sz: index)
   %4 = bufferization.alloc_tensor() copy(%t) {escape = true} : tensor<?x5xf32>
   // CHECK: bufferization.alloc_tensor() copy(%{{.*}}) {escape = false} : tensor<?x5xf32>
   %5 = bufferization.alloc_tensor() copy(%t) {escape = false} : tensor<?x5xf32>
+  %c100 = arith.constant 100 : index
+  // CHECK: bufferization.alloc_tensor() size_hint=
+  %6 = bufferization.alloc_tensor() size_hint=%c100 : tensor<100x100xf64, #CSR>
   return %1 : tensor<?x5xf32>
 }
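
For quick reference, here is a minimal standalone sketch of the syntax enabled by this change. It simply mirrors the documentation and test updates above; the encoding, function name, and SSA value names are illustrative and not part of the commit:

```mlir
#CSR = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed"]
}>

func.func @alloc_sparse_with_hint(%d0: index, %d1: index, %nnz: index)
    -> tensor<?x?xf64, #CSR> {
  // Allocate a dynamically sized sparse tensor; %nnz hints at the expected
  // number of non-zero elements (at least 1 and at most the linear size of
  // the corresponding dense tensor type).
  %t = bufferization.alloc_tensor(%d0, %d1) size_hint=%nnz
    : tensor<?x?xf64, #CSR>
  return %t : tensor<?x?xf64, #CSR>
}
```

As with `copy`, the new operand is optional, so existing uses of `bufferization.alloc_tensor` continue to parse and print unchanged.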
 


        

