[Mlir-commits] [mlir] 6badbd6 - [mlir][linalg] BufferizeToAllocation: Bufferize non-allocating ops

Matthias Springer llvmlistbot at llvm.org
Tue Jul 4 05:47:02 PDT 2023


Author: Matthias Springer
Date: 2023-07-04T14:46:54+02:00
New Revision: 6badbd6fd3b5748e98d0f05ba2857acdccf66630

URL: https://github.com/llvm/llvm-project/commit/6badbd6fd3b5748e98d0f05ba2857acdccf66630
DIFF: https://github.com/llvm/llvm-project/commit/6badbd6fd3b5748e98d0f05ba2857acdccf66630.diff

LOG: [mlir][linalg] BufferizeToAllocation: Bufferize non-allocating ops

Until now, only `tensor.pad` ops could be bufferized to an allocation. This revision adds support for all bufferizable ops that do not themselves bufferize to an allocation. (Ops that do bufferize to an allocation, such as `tensor.pad`, still need special handling.)
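
As an illustration, the new capability can be driven from the transform dialect roughly as follows (a sketch adapted from the test added in this revision; the `memory_space = 4` attribute is just an example value):

```
func.func @tensor_insert(%t: tensor<?x10xindex>, %idx: index, %v: index) -> tensor<?x10xindex> {
  %r = tensor.insert %v into %t[%idx, %idx] : tensor<?x10xindex>
  return %r : tensor<?x10xindex>
}

transform.sequence failures(propagate) {
^bb1(%arg1: !transform.any_op):
  // Match the tensor.insert op and bufferize it into a newly allocated buffer.
  %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
  %1 = transform.structured.bufferize_to_allocation %0 {memory_space = 4} : !transform.any_op
}
```

The matched op is rewritten to a `memref.alloc`, a copy of the destination tensor into the new buffer, the store itself, and a `bufferization.to_tensor ... restrict writable` op that replaces the original result (see the updated op documentation below).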

Differential Revision: https://reviews.llvm.org/D153971

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
    mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
    mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
    mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 51cbfc1a5b7eea..312cc7b5efef14 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -90,7 +90,25 @@ def BufferizeToAllocationOp : Op<Transform_Dialect,
     buffer. Furthermore, it returns a handle to the result of the `to_tensor`
     op.
 
-    Only `tensor.pad` targets are supported at the moment.
+    Only bufferizable ops that bufferize to a memory write or have an
+    aliasing OpOperand (and do not themselves bufferize to an allocation) are
+    supported. They are bufferized using their BufferizableOpInterface
+    implementation. E.g.:
+
+    ```
+    %0 = tensor.insert %f into %dest[%pos] : tensor<10xf32>
+    ```
+
+    Is bufferized to:
+
+    ```
+    %alloc = memref.alloc() : memref<10xf32>
+    memref.tensor_store %dest, %alloc : memref<10xf32>
+    memref.store %f, %alloc[%pos] : memref<10xf32>
+    %0 = bufferization.to_tensor %alloc restrict writable : memref<10xf32>
+    ```
+
+    Selected ops that bufferize to an allocation are also supported:
     - `tensor.pad` is lowered to an allocation, followed by a `linalg.fill`
       and a buffer copy (all on memrefs).
 

diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 0ff3032a2f79f1..64faf0df59d1ef 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -65,11 +65,15 @@ Value bufferizeToAllocation(RewriterBase &rewriter, tensor::PadOp padOp,
                             Attribute memorySpace = {});
 
 /// Bufferize the given op with tensor semantics and materialize the result in
-/// a newly allocated buffer. E.g.:
+/// a newly allocated buffer.
 ///
-/// Only tensor.pad is supported at the moment.
+/// Only bufferizable ops that bufferize to a memory write or have an
+/// aliasing OpOperand (and do not themselves bufferize to an allocation) are
+/// supported. They are bufferized using their BufferizableOpInterface
+/// implementation.
 ///
-/// This function returns the newly allocated buffer.
+/// Selected ops that bufferize to an allocation are also supported:
+/// - tensor.pad
 Value bufferizeToAllocation(RewriterBase &rewriter, Operation *op,
                             Attribute memorySpace = {});
 

diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
index 4a5052d58b629f..75295862bd6fa8 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
@@ -942,7 +942,7 @@ static LogicalResult checkAliasInfoConsistency(Operation *op,
     // attribute. Such tensors may alias any other tensor, which is currently
     // not handled in the analysis.
     if (auto toTensorOp = dyn_cast<ToTensorOp>(op.getOperation())) {
-      if (!toTensorOp.getRestrict()) {
+      if (!toTensorOp.getRestrict() && !toTensorOp->getUses().empty()) {
         op->emitError("to_tensor ops without `restrict` are not supported by "
                       "One-Shot Analysis");
         return WalkResult::interrupt();

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
index 88b86d6a384c17..b2d7fe2f58b180 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp
@@ -330,12 +330,91 @@ mlir::linalg::rewriteInDestinationPassingStyle(RewriterBase &rewriter,
 
 Value linalg::bufferizeToAllocation(RewriterBase &rewriter, Operation *op,
                                     Attribute memorySpace) {
+  using namespace bufferization;
+
   // Call specialized overload for certain ops.
   if (auto padOp = dyn_cast<tensor::PadOp>(op))
     return bufferizeToAllocation(rewriter, padOp, memorySpace);
 
-  // TODO: Support other ops.
-  return nullptr;
+  // Only bufferizable ops are supported.
+  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
+  if (!bufferizableOp)
+    return nullptr;
+  BufferizationOptions options;
+  AnalysisState state(options);
+
+  // Gather tensor results.
+  SmallVector<OpResult> tensorResults;
+  for (OpResult result : op->getResults()) {
+    if (!result.getType().isa<TensorType>())
+      continue;
+    // Unranked tensors are not supported.
+    if (!isa<RankedTensorType>(result.getType()))
+      return nullptr;
+    // Ops that bufferize to an allocation are not supported.
+    if (bufferizableOp.bufferizesToAllocation(result))
+      return nullptr;
+    tensorResults.push_back(result);
+  }
+
+  // Gather all operands that should bufferize to a new allocation. I.e.,
+  // bufferize out-of-place.
+  SmallVector<OpOperand *> outOfPlaceOperands, resultUses;
+  auto addOutOfPlaceOperand = [&](OpOperand *operand) {
+    if (llvm::find(outOfPlaceOperands, operand) == outOfPlaceOperands.end())
+      outOfPlaceOperands.push_back(operand);
+  };
+  for (OpResult result : tensorResults) {
+    AliasingOpOperandList aliasingOperands =
+        state.getAliasingOpOperands(result);
+    for (const AliasingOpOperand &operand : aliasingOperands) {
+      addOutOfPlaceOperand(operand.opOperand);
+      for (OpOperand &resultUse : result.getUses())
+        resultUses.push_back(&resultUse);
+    }
+  }
+  for (OpOperand &operand : op->getOpOperands()) {
+    if (!state.bufferizesToMemoryWrite(operand))
+      continue;
+    if (!isa<RankedTensorType>(operand.get().getType()))
+      return nullptr;
+    addOutOfPlaceOperand(&operand);
+  }
+  // TODO: Support multiple buffers.
+  if (outOfPlaceOperands.size() != 1)
+    return nullptr;
+
+  // Allocate buffers.
+  OpBuilder::InsertionGuard g(rewriter);
+  rewriter.setInsertionPoint(op);
+  SmallVector<Value> allocs;
+  for (OpOperand *operand : outOfPlaceOperands) {
+    Value alloc = createAllocationForTensor(rewriter, op->getLoc(),
+                                            operand->get(), memorySpace);
+    allocs.push_back(alloc);
+    // Initialize buffer with a copy of the operand data.
+    // TODO: Do not copy uninitialized tensors such as tensor.empty.
+    rewriter.create<memref::TensorStoreOp>(op->getLoc(), operand->get(), alloc);
+    rewriter.updateRootInPlace(op, [&]() {
+      operand->set(rewriter.create<ToTensorOp>(op->getLoc(), alloc));
+    });
+  }
+
+  // Bufferize the op.
+  if (failed(bufferizableOp.bufferize(rewriter, options)))
+    return nullptr;
+
+  // Set "restrict" attribute, indicating that no other tensor aliases with
+  // this tensor. That is because we just allocated a new buffer for the tensor.
+  for (OpOperand *resultUse : resultUses) {
+    auto toTensorOp = resultUse->get().getDefiningOp<ToTensorOp>();
+    assert(toTensorOp && "expected to_tensor op");
+    rewriter.updateRootInPlace(toTensorOp, [&]() {
+      toTensorOp.setRestrict(true);
+      toTensorOp.setWritable(true);
+    });
+  }
+  return allocs.front();
 }
 
 namespace {

diff --git a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
index 47aa4ea71e3cb7..99393ff75ff0c9 100644
--- a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -split-input-file \
+// RUN: mlir-opt -split-input-file -verify-diagnostics \
 // RUN:   -test-transform-dialect-interpreter -canonicalize \
 // RUN:   -allow-unregistered-dialect -split-input-file %s | FileCheck %s
 
@@ -62,3 +62,41 @@ transform.sequence failures(propagate) {
   %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
 }
 
+// -----
+
+// CHECK-LABEL: func @tensor_insert(
+//  CHECK-SAME:     %[[t:.*]]: tensor<?x10xindex>
+//       CHECK:   %[[m:.*]] = bufferization.to_memref %[[t]]
+//       CHECK:   %[[alloc:.*]] = memref.alloc(%{{.*}}) : memref<?x10xindex, 4>
+//       CHECK:   memref.copy %[[m]], %[[alloc]]
+//       CHECK:   memref.store %{{.*}}, %[[alloc]]
+//       CHECK:   %[[r:.*]] = bufferization.to_tensor %[[alloc]] restrict writable
+//       CHECK:   memref.dealloc %[[alloc]]
+//       CHECK:   return %[[r]]
+func.func @tensor_insert(%t: tensor<?x10xindex>, %idx: index, %v: index) -> tensor<?x10xindex> {
+  %r = tensor.insert %v into %t[%idx, %idx] : tensor<?x10xindex>
+  return %r : tensor<?x10xindex>
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["tensor.insert"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.bufferize_to_allocation %0 {memory_space = 4} : !transform.any_op
+  // Make sure that One-Shot Bufferize can bufferize the rest.
+  %4 = transform.bufferization.one_shot_bufferize %arg1 : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+func.func @tensor_extract(%t: tensor<?x10xindex>, %idx: index) -> index {
+  // expected-note @below{{target payload op}}
+  %r = tensor.extract %t[%idx, %idx] : tensor<?x10xindex>
+  return %r : index
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["tensor.extract"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  // expected-error @below{{failed to bufferize operation}}
+  %2 = transform.structured.bufferize_to_allocation %0 {memory_space = 4} : !transform.any_op
+}


        


More information about the Mlir-commits mailing list