[Mlir-commits] [mlir] 3474d10 - [mlir][bufferization][NFC] Make `escape` a dialect attribute

Matthias Springer llvmlistbot at llvm.org
Thu Jun 23 10:34:53 PDT 2022


Author: Matthias Springer
Date: 2022-06-23T19:34:47+02:00
New Revision: 3474d10e1a9010e03b1428d19a88d7d1f0fb12af

URL: https://github.com/llvm/llvm-project/commit/3474d10e1a9010e03b1428d19a88d7d1f0fb12af
DIFF: https://github.com/llvm/llvm-project/commit/3474d10e1a9010e03b1428d19a88d7d1f0fb12af.diff

LOG: [mlir][bufferization][NFC] Make `escape` a dialect attribute

All bufferizable ops that bufferize to an allocation receive a `bufferization.escape` attribute during TensorCopyInsertion.

Differential Revision: https://reviews.llvm.org/D128137
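
As the updated tests below show, the pass now attaches a bool array with one
entry per op result, in place of the previous op-specific `escape` BoolAttr on
`alloc_tensor`. A minimal sketch (function name hypothetical; whether an entry
is true or false depends on the analysis options, compare the CHECK and
CHECK-FUNC lines in tensor-copy-insertion.mlir below):

    func.func @example() -> tensor<5xf32> {
      // One bool per result of the allocating op.
      %0 = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
      return %0 : tensor<5xf32>
    }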

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
    mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
    mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp
    mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
    mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp
    mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
    mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
    mlir/test/Dialect/Bufferization/invalid.mlir
    mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir
    mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
index e550b900cb8a..ccc9d1d70626 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
@@ -15,9 +15,29 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
   let description = [{
     An op interface for One-Shot Bufferize. Ops that implement this interface
    can be analyzed and bufferized using One-Shot Bufferize.
+
+    Note: All "bufferizesTo*" and "getAliasing*" interface methods must be
+    implemented conservatively. If it is not statically known whether an
+    OpOperand/OpResult bufferizes in a certain way (e.g., to a memory write),
+    the worst case must be assumed (e.g., that it does). Similarly,
+    "getAliasing*" interface methods may always return additional OpOperands or
+    OpResults, but must not miss an OpOperand or OpResult that could potentially
+    alias at runtime.
   }];
   let cppNamespace = "::mlir::bufferization";
   let methods = [
+      InterfaceMethod<
+        /*desc=*/[{
+          Return `true` if the given OpResult may bufferize to a new buffer
+          allocation. If it is statically unknown if the given OpResult
+          bufferizes to a buffer allocation, `true` should be returned.
+        }],
+        /*retType=*/"bool",
+        /*methodName=*/"bufferizesToAllocation",
+        /*args=*/(ins "OpResult":$opResult),
+        /*methodBody=*/"",
+        /*defaultImplementation=*/"return false;"
+      >,
       InterfaceMethod<
         /*desc=*/[{
           Return `true` if the given OpOperand bufferizes to a memory read. This
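
For downstream dialects, a hedged sketch of implementing the new interface
method via an external model (the op name `MyAllocOp` and the surrounding
registration are hypothetical; only `bufferizesToAllocation` is shown):

    #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"

    struct MyAllocOpInterface
        : public mlir::bufferization::BufferizableOpInterface::ExternalModel<
              MyAllocOpInterface, MyAllocOp> {
      // Per the conservatism note above: if it is not statically known
      // whether the result is a new allocation, return true.
      bool bufferizesToAllocation(mlir::Operation *op,
                                  mlir::OpResult opResult) const {
        return true;
      }
      // ... other BufferizableOpInterface methods ...
    };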

diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
index 53b7c10d1d0a..8ed8d763f640 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
@@ -39,6 +39,16 @@ def Bufferization_Dialect : Dialect {
     /// arguments during One-Shot Module Bufferize.
     constexpr const static ::llvm::StringLiteral
         kBufferLayoutAttrName = "bufferization.buffer_layout";
+
+    /// Attribute name used to mark escaping behavior of buffer allocations.
+    /// Escaping allocations cannot be deallocated in the same block and must
+    /// be treated specially: They are currently deallocated with the
+    /// BufferDeallocation pass.
+    ///
+    /// Note: Only ops with at least one OpResult that bufferizes to a buffer
+    /// allocation (as per BufferizableOpInterface) may have this attribute.
+    constexpr const static ::llvm::StringLiteral
+        kEscapeAttrName = "bufferization.escape";
   }];
   let hasOperationAttrVerify = 1;
   let emitAccessorPrefix = kEmitAccessorPrefix_Prefixed;
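
A hedged sketch of the intended semantics (the memref ops shown are an
approximation of the bufferized output, not a verbatim transcript):

    // escape = [false]: the buffer may be freed in its own block during
    // bufferization.
    %0 = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
    // bufferizes to roughly:
    //   %m = memref.alloc() : memref<5xf32>
    //   ...
    //   memref.dealloc %m : memref<5xf32>

    // escape = [true]: no dealloc is emitted during bufferization; the
    // BufferDeallocation pass is expected to free the buffer instead.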

diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index 1ed644c6b0ce..db4ed5e34bce 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -38,11 +38,6 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
     If `copy` is specified, no dynamic sizes should be passed, since they are
     the same as the dynamic sizes of the `copy` operand.
 
-    The optional `escape` attribute indicates whether the buffer escapes the
-    parent block or not. In the latter case, the buffer is deallocated at the
-    end of the block (during bufferization). In the former case, the buffer is not
-    deallocated and must be deallocated through some other mechanism.
-
     `alloc_tensor` is a helper op for bufferization. The operation is provided
     as an anchor that marks the beginning of a new tensor SSA use-def chain. It
     can be used to control in-place bufferization decisions during One-Shot
@@ -65,8 +60,7 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
   }];
 
   let arguments = (ins Variadic<Index>:$dynamic_sizes,
-                       Optional<AnyTensor>:$copy,
-                       OptionalAttr<BoolAttr>:$escape);
+                       Optional<AnyTensor>:$copy);
 
   let results = (outs AnyTensor:$result);
 
@@ -76,6 +70,8 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
 
     bool isMemoryWrite(OpResult opResult, const AnalysisState &state);
 
+    bool bufferizesToAllocation(OpResult opResult) { return true; }
+
     bool bufferizesToMemoryRead(OpOperand &opOperand,
                                 const AnalysisState &state);
 
@@ -119,16 +115,8 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
   }];
 
   let builders = [
-    // Build an op without `copy` operand and `escape` attribute.
+    // Build an op without `copy` operand.
     OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes)>,
-
-    // Build an op without `escape` attribute.
-    OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
-                   "Value":$copy)>,
-
-    // Build an op with `copy` and `escape` attribute.
-    OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
-                   "Value":$copy, "bool":$escape)>,
   ];
 
   let hasCanonicalizer = 1;

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index acb59d5b9cc9..85a3f562ce99 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -84,8 +84,11 @@ Value bufferization::allocateTensorForShapedValue(OpBuilder &b, Location loc,
       populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
   }
 
-  return b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
-                                 copy ? tensor : Value(), escape);
+  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
+                                               copy ? tensor : Value());
+  allocTensorOp->setAttr(BufferizationDialect::kEscapeAttrName,
+                         b.getBoolArrayAttr({escape}));
+  return allocTensorOp;
 }
 
 LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp
index a1d880f3ba0c..1798346b7dee 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
 #include "mlir/Dialect/Bufferization/IR/Bufferization.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
@@ -27,6 +28,9 @@ constexpr const ::llvm::StringLiteral BufferizationDialect::kWritableAttrName;
 constexpr const ::llvm::StringLiteral
     BufferizationDialect::kBufferLayoutAttrName;
 
+/// Attribute name used to mark escaping behavior of buffer allocations.
+constexpr const ::llvm::StringLiteral BufferizationDialect::kEscapeAttrName;
+
 //===----------------------------------------------------------------------===//
 // Bufferization Dialect Interfaces
 //===----------------------------------------------------------------------===//
@@ -80,6 +84,37 @@ BufferizationDialect::verifyOperationAttribute(Operation *op,
                              << " to be used on function-like operations";
     return success();
   }
+  if (attr.getName() == kEscapeAttrName) {
+    auto arrayAttr = attr.getValue().dyn_cast<ArrayAttr>();
+    if (!arrayAttr)
+      return op->emitError() << "'" << kEscapeAttrName
+                             << "' is expected to be a bool array attribute";
+    if (arrayAttr.size() != op->getNumResults())
+      return op->emitError()
+             << "'" << kEscapeAttrName
+             << "' has wrong number of elements, expected "
+             << op->getNumResults() << ", got " << arrayAttr.size();
+    auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
+    if (!bufferizableOp)
+      return op->emitError()
+             << "'" << kEscapeAttrName << "' only valid on bufferizable ops";
+    for (const auto &it : llvm::enumerate(arrayAttr)) {
+      auto attr = it.value();
+      auto boolAttr = attr.dyn_cast<BoolAttr>();
+      if (!boolAttr)
+        return op->emitError() << "'" << kEscapeAttrName
+                               << "' is expected to be a bool array attribute";
+      if (!boolAttr.getValue())
+        continue;
+      if (!op->getResult(it.index()).getType().isa<TensorType>())
+        return op->emitError()
+               << "'" << kEscapeAttrName << "' only valid for tensor results";
+      if (!bufferizableOp.bufferizesToAllocation(op->getOpResult(it.index())))
+        return op->emitError() << "'" << kEscapeAttrName
+                               << "' only valid for allocation results";
+    }
+    return success();
+  }
 
   return op->emitError() << "attribute '" << attr.getName()
                          << "' not supported by the bufferization dialect";

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 9ab731db49cc..c76245fd6e39 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -152,6 +152,7 @@ void mlir::bufferization::populateDynamicDimSizes(
 LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
                                        const BufferizationOptions &options) {
   OpBuilder::InsertionGuard g(rewriter);
+  Operation *op = this->getOperation();
   Location loc = getLoc();
 
   // Nothing to do for dead AllocTensorOps.
@@ -185,8 +186,11 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
   // Should the buffer be deallocated?
   AnalysisState analysisState(options);
   bool dealloc;
-  if (getEscape()) {
-    dealloc = !*getEscape();
+  if (op->hasAttr(BufferizationDialect::kEscapeAttrName)) {
+    // AllocTensorOp has one result.
+    ArrayAttr escapeAttr =
+        op->getAttr(BufferizationDialect::kEscapeAttrName).cast<ArrayAttr>();
+    dealloc = !escapeAttr[0].cast<BoolAttr>().getValue();
   } else {
     // No "escape" annotation found.
     if (options.createDeallocs) {
@@ -251,20 +255,7 @@ LogicalResult AllocTensorOp::verify() {
 
 void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                           RankedTensorType type, ValueRange dynamicSizes) {
-  build(builder, result, type, dynamicSizes, /*copy=*/Value(),
-        /*escape=*/BoolAttr());
-}
-
-void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
-                          RankedTensorType type, ValueRange dynamicSizes,
-                          Value copy) {
-  build(builder, result, type, dynamicSizes, copy, /*escape=*/BoolAttr());
-}
-
-void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
-                          RankedTensorType type, ValueRange dynamicSizes,
-                          Value copy, bool escape) {
-  build(builder, result, type, dynamicSizes, copy, builder.getBoolAttr(escape));
+  build(builder, result, type, dynamicSizes, /*copy=*/Value());
 }
 
 namespace {
@@ -305,8 +296,7 @@ struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
     if (newType == op.getType())
       return failure();
     auto newOp = rewriter.create<AllocTensorOp>(
-        op.getLoc(), newType, newDynamicSizes, /*copy=*/Value(),
-        /*escape=*/op.getEscapeAttr());
+        op.getLoc(), newType, newDynamicSizes, /*copy=*/Value());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }

diff --git a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp
index 4fbe84c0a14d..cb320dc92f47 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp
@@ -44,20 +44,31 @@ LogicalResult
 mlir::bufferization::insertTensorCopies(Operation *op,
                                         const AnalysisState &state) {
   IRRewriter rewriter(op->getContext());
+  StringRef escapeAttrName = BufferizationDialect::kEscapeAttrName;
+
   WalkResult result = op->walk([&](Operation *op) {
     auto bufferizableOp = state.getOptions().dynCastBufferizableOp(op);
     if (!bufferizableOp)
       return WalkResult::skip();
 
-    // Find AllocTensorOps without an `escape` attribute and add the attribute
+    // Find allocations without an `escape` attribute and add the attribute
     // based on analysis results.
-    if (auto allocTensorOp = dyn_cast<AllocTensorOp>(op)) {
-      if (allocTensorOp.getEscape())
-        return WalkResult::advance();
-      bool escape = !state.getOptions().createDeallocs ||
-                    state.isTensorYielded(allocTensorOp.getResult());
-      allocTensorOp.setEscapeAttr(rewriter.getBoolAttr(escape));
-      return WalkResult::advance();
+    if (!op->hasAttr(escapeAttrName)) {
+      SmallVector<bool> escapeAttrValue;
+      bool foundTensorResult = false;
+      for (OpResult opResult : op->getOpResults()) {
+        if (!opResult.getType().isa<TensorType>() ||
+            !bufferizableOp.bufferizesToAllocation(opResult)) {
+          escapeAttrValue.push_back(false);
+          continue;
+        }
+        foundTensorResult = true;
+        bool escape = !state.getOptions().createDeallocs ||
+                      state.isTensorYielded(opResult);
+        escapeAttrValue.push_back(escape);
+      }
+      if (foundTensorResult)
+        op->setAttr(escapeAttrName, rewriter.getBoolArrayAttr(escapeAttrValue));
     }
 
     // Find inplacability conflicts and resolve them. (Typically with explicit

diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
index 93a11d809af3..26f5e2b0b551 100644
--- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -461,9 +461,8 @@ struct ForOpInterface
         yieldValues.push_back(value);
         continue;
       }
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          yieldOp.getLoc(), value.getType().cast<RankedTensorType>(),
-          /*dynamicSizes=*/ValueRange(), value, /*escape=*/true);
+      Value alloc = allocateTensorForShapedValue(rewriter, yieldOp.getLoc(),
+                                                 value, /*escape=*/true);
       yieldValues.push_back(alloc);
     }
 
@@ -673,9 +672,8 @@ struct WhileOpInterface
         beforeYieldValues.push_back(value);
         continue;
       }
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          conditionOp.getLoc(), value.getType().cast<RankedTensorType>(),
-          /*dynamicSizes=*/ValueRange(), value, /*escape=*/true);
+      Value alloc = allocateTensorForShapedValue(rewriter, conditionOp.getLoc(),
+                                                 value, /*escape=*/true);
       beforeYieldValues.push_back(alloc);
     }
     rewriter.updateRootInPlace(conditionOp, [&]() {
@@ -692,9 +690,8 @@ struct WhileOpInterface
         afterYieldValues.push_back(value);
         continue;
       }
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          yieldOp.getLoc(), value.getType().cast<RankedTensorType>(),
-          /*dynamicSizes=*/ValueRange(), value, /*escape=*/true);
+      Value alloc = allocateTensorForShapedValue(rewriter, yieldOp.getLoc(),
+                                                 value, /*escape=*/true);
       afterYieldValues.push_back(alloc);
     }
     rewriter.updateRootInPlace(yieldOp, [&]() {
@@ -938,13 +935,11 @@ struct ForeachThreadOpInterface
       if (state.isInPlace(*destOperands.front()))
         continue;
 
-      // Create AllocTensorOp.
+      // Insert tensor allocation.
       bool isYielded = state.isTensorYielded(opResult);
-      auto resultType = opResult.getType().cast<RankedTensorType>();
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          op->getLoc(), resultType, /*dynamicDims=*/ValueRange(),
-          /*copy=*/destOperands.front()->get(),
-          /*escape=*/isYielded);
+      Value alloc = allocateTensorForShapedValue(rewriter, op->getLoc(),
+                                                 destOperands.front()->get(),
+                                                 /*escape=*/isYielded);
 
       // Update terminator operand.
       rewriter.updateRootInPlace(destOperands.front()->getOwner(),

diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
index c36a0a69ca65..5bc08c69305f 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
@@ -9,9 +9,9 @@
 func.func @read_after_write_conflict(%t: tensor<?xf32>, %idx: index, %f: f32)
   -> (tensor<?xf32>, tensor<?xf32>)
 {
-  // CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[t]]) {escape = false} : tensor<?xf32>
-  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {escape = true} : tensor<?xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() copy(%{{.*}}) {escape = true} : tensor<?xf32>
+  // CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[t]]) {bufferization.escape = [false]} : tensor<?xf32>
+  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : tensor<?xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : tensor<?xf32>
   // CHECK: %[[insert:.*]] = tensor.insert %{{.*}} into %[[copy]]
   %0 = tensor.insert %f into %t[%idx] : tensor<?xf32>
   // CHECK: return %[[insert]], %[[t]]
@@ -24,9 +24,9 @@ func.func @read_after_write_conflict(%t: tensor<?xf32>, %idx: index, %f: f32)
 // CHECK-FUNC-LABEL: func @return_alloc_tensor
 // CHECK-NO-DEALLOC-LABEL: func @return_alloc_tensor
 func.func @return_alloc_tensor() -> (tensor<5xf32>) {
-  // CHECK: bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
-  // CHECK-FUNC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
+  // CHECK-FUNC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
   %0 = bufferization.alloc_tensor() : tensor<5xf32>
   return %0 : tensor<5xf32>
 }
@@ -38,12 +38,12 @@ func.func @return_alloc_tensor() -> (tensor<5xf32>) {
 func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
   -> (tensor<5xf32>, tensor<5xf32>)
 {
-  // CHECK: bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
   // The second alloc_tensor should not have a copy operand.
-  // CHECK: bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
 
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
   %0 = bufferization.alloc_tensor() : tensor<5xf32>
   %1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
   return %0, %1 : tensor<5xf32>, tensor<5xf32>
@@ -55,7 +55,7 @@ func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
 func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32)
   -> (tensor<5xf32>, tensor<5xf32>)
 {
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
   // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>)
   %r = linalg.generic {
     indexing_maps = [affine_map<(d0) -> (d0)>],
@@ -74,7 +74,7 @@ func.func @do_not_copy_when_result_not_read(%t: tensor<5xf32>, %f: f32)
   -> (tensor<3xf32>)
 {
   %0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32>
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {escape = false} : tensor<3xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<3xf32>
   // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>)
   %r = linalg.generic {
     indexing_maps = [affine_map<(d0) -> (d0)>],

diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir
index 64d873abb9a1..02ee6d4d25fb 100644
--- a/mlir/test/Dialect/Bufferization/invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/invalid.mlir
@@ -27,7 +27,31 @@ func.func @alloc_tensor_copy_and_dims(%t: tensor<?xf32>, %sz: index) {
 // -----
 
 func.func @alloc_tensor_invalid_escape_attr(%sz: index) {
-  // expected-error @+1{{op attribute 'escape' failed to satisfy constraint: bool attribute}}
-  %0 = bufferization.alloc_tensor(%sz) {escape = 5} : tensor<?xf32>
+  // expected-error @+1{{'bufferization.escape' is expected to be a bool array attribute}}
+  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = 5} : tensor<?xf32>
   return
 }
+
+// -----
+
+func.func @alloc_tensor_invalid_escape_attr_size(%sz: index) {
+  // expected-error @+1{{'bufferization.escape' has wrong number of elements, expected 1, got 2}}
+  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = [true, false]} : tensor<?xf32>
+  return
+}
+
+// -----
+
+func.func @escape_attr_non_allocating(%t0: tensor<?xf32>) {
+  // expected-error @+1{{'bufferization.escape' only valid for allocation results}}
+  %0 = tensor.extract_slice %t0[0][5][1] {bufferization.escape = [true]} : tensor<?xf32> to tensor<5xf32>
+  return
+}
+
+// -----
+
+func.func @escape_attr_non_bufferizable(%m0: memref<?xf32>) {
+  // expected-error @+1{{'bufferization.escape' only valid on bufferizable ops}}
+  %0 = memref.cast %m0 {bufferization.escape = [true]} : memref<?xf32> to memref<10xf32>
+  return
+}
\ No newline at end of file

diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir
index f0306b6acf69..63d5d88ba031 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir
@@ -7,8 +7,8 @@ func.func @scf_for(%A : tensor<?xf32>, %B : tensor<?xf32>,
                    %lb : index, %ub : index, %step : index)
   -> (tensor<?xf32>, tensor<?xf32>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<?xf32>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<?xf32>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<?xf32>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<?xf32>
   // CHECK:   %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[A_copy]], %[[iter2:.*]] = %[[B_copy]])
   %r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
       -> (tensor<?xf32>, tensor<?xf32>)
@@ -28,15 +28,15 @@ func.func @scf_for_swapping_yields(%A : tensor<?xf32>, %B : tensor<?xf32>,
                                    %lb : index, %ub : index, %step : index)
   -> (tensor<?xf32>, tensor<?xf32>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<?xf32>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<?xf32>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<?xf32>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<?xf32>
   // CHECK:   %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[A_copy]], %[[iter2:.*]] = %[[B_copy]])
   %r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
       -> (tensor<?xf32>, tensor<?xf32>)
   {
    // Yield tensors in different order.
-    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[iter2]]) {escape = true} : tensor<?xf32>
-    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[iter1]]) {escape = true} : tensor<?xf32>
+    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[iter2]]) {bufferization.escape = [true]} : tensor<?xf32>
+    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[iter1]]) {bufferization.escape = [true]} : tensor<?xf32>
     // CHECK: scf.yield %[[yield1]], %[[yield2]]
     scf.yield %tB, %tA : tensor<?xf32>, tensor<?xf32>
   }
@@ -51,8 +51,8 @@ func.func @scf_for_swapping_yields(%A : tensor<?xf32>, %B : tensor<?xf32>,
 func.func @scf_while(%A: tensor<5xi1>, %B: tensor<5xi1>, %idx: index)
   -> (tensor<5xi1>, tensor<5xi1>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<5xi1>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<5xi1>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<5xi1>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<5xi1>
   // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[A_copy]], %[[w1:.*]] = %[[B_copy]]) {{.*}} {
   %r0, %r1 = scf.while (%w0 = %A, %w1 = %B)
       : (tensor<5xi1>, tensor<5xi1>) -> (tensor<5xi1>, tensor<5xi1>) {
@@ -82,24 +82,24 @@ func.func @scf_while_non_equiv_condition_and_body(%A: tensor<5xi1>,
                                                   %idx: index)
   -> (tensor<5xi1>, tensor<5xi1>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<5xi1>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<5xi1>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<5xi1>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<5xi1>
   // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[A_copy]], %[[w1:.*]] = %[[B_copy]]) {{.*}} {
   %r0, %r1 = scf.while (%w0 = %A, %w1 = %B)
       : (tensor<5xi1>, tensor<5xi1>) -> (tensor<5xi1>, tensor<5xi1>) {
     // CHECK: %[[condition:.*]] = tensor.extract %[[w0]]
     %condition = tensor.extract %w0[%idx] : tensor<5xi1>
    // Yield tensors in different order.
-    // CHECK-DAG: %[[yield0:.*]] = bufferization.alloc_tensor() copy(%[[w1]]) {escape = true} : tensor<5xi1>
-    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[w0]]) {escape = true} : tensor<5xi1>
+    // CHECK-DAG: %[[yield0:.*]] = bufferization.alloc_tensor() copy(%[[w1]]) {bufferization.escape = [true]} : tensor<5xi1>
+    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[w0]]) {bufferization.escape = [true]} : tensor<5xi1>
     // CHECK: scf.condition(%[[condition]]) %[[yield0]], %[[yield1]]
     scf.condition(%condition) %w1, %w0 : tensor<5xi1>, tensor<5xi1>
   } do {
   ^bb0(%b0: tensor<5xi1>, %b1: tensor<5xi1>):
     // CHECK: } do {
     // CHECK: ^bb0(%[[b0:.*]]: tensor<5xi1>, %[[b1:.*]]: tensor<5xi1>):
-    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[b1]]) {escape = true} : tensor<5xi1>
-    // CHECK-DAG: %[[yield3:.*]] = bufferization.alloc_tensor() copy(%[[b0]]) {escape = true} : tensor<5xi1>
+    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[b1]]) {bufferization.escape = [true]} : tensor<5xi1>
+    // CHECK-DAG: %[[yield3:.*]] = bufferization.alloc_tensor() copy(%[[b0]]) {bufferization.escape = [true]} : tensor<5xi1>
     // CHECK: scf.yield %[[yield2]], %[[yield3]]
     // CHECK: }
     scf.yield %b1, %b0 : tensor<5xi1>, tensor<5xi1>
@@ -119,7 +119,7 @@ func.func @scf_foreach_thread_out_of_place(%in: tensor<100xf32>,
   %num_threads = arith.constant 100 : index
 
   // CHECK-FUNC-NOT: alloc_tensor
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[arg1]]) {escape = false} : tensor<100xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[arg1]]) {bufferization.escape = [false]} : tensor<100xf32>
   // CHECK: scf.foreach_thread
   %result = scf.foreach_thread (%thread_idx) in (%num_threads) -> tensor<100xf32> {
       // CHECK: tensor.extract_slice

diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir
index d399c65b441f..d4732976025f 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir
@@ -9,8 +9,8 @@ func.func @extract_slice(%t: tensor<?xf32>, %idx: index, %f: f32)
 {
   // CHECK: %[[extract_slice:.*]] = tensor.extract_slice %[[t]][10] [5] [1]
   %0 = tensor.extract_slice %t[10][5][1] : tensor<?xf32> to tensor<5xf32>
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[extract_slice]]) {escape = false} : tensor<5xf32>
-  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {escape = true} : tensor<5xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[extract_slice]]) {bufferization.escape = [false]} : tensor<5xf32>
+  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : tensor<5xf32>
   // CHECK: %[[insert:.*]] = tensor.insert %{{.*}} into %[[alloc]]
   %1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
   // CHECK: return %[[insert]], %[[t]]


        

