[Mlir-commits] [mlir] 74902cc - [mlir][linalg][NFC] Cleanup: Drop linalg.inplaceable attribute

Matthias Springer llvmlistbot at llvm.org
Thu Jul 14 06:53:38 PDT 2022


Author: Matthias Springer
Date: 2022-07-14T15:50:03+02:00
New Revision: 74902cc96faa04b8606f4eba59c51dac48cb924d

URL: https://github.com/llvm/llvm-project/commit/74902cc96faa04b8606f4eba59c51dac48cb924d
DIFF: https://github.com/llvm/llvm-project/commit/74902cc96faa04b8606f4eba59c51dac48cb924d.diff

LOG: [mlir][linalg][NFC] Cleanup: Drop linalg.inplaceable attribute

The bufferization.writable attribute is used instead in most cases. All remaining test cases are updated, and code that is no longer needed is deleted.

Differential Revision: https://reviews.llvm.org/D129739
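
For illustration, a minimal before/after sketch of the attribute change on a function argument (the function name and body here are hypothetical and not taken from this commit; the substitution itself mirrors the test updates below):

    // Before (deprecated linalg attribute):
    func.func @example(%t: tensor<?xf32> {linalg.inplaceable = true}) -> tensor<?xf32> {
      return %t : tensor<?xf32>
    }

    // After (bufferization dialect attribute):
    func.func @example(%t: tensor<?xf32> {bufferization.writable = true}) -> tensor<?xf32> {
      return %t : tensor<?xf32>
    }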

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
    mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
    mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
    mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
    mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
    mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
    mlir/test/Dialect/Transform/selective-targeting.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
index 49a5c9115e718..9f6d118ae6eb1 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
@@ -397,13 +397,6 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
           && !bufferizableOp.bufferizesToMemoryWrite(opOperand, state)
           && !bufferizableOp.getAliasingOpResult(opOperand, state).empty();
     }
-
-    // TODO: This attribute is deprecated. Use `bufferization.writable` or add
-    // a new attribute in a different dialect.
-    /// Attribute name used to mark region arguments that can be bufferized
-    /// in-place during one-shot bufferization.
-    constexpr const static ::llvm::StringLiteral
-        kInplaceableAttrName = "linalg.inplaceable";
   }];
 }
 

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 83d6fcc2e1f74..992715b431e9d 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -38,11 +38,6 @@ namespace bufferization {
 using namespace mlir;
 using namespace bufferization;
 
-/// Attribute name used to mark region arguments that can be bufferized
-/// in-place during linalg comprehensive bufferization.
-constexpr const ::llvm::StringLiteral
-    bufferization::BufferizableOpInterface::kInplaceableAttrName;
-
 /// Return the owner of the given value.
 static Operation *getOwnerOfValue(Value value) {
   if (auto opResult = value.dyn_cast<OpResult>())

diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
index 2ba78160f1817..9e6692999ea81 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -325,24 +325,6 @@ getFuncOpsOrderedByCalls(ModuleOp moduleOp,
   return success();
 }
 
-/// Set the attribute that triggers inplace bufferization on a FuncOp argument
-/// `bbArg`.
-static void setInPlaceFuncArgument(BlockArgument bbArg, bool inPlace) {
-  auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
-  funcOp.setArgAttr(bbArg.getArgNumber(),
-                    BufferizableOpInterface::kInplaceableAttrName,
-                    BoolAttr::get(bbArg.getContext(), inPlace));
-}
-
-/// Annotate the IR with the result of the analysis. For testing/debugging only.
-static void annotateOpsWithBufferizationMarkers(func::FuncOp funcOp,
-                                                const AnalysisState &state) {
-  auto bufferizableOp = cast<BufferizableOpInterface>(funcOp.getOperation());
-  for (BlockArgument bbArg : funcOp.getArguments())
-    if (bbArg.getType().isa<TensorType>())
-      setInPlaceFuncArgument(bbArg, bufferizableOp.isWritable(bbArg, state));
-}
-
 /// Fold return values that are memref casts and update function return types.
 ///
 /// During FuncOp bufferization, the exact type of the returned memrefs (if any)
@@ -413,10 +395,6 @@ mlir::bufferization::analyzeModuleOp(ModuleOp moduleOp,
 
     // Mark op as fully analyzed.
     funcState.analyzedFuncOps[funcOp] = FuncOpAnalysisState::Analyzed;
-
-    // Add annotations to function arguments.
-    if (options.testAnalysisOnly)
-      annotateOpsWithBufferizationMarkers(funcOp, state);
   }
 
   return success();

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
index 34b253d223d72..6d30f049d3d32 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
@@ -125,19 +125,6 @@ void mlir::linalg::LinalgDialect::initialize() {
 
 LogicalResult LinalgDialect::verifyOperationAttribute(Operation *op,
                                                       NamedAttribute attr) {
-  using bufferization::BufferizableOpInterface;
-
-  if (attr.getName() == BufferizableOpInterface::kInplaceableAttrName) {
-    if (!attr.getValue().isa<BoolAttr>()) {
-      return op->emitError()
-             << "'" << BufferizableOpInterface::kInplaceableAttrName
-             << "' is expected to be a boolean attribute";
-    }
-    if (!isa<FunctionOpInterface>(op))
-      return op->emitError() << "expected " << attr.getName()
-                             << " to be used on function-like operations";
-    return success();
-  }
   if (attr.getName() == LinalgDialect::kMemoizedIndexingMapsAttrName)
     return success();
   return op->emitError() << "attribute '" << attr.getName()

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
index 757417624e684..6794c9ac0067a 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
@@ -1220,8 +1220,8 @@ func.func @write_after_select_no_conflict(
 
 // CHECK-LABEL: func @write_to_same_tensor_in_loop_out_of_place(
 func.func @write_to_same_tensor_in_loop_out_of_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
-    %B : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
+    %B : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index)
   -> (tensor<?xf32>)
 {
@@ -1248,7 +1248,7 @@ func.func @write_to_same_tensor_in_loop_out_of_place(
 
 // CHECK-LABEL: func @write_to_same_alloc_tensor_in_place(
 func.func @write_to_same_alloc_tensor_in_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index)
   -> (tensor<?xf32>)
 {
@@ -1276,7 +1276,7 @@ func.func @write_to_same_alloc_tensor_in_place(
 
 // CHECK-LABEL: func @write_to_same_alloc_tensor_out_of_place(
 func.func @write_to_same_alloc_tensor_out_of_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index, %f: f32)
   -> (tensor<?xf32>)
 {

diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
index cbf8d97b40722..8e11adc5938d1 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
@@ -7,9 +7,9 @@
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1234(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -34,9 +34,9 @@ func.func @fill_extract_matmul_1234(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1243(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -61,9 +61,9 @@ func.func @fill_extract_matmul_1243(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1324(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -88,9 +88,9 @@ func.func @fill_extract_matmul_1324(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1342(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -115,9 +115,9 @@ func.func @fill_extract_matmul_1342(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1423(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -142,9 +142,9 @@ func.func @fill_extract_matmul_1423(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_1432(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -169,9 +169,9 @@ func.func @fill_extract_matmul_1432(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2134(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -196,9 +196,9 @@ func.func @fill_extract_matmul_2134(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2143(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -223,9 +223,9 @@ func.func @fill_extract_matmul_2143(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2314(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -250,9 +250,9 @@ func.func @fill_extract_matmul_2314(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2341(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -277,9 +277,9 @@ func.func @fill_extract_matmul_2341(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2413(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -304,9 +304,9 @@ func.func @fill_extract_matmul_2413(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_2431(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -331,9 +331,9 @@ func.func @fill_extract_matmul_2431(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3124(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -358,9 +358,9 @@ func.func @fill_extract_matmul_3124(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3142(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -385,9 +385,9 @@ func.func @fill_extract_matmul_3142(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3214(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})  -> tensor<256x256xf32>
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})  -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
@@ -411,9 +411,9 @@ func.func @fill_extract_matmul_3214(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3241(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -438,9 +438,9 @@ func.func @fill_extract_matmul_3241(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3412(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -465,9 +465,9 @@ func.func @fill_extract_matmul_3412(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_3421(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -492,9 +492,9 @@ func.func @fill_extract_matmul_3421(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4123(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -519,9 +519,9 @@ func.func @fill_extract_matmul_4123(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4132(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -546,9 +546,9 @@ func.func @fill_extract_matmul_4132(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4213(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -573,9 +573,9 @@ func.func @fill_extract_matmul_4213(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4231(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -600,9 +600,9 @@ func.func @fill_extract_matmul_4231(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4312(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index
@@ -627,9 +627,9 @@ func.func @fill_extract_matmul_4312(
 
 // CHECK-LABEL: func @fill_extract_matmul_
 func.func @fill_extract_matmul_4321(
-    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
-    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
+    %arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = false},
+    %arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferization.writable = true})
   -> tensor<256x256xf32>
 {
   %c0 = arith.constant 0 : index

diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
index 556c6fb0591fb..bde8ac7bb5aa5 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
@@ -577,7 +577,7 @@ func.func @scf_if_out_of_place3(%t1: tensor<?xf32> {bufferization.writable = tru
 
 // CHECK-LABEL: func @write_to_same_tensor_in_loop_in_place(
 func.func @write_to_same_tensor_in_loop_in_place(
-    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %A : tensor<?xf32> {bufferization.writable = true},
     %lb : index, %ub : index, %step : index, %sz: index)
   -> (tensor<?xf32>)
 {

diff --git a/mlir/test/Dialect/Transform/selective-targeting.mlir b/mlir/test/Dialect/Transform/selective-targeting.mlir
index 397a80aeff5eb..c25d001ff66ec 100644
--- a/mlir/test/Dialect/Transform/selective-targeting.mlir
+++ b/mlir/test/Dialect/Transform/selective-targeting.mlir
@@ -3,7 +3,7 @@
 // CHECK-LABEL: func.func @matmul_tensors_1(
 func.func @matmul_tensors_1(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // This operation is marked for tiling only.
   // CHECK-COUNT-3: scf.for
@@ -19,7 +19,7 @@ func.func @matmul_tensors_1(
 
 func.func @matmul_tensors_2(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // This operation is marked f
   // This operation is marked for tiling and vectorization.
@@ -37,7 +37,7 @@ func.func @matmul_tensors_2(
 
 func.func @matmul_tensors_3(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // This operation is marked for vectorization only.
   // CHECK-NOT: scf.for
@@ -89,7 +89,7 @@ transform.with_pdl_patterns {
 // CHECK-LABEL: @vectorize_one
 func.func @vectorize_one(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // CHECK: vector.contract
   %0 = linalg.matmul {test.attrA}
@@ -101,7 +101,7 @@ func.func @vectorize_one(
 
 func.func @vectorize_none(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>,
-  %arg2: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg2: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // CHECK: linalg.matmul
   %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>)
@@ -134,7 +134,7 @@ transform.with_pdl_patterns {
 // CHECK-LABEL: @vectorize_all
 func.func @vectorize_all(
   %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>,
-  %arg3: tensor<128x128xf32> {linalg.inplaceable = true})
+  %arg3: tensor<128x128xf32>)
     -> tensor<128x128xf32> {
   // CHECK: vector.contract
   %0 = linalg.matmul {test.attrA}

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
index db5a539fdee30..7bb9d70813563 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
@@ -9,7 +9,7 @@
 #map0 = affine_map<(d0, d1)[s0] -> ((d1 - d0) ceildiv s0)>
 #map1 = affine_map<(d0, d1)[s0] -> ((d0 - d1) ceildiv s0)>
 
-func.func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f32> {linalg.inplaceable = true}) -> tensor<f32> {
+func.func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f32>) -> tensor<f32> {
   %c64 = arith.constant 64 : index
   %cst = arith.constant 0.000000e+00 : f32
   %c2 = arith.constant 2 : index

More information about the Mlir-commits mailing list