[Mlir-commits] [mlir] ec55f0b - [mlir][bufferization][NFC] Improve assembly format of AllocTensorOp

Matthias Springer llvmlistbot at llvm.org
Mon May 23 07:59:04 PDT 2022


Author: Matthias Springer
Date: 2022-05-23T16:58:01+02:00
New Revision: ec55f0bd5833ed3d64a49176a13075aee1965911

URL: https://github.com/llvm/llvm-project/commit/ec55f0bd5833ed3d64a49176a13075aee1965911
DIFF: https://github.com/llvm/llvm-project/commit/ec55f0bd5833ed3d64a49176a13075aee1965911.diff

LOG: [mlir][bufferization][NFC] Improve assembly format of AllocTensorOp

No longer pass static dim sizes as an attribute. This was redundant and required extra checks in the verifier. This change also makes the op symmetrical to memref::AllocOp.

Differential Revision: https://reviews.llvm.org/D126178
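
The syntax change at a glance (a minimal sketch distilled from the test
updates below; %d0 and %d1 are hypothetical index values):

  // Old: static and dynamic sizes mixed in one bracketed list, with the
  // static sizes duplicated in a `static_sizes` attribute.
  %0 = bufferization.alloc_tensor [4, %d0, %d1, 5] : tensor<4x?x?x5xf32>

  // New: only dynamic sizes are operands; static sizes are read off the
  // result type, mirroring memref.alloc.
  %0 = bufferization.alloc_tensor(%d0, %d1) : tensor<4x?x?x5xf32>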

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
    mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
    mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
    mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
    mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir
    mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
    mlir/test/Dialect/Bufferization/canonicalize.mlir
    mlir/test/Dialect/Bufferization/invalid.mlir
    mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
    mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir
    mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
    mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
    mlir/test/Dialect/SCF/one-shot-bufferize.mlir
    mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
    mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index 38a4d39d0ce2d..4ae773af33d7c 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -38,15 +38,11 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
     decisions during One-Shot Bufferize.
   }];
 
-  let arguments =
-    (ins Variadic<Index>:$sizes, I64ArrayAttr:$static_sizes);
+  let arguments = (ins Variadic<Index>:$dynamicSizes);
 
   let results = (outs AnyTensor:$result);
 
-  let assemblyFormat = [{
-    custom<OperandsOrIntegersSizesList>($sizes, $static_sizes) attr-dict
-    `:` type($result)
-  }];
+  let assemblyFormat = "`(`$dynamicSizes`)` attr-dict `:` type($result)";
 
   let extraClassDeclaration = [{
     LogicalResult bufferize(RewriterBase &rewriter, BufferizationState &state);
@@ -56,81 +52,41 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
       return false;
     }
 
-    static StringRef getStaticSizesAttrName() {
-      return "static_sizes";
-    }
-
     RankedTensorType getType() {
       return getResult().getType().cast<RankedTensorType>();
     }
 
-    // Infer the shape of the result tensor given the static shapes
-    // and element type of the result tensor.
-    static Type inferResultType(ArrayRef<int64_t> staticSizes, Type elementType,
-                                Attribute encoding = {});
-
     // Return true if the size of the tensor is dynamic at `idx`
-    bool isDynamicSize(unsigned idx) {
-      APInt v = *(static_sizes().getAsValueRange<IntegerAttr>().begin() + idx);
-      return ShapedType::isDynamic(v.getSExtValue());
-    }
-
-    // Assert that the size of the result tensor is static at `idx`
-    // and return the shape.
-    int64_t getStaticSize(unsigned idx) {
-      assert(!isDynamicSize(idx) && "expected static size");
-      APInt v = *(static_sizes().
-          template getAsValueRange<IntegerAttr>().begin() + idx);
-        return v.getSExtValue();
+    bool isDynamicDim(unsigned idx) {
+      return getType().isDynamicDim(idx);
     }
 
     // Return the argument position that contains the dynamic size of
     // the tensor at dimension `idx`. Asserts that the shape is
     // dynamic at that `idx`.
     unsigned getIndexOfDynamicSize(unsigned idx) {
-      assert(isDynamicSize(idx) && "expected dynamic size");
+      assert(isDynamicDim(idx) && "expected dynamic size");
+      ArrayRef<int64_t> shape = getType().getShape();
       return std::count_if(
-          static_sizes().getValue().begin(),
-          static_sizes().getValue().begin() + idx,
-          [&](Attribute attr) {
-            return ShapedType::isDynamic(attr.cast<IntegerAttr>().getInt());
-          });
+          shape.begin(), shape.begin() + idx,
+          [&](int64_t size) { return ShapedType::isDynamic(size); });
     }
 
-    // Return both static and dynamic sizes as a list of `OpFoldResult`.
-    SmallVector<OpFoldResult> getMixedSizes();
-
     // Return the Value of the dynamic size of the tensor at dimension
     // `idx`. Asserts that the shape is dynamic at that `idx`.
     Value getDynamicSize(unsigned idx) {
       return getOperand(getIndexOfDynamicSize(idx));
     }
-  }];
 
-  let builders = [
-    OpBuilder<(ins "ValueRange":$shape,
-                  "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
-    [{
-      build($_builder, $_state,
-            AllocTensorOp::inferResultType(staticShape, elementType),
-            shape, $_builder.getI64ArrayAttr(staticShape));
-    }]>,
-    OpBuilder<(ins "ValueRange":$shape, "Type":$elementType),
-    [{
-      SmallVector<int64_t, 4> staticShape(
-        shape.size(), ShapedType::kDynamicSize);
-      build($_builder, $_state, shape, staticShape, elementType);
-    }]>,
-    OpBuilder<(ins "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
-    [{
-      build($_builder, $_state, ValueRange{}, staticShape, elementType);
-    }]>,
-    OpBuilder<(ins "ArrayRef<OpFoldResult>":$sizes, "Type":$elementType,
-      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
-  ];
+    // Assert that the size of the result tensor is static at `idx`
+    // and return the shape.
+    int64_t getStaticSize(unsigned idx) {
+      assert(!isDynamicDim(idx) && "expected static size");
+      return getType().getShape()[idx];
+    }
+  }];
 
   let hasCanonicalizer = 1;
-  let hasCustomAssemblyFormat = 1;
   let hasVerifier = 1;
 }
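
The new helpers derive everything from the result type rather than from a
separate attribute. A standalone C++ sketch of the dim-to-operand mapping
that getIndexOfDynamicSize performs (an illustrative model only, assuming
a negative extent marks a dynamic dim, as ShapedType::kDynamicSize did at
the time):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <vector>

  // For tensor<4x?x?x5xf32> the shape is {4, -1, -1, 5}. Dim 2 maps to
  // operand #1 because exactly one dynamic dim (dim 1) precedes it.
  unsigned getIndexOfDynamicSize(const std::vector<int64_t> &shape,
                                 unsigned idx) {
    auto isDynamic = [](int64_t size) { return size < 0; };
    assert(isDynamic(shape[idx]) && "expected dynamic size");
    return std::count_if(shape.begin(), shape.begin() + idx, isDynamic);
  }

  int main() {
    std::vector<int64_t> shape = {4, -1, -1, 5};
    return getIndexOfDynamicSize(shape, 2) == 1 ? 0 : 1;
  }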
 

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 00f0a74f73924..d9eb0bb2118db 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -12,6 +12,7 @@
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/IR/Matchers.h"
 
 using namespace mlir;
 using namespace mlir::bufferization;
@@ -145,62 +146,14 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
   return success();
 }
 
-void AllocTensorOp::build(OpBuilder &b, OperationState &result,
-                          ArrayRef<OpFoldResult> sizes, Type elementType,
-                          ArrayRef<NamedAttribute> attrs) {
-  SmallVector<Value, 4> dynamicSizes;
-  SmallVector<int64_t, 4> staticSizes;
-  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  auto resultType = RankedTensorType ::get(staticSizes, elementType);
-  build(b, result, resultType, dynamicSizes, b.getI64ArrayAttr(staticSizes));
-  result.addAttributes(attrs);
-}
-
 LogicalResult AllocTensorOp::verify() {
-  RankedTensorType resultType = getType();
-  SmallVector<int64_t, 4> staticSizes = llvm::to_vector<4>(llvm::map_range(
-      static_sizes().cast<ArrayAttr>(),
-      [](Attribute a) -> int64_t { return a.cast<IntegerAttr>().getInt(); }));
-
-  if (failed(verifyListOfOperandsOrIntegers(
-          *this, "sizes", resultType.getRank(), static_sizes(), sizes(),
-          ShapedType::isDynamic)))
-    return failure();
-
-  if (static_sizes().size() != static_cast<unsigned>(resultType.getRank()))
-    return emitError("expected ") << resultType.getRank() << " sizes values";
-
-  Type expectedType = AllocTensorOp::inferResultType(
-      staticSizes, resultType.getElementType(), resultType.getEncoding());
-  if (resultType != expectedType) {
-    return emitError("specified type ")
-           << resultType << " does not match the inferred type "
-           << expectedType;
-  }
+  if (getType().getNumDynamicDims() !=
+      static_cast<int64_t>(dynamicSizes().size()))
+    return emitError("expected ")
+           << getType().getNumDynamicDims() << " dynamic sizes";
   return success();
 }
 
-Type AllocTensorOp::inferResultType(ArrayRef<int64_t> staticSizes,
-                                    Type elementType, Attribute encoding) {
-  return RankedTensorType::get(staticSizes, elementType, encoding);
-}
-
-SmallVector<OpFoldResult> AllocTensorOp::getMixedSizes() {
-  SmallVector<OpFoldResult> mixedSizes;
-  mixedSizes.reserve(getType().getRank());
-  unsigned dynamicValIndex = 0;
-  for (Attribute attr : static_sizes()) {
-    auto intAttr = attr.cast<IntegerAttr>();
-    if (!ShapedType::isDynamic(intAttr.getInt())) {
-      mixedSizes.push_back(intAttr);
-      continue;
-    }
-    mixedSizes.push_back(sizes()[dynamicValIndex++]);
-  }
-  return mixedSizes;
-}
-
 namespace {
 /// Change the type of the result of a `bufferization.alloc_tensor` by making
 /// the result type statically sized along dimension that in the original
@@ -208,46 +161,36 @@ namespace {
 /// `constant` op. For example:
 ///
 ///  %c5 = arith.constant 5: index
-///  %0 = bufferization.alloc_tensor [%arg0, %c5] : tensor<?x?xf32>
+///  %0 = bufferization.alloc_tensor(%arg0, %c5) : tensor<?x?xf32>
 ///
 ///  to
 ///
-///  %0 = bufferization.alloc_tensor [%arg0, 5] : tensor<?x5xf32>
+///  %0 = bufferization.alloc_tensor(%arg0) : tensor<?x5xf32>
 struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
   using OpRewritePattern<AllocTensorOp>::OpRewritePattern;
 
   LogicalResult matchAndRewrite(AllocTensorOp op,
                                 PatternRewriter &rewriter) const override {
-    SmallVector<Value, 4> dynamicSizes;
-    SmallVector<int64_t, 4> staticSizes;
-    for (unsigned i = 0, e = op.getType().getRank(); i != e; ++i) {
-      // If the size is already static, nothing to do.
-      if (!op.isDynamicSize(i)) {
-        staticSizes.push_back(op.getStaticSize(i));
+    SmallVector<int64_t> newShape = llvm::to_vector(op.getType().getShape());
+    SmallVector<Value> newDynamicSizes;
+    unsigned int dynValCounter = 0;
+    for (int64_t i = 0; i < op.getType().getRank(); ++i) {
+      if (!op.isDynamicDim(i))
         continue;
+      Value value = op.dynamicSizes()[dynValCounter++];
+      APInt intVal;
+      if (matchPattern(value, m_ConstantInt(&intVal))) {
+        newShape[i] = intVal.getSExtValue();
+      } else {
+        newDynamicSizes.push_back(value);
       }
-
-      // If the size is dynamic but defined using a `constant` op, get the
-      // constant value to find the static size to use.
-      unsigned operandNum = op.getIndexOfDynamicSize(i);
-      Value sizeOperand = op.getOperand(operandNum);
-      if (auto constantIndexOp =
-              sizeOperand.getDefiningOp<arith::ConstantIndexOp>()) {
-        staticSizes.push_back(constantIndexOp.value());
-        continue;
-      }
-
-      // Fallback case. Keep the size dynamic.
-      dynamicSizes.push_back(sizeOperand);
-      staticSizes.push_back(ShapedType::kDynamicSize);
     }
-    RankedTensorType newType =
-        RankedTensorType::get(staticSizes, op.getType().getElementType());
+    RankedTensorType newType = RankedTensorType::get(
+        newShape, op.getType().getElementType(), op.getType().getEncoding());
     if (newType == op.getType())
       return failure();
     auto newOp =
-        rewriter.create<AllocTensorOp>(op.getLoc(), newType, dynamicSizes,
-                                       rewriter.getI64ArrayAttr(staticSizes));
+        rewriter.create<AllocTensorOp>(op.getLoc(), newType, newDynamicSizes);
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
@@ -262,7 +205,7 @@ struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {
     auto allocTensorOp = dimOp.source().getDefiningOp<AllocTensorOp>();
     if (!allocTensorOp || !maybeConstantIndex)
       return failure();
-    if (!allocTensorOp.isDynamicSize(*maybeConstantIndex))
+    if (!allocTensorOp.getType().isDynamicDim(*maybeConstantIndex))
       return failure();
     rewriter.replaceOp(dimOp,
                        allocTensorOp.getDynamicSize(*maybeConstantIndex));
@@ -280,7 +223,7 @@ LogicalResult AllocTensorOp::reifyResultShapes(
     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
   auto shapes = llvm::to_vector<4>(llvm::map_range(
       llvm::seq<int64_t>(0, getType().getRank()), [&](int64_t dim) -> Value {
-        if (isDynamicSize(dim))
+        if (isDynamicDim(dim))
           return getDynamicSize(dim);
         return builder.create<arith::ConstantIndexOp>(getLoc(),
                                                       getStaticSize(dim));
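
In IR terms, the reworked ReplaceStaticShapeDims pattern now walks only
the dynamic dims, folding any size that is defined by an integer constant
(matched via matchPattern/m_ConstantInt) into the result type and casting
back to the original type. A sketch mirroring the canonicalize.mlir test
below:

  %c6 = arith.constant 6 : index
  %0 = bufferization.alloc_tensor(%c6) : tensor<4x5x?xf32>

  // ... canonicalizes to:

  %0 = bufferization.alloc_tensor() : tensor<4x5x6xf32>
  %1 = tensor.cast %0 : tensor<4x5x6xf32> to tensor<4x5x?xf32>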

diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
index aa6672860ec28..fec5cb6b56a2b 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -32,7 +32,7 @@
 // Example: `foo` fails bufferization because %0 is not equivalent to any bbArg.
 // ```
 // func @foo() -> tensor<?xf32> {
-//   %0 = linalg.alloc_tensor [...] : tensor<?xf32>
+//   %0 = bufferization.alloc_tensor(...) : tensor<?xf32>
 //   return %0 : tensor<?xf32>
 // }
 // ```

diff --git a/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp b/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
index e1d74e99242e3..0f926f5a05281 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
@@ -23,8 +23,8 @@ struct InitTensorLoweringPattern : public OpRewritePattern<InitTensorOp> {
 
   LogicalResult matchAndRewrite(InitTensorOp op,
                                 PatternRewriter &rewriter) const override {
-    rewriter.replaceOpWithNewOp<bufferization::AllocTensorOp>(
-        op, op.getMixedSizes(), op.getType().getElementType());
+    rewriter.replaceOpWithNewOp<bufferization::AllocTensorOp>(op, op.getType(),
+                                                              op.sizes());
     return success();
   }
 };

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir
index 76c835f04e5f5..dd7dd54b3a702 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir
@@ -18,7 +18,7 @@ func.func @buffer_forwarding_conflict(
   // insert_slice. AllocTensorOp replaces the alloc_tensor with an out-of-place
   // extract_slice.
   //     CHECK: %[[EXTRACT_SLICE_ALLOC:.*]] = memref.alloc(%[[sz]])
-  %a = bufferization.alloc_tensor[%sz] : tensor<?xf32>
+  %a = bufferization.alloc_tensor(%sz) : tensor<?xf32>
 
   //     CHECK: linalg.fill ins({{.*}} : f32) outs(%[[EXTRACT_SLICE_ALLOC]] : memref<?xf32>)
   %f = linalg.fill ins(%f0 : f32) outs(%a : tensor<?xf32>) -> tensor<?xf32>
@@ -50,7 +50,7 @@ func.func @buffer_forwarding_no_conflict(
   // alloc_tensor itself does not alloc but forwards to the insert_slice.
   // InitTensorOp replaces the alloc_tensor with an inplace extract_slice.
   // CHECK: %[[T_SUBVIEW:.*]] =  memref.subview %[[FUNC_ARG]][42] [%[[sz]]] [1]
-  %a = bufferization.alloc_tensor[%sz] : tensor<?xf32>
+  %a = bufferization.alloc_tensor(%sz) : tensor<?xf32>
 
   // CHECK: linalg.fill ins({{.*}} : f32) outs(%[[T_SUBVIEW]] : memref<?xf32
   %f = linalg.fill ins(%f0 : f32) outs(%a : tensor<?xf32>) -> tensor<?xf32>
@@ -71,7 +71,7 @@ func.func @insertion_point_inside_loop(%t : tensor<?xf32>, %sz : index) -> (tens
   %c5 = arith.constant 5 : index
 
   // CHECK-NOT: memref.alloc
-  %blank = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %blank = bufferization.alloc_tensor() : tensor<5xf32>
 
   // CHECK: scf.for %[[iv:.*]] = %{{.*}} to %[[sz]] step %{{.*}} {
   %r = scf.for %iv = %c0 to %sz step %c5 iter_args(%bb = %t) -> (tensor<?xf32>) {
@@ -102,7 +102,7 @@ func.func @insertion_point_outside_loop(%t : tensor<?xf32>, %sz : index,
 
   // CHECK-NOT: memref.alloc
   // CHECK: %[[subview:.*]] = memref.subview %[[t]][%[[idx]]] [5] [1]
-  %blank = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %blank = bufferization.alloc_tensor() : tensor<5xf32>
 
   // CHECK: scf.for %[[iv:.*]] = %{{.*}} to %[[sz]] step %{{.*}} {
   %r = scf.for %iv = %c0 to %sz step %c5 iter_args(%bb = %t) -> (tensor<?xf32>) {

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
index 56c75fefe3ce2..9fff7f990b391 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
@@ -16,7 +16,7 @@ func.func @buffer_not_deallocated(%t : tensor<?xf32>, %c : i1) -> tensor<?xf32>
     // CHECK-NOT: dealloc
     // CHECK: scf.yield %[[casted]]
     %sz = "test.some_op"() : () -> (index)
-    %0 = bufferization.alloc_tensor[%sz] : tensor<?xf32>
+    %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
     scf.yield %0 : tensor<?xf32>
   } else {
   // CHECK: } else {

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
index 06de89c0d8ef8..1eb1b4cac9f61 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
@@ -142,7 +142,7 @@ func.func @unknown_op_may_read(%v: vector<5xf32>)
   // bufferizes out-of-place.
   // CHECK: %[[m1:.*]] = memref.alloc() {{.*}} : memref<10xf32>
   // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<10xf32>
-  %t1 = bufferization.alloc_tensor [10] : tensor<10xf32>
+  %t1 = bufferization.alloc_tensor() : tensor<10xf32>
 
   // CHECK: linalg.fill ins(%{{.*}}{{.*}}outs(%[[m1]]
   // CHECK: %[[filled_tensor:.*]] = bufferization.to_tensor %[[m1]]

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
index 74382aef3fa72..e2cda814a8d15 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -44,7 +44,7 @@ func.func @return_tensor(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf3
 // CHECK-LABEL: func @func_without_tensor_args
 func.func @func_without_tensor_args(%v : vector<10xf32>) -> () {
   // CHECK: %[[alloc:.*]] = memref.alloc()
-  %0 = bufferization.alloc_tensor[10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>
 
   %c0 = arith.constant 0 : index
   // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
@@ -97,7 +97,7 @@ func.func @read_after_write_conflict(%cst : f32, %idx : index, %idx2 : index)
 // CHECK-LABEL: func @copy_deallocated(
 func.func @copy_deallocated() -> tensor<10xf32> {
   // CHECK: %[[alloc:.*]] = memref.alloc()
-  %0 = bufferization.alloc_tensor[10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>
   // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]]
   // CHECK: memref.dealloc %[[alloc]]
   // CHECK: return %[[alloc_tensor]]
@@ -111,7 +111,7 @@ func.func @copy_deallocated() -> tensor<10xf32> {
 func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %c: i1) -> tensor<?xf32> {
   // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?xf32, #{{.*}}>
   // CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<?xf32>
-  %0 = bufferization.alloc_tensor [%sz] : tensor<?xf32>
+  %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
 
   // A cast must be inserted because %t and %0 have different memref types.
   // CHECK: %[[casted:.*]] = memref.cast %[[alloc]] : memref<?xf32> to memref<?xf32, #{{.*}}>

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
index 60b30cee6f6a7..044392a266752 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
@@ -17,7 +17,7 @@
 //       CHECK:   %[[alloc:.*]] = memref.alloc
 //       CHECK:   return %[[alloc]]
 func.func @create_tensor() -> tensor<10xf32> {
-  %0 = bufferization.alloc_tensor [10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>
   return %0 : tensor<10xf32>
 }
 

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
index 3bade5c3bf062..757417624e684 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
@@ -682,7 +682,7 @@ func.func @matmul_on_tensors(
   %cst_0 = arith.constant 0.000000e+00 : f32
   %cst_1 = arith.constant 1.000000e+00 : f32
 
-  %7 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %7 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   //      CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false"]}
@@ -720,7 +720,7 @@ func.func @matmul_on_tensors(
   %cst_0 = arith.constant 0.000000e+00 : f32
   %cst_1 = arith.constant 1.000000e+00 : f32
 
-  %7 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %7 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   //     CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false"]}
@@ -1252,7 +1252,7 @@ func.func @write_to_same_alloc_tensor_in_place(
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index)
   -> (tensor<?xf32>)
 {
-  %B = bufferization.alloc_tensor [%sz2] : tensor<?xf32>
+  %B = bufferization.alloc_tensor(%sz2) : tensor<?xf32>
 
   // CHECK: scf.for {{.*}} {
   %r0 = scf.for %i = %lb to %ub step %step iter_args(%t = %A) -> (tensor<?xf32>) {
@@ -1280,7 +1280,7 @@ func.func @write_to_same_alloc_tensor_out_of_place(
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index, %f: f32)
   -> (tensor<?xf32>)
 {
-  %B = bufferization.alloc_tensor [%sz2] : tensor<?xf32>
+  %B = bufferization.alloc_tensor(%sz2) : tensor<?xf32>
   %C = tensor.insert %f into %B[%lb] : tensor<?xf32>
 
   // CHECK: scf.for {{.*}} {

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir
index 33983d19b9bca..140f67b7c3024 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir
@@ -60,7 +60,7 @@ func.func @scf_if_not_aliasing(
     scf.yield %t1 : tensor<?xf32>
   } else {
     // This buffer aliases.
-    %t2 = bufferization.alloc_tensor [%idx] : tensor<?xf32>
+    %t2 = bufferization.alloc_tensor(%idx) : tensor<?xf32>
     // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
     scf.yield %t2 : tensor<?xf32>
   }
@@ -221,7 +221,7 @@ func.func @unknown_op(%A : tensor<4xf32>) -> tensor<4xf32>
 
 func.func @mini_test_case1() -> tensor<10x20xf32> {
   %f0 = arith.constant 0.0 : f32
-  %t = bufferization.alloc_tensor [10, 20] : tensor<10x20xf32>
+  %t = bufferization.alloc_tensor() : tensor<10x20xf32>
   %r = linalg.fill ins(%f0 : f32) outs(%t : tensor<10x20xf32>) -> tensor<10x20xf32>
   // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
   return %r : tensor<10x20xf32>
@@ -274,7 +274,7 @@ func.func @call_to_unknown_tensor_returning_func(%t : tensor<?xf32>) {
 // -----
 
 func.func @foo(%t : tensor<5xf32>) -> (tensor<5xf32>) {
-  %0 = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %0 = bufferization.alloc_tensor() : tensor<5xf32>
   // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
   return %0 : tensor<5xf32>
 }
@@ -291,7 +291,7 @@ func.func @call_to_func_returning_non_equiv_tensor(%t : tensor<5xf32>) {
 func.func @destination_passing_style_dominance_test_1(%cst : f32, %idx : index,
                                                  %idx2 : index) -> f32 {
   %0 = scf.execute_region -> tensor<?xf32> {
-    %1 = bufferization.alloc_tensor [%idx] : tensor<?xf32>
+    %1 = bufferization.alloc_tensor(%idx) : tensor<?xf32>
     // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
     scf.yield %1 : tensor<?xf32>
   }
@@ -304,7 +304,7 @@ func.func @destination_passing_style_dominance_test_1(%cst : f32, %idx : index,
 
 func.func @destination_passing_style_dominance_test_2(%cst : f32, %idx : index,
                                                  %idx2 : index) -> f32 {
-  %1 = bufferization.alloc_tensor [%idx] : tensor<?xf32>
+  %1 = bufferization.alloc_tensor(%idx) : tensor<?xf32>
 
   %0 = scf.execute_region -> tensor<?xf32> {
     // This YieldOp is in destination-passing style, thus no error.

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
index f6ba5c9f3f9a3..d617a29c03642 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
@@ -70,7 +70,7 @@ func.func private @external_func_with_return_val(tensor<4xi32>) -> f32
 //  CHECK-FULLY-DYNAMIC-LAYOUT-MAP-SAME: #[[$map2a]]> {
 func.func @return_extract_slice(%idx: index, %sz: index) -> (tensor<2x?xf32>)
 {
-  %t = bufferization.alloc_tensor [20, 10] : tensor<20x10xf32>
+  %t = bufferization.alloc_tensor() : tensor<20x10xf32>
   %0 = tensor.extract_slice %t[%idx, %idx][2, %sz][1, 1]
       : tensor<20x10xf32> to tensor<2x?xf32>
   return %0 : tensor<2x?xf32>
@@ -120,7 +120,7 @@ func.func @main(%t: tensor<?xf32> {bufferization.writable = false}) -> (f32) {
 // CHECK-LABEL: func @func_without_tensor_args
 func.func @func_without_tensor_args(%v : vector<10xf32>) -> () {
   // CHECK: %[[alloc:.*]] = memref.alloc()
-  %0 = bufferization.alloc_tensor[10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>
 
   %c0 = arith.constant 0 : index
   // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
@@ -456,9 +456,9 @@ func.func @main() {
   //  CHECK-DAG:   %[[cA:.*]] = memref.cast %[[A]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   //  CHECK-DAG:   %[[cB:.*]] = memref.cast %[[B]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   //  CHECK-DAG:   %[[cC:.*]] = memref.cast %[[C]] : memref<f32> to memref<f32, #[[$DYN_0D_MAP]]>
-  %A = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %B = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %C = bufferization.alloc_tensor [] : tensor<f32>
+  %A = bufferization.alloc_tensor() : tensor<64xf32>
+  %B = bufferization.alloc_tensor() : tensor<64xf32>
+  %C = bufferization.alloc_tensor() : tensor<f32>
 
   //  CHECK-DAG:   linalg.fill ins(%[[C1]] : f32) outs(%[[A]] : memref<64xf32>)
   //  CHECK-DAG:   linalg.fill ins(%[[C2]] : f32) outs(%[[B]] : memref<64xf32>)

diff --git a/mlir/test/Dialect/Bufferization/canonicalize.mlir b/mlir/test/Dialect/Bufferization/canonicalize.mlir
index 949024369836d..c41cebd77a9ae 100644
--- a/mlir/test/Dialect/Bufferization/canonicalize.mlir
+++ b/mlir/test/Dialect/Bufferization/canonicalize.mlir
@@ -249,10 +249,10 @@ func.func @load_from_buffer_cast(%arg0: index, %arg1: index,
 
 func.func @alloc_tensor_canonicalize() -> (tensor<4x5x?xf32>) {
   %c6 = arith.constant 6 : index
-  %0 = bufferization.alloc_tensor [4, 5, %c6] : tensor<4x5x?xf32>
+  %0 = bufferization.alloc_tensor(%c6) : tensor<4x5x?xf32>
   return %0 : tensor<4x5x?xf32>
 }
 // CHECK: func @alloc_tensor_canonicalize
-// CHECK:   %[[T0:.+]] = bufferization.alloc_tensor [4, 5, 6] : tensor<4x5x6xf32>
+// CHECK:   %[[T0:.+]] = bufferization.alloc_tensor() : tensor<4x5x6xf32>
 // CHECK:   %[[T1:.+]] = tensor.cast %[[T0]] : tensor<4x5x6xf32> to tensor<4x5x?xf32>
 // CHECK:   return %[[T1]]

diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir
index f461f8a6e7677..9e732b9bc6e48 100644
--- a/mlir/test/Dialect/Bufferization/invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/invalid.mlir
@@ -1,26 +1,8 @@
 // RUN: mlir-opt %s -split-input-file -verify-diagnostics
 
-func.func @alloc_tensor_err(%arg0 : index, %arg1 : index)
-{
-  // expected-error @+1 {{specified type 'tensor<4x?x?x5xf32>' does not match the inferred type 'tensor<4x5x?x?xf32>'}}
-  %1 = bufferization.alloc_tensor [4, 5, %arg0, %arg1] : tensor<4x?x?x5xf32>
-  return
-}
-
-// -----
-
-func.func @alloc_tensor_err(%arg0 : index)
-{
-  // expected-error @+1 {{expected 4 sizes values}}
-  %1 = bufferization.alloc_tensor [4, 5, %arg0] : tensor<4x?x?x5xf32>
-  return
-}
-
-// -----
-
 func.func @alloc_tensor_err(%arg0 : index)
 {
-  // expected-error @+1 {{expected 2 dynamic sizes values}}
-  %1 = "bufferization.alloc_tensor"(%arg0) {static_sizes = [4, -1, -1, 5]} : (index) -> tensor<4x?x?x5xf32>
+  // expected-error @+1 {{expected 2 dynamic sizes}}
+  %1 = bufferization.alloc_tensor(%arg0) : tensor<4x?x?x5xf32>
   return
 }

diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
index b5e2ee8e1e135..cbf8d97b40722 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
@@ -15,7 +15,7 @@ func.func @fill_extract_matmul_1234(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -42,7 +42,7 @@ func.func @fill_extract_matmul_1243(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -69,7 +69,7 @@ func.func @fill_extract_matmul_1324(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -96,7 +96,7 @@ func.func @fill_extract_matmul_1342(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -123,7 +123,7 @@ func.func @fill_extract_matmul_1423(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -150,7 +150,7 @@ func.func @fill_extract_matmul_1432(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -177,7 +177,7 @@ func.func @fill_extract_matmul_2134(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -204,7 +204,7 @@ func.func @fill_extract_matmul_2143(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -231,7 +231,7 @@ func.func @fill_extract_matmul_2314(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -258,7 +258,7 @@ func.func @fill_extract_matmul_2341(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -285,7 +285,7 @@ func.func @fill_extract_matmul_2413(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -312,7 +312,7 @@ func.func @fill_extract_matmul_2431(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -339,7 +339,7 @@ func.func @fill_extract_matmul_3124(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -366,7 +366,7 @@ func.func @fill_extract_matmul_3142(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -392,7 +392,7 @@ func.func @fill_extract_matmul_3214(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -419,7 +419,7 @@ func.func @fill_extract_matmul_3241(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -446,7 +446,7 @@ func.func @fill_extract_matmul_3412(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -473,7 +473,7 @@ func.func @fill_extract_matmul_3421(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -500,7 +500,7 @@ func.func @fill_extract_matmul_4123(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -527,7 +527,7 @@ func.func @fill_extract_matmul_4132(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -554,7 +554,7 @@ func.func @fill_extract_matmul_4213(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -581,7 +581,7 @@ func.func @fill_extract_matmul_4231(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -608,7 +608,7 @@ func.func @fill_extract_matmul_4312(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -635,7 +635,7 @@ func.func @fill_extract_matmul_4321(
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>
 
   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>

diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir
index 088b8c708540f..18302e25ece14 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir
@@ -10,7 +10,7 @@ func.func @buffer_forwarding_conflict(%arg0: tensor<?xf32> {bufferization.writab
   //      CHECK: tensor.extract_slice
   // CHECK-SAME: {__inplace_operands_attr__ = ["false", "none"]
   // Instead of allocating, share buffer with some inplace bufferization?
-  %0 = bufferization.alloc_tensor [%arg1] : tensor<?xf32>
+  %0 = bufferization.alloc_tensor(%arg1) : tensor<?xf32>
 
   //      CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "true"]
@@ -37,7 +37,7 @@ func.func @buffer_forwarding_no_conflict(%arg0: tensor<?xf32> {bufferization.wri
   //      CHECK: tensor.extract_slice
   // CHECK-SAME: {__inplace_operands_attr__ = ["true", "none"]
   // Instead of allocating, share buffer with some inplace bufferization?
-  %0 = bufferization.alloc_tensor [%arg1] : tensor<?xf32>
+  %0 = bufferization.alloc_tensor(%arg1) : tensor<?xf32>
 
   //      CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "true"]

diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
index 72023f62d7b08..7fad7d62dc907 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
@@ -347,9 +347,9 @@ func.func @depthwise_conv_1d_nwc_wc(%arg0: index, %arg1: index, %arg2: tensor<8x
   %c0 = arith.constant 0 : index
   %c32 = arith.constant 32 : index
   %c8 = arith.constant 8 : index
-  %0 = bufferization.alloc_tensor [4, 1, 6, 8] : tensor<4x1x6x8xf32>
+  %0 = bufferization.alloc_tensor() : tensor<4x1x6x8xf32>
   %1 = tensor.cast %0 : tensor<4x1x6x8xf32> to tensor<?x1x6x8xf32>
-  %2 = bufferization.alloc_tensor [1, 6, 8] : tensor<1x6x8xf32>
+  %2 = bufferization.alloc_tensor() : tensor<1x6x8xf32>
   %3 = scf.for %arg3 = %c0 to %c32 step %c8 iter_args(%arg4 = %1) -> (tensor<?x1x6x8xf32>) {
     %4 = affine.apply affine_map<(d0) -> (d0 ceildiv 8)>(%arg3)
     %5 = tensor.insert_slice %2 into %arg4[%4,0, 0, 0] [1, 1, 6, 8] [1, 1, 1, 1] :
@@ -370,7 +370,7 @@ func.func @do_not_copy_alloc_tensors(%f1: f32, %f2: f32, %idx: index)
   // CHECK-NOT: copy
   // CHECK: memref.store
   // CHECK: memref.store
-  %0 = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %0 = bufferization.alloc_tensor() : tensor<5xf32>
   %1 = tensor.insert %f1 into %0[%idx] : tensor<5xf32>
   %2 = tensor.insert %f2 into %0[%idx] : tensor<5xf32>
   return %1, %2 : tensor<5xf32>, tensor<5xf32>

diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
index c427db7d9fbec..556c6fb0591fb 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
@@ -583,7 +583,7 @@ func.func @write_to_same_tensor_in_loop_in_place(
 {
   // CHECK: scf.for {{.*}} {
   %r0 = scf.for %i = %lb to %ub step %step iter_args(%t = %A) -> (tensor<?xf32>) {
-    %B = bufferization.alloc_tensor [%sz] : tensor<?xf32>
+    %B = bufferization.alloc_tensor(%sz) : tensor<?xf32>
     %i2 = arith.index_cast %i : index to i32
     %i3 = arith.sitofp %i2 : i32 to f32
     // The tensor.insert is in-place because the %B is defined inside the loop.

diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
index 4ff9c9ea326fe..a3b57228beaf6 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -220,7 +220,7 @@ func.func @scf_if_non_equiv_yields(
 //       CHECK:   return %[[r]]
 func.func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -> f32 {
   %r = scf.execute_region -> (tensor<?xf32>) {
-    %t2 = bufferization.alloc_tensor [%i] : tensor<?xf32>
+    %t2 = bufferization.alloc_tensor(%i) : tensor<?xf32>
     scf.yield %t2 : tensor<?xf32>
   }
   %f = tensor.extract %r[%j] : tensor<?xf32>
@@ -274,7 +274,7 @@ func.func @scf_for_yield_non_equivalent(
 func.func @scf_for_yield_allocation(%t: tensor<?xf32>, %lb : index, %ub : index,
                                %step : index) -> tensor<?xf32> {
   %r = scf.for %i = %lb to %ub step %step iter_args(%a = %t) -> tensor<?xf32> {
-    %t2 = bufferization.alloc_tensor [%i] : tensor<?xf32>
+    %t2 = bufferization.alloc_tensor(%i) : tensor<?xf32>
     scf.yield %t2 : tensor<?xf32>
   }
 

diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
index 7a98300a1e66f..1e24376c9434d 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -179,9 +179,9 @@ func.func @rank_reducing(
   %c8 = arith.constant 8 : index
   %c32 = arith.constant 32 : index
   %c0 = arith.constant 0 : index
-  %0 = bufferization.alloc_tensor [4, 1, 6, 8] : tensor<4x1x6x8xf32>
+  %0 = bufferization.alloc_tensor() : tensor<4x1x6x8xf32>
   %1 = tensor.cast %0 : tensor<4x1x6x8xf32> to tensor<?x1x6x8xf32>
-  %2 = bufferization.alloc_tensor [1, 6, 8] : tensor<1x6x8xf32>
+  %2 = bufferization.alloc_tensor() : tensor<1x6x8xf32>
   %5 = scf.for %arg7 = %c0 to %c32 step %c8 iter_args(%arg8 = %1) -> (tensor<?x1x6x8xf32>) {
     %7 = affine.apply affine_map<(d0) -> (d0 ceildiv 8)>(%arg7)
     %8 = tensor.extract_slice %arg0[%i, %j, %arg7] [1, 6, 8] [1, 1, 1] : tensor<8x18x32xf32> to tensor<1x6x8xf32>

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
index 731f7e5c5c0ff..db5a539fdee30 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
@@ -16,7 +16,7 @@ func.func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: ten
   %c0 = arith.constant 0 : index
   %0 = linalg.fill ins(%cst : f32) outs(%arg2 : tensor<f32>) -> tensor<f32>
   %1 = affine.apply #map0(%c0, %c64)[%c2]
-  %2 = bufferization.alloc_tensor [%1, 2] : tensor<?x2xf32>
+  %2 = bufferization.alloc_tensor(%1) : tensor<?x2xf32>
   %3 = scf.for %arg3 = %c0 to %c64 step %c2 iter_args(%arg4 = %2) -> (tensor<?x2xf32>) {
     %8 = affine.apply #map1(%arg3, %c0)[%c2]
     %9 = tensor.extract_slice %arg1[%arg3] [2] [1] : tensor<64xf32> to tensor<2xf32>
@@ -33,13 +33,13 @@ func.func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: ten
   // call @printMemrefF32(%B) : (tensor<*xf32>) -> ()
 
   %4 = affine.apply #map0(%c0, %c64)[%c2]
-  %5 = bufferization.alloc_tensor [%4, 2] : tensor<?x2xf32>
+  %5 = bufferization.alloc_tensor(%4) : tensor<?x2xf32>
   %6 = scf.for %arg3 = %c0 to %c64 step %c2 iter_args(%arg4 = %5) -> (tensor<?x2xf32>) {
     %8 = affine.apply #map1(%arg3, %c0)[%c2]
     %9 = tensor.extract_slice %arg0[%arg3] [2] [1] : tensor<64xf32> to tensor<2xf32>
     %10 = tensor.cast %9 : tensor<2xf32> to tensor<?xf32>
     %11 = tensor.pad %10 low[%c0] high[%c0]  {
-    ^bb0(%arg5: index):  
+    ^bb0(%arg5: index):
       tensor.yield %cst : f32
     } : tensor<?xf32> to tensor<2xf32>
     %12 = tensor.insert_slice %11 into %arg4[%8, 0] [1, 2] [1, 1] : tensor<2xf32> into tensor<?x2xf32>
@@ -80,9 +80,9 @@ func.func @main() {
   %v1 = arith.constant 1.0 : f32
   %v2 = arith.constant 2.0 : f32
 
-  %A = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %B = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %C = bufferization.alloc_tensor [] : tensor<f32>
+  %A = bufferization.alloc_tensor() : tensor<64xf32>
+  %B = bufferization.alloc_tensor() : tensor<64xf32>
+  %C = bufferization.alloc_tensor() : tensor<f32>
   %AA = linalg.fill ins(%v1 : f32) outs(%A : tensor<64xf32>) -> tensor<64xf32>
   %BB = linalg.fill ins(%v2 : f32) outs(%B : tensor<64xf32>) -> tensor<64xf32>
   %CC = linalg.fill ins(%v0 : f32) outs(%C : tensor<f32>) -> tensor<f32>