[Mlir-commits] [mlir] d2e787d - [mlir][sparse][tensor] replace bufferization with empty tensor (#66450)

llvmlistbot at llvm.org
Fri Sep 15 11:45:46 PDT 2023


Author: Aart Bik
Date: 2023-09-15T11:45:42-07:00
New Revision: d2e787d5d7ee49a0a77a6ca88b020e0297591790

URL: https://github.com/llvm/llvm-project/commit/d2e787d5d7ee49a0a77a6ca88b020e0297591790
DIFF: https://github.com/llvm/llvm-project/commit/d2e787d5d7ee49a0a77a6ca88b020e0297591790.diff

LOG: [mlir][sparse][tensor] replace bufferization with empty tensor (#66450)

Rationale:
    A bufferization.alloc_tensor op can be directly replaced
    with tensor.empty, since the two are semantically equivalent
    for this purpose. The latter is considered a bit more "pure"
    with respect to SSA semantics.
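
    For example, mirroring the hunks below, a dynamically sized
    sparse allocation such as

      %init = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>

    simply becomes

      %init = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>

    with all operands and the sparse encoding left unchanged.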

Added: 
    

Modified: 
    mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
    mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 89bf215a2c7788b..4ef8b29ee4e1a84 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -71,7 +71,7 @@ module {
     %c2 = arith.constant 2.0 : f64
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #SparseMatrix>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #SparseMatrix>
-    %init = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DenseMatrix>
+    %init = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DenseMatrix>
     %0 = linalg.generic #trait_assign
        ins(%arga: tensor<?x?xf64, #SparseMatrix>)
       outs(%init: tensor<?x?xf64, #DenseMatrix>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
index 420d3d8c6232744..317c7af990f78c4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
@@ -48,7 +48,7 @@ module {
                         %argb: tensor<?xbf16, #SparseVector>) -> tensor<?xbf16, #DenseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xbf16, #SparseVector>
-    %xv = bufferization.alloc_tensor (%d) : tensor<?xbf16, #DenseVector>
+    %xv = tensor.empty (%d) : tensor<?xbf16, #DenseVector>
     %0 = linalg.generic #trait_vec_op
        ins(%arga, %argb: tensor<?xbf16, #SparseVector>, tensor<?xbf16, #SparseVector>)
         outs(%xv: tensor<?xbf16, #DenseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
index 96ea972bd6b5f0e..7c8510d8fbabc92 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -49,7 +49,7 @@ module {
                         %argb: tensor<?xf16, #SparseVector>) -> tensor<?xf16, #DenseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf16, #SparseVector>
-    %xv = bufferization.alloc_tensor (%d) : tensor<?xf16, #DenseVector>
+    %xv = tensor.empty (%d) : tensor<?xf16, #DenseVector>
     %0 = linalg.generic #trait_vec_op
        ins(%arga, %argb: tensor<?xf16, #SparseVector>, tensor<?xf16, #SparseVector>)
         outs(%xv: tensor<?xf16, #DenseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
index 0488f5186a4a77d..6cf99cf45997d43 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir
@@ -51,7 +51,7 @@ module {
 
   func.func @conv2d_all_sparse_DCSR(%input:  tensor<8x8xi32, #DCSR>,
                %filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #DCSR>
+    %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32, #DCSR>)
       outs (%s: tensor<6x6xi32, #DCSR>) -> tensor<6x6xi32, #DCSR>
@@ -60,7 +60,7 @@ module {
 
   func.func @conv2d_all_sparse_CSR(%input:  tensor<8x8xi32, #CSR>,
                %filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #CSR>
+    %s = tensor.empty() : tensor<6x6xi32, #CSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32, #CSR>)
       outs (%s: tensor<6x6xi32, #CSR>) -> tensor<6x6xi32, #CSR>
@@ -69,7 +69,7 @@ module {
 
   func.func @conv2d_all_sparse_CD(%input:  tensor<8x8xi32, #CDR>,
                %filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #CDR>
+    %s = tensor.empty() : tensor<6x6xi32, #CDR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32, #CDR>)
       outs (%s: tensor<6x6xi32, #CDR>) -> tensor<6x6xi32, #CDR>
@@ -78,7 +78,7 @@ module {
 
   func.func @conv2d_all_sparse_CSC(%input:  tensor<8x8xi32, #CSC>,
                %filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #CSC>
+    %s = tensor.empty() : tensor<6x6xi32, #CSC>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32, #CSC>)
       outs (%s: tensor<6x6xi32, #CSC>) -> tensor<6x6xi32, #CSC>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
index 584906034d2d20e..71054e456e49475 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
@@ -46,7 +46,7 @@ module {
                              -> tensor<?xf64, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arg0, %c0 : tensor<?xf64, #SparseVector>
-    %xin = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xin = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_op
       ins(%arg0: tensor<?xf64, #SparseVector>)
       outs(%xin: tensor<?xf64, #SparseVector>) {
@@ -61,7 +61,7 @@ module {
                              -> tensor<?xi32, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arg0, %c0 : tensor<?xi32, #SparseVector>
-    %xin = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector>
+    %xin = tensor.empty(%d) : tensor<?xi32, #SparseVector>
     %0 = linalg.generic #trait_op
       ins(%arg0: tensor<?xi32, #SparseVector>)
       outs(%xin: tensor<?xi32, #SparseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir
index 917f8a4838f4de5..826bf0da0ec81f3 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir
@@ -73,7 +73,7 @@ module {
                         %argb: tensor<?xi32, #SparseVector>) -> tensor<?xi32, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xi32, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
     %0 = linalg.generic #trait_vec_op
        ins(%arga, %argb: tensor<?xi32, #SparseVector>, tensor<?xi32, #SparseVector>)
         outs(%xv: tensor<?xi32, #SparseVector>) {
@@ -97,7 +97,7 @@ module {
                         %argb: tensor<?xf64>) -> tensor<?xf64, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_vec_op
        ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64>)
         outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -121,7 +121,7 @@ module {
                             %argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_vec_op
        ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -139,7 +139,7 @@ module {
   func.func @vector_index(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
     %0 = linalg.generic #trait_vec_scale
        ins(%arga: tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xi32, #SparseVector>) {
@@ -166,7 +166,7 @@ module {
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
-    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
+    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
     %0 = linalg.generic #trait_mat_op
        ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>)
         outs(%xv: tensor<?x?xf64, #DCSR>) {
@@ -191,7 +191,7 @@ module {
   // Tensor addition (use semi-ring binary operation).
   func.func @add_tensor_1(%A: tensor<4x4xf64, #DCSR>,
                           %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
     %0 = linalg.generic #trait_mat_op
       ins(%A, %B: tensor<4x4xf64, #DCSR>,
                   tensor<4x4xf64, #DCSR>)
@@ -213,7 +213,7 @@ module {
   // Same as @add_tensor_1, but use sparse_tensor.yield instead of identity to yield value.
   func.func @add_tensor_2(%A: tensor<4x4xf64, #DCSR>,
                           %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
     %0 = linalg.generic #trait_mat_op
       ins(%A, %B: tensor<4x4xf64, #DCSR>,
                   tensor<4x4xf64, #DCSR>)
@@ -241,7 +241,7 @@ module {
   // Performs triangular add/sub operation (using semi-ring binary op).
   func.func @triangular(%A: tensor<4x4xf64, #DCSR>,
                         %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
     %0 = linalg.generic #trait_mat_op
       ins(%A, %B: tensor<4x4xf64, #DCSR>,
                   tensor<4x4xf64, #DCSR>)
@@ -274,7 +274,7 @@ module {
   // Perform sub operation (using semi-ring binary op) with a constant threshold.
   func.func @sub_with_thres(%A: tensor<4x4xf64, #DCSR>,
                             %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
     // Defines out-block constant bounds.
     %thres_out_up = arith.constant 2.0 : f64
     %thres_out_lo = arith.constant -2.0 : f64
@@ -323,7 +323,7 @@ module {
   // Performs isEqual only on intersecting elements.
   func.func @intersect_equal(%A: tensor<4x4xf64, #DCSR>,
                              %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xi8, #DCSR>
+    %C = tensor.empty() : tensor<4x4xi8, #DCSR>
     %0 = linalg.generic #trait_mat_op
       ins(%A, %B: tensor<4x4xf64, #DCSR>,
                   tensor<4x4xf64, #DCSR>)
@@ -346,7 +346,7 @@ module {
   // Keeps values on left, negate value on right, ignore value when overlapping.
   func.func @only_left_right(%A: tensor<4x4xf64, #DCSR>,
                              %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
     %0 = linalg.generic #trait_mat_op
       ins(%A, %B: tensor<4x4xf64, #DCSR>,
                   tensor<4x4xf64, #DCSR>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir
index f6c72581153bfac..87ab88b8d9de99c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir
@@ -66,7 +66,7 @@ module {
 
   func.func @cmp_lhs_sparse(%arga: tensor<4x4xf64, #DCSR>,
                             %argb: tensor<4x4xf64>) -> tensor<4x4xi8, #DCSR> {
-    %argx = bufferization.alloc_tensor() : tensor<4x4xi8, #DCSR>
+    %argx = tensor.empty() : tensor<4x4xi8, #DCSR>
     %0 = linalg.generic #trait
        ins(%arga, %argb: tensor<4x4xf64, #DCSR>, tensor<4x4xf64>)
       outs(%argx: tensor<4x4xi8, #DCSR>) {
@@ -80,7 +80,7 @@ module {
 
   func.func @cmp_all_sparse(%arga: tensor<4x4xf64, #DCSR>,
                             %argb: tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR> {
-    %argx = bufferization.alloc_tensor() : tensor<4x4xi8, #DCSR>
+    %argx = tensor.empty() : tensor<4x4xi8, #DCSR>
     %0 = linalg.generic #trait
        ins(%arga, %argb: tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>)
       outs(%argx: tensor<4x4xi8, #DCSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir
index 3203473f68b324d..45ea95d1a6f36fd 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir
@@ -43,8 +43,8 @@ module {
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index
     %c3 = arith.constant 3 : index
-    %t1 = bufferization.alloc_tensor() : tensor<4x5xf64, #DCSR>
-    %t2 = bufferization.alloc_tensor(%c2, %c3) : tensor<?x?xf64, #DCSR>
+    %t1 = tensor.empty() : tensor<4x5xf64, #DCSR>
+    %t2 = tensor.empty(%c2, %c3) : tensor<?x?xf64, #DCSR>
 
     %d1_0 = tensor.dim %t1, %c0 : tensor<4x5xf64, #DCSR>
     %d2_0 = tensor.dim %t2, %c0 : tensor<?x?xf64, #DCSR>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
index 6e67e043043ec64..f91b258e291e5bf 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir
@@ -48,7 +48,7 @@ module {
                       -> tensor<?xcomplex<f32>, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xcomplex<f32>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f32>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f32>, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?xcomplex<f32>, #SparseVector>,
                          tensor<?xcomplex<f32>, #SparseVector>)
@@ -65,7 +65,7 @@ module {
                       -> tensor<?xcomplex<f32>, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xcomplex<f32>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f32>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f32>, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?xcomplex<f32>, #SparseVector>,
                          tensor<?xcomplex<f32>, #SparseVector>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
index 0f9ce030269d75c..a89bce82197b0e2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir
@@ -45,7 +45,7 @@ module {
                       -> tensor<?xcomplex<f64>, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
                          tensor<?xcomplex<f64>, #SparseVector>)
@@ -62,7 +62,7 @@ module {
                       -> tensor<?xcomplex<f64>, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
                          tensor<?xcomplex<f64>, #SparseVector>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir
index 26c0f8c3ec7d010..a2da0a6beadd8cc 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir
@@ -57,7 +57,7 @@ module {
                  -> tensor<?xcomplex<f64>, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %0 = linalg.generic #trait_op2
        ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
                          tensor<?xcomplex<f64>, #SparseVector>)
@@ -74,7 +74,7 @@ module {
                  -> tensor<?xcomplex<f64>, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %0 = linalg.generic #trait_op1
        ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
         outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
@@ -89,7 +89,7 @@ module {
                  -> tensor<?xcomplex<f64>, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %0 = linalg.generic #trait_op1
        ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
         outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
@@ -104,7 +104,7 @@ module {
                  -> tensor<?xcomplex<f64>, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %0 = linalg.generic #trait_op1
        ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
         outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
@@ -119,7 +119,7 @@ module {
                  -> tensor<?xcomplex<f64>, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %0 = linalg.generic #trait_op1
        ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
         outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
@@ -135,7 +135,7 @@ module {
                  -> tensor<?xcomplex<f64>, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
     %c = complex.constant [2.0 : f64, 0.0 : f64] : complex<f64>
     %0 = linalg.generic #trait_op1
        ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
@@ -151,7 +151,7 @@ module {
                  -> tensor<?xf64, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_op1
        ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
         outs(%xv: tensor<?xf64, #SparseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir
index a2eafb8cc134c0e..a0ed16e29774d0b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir
@@ -41,7 +41,7 @@
 
 // Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f
 func.func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> tensor<?x?x?xf32> {
-  %buf = bufferization.alloc_tensor(%s1, %s2, %s3) : tensor<?x?x?xf32>
+  %buf = tensor.empty(%s1, %s2, %s3) : tensor<?x?x?xf32>
   %ret = linalg.fill ins(%f : f32) outs(%buf : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
   return %ret : tensor<?x?x?xf32>
 }
@@ -58,7 +58,7 @@ func.func @conv_1d_nwc_wcf_CCC(%arg0: tensor<?x?x?xf32, #CCC>, %arg1: tensor<?x?
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c3, %c6, %c1) : tensor<?x?x?xf32, #CCC>
+  %s = tensor.empty(%c3, %c6, %c1) : tensor<?x?x?xf32, #CCC>
   %ret = linalg.conv_1d_nwc_wcf {dilations = dense<1> : tensor<1xi64>,
                                    strides = dense<1> : tensor<1xi64>}
      ins (%arg0, %arg1: tensor<?x?x?xf32, #CCC>, tensor<?x?x?xf32>)
@@ -70,7 +70,7 @@ func.func @conv_1d_nwc_wcf_CDC(%arg0: tensor<?x?x?xf32, #CDC>, %arg1: tensor<?x?
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c3, %c6, %c1) : tensor<?x?x?xf32, #CDC>
+  %s = tensor.empty(%c3, %c6, %c1) : tensor<?x?x?xf32, #CDC>
   %ret = linalg.conv_1d_nwc_wcf {dilations = dense<1> : tensor<1xi64>,
                                    strides = dense<1> : tensor<1xi64>}
      ins (%arg0, %arg1: tensor<?x?x?xf32, #CDC>, tensor<?x?x?xf32>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
index 015b7153cdefe87..39292a63eb894ad 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir
@@ -51,7 +51,7 @@ module {
 
   func.func @conv2d_sparse_out(%input:  tensor<8x8xi32>,
                %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #DCSR>
+    %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
       outs (%s: tensor<6x6xi32, #DCSR>) -> tensor<6x6xi32, #DCSR>
@@ -60,7 +60,7 @@ module {
 
   func.func @conv2d_all_sparse_DCSR(%input:  tensor<8x8xi32, #DCSR>,
                %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #DCSR>
+    %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32>)
       outs (%s: tensor<6x6xi32, #DCSR>) -> tensor<6x6xi32, #DCSR>
@@ -69,7 +69,7 @@ module {
 
   func.func @conv2d_all_sparse_CSR(%input:  tensor<8x8xi32, #CSR>,
                %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #CSR>
+    %s = tensor.empty() : tensor<6x6xi32, #CSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32>)
       outs (%s: tensor<6x6xi32, #CSR>) -> tensor<6x6xi32, #CSR>
@@ -78,7 +78,7 @@ module {
 
   func.func @conv2d_all_sparse_CD(%input:  tensor<8x8xi32, #CDR>,
                %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CDR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #CDR>
+    %s = tensor.empty() : tensor<6x6xi32, #CDR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32>)
       outs (%s: tensor<6x6xi32, #CDR>) -> tensor<6x6xi32, #CDR>
@@ -87,7 +87,7 @@ module {
 
   func.func @conv2d_all_sparse_CSC(%input:  tensor<8x8xi32, #CSC>,
                %filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSC> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #CSC>
+    %s = tensor.empty() : tensor<6x6xi32, #CSC>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32>)
       outs (%s: tensor<6x6xi32, #CSC>) -> tensor<6x6xi32, #CSC>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir
index 0d2e2582bd371a1..bb2a84e0c122017 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir
@@ -45,7 +45,7 @@
 
 // Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f
 func.func @alloc_4d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %f : f32) -> tensor<?x?x?x?xf32> {
-  %buf = bufferization.alloc_tensor(%s1, %s2, %s3, %s4) : tensor<?x?x?x?xf32>
+  %buf = tensor.empty(%s1, %s2, %s3, %s4) : tensor<?x?x?x?xf32>
   %ret = linalg.fill ins(%f : f32) outs(%buf : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
   return %ret : tensor<?x?x?x?xf32>
 }

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir
index c5ce85593d84580..07136cbbeb1e05f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir
@@ -44,7 +44,7 @@
 
 // Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f
 func.func @alloc_4d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %f : f32) -> tensor<?x?x?x?xf32> {
-  %buf = bufferization.alloc_tensor(%s1, %s2, %s3, %s4) : tensor<?x?x?x?xf32>
+  %buf = tensor.empty(%s1, %s2, %s3, %s4) : tensor<?x?x?x?xf32>
   %ret = linalg.fill ins(%f : f32) outs(%buf : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
   return %ret : tensor<?x?x?x?xf32>
 }
@@ -61,7 +61,7 @@ func.func @conv_2d_nhwc_hwcf_CCCC(%arg0: tensor<?x?x?x?xf32, #CCCC>, %arg1: tens
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c3, %c6, %c6, %c1) : tensor<?x?x?x?xf32, #CCCC>
+  %s = tensor.empty(%c3, %c6, %c6, %c1) : tensor<?x?x?x?xf32, #CCCC>
   %ret = linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : tensor<2xi64>,
                                      strides = dense<1> : tensor<2xi64>}
      ins (%arg0, %arg1: tensor<?x?x?x?xf32, #CCCC>, tensor<?x?x?x?xf32>)
@@ -73,7 +73,7 @@ func.func @conv_2d_nhwc_hwcf_CDCD(%arg0: tensor<?x?x?x?xf32, #CDCD>, %arg1: tens
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c3, %c6, %c6, %c1) : tensor<?x?x?x?xf32, #CDCD>
+  %s = tensor.empty(%c3, %c6, %c6, %c1) : tensor<?x?x?x?xf32, #CDCD>
   %ret = linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : tensor<2xi64>,
                                      strides = dense<1> : tensor<2xi64>}
      ins (%arg0, %arg1: tensor<?x?x?x?xf32, #CDCD>, tensor<?x?x?x?xf32>)
@@ -85,7 +85,7 @@ func.func @conv_2d_nhwc_hwcf_DCCD(%arg0: tensor<?x?x?x?xf32, #DCCD>, %arg1: tens
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c3, %c6, %c6, %c1) : tensor<?x?x?x?xf32, #DCCD>
+  %s = tensor.empty(%c3, %c6, %c6, %c1) : tensor<?x?x?x?xf32, #DCCD>
   %ret = linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : tensor<2xi64>,
                                      strides = dense<1> : tensor<2xi64>}
      ins (%arg0, %arg1: tensor<?x?x?x?xf32, #DCCD>, tensor<?x?x?x?xf32>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir
index 3695bf3ffa3d6b4..3d1fb4122164c04 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir
@@ -44,7 +44,7 @@
 
 // Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f
 func.func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> tensor<?x?x?xf32> {
-  %buf = bufferization.alloc_tensor(%s1, %s2, %s3) : tensor<?x?x?xf32>
+  %buf = tensor.empty(%s1, %s2, %s3) : tensor<?x?x?xf32>
   %ret = linalg.fill ins(%f : f32) outs(%buf : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
   return %ret : tensor<?x?x?xf32>
 }
@@ -58,7 +58,7 @@ func.func @conv_3d(%arg0: tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>, %arg2: te
 
 func.func @conv_3d_CCC(%arg0: tensor<?x?x?xf32, #CCC>, %arg1: tensor<?x?x?xf32>) -> tensor<?x?x?xf32, #CCC> {
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c6, %c6, %c6) : tensor<?x?x?xf32, #CCC>
+  %s = tensor.empty(%c6, %c6, %c6) : tensor<?x?x?xf32, #CCC>
   %ret = linalg.conv_3d
      ins (%arg0, %arg1: tensor<?x?x?xf32, #CCC>, tensor<?x?x?xf32>)
     outs (%s: tensor<?x?x?xf32, #CCC>) -> tensor<?x?x?xf32, #CCC>
@@ -67,7 +67,7 @@ func.func @conv_3d_CCC(%arg0: tensor<?x?x?xf32, #CCC>, %arg1: tensor<?x?x?xf32>)
 
 func.func @conv_3d_CDC(%arg0: tensor<?x?x?xf32, #CDC>, %arg1: tensor<?x?x?xf32>) -> tensor<?x?x?xf32, #CDC> {
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c6, %c6, %c6) : tensor<?x?x?xf32, #CDC>
+  %s = tensor.empty(%c6, %c6, %c6) : tensor<?x?x?xf32, #CDC>
   %ret = linalg.conv_3d
      ins (%arg0, %arg1: tensor<?x?x?xf32, #CDC>, tensor<?x?x?xf32>)
     outs (%s: tensor<?x?x?xf32, #CDC>) -> tensor<?x?x?xf32, #CDC>
@@ -76,7 +76,7 @@ func.func @conv_3d_CDC(%arg0: tensor<?x?x?xf32, #CDC>, %arg1: tensor<?x?x?xf32>)
 
 func.func @conv_3d_DDC(%arg0: tensor<?x?x?xf32, #DDC>, %arg1: tensor<?x?x?xf32>) -> tensor<?x?x?xf32, #DDC> {
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c6, %c6, %c6) : tensor<?x?x?xf32, #DDC>
+  %s = tensor.empty(%c6, %c6, %c6) : tensor<?x?x?xf32, #DDC>
   %ret = linalg.conv_3d
      ins (%arg0, %arg1: tensor<?x?x?xf32, #DDC>, tensor<?x?x?xf32>)
     outs (%s: tensor<?x?x?xf32, #DDC>) -> tensor<?x?x?xf32, #DDC>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir
index 97b73175dc5f5ff..dd52d66c6b06f0f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir
@@ -40,7 +40,7 @@
 
 // Creates and returns 5-D buffer of size (%s1, %s2, %s3, %s4, %s5) filled with the value %f
 func.func @alloc_5d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %s5 : index, %f : f32) -> tensor<?x?x?x?x?xf32> {
-  %buf = bufferization.alloc_tensor(%s1, %s2, %s3, %s4, %s5) : tensor<?x?x?x?x?xf32>
+  %buf = tensor.empty(%s1, %s2, %s3, %s4, %s5) : tensor<?x?x?x?x?xf32>
   %ret = linalg.fill ins(%f : f32) outs(%buf : tensor<?x?x?x?x?xf32>) -> tensor<?x?x?x?x?xf32>
   return %ret : tensor<?x?x?x?x?xf32>
 }
@@ -60,7 +60,7 @@ func.func @conv_3d_ndhwc_dhwcf_CCCCC(%arg0: tensor<?x?x?x?x?xf32, #CCCCC>,
                                      -> tensor<?x?x?x?x?xf32, #CCCCC> {
   %c1 = arith.constant 1 : index
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c1, %c6, %c6, %c6, %c1)
+  %s = tensor.empty(%c1, %c6, %c6, %c6, %c1)
     : tensor<?x?x?x?x?xf32, #CCCCC>
   %ret = linalg.conv_3d_ndhwc_dhwcf {dilations = dense<1> : tensor<3xi64>,
                                        strides = dense<1> : tensor<3xi64>}
@@ -74,7 +74,7 @@ func.func @conv_3d_ndhwc_dhwcf_CDCDC(%arg0: tensor<?x?x?x?x?xf32, #CDCDC>,
                                      -> tensor<?x?x?x?x?xf32, #CDCDC> {
   %c1 = arith.constant 1 : index
   %c6 = arith.constant 6 : index
-  %s = bufferization.alloc_tensor(%c1, %c6, %c6, %c6, %c1)
+  %s = tensor.empty(%c1, %c6, %c6, %c6, %c1)
     : tensor<?x?x?x?x?xf32, #CDCDC>
   %ret = linalg.conv_3d_ndhwc_dhwcf {dilations = dense<1> : tensor<3xi64>,
                                        strides = dense<1> : tensor<3xi64>}

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
index 8c5ad40cd83c668..d02f9128423e6b4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir
@@ -64,7 +64,7 @@ module {
     //
     // CHECK: 53
     //
-    %t = bufferization.alloc_tensor() : tensor<f32>
+    %t = tensor.empty() : tensor<f32>
     %z = arith.constant 0.0 : f32
     %x = tensor.insert %z into %t[] : tensor<f32>
     %0 = call @sparse_dot(%s1, %s2, %x) : (tensor<1024xf32, #SparseVector>,

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
index c5b62cea5a1dfa8..c83b8148f5b757c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
@@ -44,7 +44,7 @@ module {
   //
   func.func @matmul(%A: tensor<8x2xf64, #CSC>,
                     %B: tensor<2x4xf64, #CSC>) -> tensor<8x4xf64, #CSC> {
-    %C = bufferization.alloc_tensor() : tensor<8x4xf64, #CSC>
+    %C = tensor.empty() : tensor<8x4xf64, #CSC>
     %D = linalg.matmul
       ins(%A, %B: tensor<8x2xf64, #CSC>, tensor<2x4xf64, #CSC>)
          outs(%C: tensor<8x4xf64, #CSC>) -> tensor<8x4xf64, #CSC>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
index ddffd9498060fe9..0399667debf9a1d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
@@ -46,7 +46,7 @@ module {
 
   func.func @conv2d_sparse_out(%input:  tensor<8x8xi32>,
                %filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
-    %s = bufferization.alloc_tensor() : tensor<6x6xi32, #DCSR>
+    %s = tensor.empty() : tensor<6x6xi32, #DCSR>
     %0 = linalg.conv_2d
       ins  (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32, #DCSR>)
       outs (%s: tensor<6x6xi32, #DCSR>) -> tensor<6x6xi32, #DCSR>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
index 8ade31d4eb04815..89e64eb61eae4d4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
@@ -68,7 +68,7 @@ module {
   //
   func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>)
                                  -> tensor<8xi64, #SparseVector> {
-    %init = bufferization.alloc_tensor() : tensor<8xi64, #SparseVector>
+    %init = tensor.empty() : tensor<8xi64, #SparseVector>
     %r = linalg.generic #trait_1d
         ins(%arga: tensor<8xi64, #SparseVector>)
        outs(%init: tensor<8xi64, #SparseVector>) {
@@ -86,7 +86,7 @@ module {
   //
   func.func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>)
                                  -> tensor<8xi64, #SparseVector> {
-    %init = bufferization.alloc_tensor() : tensor<8xi64, #SparseVector>
+    %init = tensor.empty() : tensor<8xi64, #SparseVector>
     %r = linalg.generic #trait_1d
         ins(%arga: tensor<8xi64, #SparseVector>)
        outs(%init: tensor<8xi64, #SparseVector>) {
@@ -104,7 +104,7 @@ module {
   //
   func.func @sparse_index_2d_conj(%arga: tensor<3x4xi64, #SparseMatrix>)
                                  -> tensor<3x4xi64, #SparseMatrix> {
-    %init = bufferization.alloc_tensor() : tensor<3x4xi64, #SparseMatrix>
+    %init = tensor.empty() : tensor<3x4xi64, #SparseMatrix>
     %r = linalg.generic #trait_2d
         ins(%arga: tensor<3x4xi64, #SparseMatrix>)
        outs(%init: tensor<3x4xi64, #SparseMatrix>) {
@@ -125,7 +125,7 @@ module {
   //
   func.func @sparse_index_2d_disj(%arga: tensor<3x4xi64, #SparseMatrix>)
                                  -> tensor<3x4xi64, #SparseMatrix> {
-    %init = bufferization.alloc_tensor() : tensor<3x4xi64, #SparseMatrix>
+    %init = tensor.empty() : tensor<3x4xi64, #SparseMatrix>
     %r = linalg.generic #trait_2d
         ins(%arga: tensor<3x4xi64, #SparseMatrix>)
        outs(%init: tensor<3x4xi64, #SparseMatrix>) {
@@ -143,7 +143,7 @@ module {
 
   func.func @add_outer_2d(%arg0: tensor<2x3xf32, #SparseMatrix>)
                          -> tensor<2x3xf32, #SparseMatrix> {
-    %0 = bufferization.alloc_tensor() : tensor<2x3xf32, #SparseMatrix>
+    %0 = tensor.empty() : tensor<2x3xf32, #SparseMatrix>
     %1 = linalg.generic #trait_2d
       ins(%arg0 : tensor<2x3xf32, #SparseMatrix>)
       outs(%0 : tensor<2x3xf32, #SparseMatrix>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
index 30a9c68ed234842..8faa1648bddffae 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
@@ -161,8 +161,8 @@ module {
     %dm = sparse_tensor.convert %m2 : tensor<3x4xi64> to tensor<3x4xi64, #SparseMatrix>
 
     // Setup out tensors.
-    %init_8 = bufferization.alloc_tensor() : tensor<8xi64>
-    %init_3_4 = bufferization.alloc_tensor() : tensor<3x4xi64>
+    %init_8 = tensor.empty() : tensor<8xi64>
+    %init_3_4 = tensor.empty() : tensor<3x4xi64>
 
     // Call the kernels.
     %0 = call @sparse_index_1d_conj(%sv, %init_8) : (tensor<8xi64, #SparseVector>, tensor<8xi64>) -> tensor<8xi64>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
index 2f42e5f573177e0..a42d98838957093 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
@@ -75,7 +75,7 @@ module {
     %c1023 = arith.constant 1023 : index
 
     // Build the sparse vector from straightline code.
-    %0 = bufferization.alloc_tensor() : tensor<1024xf32, #SparseVector>
+    %0 = tensor.empty() : tensor<1024xf32, #SparseVector>
     %1 = sparse_tensor.insert %f1 into %0[%c0] : tensor<1024xf32, #SparseVector>
     %2 = sparse_tensor.insert %f2 into %1[%c1] : tensor<1024xf32, #SparseVector>
     %3 = sparse_tensor.insert %f3 into %2[%c3] : tensor<1024xf32, #SparseVector>
@@ -88,7 +88,7 @@ module {
     call @dump(%5) : (tensor<1024xf32, #SparseVector>) -> ()
 
     // Build another sparse vector in a loop.
-    %6 = bufferization.alloc_tensor() : tensor<1024xf32, #SparseVector>
+    %6 = tensor.empty() : tensor<1024xf32, #SparseVector>
     %7 = scf.for %i = %c0 to %c8 step %c1 iter_args(%vin = %6) -> tensor<1024xf32, #SparseVector> {
       %ii = arith.muli %i, %c3 : index
       %vout = sparse_tensor.insert %f1 into %vin[%ii] : tensor<1024xf32, #SparseVector>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
index 656fae6b7084d73..019edb9d01f95b5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
@@ -149,7 +149,7 @@ module {
     //
     // CHECK: ( 1, 0, 0, 0, 0, 0, 0, 0, 2, 3, 0, 4 )
     //
-    %densea = bufferization.alloc_tensor() : tensor<4x3xf64, #Dense>
+    %densea = tensor.empty() : tensor<4x3xf64, #Dense>
     %dense1 = sparse_tensor.insert %f1 into %densea[%c0, %c0] : tensor<4x3xf64, #Dense>
     %dense2 = sparse_tensor.insert %f2 into %dense1[%c2, %c2] : tensor<4x3xf64, #Dense>
     %dense3 = sparse_tensor.insert %f3 into %dense2[%c3, %c0] : tensor<4x3xf64, #Dense>
@@ -165,7 +165,7 @@ module {
     // CHECK-NEXT: ( 0, 2, 0, 2 )
     // CHECK-NEXT: ( 1, 2, 3, 4 )
     //
-    %cooa = bufferization.alloc_tensor() : tensor<4x3xf64, #SortedCOO>
+    %cooa = tensor.empty() : tensor<4x3xf64, #SortedCOO>
     %coo1 = sparse_tensor.insert %f1 into %cooa[%c0, %c0] : tensor<4x3xf64, #SortedCOO>
     %coo2 = sparse_tensor.insert %f2 into %coo1[%c2, %c2] : tensor<4x3xf64, #SortedCOO>
     %coo3 = sparse_tensor.insert %f3 into %coo2[%c3, %c0] : tensor<4x3xf64, #SortedCOO>
@@ -180,7 +180,7 @@ module {
     // CHECK-NEXT: ( 0, 2, 0, 2 )
     // CHECK-NEXT: ( 1, 2, 3, 4 )
     //
-    %csra = bufferization.alloc_tensor() : tensor<4x3xf64, #CSR>
+    %csra = tensor.empty() : tensor<4x3xf64, #CSR>
     %csr1 = sparse_tensor.insert %f1 into %csra[%c0, %c0] : tensor<4x3xf64, #CSR>
     %csr2 = sparse_tensor.insert %f2 into %csr1[%c2, %c2] : tensor<4x3xf64, #CSR>
     %csr3 = sparse_tensor.insert %f3 into %csr2[%c3, %c0] : tensor<4x3xf64, #CSR>
@@ -197,7 +197,7 @@ module {
     // CHECK-NEXT: ( 0, 2, 0, 2 )
     // CHECK-NEXT: ( 1, 2, 3, 4 )
     //
-    %dcsra = bufferization.alloc_tensor() : tensor<4x3xf64, #DCSR>
+    %dcsra = tensor.empty() : tensor<4x3xf64, #DCSR>
     %dcsr1 = sparse_tensor.insert %f1 into %dcsra[%c0, %c0] : tensor<4x3xf64, #DCSR>
     %dcsr2 = sparse_tensor.insert %f2 into %dcsr1[%c2, %c2] : tensor<4x3xf64, #DCSR>
     %dcsr3 = sparse_tensor.insert %f3 into %dcsr2[%c3, %c0] : tensor<4x3xf64, #DCSR>
@@ -212,7 +212,7 @@ module {
     // CHECK-NEXT: ( 0, 2, 3 )
     // CHECK-NEXT: ( 1, 0, 0, 0, 0, 2, 3, 0, 4 )
     //
-    %rowa = bufferization.alloc_tensor() : tensor<4x3xf64, #Row>
+    %rowa = tensor.empty() : tensor<4x3xf64, #Row>
     %row1 = sparse_tensor.insert %f1 into %rowa[%c0, %c0] : tensor<4x3xf64, #Row>
     %row2 = sparse_tensor.insert %f2 into %row1[%c2, %c2] : tensor<4x3xf64, #Row>
     %row3 = sparse_tensor.insert %f3 into %row2[%c3, %c0] : tensor<4x3xf64, #Row>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
index 6c39796ac664885..f76146fc423381f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -152,7 +152,7 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
     // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
     // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
     //
-    %tensora = bufferization.alloc_tensor() : tensor<5x4x3xf64, #TensorCSR>
+    %tensora = tensor.empty() : tensor<5x4x3xf64, #TensorCSR>
     %tensor1 = sparse_tensor.insert %f1 into %tensora[%c3, %c0, %c1] : tensor<5x4x3xf64, #TensorCSR>
     %tensor2 = sparse_tensor.insert %f2 into %tensor1[%c3, %c0, %c2] : tensor<5x4x3xf64, #TensorCSR>
     %tensor3 = sparse_tensor.insert %f3 into %tensor2[%c3, %c3, %c1] : tensor<5x4x3xf64, #TensorCSR>
@@ -168,7 +168,7 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
     // CHECK-NEXT: ( 0, 3, 2, 3 )
     // CHECK-NEXT: ( 0, 1.1, 2.2, 0, 3.3, 0, 0, 0, 4.4, 0, 0, 5.5 )
     //
-    %rowa = bufferization.alloc_tensor() : tensor<5x4x3xf64, #TensorRow>
+    %rowa = tensor.empty() : tensor<5x4x3xf64, #TensorRow>
     %row1 = sparse_tensor.insert %f1 into %rowa[%c3, %c0, %c1] : tensor<5x4x3xf64, #TensorRow>
     %row2 = sparse_tensor.insert %f2 into %row1[%c3, %c0, %c2] : tensor<5x4x3xf64, #TensorRow>
     %row3 = sparse_tensor.insert %f3 into %row2[%c3, %c3, %c1] : tensor<5x4x3xf64, #TensorRow>
@@ -184,7 +184,7 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
     // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
     // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
     // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
-    %ccoo = bufferization.alloc_tensor() : tensor<5x4x3xf64, #CCoo>
+    %ccoo = tensor.empty() : tensor<5x4x3xf64, #CCoo>
     %ccoo1 = sparse_tensor.insert %f1 into %ccoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #CCoo>
     %ccoo2 = sparse_tensor.insert %f2 into %ccoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #CCoo>
     %ccoo3 = sparse_tensor.insert %f3 into %ccoo2[%c3, %c3, %c1] : tensor<5x4x3xf64, #CCoo>
@@ -198,7 +198,7 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
     // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
     // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
     // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
-    %dcoo = bufferization.alloc_tensor() : tensor<5x4x3xf64, #DCoo>
+    %dcoo = tensor.empty() : tensor<5x4x3xf64, #DCoo>
     %dcoo1 = sparse_tensor.insert %f1 into %dcoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #DCoo>
     %dcoo2 = sparse_tensor.insert %f2 into %dcoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #DCoo>
     %dcoo3 = sparse_tensor.insert %f3 into %dcoo2[%c3, %c3, %c1] : tensor<5x4x3xf64, #DCoo>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
index 0ae03e5c4c2c6cc..101cec6040a4f66 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -68,7 +68,7 @@ module {
   //
   func.func @matmul2(%A: tensor<4x8xf64, #CSR>,
                      %B: tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
+    %C = tensor.empty() : tensor<4x4xf64, #CSR>
     %D = linalg.matmul
       ins(%A, %B: tensor<4x8xf64, #CSR>, tensor<8x4xf64, #CSR>)
          outs(%C: tensor<4x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
@@ -80,7 +80,7 @@ module {
   //
   func.func @matmul3(%A: tensor<4x8xf64, #DCSR>,
                      %B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
     %D = linalg.matmul
       ins(%A, %B: tensor<4x8xf64, #DCSR>, tensor<8x4xf64, #DCSR>)
          outs(%C: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
index ac2a9c58220c9c0..bf8d7724bdc4f8d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -83,7 +83,7 @@ module {
   //
   func.func @matmul_dyn(%A: tensor<4x4xf64, #CSR_SLICE_dyn>,
                         %B: tensor<4x4xf64, #DCSR_SLICE_dyn>) -> tensor<4x4xf64, #CSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
+    %C = tensor.empty() : tensor<4x4xf64, #CSR>
     %D = linalg.matmul
       ins(%A, %B: tensor<4x4xf64, #CSR_SLICE_dyn>, tensor<4x4xf64, #DCSR_SLICE_dyn>)
          outs(%C: tensor<4x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
@@ -95,7 +95,7 @@ module {
   //
   func.func @matmul1(%A: tensor<4x4xf64, #CSR_SLICE_1>,
                      %B: tensor<4x4xf64, #DCSR_SLICE_1>) -> tensor<4x4xf64, #CSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
+    %C = tensor.empty() : tensor<4x4xf64, #CSR>
     %D = linalg.matmul
       ins(%A, %B: tensor<4x4xf64, #CSR_SLICE_1>, tensor<4x4xf64, #DCSR_SLICE_1>)
          outs(%C: tensor<4x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
@@ -107,7 +107,7 @@ module {
   //
   func.func @matmul2(%A: tensor<4x8xf64, #CSR_SLICE>,
                      %B: tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
+    %C = tensor.empty() : tensor<4x4xf64, #CSR>
     %D = linalg.matmul
       ins(%A, %B: tensor<4x8xf64, #CSR_SLICE>, tensor<8x4xf64, #CSR>)
          outs(%C: tensor<4x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
@@ -119,7 +119,7 @@ module {
   //
   func.func @matmul3(%A: tensor<4x8xf64, #DCSR_SLICE>,
                      %B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
     %D = linalg.matmul
       ins(%A, %B: tensor<4x8xf64, #DCSR_SLICE>, tensor<8x4xf64, #DCSR>)
          outs(%C: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>
@@ -131,7 +131,7 @@ module {
   //
   func.func @matmul5(%A: tensor<4x4xf64, #COO_SLICE_1>,
                      %B: tensor<4x4xf64, #COO_SLICE_2>) -> tensor<4x4xf64, #COO> {
-    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #COO>
+    %C = tensor.empty() : tensor<4x4xf64, #COO>
     %D = linalg.matmul
       ins(%A, %B: tensor<4x4xf64, #COO_SLICE_1>, tensor<4x4xf64, #COO_SLICE_2>)
          outs(%C: tensor<4x4xf64, #COO>) -> tensor<4x4xf64, #COO>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
index af53f66a4c2ea87..dcea7e159388054 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
@@ -70,7 +70,7 @@ module {
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
-    %xm = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
+    %xm = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
     %0 = linalg.generic #trait_scale
        ins(%arga: tensor<?x?xf64, #DCSR>)
         outs(%xm: tensor<?x?xf64, #DCSR>) {
@@ -100,7 +100,7 @@ module {
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
-    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
+    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>)
         outs(%xv: tensor<?x?xf64, #DCSR>) {
@@ -118,7 +118,7 @@ module {
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
-    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
+    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>)
         outs(%xv: tensor<?x?xf64, #DCSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
index 6f70264135234ed..b4677d3974778c8 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
@@ -48,7 +48,7 @@ module {
   // Sparse kernel.
   func.func @sparse_mult_elt(
       %arga: tensor<32x16xf32, #DCSR>, %argb: tensor<32x16xf32, #DCSR>) -> tensor<32x16xf32, #DCSR> {
-    %argx = bufferization.alloc_tensor() : tensor<32x16xf32, #DCSR>
+    %argx = tensor.empty() : tensor<32x16xf32, #DCSR>
     %0 = linalg.generic #trait_mult_elt
       ins(%arga, %argb: tensor<32x16xf32, #DCSR>, tensor<32x16xf32, #DCSR>)
       outs(%argx: tensor<32x16xf32, #DCSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
index 7749f2845e115f3..d239e4cce1ddcb5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -56,7 +56,7 @@ module {
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %arga, %c0 : tensor<?x?x?xi32, #SparseTensor>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?x?xi32, #SparseTensor>
-    %xinit = bufferization.alloc_tensor(%d0, %d1): tensor<?x?xi32, #SparseMatrix>
+    %xinit = tensor.empty(%d0, %d1): tensor<?x?xi32, #SparseMatrix>
     %0 = linalg.generic #redsum
       ins(%arga, %argb: tensor<?x?x?xi32, #SparseTensor>,
                         tensor<?x?x?xi32, #SparseTensor>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
index fb1f766495529ba..a0d3565b427e446 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
@@ -27,7 +27,7 @@
 #CCCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }>
 
 func.func @pooling_nhwc_sum_CCCC(%input: tensor<1x4x4x1xf32, #CCCC>, %filter: tensor<2x2xf32>) -> tensor<1x3x3x1xf32, #CCCC> {
-  %init = bufferization.alloc_tensor() : tensor<1x3x3x1xf32, #CCCC>
+  %init = tensor.empty() : tensor<1x3x3x1xf32, #CCCC>
   %0 = linalg.pooling_nhwc_sum {dilations = dense<1> : tensor<2xi64>,
                                 strides = dense<1> : tensor<2xi64>}
      ins (%input, %filter: tensor<1x4x4x1xf32, #CCCC>, tensor<2x2xf32>)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
index 6dd2c4c497a48c4..6acc601dd5f3bb4 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
@@ -46,7 +46,7 @@ module {
                 -> tensor<?xf32, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xcomplex<f32>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf32, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf32, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga: tensor<?xcomplex<f32>, #SparseVector>)
         outs(%xv: tensor<?xf32, #SparseVector>) {
@@ -61,7 +61,7 @@ module {
                 -> tensor<?xf32, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xcomplex<f32>, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf32, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf32, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga: tensor<?xcomplex<f32>, #SparseVector>)
         outs(%xv: tensor<?xf32, #SparseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
index fb685470134c09f..a626b89bb0a8b79 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
@@ -59,7 +59,7 @@ module {
     %maxf = arith.constant 1.0e999 : f64
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #CSR>
     %d1 = tensor.dim %argb, %c1 : tensor<?x?xf64, #CSR>
-    %xm = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #CSR>
+    %xm = tensor.empty(%d0, %d1) : tensor<?x?xf64, #CSR>
     %0 = linalg.generic #trait_matmul
        ins(%arga, %argb: tensor<?x?xf64, #CSR>, tensor<?x?xf64, #CSR>)
         outs(%xm: tensor<?x?xf64, #CSR>) {
@@ -90,7 +90,7 @@ module {
     %maxf = arith.constant 1.0e999 : f64
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #CSR>
     %d1 = tensor.dim %argb, %c1 : tensor<?x?xf64, #CSC>
-    %xm = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #CSR>
+    %xm = tensor.empty(%d0, %d1) : tensor<?x?xf64, #CSR>
     %0 = linalg.generic #trait_matmul
        ins(%arga, %argb: tensor<?x?xf64, #CSR>, tensor<?x?xf64, #CSC>)
         outs(%xm: tensor<?x?xf64, #CSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
index 55111d1ee75c717..2360ea03e3f422e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
@@ -55,7 +55,7 @@ module {
     %c0 = arith.constant 0 : index
     %cf1 = arith.constant 1.0 : f64
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #CSR>
-    %xv = bufferization.alloc_tensor(%d0): tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d0): tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_mat_reduce_rowwise
       ins(%arga: tensor<?x?xf64, #CSR>)
       outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -74,7 +74,7 @@ module {
     %c0 = arith.constant 0 : index
     %cf1 = arith.constant 1.0 : f64
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #CSC>
-    %xv = bufferization.alloc_tensor(%d0): tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d0): tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_mat_reduce_rowwise
       ins(%arga: tensor<?x?xf64, #CSC>)
       outs(%xv: tensor<?xf64, #SparseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
index 15684be1bcc1c6f..3ef3c7ae59c8ada 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -121,7 +121,7 @@ module {
   func.func @sparse_sampled_dd(%args: tensor<8x8xf64, #SM>,
                                %arga: tensor<8x8xf64>,
                                %argb: tensor<8x8xf64>) -> tensor<8x8xf64, #SM> {
-    %1 = bufferization.alloc_tensor() : tensor<8x8xf64, #SM>
+    %1 = tensor.empty() : tensor<8x8xf64, #SM>
     %2 = linalg.generic #trait_sampled_dense_dense
       ins(%args, %arga, %argb: tensor<8x8xf64, #SM>,
                                tensor<8x8xf64>, tensor<8x8xf64>)
@@ -154,7 +154,7 @@ module {
           linalg.yield %q : f64
     } -> tensor<8x8xf64>
    // Sample the result with element-wise multiplication with the sparse matrix.
-    %3 = bufferization.alloc_tensor() : tensor<8x8xf64, #SM>
+    %3 = tensor.empty() : tensor<8x8xf64, #SM>
     %4 = linalg.generic #trait_scale
       ins(%2, %args : tensor<8x8xf64>, tensor<8x8xf64, #SM>)
       outs(%3 : tensor<8x8xf64, #SM>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
index f451ae43cdef86e..c260d32e9d4e7d3 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
@@ -39,7 +39,7 @@ module @func_sparse.2 {
     %1 = scf.if %cond -> (tensor<2x3x4xf64, #SparseMatrix>) {
       %cst_2 = arith.constant dense<1.000000e+00> : tensor<f64>
       %cst_3 = arith.constant dense<1.000000e+00> : tensor<2x3x4xf64>
-      %2 = bufferization.alloc_tensor() : tensor<2x3x4xf64, #SparseMatrix>
+      %2 = tensor.empty() : tensor<2x3x4xf64, #SparseMatrix>
       %3 = linalg.generic {
         indexing_maps = [#map, #map, #map],
         iterator_types = ["parallel", "parallel", "parallel"]}
@@ -53,7 +53,7 @@ module @func_sparse.2 {
     } else {
       %cst_2 = arith.constant dense<1.000000e+00> : tensor<f64>
       %cst_3 = arith.constant dense<1.000000e+00> : tensor<2x3x4xf64>
-      %2 = bufferization.alloc_tensor() : tensor<2x3x4xf64, #SparseMatrix>
+      %2 = tensor.empty() : tensor<2x3x4xf64, #SparseMatrix>
       %3 = linalg.generic {
         indexing_maps = [#map, #map, #map],
         iterator_types = ["parallel", "parallel", "parallel"]}

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
index 70546f804fa1c3a..8b6e8ff18345339 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
@@ -60,7 +60,7 @@ module {
     %c0 = arith.constant 0 : index
     %cf1 = arith.constant 1.0 : f64
     %d0 = tensor.dim %arga, %c0 : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d0): tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d0): tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_vec_select
       ins(%arga: tensor<?xf64, #SparseVector>)
       outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -80,7 +80,7 @@ module {
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #CSR>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #CSR>
-    %xv = bufferization.alloc_tensor(%d0, %d1): tensor<?x?xf64, #CSR>
+    %xv = tensor.empty(%d0, %d1): tensor<?x?xf64, #CSR>
     %0 = linalg.generic #trait_mat_select
       ins(%arga: tensor<?x?xf64, #CSR>)
       outs(%xv: tensor<?x?xf64, #CSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
index 7f72d0659eeb035..f0d89cfd9423a53 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
@@ -48,7 +48,7 @@ module {
   func.func @sparse_select(%cond: tensor<5x5xi1>,
                            %arga: tensor<5x5xf64, #DCSR>,
                            %argb: tensor<5x5xf64, #DCSR>) -> tensor<5x5xf64, #DCSR> {
-    %xv = bufferization.alloc_tensor() : tensor<5x5xf64, #DCSR>
+    %xv = tensor.empty() : tensor<5x5xf64, #DCSR>
     %0 = linalg.generic #sel_trait
        ins(%cond, %arga, %argb: tensor<5x5xi1>, tensor<5x5xf64, #DCSR>, tensor<5x5xf64, #DCSR>)
         outs(%xv: tensor<5x5xf64, #DCSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
index 327ac8656f405d4..1be1652dfa87e69 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
@@ -56,7 +56,7 @@ module {
                              -> tensor<?xf64, #SparseVector> {
     %c0 = arith.constant 0 : index
     %d = tensor.dim %arg0, %c0 : tensor<?xf64, #SparseVector>
-    %xin = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xin = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_op
       ins(%arg0: tensor<?xf64, #SparseVector>)
       outs(%xin: tensor<?xf64, #SparseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
index 748b740d99abde6..ab166b04965ef69 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir
@@ -40,7 +40,7 @@
 
// Creates and returns a 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f
 func.func @alloc_4d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %f : f32) -> tensor<?x?x?x?xf32> {
-  %buf = bufferization.alloc_tensor(%s1, %s2, %s3, %s4) : tensor<?x?x?x?xf32>
+  %buf = tensor.empty(%s1, %s2, %s3, %s4) : tensor<?x?x?x?xf32>
   %ret = linalg.fill ins(%f : f32) outs(%buf : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
   return %ret : tensor<?x?x?x?xf32>
 }

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
index b3a6c8a229e00ec..13f4f221dff05e5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
@@ -83,7 +83,7 @@ module {
     // Setup memory for a single reduction scalar,
     // initialized to zero.
     // TODO: tensor.from_elements does not support complex.
-    %alloc = bufferization.alloc_tensor() : tensor<complex<f64>>
+    %alloc = tensor.empty() : tensor<complex<f64>>
     %x = tensor.insert %d0 into %alloc[] : tensor<complex<f64>>
 
     // Read the sparse matrix from file, construct sparse storage.
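
As an aside, here is a minimal sketch (hypothetical helper, not part of the patch) of the rank-0 idiom in the hunk above: because tensor.from_elements does not support complex element types, the test creates an empty 0-d tensor and inserts the scalar explicitly.

  func.func @complex_scalar(%d0 : complex<f64>) -> tensor<complex<f64>> {
    // Rank-0 output tensor; tensor.insert with the empty index list [] fills it.
    %alloc = tensor.empty() : tensor<complex<f64>>
    %x = tensor.insert %d0 into %alloc[] : tensor<complex<f64>>
    return %x : tensor<complex<f64>>
  }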

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
index 9da1405163cb02e..c94de66724c938e 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
@@ -55,7 +55,7 @@ module {
     %d0 = tensor.dim %arga, %c0 : tensor<?x?x?xf64, #ST>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?x?xf64, #ST>
     %d2 = tensor.dim %arga, %c2 : tensor<?x?x?xf64, #ST>
-    %xt = bufferization.alloc_tensor(%d0, %d1, %d2) : tensor<?x?x?xf64, #ST>
+    %xt = tensor.empty(%d0, %d1, %d2) : tensor<?x?x?xf64, #ST>
     %0 = linalg.generic #trait_mul
        ins(%arga, %argb: tensor<?x?x?xf64, #ST>, tensor<?x?x?xf64, #ST>)
         outs(%xt: tensor<?x?x?xf64, #ST>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
index b0a3691adae34ac..0530cf4ff633585 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
@@ -55,7 +55,7 @@ module {
     %d0 = tensor.dim %arga, %c0 : tensor<?x?x?xf64, #ST1>
     %d1 = tensor.dim %arga, %c1 : tensor<?x?x?xf64, #ST1>
     %d2 = tensor.dim %arga, %c2 : tensor<?x?x?xf64, #ST1>
-    %xm = bufferization.alloc_tensor(%d0, %d1, %d2) : tensor<?x?x?xf64, #ST2>
+    %xm = tensor.empty(%d0, %d1, %d2) : tensor<?x?x?xf64, #ST2>
     %0 = linalg.generic #trait_scale
        ins(%arga: tensor<?x?x?xf64, #ST1>)
         outs(%xm: tensor<?x?x?xf64, #ST2>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
index b750bdf5559e2e7..d5d49a3502ee000 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
@@ -60,7 +60,7 @@ module {
     %t = sparse_tensor.convert %arga
       : tensor<3x4xf64, #DCSR> to tensor<3x4xf64, #DCSC>
 
-    %i = bufferization.alloc_tensor() : tensor<4x3xf64, #DCSR>
+    %i = tensor.empty() : tensor<4x3xf64, #DCSR>
     %0 = linalg.generic #transpose_trait
        ins(%t: tensor<3x4xf64, #DCSC>)
        outs(%i: tensor<4x3xf64, #DCSR>) {
@@ -79,7 +79,7 @@ module {
   //
   func.func @sparse_transpose_auto(%arga: tensor<3x4xf64, #DCSR>)
                                        -> tensor<4x3xf64, #DCSR> {
-    %i = bufferization.alloc_tensor() : tensor<4x3xf64, #DCSR>
+    %i = tensor.empty() : tensor<4x3xf64, #DCSR>
     %0 = linalg.generic #transpose_trait
        ins(%arga: tensor<3x4xf64, #DCSR>)
        outs(%i: tensor<4x3xf64, #DCSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
index 5cb3e5e6cd45dca..4b4608452609866 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
@@ -40,7 +40,7 @@ module {
   // A linalg representation of some higher "transpose" op.
   func.func @transpose_coo(%arga: tensor<10x5xf32, #SortedCOO>)
                                -> tensor<5x10xf32, #SortedCOO> {
-    %0 = bufferization.alloc_tensor() : tensor<5x10xf32, #SortedCOO>
+    %0 = tensor.empty() : tensor<5x10xf32, #SortedCOO>
     %1 = linalg.generic {
       indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>,
                        affine_map<(d0, d1) -> (d0, d1)>],

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
index 2df3cfb703a2b19..e5f5d22b6983253 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
@@ -58,7 +58,7 @@ module {
     %c = arith.constant 0 : index
     %ci1 = arith.constant 1 : i32
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
     %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xi32, #SparseVector>) {
@@ -79,7 +79,7 @@ module {
   func.func @vector_complement_dense(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xi32>
+    %xv = tensor.empty(%d) : tensor<?xi32>
     %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xi32>) {
@@ -100,7 +100,7 @@ module {
     %c = arith.constant 0 : index
     %cf1 = arith.constant 1.0 : f64
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -123,7 +123,7 @@ module {
   func.func @vector_magnify(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -151,7 +151,7 @@ module {
     %cfmax = arith.constant 7.0 : f64
     %d0 = tensor.dim %argx, %c0 : tensor<?x?xf64, #DCSR>
     %d1 = tensor.dim %argx, %c1 : tensor<?x?xf64, #DCSR>
-    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
+    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
     %0 = linalg.generic #trait_mat
        ins(%argx: tensor<?x?xf64, #DCSR>)
         outs(%xv: tensor<?x?xf64, #DCSR>) {
@@ -178,7 +178,7 @@ module {
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %argx, %c0 : tensor<?x?xf64, #DCSR>
     %d1 = tensor.dim %argx, %c1 : tensor<?x?xf64, #DCSR>
-    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
+    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
     %0 = linalg.generic #trait_mat
        ins(%argx: tensor<?x?xf64, #DCSR>)
         outs(%xv: tensor<?x?xf64, #DCSR>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
index d9e1125a79d856f..004fd0c18f4fe76 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
@@ -76,7 +76,7 @@ module {
     %s = arith.constant 2.0 : f64
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_scale
        ins(%arga: tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -104,7 +104,7 @@ module {
                    %argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -120,7 +120,7 @@ module {
                    %argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -136,7 +136,7 @@ module {
                      %argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #DenseVector> {
     %c = arith.constant 0 : index
     %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
-    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #DenseVector>
+    %xv = tensor.empty(%d) : tensor<?xf64, #DenseVector>
     %0 = linalg.generic #trait_op
        ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>)
         outs(%xv: tensor<?xf64, #DenseVector>) {

diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir
index 28af7a03e805ec8..258ea13e60c07c6 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-gemm-lib.mlir
@@ -34,7 +34,7 @@ module {
   // Computes C = A x B with A,B,C sparse CSR.
   func.func @matmulCSR(%A: tensor<8x8xf32, #CSR>,
                        %B: tensor<8x8xf32, #CSR>) -> tensor<8x8xf32, #CSR> {
-    %init = bufferization.alloc_tensor() : tensor<8x8xf32, #CSR>
+    %init = tensor.empty() : tensor<8x8xf32, #CSR>
     %C = linalg.matmul
       ins(%A, %B: tensor<8x8xf32, #CSR>,
                   tensor<8x8xf32, #CSR>)
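
For reference, a minimal sketch (hypothetical kernel and encoding names, not part of the patch) of the rewrite applied throughout these tests: the uninitialized output tensor is now created with tensor.empty instead of bufferization.alloc_tensor, trading an explicit allocation op for a plain tensor value, while the sparse encoding on the result type stays unchanged.

  #CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>

  // Before: output tensor obtained through the bufferization dialect.
  func.func @out_before(%d0 : index, %d1 : index) -> tensor<?x?xf64, #CSR> {
    %x = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #CSR>
    return %x : tensor<?x?xf64, #CSR>
  }

  // After: the form used throughout this patch.
  func.func @out_after(%d0 : index, %d1 : index) -> tensor<?x?xf64, #CSR> {
    %x = tensor.empty(%d0, %d1) : tensor<?x?xf64, #CSR>
    return %x : tensor<?x?xf64, #CSR>
  }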
