[Mlir-commits] [mlir] fb35cd3 - [mlir][NFC] Update textual references of `func` to `func.func` in SparseTensor tests

River Riddle <llvmlistbot@llvm.org>
Wed Apr 20 22:24:37 PDT 2022


Author: River Riddle
Date: 2022-04-20T22:17:29-07:00
New Revision: fb35cd3baf777988a32464d7d117a83a75d5918d

URL: https://github.com/llvm/llvm-project/commit/fb35cd3baf777988a32464d7d117a83a75d5918d
DIFF: https://github.com/llvm/llvm-project/commit/fb35cd3baf777988a32464d7d117a83a75d5918d.diff

LOG: [mlir][NFC] Update textual references of `func` to `func.func` in SparseTensor tests

The special-case parsing of `func` operations is being removed, so the operation must now be spelled with its fully qualified name, `func.func`.
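
For reference, the change in each test is purely textual; an illustrative snippet (not taken verbatim from the patch) of the before/after form:

  // Before: relied on the special-case parsing of `func`.
  func @example(%arg0: f32) -> f32 {
    return %arg0 : f32
  }

  // After: uses the fully qualified operation name.
  func.func @example(%arg0: f32) -> f32 {
    return %arg0 : f32
  }

Note that bare `return` still parses inside the body, since `func.func` supplies `func` as the default dialect for its region.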

Added: 
    

Modified: 
    mlir/test/Dialect/SparseTensor/conversion.mlir
    mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
    mlir/test/Dialect/SparseTensor/dense.mlir
    mlir/test/Dialect/SparseTensor/fold.mlir
    mlir/test/Dialect/SparseTensor/invalid.mlir
    mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
    mlir/test/Dialect/SparseTensor/roundtrip.mlir
    mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
    mlir/test/Dialect/SparseTensor/sparse_1d.mlir
    mlir/test/Dialect/SparseTensor/sparse_2d.mlir
    mlir/test/Dialect/SparseTensor/sparse_3d.mlir
    mlir/test/Dialect/SparseTensor/sparse_affine.mlir
    mlir/test/Dialect/SparseTensor/sparse_expand.mlir
    mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
    mlir/test/Dialect/SparseTensor/sparse_index.mlir
    mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
    mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
    mlir/test/Dialect/SparseTensor/sparse_lower.mlir
    mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
    mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
    mlir/test/Dialect/SparseTensor/sparse_nd.mlir
    mlir/test/Dialect/SparseTensor/sparse_out.mlir
    mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
    mlir/test/Dialect/SparseTensor/sparse_perm.mlir
    mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir
    mlir/test/Dialect/SparseTensor/sparse_scalars.mlir
    mlir/test/Dialect/SparseTensor/sparse_storage.mlir
    mlir/test/Dialect/SparseTensor/sparse_vector.mlir
    mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
    mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir
    mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index 00451c61f293f..cff7390a7e865 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -30,7 +30,7 @@
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
 //       CHECK: return %[[D]] : index
-func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
+func.func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
   %c = arith.constant 0 : index
   %0 = tensor.dim %arg0, %c : tensor<?xf64, #SparseVector>
   return %0 : index
@@ -41,7 +41,7 @@ func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
 //       CHECK: %[[C:.*]] = arith.constant 2 : index
 //       CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
 //       CHECK: return %[[D]] : index
-func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
+func.func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
   // Querying for dimension 1 in the tensor type needs to be
   // permuted into querying for dimension 2 in the stored sparse
   // tensor scheme, since the latter honors the dimOrdering.
@@ -54,7 +54,7 @@ func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: %[[C:.*]] = arith.constant 20 : index
 //       CHECK: return %[[C]] : index
-func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
+func.func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
   // Querying for dimension 1 in the tensor type can be directly
   // folded into the right value (even though it corresponds
   // to dimension 2 in the stored sparse tensor scheme).
@@ -74,7 +74,7 @@ func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
+func.func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
   return %0 : tensor<128xf64, #SparseVector>
 }
@@ -90,7 +90,7 @@ func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
+func.func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #SparseMatrix>
   return %0 : tensor<?x?xf32, #SparseMatrix>
 }
@@ -106,7 +106,7 @@ func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
+func.func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?x?xf32, #SparseTensor>
   return %0 : tensor<?x?x?xf32, #SparseTensor>
 }
@@ -128,7 +128,7 @@ func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
+func.func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
   %0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf64, #SparseMatrix>
   return %0 : tensor<?x?xf64, #SparseMatrix>
 }
@@ -137,7 +137,7 @@ func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix>
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr<i8>) -> ()
 //       CHECK: return
-func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
+func.func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
   sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
   return
 }
@@ -145,7 +145,7 @@ func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
 // CHECK-LABEL: func @sparse_nop_convert(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //       CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
   return %0 : tensor<64xf32, #SparseVector>
 }
@@ -153,7 +153,7 @@ func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32,
 // CHECK-LABEL: func @sparse_hidden_nop_cast(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //       CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
+func.func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
   return %0 : tensor<?xf32, #SparseVector>
 }
@@ -161,7 +161,7 @@ func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf
 // CHECK-LABEL: func @sparse_nop_cast(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //       CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
+func.func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
   %0 = tensor.cast %arg0 : tensor<64xf32, #SparseVector> to tensor<?xf32, #SparseVector>
   return %0 : tensor<?xf32, #SparseVector>
 }
@@ -191,7 +191,7 @@ func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #Sp
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
 //       CHECK: call @delSparseTensorCOOI32(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
+func.func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
   return %0 : tensor<?xi32, #SparseVector>
 }
@@ -210,7 +210,7 @@ func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
 //       CHECK: call @delSparseTensorCOOF32(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
+func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
   return %0 : tensor<?xf32, #SparseVector32>
 }
@@ -242,7 +242,7 @@ func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf3
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
 //       CHECK: call @delSparseTensorCOOF64(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
+func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #SparseMatrix>
   return %0 : tensor<2x4xf64, #SparseMatrix>
 }
@@ -272,7 +272,7 @@ func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
 //       CHECK: call @delSparseTensorCOOF32(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{
+func.func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{
   // Initialize a tensor.
   %0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32>
   // Convert the tensor to a sparse tensor.
@@ -314,7 +314,7 @@ func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
 //       CHECK: call @delSparseTensorCOOF64(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
-func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
+func.func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
   return %0 : tensor<?x?x?xf64, #SparseTensor>
 }
@@ -324,7 +324,7 @@ func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTe
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = call @sparsePointers(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
 //       CHECK: return %[[T]] : memref<?xindex>
-func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
   return %0 : memref<?xindex>
@@ -335,7 +335,7 @@ func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex>
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
 //       CHECK: return %[[T]] : memref<?xi64>
-func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
+func.func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
   return %0 : memref<?xi64>
@@ -346,7 +346,7 @@ func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
 //       CHECK: return %[[T]] : memref<?xi32>
-func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
+func.func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
   return %0 : memref<?xi32>
@@ -357,7 +357,7 @@ func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = call @sparseIndices(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
 //       CHECK: return %[[T]] : memref<?xindex>
-func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
   return %0 : memref<?xindex>
@@ -368,7 +368,7 @@ func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
 //       CHECK: return %[[T]] : memref<?xi64>
-func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
+func.func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
   return %0 : memref<?xi64>
@@ -379,7 +379,7 @@ func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64>
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
 //       CHECK: return %[[T]] : memref<?xi32>
-func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
+func.func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
   return %0 : memref<?xi32>
@@ -389,7 +389,7 @@ func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32>
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
 //       CHECK: return %[[T]] : memref<?xf64>
-func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
+func.func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
   %0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
   return %0 : memref<?xf64>
 }
@@ -398,7 +398,7 @@ func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
 //       CHECK: return %[[T]] : memref<?xf32>
-func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
+func.func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
   %0 = sparse_tensor.values %arg0: tensor<128xf32, #SparseVector> to memref<?xf32>
   return %0 : memref<?xf32>
 }
@@ -407,7 +407,7 @@ func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: %[[T:.*]] = call @sparseValuesI32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi32>
 //       CHECK: return %[[T]] : memref<?xi32>
-func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
+func.func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
   %0 = sparse_tensor.values %arg0: tensor<128xi32, #SparseVector> to memref<?xi32>
   return %0 : memref<?xi32>
 }
@@ -416,7 +416,7 @@ func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: %[[T:.*]] = call @sparseValuesI16(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi16>
 //       CHECK: return %[[T]] : memref<?xi16>
-func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
+func.func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
   %0 = sparse_tensor.values %arg0: tensor<128xi16, #SparseVector> to memref<?xi16>
   return %0 : memref<?xi16>
 }
@@ -425,7 +425,7 @@ func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: %[[T:.*]] = call @sparseValuesI8(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi8>
 //       CHECK: return %[[T]] : memref<?xi8>
-func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
+func.func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
   %0 = sparse_tensor.values %arg0: tensor<128xi8, #SparseVector> to memref<?xi8>
   return %0 : memref<?xi8>
 }
@@ -433,7 +433,7 @@ func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
 // CHECK-LABEL: func @sparse_reconstruct(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
 //       CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
+func.func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
   %0 = sparse_tensor.load %arg0 : tensor<128xf32, #SparseVector>
   return %0 : tensor<128xf32, #SparseVector>
 }
@@ -442,7 +442,7 @@ func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf3
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
 //       CHECK: call @endInsert(%[[A]]) : (!llvm.ptr<i8>) -> ()
 //       CHECK: return %[[A]] : !llvm.ptr<i8>
-func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
+func.func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
   %0 = sparse_tensor.load %arg0 hasInserts : tensor<128xf32, #SparseVector>
   return %0 : tensor<128xf32, #SparseVector>
 }
@@ -453,7 +453,7 @@ func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<12
 //  CHECK-SAME: %[[C:.*]]: f32) {
 //       CHECK: call @lexInsertF32(%[[A]], %[[B]], %[[C]]) : (!llvm.ptr<i8>, memref<?xindex>, f32) -> ()
 //       CHECK: return
-func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
+func.func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
                     %arg1: memref<?xindex>,
                     %arg2: f32) {
   sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf32, #SparseVector>, memref<?xindex>, f32
@@ -468,7 +468,7 @@ func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
 //   CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[A]] : memref<?xf64>)
 //   CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
 //       CHECK: return %[[C]] : memref<?xindex>
-func @sparse_expansion() -> memref<?xindex> {
+func.func @sparse_expansion() -> memref<?xindex> {
   %c = arith.constant 8 : index
   %0 = sparse_tensor.init [%c, %c] : tensor<8x8xf64, #SparseMatrix>
   %values, %filled, %added, %count = sparse_tensor.expand %0
@@ -487,7 +487,7 @@ func @sparse_expansion() -> memref<?xindex> {
 //   CHECK-DAG: memref.dealloc %[[D]] : memref<?xi1>
 //   CHECK-DAG: memref.dealloc %[[E]] : memref<?xindex>
 //       CHECK: return
-func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
+func.func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
                          %arg1: memref<?xindex>, %arg2: memref<?xf64>, %arg3: memref<?xi1>,
                          %arg4: memref<?xindex>, %arg5: index) {
   sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
@@ -504,7 +504,7 @@ func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
 //       CHECK: call @outSparseTensorF64(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
 //       CHECK: call @delSparseTensorCOOF64(%[[COO]])
 //       CHECK: return
-func @sparse_out1(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
+func.func @sparse_out1(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
   sparse_tensor.out %arg0, %arg1 : tensor<?x?xf64, #SparseMatrix>, !llvm.ptr<i8>
   return
 }
@@ -518,7 +518,7 @@ func @sparse_out1(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
 //       CHECK: call @outSparseTensorF32(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
 //       CHECK: call @delSparseTensorCOOF32(%[[COO]])
 //       CHECK: return
-func @sparse_out2(%arg0: tensor<?x?x?xf32, #SparseTensor>, %arg1: !llvm.ptr<i8>) {
+func.func @sparse_out2(%arg0: tensor<?x?x?xf32, #SparseTensor>, %arg1: !llvm.ptr<i8>) {
   sparse_tensor.out %arg0, %arg1 : tensor<?x?x?xf32, #SparseTensor>, !llvm.ptr<i8>
   return
 }

diff  --git a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
index 88788c40d32f4..db932acba5380 100644
--- a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
@@ -47,7 +47,7 @@
 //       CHECK: }
 //       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<13xi32>
 //       CHECK: return %[[T]] : tensor<13xi32>
-func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
+func.func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
   %0 = sparse_tensor.convert %arg0 : tensor<13xi32, #SparseVector> to tensor<13xi32>
   return %0 : tensor<13xi32>
 }
@@ -86,7 +86,7 @@ func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32>
 //       CHECK: }
 //       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?xi32>
 //       CHECK: return %[[T]] : tensor<?xi32>
-func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
+func.func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
   return %0 : tensor<?xi32>
 }
@@ -130,7 +130,7 @@ func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32
 //       CHECK: }
 //       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x4xf64>
 //       CHECK: return %[[T]] : tensor<2x4xf64>
-func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
+func.func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64, #SparseMatrix> to tensor<2x4xf64>
   return %0 : tensor<2x4xf64>
 }
@@ -174,7 +174,7 @@ func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64
 //       CHECK: }
 //       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x4xf64>
 //       CHECK: return %[[T]] : tensor<?x4xf64>
-func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
+func.func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
   return %0 : tensor<?x4xf64>
 }
@@ -218,7 +218,7 @@ func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x
 //       CHECK: }
 //       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x?xf64>
 //       CHECK: return %[[T]] : tensor<2x?xf64>
-func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
+func.func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
   return %0 : tensor<2x?xf64>
 }
@@ -262,7 +262,7 @@ func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x
 //       CHECK: }
 //       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x?xf64>
 //       CHECK: return %[[T]] : tensor<?x?xf64>
-func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
+func.func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
   return %0 : tensor<?x?xf64>
 }
@@ -311,7 +311,7 @@ func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x
 //       CHECK: }
 //       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x3x4xf64>
 //       CHECK: return %[[T]] : tensor<2x3x4xf64>
-func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
+func.func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseTensor> to tensor<2x3x4xf64>
   return %0 : tensor<2x3x4xf64>
 }

diff  --git a/mlir/test/Dialect/SparseTensor/dense.mlir b/mlir/test/Dialect/SparseTensor/dense.mlir
index 5e9b587b39b28..247a4b810fa72 100644
--- a/mlir/test/Dialect/SparseTensor/dense.mlir
+++ b/mlir/test/Dialect/SparseTensor/dense.mlir
@@ -56,7 +56,7 @@
 // CHECK:           %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_16]] : tensor<32x16xf32>
 // CHECK:         }
-func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
+func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
              %argx: tensor<32x16xf32> {linalg.inplaceable = false})
 	     -> tensor<32x16xf32> {
   %c = arith.constant 1.0 : f32
@@ -97,7 +97,7 @@ func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
 // CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_15]] : tensor<32x16xf32>
 // CHECK:         }
-func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
+func.func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
              %argx: tensor<32x16xf32> {linalg.inplaceable = true})
 	     -> tensor<32x16xf32> {
   %c = arith.constant 1.0 : f32
@@ -138,7 +138,7 @@ func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
 // CHECK:           %[[VAL_15:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:           return %[[VAL_15]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:         }
-func @dense3(%arga: tensor<32x16xf32>,
+func.func @dense3(%arga: tensor<32x16xf32>,
              %argx: tensor<32x16xf32, #DenseMatrix> {linalg.inplaceable = true})
 	     -> tensor<32x16xf32, #DenseMatrix> {
   %c = arith.constant 1.0 : f32
@@ -186,7 +186,7 @@ func @dense3(%arga: tensor<32x16xf32>,
 // CHECK:           %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:           return %[[VAL_20]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:         }
-func @dense4(%arga: tensor<32x16x8xf32>,
+func.func @dense4(%arga: tensor<32x16x8xf32>,
              %argx: tensor<32x16xf32, #DenseMatrix> {linalg.inplaceable = true})
 	     -> tensor<32x16xf32, #DenseMatrix> {
   %0 = linalg.generic #trait_3d

diff  --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir
index 41189eee4271b..eedb205526b87 100644
--- a/mlir/test/Dialect/SparseTensor/fold.mlir
+++ b/mlir/test/Dialect/SparseTensor/fold.mlir
@@ -6,7 +6,7 @@
 //  CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
 //   CHECK-NOT: sparse_tensor.convert
 //       CHECK: return %[[A]] : tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>
-func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
   return %0 : tensor<64xf32, #SparseVector>
 }
@@ -15,7 +15,7 @@ func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32,
 //  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
 //   CHECK-NOT: sparse_tensor.convert
 //       CHECK: return
-func @sparse_dce_convert(%arg0: tensor<64xf32>) {
+func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
   return
 }
@@ -26,7 +26,7 @@ func @sparse_dce_convert(%arg0: tensor<64xf32>) {
 //   CHECK-NOT: sparse_tensor.indices
 //   CHECK-NOT: sparse_tensor.values
 //       CHECK: return
-func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
+func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.pointers %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>
   %1 = sparse_tensor.indices %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>

diff  --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index a6fdb37262ae2..c840975c222d6 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -split-input-file -verify-diagnostics
 
-func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
+func.func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
  // expected-error @+1 {{expected a sparse tensor result}}
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<32xf32>
   return %0 : tensor<32xf32>
@@ -8,7 +8,7 @@ func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
 
 // -----
 
-func @invalid_release_dense(%arg0: tensor<4xi32>) {
+func.func @invalid_release_dense(%arg0: tensor<4xi32>) {
  // expected-error @+1 {{expected a sparse tensor to release}}
   sparse_tensor.release %arg0 : tensor<4xi32>
   return
@@ -16,7 +16,7 @@ func @invalid_release_dense(%arg0: tensor<4xi32>) {
 
 // -----
 
-func @invalid_init_dense(%arg0: index, %arg1: index) -> tensor<?x?xf32> {
+func.func @invalid_init_dense(%arg0: index, %arg1: index) -> tensor<?x?xf32> {
  // expected-error @+1 {{expected a sparse tensor result}}
   %0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
@@ -26,7 +26,7 @@ func @invalid_init_dense(%arg0: index, %arg1: index) -> tensor<?x?xf32> {
 
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
 
-func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
+func.func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
  // expected-error @+1 {{unexpected mismatch between tensor rank and sizes: 1 vs. 2}}
   %0 = sparse_tensor.init [%arg0, %arg0] : tensor<?xf32, #SparseVector>
   return %0 : tensor<?xf32, #SparseVector>
@@ -36,7 +36,7 @@ func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
 
 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
 
-func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
+func.func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
   %c10 = arith.constant 10 : index
   %c20 = arith.constant 20 : index
  // expected-error @+1 {{unexpected mismatch with static dimension size 10}}
@@ -46,7 +46,7 @@ func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
 
 // -----
 
-func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
+func.func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
   %c = arith.constant 0 : index
  // expected-error @+1 {{expected a sparse tensor to get pointers}}
   %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
@@ -55,7 +55,7 @@ func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
 
 // -----
 
-func @invalid_pointers_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
+func.func @invalid_pointers_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
   %c = arith.constant 0 : index
  // expected-error @+1 {{expected a sparse tensor to get pointers}}
   %0 = sparse_tensor.pointers %arg0, %c : tensor<*xf64> to memref<?xindex>
@@ -66,7 +66,7 @@ func @invalid_pointers_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
 
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], pointerBitWidth=32}>
 
-func @mismatch_pointers_types(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @mismatch_pointers_types(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
   %c = arith.constant 0 : index
  // expected-error @+1 {{unexpected type for pointers}}
   %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
@@ -77,7 +77,7 @@ func @mismatch_pointers_types(%arg0: tensor<128xf64, #SparseVector>) -> memref<?
 
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
 
-func @pointers_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @pointers_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
   %c = arith.constant 1 : index
  // expected-error @+1 {{requested pointers dimension out of bounds}}
   %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
@@ -86,7 +86,7 @@ func @pointers_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
 
 // -----
 
-func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref<?xindex> {
+func.func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref<?xindex> {
   %c = arith.constant 1 : index
  // expected-error @+1 {{expected a sparse tensor to get indices}}
   %0 = sparse_tensor.indices %arg0, %c : tensor<10x10xi32> to memref<?xindex>
@@ -95,7 +95,7 @@ func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref<?xindex> {
 
 // -----
 
-func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
+func.func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
   %c = arith.constant 0 : index
  // expected-error @+1 {{expected a sparse tensor to get indices}}
   %0 = sparse_tensor.indices %arg0, %c : tensor<*xf64> to memref<?xindex>
@@ -106,7 +106,7 @@ func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
 
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
 
-func @mismatch_indices_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xi32> {
+func.func @mismatch_indices_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xi32> {
   %c = arith.constant 0 : index
  // expected-error @+1 {{unexpected type for indices}}
   %0 = sparse_tensor.indices %arg0, %c : tensor<?xf64, #SparseVector> to memref<?xi32>
@@ -117,7 +117,7 @@ func @mismatch_indices_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xi3
 
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
 
-func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
   %c = arith.constant 1 : index
  // expected-error @+1 {{requested indices dimension out of bounds}}
   %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
@@ -126,7 +126,7 @@ func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
 
 // -----
 
-func @invalid_values_dense(%arg0: tensor<1024xf32>) -> memref<?xf32> {
+func.func @invalid_values_dense(%arg0: tensor<1024xf32>) -> memref<?xf32> {
  // expected-error @+1 {{expected a sparse tensor to get values}}
   %0 = sparse_tensor.values %arg0 : tensor<1024xf32> to memref<?xf32>
   return %0 : memref<?xf32>
@@ -136,7 +136,7 @@ func @invalid_values_dense(%arg0: tensor<1024xf32>) -> memref<?xf32> {
 
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
 
-func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32> {
+func.func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32> {
  // expected-error @+1 {{unexpected mismatch in element types}}
   %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf32>
   return %0 : memref<?xf32>
@@ -144,7 +144,7 @@ func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32
 
 // -----
 
-func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> {
+func.func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> {
  // expected-error @+1 {{expected a sparse tensor to materialize}}
   %0 = sparse_tensor.load %arg0 : tensor<16x32xf64>
   return %0 : tensor<16x32xf64>
@@ -152,7 +152,7 @@ func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> {
 
 // -----
 
-func @sparse_unannotated_insert(%arg0: tensor<128xf64>, %arg1: memref<?xindex>, %arg2: f64) {
+func.func @sparse_unannotated_insert(%arg0: tensor<128xf64>, %arg1: memref<?xindex>, %arg2: f64) {
  // expected-error @+1 {{expected a sparse tensor for insertion}}
   sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf64>, memref<?xindex>, f64
   return
@@ -160,7 +160,7 @@ func @sparse_unannotated_insert(%arg0: tensor<128xf64>, %arg1: memref<?xindex>,
 
 // -----
 
-func @sparse_unannotated_expansion(%arg0: tensor<128xf64>) {
+func.func @sparse_unannotated_expansion(%arg0: tensor<128xf64>) {
  // expected-error @+1 {{expected a sparse tensor for expansion}}
   %values, %filled, %added, %count = sparse_tensor.expand %arg0
     : tensor<128xf64> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
@@ -169,7 +169,7 @@ func @sparse_unannotated_expansion(%arg0: tensor<128xf64>) {
 
 // -----
 
-func @sparse_unannotated_compression(%arg0: tensor<128xf64>, %arg1: memref<?xindex>,
+func.func @sparse_unannotated_compression(%arg0: tensor<128xf64>, %arg1: memref<?xindex>,
                                      %arg2: memref<?xf64>, %arg3: memref<?xi1>,
 				     %arg4: memref<?xindex>, %arg5: index) {
  // expected-error @+1 {{expected a sparse tensor for compression}}
@@ -179,7 +179,7 @@ func @sparse_unannotated_compression(%arg0: tensor<128xf64>, %arg1: memref<?xind
 
 // -----
 
-func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
+func.func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
  // expected-error @+1 {{unexpected type in convert}}
   %0 = sparse_tensor.convert %arg0 : tensor<*xf32> to tensor<10xf32>
   return %0 : tensor<10xf32>
@@ -189,7 +189,7 @@ func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
 
 #DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
 
-func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor<?xf64> {
+func.func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor<?xf64> {
  // expected-error @+1 {{unexpected conversion mismatch in rank}}
   %0 = sparse_tensor.convert %arg0 : tensor<10x10xf64, #DCSR> to tensor<?xf64>
   return %0 : tensor<?xf64>
@@ -199,7 +199,7 @@ func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor<?x
 
 #CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
 
-func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
+func.func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
  // expected-error @+1 {{unexpected conversion mismatch in dimension 1}}
   %0 = sparse_tensor.convert %arg0 : tensor<10x?xf32> to tensor<10x10xf32, #CSR>
   return %0 : tensor<10x10xf32, #CSR>
@@ -207,7 +207,7 @@ func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32,
 
 // -----
 
-func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
+func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
  // expected-error @+1 {{expected a sparse tensor for output}}
   sparse_tensor.out %arg0, %arg1 : tensor<10xf64>, !llvm.ptr<i8>
   return
@@ -215,7 +215,7 @@ func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
 
 // -----
 
-func @invalid_binary_num_args_mismatch_overlap(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_num_args_mismatch_overlap(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error @+1 {{overlap region must have exactly 2 arguments}}
   %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
     overlap={
@@ -229,7 +229,7 @@ func @invalid_binary_num_args_mismatch_overlap(%arg0: f64, %arg1: f64) -> f64 {
 
 // -----
 
-func @invalid_binary_num_args_mismatch_right(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_num_args_mismatch_right(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error @+1 {{right region must have exactly 1 arguments}}
   %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
     overlap={}
@@ -243,7 +243,7 @@ func @invalid_binary_num_args_mismatch_right(%arg0: f64, %arg1: f64) -> f64 {
 
 // -----
 
-func @invalid_binary_argtype_mismatch(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_argtype_mismatch(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error @+1 {{overlap region argument 2 type mismatch}}
   %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
     overlap={
@@ -257,7 +257,7 @@ func @invalid_binary_argtype_mismatch(%arg0: f64, %arg1: f64) -> f64 {
 
 // -----
 
-func @invalid_binary_wrong_return_type(%arg0: f64, %arg1: f64) -> f64 {
+func.func @invalid_binary_wrong_return_type(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error @+1 {{left region yield type mismatch}}
   %0 = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
     overlap={}
@@ -272,7 +272,7 @@ func @invalid_binary_wrong_return_type(%arg0: f64, %arg1: f64) -> f64 {
 
 // -----
 
-func @invalid_binary_wrong_identity_type(%arg0: i64, %arg1: f64) -> f64 {
+func.func @invalid_binary_wrong_identity_type(%arg0: i64, %arg1: f64) -> f64 {
  // expected-error @+1 {{left=identity requires first argument to have the same type as the output}}
   %0 = sparse_tensor.binary %arg0, %arg1 : i64, f64 to f64
     overlap={}
@@ -283,7 +283,7 @@ func @invalid_binary_wrong_identity_type(%arg0: i64, %arg1: f64) -> f64 {
 
 // -----
 
-func @invalid_unary_argtype_mismatch(%arg0: f64) -> f64 {
+func.func @invalid_unary_argtype_mismatch(%arg0: f64) -> f64 {
  // expected-error @+1 {{present region argument 1 type mismatch}}
   %r = sparse_tensor.unary %arg0 : f64 to f64
     present={
@@ -296,7 +296,7 @@ func @invalid_unary_argtype_mismatch(%arg0: f64) -> f64 {
 
 // -----
 
-func @invalid_unary_num_args_mismatch(%arg0: f64) -> f64 {
+func.func @invalid_unary_num_args_mismatch(%arg0: f64) -> f64 {
  // expected-error @+1 {{absent region must have exactly 0 arguments}}
   %r = sparse_tensor.unary %arg0 : f64 to f64
     present={}
@@ -309,7 +309,7 @@ func @invalid_unary_num_args_mismatch(%arg0: f64) -> f64 {
 
 // -----
 
-func @invalid_unary_wrong_return_type(%arg0: f64) -> f64 {
+func.func @invalid_unary_wrong_return_type(%arg0: f64) -> f64 {
  // expected-error @+1 {{present region yield type mismatch}}
   %0 = sparse_tensor.unary %arg0 : f64 to f64
     present={

diff  --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
index b78d921b4aa4e..cd3d7b14b707d 100644
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -1,59 +1,59 @@
 // RUN: mlir-opt %s -split-input-file -verify-diagnostics
 
 #a = #sparse_tensor.encoding<{dimLevelType = []}>
-func private @scalar(%arg0: tensor<f64, #a>) -> () // expected-error {{expected non-scalar sparse tensor}}
+func.func private @scalar(%arg0: tensor<f64, #a>) -> () // expected-error {{expected non-scalar sparse tensor}}
 
 // -----
 
 #a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
-func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
+func.func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
 
 // -----
 
 #a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"], dimOrdering = affine_map<(i) -> (i)>}> // expected-error {{unexpected mismatch in ordering and dimension level types size}}
-func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{dimLevelType = [1]}> // expected-error {{expected a string value in dimension level types}}
-func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{dimLevelType = ["strange"]}> // expected-error {{unexpected dimension level type: strange}}
-func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{dimOrdering = "wrong"}> // expected-error {{expected an affine map for dimension ordering}}
-func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{dimOrdering = affine_map<(i,j) -> (i,i)>}> // expected-error {{expected a permutation affine map for dimension ordering}}
-func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{pointerBitWidth = "x"}> // expected-error {{expected an integral pointer bitwidth}}
-func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{pointerBitWidth = 42}> // expected-error {{unexpected pointer bitwidth: 42}}
-func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{indexBitWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
-func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{indexBitWidth = 128}> // expected-error {{unexpected index bitwidth: 128}}
-func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
 
 // -----
 
 #a = #sparse_tensor.encoding<{key = 1}> // expected-error {{unexpected key: key}}
-func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
+func.func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()

diff  --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index a7ea965410ab3..6b7e7619e78bc 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -6,7 +6,7 @@
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //       CHECK: %[[T:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<128xf64, #{{.*}}>
 //       CHECK: return %[[T]] : tensor<128xf64, #{{.*}}>
-func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
+func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
   return %0 : tensor<128xf64, #SparseVector>
 }
@@ -20,7 +20,7 @@ func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
 //   CHECK-DAG: %[[C32:.*]] = arith.constant 32 : index
 //       CHECK: %[[T:.*]] = sparse_tensor.init[%[[C16]], %[[C32]]] : tensor<?x32xf64, #{{.*}}>
 //       CHECK: return %[[T]] : tensor<?x32xf64, #{{.*}}>
-func @sparse_init() -> tensor<?x32xf64, #SparseMatrix> {
+func.func @sparse_init() -> tensor<?x32xf64, #SparseMatrix> {
   %d1 = arith.constant 16 : index
   %d2 = arith.constant 32 : index
   %0 = sparse_tensor.init [%d1, %d2] : tensor<?x32xf64, #SparseMatrix>
@@ -35,7 +35,7 @@ func @sparse_init() -> tensor<?x32xf64, #SparseMatrix> {
 // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>
 //       CHECK: sparse_tensor.release %[[A]] : tensor<128xf64, #{{.*}}>
 //       CHECK: return
-func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
+func.func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
   sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
   return
 }
@@ -48,7 +48,7 @@ func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
 // CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
 //       CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<64xf32> to tensor<64xf32, #{{.*}}>
 //       CHECK: return %[[T]] : tensor<64xf32, #{{.*}}>
-func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32, #SparseVector> {
+func.func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
   return %0 : tensor<64xf32, #SparseVector>
 }
@@ -61,7 +61,7 @@ func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32, #Spar
 // CHECK-SAME: %[[A:.*]]: tensor<8x8x8xf64, #{{.*}}>)
 //       CHECK: %[[T:.*]] = sparse_tensor.convert %[[A]] : tensor<8x8x8xf64, #{{.*}}> to tensor<8x8x8xf64>
 //       CHECK: return %[[T]] : tensor<8x8x8xf64>
-func @sparse_convert_3d_from_sparse(%arg0: tensor<8x8x8xf64, #SparseTensor>) -> tensor<8x8x8xf64> {
+func.func @sparse_convert_3d_from_sparse(%arg0: tensor<8x8x8xf64, #SparseTensor>) -> tensor<8x8x8xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<8x8x8xf64, #SparseTensor> to tensor<8x8x8xf64>
   return %0 : tensor<8x8x8xf64>
 }
@@ -75,7 +75,7 @@ func @sparse_convert_3d_from_sparse(%arg0: tensor<8x8x8xf64, #SparseTensor>) ->
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = sparse_tensor.pointers %[[A]], %[[C]] : tensor<128xf64, #{{.*}}> to memref<?xindex>
 //       CHECK: return %[[T]] : memref<?xindex>
-func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
   return %0 : memref<?xindex>
@@ -90,7 +90,7 @@ func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex>
 //       CHECK: %[[C:.*]] = arith.constant 0 : index
 //       CHECK: %[[T:.*]] = sparse_tensor.indices %[[A]], %[[C]] : tensor<128xf64, #{{.*}}> to memref<?xindex>
 //       CHECK: return %[[T]] : memref<?xindex>
-func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
   return %0 : memref<?xindex>
@@ -104,7 +104,7 @@ func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
 //  CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
 //       CHECK: %[[T:.*]] = sparse_tensor.values %[[A]] : tensor<128xf64, #{{.*}}> to memref<?xf64>
 //       CHECK: return %[[T]] : memref<?xf64>
-func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
+func.func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
   %0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
   return %0 : memref<?xf64>
 }
@@ -117,7 +117,7 @@ func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
 //  CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>)
 //       CHECK: %[[T:.*]] = sparse_tensor.load %[[A]] : tensor<16x32xf64, #{{.*}}>
 //       CHECK: return %[[T]] : tensor<16x32xf64, #{{.*}}>
-func @sparse_load(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
+func.func @sparse_load(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
   %0 = sparse_tensor.load %arg0 : tensor<16x32xf64, #DenseMatrix>
   return %0 : tensor<16x32xf64, #DenseMatrix>
 }
@@ -130,7 +130,7 @@ func @sparse_load(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #
 //  CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>)
 //       CHECK: %[[T:.*]] = sparse_tensor.load %[[A]] hasInserts : tensor<16x32xf64, #{{.*}}>
 //       CHECK: return %[[T]] : tensor<16x32xf64, #{{.*}}>
-func @sparse_load_ins(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
+func.func @sparse_load_ins(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf64, #DenseMatrix> {
   %0 = sparse_tensor.load %arg0 hasInserts : tensor<16x32xf64, #DenseMatrix>
   return %0 : tensor<16x32xf64, #DenseMatrix>
 }
@@ -145,7 +145,7 @@ func @sparse_load_ins(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf6
 //  CHECK-SAME: %[[C:.*]]: f64) {
 //       CHECK: sparse_tensor.lex_insert %[[A]], %[[B]], %[[C]] : tensor<128xf64, #{{.*}}>, memref<?xindex>, f64
 //       CHECK: return
-func @sparse_insert(%arg0: tensor<128xf64, #SparseVector>, %arg1: memref<?xindex>, %arg2: f64) {
+func.func @sparse_insert(%arg0: tensor<128xf64, #SparseVector>, %arg1: memref<?xindex>, %arg2: f64) {
   sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf64, #SparseVector>, memref<?xindex>, f64
   return
 }
@@ -158,7 +158,7 @@ func @sparse_insert(%arg0: tensor<128xf64, #SparseVector>, %arg1: memref<?xindex
 //  CHECK-SAME: %[[A:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>>)
 //       CHECK: sparse_tensor.expand %[[A]]
 //       CHECK: return
-func @sparse_expansion(%arg0: tensor<8x8xf64, #SparseMatrix>) {
+func.func @sparse_expansion(%arg0: tensor<8x8xf64, #SparseMatrix>) {
   %values, %filled, %added, %count = sparse_tensor.expand %arg0
     : tensor<8x8xf64, #SparseMatrix> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
   return
@@ -172,7 +172,7 @@ func @sparse_expansion(%arg0: tensor<8x8xf64, #SparseMatrix>) {
 //  CHECK-SAME: %[[A:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>>,
 //       CHECK: sparse_tensor.compress %[[A]]
 //       CHECK: return
-func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
+func.func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
                          %arg1: memref<?xindex>, %arg2: memref<?xf64>, %arg3: memref<?xi1>,
                          %arg4: memref<?xindex>, %arg5: index) {
   sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
@@ -189,7 +189,7 @@ func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
 //  CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>)
 //       CHECK: sparse_tensor.out %[[A]], %[[B]] : tensor<?x?xf64, #sparse_tensor.encoding<{{.*}}>>, !llvm.ptr<i8>
 //       CHECK: return
-func @sparse_out(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
+func.func @sparse_out(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
   sparse_tensor.out %arg0, %arg1 : tensor<?x?xf64, #SparseMatrix>, !llvm.ptr<i8>
   return
 }
@@ -213,7 +213,7 @@ func @sparse_out(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
 //       CHECK:     }
 //       CHECK:   return %[[C1]] : f64
 //       CHECK: }
-func @sparse_binary(%arg0: f64, %arg1: i64) -> f64 {
+func.func @sparse_binary(%arg0: f64, %arg1: i64) -> f64 {
   %cf0 = arith.constant 0.0 : f64
   %r = sparse_tensor.binary %arg0, %arg1 : f64, i64 to f64
     overlap={
@@ -245,7 +245,7 @@ func @sparse_binary(%arg0: f64, %arg1: i64) -> f64 {
 //       CHECK:     }
 //       CHECK:   return %[[C1]] : f64
 //       CHECK: }
-func @sparse_unary(%arg0: f64) -> f64 {
+func.func @sparse_unary(%arg0: f64) -> f64 {
   %r = sparse_tensor.unary %arg0 : f64 to f64
     present={
       ^bb0(%x: f64):
@@ -274,7 +274,7 @@ func @sparse_unary(%arg0: f64) -> f64 {
 //       CHECK:     }
 //       CHECK:   return %[[C1]] : i64
 //       CHECK: }
-func @sparse_unary(%arg0: f64) -> i64 {
+func.func @sparse_unary(%arg0: f64) -> i64 {
   %r = sparse_tensor.unary %arg0 : f64 to i64
     present={
       ^bb0(%x: f64):

diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 2df47548d226e..d112974f3b4c2 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -2,7 +2,7 @@
 
 // CHECK-LABEL: func private @sparse_1d_tensor(
 // CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 0, indexBitWidth = 0 }>>)
-func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>>)
+func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>>)
 
 // -----
 
@@ -15,4 +15,4 @@ func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelT
 
 // CHECK-LABEL: func private @sparse_2d_tensor(
 // CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>)
-func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)
+func.func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)

diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
index 07d6cdbdafefa..4dec12c56ec13 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
@@ -32,7 +32,7 @@
 // CHECK:           %[[VAL_12:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf32>
 // CHECK:           return %[[VAL_12]] : tensor<32xf32>
 // CHECK:         }
-func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf32, #DV>)
     outs(%argx: tensor<32xf32>) {
@@ -61,7 +61,7 @@ func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> te
 // CHECK:           %[[VAL_11:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf32>
 // CHECK:           return %[[VAL_11]] : tensor<32xf32>
 // CHECK:         }
-func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> {
+func.func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> {
   %u = linalg.init_tensor [32] : tensor<32xf32>
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf32, #DV>)
@@ -92,7 +92,7 @@ func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> {
 // CHECK:           %[[VAL_12:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf32>
 // CHECK:           return %[[VAL_12]] : tensor<32xf32>
 // CHECK:         }
-func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf32, #DV>)
     outs(%argx: tensor<32xf32>) {
@@ -148,7 +148,7 @@ func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> te
 // CHECK:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf32>
 // CHECK:           return %[[VAL_30]] : tensor<32xf32>
 // CHECK:         }
-func @add_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf32, #SV>)
     outs(%argx: tensor<32xf32>) {
@@ -186,7 +186,7 @@ func @add_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> te
 // CHECK:           %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf32>
 // CHECK:           return %[[VAL_20]] : tensor<32xf32>
 // CHECK:         }
-func @repeated_add_s(%arga: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @repeated_add_s(%arga: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf32, #SV>)
     outs(%argx: tensor<32xf32>) {
@@ -222,7 +222,7 @@ func @repeated_add_s(%arga: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tenso
 // CHECK:           %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf32>
 // CHECK:           return %[[VAL_16]] : tensor<32xf32>
 // CHECK:         }
-func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf32, #SV>)
     outs(%argx: tensor<32xf32>) {
@@ -264,7 +264,7 @@ func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) -> te
 // CHECK:           %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf32>
 // CHECK:           return %[[VAL_14]] : tensor<32xf32>
 // CHECK:         }
-func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32, #DV>, tensor<32xf32>)
     outs(%argx: tensor<32xf32>) {
@@ -296,7 +296,7 @@ func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32
 // CHECK:           %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf32>
 // CHECK:           return %[[VAL_14]] : tensor<32xf32>
 // CHECK:         }
-func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32, #DV>, tensor<32xf32>)
     outs(%argx: tensor<32xf32>) {
@@ -356,7 +356,7 @@ func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32
 // CHECK:           %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
 // CHECK:           return %[[VAL_34]] : tensor<32xf32>
 // CHECK:         }
-func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32>, tensor<32xf32, #SV>)
     outs(%argx: tensor<32xf32>) {
@@ -392,7 +392,7 @@ func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32
 // CHECK:           %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32xf32>
 // CHECK:           return %[[VAL_18]] : tensor<32xf32>
 // CHECK:         }
-func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32>, tensor<32xf32, #SV>)
     outs(%argx: tensor<32xf32>) {
@@ -452,7 +452,7 @@ func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32
 // CHECK:           %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
 // CHECK:           return %[[VAL_34]] : tensor<32xf32>
 // CHECK:         }
-func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32>)
     outs(%argx: tensor<32xf32>) {
@@ -488,7 +488,7 @@ func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32
 // CHECK:           %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32xf32>
 // CHECK:           return %[[VAL_18]] : tensor<32xf32>
 // CHECK:         }
-func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32>)
     outs(%argx: tensor<32xf32>) {
@@ -572,7 +572,7 @@ func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32
 // CHECK:           %[[VAL_53:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
 // CHECK:           return %[[VAL_53]] : tensor<32xf32>
 // CHECK:         }
-func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32, #SV>)
     outs(%argx: tensor<32xf32>) {
@@ -634,7 +634,7 @@ func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tens
 // CHECK:           %[[VAL_41:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
 // CHECK:           return %[[VAL_41]] : tensor<32xf32>
 // CHECK:         }
-func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
+func.func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32xf32, #SV>, tensor<32xf32, #SV>)
     outs(%argx: tensor<32xf32>) {
@@ -725,7 +725,7 @@ func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tens
 // CHECK:           %[[VAL_60:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<16xf32>
 // CHECK:           return %[[VAL_60]] : tensor<16xf32>
 // CHECK:         }
-func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, %argc: f32, %argx: tensor<16xf32>) -> tensor<16xf32> {
+func.func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, %argc: f32, %argx: tensor<16xf32>) -> tensor<16xf32> {
   // Kernel "x(i) = a(i) * c + b(i) * c".
   %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<16xf32, #SV>, tensor<16xf32, #SV>)
@@ -818,7 +818,7 @@ func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, %argc:
 // CHECK:           %[[VAL_59:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<16xf32>
 // CHECK:           return %[[VAL_59]] : tensor<16xf32>
 // CHECK:         }
-func @two_way_inv_alt(%arga: tensor<16xf32, #SV>,
+func.func @two_way_inv_alt(%arga: tensor<16xf32, #SV>,
                       %argb: tensor<16xf32, #SV>, %argc: f32, %argx: tensor<16xf32>) -> tensor<16xf32> {
   // Same kernel, but now expressed as "x(i) = (a(i) + b(i)) * c".
   %0 = linalg.generic #trait2
@@ -863,7 +863,7 @@ func @two_way_inv_alt(%arga: tensor<16xf32, #SV>,
 // CHECK:           %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<f32>
 // CHECK:           return %[[VAL_17]] : tensor<f32>
 // CHECK:         }
-func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tensor<f32> {
   %0 = linalg.generic #trait_sum_reduction
     ins(%arga: tensor<?xf32, #SV>)
     outs(%argx: tensor<f32>) {
@@ -965,7 +965,7 @@ func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tensor<f32
 // CHECK:           %[[VAL_71:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<f32>
 // CHECK:           return %[[VAL_71]] : tensor<f32>
 // CHECK:         }
-func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
+func.func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
                        %argb: tensor<16xf32, #SV>,
                        %argx: tensor<f32>) -> tensor<f32> {
   // Just for testing. This case would be better expressed
@@ -1079,7 +1079,7 @@ func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
 // CHECK:           %[[VAL_77:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<f32>
 // CHECK:           return %[[VAL_77]] : tensor<f32>
 // CHECK:         }
-func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
+func.func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
                         %argb: tensor<f32>,
                         %argc: tensor<16xf32, #SV>,
                         %argx: tensor<f32>) -> tensor<f32> {
@@ -1260,7 +1260,7 @@ func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
 // CHECK:           %[[VAL_115:.*]] = bufferization.to_tensor %[[VAL_18]] : memref<?xf64>
 // CHECK:           return %[[VAL_115]] : tensor<?xf64>
 // CHECK:         }
-func @four_tensors_op(%arga: tensor<?xf64>,
+func.func @four_tensors_op(%arga: tensor<?xf64>,
                       %argb: tensor<?xf64, #SV>,
                       %argc: tensor<?xf64>,
                       %argd: tensor<?xf64, #SV>,
@@ -1577,7 +1577,7 @@ func @four_tensors_op(%arga: tensor<?xf64>,
 // CHECK:           %[[VAL_252:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<f64>
 // CHECK:           return %[[VAL_252]] : tensor<f64>
 // CHECK:         }
-func @red3s(%arga: tensor<?xf64, #SV>,
+func.func @red3s(%arga: tensor<?xf64, #SV>,
             %argb: tensor<?xf64, #SV>,
             %argc: tensor<?xf64, #SV>, %argx: tensor<f64>) -> tensor<f64> {
  %0 = linalg.generic #trait_red3s

diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 7c318be65abee..d052489d4ff74 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -42,7 +42,7 @@
 // CHECK:           %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_18]] : tensor<32x16xf32>
 // CHECK:         }
-func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tdd>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -79,7 +79,7 @@ func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_18]] : tensor<32x16xf32>
 // CHECK:         }
-func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tdd>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -143,7 +143,7 @@ func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_37:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_37]] : tensor<32x16xf32>
 // CHECK:         }
-func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tds>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -183,7 +183,7 @@ func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_21:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_21]] : tensor<32x16xf32>
 // CHECK:         }
-func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tds>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -252,7 +252,7 @@ func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_40:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_40]] : tensor<32x16xf32>
 // CHECK:         }
-func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -293,7 +293,7 @@ func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_22]] : tensor<32x16xf32>
 // CHECK:         }
-func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -388,7 +388,7 @@ func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_58:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_58]] : tensor<32x16xf32>
 // CHECK:         }
-func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -432,7 +432,7 @@ func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_25:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_25]] : tensor<32x16xf32>
 // CHECK:         }
-func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32>)
     outs(%argx: tensor<32x16xf32>) {
@@ -597,7 +597,7 @@ func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: te
 // CHECK:           %[[VAL_116:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_116]] : tensor<32x16xf32>
 // CHECK:         }
-func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32, #Tss>)
     outs(%argx: tensor<32x16xf32>) {
@@ -694,7 +694,7 @@ func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>,
 // CHECK:           %[[VAL_72:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_72]] : tensor<32x16xf32>
 // CHECK:         }
-func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tss>, tensor<32x16xf32, #Tss>)
     outs(%argx: tensor<32x16xf32>) {
@@ -801,7 +801,7 @@ func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>,
 // CHECK:           %[[VAL_70:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_70]] : tensor<32x16xf32>
 // CHECK:         }
-func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32, #Tds>)
     outs(%argx: tensor<32x16xf32>) {
@@ -848,7 +848,7 @@ func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>,
 // CHECK:           %[[VAL_28:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_28]] : tensor<32x16xf32>
 // CHECK:         }
-func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
+func.func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>, %argx: tensor<32x16xf32>) -> tensor<32x16xf32> {
   %0 = linalg.generic #trait2
      ins(%arga, %argb: tensor<32x16xf32, #Tsd>, tensor<32x16xf32, #Tds>)
     outs(%argx: tensor<32x16xf32>) {
@@ -901,7 +901,7 @@ func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #Tds>,
 // CHECK:           %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<16xf32>
 // CHECK:           return %[[VAL_26]] : tensor<16xf32>
 // CHECK:         }
-func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx: tensor<16xf32>) -> tensor<16xf32> {
+func.func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx: tensor<16xf32>) -> tensor<16xf32> {
   %0 = linalg.generic #trait_matvec
        ins(%argA, %argb: tensor<16x32xf32, #Tds>, tensor<32xf32>)
       outs(%argx: tensor<16xf32>) {
@@ -949,7 +949,7 @@ func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx: tenso
 // CHECK:           %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<f32>
 // CHECK:           return %[[VAL_23]] : tensor<f32>
 // CHECK:         }
-func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor<f32>) -> tensor<f32> {
   %0 = linalg.generic #trait_sum_reduction
      ins(%arga: tensor<10x20xf32, #Tds>)
     outs(%argx: tensor<f32>) {
@@ -997,7 +997,7 @@ func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor<f32>) -> tenso
 // CHECK:           %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<?x?xf64>
 // CHECK:           return %[[VAL_20]] : tensor<?x?xf64>
 // CHECK:         }
-func @scale(%arga: tensor<?x?xf64, #Tds>, %argx: tensor<?x?xf64>) -> tensor<?x?xf64> {
+func.func @scale(%arga: tensor<?x?xf64, #Tds>, %argx: tensor<?x?xf64>) -> tensor<?x?xf64> {
   %0 = arith.constant 2.0 : f64
   %1 = linalg.generic #trait_scale
      ins(%arga: tensor<?x?xf64, #Tds>)
@@ -1065,7 +1065,7 @@ func @scale(%arga: tensor<?x?xf64, #Tds>, %argx: tensor<?x?xf64>) -> tensor<?x?x
 // CHECK:           %[[VAL_38:.*]] = bufferization.to_tensor %[[VAL_17]] : memref<?x?xf32>
 // CHECK:           return %[[VAL_38]] : tensor<?x?xf32>
 // CHECK:         }
-func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
+func.func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
                           %arga: tensor<?x?xf32>,
                           %argb: tensor<?x?xf32>,
                           %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
@@ -1285,7 +1285,7 @@ func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
 // CHECK:           %[[VAL_174:.*]] = bufferization.to_tensor %[[VAL_24]] : memref<?xf32>
 // CHECK:           return %[[VAL_174]] : tensor<?xf32>
 // CHECK:         }
-func @sum_kernel_with_inv(%arga: tensor<?x?xf32, #Tss>,
+func.func @sum_kernel_with_inv(%arga: tensor<?x?xf32, #Tss>,
                           %argb: tensor<?x?xf32, #Tds>,
                           %argc: tensor<?x?xf32, #Tds>,
                           %argd: tensor<?xf32>,

diff --git a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
index 648d4f7e68adb..521404bc18cad 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
@@ -53,7 +53,7 @@
 // CHECK:           %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_22]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tddd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -95,7 +95,7 @@ func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_22]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tddd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -165,7 +165,7 @@ func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_42:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_42]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tdds>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -211,7 +211,7 @@ func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_26]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tdds>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -284,7 +284,7 @@ func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_43:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_43]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tdsd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -329,7 +329,7 @@ func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_25:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_25]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tdsd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -429,7 +429,7 @@ func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_62:.*]] = bufferization.to_tensor %[[VAL_17]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_62]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tdss>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -478,7 +478,7 @@ func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_29:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_29]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tdss>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -556,7 +556,7 @@ func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_46:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_46]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tsdd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -602,7 +602,7 @@ func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_26]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tsdd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -707,7 +707,7 @@ func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_65:.*]] = bufferization.to_tensor %[[VAL_17]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_65]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tsds>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -757,7 +757,7 @@ func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_30]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tsds>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -865,7 +865,7 @@ func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_66:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_66]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tssd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -914,7 +914,7 @@ func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_29:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_29]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tssd>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -1049,7 +1049,7 @@ func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_85:.*]] = bufferization.to_tensor %[[VAL_19]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_85]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tsss>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -1102,7 +1102,7 @@ func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16x8xf32>
 // CHECK:           return %[[VAL_33]] : tensor<32x16x8xf32>
 // CHECK:         }
-func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
+func.func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %argx: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
   %0 = linalg.generic #trait3
      ins(%arga, %argb: tensor<32x16x8xf32, #Tsss>, tensor<32x16x8xf32>)
     outs(%argx: tensor<32x16x8xf32>) {
@@ -1168,7 +1168,7 @@ func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>, %ar
 // CHECK:           %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<?x?xf32>
 // CHECK:           return %[[VAL_34]] : tensor<?x?xf32>
 // CHECK:         }
-func @kernel_3d(%arga: tensor<?x?xf32>,
+func.func @kernel_3d(%arga: tensor<?x?xf32>,
                 %argb: tensor<?x?x?xf32, #Tdds>,
                 %argc: tensor<?x?xf32>,
                %argd: tensor<?x?xf32>) -> tensor<?x?xf32> {
@@ -1230,7 +1230,7 @@ func @kernel_3d(%arga: tensor<?x?xf32>,
 // CHECK:           %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
 // CHECK:           return %[[VAL_34]] : tensor<f32>
 // CHECK:         }
-func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor<f32>) -> tensor<f32> {
   %0 = linalg.generic #trait_sum_reduction
      ins(%arga: tensor<10x20x30xf32, #Tsss>)
     outs(%argx: tensor<f32>) {
@@ -1284,7 +1284,7 @@ func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor<f32>) -> t
 // CHECK:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<f32>
 // CHECK:           return %[[VAL_30]] : tensor<f32>
 // CHECK:         }
-func @sum_reduction_inv(%arga: tensor<?x?x?xf32>,
+func.func @sum_reduction_inv(%arga: tensor<?x?x?xf32>,
                         %argb: tensor<?xf32, #Td>,
                        %argx: tensor<f32>) -> tensor<f32> {
   %0 = linalg.generic #trait_sum_reduction_inv
@@ -1340,7 +1340,7 @@ func @sum_reduction_inv(%arga: tensor<?x?x?xf32>,
 // CHECK:           %[[VAL_22:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<10x20x30xf32>
 // CHECK:           return %[[VAL_22]] : tensor<10x20x30xf32>
 // CHECK:         }
-func @invariants(%arga: tensor<10xf32, #Td>,
+func.func @invariants(%arga: tensor<10xf32, #Td>,
                  %argb: tensor<20xf32>,
                  %argc: tensor<30xf32>,
                  %argx: tensor<10x20x30xf32>) -> tensor<10x20x30xf32> {

diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
index 8212f1c20650d..3251e459a7c46 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
@@ -42,7 +42,7 @@
 // CHECK:           %[[VAL_21:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf32>
 // CHECK:           return %[[VAL_21]] : tensor<32xf32>
 // CHECK:         }
-func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
+func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
                       %argb: tensor<4xf32>,
                       %argx: tensor<32xf32>) -> tensor<32xf32> {
   %0 = linalg.generic #trait1
@@ -93,7 +93,7 @@ func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
 // CHECK:           %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi32>
 // CHECK:           return %[[VAL_20]] : tensor<32xi32>
 // CHECK:         }
-func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
+func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
                          %argb: tensor<34xi32>,
                          %argx: tensor<32xi32>) -> tensor<32xi32> {
   %0 = linalg.generic #trait2
@@ -151,7 +151,7 @@ func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
 // CHECK:           %[[VAL_27:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf64>
 // CHECK:           return %[[VAL_27]] : tensor<32x16xf64>
 // CHECK:         }
-func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
+func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
                          %argb: tensor<34x19xf64>,
                          %argx: tensor<32x16xf64>) -> tensor<32x16xf64> {
   %0 = linalg.generic #trait3

diff --git a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
index 63e7ab83d6344..c6621efc859e4 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
@@ -46,7 +46,7 @@
 // CHECK-CONVERT: memref.dealloc %[[C]] : memref<?xindex>
 // CHECK-CONVERT: call @endInsert
 //
-func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
+func.func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
   %c0 = arith.constant 0 : index
   %n = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSC>
   %v = sparse_tensor.init [%n] : tensor<?xf64, #SV>

diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
index 4b0d389bb1870..d27adbbfeca85 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -50,7 +50,7 @@
 // CHECK:           }
 // CHECK:           %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
 // CHECK:           return %[[VAL_14]] : tensor<32xf64>
-func @abs(%arga: tensor<32xf64, #SV>,
+func.func @abs(%arga: tensor<32xf64, #SV>,
           %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf64, #SV>)
@@ -82,7 +82,7 @@ func @abs(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
 // CHECK:           return %[[VAL_14]] : tensor<32xf64>
 // CHECK:         }
-func @ceil(%arga: tensor<32xf64, #SV>,
+func.func @ceil(%arga: tensor<32xf64, #SV>,
            %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf64, #SV>)
@@ -114,7 +114,7 @@ func @ceil(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
 // CHECK:           return %[[VAL_14]] : tensor<32xf64>
 // CHECK:         }
-func @floor(%arga: tensor<32xf64, #SV>,
+func.func @floor(%arga: tensor<32xf64, #SV>,
             %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf64, #SV>)
@@ -146,7 +146,7 @@ func @floor(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
 // CHECK:           return %[[VAL_14]] : tensor<32xf64>
 // CHECK:         }
-func @neg(%arga: tensor<32xf64, #SV>,
+func.func @neg(%arga: tensor<32xf64, #SV>,
           %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait1
      ins(%arga: tensor<32xf64, #SV>)
@@ -205,7 +205,7 @@ func @neg(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
 // CHECK:           return %[[VAL_33]] : tensor<32xf64>
 // CHECK:         }
-func @add(%arga: tensor<32xf64, #SV>,
+func.func @add(%arga: tensor<32xf64, #SV>,
           %argb: tensor<32xf64>,
           %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait2
@@ -267,7 +267,7 @@ func @add(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_35:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
 // CHECK:           return %[[VAL_35]] : tensor<32xf64>
 // CHECK:         }
-func @sub(%arga: tensor<32xf64, #SV>,
+func.func @sub(%arga: tensor<32xf64, #SV>,
           %argb: tensor<32xf64>,
           %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait2
@@ -303,7 +303,7 @@ func @sub(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf64>
 // CHECK:           return %[[VAL_17]] : tensor<32xf64>
 // CHECK:         }
-func @mul(%arga: tensor<32xf64, #SV>,
+func.func @mul(%arga: tensor<32xf64, #SV>,
           %argb: tensor<32xf64>,
           %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait2
@@ -337,7 +337,7 @@ func @mul(%arga: tensor<32xf64, #SV>,
 // CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf64>
 // CHECK:           return %[[VAL_15]] : tensor<32xf64>
 // CHECK:         }
-func @divbyc(%arga: tensor<32xf64, #SV>,
+func.func @divbyc(%arga: tensor<32xf64, #SV>,
            %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %c = arith.constant 2.0 : f64
   %0 = linalg.generic #traitc

diff --git a/mlir/test/Dialect/SparseTensor/sparse_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_index.mlir
index f41c765376bbe..aa6d3c885e3a4 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_index.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_index.mlir
@@ -46,7 +46,7 @@
 // CHECK:           %[[VAL_21:.*]] = sparse_tensor.load %[[VAL_5]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK:           return %[[VAL_21]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK:         }
-func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
+func.func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
                       -> tensor<?x?xi64, #DenseMatrix> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 0 : index
@@ -104,7 +104,7 @@ func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
 // CHECK:           %[[VAL_27:.*]] = sparse_tensor.load %[[VAL_6]] hasInserts : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK:           return %[[VAL_27]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK:         }
-func @sparse_index(%arga: tensor<?x?xi64, #SparseMatrix>)
+func.func @sparse_index(%arga: tensor<?x?xi64, #SparseMatrix>)
                        -> tensor<?x?xi64, #SparseMatrix> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 0 : index

diff --git a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
index a865dc2bec116..dd615706016fa 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
@@ -69,7 +69,7 @@
 // CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi64>
 // CHECK:           return %[[VAL_33]] : tensor<32xi64>
 // CHECK:         }
-func @add(%arga: tensor<32xi64, #SV>,
+func.func @add(%arga: tensor<32xi64, #SV>,
           %argb: tensor<32xi64>,
           %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %0 = linalg.generic #trait2
@@ -132,7 +132,7 @@ func @add(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_36:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xi64>
 // CHECK:           return %[[VAL_36]] : tensor<32xi64>
 // CHECK:         }
-func @sub(%arga: tensor<32xi64, #SV>,
+func.func @sub(%arga: tensor<32xi64, #SV>,
           %argb: tensor<32xi64>,
           %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %0 = linalg.generic #trait2
@@ -168,7 +168,7 @@ func @sub(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xi64>
 // CHECK:           return %[[VAL_17]] : tensor<32xi64>
 // CHECK:         }
-func @mul(%arga: tensor<32xi64, #SV>,
+func.func @mul(%arga: tensor<32xi64, #SV>,
           %argb: tensor<32xi64>,
           %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %0 = linalg.generic #trait2
@@ -202,7 +202,7 @@ func @mul(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
 // CHECK:           return %[[VAL_15]] : tensor<32xi64>
 // CHECK:         }
-func @divsbyc(%arga: tensor<32xi64, #SV>,
+func.func @divsbyc(%arga: tensor<32xi64, #SV>,
               %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %c = arith.constant 2 : i64
   %0 = linalg.generic #traitc
@@ -236,7 +236,7 @@ func @divsbyc(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
 // CHECK:           return %[[VAL_15]] : tensor<32xi64>
 // CHECK:         }
-func @divubyc(%arga: tensor<32xi64, #SV>,
+func.func @divubyc(%arga: tensor<32xi64, #SV>,
               %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %c = arith.constant 2 : i64
   %0 = linalg.generic #traitc
@@ -272,7 +272,7 @@ func @divubyc(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xi64>
 // CHECK:           return %[[VAL_17]] : tensor<32xi64>
 // CHECK:         }
-func @and(%arga: tensor<32xi64, #SV>,
+func.func @and(%arga: tensor<32xi64, #SV>,
           %argb: tensor<32xi64>,
           %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %0 = linalg.generic #trait2
@@ -332,7 +332,7 @@ func @and(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi64>
 // CHECK:           return %[[VAL_33]] : tensor<32xi64>
 // CHECK:         }
-func @or(%arga: tensor<32xi64, #SV>,
+func.func @or(%arga: tensor<32xi64, #SV>,
          %argb: tensor<32xi64>,
          %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %0 = linalg.generic #trait2
@@ -392,7 +392,7 @@ func @or(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi64>
 // CHECK:           return %[[VAL_33]] : tensor<32xi64>
 // CHECK:         }
-func @xor(%arga: tensor<32xi64, #SV>,
+func.func @xor(%arga: tensor<32xi64, #SV>,
           %argb: tensor<32xi64>,
           %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %0 = linalg.generic #trait2
@@ -426,7 +426,7 @@ func @xor(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
 // CHECK:           return %[[VAL_15]] : tensor<32xi64>
 // CHECK:         }
-func @ashrbyc(%arga: tensor<32xi64, #SV>,
+func.func @ashrbyc(%arga: tensor<32xi64, #SV>,
               %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %c = arith.constant 2 : i64
   %0 = linalg.generic #traitc
@@ -460,7 +460,7 @@ func @ashrbyc(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
 // CHECK:           return %[[VAL_15]] : tensor<32xi64>
 // CHECK:         }
-func @lsrbyc(%arga: tensor<32xi64, #SV>,
+func.func @lsrbyc(%arga: tensor<32xi64, #SV>,
              %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %c = arith.constant 2 : i64
   %0 = linalg.generic #traitc
@@ -494,7 +494,7 @@ func @lsrbyc(%arga: tensor<32xi64, #SV>,
 // CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xi64>
 // CHECK:           return %[[VAL_15]] : tensor<32xi64>
 // CHECK:         }
-func @lslbyc(%arga: tensor<32xi64, #SV>,
+func.func @lslbyc(%arga: tensor<32xi64, #SV>,
              %argx: tensor<32xi64> {linalg.inplaceable = true}) -> tensor<32xi64> {
   %c = arith.constant 2 : i64
   %0 = linalg.generic #traitc

diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
index 05e895a262f6e..172882e174a27 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
@@ -45,7 +45,7 @@
 // CHECK:           %[[VAL_29:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<10x30xf32>
 // CHECK:           return %[[VAL_29]] : tensor<10x30xf32>
 // CHECK:         }
-func @matmul1(%a: tensor<10x20xf32, #DCSR>,
+func.func @matmul1(%a: tensor<10x20xf32, #DCSR>,
               %b: tensor<20x30xf32>,
               %c: tensor<10x30xf32>) -> tensor<10x30xf32> {
   %0 = linalg.matmul
@@ -144,7 +144,7 @@ func @matmul1(%a: tensor<10x20xf32, #DCSR>,
 // CHECK:           %[[VAL_77:.*]] = sparse_tensor.load %[[VAL_8]] hasInserts : tensor<4x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK:           return %[[VAL_77]] : tensor<4x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK:         }
-func @matmul2(%A: tensor<4x8xf64, #DCSR>,
+func.func @matmul2(%A: tensor<4x8xf64, #DCSR>,
               %B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
   %c4 = arith.constant 4 : index
   %C = sparse_tensor.init [%c4, %c4] : tensor<4x4xf64, #DCSR>
@@ -196,7 +196,7 @@ func @matmul2(%A: tensor<4x8xf64, #DCSR>,
 // CHECK:           %[[VAL_32:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<6x6xi32>
 // CHECK:           return %[[VAL_32]] : tensor<6x6xi32>
 // CHECK:         }
-func @conv2d(%input:  tensor<8x8xi32>,
+func.func @conv2d(%input:  tensor<8x8xi32>,
              %filter: tensor<3x3xi32, #DCSR>,
              %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
   %0 = linalg.conv_2d
@@ -247,7 +247,7 @@ func @conv2d(%input:  tensor<8x8xi32>,
 // CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<5x6xi64>
 // CHECK:           return %[[VAL_33]] : tensor<5x6xi64>
 // CHECK:         }
-func @quantized_matmul(%input1: tensor<5x3xi8>,
+func.func @quantized_matmul(%input1: tensor<5x3xi8>,
                        %input2: tensor<3x6xi8, #DCSR>,
                        %output: tensor<5x6xi64>) -> tensor<5x6xi64> {
   %c0 = arith.constant 0 : i32
@@ -310,7 +310,7 @@ func @quantized_matmul(%input1: tensor<5x3xi8>,
 // CHECK:           %[[VAL_48:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<f32>
 // CHECK:           return %[[VAL_48]] : tensor<f32>
 // CHECK:         }
-func @sparse_dot(%a: tensor<1024xf32, #SparseVector>,
+func.func @sparse_dot(%a: tensor<1024xf32, #SparseVector>,
                  %b: tensor<1024xf32, #SparseVector>,
                 %x: tensor<f32>) -> tensor<f32> {
   %dot = linalg.dot ins(%a, %b: tensor<1024xf32, #SparseVector>,

diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
index abde97eac3645..6b3d6448397ef 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
@@ -116,7 +116,7 @@
 // CHECK-LIR:           return %[[VAL_9]] : memref<32xf64>
 // CHECK-LIR:         }
 
-func @matvec(%arga: tensor<32x64xf64, #CSR>,
+func.func @matvec(%arga: tensor<32x64xf64, #CSR>,
              %argb: tensor<64xf64>,
              %argx: tensor<32xf64>) -> tensor<32xf64> {
   %0 = linalg.generic #trait_matvec

diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
index 122021811ebab..1b8891fd529b7 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
@@ -116,7 +116,7 @@
 // CHECK-LIR:           return %[[VAL_10]] : memref<32xf64>
 // CHECK-LIR:         }
 
-func @matvec(%arga: tensor<32x64xf64, #CSC>,
+func.func @matvec(%arga: tensor<32x64xf64, #CSC>,
              %argb: tensor<64xf64>,
              %argx: tensor<32xf64>) -> tensor<32xf64> {
   %0 = linalg.generic #trait_matvec

diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
index e611b0a0684c8..1b8b0ee7c0324 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
@@ -110,7 +110,7 @@
 // CHECK-LIR:           return %[[VAL_2]] : memref<32xf64>
 // CHECK-LIR:         }
 
-func @matvec(%arga: tensor<32x64xf64, #CSR>,
+func.func @matvec(%arga: tensor<32x64xf64, #CSR>,
              %argb: tensor<64xf64>,
             %argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
   %0 = linalg.generic #trait_matvec

diff --git a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
index 5cf64309fd541..3a7a32d953c25 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
@@ -84,7 +84,7 @@
 // CHECK:           %[[VAL_50:.*]] = bufferization.to_tensor %[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32>
 // CHECK:           return %[[VAL_50]] : tensor<10x20x30x40x50x60x70x80xf32>
 // CHECK:         }
-func @mul(%arga: tensor<10x20x30x40x50x60x70x80xf32>,
+func.func @mul(%arga: tensor<10x20x30x40x50x60x70x80xf32>,
           %argb: tensor<80x70x60x50x40x30x20x10xf32, #SparseTensor>,
           %argx: tensor<10x20x30x40x50x60x70x80xf32>)
 	      -> tensor<10x20x30x40x50x60x70x80xf32> {

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
index 13a984b8e8d25..7bb7f3229f3ed 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
@@ -46,7 +46,7 @@
 // CHECK:           %[[VAL_18:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:           return %[[VAL_18]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:         }
-func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
+func.func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
   %c = arith.constant 2.0 : f32
   %0 = linalg.generic #trait_scale_inpl
     outs(%argx: tensor<32x16xf32, #DCSR>) {
@@ -80,7 +80,7 @@ func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable
 // CHECK:           %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:           return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:         }
-func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
+func.func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable = true}) -> tensor<32x16xf32, #DCSR> {
   %0 = linalg.generic #trait_scale_inpl
     outs(%argx: tensor<32x16xf32, #DCSR>) {
       ^bb(%x: f32):
@@ -128,7 +128,7 @@ func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR> {linalg.inplaceable
 // CHECK:           %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_7]] hasInserts : tensor<10x20xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:           return %[[VAL_20]] : tensor<10x20xf32, #sparse_tensor.encoding<{
 // CHECK:         }
-func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32, #DCSR> {
+func.func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32, #DCSR> {
   %s = arith.constant 2.0 : f32
   %d10 = arith.constant 10 : index
   %d20 = arith.constant 20 : index
@@ -282,7 +282,7 @@ func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32,
 // CHECK:           %[[VAL_112:.*]] = sparse_tensor.load %[[VAL_8]] hasInserts : tensor<?x?xi32, #{{.*}}>
 // CHECK:           return %[[VAL_112]] : tensor<?x?xi32, #{{.*}}>
 // CHECK:         }
-func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
+func.func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
              %argb: tensor<?x?x?xi32, #SparseTensor>) -> tensor<?x?xi32, #DCSR> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -399,7 +399,7 @@ func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
 // CHECK:           %[[VAL_78:.*]] = sparse_tensor.load %[[VAL_9]] hasInserts : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK:           return %[[VAL_78]] : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK:         }
-func @matmat(%arga: tensor<?x?xf32, #DCSR>,
+func.func @matmat(%arga: tensor<?x?xf32, #DCSR>,
              %argb: tensor<?x?xf32, #DCSR>) -> tensor<?x?xf32, #DCSR> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
index 72472601c9eeb..9af037c7829a8 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
@@ -56,7 +56,7 @@
 // CHECK-PAR4:           scf.parallel
 // CHECK-PAR4:         return
 //
-func @scale_dd(%scale: f32,
+func.func @scale_dd(%scale: f32,
                %arga: tensor<?x?xf32, #DenseMatrix>,
 	       %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
   %0 = linalg.generic #trait_dd
@@ -104,7 +104,7 @@ func @scale_dd(%scale: f32,
 // CHECK-PAR4:           scf.parallel
 // CHECK-PAR4:         return
 //
-func @scale_ss(%scale: f32,
+func.func @scale_ss(%scale: f32,
                %arga: tensor<?x?xf32, #SparseMatrix>,
 	       %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
   %0 = linalg.generic #trait_ss
@@ -153,7 +153,7 @@ func @scale_ss(%scale: f32,
 // CHECK-PAR4:           scf.for
 // CHECK-PAR4:         return
 //
-func @matvec(%arga: tensor<16x32xf32, #CSR>,
+func.func @matvec(%arga: tensor<16x32xf32, #CSR>,
              %argb: tensor<32xf32>,
 	     %argx: tensor<16xf32>) -> tensor<16xf32> {
   %0 = linalg.generic #trait_matvec

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir
index 14c8b78d4b752..b30f56260a53d 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir
@@ -41,7 +41,7 @@
 // CHECK:           %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<20x30x10xf32>
 // CHECK:           return %[[VAL_18]] : tensor<20x30x10xf32>
 // CHECK:         }
-func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
+func.func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
                          %argx: tensor<20x30x10xf32>) -> tensor<20x30x10xf32> {
   %0 = linalg.generic #trait
     ins(%arga: tensor<10x20x30xf32, #X>)
@@ -80,7 +80,7 @@ func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
 // CHECK:           %[[VAL_19:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<?x?x?xf32>
 // CHECK:           return %[[VAL_19]] : tensor<?x?x?xf32>
 // CHECK:         }
-func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
+func.func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
                           %argx: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
   %0 = linalg.generic #trait
     ins(%arga: tensor<?x?x?xf32, #X>)

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir
index b9b1fcbb26d26..ce82c7d951f83 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir
@@ -83,7 +83,7 @@
 // CHECK-MIR:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
 // CHECK-MIR:           return %[[VAL_30]] : tensor<f32>
 // CHECK-MIR:         }
-func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
+func.func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
                           %argx: tensor<f32>) -> tensor<f32> {
   %0 = linalg.generic #trait
     ins(%arga: tensor<?x?x?xf32, #X>)

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir
index ea654ce8cb72b..d5ee64701b8e2 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir
@@ -59,7 +59,7 @@
 // CHECK:           %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16xf32>
 // CHECK:           return %[[VAL_34]] : tensor<32x16xf32>
 // CHECK:         }
-func @mul(%arga: tensor<32x16xf32, #SparseMatrix>,
+func.func @mul(%arga: tensor<32x16xf32, #SparseMatrix>,
           %argp: tensor<f32>,
           %argq: f32,
           %argr: f32,

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_storage.mlir b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir
index b3f3b5e6b1307..66ab1ce349728 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_storage.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir
@@ -37,7 +37,7 @@
 // CHECK:   %[[MUL:.*]] = arith.mulf %[[VAL0]], %[[VAL1]] : f64
 // CHECK:   store %[[MUL]], %{{.*}}[%[[INDC]]] : memref<32xf64>
 // CHECK: }
-func @mul64(%arga: tensor<32xf64, #SparseVector64>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
+func.func @mul64(%arga: tensor<32xf64, #SparseVector64>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
   %0 = linalg.generic #trait_mul
      ins(%arga, %argb: tensor<32xf64, #SparseVector64>, tensor<32xf64>)
     outs(%argx: tensor<32xf64>) {
@@ -66,7 +66,7 @@ func @mul64(%arga: tensor<32xf64, #SparseVector64>, %argb: tensor<32xf64>, %argx
 // CHECK:   %[[MUL:.*]] = arith.mulf %[[VAL0]], %[[VAL1]] : f64
 // CHECK:   store %[[MUL]], %{{.*}}[%[[INDC]]] : memref<32xf64>
 // CHECK: }
-func @mul32(%arga: tensor<32xf64, #SparseVector32>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
+func.func @mul32(%arga: tensor<32xf64, #SparseVector32>, %argb: tensor<32xf64>, %argx: tensor<32xf64>) -> tensor<32xf64> {
   %0 = linalg.generic #trait_mul
      ins(%arga, %argb: tensor<32xf64, #SparseVector32>, tensor<32xf64>)
     outs(%argx: tensor<32xf64>) {

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
index 7b2d3a4213494..1425a7b896213 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
@@ -74,7 +74,7 @@
 // CHECK-VEC4:       }
 // CHECK-VEC4:       return
 //
-func @scale_d(%arga: tensor<1024xf32, #DenseVector>, %b: f32, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
+func.func @scale_d(%arga: tensor<1024xf32, #DenseVector>, %b: f32, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
   %0 = linalg.generic #trait_scale_d
     ins(%arga: tensor<1024xf32, #DenseVector>)
     outs(%argx: tensor<1024xf32>) {
@@ -216,7 +216,7 @@ func @scale_d(%arga: tensor<1024xf32, #DenseVector>, %b: f32, %argx: tensor<1024
 // CHECK-VEC4:       }
 // CHECK-VEC4:       return
 //
-func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
+func.func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
   %0 = linalg.generic #trait_mul_s
     ins(%arga, %argb: tensor<1024xf32, #SparseVector>, tensor<1024xf32>)
     outs(%argx: tensor<1024xf32>) {
@@ -312,7 +312,7 @@ func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %ar
 // CHECK-VEC4:       %{{.*}} = vector.reduction <add>, %[[red]] : vector<[4]xf32> into f32
 // CHECK-VEC4:       return
 //
-func @reduction_d(%arga: tensor<1024xf32, #DenseVector>, %argb: tensor<1024xf32>, %argx: tensor<f32>) -> tensor<f32> {
+func.func @reduction_d(%arga: tensor<1024xf32, #DenseVector>, %argb: tensor<1024xf32>, %argx: tensor<f32>) -> tensor<f32> {
   %0 = linalg.generic #trait_reduction_d
     ins(%arga, %argb: tensor<1024xf32, #DenseVector>, tensor<1024xf32>)
     outs(%argx: tensor<f32>) {
@@ -475,7 +475,7 @@ func @reduction_d(%arga: tensor<1024xf32, #DenseVector>, %argb: tensor<1024xf32>
 // CHECK-VEC4:       }
 // CHECK-VEC4:       return
 //
-func @mul_ds(%arga: tensor<512x1024xf32, #SparseMatrix>, %argb: tensor<512x1024xf32>, %argx: tensor<512x1024xf32>) -> tensor<512x1024xf32> {
+func.func @mul_ds(%arga: tensor<512x1024xf32, #SparseMatrix>, %argb: tensor<512x1024xf32>, %argx: tensor<512x1024xf32>) -> tensor<512x1024xf32> {
   %0 = linalg.generic #trait_mul_ds
     ins(%arga, %argb: tensor<512x1024xf32, #SparseMatrix>, tensor<512x1024xf32>)
     outs(%argx: tensor<512x1024xf32>) {
@@ -584,7 +584,7 @@ func @mul_ds(%arga: tensor<512x1024xf32, #SparseMatrix>, %argb: tensor<512x1024x
 // CHECK-VEC4:       }
 // CHECK-VEC4:       return
 //
-func @add_dense(%arga: tensor<32x64xf64, #SparseMatrix>,
+func.func @add_dense(%arga: tensor<32x64xf64, #SparseMatrix>,
                 %argx: tensor<33x64xf64> {linalg.inplaceable = true}) -> tensor<33x64xf64> {
   %0 = linalg.generic #trait_affine
      ins(%arga: tensor<32x64xf64, #SparseMatrix>)

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
index 8d24a38762ca1..df55b8373e0ee 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
@@ -112,7 +112,7 @@
 // CHECK:           %[[VAL_87:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<f64>
 // CHECK:           return %[[VAL_87]] : tensor<f64>
 // CHECK:         }
-func @sparse_matrix_sum(%argx: tensor<f64> {linalg.inplaceable = true},
+func.func @sparse_matrix_sum(%argx: tensor<f64> {linalg.inplaceable = true},
                          %arga: tensor<64x32xf64, #SparseMatrix>,
                          %argb: tensor<64x32xf64, #SparseMatrix>) -> tensor<f64> {
   %0 = linalg.generic #trait

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir
index 253ac0cb37f49..792e741931499 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir
@@ -48,7 +48,7 @@
 // CHECK:           %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<8xi64>
 // CHECK:           return %[[VAL_20]] : tensor<8xi64>
 // CHECK:         }
-func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
+func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
   %init = linalg.init_tensor [8] : tensor<8xi64>
   %r = linalg.generic #trait_1d
       ins(%arga: tensor<8xi64, #SparseVector>)
@@ -109,7 +109,7 @@ func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64>
 // CHECK:           %[[VAL_35:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<8xi64>
 // CHECK:           return %[[VAL_35]] : tensor<8xi64>
 // CHECK:         }
-func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
+func.func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
   %init = linalg.init_tensor [8] : tensor<8xi64>
   %r = linalg.generic #trait_1d
       ins(%arga: tensor<8xi64, #SparseVector>)

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir
index c48a620291241..65f1f7216bed8 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir
@@ -51,7 +51,7 @@
 // CHECK:       }
 // CHECK:       return
 //
-func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
+func.func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
   %0 = linalg.generic #trait_mul_s
     ins(%arga, %argb: tensor<1024xf32, #SparseVector>, tensor<1024xf32>)
     outs(%argx: tensor<1024xf32>) {


        

