[Mlir-commits] [mlir] 92d38ad - [mlir][NFC] Update textual references of `func` to `func.func` in Linalg tests

River Riddle llvmlistbot at llvm.org
Wed Apr 20 22:24:32 PDT 2022


Author: River Riddle
Date: 2022-04-20T22:17:28-07:00
New Revision: 92d38adb83f4e4e8257d092adeffba9132aa4830

URL: https://github.com/llvm/llvm-project/commit/92d38adb83f4e4e8257d092adeffba9132aa4830
DIFF: https://github.com/llvm/llvm-project/commit/92d38adb83f4e4e8257d092adeffba9132aa4830.diff

LOG: [mlir][NFC] Update textual references of `func` to `func.func` in Linalg tests

The special-case parsing of `func` operations is being removed.
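
In practice this means the sugared form `func @name` no longer parses, and
tests must spell out the fully qualified operation name. A minimal sketch of
the before/after (the function name @example is illustrative, not taken from
this commit):

    // Previously accepted via special-case parsing:
    func @example(%arg0: f32) -> f32 {
      return %arg0 : f32
    }

    // Now required: the dialect-qualified name.
    func.func @example(%arg0: f32) -> f32 {
      return %arg0 : f32
    }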

Added: 
    

Modified: 
    mlir/test/Dialect/Linalg/affine.mlir
    mlir/test/Dialect/Linalg/bubble-up-extract-slice-op.mlir
    mlir/test/Dialect/Linalg/bufferize.mlir
    mlir/test/Dialect/Linalg/canonicalize-duplicate-inputs.mlir
    mlir/test/Dialect/Linalg/canonicalize.mlir
    mlir/test/Dialect/Linalg/comprehensive-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-aliasing-in.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-aliasing-in.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-init-tensor-elimination.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-init-tensor-elimination.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir
    mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
    mlir/test/Dialect/Linalg/conv-interface-invalid.mlir
    mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
    mlir/test/Dialect/Linalg/decompose-convolution.mlir
    mlir/test/Dialect/Linalg/detensorize_0d.mlir
    mlir/test/Dialect/Linalg/detensorize_br_operands.mlir
    mlir/test/Dialect/Linalg/detensorize_if.mlir
    mlir/test/Dialect/Linalg/detensorize_trivial.mlir
    mlir/test/Dialect/Linalg/detensorize_while.mlir
    mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
    mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
    mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
    mlir/test/Dialect/Linalg/fill-interface-invalid.mlir
    mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
    mlir/test/Dialect/Linalg/forward-vector-transfers.mlir
    mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
    mlir/test/Dialect/Linalg/fusion-2-level.mlir
    mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
    mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
    mlir/test/Dialect/Linalg/fusion-indexed.mlir
    mlir/test/Dialect/Linalg/fusion-pattern.mlir
    mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
    mlir/test/Dialect/Linalg/fusion-sequence.mlir
    mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
    mlir/test/Dialect/Linalg/fusion.mlir
    mlir/test/Dialect/Linalg/generalize-named-ops.mlir
    mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
    mlir/test/Dialect/Linalg/generalize-pad-tensor.mlir
    mlir/test/Dialect/Linalg/hoist-padding.mlir
    mlir/test/Dialect/Linalg/hoisting.mlir
    mlir/test/Dialect/Linalg/inline-scalar-operands.mlir
    mlir/test/Dialect/Linalg/inlining.mlir
    mlir/test/Dialect/Linalg/interchange.mlir
    mlir/test/Dialect/Linalg/invalid.mlir
    mlir/test/Dialect/Linalg/library-calls.mlir
    mlir/test/Dialect/Linalg/loops.mlir
    mlir/test/Dialect/Linalg/lower-pad-tensor.mlir
    mlir/test/Dialect/Linalg/named-ops.mlir
    mlir/test/Dialect/Linalg/namedop_conversion.mlir
    mlir/test/Dialect/Linalg/one-shot-module-bufferize-allow-return-allocs.mlir
    mlir/test/Dialect/Linalg/one-shot-module-bufferize.mlir
    mlir/test/Dialect/Linalg/pad_fusion.mlir
    mlir/test/Dialect/Linalg/parallel-loops.mlir
    mlir/test/Dialect/Linalg/promote.mlir
    mlir/test/Dialect/Linalg/promotion_options.mlir
    mlir/test/Dialect/Linalg/reshape_control_fusion.mlir
    mlir/test/Dialect/Linalg/reshape_fusion.mlir
    mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
    mlir/test/Dialect/Linalg/reshape_linearization_fusion_with_unit_dims.mlir
    mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
    mlir/test/Dialect/Linalg/roundtrip.mlir
    mlir/test/Dialect/Linalg/split_reduction.mlir
    mlir/test/Dialect/Linalg/standard.mlir
    mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
    mlir/test/Dialect/Linalg/tile-and-distribute.mlir
    mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
    mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
    mlir/test/Dialect/Linalg/tile-conv.mlir
    mlir/test/Dialect/Linalg/tile-fuse-and-distribute.mlir
    mlir/test/Dialect/Linalg/tile-indexed.mlir
    mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir
    mlir/test/Dialect/Linalg/tile-parallel-reduce.mlir
    mlir/test/Dialect/Linalg/tile-parallel.mlir
    mlir/test/Dialect/Linalg/tile-scalarize-dynamic-dims.mlir
    mlir/test/Dialect/Linalg/tile-tensors.mlir
    mlir/test/Dialect/Linalg/tile-zero.mlir
    mlir/test/Dialect/Linalg/tile.mlir
    mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
    mlir/test/Dialect/Linalg/transform-patterns.mlir
    mlir/test/Dialect/Linalg/vectorization.mlir
    mlir/test/Dialect/Linalg/vectorize-convolution.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir
index cb7a80c933828..46dd2a3d606c3 100644
--- a/mlir/test/Dialect/Linalg/affine.mlir
+++ b/mlir/test/Dialect/Linalg/affine.mlir
@@ -3,7 +3,7 @@
 // Test that we can lower all the way to LLVM without crashing, don't check results here.
 // RUN: mlir-opt %s -convert-linalg-to-affine-loops -convert-linalg-to-llvm -o=/dev/null 2>&1
 
-func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
+func.func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %A = memref.view %arg0[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32>
@@ -17,7 +17,7 @@ func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
 //----------------------------------------------------------------------------//
 // Named ops to loops.
 //----------------------------------------------------------------------------//
-func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
+func.func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
   linalg.batch_matmul ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
                      outs(%C : memref<?x?x?xf32>)
   return

diff --git a/mlir/test/Dialect/Linalg/bubble-up-extract-slice-op.mlir b/mlir/test/Dialect/Linalg/bubble-up-extract-slice-op.mlir
index 234c2b8fec30f..126927e8f07d8 100644
--- a/mlir/test/Dialect/Linalg/bubble-up-extract-slice-op.mlir
+++ b/mlir/test/Dialect/Linalg/bubble-up-extract-slice-op.mlir
@@ -1,6 +1,6 @@
 //RUN: mlir-opt -test-linalg-transform-patterns=test-bubble-up-extract-slice-op-pattern -split-input-file %s | FileCheck %s
 
-func @dynamic(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>, %arg2: index, %arg3: index, %arg4: index, %arg5:index) -> tensor<?x?xf32> {
+func.func @dynamic(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>, %arg2: index, %arg3: index, %arg4: index, %arg5:index) -> tensor<?x?xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -27,7 +27,7 @@ func @dynamic(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>, %arg2: index, %arg3:
 
 //-----
 
-func @static(%arg0: tensor<16x8xf32>, %arg1: tensor<8xf32>) -> tensor<4x2xf32> {
+func.func @static(%arg0: tensor<16x8xf32>, %arg1: tensor<8xf32>) -> tensor<4x2xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -54,7 +54,7 @@ func @static(%arg0: tensor<16x8xf32>, %arg1: tensor<8xf32>) -> tensor<4x2xf32> {
 
 //-----
 
-func @mixed(%arg0: tensor<?x8xf32>, %arg1: tensor<8xf32>, %arg2: index, %arg3: index) -> tensor<?x2xf32> {
+func.func @mixed(%arg0: tensor<?x8xf32>, %arg1: tensor<8xf32>, %arg2: index, %arg3: index) -> tensor<?x2xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -81,7 +81,7 @@ func @mixed(%arg0: tensor<?x8xf32>, %arg1: tensor<8xf32>, %arg2: index, %arg3: i
 
 //-----
 
-func @dynamic_to_static(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>) -> tensor<4x2xf32> {
+func.func @dynamic_to_static(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>) -> tensor<4x2xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -108,7 +108,7 @@ func @dynamic_to_static(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>) -> tensor<
 
 //-----
 
-func @matmul_slice() -> tensor<2x2xf32> {
+func.func @matmul_slice() -> tensor<2x2xf32> {
     %lhs = arith.constant dense<1.0> : tensor<4x4xf32>
     %rhs = arith.constant dense<1.0> : tensor<4x4xf32>
     %dst = arith.constant dense<[[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0], [12.0, 13.0, 14.0, 15.0]]> : tensor<4x4xf32>
@@ -126,7 +126,7 @@ func @matmul_slice() -> tensor<2x2xf32> {
 
 //-----
 
-func @conv_slice(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3x32xf32>) -> tensor<1x32x32x16xf32> {
+func.func @conv_slice(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3x32xf32>) -> tensor<1x32x32x16xf32> {
   %c112 = arith.constant 112 : index
   %c32 = arith.constant 32 : index
   %c16 = arith.constant 16 : index

diff --git a/mlir/test/Dialect/Linalg/bufferize.mlir b/mlir/test/Dialect/Linalg/bufferize.mlir
index 6776197a91fe3..dd0ba1ca68a02 100644
--- a/mlir/test/Dialect/Linalg/bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/bufferize.mlir
@@ -23,7 +23,7 @@
 // CHECK:           }
 // CHECK:           %[[RESULT:.*]] = bufferization.to_tensor %[[RESULT_MEMREF]] : memref<4xf32>
 // CHECK:           return %[[RESULT]] : tensor<4xf32>
-func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> {
+func.func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> {
     %0 = linalg.generic {
       indexing_maps = [#map0, #map0],
       iterator_types = ["parallel"]
@@ -51,7 +51,7 @@ func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> {
 // CHECK:         linalg.generic
 // CHECK-SAME:    ins(%[[MEMREF]] : memref<?xf32>)
 // CHECK-SAME:    outs(%[[OUT_BUF]] : memref<?xf32>) {
-func @init_tensor(%in : tensor<?xf32>, %size: index) -> tensor<?xf32> {
+func.func @init_tensor(%in : tensor<?xf32>, %size: index) -> tensor<?xf32> {
   %init = linalg.init_tensor [%size] : tensor<?xf32>
   %0 = linalg.generic {
     indexing_maps = [#map0, #map0],
@@ -77,7 +77,7 @@ func @init_tensor(%in : tensor<?xf32>, %size: index) -> tensor<?xf32> {
 // CHECK-SAME:      ins(%{{.*}} : memref<4xf32>)
 // CHECK-SAME:      outs(%[[RESULT0]], %[[RESULT1]] : memref<4xf32>, memref<4xf32>)
 // CHECK-NEXT: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: f32):
-func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
+func.func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
     %0, %1 = linalg.generic {
       indexing_maps = [#map0, #map0, #map0],
       iterator_types = ["parallel"]
@@ -109,7 +109,7 @@ func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
 // CHECK:           linalg.generic
 // CHECK-SAME:      ins(%[[MEMREF_ARG]] : memref<?x?xf32>)
 // CHECK-SAME:      outs(%[[RESULT0]], %[[RESULT1]] : memref<?x?xf32>, memref<?x?xf32>)
-func @dynamic_results(%arg0: tensor<?x?xf32>)
+func.func @dynamic_results(%arg0: tensor<?x?xf32>)
          -> (tensor<?x?xf32>, tensor<?x?xf32>) {
     %0, %1 = linalg.generic {
       indexing_maps = [#map_2d, #map_2d, #map_2d],
@@ -147,7 +147,7 @@ func @dynamic_results(%arg0: tensor<?x?xf32>)
 // CHECK:           linalg.generic
 // CHECK-SAME:      ins(%[[ARG0_MEMREF]] : memref<2x3x4xvector<3x4xi4>>)
 // CHECK-SAME:      outs(%[[INIT_BUFFER]] : memref<3x2xf32>) {
-func @generic_with_init_tensor(%arg0: tensor<2x3x4xvector<3x4xi4>>,
+func.func @generic_with_init_tensor(%arg0: tensor<2x3x4xvector<3x4xi4>>,
   %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>) {
 
   %0 = linalg.generic #trait
@@ -164,7 +164,7 @@ func @generic_with_init_tensor(%arg0: tensor<2x3x4xvector<3x4xi4>>,
 
 // CHECK-LABEL: func @bufferize_fill(
 // CHECK-SAME:    %[[IN:.*]]: tensor<?xf32>
-func @bufferize_fill(%arg0: tensor<?xf32>) -> tensor<?xf32> {
+func.func @bufferize_fill(%arg0: tensor<?xf32>) -> tensor<?xf32> {
   %c0 = arith.constant 0.0 : f32
   // CHECK: %[[ALLOC:.*]] = memref.alloc
   // CHECK: linalg.fill ins(%cst : f32) outs(%[[ALLOC]] : memref<?xf32>)
@@ -177,7 +177,7 @@ func @bufferize_fill(%arg0: tensor<?xf32>) -> tensor<?xf32> {
 // -----
 
 // CHECK-LABEL:   func @bufferize_dot
-func @bufferize_dot(%in: tensor<4xf32>, %out: tensor<f32>) -> tensor<f32> {
+func.func @bufferize_dot(%in: tensor<4xf32>, %out: tensor<f32>) -> tensor<f32> {
   %dot = linalg.dot ins(%in, %in : tensor<4xf32>, tensor<4xf32>)
                     outs(%out : tensor<f32>) -> tensor<f32>
   return %dot : tensor<f32>

diff --git a/mlir/test/Dialect/Linalg/canonicalize-duplicate-inputs.mlir b/mlir/test/Dialect/Linalg/canonicalize-duplicate-inputs.mlir
index e4b2dd7282b59..827168c5b4131 100644
--- a/mlir/test/Dialect/Linalg/canonicalize-duplicate-inputs.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize-duplicate-inputs.mlir
@@ -6,7 +6,7 @@
 
 // CHECK: #[[$MAP:.*]] = affine_map<(d0) -> (d0)>
 // CHECK-LABEL: @basic
-func @basic(%arg0: tensor<?xf32>) -> tensor<?xf32> {
+func.func @basic(%arg0: tensor<?xf32>) -> tensor<?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP]], #[[$MAP]]]
   // CHECK:   attrs =  {someattr}
   // CHECK:   ^bb0(%[[BBARG:.*]]: f32, %{{.*}}: f32):
@@ -32,7 +32,7 @@ func @basic(%arg0: tensor<?xf32>) -> tensor<?xf32> {
 // CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1, d0)>
 // CHECK-LABEL: @distinct_affine_maps
-func @distinct_affine_maps(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @distinct_affine_maps(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP0]], #[[$MAP1]], #[[$MAP0]]]
   %0 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
      ins(%arg0, %arg0 : tensor<?x?xf32>, tensor<?x?xf32>)
@@ -55,7 +55,7 @@ func @distinct_affine_maps(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
 // CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1, d0)>
 // CHECK-LABEL: @mixed_redundant_non_redundant
-func @mixed_redundant_non_redundant(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @mixed_redundant_non_redundant(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP0]], #[[$MAP1]], #[[$MAP0]]]
   // CHECK:   ^bb0(%[[BBARG0:.*]]: f32, %[[BBARG1:.*]]: f32, %{{[a-zA-Z0-9]+}}: f32):
   // CHECK:     "test.elementwise_mappable"(%[[BBARG0]], %[[BBARG1]], %[[BBARG0]])
@@ -77,7 +77,7 @@ func @mixed_redundant_non_redundant(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
 
 // CHECK: #[[$MAP:.*]] = affine_map<(d0) -> (d0)>
 // CHECK-LABEL: @multiple_different_redundant_args
-func @multiple_different_redundant_args(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+func.func @multiple_different_redundant_args(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP]], #[[$MAP]], #[[$MAP]]]
   // CHECK:   ^bb0(%[[BBARG0:.*]]: f32, %[[BBARG1:.*]]: f32, %{{[a-zA-Z0-9]+}}: f32):
   // CHECK:     "test.elementwise_mappable"(%[[BBARG0]], %[[BBARG1]], %[[BBARG0]], %[[BBARG1]])

diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index ecc3bc5b696ba..b08af21b4bfce 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -canonicalize -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @memref_cast(
-func @memref_cast(%a: index, %b: index) -> memref<?x?xf32> {
+func.func @memref_cast(%a: index, %b: index) -> memref<?x?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c8 = arith.constant 8 : index
@@ -27,7 +27,7 @@ func @memref_cast(%a: index, %b: index) -> memref<?x?xf32> {
   iterator_types = ["parallel"]
 }
 
-func @dce_zero_memref(%arg0 : memref<0xf32>, %arg1: tensor<0xf32>) -> tensor<0xf32> {
+func.func @dce_zero_memref(%arg0 : memref<0xf32>, %arg1: tensor<0xf32>) -> tensor<0xf32> {
   // memref<0x32> is expected to be dce'ed
   memref.copy %arg0, %arg0 : memref<0xf32> to memref<0xf32>
 
@@ -49,7 +49,7 @@ func @dce_zero_memref(%arg0 : memref<0xf32>, %arg1: tensor<0xf32>) -> tensor<0xf
 
 
 // CHECK-LABEL: func @tensor.cast(
-func @tensor.cast(%a : tensor<3x4xf32>, %b : tensor<4x?xf32>, %c : tensor<3x?xf32>)
+func.func @tensor.cast(%a : tensor<3x4xf32>, %b : tensor<4x?xf32>, %c : tensor<3x?xf32>)
   -> tensor<3x?xf32>
 {
   %ta = tensor.cast %a : tensor<3x4xf32> to tensor<?x?xf32>
@@ -72,7 +72,7 @@ func @tensor.cast(%a : tensor<3x4xf32>, %b : tensor<4x?xf32>, %c : tensor<3x?xf3
 //  CHECK-SAME:     %[[A:[a-z0-9]*]]: tensor<?x?xf32>
 //  CHECK-SAME:     %[[B:[a-z0-9]*]]: memref<?x?xf32>
 //  CHECK-SAME:     %[[C:[a-z0-9]*]]: tensor<?x?xf32>
-func @linalg_effects(%a : tensor<?x?xf32>, %b : memref<?x?xf32>, %c : tensor<?x?xf32>) {
+func.func @linalg_effects(%a : tensor<?x?xf32>, %b : memref<?x?xf32>, %c : tensor<?x?xf32>) {
   // CHECK-NOT:   %{{.*}} = linalg.matmul
   %t = linalg.matmul ins(%a, %b : tensor<?x?xf32>, memref<?x?xf32>)
                     outs(%c : tensor<?x?xf32>) -> tensor<?x?xf32>
@@ -85,7 +85,7 @@ func @linalg_effects(%a : tensor<?x?xf32>, %b : memref<?x?xf32>, %c : tensor<?x?
 
 // -----
 
-func @init_tensor_canonicalize() -> (tensor<4x5x?xf32>) {
+func.func @init_tensor_canonicalize() -> (tensor<4x5x?xf32>) {
   %c6 = arith.constant 6 : index
   %0 = linalg.init_tensor [4, 5, %c6] : tensor<4x5x?xf32>
   return %0 : tensor<4x5x?xf32>
@@ -97,7 +97,7 @@ func @init_tensor_canonicalize() -> (tensor<4x5x?xf32>) {
 
 // -----
 
-func @init_tensor_reshape_expansion(%arg0 : index) -> tensor<2x3x5x4x?x7xf32> {
+func.func @init_tensor_reshape_expansion(%arg0 : index) -> tensor<2x3x5x4x?x7xf32> {
   %0 = linalg.init_tensor [6, 5, %arg0] : tensor<6x5x?xf32>
   %1 = tensor.expand_shape %0 [[0, 1], [2], [3, 4, 5]]
       : tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
@@ -112,7 +112,7 @@ func @init_tensor_reshape_expansion(%arg0 : index) -> tensor<2x3x5x4x?x7xf32> {
 
 // -----
 
-func @init_tensor_reshape_collapse(%arg0 : index) -> tensor<6x5x?xf32> {
+func.func @init_tensor_reshape_collapse(%arg0 : index) -> tensor<6x5x?xf32> {
   %0 = linalg.init_tensor [2, 3, 5, 4, %arg0, 7] : tensor<2x3x5x4x?x7xf32>
   %1 = tensor.collapse_shape %0 [[0, 1], [2], [3, 4, 5]]
       : tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
@@ -128,7 +128,7 @@ func @init_tensor_reshape_collapse(%arg0 : index) -> tensor<6x5x?xf32> {
 // -----
 
 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-func @remove_no_op(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>)
+func.func @remove_no_op(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>)
   -> (tensor<?x?x?xf32>, tensor<?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -155,7 +155,7 @@ func @remove_no_op(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>)
 // -----
 
 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-func @remove_no_op_mismatched_types(%arg0 : tensor<?x?x?xf32>)
+func.func @remove_no_op_mismatched_types(%arg0 : tensor<?x?x?xf32>)
   -> tensor<1x2x3xf32> {
   %out = linalg.init_tensor [1, 2, 3] : tensor<1x2x3xf32>
   %g = linalg.generic {
@@ -176,7 +176,7 @@ func @remove_no_op_mismatched_types(%arg0 : tensor<?x?x?xf32>)
 // -----
 
 #map = affine_map<() -> ()>
-func @cant_fold_to_tensor_cast(%arg0 : f32) -> tensor<f32> {
+func.func @cant_fold_to_tensor_cast(%arg0 : f32) -> tensor<f32> {
   %out = linalg.init_tensor [] : tensor<f32>
   %g = linalg.generic {
     indexing_maps = [#map, #map],
@@ -194,7 +194,7 @@ func @cant_fold_to_tensor_cast(%arg0 : f32) -> tensor<f32> {
 // -----
 
 #map = affine_map<(d0, d1) -> (d0, d1)>
-func @keep_not_noop(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @keep_not_noop(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %cst = arith.constant 1.000000e+00 : f32
@@ -219,7 +219,7 @@ func @keep_not_noop(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32> {
 // -----
 
 #map = affine_map<(d0, d1) -> (d0, d1)>
-func @keep_not_noop(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>)
+func.func @keep_not_noop(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>)
   -> (tensor<?x?xf32>, tensor<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -246,7 +246,7 @@ func @keep_not_noop(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>)
 
 // -----
 
-func @fold_init_tensor_with_slice
+func.func @fold_init_tensor_with_slice
   (%arg0 : index, %arg1 : index) -> tensor<5x?x20xf32>
 {
   %0 = linalg.init_tensor[%arg0, 10, 40] : tensor<?x10x40xf32>
@@ -262,7 +262,7 @@ func @fold_init_tensor_with_slice
 
 // -----
 
-func @fold_init_tensor_with_cast(%arg0 : index) -> tensor<1x12xf32> {
+func.func @fold_init_tensor_with_cast(%arg0 : index) -> tensor<1x12xf32> {
   %0 = linalg.init_tensor [%arg0, 12] : tensor<?x12xf32>
   %1 = tensor.cast %0 : tensor<?x12xf32> to tensor<1x12xf32>
   return %1 : tensor<1x12xf32>
@@ -288,7 +288,7 @@ func @fold_init_tensor_with_cast(%arg0 : index) -> tensor<1x12xf32> {
 //   CHECK-NOT:   linalg.generic
 //   CHECK-NOT:   tensor.pad
 //       CHECK:   return
-func @dead_linalg_tensor(%arg0 : tensor<7x7xi32>, %arg1 : tensor<7x7xf32>,
+func.func @dead_linalg_tensor(%arg0 : tensor<7x7xi32>, %arg1 : tensor<7x7xf32>,
                          %arg2: tensor<?x?xf32>, %high : index) {
   %c0_i32 = arith.constant 0 : i32
   %c0 = arith.constant 0 : index
@@ -309,7 +309,7 @@ func @dead_linalg_tensor(%arg0 : tensor<7x7xi32>, %arg1 : tensor<7x7xf32>,
 
 // -----
 
-func @propogate_casts(%arg0 : tensor<?x?xf32>, %arg1 : f32, %arg2 : index,
+func.func @propogate_casts(%arg0 : tensor<?x?xf32>, %arg1 : f32, %arg2 : index,
     %arg3 : index) -> tensor<?x?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -332,7 +332,7 @@ func @propogate_casts(%arg0 : tensor<?x?xf32>, %arg1 : f32, %arg2 : index,
 // -----
 
 // CHECK-LABEL: @self_copy
-func @self_copy(%arg0 : memref<2x3x?x4xf32>) {
+func.func @self_copy(%arg0 : memref<2x3x?x4xf32>) {
 
 //   CHECK-NOT: memref.copy
   memref.copy %arg0, %arg0 : memref<2x3x?x4xf32> to memref<2x3x?x4xf32>
@@ -349,7 +349,7 @@ func @self_copy(%arg0 : memref<2x3x?x4xf32>) {
 //  CHECK-SAME: ins(%[[ARG0]] : tensor<?xf32>)
 //  CHECK-SAME: outs({{.*}} : tensor<?xf32>) {
 #map0 = affine_map<(d0) -> (d0)>
-func @remove_deadargs_generic_basic(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
+func.func @remove_deadargs_generic_basic(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
   %c0 = arith.constant 0 : index
   %cst = arith.constant 7.0 : f32
   %0 = tensor.dim %arg0, %c0 : tensor<?xf32>
@@ -371,7 +371,7 @@ func @remove_deadargs_generic_basic(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
 //  CHECK-SAME: outs({{.*}} : tensor<?x?xf32>) {
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> (d1, d0)>
-func @remove_deadargs_generic_mixedaccess(%arg0: tensor<?x?xf32>) -> (tensor<?x?xf32>) {
+func.func @remove_deadargs_generic_mixedaccess(%arg0: tensor<?x?xf32>) -> (tensor<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 0 : index
   %cst1 = arith.constant 7.0 : f32
@@ -391,7 +391,7 @@ func @remove_deadargs_generic_mixedaccess(%arg0: tensor<?x?xf32>) -> (tensor<?x?
 
 // -----
 // CHECK-LABEL: func @fold_fill_reshape()
-func @fold_fill_reshape() -> tensor<6x4xf32> {
+func.func @fold_fill_reshape() -> tensor<6x4xf32> {
   %zero = arith.constant 0.0 : f32
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [6, 4] : tensor<6x4xf32>
   %init = linalg.init_tensor [1, 2, 3, 4] : tensor<1x2x3x4xf32>
@@ -407,7 +407,7 @@ func @fold_fill_reshape() -> tensor<6x4xf32> {
 
 //       CHECK: func @fold_fill_reshape_dynamic
 //  CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?x?x?x?xf32>
-func @fold_fill_reshape_dynamic(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?xf32> {
+func.func @fold_fill_reshape_dynamic(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?xf32> {
   %zero = arith.constant 0.0 : f32
   // CHECK: %[[RESHAPE:.+]] = tensor.collapse_shape %[[ARG0]]
   %0 = linalg.fill ins(%zero : f32) outs(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?x?x?x?xf32>
@@ -421,11 +421,11 @@ func @fold_fill_reshape_dynamic(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?xf32
 
 // -----
 
-func private @some_use(%i : index, %j : index)
+func.func private @some_use(%i : index, %j : index)
 
 // CHECK-LABEL: func @init_canonicalize
 //  CHECK-SAME:   %[[I:.*]]: index
-func @init_canonicalize(%i : index) {
+func.func @init_canonicalize(%i : index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
 
@@ -446,7 +446,7 @@ func @init_canonicalize(%i : index) {
 // -----
 
 // CHECK-LABEL: func @rank_reducing_init_extract
-func @rank_reducing_init_extract(%sz : index, %idx : index) -> tensor<2xf32> {
+func.func @rank_reducing_init_extract(%sz : index, %idx : index) -> tensor<2xf32> {
   // CHECK: linalg.init_tensor [2] : tensor<2xf32>
   %a = linalg.init_tensor [%sz, 2] : tensor<?x2xf32>
 
@@ -458,7 +458,7 @@ func @rank_reducing_init_extract(%sz : index, %idx : index) -> tensor<2xf32> {
 // -----
 
 // CHECK: func @fold_self_copy
-func @fold_self_copy(%0 : memref<4x16xf32>) {
+func.func @fold_self_copy(%0 : memref<4x16xf32>) {
 // CHECK-NEXT: return
   linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                                    affine_map<(d0, d1) -> (d0, d1)>],
@@ -478,7 +478,7 @@ func @fold_self_copy(%0 : memref<4x16xf32>) {
 //       CHECK:   %[[INIT:.+]] = linalg.init_tensor [412, 276] : tensor<412x276xf32>
 //       CHECK:   %[[FILL:.+]] = linalg.fill ins(%[[F0]]{{.*}}outs(%[[INIT]]
 //       CHECK:   return %[[FILL]]
-func @fold_static_pad_fill() -> tensor<412x276xf32> {
+func.func @fold_static_pad_fill() -> tensor<412x276xf32> {
   %f0 = arith.constant 0.0 : f32
   %init = linalg.init_tensor [400, 273] : tensor<400x273xf32>
   %fill = linalg.fill ins(%f0 : f32) outs(%init : tensor<400x273xf32>) -> tensor<400x273xf32>
@@ -510,7 +510,7 @@ func @fold_static_pad_fill() -> tensor<412x276xf32> {
 //      CHECK:   %[[INIT:.+]] = linalg.init_tensor [%[[S0]], %[[S1]], %[[S2]], %[[S3]]] : tensor<?x?x?x?xf32>
 //      CHECK:   %[[FILL:.+]] = linalg.fill ins(%[[F0]]{{.*}}outs(%[[INIT]]
 //      CHECK:   return %[[FILL]]
-func @fold_dynamic_pad_fill(%init: tensor<8x?x16x32xf32>, %low0: index, %low3: index, %high2: index, %high3: index) -> tensor<?x?x?x?xf32> {
+func.func @fold_dynamic_pad_fill(%init: tensor<8x?x16x32xf32>, %low0: index, %low3: index, %high2: index, %high3: index) -> tensor<?x?x?x?xf32> {
   %f0 = arith.constant 0.0 : f32
   %fill = linalg.fill ins(%f0 : f32) outs(%init : tensor<8x?x16x32xf32>) -> tensor<8x?x16x32xf32>
   %pad = tensor.pad %fill low[%low0, 8, 7, %low3] high[1, 2, %high2, %high3] {
@@ -523,7 +523,7 @@ func @fold_dynamic_pad_fill(%init: tensor<8x?x16x32xf32>, %low0: index, %low3: i
 // -----
 
 // CHECK-LABEL: func @no_fold_pad_fill_value_mismatch
-func @no_fold_pad_fill_value_mismatch() -> tensor<412x276xf32> {
+func.func @no_fold_pad_fill_value_mismatch() -> tensor<412x276xf32> {
   %f0 = arith.constant 0.0 : f32
   %f1 = arith.constant 1.0 : f32
   %init = linalg.init_tensor [400, 273] : tensor<400x273xf32>
@@ -545,7 +545,7 @@ func @no_fold_pad_fill_value_mismatch() -> tensor<412x276xf32> {
 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-LABEL: func @static_input_without_cast
 // CHECK-SAME:  (%[[ARG0:.*]]: tensor<2x3x4xf32>, %[[ARG1:.*]]: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
-func @static_input_without_cast(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
+func.func @static_input_without_cast(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -575,7 +575,7 @@ func @static_input_without_cast(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<?x?x?xf
 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-LABEL: func @static_input_with_cast
 // CHECK-SAME:  (%[[ARG0:.*]]: tensor<2x3x4xf32>, %[[ARG1:.*]]: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
-func @static_input_with_cast(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
+func.func @static_input_with_cast(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -606,7 +606,7 @@ func @static_input_with_cast(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<?x?x?xf32>
 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-LABEL: func @static_output_with_cast
 // CHECK-SAME:  (%[[ARG0:.*]]: tensor<?x?x?xf32>, %[[ARG1:.*]]: tensor<?x?x?xf32>, %[[ARG2:.*]]: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
-func @static_output_with_cast(%arg0 : tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>, %arg2: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
+func.func @static_output_with_cast(%arg0 : tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>, %arg2: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -640,7 +640,7 @@ func @static_output_with_cast(%arg0 : tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32
 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-LABEL: func @cast_source
 // CHECK-SAME:  (%[[ARG0:.*]]: tensor<2x3x4xf32>, %[[ARG1:.*]]: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
-func @cast_source(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
+func.func @cast_source(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -671,7 +671,7 @@ func @cast_source(%arg0 : tensor<2x3x4xf32>, %arg1: tensor<2x3x4xf32>) -> tensor
 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-LABEL: func @cast_dest
 // CHECK-SAME:  (%[[ARG0:.*]]: tensor<?x?x?xf32>, %[[ARG1:.*]]: tensor<1x?x?xf32>,
-func @cast_dest(%arg0: tensor<?x?x?xf32>, %arg1: tensor<1x?x?xf32>, %arg2: index, %arg3: index, %arg4: index) -> tensor<?x?x?xf32> {
+func.func @cast_dest(%arg0: tensor<?x?x?xf32>, %arg1: tensor<1x?x?xf32>, %arg2: index, %arg3: index, %arg4: index) -> tensor<?x?x?xf32> {
   %0 = linalg.init_tensor [%arg2, %arg3, %arg4] : tensor<?x?x?xf32>
   %1 = tensor.cast %arg1 : tensor<1x?x?xf32> to tensor<?x?x?xf32>
   %2 = linalg.generic {
@@ -706,7 +706,7 @@ func @cast_dest(%arg0: tensor<?x?x?xf32>, %arg1: tensor<1x?x?xf32>, %arg2: index
 //       CHECK: %[[D1:.+]] = tensor.dim %[[INPUT]], %[[C1]] : tensor<?x?x?xf32>
 //       CHECK: %[[D2:.+]] = tensor.dim %[[INPUT]], %[[C2]] : tensor<?x?x?xf32>
 //       CHECK: tensor.insert_slice %[[INPUT]] into %[[FILL]][%[[LOW0]], %[[OFFSET1]], 2] [%[[D0]], %[[D1]], %[[D2]]] [1, 1, 1]
-func @insert_pad_into_fill(%input: tensor<?x?x?xf32>, %low0: index, %low1: index, %high1: index, %high2: index) -> tensor<8x384x384xf32> {
+func.func @insert_pad_into_fill(%input: tensor<?x?x?xf32>, %low0: index, %low1: index, %high1: index, %high2: index) -> tensor<8x384x384xf32> {
   %f0 = arith.constant 0.0 : f32
   %c0 = arith.constant 0 : index
   %pad = tensor.pad %input low[%low0, %low1, %c0] high[%c0, %high1, %high2] {
@@ -727,7 +727,7 @@ func @insert_pad_into_fill(%input: tensor<?x?x?xf32>, %low0: index, %low1: index
 //       CHECK:   %[[INSERT0:.+]] = tensor.insert_slice %[[A]] into %[[FILL]][%[[OFFSET]], 0, 0] [8, 128, 128] [1, 1, 1]
 //       CHECK:   %[[INSERT1:.+]] = tensor.insert_slice %[[A]] into %[[INSERT0]][0, 128, %[[OFFSET]]] [8, 128, 128] [1, 1, 1]
 //       CHECK:                  tensor.insert_slice %[[INPUT]] into %[[INSERT1]][1, 2, 256] [7, 123, 124] [1, 1, 1]
-func @multi_insert_pad_into_fill(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
+func.func @multi_insert_pad_into_fill(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
   %f0 = arith.constant 0.0 : f32
   %c0 = arith.constant 0 : index
   %pad = tensor.pad %input low[1, 2, 0] high[0, 3, 4] {
@@ -745,7 +745,7 @@ func @multi_insert_pad_into_fill(%input: tensor<7x123x124xf32>, %a: tensor<8x128
 // -----
 
 // CHECK-LABEL: func @multi_insert_pad_into_fill_overlap
-func @multi_insert_pad_into_fill_overlap(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
+func.func @multi_insert_pad_into_fill_overlap(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
   %f0 = arith.constant 0.0 : f32
   %c0 = arith.constant 0 : index
   // CHECK: tensor.pad
@@ -765,7 +765,7 @@ func @multi_insert_pad_into_fill_overlap(%input: tensor<7x123x124xf32>, %a: tens
 // -----
 
 // CHECK-LABEL: func @multi_insert_pad_into_fill_overlap
-func @multi_insert_pad_into_fill_overlap(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
+func.func @multi_insert_pad_into_fill_overlap(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
   %f0 = arith.constant 0.0 : f32
   %c0 = arith.constant 0 : index
   // CHECK: tensor.pad
@@ -785,7 +785,7 @@ func @multi_insert_pad_into_fill_overlap(%input: tensor<7x123x124xf32>, %a: tens
 // -----
 
 // CHECK-LABEL: func @multi_insert_pad_into_fill
-func @multi_insert_pad_into_fill(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
+func.func @multi_insert_pad_into_fill(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
   %f0 = arith.constant 0.0 : f32
   %c0 = arith.constant 0 : index
   // CHECK-NOT: tensor.pad
@@ -806,7 +806,7 @@ func @multi_insert_pad_into_fill(%input: tensor<7x123x124xf32>, %a: tensor<8x128
 // -----
 
 // CHECK-LABEL: func @multi_insert_pad_into_fill_mismatch
-func @multi_insert_pad_into_fill_mismatch(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
+func.func @multi_insert_pad_into_fill_mismatch(%input: tensor<7x123x124xf32>, %a: tensor<8x128x128xf32>, %offset: index) -> tensor<8x384x384xf32> {
   %f0 = arith.constant 0.0 : f32
   %f1 = arith.constant 1.0 : f32
   %c0 = arith.constant 0 : index
@@ -826,7 +826,7 @@ func @multi_insert_pad_into_fill_mismatch(%input: tensor<7x123x124xf32>, %a: ten
 
 // -----
 
-func @fold_linalgop_with_cast_consumer(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
+func.func @fold_linalgop_with_cast_consumer(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
     %arg2 : tensor<?x?xf32>) -> (tensor<4x8xf32>, tensor<?x?xf32>) {
   %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
       outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
@@ -848,7 +848,7 @@ func @fold_linalgop_with_cast_consumer(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?
 
 // -----
 
-func @fold_conv_op_with_cast_consumer(%arg0 : tensor<?x?x?x?xf32>,
+func.func @fold_conv_op_with_cast_consumer(%arg0 : tensor<?x?x?x?xf32>,
     %arg1 : tensor<?x?x?x?xf32>,  %arg2 : tensor<?x?x?x?xf32>) ->
     (tensor<4x8x12x16xf32>, tensor<?x?x?x?xf32>) {
   %0 = linalg.conv_2d_nchw_fchw ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
@@ -869,7 +869,7 @@ func @fold_conv_op_with_cast_consumer(%arg0 : tensor<?x?x?x?xf32>,
 
 // -----
 
-func @fold_multi_use_generic_op_with_consumer(%arg0 : tensor<?x?x?xf32>) -> (tensor<?x?x?xf32>, tensor<2x3x4xf32>) {
+func.func @fold_multi_use_generic_op_with_consumer(%arg0 : tensor<?x?x?xf32>) -> (tensor<?x?x?xf32>, tensor<2x3x4xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Linalg/comprehensive-bufferize-analysis-2fill-extract-matmul-all-perms.mlir b/mlir/test/Dialect/Linalg/comprehensive-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
index f9b7cd0c0fe41..e1d5e7ed114e7 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
@@ -6,7 +6,7 @@
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_1234(
+func.func @fill_extract_matmul_1234(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -33,7 +33,7 @@ func @fill_extract_matmul_1234(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_1243(
+func.func @fill_extract_matmul_1243(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -60,7 +60,7 @@ func @fill_extract_matmul_1243(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_1324(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
+func.func @fill_extract_matmul_1324(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
   -> tensor<256x256xf32>
@@ -86,7 +86,7 @@ func @fill_extract_matmul_1324(%arg0: tensor<518x518xf32> {linalg.buffer_layout
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_1342(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
+func.func @fill_extract_matmul_1342(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
   -> tensor<256x256xf32>
@@ -112,7 +112,7 @@ func @fill_extract_matmul_1342(%arg0: tensor<518x518xf32> {linalg.buffer_layout
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_1423(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
+func.func @fill_extract_matmul_1423(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
   -> tensor<256x256xf32>
@@ -138,7 +138,7 @@ func @fill_extract_matmul_1423(%arg0: tensor<518x518xf32> {linalg.buffer_layout
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_1432(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
+func.func @fill_extract_matmul_1432(%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
                         %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
   -> tensor<256x256xf32>
@@ -164,7 +164,7 @@ func @fill_extract_matmul_1432(%arg0: tensor<518x518xf32> {linalg.buffer_layout
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_2134(
+func.func @fill_extract_matmul_2134(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -191,7 +191,7 @@ func @fill_extract_matmul_2134(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_2143(
+func.func @fill_extract_matmul_2143(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -218,7 +218,7 @@ func @fill_extract_matmul_2143(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_2314(
+func.func @fill_extract_matmul_2314(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -245,7 +245,7 @@ func @fill_extract_matmul_2314(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_2341(
+func.func @fill_extract_matmul_2341(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -272,7 +272,7 @@ func @fill_extract_matmul_2341(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_2413(
+func.func @fill_extract_matmul_2413(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -299,7 +299,7 @@ func @fill_extract_matmul_2413(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_2431(
+func.func @fill_extract_matmul_2431(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -326,7 +326,7 @@ func @fill_extract_matmul_2431(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_3124(
+func.func @fill_extract_matmul_3124(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -353,7 +353,7 @@ func @fill_extract_matmul_3124(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_3142(
+func.func @fill_extract_matmul_3142(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -380,7 +380,7 @@ func @fill_extract_matmul_3142(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_3214(
+func.func @fill_extract_matmul_3214(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -407,7 +407,7 @@ func @fill_extract_matmul_3214(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_3241(
+func.func @fill_extract_matmul_3241(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -434,7 +434,7 @@ func @fill_extract_matmul_3241(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_3412(
+func.func @fill_extract_matmul_3412(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -461,7 +461,7 @@ func @fill_extract_matmul_3412(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_3421(
+func.func @fill_extract_matmul_3421(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -488,7 +488,7 @@ func @fill_extract_matmul_3421(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_4123(
+func.func @fill_extract_matmul_4123(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -515,7 +515,7 @@ func @fill_extract_matmul_4123(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_4132(
+func.func @fill_extract_matmul_4132(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -542,7 +542,7 @@ func @fill_extract_matmul_4132(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_4213(
+func.func @fill_extract_matmul_4213(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -569,7 +569,7 @@ func @fill_extract_matmul_4213(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_4231(
+func.func @fill_extract_matmul_4231(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -596,7 +596,7 @@ func @fill_extract_matmul_4231(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_4312(
+func.func @fill_extract_matmul_4312(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -623,7 +623,7 @@ func @fill_extract_matmul_4312(
 // -----
 
 // CHECK-LABEL: func @fill_extract_matmul_
-func @fill_extract_matmul_4321(
+func.func @fill_extract_matmul_4321(
     %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-aliasing-in.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-aliasing-in.mlir
index 922b4816af620..fb4b43abddacc 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-aliasing-in.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-aliasing-in.mlir
@@ -2,7 +2,7 @@
 
 // CHECK-LABEL: func @linalg_op_bufferizes_inplace_with_input
 //  CHECK-SAME:     %[[t1:.*]]: memref<?x?xf32, #{{.*}}>, %[[t2:.*]]: memref<?xf32, #{{.*}}>, %[[t3:.*]]: memref<?x?xf32, #{{.*}}>
-func @linalg_op_bufferizes_inplace_with_input(
+func.func @linalg_op_bufferizes_inplace_with_input(
     %t1: tensor<?x?xf32> {linalg.inplaceable = true},
     %t2: tensor<?xf32> {linalg.inplaceable = false},
     %t3: tensor<?x?xf32> {linalg.inplaceable = false},
@@ -26,7 +26,7 @@ func @linalg_op_bufferizes_inplace_with_input(
 
 // CHECK-LABEL: func @linalg_op_bufferizes_out_of_place_with_input
 //  CHECK-SAME:     %[[t1:.*]]: memref<?x?xf32, #{{.*}}>, %[[t2:.*]]: memref<?xf32, #{{.*}}>, %[[t3:.*]]: memref<?x?xf32, #{{.*}}>
-func @linalg_op_bufferizes_out_of_place_with_input(
+func.func @linalg_op_bufferizes_out_of_place_with_input(
     %t1: tensor<?x?xf32> {linalg.inplaceable = false},
     %t2: tensor<?xf32> {linalg.inplaceable = false},
     %t3: tensor<?x?xf32> {linalg.inplaceable = false},
@@ -53,7 +53,7 @@ func @linalg_op_bufferizes_out_of_place_with_input(
 
 // CHECK-LABEL: func @linalg_op_output_cannot_alias_with_input
 //  CHECK-SAME:     %[[t1:.*]]: memref<?x?xf32, #{{.*}}>, %[[t2:.*]]: memref<?xf32, #{{.*}}>, %[[t3:.*]]: memref<?x?xf32, #{{.*}}>
-func @linalg_op_output_cannot_alias_with_input(
+func.func @linalg_op_output_cannot_alias_with_input(
     %t1: tensor<?x?xf32> {linalg.inplaceable = true},
     %t2: tensor<?xf32> {linalg.inplaceable = false},
     %t3: tensor<?x?xf32> {linalg.inplaceable = true},

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir
index 50735f24ad451..88613a29b1cc8 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-alloca.mlir
@@ -7,7 +7,7 @@
 // CHECK-SAME:    %[[A:[a-zA-Z0-9]*]]: memref<64xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[B:[a-zA-Z0-9]*]]: memref<64xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[C:[a-zA-Z0-9]*]]: memref<f32, #[[$DYN_0D_MAP]]>
-func @init_and_dot(%a: tensor<64xf32>, %b: tensor<64xf32>, %c: tensor<f32>) -> tensor<f32> {
+func.func @init_and_dot(%a: tensor<64xf32>, %b: tensor<64xf32>, %c: tensor<f32>) -> tensor<f32> {
   // CHECK-NEXT:   %[[C0:.*]] = arith.constant 0{{.*}} : f32
   %v0 = arith.constant 0.0 : f32
 
@@ -23,7 +23,7 @@ func @init_and_dot(%a: tensor<64xf32>, %b: tensor<64xf32>, %c: tensor<f32>) -> t
 }
 
 //      CHECK:  func @main()
-func @main() {
+func.func @main() {
   //  CHECK-DAG:   %[[C0:.*]] = arith.constant 0{{.*}} : f32
   //  CHECK-DAG:   %[[C1:.*]] = arith.constant 1{{.*}} : f32
   //  CHECK-DAG:   %[[C2:.*]] = arith.constant 2{{.*}} : f32
@@ -62,4 +62,4 @@ func @main() {
 }
 
 //     CHECK:   func private @print_memref_f32(memref<*xf32>)
-func private @print_memref_f32(tensor<*xf32>)
+func.func private @print_memref_f32(tensor<*xf32>)

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-aliasing-in.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-aliasing-in.mlir
index ba63e3b99cd5c..c7817e07b19bf 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-aliasing-in.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-aliasing-in.mlir
@@ -15,7 +15,7 @@
 }
 
 // CHECK-LABEL: func @linalg_op_same_out_tensors(
-func @linalg_op_same_out_tensors(
+func.func @linalg_op_same_out_tensors(
     %t1: tensor<?xf32> {linalg.inplaceable = true},
 // CHECK-SAME:          bufferization.access = "read-write"
     %t2: tensor<?xf32> {linalg.inplaceable = true})
@@ -53,7 +53,7 @@ func @linalg_op_same_out_tensors(
 }
 
 // CHECK-LABEL: func @linalg_op_same_out_tensors_2(
-func @linalg_op_same_out_tensors_2(
+func.func @linalg_op_same_out_tensors_2(
     %t1: tensor<?xf32> {linalg.inplaceable = true},
 // CHECK-SAME:          bufferization.access = "read-write"
     %t2: tensor<?xf32> {linalg.inplaceable = true})

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-init-tensor-elimination.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-init-tensor-elimination.mlir
index 2dd2d534c8dd5..a54b42bf631df 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-init-tensor-elimination.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis-init-tensor-elimination.mlir
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: func @buffer_forwarding_conflict
-func @buffer_forwarding_conflict(%arg0: tensor<?xf32> {linalg.inplaceable = true}, %arg1: index) -> (tensor<?xf32>, tensor<?xf32>) {
+func.func @buffer_forwarding_conflict(%arg0: tensor<?xf32> {linalg.inplaceable = true}, %arg1: index) -> (tensor<?xf32>, tensor<?xf32>) {
   %cst = arith.constant 0.000000e+00 : f32
   //      CHECK: tensor.extract_slice
   // CHECK-SAME: {__inplace_operands_attr__ = ["false", "none"]
@@ -34,7 +34,7 @@ func @buffer_forwarding_conflict(%arg0: tensor<?xf32> {linalg.inplaceable = true
 // -----
 
 // CHECK-LABEL: func @buffer_forwarding_no_conflict
-func @buffer_forwarding_no_conflict(%arg0: tensor<?xf32> {linalg.inplaceable = true}, %arg1: index) -> (tensor<?xf32>, tensor<?xf32>) {
+func.func @buffer_forwarding_no_conflict(%arg0: tensor<?xf32> {linalg.inplaceable = true}, %arg1: index) -> (tensor<?xf32>, tensor<?xf32>) {
   %cst = arith.constant 0.000000e+00 : f32
   //      CHECK: tensor.extract_slice
   // CHECK-SAME: {__inplace_operands_attr__ = ["true", "none"]

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
index ea088e9d3684d..9d5d42199bb20 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
@@ -12,7 +12,7 @@
 // -----
 
 // CHECK-LABEL: func @extract_slice_fun(
-func @extract_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = false},
+func.func @extract_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = false},
 //  CHECK-SAME:          bufferization.access = "read"
                         %B : tensor<?xf32> {linalg.inplaceable = true})
 //  CHECK-SAME:         bufferization.access = "read"
@@ -36,7 +36,7 @@ func @extract_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = false},
 // -----
 
 // CHECK-LABEL: func @insert_slice_fun(
-func @insert_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = false},
+func.func @insert_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = false},
 //  CHECK-SAME:        bufferization.access = "read"
                        %B : tensor<?xf32> {linalg.inplaceable = true},
 //  CHECK-SAME:        bufferization.access = "read-write"
@@ -62,7 +62,7 @@ func @insert_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = false},
 // -----
 
 // CHECK-LABEL: func @conflict_on_B(
-func @conflict_on_B(%A : tensor<4x4xf32> {linalg.inplaceable = true},
+func.func @conflict_on_B(%A : tensor<4x4xf32> {linalg.inplaceable = true},
 //  CHECK-SAME:     bufferization.access = "read"
                     %B : tensor<4x4xf32> {linalg.inplaceable = true})
 //  CHECK-SAME:     bufferization.access = "read-write"
@@ -101,7 +101,7 @@ func @conflict_on_B(%A : tensor<4x4xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @extract_slice_extract_slice(
-func @extract_slice_extract_slice(
+func.func @extract_slice_extract_slice(
     %A : tensor<?xf32> {linalg.inplaceable = true},
 //  CHECK-SAME:         bufferization.access = "read"
     %B : tensor<?xf32> {linalg.inplaceable = false})
@@ -130,7 +130,7 @@ func @extract_slice_extract_slice(
 // -----
 
 // CHECK-LABEL: func @insert_slice_insert_slice(
-func @insert_slice_insert_slice(
+func.func @insert_slice_insert_slice(
     %A : tensor<?xf32> {linalg.inplaceable = true},
 //  CHECK-SAME:         bufferization.access = "read-write"
     %A2 : tensor<4xf32> {linalg.inplaceable = true},
@@ -165,7 +165,7 @@ func @insert_slice_insert_slice(
 // -----
 
 // CHECK-LABEL: func @extract_slice_nonmatching_insert_slice
-func @extract_slice_nonmatching_insert_slice(
+func.func @extract_slice_nonmatching_insert_slice(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %B : tensor<?xf32> {linalg.inplaceable = false},
     %idx: index)
@@ -204,7 +204,7 @@ func @extract_slice_nonmatching_insert_slice(
 // -----
 
 // CHECK-LABEL: func @extract_slice_matching_insert_slice
-func @extract_slice_matching_insert_slice(
+func.func @extract_slice_matching_insert_slice(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %B : tensor<?xf32> {linalg.inplaceable = false})
   -> (tensor<?xf32>, tensor<?xf32>)
@@ -242,7 +242,7 @@ func @extract_slice_matching_insert_slice(
 // -----
 
 // CHECK-LABEL: @read_of_matching_insert_slice_source
-func @read_of_matching_insert_slice_source(
+func.func @read_of_matching_insert_slice_source(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %idx : index,
     %idx2 : index)
@@ -273,7 +273,7 @@ func @read_of_matching_insert_slice_source(
 // -----
 
 // CHECK-LABEL: @read_of_matching_insert_slice_source_interleaved
-func @read_of_matching_insert_slice_source_interleaved(
+func.func @read_of_matching_insert_slice_source_interleaved(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %idx : index,
     %idx2 : index,
@@ -317,7 +317,7 @@ func @read_of_matching_insert_slice_source_interleaved(
 // -----
 
 // CHECK-LABEL: func @extract_slice_linalg_readonly_use
-func @extract_slice_linalg_readonly_use(
+func.func @extract_slice_linalg_readonly_use(
     %A : tensor<?x?xf32> {linalg.inplaceable = false},
     %B : tensor<4x4xf32> {linalg.inplaceable = false},
     %C : tensor<4x4xf32> {linalg.inplaceable = true})
@@ -351,7 +351,7 @@ func @extract_slice_linalg_readonly_use(
 // -----
 
 // CHECK-LABEL: func @extract_slice_to_linalg_write_use
-func @extract_slice_to_linalg_write_use(
+func.func @extract_slice_to_linalg_write_use(
     %A : tensor<4x4xf32> {linalg.inplaceable = false},
     %B : tensor<?x?xf32> {linalg.inplaceable = false},
     %C : tensor<?x?xf32> {linalg.inplaceable = true})
@@ -391,7 +391,7 @@ func @extract_slice_to_linalg_write_use(
 // -----
 
 // CHECK-LABEL: func @insert_slice_double_extract_slice
-func @insert_slice_double_extract_slice(
+func.func @insert_slice_double_extract_slice(
     %s1: index,
     %s2: index,
     %s3: index,
@@ -429,7 +429,7 @@ func @insert_slice_double_extract_slice(
 // -----
 
 // CHECK-LABEL: func @extract_slice_to_linalg_write_use
-func @extract_slice_to_linalg_write_use(
+func.func @extract_slice_to_linalg_write_use(
     %A : tensor<4x4xf32> {linalg.inplaceable = false},
     %B : tensor<?x?xf32> {linalg.inplaceable = false},
     %C : tensor<?x?xf32> {linalg.inplaceable = true})
@@ -471,7 +471,7 @@ func @extract_slice_to_linalg_write_use(
 // -----
 
 // CHECK-LABEL: func @nested_extract_slice_and_insert
-func @nested_extract_slice_and_insert(
+func.func @nested_extract_slice_and_insert(
     %A : tensor<?x?xf32> {linalg.inplaceable = false},
     %B : tensor<?x?xf32> {linalg.inplaceable = true},
     %C : tensor<?x?xf32> {linalg.inplaceable = true},
@@ -563,7 +563,7 @@ func @nested_extract_slice_and_insert(
 // -----
 
 // CHECK-LABEL: func @scf_for_yield_only
-func @scf_for_yield_only(
+func.func @scf_for_yield_only(
     %A : tensor<?xf32> {linalg.inplaceable = false},
     %B : tensor<?xf32> {linalg.inplaceable = true},
     %lb : index,
@@ -595,7 +595,7 @@ func @scf_for_yield_only(
 // -----
 
 // CHECK-LABEL: func @scf_for_with_tensor.insert_slice
-func @scf_for_with_tensor.insert_slice(
+func.func @scf_for_with_tensor.insert_slice(
     %A : tensor<?xf32> {linalg.inplaceable = false},
     %B : tensor<?xf32> {linalg.inplaceable = true},
     %C : tensor<4xf32> {linalg.inplaceable = false},
@@ -630,10 +630,10 @@ func @scf_for_with_tensor.insert_slice(
 
 // -----
 
-func private @some_use(tensor<?xf32>) -> ()
+func.func private @some_use(tensor<?xf32>) -> ()
 
 // CHECK-LABEL: func @scf_for_deps
-func @scf_for_deps(
+func.func @scf_for_deps(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %B : tensor<?xf32> {linalg.inplaceable = true},
     %lb : index,
@@ -677,10 +677,10 @@ func @scf_for_deps(
 // Cross function boundary cases.
 //===----------------------------------------------------------------------===//
 
-func private @foo(tensor<64xf32>)
+func.func private @foo(tensor<64xf32>)
 
 // CHECK-LABEL: dependence_through_call
-func @dependence_through_call(%I : tensor<64xf32> {linalg.inplaceable = true}) {
+func.func @dependence_through_call(%I : tensor<64xf32> {linalg.inplaceable = true}) {
   %f1 = arith.constant 1.000000e+00 : f32
   %f2 = arith.constant 2.000000e+00 : f32
 
@@ -704,14 +704,14 @@ func @dependence_through_call(%I : tensor<64xf32> {linalg.inplaceable = true}) {
 
 // -----
 
-func private @foo(tensor<64xf32>)
+func.func private @foo(tensor<64xf32>)
 
-func private @bar(%A : tensor<64xf32>) {
+func.func private @bar(%A : tensor<64xf32>) {
   call @foo(%A) : (tensor<64xf32>) -> ()
   return
 }
 
-func @read_dependence_through_scf_and_call(
+func.func @read_dependence_through_scf_and_call(
     %I : tensor<64xf32> {linalg.inplaceable = true},
     %I2 : tensor<64xf32> {linalg.inplaceable = true}) {
   %c0 = arith.constant 0 : index
@@ -769,7 +769,7 @@ func @read_dependence_through_scf_and_call(
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: func @write_into_constant_via_alias
-func @write_into_constant_via_alias(%v : vector<5xi32>,
+func.func @write_into_constant_via_alias(%v : vector<5xi32>,
                                     %s1 : index, %s2 : index,
                                     %s3 : index) -> tensor<?xi32> {
   %A = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
@@ -875,7 +875,7 @@ func.func @matmul_on_tensors(
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: func @insert_slice_chain(
-func @insert_slice_chain(
+func.func @insert_slice_chain(
     %v1: vector<32x90xf32>,
     %v2: vector<30x90xf32>,
     %arg0: tensor<62x126xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
@@ -926,7 +926,7 @@ func @insert_slice_chain(
 
 // Only test IR validity wrt dominance.
 // CHECK-LABEL: func @ip
-func @ip(%t: tensor<10x20xf32> {linalg.inplaceable = true},
+func.func @ip(%t: tensor<10x20xf32> {linalg.inplaceable = true},
          %x: index, %y: index, %v: vector<5x6xf32>)
   -> tensor<10x20xf32>
 {
@@ -959,7 +959,7 @@ func @ip(%t: tensor<10x20xf32> {linalg.inplaceable = true},
 }
 
 // CHECK-LABEL: func @linalg_op_same_out_tensors(
-func @linalg_op_same_out_tensors(
+func.func @linalg_op_same_out_tensors(
     %t1: tensor<?xf32> {linalg.inplaceable = true},
 // CHECK-SAME:          bufferization.access = "read"
     %t2: tensor<?xf32> {linalg.inplaceable = true})
@@ -993,7 +993,7 @@ func @linalg_op_same_out_tensors(
 }
 
 // CHECK-LABEL: func @linalg_op_same_out_tensors_2(
-func @linalg_op_same_out_tensors_2(
+func.func @linalg_op_same_out_tensors_2(
     %t1: tensor<?xf32> {linalg.inplaceable = true},
 // CHECK-SAME:          bufferization.access = "read"
     %t2: tensor<?xf32> {linalg.inplaceable = true})
@@ -1017,7 +1017,7 @@ func @linalg_op_same_out_tensors_2(
 // -----
 
 // CHECK-LABEL: func @double_insert_slice_into_alias
-func @double_insert_slice_into_alias(
+func.func @double_insert_slice_into_alias(
     %v1: vector<32x90xf32>,
     %v2: vector<30x90xf32>,
     %arg2: tensor<62x90xf32> {linalg.inplaceable = true},
@@ -1060,7 +1060,7 @@ func @double_insert_slice_into_alias(
 // -----
 
 // CHECK-LABEL: func @interleaved_extract_insert_slice_chain_1
-func @interleaved_extract_insert_slice_chain_1(
+func.func @interleaved_extract_insert_slice_chain_1(
     %arg2: tensor<62x90xf32> {linalg.inplaceable = true})
   -> (tensor<62x90xf32>)
 {
@@ -1091,7 +1091,7 @@ func @interleaved_extract_insert_slice_chain_1(
 // -----
 
 // CHECK-LABEL: func @interleaved_extract_insert_slice_chain_2
-func @interleaved_extract_insert_slice_chain_2(
+func.func @interleaved_extract_insert_slice_chain_2(
     %arg2: tensor<62x90xf32> {linalg.inplaceable = true})
   -> (tensor<62x90xf32>)
 {
@@ -1122,7 +1122,7 @@ func @interleaved_extract_insert_slice_chain_2(
 // -----
 
 // CHECK-LABEL: func @extract_once_insert_twice
-func @extract_once_insert_twice(
+func.func @extract_once_insert_twice(
     %arg2: tensor<62x90xf32> {linalg.inplaceable = true})
   -> (tensor<62x90xf32>)
 {
@@ -1154,7 +1154,7 @@ func @extract_once_insert_twice(
 }
 
 // CHECK-LABEL: func @reading_scf_for
-func @reading_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @reading_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %s: index, %v: vector<5xf32>) -> (tensor<?xf32>, vector<5xf32>) {
 
   %c0 = arith.constant 0 : index
@@ -1201,7 +1201,7 @@ func @reading_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
 }
 
 // CHECK-LABEL: func @non_reading_scf_for
-func @non_reading_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @non_reading_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
                           %s: index, %v: vector<5xf32>) -> (tensor<?xf32>, vector<5xf32>) {
 
   %c0 = arith.constant 0 : index
@@ -1250,7 +1250,7 @@ func @non_reading_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
 
 // This example passes analysis, but it fails when bufferizing.
 // CHECK-LABEL: func @scf_if_inplace1
-func @scf_if_inplace1(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_inplace1(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %t2: tensor<?xf32> {linalg.inplaceable = true},
                       %cond: i1) -> tensor<?xf32> {
   %r = scf.if %cond -> (tensor<?xf32>) {
@@ -1268,7 +1268,7 @@ func @scf_if_inplace1(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_inplace2
-func @scf_if_inplace2(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_inplace2(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %v: vector<5xf32>, %idx: index,
                       %cond: i1) -> tensor<?xf32> {
   %r = scf.if %cond -> (tensor<?xf32>) {
@@ -1289,7 +1289,7 @@ func @scf_if_inplace2(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_inplace3
-func @scf_if_inplace3(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_inplace3(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %v1: vector<5xf32>, %v2: vector<5xf32>, %idx: index,
                       %cond: i1) -> tensor<?xf32> {
   //      CHECK: tensor.extract_slice
@@ -1317,7 +1317,7 @@ func @scf_if_inplace3(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_in_place4
-func @scf_if_in_place4(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_in_place4(%t1: tensor<?xf32> {linalg.inplaceable = true},
                        %v: vector<5xf32>, %idx: index,
                        %cond: i1, %cond2: i1) -> (tensor<?xf32>, vector<10xf32>) {
   %cst = arith.constant 0.0 : f32
@@ -1353,7 +1353,7 @@ func @scf_if_in_place4(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_inplace5
-func @scf_if_inplace5(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_inplace5(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %idx: index, %cond: i1) -> tensor<?xf32> {
   %r = scf.if %cond -> (tensor<?xf32>) {
     //      CHECK: tensor.extract_slice
@@ -1385,7 +1385,7 @@ func @scf_if_inplace5(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_inplace6
-func @scf_if_inplace6(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_inplace6(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %v1: vector<5xf32>, %v2: vector<5xf32>,
                       %v3: vector<5xf32>, %idx: index,
                       %cond: i1, %cond2: i1) -> tensor<?xf32> {
@@ -1426,7 +1426,7 @@ func @scf_if_inplace6(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_inplace7
-func @scf_if_inplace7(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_inplace7(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %v1: vector<5xf32>, %v2: vector<5xf32>, %idx: index,
                       %idx2: index, %cond: i1) -> (tensor<?xf32>, vector<5xf32>) {
   %cst = arith.constant 0.0 : f32
@@ -1456,7 +1456,7 @@ func @scf_if_inplace7(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_out_of_place1a
-func @scf_if_out_of_place1a(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_out_of_place1a(%t1: tensor<?xf32> {linalg.inplaceable = true},
                             %idx: index, %idx2: index,
                             %cond: i1) -> tensor<?xf32> {
   %r = scf.if %cond -> (tensor<?xf32>) {
@@ -1483,7 +1483,7 @@ func @scf_if_out_of_place1a(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_out_of_place1b
-func @scf_if_out_of_place1b(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_out_of_place1b(%t1: tensor<?xf32> {linalg.inplaceable = true},
                             %idx: index, %idx2: index, %idx3: index,
                             %cond: i1) -> tensor<?xf32> {
   %r = scf.if %cond -> (tensor<?xf32>) {
@@ -1519,7 +1519,7 @@ func @scf_if_out_of_place1b(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_out_of_place1c
-func @scf_if_out_of_place1c(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_out_of_place1c(%t1: tensor<?xf32> {linalg.inplaceable = true},
                             %idx: index, %idx2: index, %cond: i1) -> tensor<?xf32> {
   %r = scf.if %cond -> (tensor<?xf32>) {
     //      CHECK: tensor.extract_slice
@@ -1550,7 +1550,7 @@ func @scf_if_out_of_place1c(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_out_of_place2
-func @scf_if_out_of_place2(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_out_of_place2(%t1: tensor<?xf32> {linalg.inplaceable = true},
                            %v: vector<5xf32>, %idx: index,
                            %cond: i1) -> (tensor<?xf32>, vector<10xf32>) {
   %cst = arith.constant 0.0 : f32
@@ -1574,7 +1574,7 @@ func @scf_if_out_of_place2(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @scf_if_out_of_place3
-func @scf_if_out_of_place3(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_out_of_place3(%t1: tensor<?xf32> {linalg.inplaceable = true},
                            %v: vector<5xf32>, %idx: index,
                            %cond: i1, %cond2: i1) -> (tensor<?xf32>, vector<10xf32>) {
   %cst = arith.constant 0.0 : f32
@@ -1605,7 +1605,7 @@ func @scf_if_out_of_place3(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @some_use
-func @some_use(%A : tensor<?xf32> {linalg.inplaceable = true},
+func.func @some_use(%A : tensor<?xf32> {linalg.inplaceable = true},
                %v : vector<5xf32>) -> (tensor<?xf32>) {
   %idx = arith.constant 0 : index
   //      CHECK: vector.transfer_write
@@ -1616,7 +1616,7 @@ func @some_use(%A : tensor<?xf32> {linalg.inplaceable = true},
 
 
 // CHECK-LABEL: func @main_func
-func @main_func(%A : tensor<?xf32> {linalg.inplaceable = true},
+func.func @main_func(%A : tensor<?xf32> {linalg.inplaceable = true},
                 %v : vector<5xf32>) -> (tensor<?xf32>) {
   //      CHECK: call
   // CHECK-SAME: {__inplace_operands_attr__ = ["true", "none"]
@@ -1627,7 +1627,7 @@ func @main_func(%A : tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @to_tensor_op_not_writable
-func @to_tensor_op_not_writable(%m: memref<?xf32>, %v:  vector<5xf32>,
+func.func @to_tensor_op_not_writable(%m: memref<?xf32>, %v:  vector<5xf32>,
                                 %idx1: index, %idx2: index)
     -> vector<10xf32> {
   %0 = bufferization.to_tensor %m : memref<?xf32>
@@ -1646,7 +1646,7 @@ func @to_tensor_op_not_writable(%m: memref<?xf32>, %v:  vector<5xf32>,
 // -----
 
 // CHECK-LABEL: func @to_memref_op_is_reading
-func @to_memref_op_is_reading(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @to_memref_op_is_reading(%t1: tensor<?xf32> {linalg.inplaceable = true},
                               %idx1: index, %idx2: index, %idx3: index,
                               %v1: vector<5xf32>)
     -> (vector<5xf32>, vector<5xf32>) {
@@ -1668,13 +1668,13 @@ func @to_memref_op_is_reading(%t1: tensor<?xf32> {linalg.inplaceable = true},
 // -----
 
 // CHECK-LABEL: func @inner_func
-func @inner_func(%t: tensor<?xf32>) -> tensor<?xf32> {
+func.func @inner_func(%t: tensor<?xf32>) -> tensor<?xf32> {
   //      CHECK: return
   // CHECK-SAME: __equivalent_func_args__ = [0]
   return %t : tensor<?xf32>
 }
 
-func @equivalent_func_arg(%c0: index, %c10: index, %c1: index, %t0: tensor<?xf32>) -> tensor<?xf32> {
+func.func @equivalent_func_arg(%c0: index, %c10: index, %c1: index, %t0: tensor<?xf32>) -> tensor<?xf32> {
   // This test does not check IR. It just asserts there is no failure due to
   // non-equivalent scf.for yield values.
   %1 = scf.for %iv = %c0 to %c10 step %c1 iter_args(%t1 = %t0) -> (tensor<?xf32>) {
@@ -1687,7 +1687,7 @@ func @equivalent_func_arg(%c0: index, %c10: index, %c1: index, %t0: tensor<?xf32
 // -----
 
 // CHECK-LABEL: func @inner_func_2
-func @inner_func_2(%t: tensor<?xf32>) -> tensor<?xf32> {
+func.func @inner_func_2(%t: tensor<?xf32>) -> tensor<?xf32> {
   %f = arith.constant 1.0 : f32
   %c0 = arith.constant 0 : index
   %0 = tensor.insert %f into %t[%c0] : tensor<?xf32>
@@ -1696,7 +1696,7 @@ func @inner_func_2(%t: tensor<?xf32>) -> tensor<?xf32> {
   return %0 : tensor<?xf32>
 }
 
-func @equivalent_func_arg_2(%c0: index, %c10: index, %c1: index, %t0: tensor<?xf32>) -> tensor<?xf32> {
+func.func @equivalent_func_arg_2(%c0: index, %c10: index, %c1: index, %t0: tensor<?xf32>) -> tensor<?xf32> {
   // This test does not check IR. It just asserts there is no failure due to
   // non-equivalent scf.for yield values.
   %1 = scf.for %iv = %c0 to %c10 step %c1 iter_args(%t1 = %t0) -> (tensor<?xf32>) {
@@ -1710,7 +1710,7 @@ func @equivalent_func_arg_2(%c0: index, %c10: index, %c1: index, %t0: tensor<?xf
 
 // CHECK-LABEL: func @write_after_select_read_one
 //  CHECK-SAME:     %[[t1:.*]]: tensor<?xf32> {{.*}}, %[[t2:.*]]: tensor<?xf32>
-func @write_after_select_read_one(
+func.func @write_after_select_read_one(
     %t1 : tensor<?xf32> {linalg.inplaceable = true},
     %t2 : tensor<?xf32> {linalg.inplaceable = true},
     %c : i1)
@@ -1736,7 +1736,7 @@ func @write_after_select_read_one(
 
 // CHECK-LABEL: func @write_after_select_read_both
 //  CHECK-SAME:     %[[t1:.*]]: tensor<?xf32> {{.*}}, %[[t2:.*]]: tensor<?xf32>
-func @write_after_select_read_both(
+func.func @write_after_select_read_both(
     %t1 : tensor<?xf32> {linalg.inplaceable = true},
     %t2 : tensor<?xf32> {linalg.inplaceable = true},
     %c : i1)
@@ -1765,7 +1765,7 @@ func @write_after_select_read_both(
 
 // CHECK-LABEL: func @write_after_select_no_conflict
 //  CHECK-SAME:     %[[t1:.*]]: tensor<?xf32> {{.*}}, %[[t2:.*]]: tensor<?xf32>
-func @write_after_select_no_conflict(
+func.func @write_after_select_no_conflict(
     %t1 : tensor<?xf32> {linalg.inplaceable = true},
     %t2 : tensor<?xf32> {linalg.inplaceable = true},
     %c : i1)
@@ -1790,7 +1790,7 @@ func @write_after_select_no_conflict(
 // -----
 
 // CHECK-LABEL: func @write_to_same_tensor_in_loop_out_of_place(
-func @write_to_same_tensor_in_loop_out_of_place(
+func.func @write_to_same_tensor_in_loop_out_of_place(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %B : tensor<?xf32> {linalg.inplaceable = true},
     %lb : index, %ub : index, %step : index, %sz: index)
@@ -1818,7 +1818,7 @@ func @write_to_same_tensor_in_loop_out_of_place(
 // -----
 
 // CHECK-LABEL: func @write_to_same_tensor_in_loop_in_place(
-func @write_to_same_tensor_in_loop_in_place(
+func.func @write_to_same_tensor_in_loop_in_place(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %lb : index, %ub : index, %step : index, %sz: index)
   -> (tensor<?xf32>)

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-init-tensor-elimination.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-init-tensor-elimination.mlir
index 36232ab8069a4..2beed95f122fa 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-init-tensor-elimination.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-init-tensor-elimination.mlir
@@ -5,7 +5,7 @@
 //      CHECK: func @buffer_forwarding_conflict(
 // CHECK-SAME:   %[[FUNC_ARG:[0-9a-zA-Z]*]]: memref<?xf32>
 // CHECK-SAME:   %[[sz:[0-9a-zA-Z]*]]: index
-func @buffer_forwarding_conflict(
+func.func @buffer_forwarding_conflict(
   %t: tensor<?xf32> {linalg.buffer_layout = affine_map<(d0) -> (d0)>, linalg.inplaceable = true},
   %sz: index)
     -> (tensor<?xf32>, tensor<?xf32>)
@@ -42,7 +42,7 @@ func @buffer_forwarding_conflict(
 //      CHECK: func @buffer_forwarding_no_conflict(
 // CHECK-SAME:   %[[FUNC_ARG:[0-9a-zA-Z]*]]: memref<?xf32>
 // CHECK-SAME:   %[[sz:[0-9a-zA-Z]*]]: index
-func @buffer_forwarding_no_conflict(
+func.func @buffer_forwarding_no_conflict(
   %t: tensor<?xf32> {linalg.buffer_layout = affine_map<(d0) -> (d0)>, linalg.inplaceable = true},
   %sz: index)
     -> (tensor<?xf32>)
@@ -67,7 +67,7 @@ func @buffer_forwarding_no_conflict(
 
 //      CHECK: func @insertion_point_inside_loop(
 // CHECK-SAME:     %[[t:.*]]: memref<?xf32, #{{.*}}>, %[[sz:.*]]: index)
-func @insertion_point_inside_loop(%t : tensor<?xf32>, %sz : index) -> (tensor<?xf32>) {
+func.func @insertion_point_inside_loop(%t : tensor<?xf32>, %sz : index) -> (tensor<?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c5 = arith.constant 5 : index
@@ -96,7 +96,7 @@ func @insertion_point_inside_loop(%t : tensor<?xf32>, %sz : index) -> (tensor<?x
 
 //      CHECK: func @insertion_point_outside_loop(
 // CHECK-SAME:     %[[t:.*]]: memref<?xf32, #{{.*}}>, %[[sz:.*]]: index, %[[idx:.*]]: index)
-func @insertion_point_outside_loop(%t : tensor<?xf32>, %sz : index,
+func.func @insertion_point_outside_loop(%t : tensor<?xf32>, %sz : index,
                                    %idx : index) -> (tensor<?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir
index 85abc8883d208..3f564b942c378 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir
@@ -1,8 +1,8 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize -split-input-file -verify-diagnostics
 
-func private @foo() -> tensor<?xf32>
+func.func private @foo() -> tensor<?xf32>
 
-func @bar() -> tensor<?xf32> {
+func.func @bar() -> tensor<?xf32> {
   %foo = constant @foo : () -> (tensor<?xf32>)
 // expected-error @+1 {{expected a CallOp}}
   %res = call_indirect %foo() : () -> (tensor<?xf32>)
@@ -12,12 +12,12 @@ func @bar() -> tensor<?xf32> {
 // -----
 
 // expected-error @+1 {{cannot bufferize bodiless function that returns a tensor}}
-func private @foo() -> tensor<?xf32>
+func.func private @foo() -> tensor<?xf32>
 
 // -----
 
 // expected-error @+1 {{cannot bufferize a FuncOp with tensors and without a unique ReturnOp}}
-func @swappy(%cond1 : i1, %cond2 : i1, %t1 : tensor<f32>, %t2 : tensor<f32>)
+func.func @swappy(%cond1 : i1, %cond2 : i1, %t1 : tensor<f32>, %t2 : tensor<f32>)
     -> (tensor<f32>, tensor<f32>)
 {
   cf.cond_br %cond1, ^bb1, ^bb2
@@ -35,7 +35,7 @@ func @swappy(%cond1 : i1, %cond2 : i1, %t1 : tensor<f32>, %t2 : tensor<f32>)
 
 // -----
 
-func @scf_if_not_equivalent(
+func.func @scf_if_not_equivalent(
     %cond: i1, %t1: tensor<?xf32> {linalg.inplaceable = true},
     %idx: index) -> tensor<?xf32> {
   %r = scf.if %cond -> (tensor<?xf32>) {
@@ -52,7 +52,7 @@ func @scf_if_not_equivalent(
 
 // -----
 
-func @scf_if_not_aliasing(
+func.func @scf_if_not_aliasing(
     %cond: i1, %t1: tensor<?xf32> {linalg.inplaceable = true},
     %idx: index) -> f32 {
   %r = scf.if %cond -> (tensor<?xf32>) {
@@ -71,19 +71,19 @@ func @scf_if_not_aliasing(
 
 // expected-error @-3 {{expected callgraph to be free of circular dependencies}}
 
-func @foo() {
+func.func @foo() {
   call @bar() : () -> ()
   return
 }
 
-func @bar() {
+func.func @bar() {
   call @foo() : () -> ()
   return
 }
 
 // -----
 
-func @scf_for(%A : tensor<?xf32>,
+func.func @scf_for(%A : tensor<?xf32>,
               %B : tensor<?xf32> {linalg.inplaceable = true},
               %C : tensor<4xf32>,
               %lb : index, %ub : index, %step : index)
@@ -109,14 +109,14 @@ func @scf_for(%A : tensor<?xf32>,
 
 // -----
 
-func private @fun_with_side_effects(%A: tensor<?xf32> {linalg.inplaceable = true})
+func.func private @fun_with_side_effects(%A: tensor<?xf32> {linalg.inplaceable = true})
 
-func @foo(%A: tensor<?xf32> {linalg.inplaceable = true}) -> (tensor<?xf32>) {
+func.func @foo(%A: tensor<?xf32> {linalg.inplaceable = true}) -> (tensor<?xf32>) {
   call @fun_with_side_effects(%A) : (tensor<?xf32>) -> ()
   return %A: tensor<?xf32>
 }
 
-func @scf_yield_needs_copy(%A : tensor<?xf32> {linalg.inplaceable = true}, %iters : index) {
+func.func @scf_yield_needs_copy(%A : tensor<?xf32> {linalg.inplaceable = true}, %iters : index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %res = scf.for %arg0 = %c0 to %iters step %c1 iter_args(%bbarg = %A) -> (tensor<?xf32>) {
@@ -130,7 +130,7 @@ func @scf_yield_needs_copy(%A : tensor<?xf32> {linalg.inplaceable = true}, %iter
 
 // -----
 
-func @extract_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = true})
+func.func @extract_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = true})
   ->  tensor<4xf32>
 {
   // This bufferizes to a pattern that the cross-function boundary pass needs to
@@ -147,7 +147,7 @@ func @extract_slice_fun(%A : tensor<?xf32> {linalg.inplaceable = true})
 
 // -----
 
-func @scf_yield(%b : i1, %A : tensor<4xf32>, %B : tensor<4xf32>) -> tensor<4xf32>
+func.func @scf_yield(%b : i1, %A : tensor<4xf32>, %B : tensor<4xf32>) -> tensor<4xf32>
 {
   %r = scf.if %b -> (tensor<4xf32>) {
     scf.yield %A : tensor<4xf32>
@@ -160,7 +160,7 @@ func @scf_yield(%b : i1, %A : tensor<4xf32>, %B : tensor<4xf32>) -> tensor<4xf32
 
 // -----
 
-func @unknown_op(%A : tensor<4xf32>) -> tensor<4xf32>
+func.func @unknown_op(%A : tensor<4xf32>) -> tensor<4xf32>
 {
   // expected-error: @+1 {{op was not bufferized}}
   %r = "marklar"(%A) : (tensor<4xf32>) -> (tensor<4xf32>)
@@ -170,7 +170,7 @@ func @unknown_op(%A : tensor<4xf32>) -> tensor<4xf32>
 
 // -----
 
-func @mini_test_case1() -> tensor<10x20xf32> {
+func.func @mini_test_case1() -> tensor<10x20xf32> {
   %f0 = arith.constant 0.0 : f32
   %t = linalg.init_tensor [10, 20] : tensor<10x20xf32>
   %r = linalg.fill ins(%f0 : f32) outs(%t : tensor<10x20xf32>) -> tensor<10x20xf32>
@@ -180,7 +180,7 @@ func @mini_test_case1() -> tensor<10x20xf32> {
 
 // -----
 
-func @main() -> tensor<4xi32> {
+func.func @main() -> tensor<4xi32> {
   %r = scf.execute_region -> tensor<4xi32> {
     %A = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
     scf.yield %A: tensor<4xi32>
@@ -192,7 +192,7 @@ func @main() -> tensor<4xi32> {
 
 // -----
 
-func @to_memref_op_is_writing(
+func.func @to_memref_op_is_writing(
     %t1: tensor<?xf32> {linalg.inplaceable = true}, %idx1: index,
     %idx2: index, %idx3: index, %v1: vector<5xf32>) -> (vector<5xf32>, vector<5xf32>) {
   // This is a RaW conflict because to_memref is an inplace write and %t1 is
@@ -213,16 +213,16 @@ func @to_memref_op_is_writing(
 // -----
 
 // expected-error @+1 {{cannot bufferize bodiless function that returns a tensor}}
-func private @foo(%t : tensor<?xf32>) -> (f32, tensor<?xf32>, f32)
+func.func private @foo(%t : tensor<?xf32>) -> (f32, tensor<?xf32>, f32)
 
-func @call_to_unknown_tensor_returning_func(%t : tensor<?xf32>) {
+func.func @call_to_unknown_tensor_returning_func(%t : tensor<?xf32>) {
   call @foo(%t) : (tensor<?xf32>) -> (f32, tensor<?xf32>, f32)
   return
 }
 
 // -----
 
-func @foo(%t : tensor<5xf32>) -> (tensor<5xf32>) {
+func.func @foo(%t : tensor<5xf32>) -> (tensor<5xf32>) {
   %0 = linalg.init_tensor [5] : tensor<5xf32>
   // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
   return %0 : tensor<5xf32>
@@ -230,14 +230,14 @@ func @foo(%t : tensor<5xf32>) -> (tensor<5xf32>) {
 
 // Note: This function is not analyzed because there was an error in the
 // previous one.
-func @call_to_func_returning_non_equiv_tensor(%t : tensor<5xf32>) {
+func.func @call_to_func_returning_non_equiv_tensor(%t : tensor<5xf32>) {
   call @foo(%t) : (tensor<5xf32>) -> (tensor<5xf32>)
   return
 }
 
 // -----
 
-func @destination_passing_style_dominance_test_1(%cst : f32, %idx : index,
+func.func @destination_passing_style_dominance_test_1(%cst : f32, %idx : index,
                                                  %idx2 : index) -> f32 {
   %0 = scf.execute_region -> tensor<?xf32> {
     %1 = linalg.init_tensor [%idx] : tensor<?xf32>
@@ -251,7 +251,7 @@ func @destination_passing_style_dominance_test_1(%cst : f32, %idx : index,
 
 // -----
 
-func @destination_passing_style_dominance_test_2(%cst : f32, %idx : index,
+func.func @destination_passing_style_dominance_test_2(%cst : f32, %idx : index,
                                                  %idx2 : index) -> f32 {
   %1 = linalg.init_tensor [%idx] : tensor<?xf32>
 

diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
index 7ed7bd13321b1..5fef35f3d4c19 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
@@ -10,7 +10,7 @@
 
 // CHECK-LABEL: func @transfer_read(%{{.*}}: memref<?xf32, #map>) -> vector<4xf32> {
 // CHECK-NO-LAYOUT-MAP-LABEL: func @transfer_read(%{{.*}}: memref<?xf32>) -> vector<4xf32>
-func @transfer_read(
+func.func @transfer_read(
     %A : tensor<?xf32> {linalg.inplaceable = false})
   -> (vector<4xf32>)
 {
@@ -31,7 +31,7 @@ func @transfer_read(
 // CHECK-LABEL: func @fill_inplace(
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
 // CHECK-NO-LAYOUT-MAP-LABEL: func @fill_inplace(%{{.*}}: memref<?xf32>) {
-func @fill_inplace(
+func.func @fill_inplace(
     %A : tensor<?xf32> {linalg.inplaceable = true})
   -> tensor<?xf32>
 {
@@ -51,7 +51,7 @@ func @fill_inplace(
 // -----
 
 // CHECK-LABEL: func @tensor_extract(%{{.*}}: memref<?xf32, #{{.*}}>) -> f32 {
-func @tensor_extract(%A : tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
+func.func @tensor_extract(%A : tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
   %c0 = arith.constant 0 : index
 
 //       CHECK: %[[RES:.*]] = memref.load {{.*}} : memref<?xf32, #{{.*}}>
@@ -69,7 +69,7 @@ func @tensor_extract(%A : tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
 // CHECK-LABEL: func @not_inplace(
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>) -> memref<?xf32> {
 // CHECK-NO-LAYOUT-MAP-LABEL: func @not_inplace(%{{.*}}: memref<?xf32>) -> memref<?xf32>
-func @not_inplace(
+func.func @not_inplace(
     %A : tensor<?xf32> {linalg.inplaceable = false})
   -> tensor<?xf32>
 {
@@ -93,7 +93,7 @@ func @not_inplace(
 // CHECK-LABEL: func @not_inplace
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?x?xf32, #[[$map_2d_dyn]]>) {
 // CHECK-NO-LAYOUT-MAP-LABEL: func @not_inplace(%{{.*}}: memref<?x?xf32>) {
-func @not_inplace(
+func.func @not_inplace(
     %A : tensor<?x?xf32> {linalg.inplaceable = true})
   -> tensor<?x?xf32>
 {
@@ -120,7 +120,7 @@ func @not_inplace(
 // -----
 
 // CHECK-LABEL: func @not_inplace
-func @not_inplace(%A : tensor<?x?xf32> {linalg.inplaceable = true}) -> tensor<?x?xf32> {
+func.func @not_inplace(%A : tensor<?x?xf32> {linalg.inplaceable = true}) -> tensor<?x?xf32> {
   /// Within op multiple uses of %A, must alloc.
   // CHECK: alloc
   %r = linalg.matmul  ins(%A, %A: tensor<?x?xf32>, tensor<?x?xf32>)
@@ -132,7 +132,7 @@ func @not_inplace(%A : tensor<?x?xf32> {linalg.inplaceable = true}) -> tensor<?x
 // -----
 
 // CHECK-LABEL: func @vec_inplace
-func @vec_inplace(%A : tensor<?xf32> {linalg.inplaceable = true}, %vec : vector<4xf32>)
+func.func @vec_inplace(%A : tensor<?xf32> {linalg.inplaceable = true}, %vec : vector<4xf32>)
     -> tensor<?xf32>
 {
   %c0 = arith.constant 0 : index
@@ -151,7 +151,7 @@ func @vec_inplace(%A : tensor<?xf32> {linalg.inplaceable = true}, %vec : vector<
 
 // CHECK-LABEL: func @vec_not_inplace
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
-func @vec_not_inplace(%A : tensor<?xf32> {linalg.inplaceable = true}, %vec : vector<4xf32>)
+func.func @vec_not_inplace(%A : tensor<?xf32> {linalg.inplaceable = true}, %vec : vector<4xf32>)
     -> (tensor<?xf32>, tensor<?xf32>)
 {
   %c0 = arith.constant 0 : index
@@ -182,7 +182,7 @@ func @vec_not_inplace(%A : tensor<?xf32> {linalg.inplaceable = true}, %vec : vec
 //  CHECK-SAME:   %[[A1:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>,
 //  CHECK-SAME:   %[[t0:[a-zA-Z0-9]*]]: memref<4xf32, #[[$map_1d_dyn]]>,
 //  CHECK-SAME:   %[[t1:[a-zA-Z0-9]*]]: memref<4xf32, #[[$map_1d_dyn]]>
-func @insert_slice_fun(%A0 : tensor<?xf32> {linalg.inplaceable = false},
+func.func @insert_slice_fun(%A0 : tensor<?xf32> {linalg.inplaceable = false},
                        %A1 : tensor<?xf32> {linalg.inplaceable = true},
                        %t0 : tensor<4xf32> {linalg.inplaceable = false},
                        %t1 : tensor<4xf32> {linalg.inplaceable = true})
@@ -229,7 +229,7 @@ func @insert_slice_fun(%A0 : tensor<?xf32> {linalg.inplaceable = false},
 // CHECK-LABEL: func @insert_slice_fun
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
 //  CHECK-SAME:   %[[t:[a-zA-Z0-9]*]]: memref<4xf32, #[[$map_1d_dyn]]>
-func @insert_slice_fun(
+func.func @insert_slice_fun(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %t : tensor<4xf32> {linalg.inplaceable = false})
   -> tensor<?xf32>
@@ -257,7 +257,7 @@ func @insert_slice_fun(
 // CHECK-LABEL: func @insert_slice_fun
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
 //  CHECK-SAME:   %[[t:[a-zA-Z0-9]*]]: memref<4xf32, #[[$map_1d_dyn]]>
-func @insert_slice_fun(
+func.func @insert_slice_fun(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %t : tensor<4xf32> {linalg.inplaceable = false})
   -> tensor<?xf32>
@@ -285,7 +285,7 @@ func @insert_slice_fun(
 // CHECK-LABEL: func @insert_slice_fun_not_inplace
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
 //  CHECK-SAME:   %[[t:[a-zA-Z0-9]*]]: memref<4xf32, #[[$map_1d_dyn]]>
-func @insert_slice_fun_not_inplace(
+func.func @insert_slice_fun_not_inplace(
     %A : tensor<?xf32> {linalg.inplaceable = false},
     %t : tensor<4xf32> {linalg.inplaceable = false})
   -> tensor<?xf32>
@@ -312,7 +312,7 @@ func @insert_slice_fun_not_inplace(
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>,
 //  CHECK-SAME:   %[[t:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
 //  CHECK-SAME:   ) -> memref<?xf32> {
-func @scf_for_yield_only(%A : tensor<?xf32> {linalg.inplaceable = false},
+func.func @scf_for_yield_only(%A : tensor<?xf32> {linalg.inplaceable = false},
                          %B : tensor<?xf32> {linalg.inplaceable = true},
                          %lb : index, %ub : index, %step : index)
   -> (tensor<?xf32>, tensor<?xf32>)
@@ -342,7 +342,7 @@ func @scf_for_yield_only(%A : tensor<?xf32> {linalg.inplaceable = false},
 // just want to make sure that it does not crash.
 
 // CHECK-LABEL: func @nested_scf_for
-func @nested_scf_for(%A : tensor<?xf32> {linalg.inplaceable = true},
+func.func @nested_scf_for(%A : tensor<?xf32> {linalg.inplaceable = true},
                      %v : vector<5xf32>) -> tensor<?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -365,7 +365,7 @@ func @nested_scf_for(%A : tensor<?xf32> {linalg.inplaceable = true},
 //  CHECK-SAME:   %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
 //  CHECK-SAME:   %[[B:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
 //  CHECK-SAME:   %[[C:[a-zA-Z0-9]*]]: memref<4xf32, #[[$map_1d_dyn]]>
-func @scf_for_with_tensor.insert_slice(
+func.func @scf_for_with_tensor.insert_slice(
    %A : tensor<?xf32> {linalg.inplaceable = false},
    %B : tensor<?xf32> {linalg.inplaceable = true},
    %C : tensor<4xf32> {linalg.inplaceable = false},
@@ -407,7 +407,7 @@ func @scf_for_with_tensor.insert_slice(
 
 // CHECK-LABEL: func @execute_region_with_conflict(
 //  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
-func @execute_region_with_conflict(%t1 : tensor<?xf32> {linalg.inplaceable = "true"})
+func.func @execute_region_with_conflict(%t1 : tensor<?xf32> {linalg.inplaceable = "true"})
     -> (f32, tensor<?xf32>, f32)
 {
   %f1 = arith.constant 0.0 : f32
@@ -438,7 +438,7 @@ func @execute_region_with_conflict(%t1 : tensor<?xf32> {linalg.inplaceable = "tr
 // CHECK-SAME:   %[[A:[0-9a-zA-Z]*]]: memref<128x256xf32>
 // CHECK-SAME:   %[[B:[0-9a-zA-Z]*]]: memref<256x192xf32>
 // CHECK-SAME:   %[[C:[0-9a-zA-Z]*]]: memref<128x192xf32>
-func @matmul(
+func.func @matmul(
     %A: tensor<128x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %B: tensor<256x192xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
     %C: tensor<128x192xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -512,7 +512,7 @@ func @matmul(
 //       CHECK:   memref.copy %[[A]], %[[alloc]]
 //       CHECK:   %[[subview:.*]] = memref.subview %[[A]][{{.*}}] [4] [1] : {{.*}} to memref<4xf32
 //       CHECK:   memref.copy %[[alloc]], %[[subview]]
-func @tensor_cast_not_in_place(
+func.func @tensor_cast_not_in_place(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %B : tensor<?xf32> {linalg.inplaceable = false}, %idx: index)
   -> (tensor<?xf32>)
@@ -532,7 +532,7 @@ func @tensor_cast_not_in_place(
 /// errors in the def-use chains.
 
 // CHECK-LABEL: func @dominance_violation_bug_1
-func @dominance_violation_bug_1(
+func.func @dominance_violation_bug_1(
     %A : tensor<?x?xf32> {linalg.inplaceable = false},
     %idx : index)
   -> tensor<?x?xf32>
@@ -552,7 +552,7 @@ func @dominance_violation_bug_1(
 
 // CHECK-LABEL: func @scf_if_inplace(
 //  CHECK-SAME:     %[[cond:.*]]: i1, %[[t1:.*]]: memref<?xf32{{.*}}>, %[[v:.*]]: vector
-func @scf_if_inplace(%cond: i1,
+func.func @scf_if_inplace(%cond: i1,
                      %t1: tensor<?xf32> {linalg.inplaceable = true},
                      %v: vector<5xf32>, %idx: index) -> tensor<?xf32> {
 
@@ -582,7 +582,7 @@ func @scf_if_inplace(%cond: i1,
 //       CHECK:       vector.transfer_write
 //       CHECK:     }
 //       CHECK:   }
-func @scf_if_inside_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
+func.func @scf_if_inside_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
                       %v: vector<5xf32>, %idx: index,
                       %cond: i1) -> tensor<?xf32> {
   %c0 = arith.constant 0 : index
@@ -604,7 +604,7 @@ func @scf_if_inside_scf_for(%t1: tensor<?xf32> {linalg.inplaceable = true},
 
 // CHECK-LABEL: func @scf_if_non_equiv_yields(
 //  CHECK-SAME:     %[[cond:.*]]: i1, %[[A:.*]]: memref<{{.*}}>, %[[B:.*]]: memref<{{.*}}>) -> memref<{{.*}}>
-func @scf_if_non_equiv_yields(
+func.func @scf_if_non_equiv_yields(
     %b : i1,
     %A : tensor<4xf32> {linalg.inplaceable = false},
     %B : tensor<4xf32> {linalg.inplaceable = false})
@@ -624,7 +624,7 @@ func @scf_if_non_equiv_yields(
 
 // CHECK-LABEL: func @insert_op
 //  CHECK-SAME:     %[[t1:.*]]: memref<?xf32, {{.*}}>, %[[s:.*]]: f32, %[[i:.*]]: index
-func @insert_op(%t1 : tensor<?xf32> {linalg.inplaceable = true},
+func.func @insert_op(%t1 : tensor<?xf32> {linalg.inplaceable = true},
                 %s : f32, %i : index) -> tensor<?xf32> {
   // CHECK: memref.store %[[s]], %[[t1]][%[[i]]]
   %0 = tensor.insert %s into %t1[%i] : tensor<?xf32>
@@ -634,7 +634,7 @@ func @insert_op(%t1 : tensor<?xf32> {linalg.inplaceable = true},
 
 // -----
 
-func @gather_like(
+func.func @gather_like(
     %arg0 : tensor<?x?xf32> {linalg.inplaceable = false},
     %arg1 : tensor<?xi32> {linalg.inplaceable = false},
     %arg2 : tensor<?x?xf32> {linalg.inplaceable = true}) -> tensor<?x?xf32> {
@@ -666,7 +666,7 @@ func @gather_like(
 
 // CHECK-LABEL: func @linalg_op_bufferizes_inplace_with_input
 //  CHECK-SAME:     %[[t1:.*]]: memref<?x?xf32, #{{.*}}>, %[[t2:.*]]: memref<?xf32, #{{.*}}>, %[[t3:.*]]: memref<?x?xf32, #{{.*}}>
-func @linalg_op_bufferizes_inplace_with_input(
+func.func @linalg_op_bufferizes_inplace_with_input(
     %t1: tensor<?x?xf32> {linalg.inplaceable = true},
     %t2: tensor<?xf32> {linalg.inplaceable = true},
     %t3: tensor<?x?xf32> {linalg.inplaceable = true},
@@ -698,7 +698,7 @@ func @linalg_op_bufferizes_inplace_with_input(
 
 // CHECK-LABEL: func @op_is_reading_but_following_ops_are_not
 //  CHECK-SAME:     %[[t0:.*]]: memref<?xf32
-func @op_is_reading_but_following_ops_are_not(
+func.func @op_is_reading_but_following_ops_are_not(
     %t0 : tensor<?xf32> {linalg.inplaceable = false},
     %cst : f32)
   -> tensor<?xf32>
@@ -729,7 +729,7 @@ func @op_is_reading_but_following_ops_are_not(
 // InitTensorOp elimination would produce SSA violations for the example below.
 //===----------------------------------------------------------------------===//
 
-func @depthwise_conv_1d_nwc_wc(%arg0: index, %arg1: index, %arg2: tensor<8x18x32xf32>)
+func.func @depthwise_conv_1d_nwc_wc(%arg0: index, %arg1: index, %arg2: tensor<8x18x32xf32>)
     -> tensor<?x1x6x8xf32> {
   %c0 = arith.constant 0 : index
   %c32 = arith.constant 32 : index
@@ -750,7 +750,7 @@ func @depthwise_conv_1d_nwc_wc(%arg0: index, %arg1: index, %arg2: tensor<8x18x32
 
 // CHECK-LABEL: func @write_to_select_op_source
 //  CHECK-SAME:     %[[t1:.*]]: memref<?xf32, #{{.*}}>, %[[t2:.*]]: memref<?xf32, #{{.*}}>
-func @write_to_select_op_source(
+func.func @write_to_select_op_source(
     %t1 : tensor<?xf32> {linalg.inplaceable = true},
     %t2 : tensor<?xf32> {linalg.inplaceable = true},
     %c : i1)
@@ -772,7 +772,7 @@ func @write_to_select_op_source(
 
 // CHECK-LABEL: func @write_after_select_read_one
 //  CHECK-SAME:     %[[t1:.*]]: memref<?xf32, #{{.*}}>, %[[t2:.*]]: memref<?xf32, #{{.*}}>
-func @write_after_select_read_one(
+func.func @write_after_select_read_one(
     %t1 : tensor<?xf32> {linalg.inplaceable = true},
     %t2 : tensor<?xf32> {linalg.inplaceable = true},
     %c : i1)
@@ -803,7 +803,7 @@ func @write_after_select_read_one(
 // correctly.
 
 // CHECK-LABEL: func @rank_reducing
-func @rank_reducing(
+func.func @rank_reducing(
     %i: index, %j: index,
     %arg0: tensor<8x18x32xf32>)
       -> tensor<?x1x6x8xf32> {
@@ -840,7 +840,7 @@ func @rank_reducing(
 //       CHECK:   %[[r:.*]] = memref.load %[[clone]][%{{.*}}]
 //       CHECK:   memref.dealloc %[[clone]]
 //       CHECK:   return %[[r]]
-func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -> f32 {
+func.func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -> f32 {
   %r = scf.execute_region -> (tensor<?xf32>) {
     %t2 = linalg.init_tensor [%i] : tensor<?xf32>
     scf.yield %t2 : tensor<?xf32>
@@ -864,7 +864,7 @@ func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -> f32 {
 //       CHECK:     memref.copy %[[t]], %[[alloc2]]
 //       CHECK:     scf.yield %[[alloc2]]
 //       CHECK:   return %[[for]]
-func @scf_for_yield_non_equivalent(
+func.func @scf_for_yield_non_equivalent(
     %t: tensor<?xf32>, %lb : index, %ub : index, %step : index) -> tensor<?xf32> {
   %r = scf.for %i = %lb to %ub step %step iter_args(%a = %t) -> tensor<?xf32> {
     scf.yield %t : tensor<?xf32>
@@ -893,7 +893,7 @@ func @scf_for_yield_non_equivalent(
 //       CHECK:     %[[casted3:.*]] = memref.cast %[[alloc3]]
 //       CHECK:     scf.yield %[[casted3]]
 //       CHECK:   return %[[for]]
-func @scf_for_yield_allocation(%t: tensor<?xf32>, %lb : index, %ub : index,
+func.func @scf_for_yield_allocation(%t: tensor<?xf32>, %lb : index, %ub : index,
                                %step : index) -> tensor<?xf32> {
   %r = scf.for %i = %lb to %ub step %step iter_args(%a = %t) -> tensor<?xf32> {
     %t2 = linalg.init_tensor [%i] : tensor<?xf32>
@@ -911,7 +911,7 @@ func @scf_for_yield_allocation(%t: tensor<?xf32>, %lb : index, %ub : index,
 // CHECK-LABEL: func @scf_for_swapping_yields(
 //  CHECK-SAME:     %[[A:.*]]: memref<?xf32, #{{.*}}>, %[[B:.*]]: memref<?xf32, #{{.*}}>
 
-func @scf_for_swapping_yields(
+func.func @scf_for_swapping_yields(
     %A : tensor<?xf32>, %B : tensor<?xf32> {linalg.inplaceable = true},
     %C : tensor<4xf32>, %lb : index, %ub : index, %step : index)
   -> (f32, f32)

diff --git a/mlir/test/Dialect/Linalg/conv-interface-invalid.mlir b/mlir/test/Dialect/Linalg/conv-interface-invalid.mlir
index b46845aae265c..f9b20d3f3e3f8 100644
--- a/mlir/test/Dialect/Linalg/conv-interface-invalid.mlir
+++ b/mlir/test/Dialect/Linalg/conv-interface-invalid.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s
 
-func @test_conv_op_not_linalg_op(%arg0 : tensor<?xf32>, %arg1 : tensor<?xf32>,
+func.func @test_conv_op_not_linalg_op(%arg0 : tensor<?xf32>, %arg1 : tensor<?xf32>,
     %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{expected a LinalgOp}}
   %0 = "test.conv_op_not_linalg_op"(%arg0, %arg1, %arg2)
@@ -12,7 +12,7 @@ func @test_conv_op_not_linalg_op(%arg0 : tensor<?xf32>, %arg1 : tensor<?xf32>,
 
 // Check for number of operands being >= 2.
 #map = affine_map<(d0) -> (d0)>
-func @test_conv_op_wrong_num_operands(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_wrong_num_operands(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{expected op with 2 inputs and 1 output}}
   %0 = test.linalg_conv_op {
@@ -27,7 +27,7 @@ func @test_conv_op_wrong_num_operands(%arg0 : tensor<?xf32>,
 
 // -----
 
-func @test_conv_op_wrong_input_indexing_map1(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_wrong_input_indexing_map1(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{unexpected input index map for convolution}}
   %0 = test.linalg_conv_op {
@@ -45,7 +45,7 @@ func @test_conv_op_wrong_input_indexing_map1(%arg0 : tensor<?xf32>,
 
 // -----
 
-func @test_conv_op_wrong_input_indexing_map2(%arg0 : tensor<?x?xf32>,
+func.func @test_conv_op_wrong_input_indexing_map2(%arg0 : tensor<?x?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{unexpected input index map for convolution}}
   %0 = test.linalg_conv_op {
@@ -63,7 +63,7 @@ func @test_conv_op_wrong_input_indexing_map2(%arg0 : tensor<?x?xf32>,
 
 // -----
 
-func @test_conv_op_filter_index_map_not_projection(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_filter_index_map_not_projection(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{expected output/filter indexing maps to be projected permutations}}
   %0 = test.linalg_conv_op {
@@ -81,7 +81,7 @@ func @test_conv_op_filter_index_map_not_projection(%arg0 : tensor<?xf32>,
 
 // -----
 
-func @test_conv_op_output_index_map_not_projection(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_output_index_map_not_projection(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{expected output/filter indexing maps to be projected permutations}}
   %0 = test.linalg_conv_op {
@@ -101,7 +101,7 @@ func @test_conv_op_output_index_map_not_projection(%arg0 : tensor<?xf32>,
 
 // Convolution op illegal if a loop dimension is used to access
 // output, filter and is convolved.
-func @test_conv_op_output_filter_convolved(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_output_filter_convolved(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
   // expected-error @+1 {{unexpected loop dimension for convolution op}}
   %0 = test.linalg_conv_op {
@@ -120,7 +120,7 @@ func @test_conv_op_output_filter_convolved(%arg0 : tensor<?xf32>,
 // -----
 
 // Convolution op illegal if a loop dimension is used only in the output.
-func @test_conv_op_output_only_dim(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_output_only_dim(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
   // expected-error @+1 {{unexpected loop dimension for convolution op}}
   %0 = test.linalg_conv_op {
@@ -139,7 +139,7 @@ func @test_conv_op_output_only_dim(%arg0 : tensor<?xf32>,
 // -----
 
 // Convolution op illegal if a loop dimension is used only in the filter.
-func @test_conv_op_filter_only_dim(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_filter_only_dim(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?x?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{unexpected loop dimension for convolution op}}
   %0 = test.linalg_conv_op {
@@ -158,7 +158,7 @@ func @test_conv_op_filter_only_dim(%arg0 : tensor<?xf32>,
 // -----
 
 // Convolution op illegal if a loop dimension is used only in the input.
-func @test_conv_op_input_only_dim(%arg0 : tensor<?x?xf32>,
+func.func @test_conv_op_input_only_dim(%arg0 : tensor<?x?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{unexpected loop dimension for convolution op}}
   %0 = test.linalg_conv_op {
@@ -177,7 +177,7 @@ func @test_conv_op_input_only_dim(%arg0 : tensor<?x?xf32>,
 // -----
 
 // Convolution op illegal if a loop dimension accessing output is not parallel.
-func @test_conv_op_non_output_access_loop_parallel(%arg0 : tensor<?xf32>,
+func.func @test_conv_op_non_output_access_loop_parallel(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{expected all iterators not used to access outputs to be reduction}}
   %0 = test.linalg_conv_op  {

diff --git a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
index 3fafc6e86d570..4f71a0ccc22c2 100644
--- a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
+++ b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
@@ -5,7 +5,7 @@
 // CHECK-LABEL: func @addf_rank0
 //  CHECK-SAME:   %[[ARG0:[0-9a-zA-Z]*]]: tensor<f32>
 //  CHECK-SAME:   %[[ARG1:[0-9a-zA-Z]*]]: tensor<f32>
-func @addf_rank0(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
+func.func @addf_rank0(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
   //      CHECK: %{{.*}} = linalg.generic
   // CHECK-SAME: indexing_maps = [#[[$MAP]], #[[$MAP]], #[[$MAP]]]
   // CHECK-SAME: iterator_types = []
@@ -25,7 +25,7 @@ func @addf_rank0(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
 // CHECK-LABEL: func @addf_rank1
 //  CHECK-SAME:   %[[ARG0:[0-9a-zA-Z]*]]: tensor<?xf32>
 //  CHECK-SAME:   %[[ARG1:[0-9a-zA-Z]*]]: tensor<?xf32>
-func @addf_rank1(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+func.func @addf_rank1(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
   // CHECK: linalg.generic
   // CHECK-SAME: iterator_types = ["parallel"]
   // CHECK-SAME:  ins(%[[ARG0]], %[[ARG1]]
@@ -39,7 +39,7 @@ func @addf_rank1(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
 // Check a unary op.
 // CHECK-LABEL: func @exp
 //  CHECK-SAME:   %[[ARG0:[0-9a-zA-Z]*]]: tensor<f32>
-func @exp(%arg0: tensor<f32>) -> tensor<f32> {
+func.func @exp(%arg0: tensor<f32>) -> tensor<f32> {
   // CHECK: linalg.generic
   // CHECK-SAME:  ins(%[[ARG0]]
   // CHECK-SAME: outs(%[[ARG0]]
@@ -57,7 +57,7 @@ func @exp(%arg0: tensor<f32>) -> tensor<f32> {
 //  CHECK-SAME:   %[[ARG0:[0-9a-zA-Z]*]]: tensor<i1>
 //  CHECK-SAME:   %[[ARG1:[0-9a-zA-Z]*]]: tensor<i32>
 //  CHECK-SAME:   %[[ARG2:[0-9a-zA-Z]*]]: tensor<i32>
-func @select(%arg0: tensor<i1>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<i32> {
+func.func @select(%arg0: tensor<i1>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tensor<i32> {
   // CHECK: linalg.generic
   // CHECK-SAME:  ins(%[[ARG0]], %[[ARG1]], %[[ARG2]]
   // CHECK-SAME: outs(%[[ARG1]]
@@ -74,7 +74,7 @@ func @select(%arg0: tensor<i1>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tenso
 // CHECK-LABEL: func @cmpf(
 //  CHECK-SAME:   %[[ARG0:[0-9a-zA-Z]*]]: tensor<f32>
 //  CHECK-SAME:   %[[ARG1:[0-9a-zA-Z]*]]: tensor<f32>
-func @cmpf(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<i1> {
+func.func @cmpf(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<i1> {
   // CHECK: %[[INIT:.*]] = linalg.init_tensor [] : tensor<i1>
   // CHECK: linalg.generic
   // CHECK-SAME:  ins(%[[ARG0]], %[[ARG1]]
@@ -91,7 +91,7 @@ func @cmpf(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<i1> {
 // CHECK-LABEL: func @cmpf(
 //  CHECK-SAME:   %[[ARG0:[0-9a-zA-Z]*]]: tensor<4x?x?x8x2x?xf32>
 //  CHECK-SAME:   %[[ARG1:[0-9a-zA-Z]*]]: tensor<4x?x?x8x2x?xf32>
-func @cmpf(%arg0: tensor<4x?x?x8x2x?xf32>, %arg1: tensor<4x?x?x8x2x?xf32>) -> tensor<4x?x?x8x2x?xi1> {
+func.func @cmpf(%arg0: tensor<4x?x?x8x2x?xf32>, %arg1: tensor<4x?x?x8x2x?xf32>) -> tensor<4x?x?x8x2x?xi1> {
   // CHECK: %[[C1:.*]] = arith.constant 1 : index
   // CHECK: %[[D1:.*]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<4x?x?x8x2x?xf32>
   // CHECK: %[[C2:.*]] = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Linalg/decompose-convolution.mlir b/mlir/test/Dialect/Linalg/decompose-convolution.mlir
index f8007df84f479..ad900a568c709 100644
--- a/mlir/test/Dialect/Linalg/decompose-convolution.mlir
+++ b/mlir/test/Dialect/Linalg/decompose-convolution.mlir
@@ -2,7 +2,7 @@
 
 // CHECK-LABEL: func @conv2d_nhwc_4x1x2x8_tensor
 //  CHECK-SAME: (%[[INPUT:.+]]: tensor<4x1x6x3xf32>, %[[FILTER:.+]]: tensor<1x2x3x8xf32>, %[[INIT:.+]]: tensor<4x1x2x8xf32>)
-func @conv2d_nhwc_4x1x2x8_tensor(%input: tensor<4x1x6x3xf32>, %filter: tensor<1x2x3x8xf32>, %init: tensor<4x1x2x8xf32>) -> tensor<4x1x2x8xf32> {
+func.func @conv2d_nhwc_4x1x2x8_tensor(%input: tensor<4x1x6x3xf32>, %filter: tensor<1x2x3x8xf32>, %init: tensor<4x1x2x8xf32>) -> tensor<4x1x2x8xf32> {
   %0 = linalg.conv_2d_nhwc_hwcf
     {dilations = dense<[2, 3]> : tensor<2xi64>, strides = dense<[3, 2]> : tensor<2xi64>}
     ins(%input, %filter : tensor<4x1x6x3xf32>, tensor<1x2x3x8xf32>)
@@ -28,7 +28,7 @@ func @conv2d_nhwc_4x1x2x8_tensor(%input: tensor<4x1x6x3xf32>, %filter: tensor<1x
 
 // CHECK-LABEL: func @conv2d_nhwc_qxqx1xq_tensor
 //  CHECK-SAME: (%[[INPUT:.+]]: tensor<?x?x1x?xf32>, %[[FILTER:.+]]: tensor<?x1x?x?xf32>, %[[INIT:.+]]: tensor<?x?x1x?xf32>)
-func @conv2d_nhwc_qxqx1xq_tensor(%input: tensor<?x?x1x?xf32>, %filter: tensor<?x1x?x?xf32>, %init: tensor<?x?x1x?xf32>) -> tensor<?x?x1x?xf32> {
+func.func @conv2d_nhwc_qxqx1xq_tensor(%input: tensor<?x?x1x?xf32>, %filter: tensor<?x1x?x?xf32>, %init: tensor<?x?x1x?xf32>) -> tensor<?x?x1x?xf32> {
   %0 = linalg.conv_2d_nhwc_hwcf
     {dilations = dense<[2, 3]> : tensor<2xi64>, strides = dense<[3, 2]> : tensor<2xi64>}
     ins(%input, %filter : tensor<?x?x1x?xf32>, tensor<?x1x?x?xf32>)
@@ -60,7 +60,7 @@ func @conv2d_nhwc_qxqx1xq_tensor(%input: tensor<?x?x1x?xf32>, %filter: tensor<?x
 // Do not convert convolution ops whose window dimensions are not ones.
 
 // CHECK-LABEL: func @conv2d_nhwc_4x1x2x8_tensor
-func @conv2d_nhwc_4x1x2x8_tensor(%input: tensor<4x3x5x3xf32>, %filter: tensor<2x2x3x8xf32>, %init: tensor<4x1x2x8xf32>) -> tensor<4x1x2x8xf32> {
+func.func @conv2d_nhwc_4x1x2x8_tensor(%input: tensor<4x3x5x3xf32>, %filter: tensor<2x2x3x8xf32>, %init: tensor<4x1x2x8xf32>) -> tensor<4x1x2x8xf32> {
   // CHECK: linalg.conv_2d_nhwc_hwcf
   %0 = linalg.conv_2d_nhwc_hwcf
     {dilations = dense<[2, 3]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -72,7 +72,7 @@ func @conv2d_nhwc_4x1x2x8_tensor(%input: tensor<4x3x5x3xf32>, %filter: tensor<2x
 // -----
 
 // CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwc_tensor
-func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x1x113x96xf32>, %filter: tensor<1x3x96xf32>, %out: tensor<1x1x56x96xf32>) -> tensor<1x1x56x96xf32> {
+func.func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x1x113x96xf32>, %filter: tensor<1x3x96xf32>, %out: tensor<1x1x56x96xf32>) -> tensor<1x1x56x96xf32> {
   //     CHECK: linalg.depthwise_conv_1d_nwc_wc
   %0 = linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
          ins(%input, %filter: tensor<1x1x113x96xf32>, tensor<1x3x96xf32>)
@@ -85,7 +85,7 @@ func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x1x113x96xf32>, %filter:
 // Do not convert convolution ops whose window dimensions are not ones.
 
 // CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwc_tensor
-func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x113x113x96xf32>, %filter: tensor<3x3x96xf32>, %out: tensor<1x56x56x96xf32>) -> tensor<1x56x56x96xf32> {
+func.func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x113x113x96xf32>, %filter: tensor<3x3x96xf32>, %out: tensor<1x56x56x96xf32>) -> tensor<1x56x56x96xf32> {
   //     CHECK: linalg.depthwise_conv_2d_nhwc_hwc
   %0 = linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
          ins(%input, %filter: tensor<1x113x113x96xf32>, tensor<3x3x96xf32>)

diff --git a/mlir/test/Dialect/Linalg/detensorize_0d.mlir b/mlir/test/Dialect/Linalg/detensorize_0d.mlir
index 33ab4126b5894..218d7228b7d14 100644
--- a/mlir/test/Dialect/Linalg/detensorize_0d.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_0d.mlir
@@ -2,7 +2,7 @@
 
 #map = affine_map<() -> ()>
 
-func @detensor_simple(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
+func.func @detensor_simple(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
   %0 = linalg.init_tensor [] : tensor<f32>
   %1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
     ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)
@@ -21,7 +21,7 @@ func @detensor_simple(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> att
 // CHECK:         %[[new_tensor_res:.*]] = tensor.from_elements %[[detensored_res]]
 // CHECK:         return %[[new_tensor_res]]
 
-func @detensor_op_sequence(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
+func.func @detensor_op_sequence(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
   %0 = linalg.init_tensor [] : tensor<f32>
   %1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
     ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)
@@ -61,7 +61,7 @@ func @detensor_op_sequence(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32
 // CHECK:         %[[new_tensor_res:.*]] = tensor.from_elements %[[detensored_res3]]
 // CHECK:         return %[[new_tensor_res]]
 
-func @detensor_multiple_ops(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
+func.func @detensor_multiple_ops(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
   %0 = linalg.init_tensor [] : tensor<f32>
   %1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
     ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)
@@ -82,7 +82,7 @@ func @detensor_multiple_ops(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f3
 // CHECK:         %[[new_tensor_res:.*]] = tensor.from_elements %[[detensored_res2]]
 // CHECK:         return %[[new_tensor_res]]
 
-func @detensor_foreign_op(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
+func.func @detensor_foreign_op(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> attributes {iree.module.export} {
   %0 = linalg.init_tensor [] : tensor<f32>
   %1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
     ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)

diff --git a/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir b/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir
index 0c7e3e9322561..66bab42ec1978 100644
--- a/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s
 
 // TODO: Detensoring breaks if %arg0 or %arg1 are passed directly as tensors. Fix that.
-func @if_true_test(%arg0: i1, %arg1: i32) -> tensor<i32> attributes {} {
+func.func @if_true_test(%arg0: i1, %arg1: i32) -> tensor<i32> attributes {} {
   %arg0_t = tensor.from_elements %arg0 : tensor<i1>
   %arg1_t = tensor.from_elements %arg1 : tensor<i32>
 

diff --git a/mlir/test/Dialect/Linalg/detensorize_if.mlir b/mlir/test/Dialect/Linalg/detensorize_if.mlir
index 2cc282e422654..fec9273badea7 100644
--- a/mlir/test/Dialect/Linalg/detensorize_if.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_if.mlir
@@ -7,7 +7,7 @@
   iterator_types = []
 }
 
-func @main() -> (tensor<i32>) attributes {} {
+func.func @main() -> (tensor<i32>) attributes {} {
   %c0 = arith.constant 0 : i32
   %0 = tensor.from_elements %c0 : tensor<i32>
   %c10 = arith.constant 10 : i32
@@ -68,7 +68,7 @@ func @main() -> (tensor<i32>) attributes {} {
   iterator_types = []
 }
 
-func @main() -> (tensor<i32>) attributes {} {
+func.func @main() -> (tensor<i32>) attributes {} {
   %c0 = arith.constant 0 : i32
   %0 = tensor.from_elements %c0 : tensor<i32>
   %c10 = arith.constant 10 : i32
@@ -131,7 +131,7 @@ func @main() -> (tensor<i32>) attributes {} {
   iterator_types = []
 }
 
-func @main() -> (tensor<i32>) attributes {} {
+func.func @main() -> (tensor<i32>) attributes {} {
   %c0 = arith.constant 0 : i32
   %0 = tensor.from_elements %c0 : tensor<i32>
   %c10 = arith.constant 10 : i32

diff --git a/mlir/test/Dialect/Linalg/detensorize_trivial.mlir b/mlir/test/Dialect/Linalg/detensorize_trivial.mlir
index ad851a736d248..7abbcba653ba2 100644
--- a/mlir/test/Dialect/Linalg/detensorize_trivial.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_trivial.mlir
@@ -9,7 +9,7 @@
   iterator_types = []
 }
 
-func @main(%farg0 : tensor<i32>) -> (tensor<i1>) attributes {} {
+func.func @main(%farg0 : tensor<i32>) -> (tensor<i1>) attributes {} {
   %c10 = arith.constant 10 : i32
   %1 = tensor.from_elements %c10 : tensor<i32>
   %3 = linalg.init_tensor [] : tensor<i1>

diff --git a/mlir/test/Dialect/Linalg/detensorize_while.mlir b/mlir/test/Dialect/Linalg/detensorize_while.mlir
index 44c38120f6e53..6d34c9f418971 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while.mlir
@@ -8,7 +8,7 @@
   iterator_types = []
 }
 
-func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attributes {} {
+func.func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attributes {} {
   cf.br ^bb1(%farg0 : tensor<i32>)
 
 ^bb1(%0: tensor<i32>):  // 2 preds: ^bb0, ^bb2

diff --git a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
index 0acb82cefd8ab..87a28af803690 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
@@ -21,7 +21,7 @@
   iterator_types = ["parallel"]
 }
 
-func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attributes {} {
+func.func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attributes {} {
   cf.br ^bb1(%farg0 : tensor<10xi32>)
 
 ^bb1(%0: tensor<10xi32>):  // 2 preds: ^bb0, ^bb2

diff --git a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
index 993d52225f37e..2ff00598b331f 100644
--- a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
+++ b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
@@ -7,7 +7,7 @@
   iterator_types = []
 }
 
-func @main() -> () attributes {} {
+func.func @main() -> () attributes {} {
   %c0 = arith.constant 0 : i32
   %0 = tensor.from_elements %c0 : tensor<1xi32>
   %reshaped0 = tensor.collapse_shape %0 [] : tensor<1xi32> into tensor<i32>

diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
index ca3bf0aa61880..8c424ffbea268 100644
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -12,7 +12,7 @@
   library_call = "some_external_func"
 }
 
-func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %arg1 : f32, %shape: tensor<?x1x?x1x?xf32>) -> tensor<?x1x?x1x?xf32> {
+func.func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %arg1 : f32, %shape: tensor<?x1x?x1x?xf32>) -> tensor<?x1x?x1x?xf32> {
   %0 = linalg.generic #trait
      ins(%arg0, %arg1 : tensor<?x1x?xf32>, f32)
     outs(%shape : tensor<?x1x?x1x?xf32>) {
@@ -44,7 +44,7 @@ func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %arg1 : f32, %shape: tensor
   library_call = "some_external_func"
 }
 
-func @drop_one_trip_loops_indexed
+func.func @drop_one_trip_loops_indexed
   (%arg0 : tensor<?x1x?xi32>, %shape: tensor<?x1x?x1x?xi32>) -> tensor<?x1x?x1x?xi32>
 {
   %0 = linalg.generic #trait
@@ -91,7 +91,7 @@ func @drop_one_trip_loops_indexed
   library_call = "some_external_func"
 }
 
-func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
+func.func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
 {
   %0 = linalg.generic #trait
      ins(%arg0 : tensor<1x1xf32>)
@@ -118,7 +118,7 @@ func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
   library_call = "some_external_func"
 }
 
-func @drop_all_loops_indexed
+func.func @drop_all_loops_indexed
   (%arg0 : tensor<1x1xi32>) -> tensor<1x1xi32> {
   %0 = linalg.generic #trait
      ins(%arg0 : tensor<1x1xi32>)
@@ -152,7 +152,7 @@ func @drop_all_loops_indexed
   library_call = "some_external_fn"
 }
 
-func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf32>) -> tensor<5xf32> {
+func.func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf32>) -> tensor<5xf32> {
   %0 = linalg.generic #trait
      ins(%arg0 : tensor<1x5xf32>)
     outs(%shape : tensor<5xf32>) {
@@ -183,7 +183,7 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf3
   library_call = "some_external_fn"
 }
 
-func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>, %shape : tensor<5x5xf32>) -> tensor<5x5xf32>
+func.func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>, %shape : tensor<5x5xf32>) -> tensor<5x5xf32>
 {
   %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<5xf32> into tensor<1x5xf32>
   %1 = tensor.expand_shape %arg1 [[0, 1]] : tensor<5xf32> into tensor<5x1xf32>
@@ -219,7 +219,7 @@ func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>, %shape : tens
   library_call = "some_external_fn"
 }
 
-func @broadcast_scalar(%arg0 : tensor<1x1xf32>, %shape : tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func @broadcast_scalar(%arg0 : tensor<1x1xf32>, %shape : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
    %0 = linalg.generic #trait
      ins(%arg0 : tensor<1x1xf32>)
@@ -244,7 +244,7 @@ func @broadcast_scalar(%arg0 : tensor<1x1xf32>, %shape : tensor<?x?xf32>) -> ten
 
 #map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 #map1 = affine_map<(d0, d1, d2) -> (d2)>
-func @fold_unit_dim_tensor_reshape_op(%arg0 : tensor<5xf32>) -> tensor<2x5xf32>
+func.func @fold_unit_dim_tensor_reshape_op(%arg0 : tensor<5xf32>) -> tensor<2x5xf32>
 {
   %1 = linalg.init_tensor [1, 2, 5] : tensor<1x2x5xf32>
   %2 = linalg.generic {i64, indexing_maps = [#map1, #map0],
@@ -263,7 +263,7 @@ func @fold_unit_dim_tensor_reshape_op(%arg0 : tensor<5xf32>) -> tensor<2x5xf32>
 
 // -----
 
-func @fold_unit_dim_for_init_tensor(%input: tensor<1x1000xf32>) -> tensor<1xf32> {
+func.func @fold_unit_dim_for_init_tensor(%input: tensor<1x1000xf32>) -> tensor<1xf32> {
   %cst = arith.constant 0.0 : f32
   %init = linalg.init_tensor [1] : tensor<1xf32>
   %fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1xf32>) -> tensor<1xf32>
@@ -299,7 +299,7 @@ func @fold_unit_dim_for_init_tensor(%input: tensor<1x1000xf32>) -> tensor<1xf32>
 
 // -----
 
-func @fold_slice(
+func.func @fold_slice(
     %arg0 : tensor<1x?x?x1x?x1x1xf32>, %arg1 : tensor<1x?x?x?x?x1x1xf32>,
     %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index,
     %arg6 : index, %arg7 : index) -> (tensor<1x?x?x1x?x1x1xf32>, tensor<1x?x?x1x?x1x1xf32>) {
@@ -326,7 +326,7 @@ func @fold_slice(
 
 // -----
 
-func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32> {
+func.func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32> {
   %cst = arith.constant 1.000000e+00 : f32
   %c3 = arith.constant 3 : index
   %0 = tensor.dim %arg0, %c3 : tensor<1x?x1x?xf32>
@@ -361,7 +361,7 @@ func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32> {
 
 // -----
 
-func @unit_dim_for_both_reduction(%arg0: tensor<1x?x1x1xf32>) -> tensor<1x1xf32> {
+func.func @unit_dim_for_both_reduction(%arg0: tensor<1x?x1x1xf32>) -> tensor<1x1xf32> {
   %cst = arith.constant 1.000000e+00 : f32
   %c3 = arith.constant 3 : index
   %1 = linalg.init_tensor [1, 1] : tensor<1x1xf32>
@@ -394,7 +394,7 @@ func @unit_dim_for_both_reduction(%arg0: tensor<1x?x1x1xf32>) -> tensor<1x1xf32>
 
 // -----
 
-func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x1xf32> {
+func.func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x1xf32> {
   %cst = arith.constant 1.000000e+00 : f32
   %c2 = arith.constant 2 : index
   %0 = tensor.dim %arg0, %c2 : tensor<?x1x?x1xf32>
@@ -429,7 +429,7 @@ func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x1xf32
 
 // -----
 
-func @slice_unit_dims(%arg0: tensor<1x3xf32>) -> tensor<1x1xf32> {
+func.func @slice_unit_dims(%arg0: tensor<1x3xf32>) -> tensor<1x1xf32> {
   %0 = tensor.extract_slice %arg0[0, 2] [1, 1] [1, 1] : tensor<1x3xf32> to tensor<1x1xf32>
   return %0 : tensor<1x1xf32>
 }
@@ -441,7 +441,7 @@ func @slice_unit_dims(%arg0: tensor<1x3xf32>) -> tensor<1x1xf32> {
 
 // -----
 
-func @insert_slice_unit_dims(%arg0: tensor<1x3xf32>, %arg1: tensor<1x1xf32>) -> tensor<1x3xf32> {
+func.func @insert_slice_unit_dims(%arg0: tensor<1x3xf32>, %arg1: tensor<1x1xf32>) -> tensor<1x3xf32> {
   %0 = tensor.insert_slice %arg1 into %arg0[0, 2] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<1x3xf32>
   return %0 : tensor<1x3xf32>
 }
@@ -465,7 +465,7 @@ func @insert_slice_unit_dims(%arg0: tensor<1x3xf32>, %arg1: tensor<1x1xf32>) ->
   library_call = "some_external_func"
 }
 
-func @drop_one_trip_loops(%arg0 : memref<?x1x?xf32>, %arg1 : f32, %shape: memref<?x1x?x1x?xf32>) -> memref<?x1x?x1x?xf32> {
+func.func @drop_one_trip_loops(%arg0 : memref<?x1x?xf32>, %arg1 : f32, %shape: memref<?x1x?x1x?xf32>) -> memref<?x1x?x1x?xf32> {
   linalg.generic #trait
      ins(%arg0, %arg1 : memref<?x1x?xf32>, f32)
     outs(%shape : memref<?x1x?x1x?xf32>) {
@@ -496,7 +496,7 @@ func @drop_one_trip_loops(%arg0 : memref<?x1x?xf32>, %arg1 : f32, %shape: memref
   library_call = "some_external_func"
 }
 
-func @drop_one_trip_loops_indexed
+func.func @drop_one_trip_loops_indexed
   (%arg0 : memref<?x1x?xi32>, %shape: memref<?x1x?x1x?xi32>) -> memref<?x1x?x1x?xi32>
 {
   linalg.generic #trait
@@ -543,7 +543,7 @@ func @drop_one_trip_loops_indexed
   library_call = "some_external_func"
 }
 
-func @drop_all_loops(%arg0 : memref<1x1xf32>) -> memref<1x1xf32>
+func.func @drop_all_loops(%arg0 : memref<1x1xf32>) -> memref<1x1xf32>
 {
   linalg.generic #trait
      ins(%arg0 : memref<1x1xf32>)
@@ -570,7 +570,7 @@ func @drop_all_loops(%arg0 : memref<1x1xf32>) -> memref<1x1xf32>
   library_call = "some_external_func"
 }
 
-func @drop_all_loops_indexed
+func.func @drop_all_loops_indexed
   (%arg0 : memref<1x1xi32>) -> memref<1x1xi32> {
   linalg.generic #trait
      ins(%arg0 : memref<1x1xi32>)
@@ -604,7 +604,7 @@ func @drop_all_loops_indexed
   library_call = "some_external_fn"
 }
 
-func @leading_dim_1_canonicalization(%arg0: memref<1x5xf32>, %shape: memref<5xf32>) -> memref<5xf32> {
+func.func @leading_dim_1_canonicalization(%arg0: memref<1x5xf32>, %shape: memref<5xf32>) -> memref<5xf32> {
   linalg.generic #trait
      ins(%arg0 : memref<1x5xf32>)
     outs(%shape : memref<5xf32>) {
@@ -635,7 +635,7 @@ func @leading_dim_1_canonicalization(%arg0: memref<1x5xf32>, %shape: memref<5xf3
   library_call = "some_external_fn"
 }
 
-func @broadcast_test(%arg0 : memref<5xf32>, %arg1 : memref<5xf32>, %shape : memref<5x5xf32>) -> memref<5x5xf32>
+func.func @broadcast_test(%arg0 : memref<5xf32>, %arg1 : memref<5xf32>, %shape : memref<5x5xf32>) -> memref<5x5xf32>
 {
   %0 = memref.expand_shape %arg0 [[0, 1]] : memref<5xf32> into memref<1x5xf32>
   %1 = memref.expand_shape %arg1 [[0, 1]] : memref<5xf32> into memref<5x1xf32>
@@ -671,7 +671,7 @@ func @broadcast_test(%arg0 : memref<5xf32>, %arg1 : memref<5xf32>, %shape : memr
   library_call = "some_external_fn"
 }
 
-func @broadcast_scalar(%arg0 : memref<1x1xf32>, %shape : memref<?x?xf32>) -> memref<?x?xf32>
+func.func @broadcast_scalar(%arg0 : memref<1x1xf32>, %shape : memref<?x?xf32>) -> memref<?x?xf32>
 {
    linalg.generic #trait
      ins(%arg0 : memref<1x1xf32>)
@@ -696,7 +696,7 @@ func @broadcast_scalar(%arg0 : memref<1x1xf32>, %shape : memref<?x?xf32>) -> mem
 
 #map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 #map1 = affine_map<(d0, d1, d2) -> (d2)>
-func @fold_unit_dim_memref_reshape_op(%arg0 : memref<5xf32>) -> memref<2x5xf32>
+func.func @fold_unit_dim_memref_reshape_op(%arg0 : memref<5xf32>) -> memref<2x5xf32>
 {
   %1 = memref.alloc() : memref<1x2x5xf32>
   linalg.generic {i64, indexing_maps = [#map1, #map0],
@@ -719,7 +719,7 @@ func @fold_unit_dim_memref_reshape_op(%arg0 : memref<5xf32>) -> memref<2x5xf32>
 
 // -----
 
-func @fold_unit_dim_for_init_memref(%input: memref<1x1000xf32>) -> memref<1xf32> {
+func.func @fold_unit_dim_for_init_memref(%input: memref<1x1000xf32>) -> memref<1xf32> {
   %cst = arith.constant 0.0 : f32
   %init = memref.alloc() : memref<1xf32>
   linalg.generic {
@@ -767,7 +767,7 @@ func @fold_unit_dim_for_init_memref(%input: memref<1x1000xf32>) -> memref<1xf32>
   library_call = "some_external_func"
 }
 
-func @input_stays_same(%arg0 : memref<?x1x?xf32, #map0>, %arg1 : f32, %shape: memref<?x1x?x1x?xf32>) -> memref<?x1x?x1x?xf32> {
+func.func @input_stays_same(%arg0 : memref<?x1x?xf32, #map0>, %arg1 : f32, %shape: memref<?x1x?x1x?xf32>) -> memref<?x1x?x1x?xf32> {
   linalg.generic #trait
      ins(%arg0, %arg1 : memref<?x1x?xf32, #map0>, f32)
     outs(%shape : memref<?x1x?x1x?xf32>) {
@@ -811,7 +811,7 @@ func @input_stays_same(%arg0 : memref<?x1x?xf32, #map0>, %arg1 : f32, %shape: me
 
 #CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"] }>
 
-func @sparse_case(%arg0: tensor<8x8xf32, #CSR>, %arg1: tensor<8xf32>) -> tensor<8xf32> {
+func.func @sparse_case(%arg0: tensor<8x8xf32, #CSR>, %arg1: tensor<8xf32>) -> tensor<8xf32> {
     %0 = linalg.init_tensor [8] : tensor<8xf32>
     %1 = linalg.generic #matvec
       ins(%arg0, %arg1: tensor<8x8xf32, #CSR>, tensor<8xf32>)

diff --git a/mlir/test/Dialect/Linalg/fill-interface-invalid.mlir b/mlir/test/Dialect/Linalg/fill-interface-invalid.mlir
index 17a5f119cfd50..0c96680a91b85 100644
--- a/mlir/test/Dialect/Linalg/fill-interface-invalid.mlir
+++ b/mlir/test/Dialect/Linalg/fill-interface-invalid.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s
 
-func @test_fill_op_not_linalg_op(%arg0 : f32, %arg1 : tensor<?xf32>)
+func.func @test_fill_op_not_linalg_op(%arg0 : f32, %arg1 : tensor<?xf32>)
      -> tensor<?xf32> {
   // expected-error @+1 {{expected a LinalgOp}}
   %0 = "test.fill_op_not_linalg_op"(%arg0, %arg1)
@@ -12,7 +12,7 @@ func @test_fill_op_not_linalg_op(%arg0 : f32, %arg1 : tensor<?xf32>)
 
 #map0 = affine_map<(d0) -> ()>
 #map1 = affine_map<(d0) -> (d0)>
-func @test_fill_op_wrong_num_operands(%arg0 : f32, %arg1 : tensor<?xf32>)
+func.func @test_fill_op_wrong_num_operands(%arg0 : f32, %arg1 : tensor<?xf32>)
      -> tensor<?xf32> {
   // expected-error @+1 {{expected op with 1 input and 1 output}}
   %0 = test.linalg_fill_op {
@@ -28,7 +28,7 @@ func @test_fill_op_wrong_num_operands(%arg0 : f32, %arg1 : tensor<?xf32>)
 // -----
 
 #map1 = affine_map<(d0) -> (d0)>
-func @test_fill_op_non_scalar_input(%arg0 : tensor<?xf32>,
+func.func @test_fill_op_non_scalar_input(%arg0 : tensor<?xf32>,
     %arg1 : tensor<?xf32>) -> tensor<?xf32> {
   // expected-error @+1 {{expected op with scalar input}}
   %0 = test.linalg_fill_op {

diff --git a/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir b/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
index 4576cf0d8f5b8..3107583640206 100644
--- a/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
+++ b/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
@@ -11,7 +11,7 @@
   library_call = "some_external_func"
 }
 
-func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %shape: tensor<?x1x?x1x?xf32>) -> tensor<?x1x?x1x?xf32>
+func.func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %shape: tensor<?x1x?x1x?xf32>) -> tensor<?x1x?x1x?xf32>
 {
   %0 = linalg.generic #trait
     ins(%arg0 : tensor<?x1x?xf32>)
@@ -38,7 +38,7 @@ func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %shape: tensor<?x1x?x1x?xf3
   library_call = "some_external_func"
 }
 
-func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
+func.func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
 {
   %0 = linalg.generic #trait
      ins(%arg0 : tensor<1x1xf32>)
@@ -64,7 +64,7 @@ func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
   library_call = "some_external_func"
 }
 
-func @drop_all_loops(%arg0 : memref<1x1xf32>, %arg1 : memref<1x1xf32>)
+func.func @drop_all_loops(%arg0 : memref<1x1xf32>, %arg1 : memref<1x1xf32>)
 {
   linalg.generic #trait
      ins(%arg0 : memref<1x1xf32>)
@@ -93,7 +93,7 @@ func @drop_all_loops(%arg0 : memref<1x1xf32>, %arg1 : memref<1x1xf32>)
   library_call = "some_external_fn"
 }
 
-func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf32>) -> tensor<5xf32> {
+func.func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf32>) -> tensor<5xf32> {
   %0 = linalg.generic #trait
        ins(%arg0 : tensor<1x5xf32>)
       outs(%shape : tensor<5xf32>) {

diff --git a/mlir/test/Dialect/Linalg/forward-vector-transfers.mlir b/mlir/test/Dialect/Linalg/forward-vector-transfers.mlir
index 1afb5e13facaa..3530770580782 100644
--- a/mlir/test/Dialect/Linalg/forward-vector-transfers.mlir
+++ b/mlir/test/Dialect/Linalg/forward-vector-transfers.mlir
@@ -7,7 +7,7 @@
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: vector.transfer_read %[[ARG0]]
 //   CHECK-NOT: in_bounds
-func @testAllocRead(%in: memref<? x f32>) -> vector<32 x f32> {
+func.func @testAllocRead(%in: memref<? x f32>) -> vector<32 x f32> {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %alloc = memref.alloc() : memref<32 x f32>
@@ -25,7 +25,7 @@ func @testAllocRead(%in: memref<? x f32>) -> vector<32 x f32> {
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: vector.transfer_read %[[ARG0]]
 //   CHECK-NOT: in_bounds
-func @testAllocFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
+func.func @testAllocFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %alloc = memref.alloc() : memref<32 x f32>
@@ -44,7 +44,7 @@ func @testAllocFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: vector.transfer_read %[[ARG0]]
 //   CHECK-NOT: in_bounds
-func @testViewRead(%in: memref<? x f32>) -> vector<32 x f32> {
+func.func @testViewRead(%in: memref<? x f32>) -> vector<32 x f32> {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %alloc = memref.alloc() : memref<128 x i8>
@@ -63,7 +63,7 @@ func @testViewRead(%in: memref<? x f32>) -> vector<32 x f32> {
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: vector.transfer_read %[[ARG0]]
 //   CHECK-NOT: in_bounds
-func @testViewFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
+func.func @testViewFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %alloc = memref.alloc() : memref<128 x i8>
@@ -83,7 +83,7 @@ func @testViewFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: vector.transfer_write %[[ARG0]], %[[ARG1]]
 //   CHECK-NOT: in_bounds
-func @testAllocWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
+func.func @testAllocWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %alloc = memref.alloc() : memref<32 x f32>
@@ -101,7 +101,7 @@ func @testAllocWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: vector.transfer_write %[[ARG0]], %[[ARG1]]
 //   CHECK-NOT: in_bounds
-func @testViewWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
+func.func @testViewWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %alloc = memref.alloc() : memref<128 x i8>
@@ -124,7 +124,7 @@ func @testViewWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: memref.copy
 //       CHECK: vector.transfer_read %[[ALLOC]]
-func @failAllocFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
+func.func @failAllocFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %f1 = arith.constant 1.0: f32
@@ -146,7 +146,7 @@ func @failAllocFillRead(%in: memref<? x f32>) -> vector<32 x f32> {
 //       CHECK: %[[ALLOC:.*]] = memref.alloc
 //       CHECK: vector.transfer_write %[[ARG0]], %[[ALLOC]]
 //       CHECK: memref.copy
-func @failAllocWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
+func.func @failAllocWrite(%vec: vector<32 x f32>, %out: memref<? x f32>) {
   %c0 = arith.constant 0: index
   %f0 = arith.constant 0.0: f32
   %alloc = memref.alloc() : memref<32 x f32>

diff --git a/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir b/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
index ee49c929af0e2..a526bf7ded8d3 100644
--- a/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
+++ b/mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
@@ -7,7 +7,7 @@
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2)>
 #map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d3, d4, d5, d6)>
 #map3 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2, d3, d4, d5, d6, d7)>
-func @fuse_by_collapsing(%arg0 : tensor<2x12x5x336x9xi32>,
+func.func @fuse_by_collapsing(%arg0 : tensor<2x12x5x336x9xi32>,
     %arg1 : tensor<2x3x4xi32>, %arg2 : tensor<5x6x7x8xi32>) -> tensor<2x3x4x5x6x7x8x9xi32> {
   %expand = tensor.expand_shape %arg0 [[0], [1, 2], [3], [4, 5, 6], [7]]
       : tensor<2x12x5x336x9xi32> into tensor<2x3x4x5x6x7x8x9xi32>
@@ -58,7 +58,7 @@ func @fuse_by_collapsing(%arg0 : tensor<2x12x5x336x9xi32>,
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2)>
 #map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d3, d4, d5, d6)>
 #map3 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2, d3, d4, d5, d6, d7)>
-func @fuse_by_collapsing_indexing_op(%arg0 : tensor<2x12x5x336x9xi32>,
+func.func @fuse_by_collapsing_indexing_op(%arg0 : tensor<2x12x5x336x9xi32>,
     %arg1 : tensor<2x3x4xi32>, %arg2 : tensor<5x6x7x8xi32>) -> tensor<2x3x4x5x6x7x8x9xi32> {
   %expand = tensor.expand_shape %arg0 [[0], [1, 2], [3], [4, 5, 6], [7]]
       : tensor<2x12x5x336x9xi32> into tensor<2x3x4x5x6x7x8x9xi32>
@@ -120,7 +120,7 @@ func @fuse_by_collapsing_indexing_op(%arg0 : tensor<2x12x5x336x9xi32>,
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d5, d6, d0)>
 #map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d4, d1, d2, d3)>
 #map3 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2, d3, d4, d5, d6, d7)>
-func @fuse_by_collapsing_change_reshape_order(%arg0 : tensor<9x56x2x60x6xi32>,
+func.func @fuse_by_collapsing_change_reshape_order(%arg0 : tensor<9x56x2x60x6xi32>,
     %arg1 : tensor<7x8x2xi32>, %arg2 : tensor<6x3x4x5xi32>) -> tensor<2x3x4x5x6x7x8x9xi32> {
   %expand = tensor.expand_shape %arg0 [[0], [1, 2], [3], [4, 5, 6], [7]]
       : tensor<9x56x2x60x6xi32> into tensor<9x7x8x2x3x4x5x6xi32>
@@ -164,7 +164,7 @@ func @fuse_by_collapsing_change_reshape_order(%arg0 : tensor<9x56x2x60x6xi32>,
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d5, d6, d0)>
 #map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d4, d1, d2, d3)>
 #map3 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2, d3, d4, d5, d6, d7)>
-func @fuse_by_collapsing_dynamic(%arg0 : tensor<?x?x?x?x?xi32>,
+func.func @fuse_by_collapsing_dynamic(%arg0 : tensor<?x?x?x?x?xi32>,
     %arg1 : tensor<?x?x?xi32>, %arg2 : tensor<?x?x?x?xi32>) -> tensor<?x3x?x5x?x7x?x?xi32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -224,7 +224,7 @@ func @fuse_by_collapsing_dynamic(%arg0 : tensor<?x?x?x?x?xi32>,
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
-func @fuse_reductions(%arg0 : tensor<2x?x5xf32>, %arg1 : tensor<2x5xf32>) -> tensor<2x5xf32> {
+func.func @fuse_reductions(%arg0 : tensor<2x?x5xf32>, %arg1 : tensor<2x5xf32>) -> tensor<2x5xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] : tensor<2x?x5xf32> into tensor<2x6x?x5xf32>
   %1 = linalg.generic {
       indexing_maps = [#map0, #map1],
@@ -252,7 +252,7 @@ func @fuse_reductions(%arg0 : tensor<2x?x5xf32>, %arg1 : tensor<2x5xf32>) -> ten
 // Test no fusion because the folded dimensions are not all preserved.
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-func @no_fuse_unpreserved_folding(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2x3xf32>) -> tensor<2x3x4x5xf32> {
+func.func @no_fuse_unpreserved_folding(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2x3xf32>) -> tensor<2x3x4x5xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] : tensor<2x12x5xf32> into tensor<2x3x4x5xf32>
   %init = linalg.init_tensor [2, 3, 4, 5] : tensor<2x3x4x5xf32>
   %1 = linalg.generic {
@@ -279,7 +279,7 @@ func @no_fuse_unpreserved_folding(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2x3
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d0)>
 #map2 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d1, d3)>
-func @no_fuse_unpreserved_folding_transpose(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2xf32>) -> tensor<2x4x3x5xf32> {
+func.func @no_fuse_unpreserved_folding_transpose(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2xf32>) -> tensor<2x4x3x5xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] : tensor<2x12x5xf32> into tensor<2x3x4x5xf32>
   %init = linalg.init_tensor [2, 4, 3, 5] : tensor<2x4x3x5xf32>
   %1 = linalg.generic {
@@ -306,7 +306,7 @@ func @no_fuse_unpreserved_folding_transpose(%arg0 : tensor<2x12x5xf32>, %arg1 :
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
 #map2 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
-func @no_fuse_mismatched_iterator_types(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2x3xf32>) -> tensor<2x5xf32> {
+func.func @no_fuse_mismatched_iterator_types(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2x3xf32>) -> tensor<2x5xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] : tensor<2x12x5xf32> into tensor<2x3x4x5xf32>
   %init = linalg.init_tensor [2, 5] : tensor<2x5xf32>
   %1 = linalg.generic {
@@ -334,7 +334,7 @@ func @no_fuse_mismatched_iterator_types(%arg0 : tensor<2x12x5xf32>, %arg1 : tens
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
 #map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-func @control_fusion(%arg0 : tensor<6xf32>, %arg1 : tensor<20xf32>) -> tensor<2x3x4x5xf32> {
+func.func @control_fusion(%arg0 : tensor<6xf32>, %arg1 : tensor<20xf32>) -> tensor<2x3x4x5xf32> {
   %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<6xf32> into tensor<2x3xf32>
   %1 = tensor.expand_shape %arg1 [[0, 1]] : tensor<20xf32> into tensor<4x5xf32>
     %init = linalg.init_tensor [2, 3, 4, 5] : tensor<2x3x4x5xf32>
@@ -381,7 +381,7 @@ func @control_fusion(%arg0 : tensor<6xf32>, %arg1 : tensor<20xf32>) -> tensor<2x
 
 // Corner case that isn't handled currently.
 #map = affine_map<(d0) -> (d0)>
-func @zero_D_test(%arg0: tensor<f32>) -> tensor<1xf32> {
+func.func @zero_D_test(%arg0: tensor<f32>) -> tensor<1xf32> {
   %0 = tensor.expand_shape %arg0 [] : tensor<f32> into tensor<1xf32>
   %init = linalg.init_tensor [1] : tensor<1xf32>
   %1 = linalg.generic {
@@ -404,7 +404,7 @@ func @zero_D_test(%arg0: tensor<f32>) -> tensor<1xf32> {
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d1, d0, d2, d3)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-func @fuse_only_one_reassociation(%arg0 : tensor<?x?xf32>, %arg1 : tensor<4x?x?x8xf32>) -> tensor<4x?x?x8xf32> {
+func.func @fuse_only_one_reassociation(%arg0 : tensor<?x?xf32>, %arg1 : tensor<4x?x?x8xf32>) -> tensor<4x?x?x8xf32> {
   %0 = tensor.expand_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?xf32> into tensor<?x4x?x8xf32>
   %1 = linalg.generic {
       indexing_maps = [#map0, #map1, #map1],
@@ -438,7 +438,7 @@ func @fuse_only_one_reassociation(%arg0 : tensor<?x?xf32>, %arg1 : tensor<4x?x?x
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3, d1)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d3, d1, d0, d2)>
-func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>) -> tensor<?x8x?x4xi32> {
+func.func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>) -> tensor<?x8x?x4xi32> {
   %c0 = arith.constant 0 : index
   %c2 = arith.constant 2 : index
   %0 = tensor.expand_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?xi32> into tensor<?x4x?x8xi32>
@@ -496,7 +496,7 @@ func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>) -> tensor<?x8x?x4xi32>
 // So no change in the code.
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3, d1)>
 #map1 = affine_map<(d0, d1, d2, d3) -> ()>
-func @no_fold_non_consecutive_reduction_dims(%arg0 : tensor<?x?xi32>) -> tensor<i32> {
+func.func @no_fold_non_consecutive_reduction_dims(%arg0 : tensor<?x?xi32>) -> tensor<i32> {
   %c0 = arith.constant 0 : index
   %c2 = arith.constant 2 : index
   %0 = tensor.expand_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?xi32> into tensor<?x4x?x8xi32>

diff --git a/mlir/test/Dialect/Linalg/fusion-2-level.mlir b/mlir/test/Dialect/Linalg/fusion-2-level.mlir
index ca385c5e0d748..83eba26982d97 100644
--- a/mlir/test/Dialect/Linalg/fusion-2-level.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-2-level.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -test-linalg-greedy-fusion | FileCheck %s
 
-func @f1(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>, %B: memref<?x?xf32, offset: ?, strides: [?, 1]>, %C: memref<?x?xf32, offset: ?, strides: [?, 1]>, %D: memref<?x?xf32, offset: ?, strides: [?, 1]>, %E: memref<?x?xf32, offset: ?, strides: [?, 1]>) -> memref<?x?xf32, offset: ?, strides: [?, 1]> {
+func.func @f1(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>, %B: memref<?x?xf32, offset: ?, strides: [?, 1]>, %C: memref<?x?xf32, offset: ?, strides: [?, 1]>, %D: memref<?x?xf32, offset: ?, strides: [?, 1]>, %E: memref<?x?xf32, offset: ?, strides: [?, 1]>) -> memref<?x?xf32, offset: ?, strides: [?, 1]> {
   %c1 = arith.constant 1 : index
   %c0 = arith.constant 0 : index
   %c4 = arith.constant 4 : index

diff --git a/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir b/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
index 868b6e5f3a7d6..0ab282296ba88 100644
--- a/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
@@ -4,7 +4,7 @@
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @add_mul_fusion
-func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -46,7 +46,7 @@ func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : te
 #map1 = affine_map<(d0, d1) -> ()>
 
 // CHECK-LABEL: @scalar_add_mul_fusion
-func @scalar_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : f32, %arg2 : f32) -> tensor<?x?xf32>
+func.func @scalar_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : f32, %arg2 : f32) -> tensor<?x?xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -88,7 +88,7 @@ func @scalar_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : f32, %arg2 : f32) ->
 #map1 = affine_map<(d0, d1) -> (d1, d0)>
 
 // CHECK-LABEL: @transpose_add_mul_fusion
-func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -122,7 +122,7 @@ func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
 #map1 = affine_map<(d0, d1) -> (d1, d0)>
 
 // CHECK-LABEL: @add_transpose_mul_fusion
-func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -157,7 +157,7 @@ func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
 #map2 = affine_map<(d0) -> (d0)>
 
 // CHECK-LABEL: @add_broadcast_mul_fusion
-func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -190,7 +190,7 @@ func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg
 #map0 = affine_map<() -> ()>
 
 // CHECK-LABEL: @add_mul_scalar_fusion
-func @add_mul_scalar_fusion(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32>
+func.func @add_mul_scalar_fusion(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32>
 {
   %0 = linalg.init_tensor [] : tensor<f32>
   %1 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = []}
@@ -218,7 +218,7 @@ func @add_mul_scalar_fusion(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tenso
 
 #map0 = affine_map<(d0, d1, d2) -> (d0)>
 #map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-func @generic_op_constant_fusion(%arg0 : tensor<5x?x?xf32>) -> tensor<5x?x?xf32>
+func.func @generic_op_constant_fusion(%arg0 : tensor<5x?x?xf32>) -> tensor<5x?x?xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -249,7 +249,7 @@ func @generic_op_constant_fusion(%arg0 : tensor<5x?x?xf32>) -> tensor<5x?x?xf32>
 
 #map0 = affine_map<(d0, d1, d2) -> ()>
 #map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-func @generic_op_zero_dim_constant_fusion(%arg0 : tensor<5x?x?xf32>)
+func.func @generic_op_zero_dim_constant_fusion(%arg0 : tensor<5x?x?xf32>)
   -> tensor<5x?x?xf32>
 {
   %c0 = arith.constant 0 : index
@@ -280,7 +280,7 @@ func @generic_op_zero_dim_constant_fusion(%arg0 : tensor<5x?x?xf32>)
 // -----
 
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
-func @producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>,
+func.func @producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>,
                                        %arg1: tensor<?x?xi32>) -> tensor<?x?xi32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -332,7 +332,7 @@ func @producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>,
 // -----
 
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
-func @indexed_producer_consumer_fusion(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+func.func @indexed_producer_consumer_fusion(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %0 = tensor.dim %arg0, %c0 : tensor<?x?xi32>
@@ -385,7 +385,7 @@ func @indexed_producer_consumer_fusion(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32
 // The indices of the first generic op are swapped after fusion.
 #map0 = affine_map<(d0, d1) -> (d1, d0)>
 #map1 = affine_map<(d0, d1) -> (d0, d1)>
-func @indexed_producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>)
+func.func @indexed_producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>)
                                                -> tensor<?x?xi32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -448,7 +448,7 @@ func @indexed_producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>)
 #map1 = affine_map<(d0) -> (d0)>
 #map2 = affine_map<(d0, d1) -> (d0, d1)>
 #map3 = affine_map<(d0, d1) -> (d1)>
-func @one_dim_indexed_producer_consumer_fusion(%arg0 : tensor<?xi32>,
+func.func @one_dim_indexed_producer_consumer_fusion(%arg0 : tensor<?xi32>,
                                                %arg1 : tensor<?x?xi32>) -> tensor<?x?xi32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -494,7 +494,7 @@ func @one_dim_indexed_producer_consumer_fusion(%arg0 : tensor<?xi32>,
 
 // -----
 
-func @scalar_generic_fusion
+func.func @scalar_generic_fusion
   (%arg0: tensor<5x1x1xf32>, %arg1 : tensor<i32>) -> tensor<10xf32>
 {
   %c0 = arith.constant 0 : index
@@ -536,7 +536,7 @@ func @scalar_generic_fusion
 
 // -----
 
-func @constant_fusion(%arg0 : tensor<4xf32>) -> (tensor<4xf32>) {
+func.func @constant_fusion(%arg0 : tensor<4xf32>) -> (tensor<4xf32>) {
   %cst = arith.constant dense<1.0> : tensor<4xf32>
   %1 = linalg.init_tensor [4] : tensor<4xf32>
   %2 = linalg.generic
@@ -571,7 +571,7 @@ func @constant_fusion(%arg0 : tensor<4xf32>) -> (tensor<4xf32>) {
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0) -> (0, d0)>
 #map2 = affine_map<(d0) -> (0)>
-func @consumer_with_reduction(%arg0: tensor<1x10xf32>,
+func.func @consumer_with_reduction(%arg0: tensor<1x10xf32>,
                               %arg1: tensor<1x10xf32>,
                               %arg2: tensor<1xf32>) -> tensor<1xf32> {
   %init = linalg.init_tensor [1, 10] : tensor<1x10xf32>
@@ -614,7 +614,7 @@ func @consumer_with_reduction(%arg0: tensor<1x10xf32>,
 //       CHECK:   %[[RES:.*]] = linalg.generic
 //   CHECK-NOT:   linalg.generic
 //       CHECK:   return %[[RES]]
-func @sigmoid_dynamic_dim(%0: tensor<?x1xf32>) -> tensor<?x1xf32> {
+func.func @sigmoid_dynamic_dim(%0: tensor<?x1xf32>) -> tensor<?x1xf32> {
   %cp5 = arith.constant 5.000000e-01 : f32
   %c0 = arith.constant 0 : index
   %shape = shape.shape_of %0 : tensor<?x1xf32> -> tensor<?xindex>
@@ -648,11 +648,11 @@ func @sigmoid_dynamic_dim(%0: tensor<?x1xf32>) -> tensor<?x1xf32> {
 
 // -----
 
-func private @compute1(%a: f64) -> f64
-func private @compute2(%a: f64, %b: i32) -> i32
+func.func private @compute1(%a: f64) -> f64
+func.func private @compute2(%a: f64, %b: i32) -> i32
 
 // CHECK-LABEL: func @generic_index_op2(
-func @generic_index_op2(%arg0: tensor<1x8xf64>, %arg1: tensor<1x8xi32>) -> tensor<1x8xi32> {
+func.func @generic_index_op2(%arg0: tensor<1x8xf64>, %arg1: tensor<1x8xi32>) -> tensor<1x8xi32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(i, j) -> (i, j)>],
     iterator_types = ["parallel", "parallel"]}
@@ -685,7 +685,7 @@ func @generic_index_op2(%arg0: tensor<1x8xf64>, %arg1: tensor<1x8xi32>) -> tenso
 // -----
 
 // CHECK-LABEL: func @no_fuse_constant_with_reduction
-func @no_fuse_constant_with_reduction() -> tensor<3xf32>
+func.func @no_fuse_constant_with_reduction() -> tensor<3xf32>
 {
   //      CHECK: %[[CONST:.+]] = arith.constant {{.+}} : tensor<3x2xf32>
   //      CHECK: %[[RESULT:.+]] = linalg.generic
@@ -712,7 +712,7 @@ func @no_fuse_constant_with_reduction() -> tensor<3xf32>
   indexing_maps = [#map, #map],
   iterator_types = ["parallel", "parallel"]
 }
-func @break_outs_dependency(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func @break_outs_dependency(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
   %0 = linalg.generic #trait ins(%arg0 : tensor<?x?xf32>) outs(%arg0 : tensor<?x?xf32>) {
        ^bb0(%arg1 : f32, %arg2 : f32) :
@@ -743,7 +743,7 @@ func @break_outs_dependency(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
 
 // -----
 
-func @fuse_scalar_constant(%arg0 : tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<?x?xi32>) {
+func.func @fuse_scalar_constant(%arg0 : tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<?x?xi32>) {
   %cst = arith.constant 4.0 : f32
   %c42 = arith.constant 42 : i32
   %c0 = arith.constant 0 : index
@@ -778,7 +778,7 @@ func @fuse_scalar_constant(%arg0 : tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<
 // -----
 
 // CHECK-LABEL: @transpose_fold_2d_fp32
-func @transpose_fold_2d_fp32(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
+func.func @transpose_fold_2d_fp32(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
   %input = arith.constant dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]> : tensor<2x3xf32>
   //               CHECK: %[[CST:.+]] = arith.constant
   // CHECK-SAME{LITERAL}:   dense<[[0.000000e+00, 3.000000e+00], [1.000000e+00, 4.000000e+00], [2.000000e+00, 5.000000e+00]]> : tensor<3x2xf32>
@@ -796,7 +796,7 @@ func @transpose_fold_2d_fp32(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
 // -----
 
 // CHECK-LABEL: @transpose_fold_2d_fp64
-func @transpose_fold_2d_fp64(%init: tensor<3x2xf64>) -> tensor<3x2xf64> {
+func.func @transpose_fold_2d_fp64(%init: tensor<3x2xf64>) -> tensor<3x2xf64> {
   %input = arith.constant dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]> : tensor<2x3xf64>
   //               CHECK: %[[CST:.+]] = arith.constant
   // CHECK-SAME{LITERAL}:   dense<[[0.000000e+00, 3.000000e+00], [1.000000e+00, 4.000000e+00], [2.000000e+00, 5.000000e+00]]> : tensor<3x2xf64>
@@ -814,7 +814,7 @@ func @transpose_fold_2d_fp64(%init: tensor<3x2xf64>) -> tensor<3x2xf64> {
 // -----
 
 // CHECK-LABEL: @transpose_fold_4d_i32
-func @transpose_fold_4d_i32(%init: tensor<3x1x4x2xi32>) -> tensor<3x1x4x2xi32> {
+func.func @transpose_fold_4d_i32(%init: tensor<3x1x4x2xi32>) -> tensor<3x1x4x2xi32> {
   %input = arith.constant dense<[[
     [[ 0,  1,  2,  3], [ 4,  5,  6,  7], [ 8,  9, 10, 11]],
     [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]
@@ -838,7 +838,7 @@ func @transpose_fold_4d_i32(%init: tensor<3x1x4x2xi32>) -> tensor<3x1x4x2xi32> {
 // -----
 
 // CHECK-LABEL: @transpose_fold_4d_i16
-func @transpose_fold_4d_i16(%init: tensor<3x1x4x2xi16>) -> tensor<3x1x4x2xi16> {
+func.func @transpose_fold_4d_i16(%init: tensor<3x1x4x2xi16>) -> tensor<3x1x4x2xi16> {
   %input = arith.constant dense<[[
     [[ 0,  1,  2,  3], [ 4,  5,  6,  7], [ 8,  9, 10, 11]],
     [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]
@@ -862,7 +862,7 @@ func @transpose_fold_4d_i16(%init: tensor<3x1x4x2xi16>) -> tensor<3x1x4x2xi16> {
 // -----
 
 // CHECK-LABEL: @transpose_nofold_non_cst_input
-func @transpose_nofold_non_cst_input(%input: tensor<2x3xf32>, %init: tensor<3x2xf32>) -> tensor<3x2xf32> {
+func.func @transpose_nofold_non_cst_input(%input: tensor<2x3xf32>, %init: tensor<3x2xf32>) -> tensor<3x2xf32> {
   // CHECK: linalg.generic
   %1 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>, affine_map<(d0, d1) -> (d0, d1)>],
@@ -877,7 +877,7 @@ func @transpose_nofold_non_cst_input(%input: tensor<2x3xf32>, %init: tensor<3x2x
 // -----
 
 // CHECK-LABEL: @transpose_nofold_yield_const
-func @transpose_nofold_yield_const(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
+func.func @transpose_nofold_yield_const(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
   %input = arith.constant dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]> : tensor<2x3xf32>
   %cst = arith.constant 8.0 : f32
   // CHECK: linalg.generic
@@ -894,7 +894,7 @@ func @transpose_nofold_yield_const(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
 // -----
 
 // CHECK-LABEL: @transpose_nofold_multi_ops_in_region
-func @transpose_nofold_multi_ops_in_region(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
+func.func @transpose_nofold_multi_ops_in_region(%init: tensor<3x2xf32>) -> tensor<3x2xf32> {
   %input = arith.constant dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]> : tensor<2x3xf32>
   // CHECK: linalg.generic
   %1 = linalg.generic {
@@ -923,7 +923,7 @@ func @transpose_nofold_multi_ops_in_region(%init: tensor<3x2xf32>) -> tensor<3x2
 // CHECK-LABEL: @no_fusion_missing_reduction_shape
 // CHECK: linalg.generic
 // CHECK: linalg.generic
-func @no_fusion_missing_reduction_shape(%arg0: tensor<f32>, %arg1: index) -> tensor<?xf32> {
+func.func @no_fusion_missing_reduction_shape(%arg0: tensor<f32>, %arg1: index) -> tensor<?xf32> {
   %cst = arith.constant 0xFF800000 : f32
   %4 = linalg.init_tensor [%arg1, %arg1] : tensor<?x?xf32>
   %5 = linalg.generic {
@@ -948,7 +948,7 @@ func @no_fusion_missing_reduction_shape(%arg0: tensor<f32>, %arg1: index) -> ten
 
 // -----
 
-func @illegal_fusion(%arg0 : tensor<5000xi64>, %arg1 : tensor<5000xi32>) -> tensor<5000xi32> {
+func.func @illegal_fusion(%arg0 : tensor<5000xi64>, %arg1 : tensor<5000xi32>) -> tensor<5000xi32> {
   %c1_i32 = arith.constant 1 : i32
   %0 = linalg.generic {
         indexing_maps = [affine_map<(d0) -> (d0)>],
@@ -985,7 +985,7 @@ func @illegal_fusion(%arg0 : tensor<5000xi64>, %arg1 : tensor<5000xi32>) -> tens
 //  CHECK-SAME: ins(%[[ARG0]] : tensor<?xf32>)
 //  CHECK-SAME: outs({{.*}} : tensor<?xf32>) {
 #map0 = affine_map<(d0) -> (d0)>
-func @fold_fill_generic_basic(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
+func.func @fold_fill_generic_basic(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
   %c0 = arith.constant 0 : index
   %cst = arith.constant 7.0 : f32
   %0 = tensor.dim %arg0, %c0 : tensor<?xf32>
@@ -1009,7 +1009,7 @@ func @fold_fill_generic_basic(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
 //  CHECK-SAME: outs({{.*}} : tensor<?x?xf32>) {
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> (d1, d0)>
-func @fold_fill_generic_mixedaccess(%arg0: tensor<?x?xf32>) -> (tensor<?x?xf32>) {
+func.func @fold_fill_generic_mixedaccess(%arg0: tensor<?x?xf32>) -> (tensor<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 0 : index
   %cst1 = arith.constant 7.0 : f32

diff --git a/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir b/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
index 103a04d79ba4a..1efaaa3b75a84 100644
--- a/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
@@ -9,7 +9,7 @@
   indexing_maps = [#map0, #map0, #map0, #map0],
   iterator_types = ["parallel", "parallel"]
 }
-func @test_fusion_limit(
+func.func @test_fusion_limit(
     %arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>,
     %arg3 : tensor<?x?xf32>, %arg4 : tensor<?x?xf32>, %arg5 : tensor<?x?xf32>)
     -> tensor<?x?xf32> {

diff --git a/mlir/test/Dialect/Linalg/fusion-indexed.mlir b/mlir/test/Dialect/Linalg/fusion-indexed.mlir
index 03ac767136f00..20e98f7370374 100644
--- a/mlir/test/Dialect/Linalg/fusion-indexed.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-indexed.mlir
@@ -6,7 +6,7 @@
   indexing_maps = [#id_2d, #id_2d, #id_2d],
   iterator_types = ["parallel", "parallel"]
 }
-func @fuse_indexed_consumer(%A: memref<?x?xf32>,
+func.func @fuse_indexed_consumer(%A: memref<?x?xf32>,
                                     %B: memref<?x?xf32>,
                                     %C: memref<?x?xf32>,
                                     %D: memref<?x?xf32>) {
@@ -66,7 +66,7 @@ func @fuse_indexed_consumer(%A: memref<?x?xf32>,
 // -----
 
 #map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-func @fuse_indexed_producer(%A: memref<?x?xindex>,
+func.func @fuse_indexed_producer(%A: memref<?x?xindex>,
                             %B: memref<?x?xindex>) {
   %c1 = arith.constant 1 : index
   %c0 = arith.constant 0 : index
@@ -116,7 +116,7 @@ func @fuse_indexed_producer(%A: memref<?x?xindex>,
 // -----
 
 #map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-func @fuse_indexed_producer_tiled_second_dim_only(%A: memref<?x?xindex>,
+func.func @fuse_indexed_producer_tiled_second_dim_only(%A: memref<?x?xindex>,
                                                   %B: memref<?x?xindex>) {
   %c1 = arith.constant 1 : index
   %c0 = arith.constant 0 : index

diff --git a/mlir/test/Dialect/Linalg/fusion-pattern.mlir b/mlir/test/Dialect/Linalg/fusion-pattern.mlir
index f2aa5dc3970e5..787eff6f39aeb 100644
--- a/mlir/test/Dialect/Linalg/fusion-pattern.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-pattern.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -test-linalg-fusion-transform-patterns -canonicalize -cse -split-input-file | FileCheck %s
 
 module {
-  func @basic_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
+  func.func @basic_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
                      %arg2: memref<?x?xf32>) {
     %cst = arith.constant 0.000000e+00 : f32
     linalg.fill ins(%cst : f32) outs(%arg2 : memref<?x?xf32>)
@@ -74,7 +74,7 @@ module {
 // -----
 
 module {
-  func @matmul_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
+  func.func @matmul_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
                       %arg2: memref<?x?xf32>, %arg3: memref<?x?xf32>,
                       %arg4: memref<?x?xf32>) {
     linalg.matmul ins(%arg0, %arg1 : memref<?x?xf32>, memref<?x?xf32>)
@@ -151,7 +151,7 @@ module {
 // -----
 
 module {
-  func @matmul_plus_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
+  func.func @matmul_plus_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
                            %arg2: memref<?x?xf32>) {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
@@ -201,7 +201,7 @@ module {
 // -----
 
 module {
-  func @matmul_plus_transpose_matmul(%arg0: memref<?x?xf32>,
+  func.func @matmul_plus_transpose_matmul(%arg0: memref<?x?xf32>,
                                      %arg1: memref<?x?xf32>,
                                      %arg2: memref<?x?xf32>) {
     %c0 = arith.constant 0 : index
@@ -243,7 +243,7 @@ module {
 #map2 = affine_map<(d0)[s0] -> (16, -d0 + s0)>
 #map3 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 module {
-  func @basic_no_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
+  func.func @basic_no_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
                         %arg2: memref<?x?xf32>) {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
@@ -284,7 +284,7 @@ module {
 // -----
 
 module {
-  func @basic_conv_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
+  func.func @basic_conv_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
                           %arg2: memref<?x?xf32>) {
     %cst = arith.constant 0.000000e+00 : f32
     linalg.fill ins(%cst : f32) outs(%arg2 : memref<?x?xf32>)

diff --git a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
index a1d428865120f..ea699d820b610 100644
--- a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
@@ -11,7 +11,7 @@
 // CHECK-SAME: ins(%[[A]], %[[B]] : tensor<?x16xf32>, tensor<16xf32>) outs(%[[RI]] : tensor<?x16xf32>)
 //      CHECK: %[[RR:.*]] = tensor.expand_shape %[[R]] {{\[}}[0, 1], [2]] : tensor<?x16xf32> into tensor<?x112x16xf32>
 //      CHECK: return %[[RR]] : tensor<?x112x16xf32>
-func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf32>) -> tensor<?x112x16xf32> {
+func.func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf32>) -> tensor<?x112x16xf32> {
   %0 = tensor.expand_shape %A [[0, 1], [2]]
       : tensor<?x16xf32> into tensor<?x112x16xf32>
   %2 = linalg.generic {indexing_maps = [
@@ -41,7 +41,7 @@ func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf
 // CHECK-SAME: ins(%[[A]], %[[B]], %[[C]] : tensor<12544x16xf32>, tensor<12544x16xf32>, tensor<16xf32>) outs(%[[RI]] : tensor<12544x16xf32>)
 //      CHECK: %[[RR:.*]] = tensor.expand_shape %[[R]] {{\[}}[0, 1], [2]] : tensor<12544x16xf32> into tensor<112x112x16xf32>
 //      CHECK: return %[[RR]] : tensor<112x112x16xf32>
-func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
+func.func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
   %C: tensor<16xf32>) -> tensor<112x112x16xf32> {
   %0 = tensor.expand_shape %A [[0, 1], [2]]
       : tensor<12544x16xf32> into tensor<112x112x16xf32>
@@ -72,7 +72,7 @@ func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
 // CHECK: tensor.expand_shape {{.*}} : tensor<12544x16xf32> into tensor<112x112x16xf32>
 // CHECK: linalg.generic
 // CHECK: } -> tensor<112x112x16xf32>
-func @reshape_negative(%A: tensor<12544x16xf32>, %B: tensor<112xf32>) -> tensor<112x112x16xf32> {
+func.func @reshape_negative(%A: tensor<12544x16xf32>, %B: tensor<112xf32>) -> tensor<112x112x16xf32> {
   %20 = tensor.expand_shape %A [[0, 1], [2]]
       : tensor<12544x16xf32> into tensor<112x112x16xf32>
   %21 = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32>
@@ -91,7 +91,7 @@ func @reshape_negative(%A: tensor<12544x16xf32>, %B: tensor<112xf32>) -> tensor<
 
 // -----
 
-func @type_correctness(%arg0 : tensor<6x5xi32>, %arg1 : tensor<5xf32>,
+func.func @type_correctness(%arg0 : tensor<6x5xi32>, %arg1 : tensor<5xf32>,
     %arg2 : tensor<5xf32>) -> tensor<2x3x5xf32> {
   %cst_6 = arith.constant 1.000000e+00 : f32
   %cst_7 = arith.constant 7.000000e+00 : f32
@@ -127,7 +127,7 @@ func @type_correctness(%arg0 : tensor<6x5xi32>, %arg1 : tensor<5xf32>,
 
 // -----
 
-func @generic_op_index_semantics(%A: tensor<?x16xi64>, %B: tensor<16xi64>, %init: tensor<?x112x16xi64>) -> tensor<?x112x16xi64> {
+func.func @generic_op_index_semantics(%A: tensor<?x16xi64>, %B: tensor<16xi64>, %init: tensor<?x112x16xi64>) -> tensor<?x112x16xi64> {
   %0 = tensor.expand_shape %A [[0, 1], [2]]
       : tensor<?x16xi64> into tensor<?x112x16xi64>
   %2 = linalg.generic {indexing_maps = [

diff --git a/mlir/test/Dialect/Linalg/fusion-sequence.mlir b/mlir/test/Dialect/Linalg/fusion-sequence.mlir
index 10e1fd3eb439f..ffe85804a3093 100644
--- a/mlir/test/Dialect/Linalg/fusion-sequence.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-sequence.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -pass-pipeline="func.func(test-linalg-tile-and-fuse{tile-sizes=16,32,64}),resolve-shaped-type-result-dims,canonicalize,cse" -split-input-file %s | FileCheck %s
 
 module {
-  func @three_op_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
+  func.func @three_op_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
                         %arg2: memref<?xf32>, %arg3 : memref<?x?xf32>) {
     %cst = arith.constant 0.000000e+00 : f32
     %c0 = arith.constant 0 : index
@@ -57,7 +57,7 @@ module {
 // -----
 
 module {
-  func @sequence_of_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
+  func.func @sequence_of_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
                            %arg2: memref<?x?xf32>, %arg3: memref<?x?xf32>,
                            %arg4: memref<?x?xf32>) {
     %cst = arith.constant 0.000000e+00 : f32
@@ -140,7 +140,7 @@ module {
 // -----
 
 module {
-  func @tensor_op_fusion(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
+  func.func @tensor_op_fusion(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
                          %arg2: tensor<?x?xf32>, %arg3: tensor<?xf32>)
     -> tensor<?x?xf32> {
     %c0 = arith.constant 0 : index
@@ -193,7 +193,7 @@ module {
 // -----
 
 module {
-  func @tensor_matmul_fusion(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
+  func.func @tensor_matmul_fusion(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
                              %arg2: tensor<?x?xf32>, %arg3: tensor<?x?xf32>,
            %arg4: tensor<?x?xf32>, %arg5: tensor<?x?xf32>,
            %arg6: tensor<?x?xf32>) -> tensor<?x?xf32> {

diff --git a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
index fe6ad9b5df76d..56f4c9d628173 100644
--- a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -test-linalg-tensor-fusion-transform-patterns -resolve-shaped-type-result-dims -canonicalize -cse --split-input-file | FileCheck %s
 
 module {
-  func @matmul_fusion(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>,
+  func.func @matmul_fusion(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>,
                       %AB_init: tensor<?x?xf32>, %C: tensor<?x?xf32>,
                       %ABC_init: tensor<?x?xf32>) -> tensor<?x?xf32> {
     %AB = linalg.matmul ins(%A, %B : tensor<?x?xf32>, tensor<?x?xf32>)
@@ -85,7 +85,7 @@ module {
 // -----
 
 module {
-  func @matmul_plus_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
+  func.func @matmul_plus_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
                            %arg2: tensor<?x?xf32>) -> tensor<?x?xf32>{
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
@@ -139,7 +139,7 @@ module {
 // -----
 
 module {
-  func @matmul_out_fusion(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
+  func.func @matmul_out_fusion(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
                       %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
     %c0 = arith.constant 0.0 : f32
     %0 = linalg.fill ins(%c0 : f32) outs(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
@@ -174,7 +174,7 @@ module {
 // -----
 
 module {
-  func @generic_plus_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
+  func.func @generic_plus_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
                       %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
     %c0 = arith.constant 0.0 : f32
     %0 = linalg.generic {

diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
index 4e17564ee9873..3218fa4df4159 100644
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -test-linalg-greedy-fusion -split-input-file | FileCheck %s
 
-func @f1(%A: memref<?x?xf32, offset: 0, strides: [?, 1]>,
+func.func @f1(%A: memref<?x?xf32, offset: 0, strides: [?, 1]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, 1]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, 1]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, 1]>,
@@ -48,7 +48,7 @@ func @f1(%A: memref<?x?xf32, offset: 0, strides: [?, 1]>,
 // -----
 
 // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
-func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -100,7 +100,7 @@ func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 
 // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
 
-func @f3(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @f3(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -154,7 +154,7 @@ func @f3(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 
 // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
 
-func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -212,7 +212,7 @@ func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 // -----
 
 // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
-func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -290,7 +290,7 @@ func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 #map1 = affine_map<(d0) -> (d0 + 4)>
 #map2 = affine_map<(d0) -> (d0 + 3)>
 
-func @f6(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @f6(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -345,7 +345,7 @@ func @f6(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 
 // -----
 
-func @f7(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @f7(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -432,7 +432,7 @@ func @f7(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
 #map1 = affine_map<(d0) -> (d0 + 4)>
 #map2 = affine_map<(d0) -> (d0 + 3)>
 
-func @f8(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @f8(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
          %D: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -492,7 +492,7 @@ func @f8(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
   indexing_maps = [#id_2d, #id_2d, #id_2d],
   iterator_types = ["parallel", "parallel"]
 }
-func @pointwise(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
+func.func @pointwise(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
                 %B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
                 %C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
                 %D: memref<?x?xf32, offset: 0, strides: [?, ?]>) {
@@ -549,7 +549,7 @@ func @pointwise(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
   indexing_maps = [#id_2d, #id_2d, #id_2d],
   iterator_types = ["parallel", "parallel"]
 }
-func @pointwise_no_view(%M: index, %N: index) {
+func.func @pointwise_no_view(%M: index, %N: index) {
   %c1 = arith.constant 1 : index
   %c0 = arith.constant 0 : index
   %c3 = arith.constant 3 : index
@@ -607,7 +607,7 @@ func @pointwise_no_view(%M: index, %N: index) {
 #map1 = affine_map<(d0, d1) -> (d0, d1)>
 #map2 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 
-func @fusion_of_three(%arg0: memref<100x10xf32>,
+func.func @fusion_of_three(%arg0: memref<100x10xf32>,
                       %arg1: memref<100xf32>,
                       %arg2: memref<100x10xf32>) {
   %c0 = arith.constant 0 : index
@@ -679,7 +679,7 @@ func @fusion_of_three(%arg0: memref<100x10xf32>,
 #map3 = affine_map<(d0)[s0, s1] -> (s0 + 1, -d0 + s0 + s1)>
 #map4 = affine_map<(d0)[s0, s1] -> (s0 + 2, -d0 + s0 + s1)>
 
-func @fill_and_conv(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
+func.func @fill_and_conv(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %cst = arith.constant 0.000000e+00 : f32
   %c2 = arith.constant 2 : index
   %c3 = arith.constant 3 : index
@@ -712,7 +712,7 @@ func @fill_and_conv(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memre
 // -----
 
 // Test that different allocation-like ops are recognized and properly handled.
-func @accept_different_alloc_ops(%dim: index, %s0 : index, %s1: index) {
+func.func @accept_different_alloc_ops(%dim: index, %s0 : index, %s1: index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
index 5602d18f9abfb..a61b4fbc916a8 100644
--- a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -split-input-file -linalg-generalize-named-ops | FileCheck %s
 
-func @generalize_matmul_buffer(%A : memref<16x8xf32>, %B: memref<8x32xf32>, %C: memref<16x32xf32>) {
+func.func @generalize_matmul_buffer(%A : memref<16x8xf32>, %B: memref<8x32xf32>, %C: memref<16x32xf32>) {
   linalg.matmul ins(%A, %B: memref<16x8xf32>, memref<8x32xf32>)
                outs(%C: memref<16x32xf32>)
   return
@@ -29,7 +29,7 @@ func @generalize_matmul_buffer(%A : memref<16x8xf32>, %B: memref<8x32xf32>, %C:
 
 // -----
 
-func @generalize_matmul_tensor(%A : tensor<16x8xf32>, %B: tensor<8x32xf32>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func.func @generalize_matmul_tensor(%A : tensor<16x8xf32>, %B: tensor<8x32xf32>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.matmul ins(%A, %B: tensor<16x8xf32>, tensor<8x32xf32>)
                     outs(%C: tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
@@ -49,7 +49,7 @@ func @generalize_matmul_tensor(%A : tensor<16x8xf32>, %B: tensor<8x32xf32>, %C:
 
 // -----
 
-func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x3x4x2x3xf32>) {
+func.func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x3x4x2x3xf32>) {
   linalg.depthwise_conv_2d_nhwc_hwcm
      { dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
      ins(%input, %filter : memref<2x4x5x2xf32>, memref<2x2x2x3xf32>)
@@ -76,7 +76,7 @@ func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2
 
 // -----
 
-func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x2x3x2x3xf32>) {
+func.func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x2x3x2x3xf32>) {
   linalg.depthwise_conv_2d_nhwc_hwcm
      { dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
      ins(%input, %filter : memref<2x4x5x2xf32>, memref<2x2x2x3xf32>)
@@ -103,7 +103,7 @@ func @depthwise_conv_2d_nhwc_hwcm(%input: memref<2x4x5x2xf32>, %filter: memref<2
 
 // -----
 
-func @depthwise_conv_2d_nhwc_hwc(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
+func.func @depthwise_conv_2d_nhwc_hwc(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
   linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
     ins(%input, %filter: memref<1x113x113x96xf32>, memref<3x3x96xf32>)
     outs(%output: memref<1x56x56x96xf32>)
@@ -129,7 +129,7 @@ func @depthwise_conv_2d_nhwc_hwc(%input: memref<1x113x113x96xf32>, %filter: memr
 
 // -----
 
-func @conv_1d_nwc_wcf(%input: memref<?x?x?xf32>, %filter: memref<?x?x?xf32>, %output: memref<?x?x?xf32>) {
+func.func @conv_1d_nwc_wcf(%input: memref<?x?x?xf32>, %filter: memref<?x?x?xf32>, %output: memref<?x?x?xf32>) {
   linalg.conv_1d_nwc_wcf {dilations = dense<1> : tensor<1xi64>,
                                        strides = dense<1> : tensor<1xi64>}
      ins (%input, %filter: memref<?x?x?xf32>, memref<?x?x?xf32>)
@@ -155,7 +155,7 @@ func @conv_1d_nwc_wcf(%input: memref<?x?x?xf32>, %filter: memref<?x?x?xf32>, %ou
 
 // -----
 
-func @generalize_fill(%output: memref<?x?xf32>, %value : f32) {
+func.func @generalize_fill(%output: memref<?x?xf32>, %value : f32) {
   linalg.fill ins(%value : f32) outs(%output : memref<?x?xf32>)
   return
 }
@@ -177,7 +177,7 @@ func @generalize_fill(%output: memref<?x?xf32>, %value : f32) {
 
 // -----
 
-func @generalize_batch_matm_vec(%lhs : memref<?x?x?xi8>, %rhs: memref<?x?xi8>,  %out: memref<?x?xf32>) {
+func.func @generalize_batch_matm_vec(%lhs : memref<?x?x?xi8>, %rhs: memref<?x?xi8>,  %out: memref<?x?xf32>) {
   linalg.batch_matvec ins(%lhs, %rhs: memref<?x?x?xi8>, memref<?x?xi8>)
                      outs(%out: memref<?x?xf32>)
   return

diff --git a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
index d455d84c228d6..a769684b57bb5 100644
--- a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -split-input-file -linalg-generalize-named-ops | FileCheck %s
 
 // Verifies that different argument types is legal.
-func @generalize_matmul_tensor_f16f64f32(%A : tensor<16x8xf16>, %B: tensor<8x32xf64>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func.func @generalize_matmul_tensor_f16f64f32(%A : tensor<16x8xf16>, %B: tensor<8x32xf64>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.matmul ins(%A, %B: tensor<16x8xf16>, tensor<8x32xf64>)
                           outs(%C: tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
@@ -20,7 +20,7 @@ func @generalize_matmul_tensor_f16f64f32(%A : tensor<16x8xf16>, %B: tensor<8x32x
 // -----
 
 // Verifies that different argument types is legal.
-func @generalize_matmul_tensor_i16i64i32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
+func.func @generalize_matmul_tensor_i16i64i32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
   %0 = linalg.matmul ins(%A, %B: tensor<16x8xi16>, tensor<8x32xi64>)
                           outs(%C: tensor<16x32xi32>) -> tensor<16x32xi32>
   return %0: tensor<16x32xi32>
@@ -40,7 +40,7 @@ func @generalize_matmul_tensor_i16i64i32(%A : tensor<16x8xi16>, %B: tensor<8x32x
 // -----
 
 // Verifies that cast attributes control the cast operations used.
-func @generalize_matmul_tensor_i16i64i32_unsigned(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
+func.func @generalize_matmul_tensor_i16i64i32_unsigned(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
   %0 = linalg.matmul {cast = #linalg.type_fn<cast_unsigned>}
                      ins(%A, %B: tensor<16x8xi16>, tensor<8x32xi64>)
                           outs(%C: tensor<16x32xi32>) -> tensor<16x32xi32>
@@ -52,7 +52,7 @@ func @generalize_matmul_tensor_i16i64i32_unsigned(%A : tensor<16x8xi16>, %B: ten
 
 // -----
 
-func @generalize_matmul_tensor_i16i64f32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func.func @generalize_matmul_tensor_i16i64f32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.matmul ins(%A, %B: tensor<16x8xi16>, tensor<8x32xi64>)
                      outs(%C: tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
@@ -65,7 +65,7 @@ func @generalize_matmul_tensor_i16i64f32(%A : tensor<16x8xi16>, %B: tensor<8x32x
 
 // -----
 
-func @generalize_matmul_tensor_f16f64i32(%A : tensor<16x8xf16>, %B: tensor<8x32xf64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
+func.func @generalize_matmul_tensor_f16f64i32(%A : tensor<16x8xf16>, %B: tensor<8x32xf64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
   %0 = linalg.matmul ins(%A, %B: tensor<16x8xf16>, tensor<8x32xf64>)
                               outs(%C: tensor<16x32xi32>) -> tensor<16x32xi32>
   return %0: tensor<16x32xi32>
@@ -78,7 +78,7 @@ func @generalize_matmul_tensor_f16f64i32(%A : tensor<16x8xf16>, %B: tensor<8x32x
 
 // -----
 
-func @generalize_matmul_unsigned_tensor_i16i64i32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
+func.func @generalize_matmul_unsigned_tensor_i16i64i32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
   %0 = linalg.matmul_unsigned ins(%A, %B: tensor<16x8xi16>, tensor<8x32xi64>)
                               outs(%C: tensor<16x32xi32>) -> tensor<16x32xi32>
   return %0: tensor<16x32xi32>
@@ -91,7 +91,7 @@ func @generalize_matmul_unsigned_tensor_i16i64i32(%A : tensor<16x8xi16>, %B: ten
 
 // -----
 
-func @generalize_matmul_unsigned_tensor_i16i64f32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func.func @generalize_matmul_unsigned_tensor_i16i64f32(%A : tensor<16x8xi16>, %B: tensor<8x32xi64>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.matmul_unsigned ins(%A, %B: tensor<16x8xi16>, tensor<8x32xi64>)
                               outs(%C: tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
@@ -104,7 +104,7 @@ func @generalize_matmul_unsigned_tensor_i16i64f32(%A : tensor<16x8xi16>, %B: ten
 
 // -----
 
-func @generalize_matmul_unsigned_tensor_f16f64i32(%A : tensor<16x8xf16>, %B: tensor<8x32xf64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
+func.func @generalize_matmul_unsigned_tensor_f16f64i32(%A : tensor<16x8xf16>, %B: tensor<8x32xf64>, %C: tensor<16x32xi32>) -> tensor<16x32xi32> {
   %0 = linalg.matmul_unsigned ins(%A, %B: tensor<16x8xf16>, tensor<8x32xf64>)
                               outs(%C: tensor<16x32xi32>) -> tensor<16x32xi32>
   return %0: tensor<16x32xi32>
@@ -117,7 +117,7 @@ func @generalize_matmul_unsigned_tensor_f16f64i32(%A : tensor<16x8xf16>, %B: ten
 
 // -----
 
-func @generalize_pooling_nhwc_max_f32(%input : tensor<1x4x16x1xf32>, %shape: tensor<2x2xf32>, %output: tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32> {
+func.func @generalize_pooling_nhwc_max_f32(%input : tensor<1x4x16x1xf32>, %shape: tensor<2x2xf32>, %output: tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32> {
   %0 = linalg.pooling_nhwc_max {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xf32>, tensor<2x2xf32>) outs(%output : tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32>
   return %0: tensor<1x2x4x1xf32>
@@ -131,7 +131,7 @@ func @generalize_pooling_nhwc_max_f32(%input : tensor<1x4x16x1xf32>, %shape: ten
 
 // -----
 
-func @generalize_pooling_nhwc_max_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
+func.func @generalize_pooling_nhwc_max_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
   %0 = linalg.pooling_nhwc_max {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xi32>, tensor<2x2xi32>) outs(%output : tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32>
   return %0: tensor<1x2x4x1xi32>
@@ -143,7 +143,7 @@ func @generalize_pooling_nhwc_max_i32(%input : tensor<1x4x16x1xi32>, %shape: ten
 
 // -----
 
-func @generalize_pooling_nhwc_max_unsigned_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
+func.func @generalize_pooling_nhwc_max_unsigned_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
   %0 = linalg.pooling_nhwc_max_unsigned {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xi32>, tensor<2x2xi32>) outs(%output : tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32>
   return %0: tensor<1x2x4x1xi32>
@@ -155,7 +155,7 @@ func @generalize_pooling_nhwc_max_unsigned_i32(%input : tensor<1x4x16x1xi32>, %s
 
 // -----
 
-func @generalize_pooling_nhwc_min_f32(%input : tensor<1x4x16x1xf32>, %shape: tensor<2x2xf32>, %output: tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32> {
+func.func @generalize_pooling_nhwc_min_f32(%input : tensor<1x4x16x1xf32>, %shape: tensor<2x2xf32>, %output: tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32> {
   %0 = linalg.pooling_nhwc_min {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xf32>, tensor<2x2xf32>) outs(%output : tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32>
   return %0: tensor<1x2x4x1xf32>
@@ -169,7 +169,7 @@ func @generalize_pooling_nhwc_min_f32(%input : tensor<1x4x16x1xf32>, %shape: ten
 
 // -----
 
-func @generalize_pooling_nhwc_min_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
+func.func @generalize_pooling_nhwc_min_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
   %0 = linalg.pooling_nhwc_min {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xi32>, tensor<2x2xi32>) outs(%output : tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32>
   return %0: tensor<1x2x4x1xi32>
@@ -181,7 +181,7 @@ func @generalize_pooling_nhwc_min_i32(%input : tensor<1x4x16x1xi32>, %shape: ten
 
 // -----
 
-func @generalize_pooling_nhwc_min_unsigned_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
+func.func @generalize_pooling_nhwc_min_unsigned_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
   %0 = linalg.pooling_nhwc_min_unsigned {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xi32>, tensor<2x2xi32>) outs(%output : tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32>
   return %0: tensor<1x2x4x1xi32>
@@ -193,7 +193,7 @@ func @generalize_pooling_nhwc_min_unsigned_i32(%input : tensor<1x4x16x1xi32>, %s
 
 // -----
 
-func @generalize_pooling_nhwc_sum_f32(%input : tensor<1x4x16x1xf32>, %shape: tensor<2x2xf32>, %output: tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32> {
+func.func @generalize_pooling_nhwc_sum_f32(%input : tensor<1x4x16x1xf32>, %shape: tensor<2x2xf32>, %output: tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32> {
   %0 = linalg.pooling_nhwc_sum {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xf32>, tensor<2x2xf32>) outs(%output : tensor<1x2x4x1xf32>) -> tensor<1x2x4x1xf32>
   return %0: tensor<1x2x4x1xf32>
@@ -207,7 +207,7 @@ func @generalize_pooling_nhwc_sum_f32(%input : tensor<1x4x16x1xf32>, %shape: ten
 
 // -----
 
-func @generalize_pooling_nhwc_sum_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
+func.func @generalize_pooling_nhwc_sum_i32(%input : tensor<1x4x16x1xi32>, %shape: tensor<2x2xi32>, %output: tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32> {
   %0 = linalg.pooling_nhwc_sum {dilations = dense<[1, 2]> : tensor<2xi64>, strides = dense<[2, 4]> : tensor<2xi64>}
     ins(%input, %shape : tensor<1x4x16x1xi32>, tensor<2x2xi32>) outs(%output : tensor<1x2x4x1xi32>) -> tensor<1x2x4x1xi32>
   return %0: tensor<1x2x4x1xi32>
@@ -221,7 +221,7 @@ func @generalize_pooling_nhwc_sum_i32(%input : tensor<1x4x16x1xi32>, %shape: ten
 
 // -----
 
-func @generalize_fill_0d(%value: f64, %O: tensor<f32>) -> tensor<f32> {
+func.func @generalize_fill_0d(%value: f64, %O: tensor<f32>) -> tensor<f32> {
   %0 = linalg.fill ins(%value: f64) outs(%O : tensor<f32>) -> tensor<f32>
   return %0: tensor<f32>
 }
@@ -235,7 +235,7 @@ func @generalize_fill_0d(%value: f64, %O: tensor<f32>) -> tensor<f32> {
 
 // -----
 
-func @generalize_fill_2d(%value: f64, %O: memref<16x32xf32>) {
+func.func @generalize_fill_2d(%value: f64, %O: memref<16x32xf32>) {
   linalg.fill ins(%value: f64) outs(%O : memref<16x32xf32>)
   return
 }
@@ -250,7 +250,7 @@ func @generalize_fill_2d(%value: f64, %O: memref<16x32xf32>) {
 
 // -----
 
-func @generalize_index(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func.func @generalize_index(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.fill_rng_2d ins(%min, %max, %seed: f64, f64, i32) outs(%O : tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
 }
@@ -263,7 +263,7 @@ func @generalize_index(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>)
 
 // -----
 
-func @generalize_const(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func.func @generalize_const(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.fill_rng_2d ins(%min, %max, %seed: f64, f64, i32) outs(%O : tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
 }
@@ -276,7 +276,7 @@ func @generalize_const(%min: f64, %max: f64, %seed: i32, %O: tensor<16x32xf32>)
 // -----
 
 // Verifies the default value of the fun attribute is an exp op.
-func @generalize_elemwise_exp(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_exp(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_unary ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
 }
@@ -287,7 +287,7 @@ func @generalize_elemwise_exp(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>)
 // -----
 
 // Verifies the fun attribute controls the unary function used.
-func @generalize_elemwise_log(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_log(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_unary {fun = #linalg.unary_fn<log>}
                               ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
@@ -299,7 +299,7 @@ func @generalize_elemwise_log(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>)
 // -----
 
 // Verifies the fun attribute controls the unary function used.
-func @generalize_elemwise_abs(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_abs(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_unary {fun = #linalg.unary_fn<abs>}
                               ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
@@ -311,7 +311,7 @@ func @generalize_elemwise_abs(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>)
 // -----
 
 // Verifies the fun attribute controls the unary function used.
-func @generalize_elemwise_ceil(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_ceil(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_unary {fun = #linalg.unary_fn<ceil>}
                               ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
@@ -323,7 +323,7 @@ func @generalize_elemwise_ceil(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>
 // -----
 
 // Verifies the fun attribute controls the unary function used.
-func @generalize_elemwise_floor(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_floor(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_unary {fun = #linalg.unary_fn<floor>}
                               ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
@@ -335,7 +335,7 @@ func @generalize_elemwise_floor(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32
 // -----
 
 // Verifies the fun attribute controls the unary function used.
-func @generalize_elemwise_negf(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_negf(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_unary {fun = #linalg.unary_fn<negf>}
                               ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
@@ -347,7 +347,7 @@ func @generalize_elemwise_negf(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>
 // -----
 
 // Verifies the default value of the fun attribute is an add op.
-func @generalize_elemwise_add(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_add(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_binary ins(%lhs, %rhs: tensor<4x8xf32>, tensor<4x8xf32>)
                               outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
@@ -359,7 +359,7 @@ func @generalize_elemwise_add(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %o
 // -----
 
 // Verifies the fun attribute controls the binary function used.
-func @generalize_elemwise_mul(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_mul(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<mul>}
                               ins(%lhs, %rhs: tensor<4x8xf32>, tensor<4x8xf32>)
                               outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
@@ -372,7 +372,7 @@ func @generalize_elemwise_mul(%lhs : tensor<4x8xf32>, %rhs : tensor<4x8xf32>, %o
 // -----
 
 // Verifies pointwise ops support rank zero input tensors
-func @generalize_elemwise_rank_zero(%lhs : tensor<f32>, %rhs : tensor<f32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_elemwise_rank_zero(%lhs : tensor<f32>, %rhs : tensor<f32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.elemwise_binary {fun = #linalg.binary_fn<sub>}
                               ins(%lhs, %rhs: tensor<f32>, tensor<f32>)
                               outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
@@ -387,7 +387,7 @@ func @generalize_elemwise_rank_zero(%lhs : tensor<f32>, %rhs : tensor<f32>, %out
 // -----
 
 // Verifies the fun attribute controls the binary function used.
-func @generalize_copy(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
+func.func @generalize_copy(%lhs : tensor<4x8xf32>, %output : tensor<4x8xf32>) -> tensor<4x8xf32> {
   %0 = linalg.copy ins(%lhs: tensor<4x8xf32>) outs(%output: tensor<4x8xf32>) -> tensor<4x8xf32>
   return %0: tensor<4x8xf32>
 }

diff --git a/mlir/test/Dialect/Linalg/generalize-pad-tensor.mlir b/mlir/test/Dialect/Linalg/generalize-pad-tensor.mlir
index 2ebec15b840d1..4c98037410f1e 100644
--- a/mlir/test/Dialect/Linalg/generalize-pad-tensor.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-pad-tensor.mlir
@@ -7,7 +7,7 @@
 // CHECK:           %[[FILL:.*]] = linalg.fill ins(%[[C0]] : f32) outs(%[[INIT]] : tensor<1x32x32x1xf32>) -> tensor<1x32x32x1xf32>
 // CHECK:           %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]][0, 2, 2, 0] [1, 28, 28, 1] [1, 1, 1, 1] : tensor<1x28x28x1xf32> into tensor<1x32x32x1xf32>
 // CHECK:           return %[[PADDED]] : tensor<1x32x32x1xf32>
-func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
+func.func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
   %cst = arith.constant 0.000000e+00 : f32
   %0 = tensor.pad %arg0 low[0, 2, 2, 0] high[0, 2, 2, 0]  {
  ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
@@ -35,7 +35,7 @@ func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> tensor
 // CHECK:           %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]]{{\[}}%[[C0]], %[[C0]], %[[OFFSET]], %[[C0]]] [4, %[[DIM1_1]], 2, %[[DIM3_1]]] [1, 1, 1, 1] : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>
 // CHECK:           return %[[PADDED]] : tensor<4x?x?x?xf32>
 // CHECK:         }
-func @generalize_pad_tensor_dynamic_shape(%arg0: tensor<4x?x2x?xf32>, %arg1: index) -> tensor<4x?x?x?xf32> {
+func.func @generalize_pad_tensor_dynamic_shape(%arg0: tensor<4x?x2x?xf32>, %arg1: index) -> tensor<4x?x?x?xf32> {
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.0 : f32
   %out = tensor.pad %arg0 low[%c0, %c0, %arg1, %c0] high[%c0, %c0, %c0, %arg1]  {

diff --git a/mlir/test/Dialect/Linalg/hoist-padding.mlir b/mlir/test/Dialect/Linalg/hoist-padding.mlir
index 8dc5297bfec2e..5ac26232d9c0e 100644
--- a/mlir/test/Dialect/Linalg/hoist-padding.mlir
+++ b/mlir/test/Dialect/Linalg/hoist-padding.mlir
@@ -6,7 +6,7 @@
 
 //      MATVEC:  static_size_divisible
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<12xf32>
-func @static_size_divisible(%arg0: tensor<24x12xf32>,
+func.func @static_size_divisible(%arg0: tensor<24x12xf32>,
                             %arg1: tensor<12xf32>,
                             %arg2: tensor<24xf32>) -> tensor<24xf32> {
   %cst = arith.constant 0.000000e+00 : f32
@@ -53,7 +53,7 @@ func @static_size_divisible(%arg0: tensor<24x12xf32>,
 
 //      MATVEC:  static_size_not_divisible
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<12xf32>
-func @static_size_not_divisible(%arg0: tensor<24x12xf32>,
+func.func @static_size_not_divisible(%arg0: tensor<24x12xf32>,
                                 %arg1: tensor<12xf32>,
                                 %arg2: tensor<24xf32>) -> tensor<24xf32> {
   %cst = arith.constant 0.000000e+00 : f32
@@ -109,7 +109,7 @@ func @static_size_not_divisible(%arg0: tensor<24x12xf32>,
 
 //      MATVEC:  dynamic_size
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<?xf32>
-func @dynamic_size(%arg0: tensor<24x?xf32>,
+func.func @dynamic_size(%arg0: tensor<24x?xf32>,
                    %arg1: tensor<?xf32>,
                    %arg2: tensor<24xf32>) -> tensor<24xf32> {
   %cst = arith.constant 0.000000e+00 : f32
@@ -162,7 +162,7 @@ func @dynamic_size(%arg0: tensor<24x?xf32>,
 
 //      MATVEC:  non_constant_padding
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<12xf32>
-func @non_constant_padding(%arg0: tensor<24x12xf32>,
+func.func @non_constant_padding(%arg0: tensor<24x12xf32>,
                    %arg1: tensor<12xf32>,
                    %arg2: tensor<24xf32>) -> tensor<24xf32> {
   %c4 = arith.constant 4 : index
@@ -196,7 +196,7 @@ func @non_constant_padding(%arg0: tensor<24x12xf32>,
 
 //      MATVEC:  non_constant_op_padding
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<12xf32>
-func @non_constant_op_padding(%arg0: tensor<24x12xf32>,
+func.func @non_constant_op_padding(%arg0: tensor<24x12xf32>,
                       %arg1: tensor<12xf32>,
                       %arg2: tensor<24xf32>) -> tensor<24xf32> {
   %c0 = arith.constant 0 : index
@@ -232,7 +232,7 @@ func @non_constant_op_padding(%arg0: tensor<24x12xf32>,
 //      MATVEC:  non_index_operand
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<12xf32>
 // MATVEC-SAME:    %[[ARG3:[0-9a-zA-Z]*]]: i32
-func @non_index_operand(%arg0: tensor<24x12xf32>,
+func.func @non_index_operand(%arg0: tensor<24x12xf32>,
                         %arg1: tensor<12xf32>,
                         %arg2: tensor<24xf32>,
                         %arg3: i32) -> tensor<24xf32> {
@@ -269,7 +269,7 @@ func @non_index_operand(%arg0: tensor<24x12xf32>,
 //      MATVEC:  memory_effect
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<12xf32>
 // MATVEC-SAME:    %[[ARG3:[0-9a-zA-Z]*]]: memref<?xindex>
-func @memory_effect(%arg0: tensor<24x12xf32>,
+func.func @memory_effect(%arg0: tensor<24x12xf32>,
                     %arg1: tensor<12xf32>,
                     %arg2: tensor<24xf32>,
                     %arg3: memref<?xindex>) -> tensor<24xf32> {
@@ -306,7 +306,7 @@ func @memory_effect(%arg0: tensor<24x12xf32>,
 //      MATVEC:  index_result_loop
 // MATVEC-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<12xf32>
 // MATVEC-SAME:    %[[ARG3:[0-9a-zA-Z]*]]: index
-func @index_result_loop(%arg0: tensor<24x12xf32>,
+func.func @index_result_loop(%arg0: tensor<24x12xf32>,
                         %arg1: tensor<12xf32>,
                         %arg2: tensor<24xf32>,
                         %arg3: index) -> tensor<24xf32> {
@@ -349,7 +349,7 @@ func @index_result_loop(%arg0: tensor<24x12xf32>,
 //      MATMUL:  tile_and_fuse
 // MATMUL-SAME:    %[[ARG0:[0-9a-zA-Z]*]]: tensor<12x6xf32>
 // MATMUL-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<6x24xf32>
-func @tile_and_fuse(%arg0: tensor<12x6xf32>,
+func.func @tile_and_fuse(%arg0: tensor<12x6xf32>,
                     %arg1: tensor<6x24xf32>,
                     %arg2: tensor<12x24xf32>) -> tensor<12x24xf32> {
   %c6 = arith.constant 6 : index
@@ -430,7 +430,7 @@ func @tile_and_fuse(%arg0: tensor<12x6xf32>,
 
 //      TRANSP:  transpose
 // TRANSP-SAME:    %[[ARG0:[0-9a-zA-Z]*]]: tensor<24x?xf32>
-func @transpose(%arg0: tensor<24x?xf32>,
+func.func @transpose(%arg0: tensor<24x?xf32>,
                 %arg1: tensor<?xf32>,
                 %arg2: tensor<24xf32>) -> tensor<24xf32> {
   %cst = arith.constant 0.000000e+00 : f32

diff --git a/mlir/test/Dialect/Linalg/hoisting.mlir b/mlir/test/Dialect/Linalg/hoisting.mlir
index aed8688314f99..1b1c6a8d2be2f 100644
--- a/mlir/test/Dialect/Linalg/hoisting.mlir
+++ b/mlir/test/Dialect/Linalg/hoisting.mlir
@@ -12,7 +12,7 @@
 //  CHECK-SAME:   %[[UB:[a-zA-Z0-9]*]]: index,
 //  CHECK-SAME:   %[[STEP:[a-zA-Z0-9]*]]: index,
 //  CHECK-SAME:   %[[CMP:[a-zA-Z0-9]*]]: i1
-func @hoist_vector_transfer_pairs(
+func.func @hoist_vector_transfer_pairs(
     %memref0: memref<?x?xf32>, %memref1: memref<?x?xf32>, %memref2: memref<?x?xf32>,
     %memref3: memref<?x?xf32>, %memref4: memref<?x?xf32>, %memref5: memref<?x?xf32>,
     %val: index, %lb : index, %ub : index, %step: index, %cmp: i1) {
@@ -87,7 +87,7 @@ func @hoist_vector_transfer_pairs(
 //  CHECK-SAME:   %[[STEP:[a-zA-Z0-9]*]]: index,
 //  CHECK-SAME:   %[[RANDOM:[a-zA-Z0-9]*]]: index,
 //  CHECK-SAME:   %[[CMP:[a-zA-Z0-9]*]]: i1
-func @hoist_vector_transfer_pairs_disjoint(
+func.func @hoist_vector_transfer_pairs_disjoint(
     %memref0: memref<?x?xf32>, %memref1: memref<?x?xf32>,
     %memref2: memref<?x?xf32>, %memref3: memref<?x?xf32>, %val: index, %lb : index, %ub : index,
     %step: index, %random_index : index, %cmp: i1) {
@@ -158,7 +158,7 @@ func @hoist_vector_transfer_pairs_disjoint(
 // -----
 
 // CHECK-LABEL: func @hoist_vector_transfer_pairs_tensor
-func @hoist_vector_transfer_pairs_tensor(
+func.func @hoist_vector_transfer_pairs_tensor(
     %tensor0: tensor<?x?xf32>, %tensor1: tensor<?x?xf32>, %tensor2: tensor<?x?xf32>,
     %tensor3: tensor<?x?xf32>, %tensor4: tensor<?x?xf32>, %tensor5: tensor<?x?xf32>,
     %val: index, %lb : index, %ub : index, %step: index) ->
@@ -243,7 +243,7 @@ func @hoist_vector_transfer_pairs_tensor(
 //  CHECK-SAME:   %[[TENSOR1:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
 //  CHECK-SAME:   %[[TENSOR2:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
 //  CHECK-SAME:   %[[TENSOR3:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
-func @hoist_vector_transfer_pairs_disjoint_tensor(
+func.func @hoist_vector_transfer_pairs_disjoint_tensor(
     %tensor0: tensor<?x?xf32>, %tensor1: tensor<?x?xf32>,
     %tensor2: tensor<?x?xf32>, %tensor3: tensor<?x?xf32>,
     %val: index, %lb : index, %ub : index, %step: index,
@@ -332,7 +332,7 @@ func @hoist_vector_transfer_pairs_disjoint_tensor(
 //  CHECK-SAME:   %[[TENSOR3:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
 //  CHECK-SAME:   %[[TENSOR4:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
 //  CHECK-SAME:   %[[TENSOR5:[a-zA-Z0-9]*]]: tensor<?x?xf32>
-func @hoist_vector_transfer_pairs_tensor_and_slices(
+func.func @hoist_vector_transfer_pairs_tensor_and_slices(
     %tensor0: tensor<?x?xf32>, %tensor1: tensor<?x?xf32>, %tensor2: tensor<?x?xf32>,
     %tensor3: tensor<?x?xf32>, %tensor4: tensor<?x?xf32>, %tensor5: tensor<?x?xf32>,
     %val: index, %lb : index, %ub : index, %step: index) ->

diff --git a/mlir/test/Dialect/Linalg/inline-scalar-operands.mlir b/mlir/test/Dialect/Linalg/inline-scalar-operands.mlir
index 04ae8aa1140b6..f3d469d61acc9 100644
--- a/mlir/test/Dialect/Linalg/inline-scalar-operands.mlir
+++ b/mlir/test/Dialect/Linalg/inline-scalar-operands.mlir
@@ -5,7 +5,7 @@
 #map3 = affine_map<(d0) -> ()>
 
 // CHECK: func @inline_zerod(%[[ARG:.*]]: tensor<4xf32>, %[[SCALAR:.*]]: tensor<f32>)
-func @inline_zerod(%arg0: tensor<4xf32>, %scalar: tensor<f32>) -> tensor<4xf32> {
+func.func @inline_zerod(%arg0: tensor<4xf32>, %scalar: tensor<f32>) -> tensor<4xf32> {
     %0 = linalg.init_tensor [4] : tensor<4xf32>
     // CHECK: linalg.generic {indexing_maps = [#[[MAP]], #[[MAP]]],
     // CHECK-SAME: iterator_types = ["parallel"]} ins(%[[ARG]] : tensor<4xf32>)
@@ -29,7 +29,7 @@ func @inline_zerod(%arg0: tensor<4xf32>, %scalar: tensor<f32>) -> tensor<4xf32>
 #map3 = affine_map<(d0) -> (0)>
 
 // CHECK: func @inline_oned(%[[ARG:.*]]: tensor<4xf32>, %[[SCALAR:.*]]: tensor<1xf32>)
-func @inline_oned(%arg0: tensor<4xf32>, %scalar: tensor<1xf32>) -> tensor<4xf32> {
+func.func @inline_oned(%arg0: tensor<4xf32>, %scalar: tensor<1xf32>) -> tensor<4xf32> {
     // CHECK: %[[ZERO:.*]] = arith.constant 0 : index
     %0 = linalg.init_tensor [4] : tensor<4xf32>
     // CHECK: linalg.generic {indexing_maps = [#[[MAP]], #[[MAP]]],

diff --git a/mlir/test/Dialect/Linalg/inlining.mlir b/mlir/test/Dialect/Linalg/inlining.mlir
index 033213c2a954c..660f59d186eab 100644
--- a/mlir/test/Dialect/Linalg/inlining.mlir
+++ b/mlir/test/Dialect/Linalg/inlining.mlir
@@ -13,13 +13,13 @@
   iterator_types = ["parallel"]
 }
 
-func @inline_into(%arg0: memref<?xf32>) {
+func.func @inline_into(%arg0: memref<?xf32>) {
   // CHECK: linalg.generic
   call @inlined_fn(%arg0) : (memref<?xf32>) -> ()
   return
 }
 
-func @inlined_fn(%arg0: memref<?xf32>) {
+func.func @inlined_fn(%arg0: memref<?xf32>) {
   // CHECK: linalg.generic
   linalg.generic #trait
      ins(%arg0 : memref<?xf32>)

diff --git a/mlir/test/Dialect/Linalg/interchange.mlir b/mlir/test/Dialect/Linalg/interchange.mlir
index b767867df0c5f..1d422eef242b9 100644
--- a/mlir/test/Dialect/Linalg/interchange.mlir
+++ b/mlir/test/Dialect/Linalg/interchange.mlir
@@ -4,7 +4,7 @@
 #map0 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
 #map1 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d3)>
 
-func @interchange_generic_op(%arg0 : memref<1x2x3x4x5xindex>, %arg1 : memref<1x2x4xindex>) {
+func.func @interchange_generic_op(%arg0 : memref<1x2x3x4x5xindex>, %arg1 : memref<1x2x4xindex>) {
   linalg.generic {
     indexing_maps = [#map0, #map1],
     iterator_types = ["parallel", "parallel", "reduction", "parallel", "reduction"]}

diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index f5f4ce70ce3ac..f58deab260343 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -split-input-file -verify-diagnostics
 
-func @load_number_of_indices(%v : memref<f32>) {
+func.func @load_number_of_indices(%v : memref<f32>) {
   // expected-error @+2 {{incorrect number of indices for load}}
   %c0 = arith.constant 0 : index
   memref.load %v[%c0] : memref<f32>
@@ -8,7 +8,7 @@ func @load_number_of_indices(%v : memref<f32>) {
 
 // -----
 
-func @store_number_of_indices(%v : memref<f32>) {
+func.func @store_number_of_indices(%v : memref<f32>) {
   // expected-error @+3 {{store index operand count not equal to memref rank}}
   %c0 = arith.constant 0 : index
   %f0 = arith.constant 0.0 : f32
@@ -17,21 +17,21 @@ func @store_number_of_indices(%v : memref<f32>) {
 
 // -----
 
-func @yield_parent(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
+func.func @yield_parent(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
   // expected-error @+1 {{op expected parent op with LinalgOp interface}}
   linalg.yield %arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>
 }
 
 // -----
 
-func @index_parent() {
+func.func @index_parent() {
   // expected-error @+1 {{op expected parent op with LinalgOp interface}}
   linalg.index 0 : index
 }
 
 // -----
 
-func @index_dim_lower_than_number_of_loops(%arg0: memref<f32>) {
+func.func @index_dim_lower_than_number_of_loops(%arg0: memref<f32>) {
   // expected-error @+6 {{op expected dim (2) to be lower than the number of loops (0) of the enclosing LinalgOp}}
   linalg.generic {
       indexing_maps =  [ affine_map<() -> ()> ],
@@ -45,7 +45,7 @@ func @index_dim_lower_than_number_of_loops(%arg0: memref<f32>) {
 
 // -----
 
-func @index_dim_negative(%arg0: memref<f32>) {
+func.func @index_dim_negative(%arg0: memref<f32>) {
   // expected-error @+6 {{op attribute 'dim' failed to satisfy constraint: 64-bit signless integer attribute whose minimum value is 0}}
   linalg.generic {
       indexing_maps =  [ affine_map<() -> ()> ],
@@ -59,7 +59,7 @@ func @index_dim_negative(%arg0: memref<f32>) {
 
 // -----
 
-func @generic_no_region(%arg0: memref<f32>) {
+func.func @generic_no_region(%arg0: memref<f32>) {
   // expected-error @+5 {{expected '{' to begin a region}}
   linalg.generic {
     indexing_maps =  [ affine_map<() -> (0)> ],
@@ -69,7 +69,7 @@ func @generic_no_region(%arg0: memref<f32>) {
 
 // -----
 
-func @generic_mismatched_num_returns(%arg0: memref<f32>) {
+func.func @generic_mismatched_num_returns(%arg0: memref<f32>) {
   // expected-error @+6 {{op expected number of yield values (1) to match the number of operands of the enclosing LinalgOp (0)}}
   linalg.generic {
       indexing_maps =  [ affine_map<() -> ()> ],
@@ -82,7 +82,7 @@ func @generic_mismatched_num_returns(%arg0: memref<f32>) {
 
 // -----
 
-func @generic_wrong_dim_in_map(%arg0: memref<1xi32>) {
+func.func @generic_wrong_dim_in_map(%arg0: memref<1xi32>) {
   // expected-error @+1 {{op expected indexing_map #0 to have 1 dim(s) to match the number of loops}}
   linalg.generic {
     indexing_maps =  [ affine_map<() -> (0)> ],
@@ -95,7 +95,7 @@ func @generic_wrong_dim_in_map(%arg0: memref<1xi32>) {
 
 // -----
 
-func @generic_wrong_iterator(%arg0: memref<1xi32>) {
+func.func @generic_wrong_iterator(%arg0: memref<1xi32>) {
   // expected-error @+1 {{op unexpected iterator_type (random)}}
   linalg.generic {
     indexing_maps =  [ affine_map<(i) -> (i)> ],
@@ -108,7 +108,7 @@ func @generic_wrong_iterator(%arg0: memref<1xi32>) {
 
 // -----
 
-func @generic_one_d_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
+func.func @generic_one_d_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
   // expected-error @+1 {{expected operand rank (1) to match the result rank of indexing_map #0 (2)}}
   linalg.generic {
     indexing_maps =  [ affine_map<() -> (0, 0)> ],
@@ -121,7 +121,7 @@ func @generic_one_d_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>)
 
 // -----
 
-func @generic_scalar_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
+func.func @generic_scalar_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
   %cst = arith.constant 0.0 : f32
   // expected-error @+1 {{expected operand rank (0) to match the result rank of indexing_map #0 (1)}}
   linalg.generic {
@@ -136,7 +136,7 @@ func @generic_scalar_view(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>)
 
 // -----
 
-func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
+func.func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
   // expected-error @+7 {{'linalg.yield' op type of yield operand 1 ('i4') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
   linalg.generic {
     indexing_maps =  [ affine_map<(i) -> (i)> ],
@@ -150,7 +150,7 @@ func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(o
 
 // -----
 
-func @generic_singular_maps(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>, %arg1: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
+func.func @generic_singular_maps(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>, %arg1: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
   // expected-error @+1 {{expected the shape-to-loops map to be non-null}}
   linalg.generic {
     indexing_maps =  [
@@ -171,7 +171,7 @@ func @generic_singular_maps(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>
 
 // -----
 
-func @generic_empty_region(%arg0: memref<f32>) {
+func.func @generic_empty_region(%arg0: memref<f32>) {
   %f0 = arith.constant 0.0: f32
   // expected-error @+1 {{op expects region #0 to have 0 or 1 blocks}}
   linalg.generic {
@@ -188,7 +188,7 @@ func @generic_empty_region(%arg0: memref<f32>) {
 
 // -----
 
-func @generic_empty_region(%arg0: memref<f32>) {
+func.func @generic_empty_region(%arg0: memref<f32>) {
   %f0 = arith.constant 0.0: f32
   // expected-error @+1 {{op expects to have 1 region with 1 block}}
   linalg.generic {
@@ -201,7 +201,7 @@ func @generic_empty_region(%arg0: memref<f32>) {
 
 // -----
 
-func @generic_mismatched_num_arguments(%arg0: memref<f32>) {
+func.func @generic_mismatched_num_arguments(%arg0: memref<f32>) {
   // expected-error @+6 {{'linalg.yield' op expected number of yield values (2) to match the number of operands of the enclosing LinalgOp (1)}}
   linalg.generic {
       indexing_maps =  [ affine_map<() -> ()>, affine_map<() -> ()> ],
@@ -214,7 +214,7 @@ func @generic_mismatched_num_arguments(%arg0: memref<f32>) {
 
 // -----
 
-func @generic_shaped_operand_block_arg_type(%arg0: memref<f32>) {
+func.func @generic_shaped_operand_block_arg_type(%arg0: memref<f32>) {
   // expected-error @+6 {{'linalg.yield' op type of yield operand 1 ('i1') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
   linalg.generic {
     indexing_maps =  [ affine_map<() -> ()> ],
@@ -227,7 +227,7 @@ func @generic_shaped_operand_block_arg_type(%arg0: memref<f32>) {
 
 // -----
 
-func @generic_scalar_operand_block_arg_type(%arg0: tensor<f32>) {
+func.func @generic_scalar_operand_block_arg_type(%arg0: tensor<f32>) {
   // expected-error @+6 {{'linalg.yield' op type of yield operand 1 ('i1') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
   linalg.generic {
     indexing_maps =  [ affine_map<() -> ()> ],
@@ -240,7 +240,7 @@ func @generic_scalar_operand_block_arg_type(%arg0: tensor<f32>) {
 
 // -----
 
-func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
+func.func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>) {
   // expected-error @+7 {{type of yield operand 1 ('i1') doesn't match the element type of the enclosing linalg.generic op ('f32')}}
   linalg.generic {
     indexing_maps = [ affine_map<(i) -> (i)> ],
@@ -254,7 +254,7 @@ func @generic_result_0_element_type(%arg0: memref<?xf32, affine_map<(i)[off]->(o
 
 // -----
 
-func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>,
+func.func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>,
                                  %arg1: tensor<?xf32>) {
   // expected-error @+1 {{expected type of operand #1 ('tensor<?xf32>') to match type of corresponding result ('tensor<f32>')}}
   %0 = linalg.generic {
@@ -269,7 +269,7 @@ func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off
 
 // -----
 
-func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>,
+func.func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off + i)>>,
                                  %arg1: tensor<?xf32>) {
   // expected-error @+1 {{unexpected output tensor expression in indexing map #0 a.k.a 'd0' is function of reduction iterator 'd0'}}
   %0 = linalg.generic {
@@ -284,7 +284,7 @@ func @generic_result_tensor_type(%arg0: memref<?xf32, affine_map<(i)[off]->(off
 
 // -----
 
-func @generic(%arg0: memref<?x?xf32>) {
+func.func @generic(%arg0: memref<?x?xf32>) {
   // expected-error @+6 {{block with no terminator, has %0 = "arith.addf"(%arg1, %arg1) : (f32, f32) -> f32}}
   linalg.generic  {
     indexing_maps = [ affine_map<(i, j) -> (i, j)> ],
@@ -310,7 +310,7 @@ func @generic(%arg0: memref<?x?xf32>) {
 //
 // // -----
 
-func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?xf32>) {
+func.func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?xf32>) {
   // expected-error @+1 {{expected operand rank (2) to match the result rank of indexing_map #1 (3)}}
   linalg.batch_matmul ins(%a3, %b3: memref<?x?x?xf32>, memref<?x?xf32>)
                      outs(%c3 : memref<?x?x?xf32>)
@@ -319,7 +319,7 @@ func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?x
 
 // -----
 
-func @incorrect_region_arg_count(%m: memref<?x?xf32>) {
+func.func @incorrect_region_arg_count(%m: memref<?x?xf32>) {
   // expected-error @+3 {{region expects 3 args, got 2}}
   %res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>)
                        -> (tensor<?x?xf32>, tensor<?x?xf32>)
@@ -328,7 +328,7 @@ func @incorrect_region_arg_count(%m: memref<?x?xf32>) {
 
 // -----
 
-func @matching_inits(%m: memref<?x?xf32>, %t: tensor<?x?xf32>) {
+func.func @matching_inits(%m: memref<?x?xf32>, %t: tensor<?x?xf32>) {
   // expected-error @+1 {{expected type of operand #2 ('tensor<?x?xf32>') to match type of corresponding result ('tensor<?xf32>')}}
   %res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>)
                       outs(%t : tensor<?x?xf32>)
@@ -338,7 +338,7 @@ func @matching_inits(%m: memref<?x?xf32>, %t: tensor<?x?xf32>) {
 
 // -----
 
-func @init_tensor_err(%arg0 : index, %arg1 : index)
+func.func @init_tensor_err(%arg0 : index, %arg1 : index)
 {
   // expected-error @+1 {{specified type 'tensor<4x?x?x5xf32>' does not match the inferred type 'tensor<4x5x?x?xf32>'}}
   %1 = linalg.init_tensor [4, 5, %arg0, %arg1] : tensor<4x?x?x5xf32>
@@ -347,7 +347,7 @@ func @init_tensor_err(%arg0 : index, %arg1 : index)
 
 // -----
 
-func @init_tensor_err(%arg0 : index)
+func.func @init_tensor_err(%arg0 : index)
 {
   // expected-error @+1 {{expected 4 sizes values}}
   %1 = linalg.init_tensor [4, 5, %arg0] : tensor<4x?x?x5xf32>
@@ -356,7 +356,7 @@ func @init_tensor_err(%arg0 : index)
 
 // -----
 
-func @init_tensor_err(%arg0 : index)
+func.func @init_tensor_err(%arg0 : index)
 {
   // expected-error @+1 {{expected 2 dynamic sizes values}}
   %1 = "linalg.init_tensor"(%arg0) {static_sizes = [4, -1, -1, 5]} : (index) -> tensor<4x?x?x5xf32>
@@ -365,7 +365,7 @@ func @init_tensor_err(%arg0 : index)
 
 // -----
 
-func @illegal_fill_tensor_no_return(%arg0 : index, %arg1 : index, %arg2 : f32)
+func.func @illegal_fill_tensor_no_return(%arg0 : index, %arg1 : index, %arg2 : f32)
 {
   %0 = linalg.init_tensor [%arg0, %arg1] : tensor<?x?xf32>
   // expected-error @+1 {{expected the number of results (0) to be equal to the number of output tensors (1)}}
@@ -374,7 +374,7 @@ func @illegal_fill_tensor_no_return(%arg0 : index, %arg1 : index, %arg2 : f32)
 
 // -----
 
-func @illegal_fill_memref_with_tensor_return
+func.func @illegal_fill_memref_with_tensor_return
   (%arg0 : memref<?x?xf32>, %arg1 : f32) -> tensor<?x?xf32>
 {
   // expected-error @+1 {{expected the number of results (1) to be equal to the number of output tensors (0)}}
@@ -384,7 +384,7 @@ func @illegal_fill_memref_with_tensor_return
 
 // -----
 
-func @illegal_fill_tensor_with_memref_return
+func.func @illegal_fill_tensor_with_memref_return
   (%arg0 : tensor<?x?xf32>, %arg1 : f32) -> memref<?x?xf32>
 {
   // expected-error @+1 {{result #0 must be ranked tensor of any type values, but got 'memref<?x?xf32>'}}
@@ -394,7 +394,7 @@ func @illegal_fill_tensor_with_memref_return
 
 // -----
 
-func @invalid_static_matmul(%arg0: memref<2x4xf32>, %arg1: memref<3x4xf32>, %arg2: memref<2x4xf32>) {
+func.func @invalid_static_matmul(%arg0: memref<2x4xf32>, %arg1: memref<3x4xf32>, %arg2: memref<2x4xf32>) {
   // expected-error @+1 {{inferred input/output operand #1 has shape's dimension #0 to be 4, but found 3}}
   linalg.matmul ins(%arg0, %arg1 : memref<2x4xf32>, memref<3x4xf32>)
                       outs(%arg2 :memref<2x4xf32>)
@@ -403,7 +403,7 @@ func @invalid_static_matmul(%arg0: memref<2x4xf32>, %arg1: memref<3x4xf32>, %arg
 
 // -----
 
-func @invalid_static_2d_conv(%input : memref<1x3x4x2xf32>, %filter: memref<3x2x2x1xf32>, %output: memref<1x2x3x1xf32>) {
+func.func @invalid_static_2d_conv(%input : memref<1x3x4x2xf32>, %filter: memref<3x2x2x1xf32>, %output: memref<1x2x3x1xf32>) {
   // expected-error @+1 {{inferred input/output operand #0 has shape's dimension #1 to be greater than or equal to 4, but found 3}}
   linalg.conv_2d_nhwc_hwcf
     { dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -422,7 +422,7 @@ func @invalid_static_2d_conv(%input : memref<1x3x4x2xf32>, %filter: memref<3x2x2
         iterator_types = ["parallel"]
 }
 
-func @invalid_reverse(%A: memref<5xf32>, %B: memref<5xf32>) {
+func.func @invalid_reverse(%A: memref<5xf32>, %B: memref<5xf32>) {
   // expected-error @+1 {{unexpected result less than 0 at expression #0 in}}
   linalg.generic #attrs ins(%A: memref<5xf32>) outs(%B: memref<5xf32>) {
                 ^bb0(%a: f32, %b: f32):

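The edits above are purely mechanical: every top-level `func` keyword introducing a definition becomes `func.func`, while `// CHECK` lines keep the shorter `func` spelling. As a hypothetical sketch only (the commit does not say how the update was produced; the directory path and regex below are assumptions), a migration like this could be scripted as:

  import pathlib
  import re

  # Rewrite `func @...` / `func private @...` definitions to `func.func ...`
  # across the Linalg tests. Anchoring at line start leaves `func.func`,
  # `// CHECK-LABEL: func`, and other occurrences of the word untouched.
  for path in pathlib.Path("mlir/test/Dialect/Linalg").glob("*.mlir"):
      text = path.read_text()
      updated = re.sub(r"^func ", "func.func ", text, flags=re.MULTILINE)
      if updated != text:
          path.write_text(updated)

Requiring a space after `func` makes the rewrite idempotent: already-updated `func.func` lines do not match a second time.
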
diff --git a/mlir/test/Dialect/Linalg/library-calls.mlir b/mlir/test/Dialect/Linalg/library-calls.mlir
index 6c16acad4629e..e7b1772d16da1 100644
--- a/mlir/test/Dialect/Linalg/library-calls.mlir
+++ b/mlir/test/Dialect/Linalg/library-calls.mlir
@@ -1,11 +1,11 @@
 // RUN: mlir-opt %s -convert-linalg-to-std | FileCheck %s
 
-func private @print_memref_f32(memref<*xf32>)
+func.func private @print_memref_f32(memref<*xf32>)
 
 // CHECK:  func private @linalg_fill_f32_viewsxsxf32(f32, memref<?x?xf32, {{.*}}>) attributes {llvm.emit_c_interface}
 // CHECK:  func private @linalg_matmul_viewsxsxf32_viewsxsxf32_viewsxsxf32(memref<?x?xf32, {{.*}}>, memref<?x?xf32, {{.*}}>, memref<?x?xf32, {{.*}}>) attributes {llvm.emit_c_interface}
 
-func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
+func.func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %f0 = arith.constant 0.0 : f32

diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index 8b3e76b686c7b..f35d76c700534 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -14,7 +14,7 @@
 // CHECKPARALLEL-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 // CHECKPARALLEL-DAG: #[[$stride1Dilation1:.*]] = affine_map<(d0, d1) -> (d0  + d1)>
 
-func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
+func.func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %A = memref.view %arg0[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32>
@@ -59,7 +59,7 @@ func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
 
 
 
-func @matvec(%arg0: memref<?xi8>, %M: index, %N: index) {
+func.func @matvec(%arg0: memref<?xi8>, %M: index, %N: index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %2 = memref.view %arg0[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32>
@@ -100,7 +100,7 @@ func @matvec(%arg0: memref<?xi8>, %M: index, %N: index) {
 //       CHECKPARALLEL:     store %[[res]], %[[C]][%{{.*}}] : memref<?xf32>
 
 
-func @dot(%arg0: memref<?xi8>, %M: index) {
+func.func @dot(%arg0: memref<?xi8>, %M: index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %1 = memref.view %arg0[%c0][%M] : memref<?xi8> to memref<?xf32>
@@ -137,7 +137,7 @@ func @dot(%arg0: memref<?xi8>, %M: index) {
 //       CHECKPARALLEL:   store %[[res]], %[[C]][] : memref<f32>
 
 
-func @dot_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<f32>) {
+func.func @dot_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<f32>) {
   linalg.dot ins(%arg0, %arg1 : memref<?xf32, offset: ?, strides: [1]>,
                                 memref<?xf32, offset: ?, strides: [1]>)
             outs(%arg2:  memref<f32>)
@@ -165,7 +165,7 @@ func @dot_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf3
 //   CHECKPARALLEL-DAG:   %[[res:.*]] = arith.addf %[[c]], %[[inc]] : f32
 //       CHECKPARALLEL:   store %[[res]], %{{.*}}[] : memref<f32>
 
-func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
+func.func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
   linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?xf32, offset: ?, strides: [1]>)
   return
 }
@@ -179,7 +179,7 @@ func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}) = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) {
 //       CHECKPARALLEL:     store %{{.*}}, %{{.*}}[%{{.*}}] : memref<?xf32, #[[$strided1D]]>
 
-func @fill_view0(%arg0: memref<f32>, %arg1: f32) {
+func.func @fill_view0(%arg0: memref<f32>, %arg1: f32) {
   linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<f32>)
   return
 }
@@ -189,7 +189,7 @@ func @fill_view0(%arg0: memref<f32>, %arg1: f32) {
 // CHECKPARALLEL-LABEL: func @fill_view0(%{{.*}}: memref<f32>, %{{.*}}: f32) {
 //       CHECKPARALLEL:   store %{{.*}}, %{{.*}}[] : memref<f32>
 
-func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: f32) {
+func.func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: f32) {
   linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>)
   return
 }
@@ -205,7 +205,7 @@ func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
 //       CHECKPARALLEL:   scf.parallel (%{{.*}}, %{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}}, %{{.*}}) {
 //       CHECKPARALLEL:     store %{{.*}}, {{.*}} : memref<?x?x?xf32, #[[$strided3D]]>
 
-func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>) {
+func.func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>) {
   linalg.generic {
     iterator_types = ["parallel"],
     indexing_maps = [ affine_map<(i) -> (i)>, affine_map<(i) -> (i)>] }
@@ -241,7 +241,7 @@ func @copy_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf
   library_call = "some_external_function_name_2",
   doc = "B(i,j,k), C(i,k,j) = foo(A(i, j), B(i,j,k), C(i,k,j))"
 }
-func @generic_region(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg2: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
+func.func @generic_region(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg2: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   linalg.generic #trait2
     ins(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>)
    outs(%arg1, %arg2 : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
@@ -283,7 +283,7 @@ func @generic_region(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1:
   library_call = "some_external_function_name_2",
   doc = "B(i,j,k), C(i,k,j) = foo(A(i, j) * B(i,j,k), i * j * k + C(i,k,j))"
 }
-func @generic_index_region(
+func.func @generic_index_region(
         %arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
         %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>,
         %arg2: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
@@ -353,7 +353,7 @@ func @generic_index_region(
   library_call = "some_broadcast_external_fn"
 }
 
-func @generic_op_zero_rank(%arg0: memref<f32>, %arg1: memref<3x4xf32>)
+func.func @generic_op_zero_rank(%arg0: memref<f32>, %arg1: memref<3x4xf32>)
 {
   linalg.generic #trait_broadcast
       ins(%arg0 : memref<f32>)
@@ -379,7 +379,7 @@ func @generic_op_zero_rank(%arg0: memref<f32>, %arg1: memref<3x4xf32>)
 //       CHECKPARALLEL:   %[[a:.*]] = memref.load %[[ARG0]][]
 //       CHECKPARALLEL:   store %[[a]], %[[ARG1]][%[[i]], %[[j]]]
 
-func @generic_op_scalar(%arg0: f32, %arg1: memref<3x4xf32>)
+func.func @generic_op_scalar(%arg0: f32, %arg1: memref<3x4xf32>)
 {
   linalg.generic #trait_broadcast
       ins(%arg0 : f32)
@@ -403,7 +403,7 @@ func @generic_op_scalar(%arg0: f32, %arg1: memref<3x4xf32>)
 //       CHECKPARALLEL: scf.parallel (%[[i:[a-zA-Z0-9_]*]], %[[j:[a-zA-Z0-9_]*]])
 //       CHECKPARALLEL:   store %[[ARG0]], %[[ARG1]][%[[i]], %[[j]]]
 
-func @generic_index_op_zero_rank(%arg0: memref<i32>, %arg1: memref<3x4xi32>)
+func.func @generic_index_op_zero_rank(%arg0: memref<i32>, %arg1: memref<3x4xi32>)
 {
   linalg.generic #trait_broadcast
       ins(%arg0 : memref<i32>)
@@ -453,7 +453,7 @@ func @generic_index_op_zero_rank(%arg0: memref<i32>, %arg1: memref<3x4xi32>)
   library_call = "some_reduce_external_fn"
 }
 
-func @generic_op_1D_reduce(%arg0: memref<?xf32>, %arg1: memref<f32>)
+func.func @generic_op_1D_reduce(%arg0: memref<?xf32>, %arg1: memref<f32>)
 {
   linalg.generic #trait_reduce_1D
       ins(%arg0 : memref<?xf32>)
@@ -497,7 +497,7 @@ func @generic_op_1D_reduce(%arg0: memref<?xf32>, %arg1: memref<f32>)
   library_call = "some_reduce_external_fn"
 }
 
-func @generic_index_op_1D_reduce(%arg0: memref<?xf32>,
+func.func @generic_index_op_1D_reduce(%arg0: memref<?xf32>,
                                 %arg1: memref<f32>,
                                 %arg2: memref<f32>)
 {
@@ -545,7 +545,7 @@ func @generic_index_op_1D_reduce(%arg0: memref<?xf32>,
   iterator_types = ["parallel"],
   library_call = "some_external_fn"
 }
-func @generic_const_init(%arg0: memref<?xf32>) {
+func.func @generic_const_init(%arg0: memref<?xf32>) {
         %cst = arith.constant 1.0 : f32
   linalg.generic #trait_const_fill outs(%arg0 : memref<?xf32>) {
     ^bb0(%arg1: f32):   
@@ -577,7 +577,7 @@ func @generic_const_init(%arg0: memref<?xf32>) {
   indexing_maps = #scalar_access,
   library_call = "some_external_fn"
 }
-func @scalar_code(%arg0: memref<f32>, %arg1 : memref<f32>, %arg2 : memref<f32>, %arg3 : i1)
+func.func @scalar_code(%arg0: memref<f32>, %arg1 : memref<f32>, %arg2 : memref<f32>, %arg3 : i1)
 {
   linalg.generic #scalar_trait
     ins(%arg0, %arg1 : memref<f32>, memref<f32>)
@@ -621,7 +621,7 @@ func @scalar_code(%arg0: memref<f32>, %arg1 : memref<f32>, %arg2 : memref<f32>,
 //----------------------------------------------------------------------------//
 // Named ops to loops.
 //----------------------------------------------------------------------------//
-func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
+func.func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
   linalg.batch_matmul ins(%A, %B : memref<?x?x?xf32>, memref<?x?x?xf32>)
                      outs(%C : memref<?x?x?xf32>)
   return
@@ -663,7 +663,7 @@ func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memre
 //       CHECKPARALLEL:       store %[[res]], %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>
 
 
-func @conv1d_no_symbols(%in : memref<?xf32>, %filter : memref<?xf32>, %out : memref<?xf32>) -> () {
+func.func @conv1d_no_symbols(%in : memref<?xf32>, %filter : memref<?xf32>, %out : memref<?xf32>) -> () {
   linalg.conv_1d ins(%in, %filter : memref<?xf32>, memref<?xf32>)
                 outs(%out : memref<?xf32>)
   return
@@ -706,7 +706,7 @@ func @conv1d_no_symbols(%in : memref<?xf32>, %filter : memref<?xf32>, %out : mem
 //       CHECKPARALLEL:     store %[[res]], %[[arg2]][%[[b]]] : memref<?xf32>
 
 
-func @conv2d_no_symbols(%in : memref<?x?xf32>, %filter : memref<?x?xf32>, %out : memref<?x?xf32>) -> () {
+func.func @conv2d_no_symbols(%in : memref<?x?xf32>, %filter : memref<?x?xf32>, %out : memref<?x?xf32>) -> () {
   linalg.conv_2d ins(%in, %filter : memref<?x?xf32>, memref<?x?xf32>)
                 outs(%out: memref<?x?xf32>)
   return
@@ -759,7 +759,7 @@ func @conv2d_no_symbols(%in : memref<?x?xf32>, %filter : memref<?x?xf32>, %out :
 //       CHECKPARALLEL:       store %[[res]], %[[arg2]][%[[arg3]], %[[arg4]]] : memref<?x?xf32>
 
 
-func @conv3d_no_symbols(%in : memref<?x?x?xf32>, %filter : memref<?x?x?xf32>, %out : memref<?x?x?xf32>) -> () {
+func.func @conv3d_no_symbols(%in : memref<?x?x?xf32>, %filter : memref<?x?x?xf32>, %out : memref<?x?x?xf32>) -> () {
   linalg.conv_3d ins(%in, %filter : memref<?x?x?xf32>, memref<?x?x?xf32>)
                 outs(%out : memref<?x?x?xf32>)
   return
@@ -825,7 +825,7 @@ func @conv3d_no_symbols(%in : memref<?x?x?xf32>, %filter : memref<?x?x?xf32>, %o
 
 // -----
 
-func @lower_to_loops_with_rank_reducing_subviews(
+func.func @lower_to_loops_with_rank_reducing_subviews(
     %arg0 : memref<?xi32>, %arg1 : memref<?x?xi32>, %arg2 : index,
     %arg3 : index, %arg4 : index) {
   %0 = memref.subview %arg0[%arg2] [%arg3] [1]

diff --git a/mlir/test/Dialect/Linalg/lower-pad-tensor.mlir b/mlir/test/Dialect/Linalg/lower-pad-tensor.mlir
index c6a3b1eed30f1..132752b035cc0 100644
--- a/mlir/test/Dialect/Linalg/lower-pad-tensor.mlir
+++ b/mlir/test/Dialect/Linalg/lower-pad-tensor.mlir
@@ -3,7 +3,7 @@
 // CHECK-DAG:   #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK-DAG:   #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0 + 1, d1 + 1, d2 + 1, d3 + 2)>
 // CHECK-LABEL: func @pad_tensor_with_memrefs
-func @pad_tensor_with_memrefs(%arg0: memref<1x28x28x1xf32>) -> memref<2x31x31x3xf32> {
+func.func @pad_tensor_with_memrefs(%arg0: memref<1x28x28x1xf32>) -> memref<2x31x31x3xf32> {
   %cst = arith.constant 0.000000e+00 : f32
   %0 = bufferization.to_tensor %arg0 : memref<1x28x28x1xf32>
   %1 = tensor.pad %0 low[1, 1, 1, 2] high[0, 2, 2, 0]  {
@@ -23,7 +23,7 @@ func @pad_tensor_with_memrefs(%arg0: memref<1x28x28x1xf32>) -> memref<2x31x31x3x
 // CHECK-DAG:   #[[$MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-DAG:   #[[$MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0 + 1, d1 + 2, d2 + 2)>
 // CHECK-LABEL: func @pad_tensor_no_memrefs
-func @pad_tensor_no_memrefs(%arg0: tensor<1x28x28xf32>) -> tensor<2x32x32xf32> {
+func.func @pad_tensor_no_memrefs(%arg0: tensor<1x28x28xf32>) -> tensor<2x32x32xf32> {
   %cst = arith.constant 0.000000e+00 : f32
   %0 = tensor.pad %arg0 low[1, 2, 2] high[0, 2, 2]  {
   ^bb0(%arg1: index, %arg2: index, %arg3: index):  
@@ -41,7 +41,7 @@ func @pad_tensor_no_memrefs(%arg0: tensor<1x28x28xf32>) -> tensor<2x32x32xf32> {
 // CHECK-DAG:   #[[$MAP4:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK-DAG:   #[[$MAP5:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 + 2, d2 + 2, d3)>
 // CHECK-LABEL: func @pad_tensor_detailed
-func @pad_tensor_detailed(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
+func.func @pad_tensor_detailed(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
   %cst = arith.constant 0.000000e+00 : f32
   %0 = tensor.pad %arg0 low[0, 2, 2, 0] high[0, 2, 2, 0]  {
   ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):  

diff --git a/mlir/test/Dialect/Linalg/named-ops.mlir b/mlir/test/Dialect/Linalg/named-ops.mlir
index 50ad331a3c9af..992da7e80ad09 100644
--- a/mlir/test/Dialect/Linalg/named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/named-ops.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
 
 // CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwcm_tensor
-func @depthwise_conv_2d_nhwc_hwcm_tensor(%input: tensor<2x4x5x2xf32>, %filter: tensor<2x2x2x3xf32>) -> tensor<2x3x4x2x3xf32> {
+func.func @depthwise_conv_2d_nhwc_hwcm_tensor(%input: tensor<2x4x5x2xf32>, %filter: tensor<2x2x2x3xf32>) -> tensor<2x3x4x2x3xf32> {
   %zero = arith.constant 0.000000e+00 : f32
   %init = linalg.init_tensor [2, 3, 4, 2, 3] : tensor<2x3x4x2x3xf32>
   %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<2x3x4x2x3xf32>) -> tensor<2x3x4x2x3xf32>
@@ -17,7 +17,7 @@ func @depthwise_conv_2d_nhwc_hwcm_tensor(%input: tensor<2x4x5x2xf32>, %filter: t
 }
 
 // CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwcm_memref
-func @depthwise_conv_2d_nhwc_hwcm_memref(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x3x4x2x3xf32>) {
+func.func @depthwise_conv_2d_nhwc_hwcm_memref(%input: memref<2x4x5x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x3x4x2x3xf32>) {
   // CHECK:      linalg.depthwise_conv_2d_nhwc_hwcm
   // CHECK-SAME:   {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
   // CHECK-SAME:   ins(%{{.+}}, %{{.+}} : memref<2x4x5x2xf32>, memref<2x2x2x3xf32>)
@@ -30,7 +30,7 @@ func @depthwise_conv_2d_nhwc_hwcm_memref(%input: memref<2x4x5x2xf32>, %filter: m
 }
 
 // CHECK-LABEL: func @depthwise_conv_1d_nw_tensor
-func @depthwise_conv_1d_nw_tensor(%input: tensor<1x113x96xf32>, %filter: tensor<3x96xf32>) -> tensor<1x56x96xf32> {
+func.func @depthwise_conv_1d_nw_tensor(%input: tensor<1x113x96xf32>, %filter: tensor<3x96xf32>) -> tensor<1x56x96xf32> {
   %init = linalg.init_tensor [1, 56, 96] : tensor<1x56x96xf32>
   // CHECK:      %{{.+}} = linalg.depthwise_conv_1d_nw
   // CHECK-SAME:   {dilations = dense<1> : vector<1xi64>, strides = dense<2> : vector<1xi64>}
@@ -43,7 +43,7 @@ func @depthwise_conv_1d_nw_tensor(%input: tensor<1x113x96xf32>, %filter: tensor<
 }
 
 // CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwc_tensor
-func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x113x113x96xf32>, %filter: tensor<3x3x96xf32>) -> tensor<1x56x56x96xf32> {
+func.func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x113x113x96xf32>, %filter: tensor<3x3x96xf32>) -> tensor<1x56x56x96xf32> {
   %init = linalg.init_tensor [1, 56, 56, 96] : tensor<1x56x56x96xf32>
   // CHECK:      %{{.+}} = linalg.depthwise_conv_2d_nhwc_hwc
   // CHECK-SAME:   {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
@@ -56,7 +56,7 @@ func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x113x113x96xf32>, %filte
 }
 
 // CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwc_memref
-func @depthwise_conv_2d_nhwc_hwc_memref(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
+func.func @depthwise_conv_2d_nhwc_hwc_memref(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
   // CHECK:      linalg.depthwise_conv_2d_nhwc_hwc
   // CHECK-SAME:   {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
   // CHECK-SAME:   ins(%{{.+}}, %{{.+}} : memref<1x113x113x96xf32>, memref<3x3x96xf32>)
@@ -67,7 +67,7 @@ func @depthwise_conv_2d_nhwc_hwc_memref(%input: memref<1x113x113x96xf32>, %filte
   return
 }
 
-func @depthwise_conv_2d_nhwc_hwcm_tensor_dilated(%input: tensor<2x8x9x2xf32>, %filter: tensor<2x2x2x3xf32>) -> tensor<2x6x7x2x3xf32> {
+func.func @depthwise_conv_2d_nhwc_hwcm_tensor_dilated(%input: tensor<2x8x9x2xf32>, %filter: tensor<2x2x2x3xf32>) -> tensor<2x6x7x2x3xf32> {
   %zero = arith.constant 0.000000e+00 : f32
   %init = linalg.init_tensor [2, 6, 7, 2, 3] : tensor<2x6x7x2x3xf32>
   %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<2x6x7x2x3xf32>) -> tensor<2x6x7x2x3xf32>
@@ -83,7 +83,7 @@ func @depthwise_conv_2d_nhwc_hwcm_tensor_dilated(%input: tensor<2x8x9x2xf32>, %f
 }
 
 // CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwcm_memref_dilated
-func @depthwise_conv_2d_nhwc_hwcm_memref_dilated(%input: memref<2x8x9x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x6x7x2x3xf32>) {
+func.func @depthwise_conv_2d_nhwc_hwcm_memref_dilated(%input: memref<2x8x9x2xf32>, %filter: memref<2x2x2x3xf32>, %output: memref<2x6x7x2x3xf32>) {
   // CHECK:      linalg.depthwise_conv_2d_nhwc_hwcm
   // CHECK-SAME:   {dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
   // CHECK-SAME:   ins(%{{.+}}, %{{.+}} : memref<2x8x9x2xf32>, memref<2x2x2x3xf32>)
@@ -98,7 +98,7 @@ func @depthwise_conv_2d_nhwc_hwcm_memref_dilated(%input: memref<2x8x9x2xf32>, %f
 // -----
 
 // CHECK-LABEL: func @depthwise_conv_2d_input_nhwc_filter_default_attributes
-func @depthwise_conv_2d_input_nhwc_filter_default_attributes(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
+func.func @depthwise_conv_2d_input_nhwc_filter_default_attributes(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
   // CHECK:      linalg.depthwise_conv_2d_nhwc_hwc
   // CHECK-NOT:  strides =
   // CHECK-NOT:  dilations =
@@ -110,7 +110,7 @@ func @depthwise_conv_2d_input_nhwc_filter_default_attributes(%input: memref<1x11
 
 // -----
 
-func @depthwise_conv_2d_input_nhwc_filter_wrong_stride_element_type(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
+func.func @depthwise_conv_2d_input_nhwc_filter_wrong_stride_element_type(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
   // expected-error @+1 {{op attribute 'strides' failed to satisfy constraint: 64-bit signless int elements attribute of shape [2]}}
   linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : vector<2xi64>, strides = dense<2.0> : vector<2xf32>}
     ins(%input, %filter: memref<1x113x113x96xf32>, memref<3x3x96xf32>)
@@ -120,7 +120,7 @@ func @depthwise_conv_2d_input_nhwc_filter_wrong_stride_element_type(%input: memr
 
 // -----
 
-func @depthwise_conv_2d_input_nhwc_filter_wrong_stride_size(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
+func.func @depthwise_conv_2d_input_nhwc_filter_wrong_stride_size(%input: memref<1x113x113x96xf32>, %filter: memref<3x3x96xf32>, %output: memref<1x56x56x96xf32>) {
   // expected-error @+1 {{op attribute 'strides' failed to satisfy constraint: 64-bit signless int elements attribute of shape [2]}}
   linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<3xi64> }
     ins(%input, %filter: memref<1x113x113x96xf32>, memref<3x3x96xf32>)
@@ -131,7 +131,7 @@ func @depthwise_conv_2d_input_nhwc_filter_wrong_stride_size(%input: memref<1x113
 // -----
 
 // CHECK-LABEL: func @conv_1d_nwc_wcf
-func @conv_1d_nwc_wcf(%input: tensor<?x?x?xf32>, %filter: tensor<?x?x?xf32>, %init: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+func.func @conv_1d_nwc_wcf(%input: tensor<?x?x?xf32>, %filter: tensor<?x?x?xf32>, %init: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
   // CHECK:      %{{.+}} = linalg.conv_1d_nwc_wcf
   // CHECK-SAME:   dilations = dense<1> : tensor<1xi64>
   // CHECK-SAME:   strides = dense<1> : tensor<1xi64>
@@ -147,7 +147,7 @@ func @conv_1d_nwc_wcf(%input: tensor<?x?x?xf32>, %filter: tensor<?x?x?xf32>, %in
 // -----
 
 // CHECK-LABEL: func @conv_1d_nwc_wcf
-func @conv_1d_nwc_wcf(%input: memref<?x?x?xf32>, %filter: memref<?x?x?xf32>, %output: memref<?x?x?xf32>) {
+func.func @conv_1d_nwc_wcf(%input: memref<?x?x?xf32>, %filter: memref<?x?x?xf32>, %output: memref<?x?x?xf32>) {
   // CHECK:      linalg.conv_1d_nwc_wcf
   // CHECK-SAME:   dilations = dense<1> : tensor<1xi64>
   // CHECK-SAME:   strides = dense<1> : tensor<1xi64>
@@ -163,7 +163,7 @@ func @conv_1d_nwc_wcf(%input: memref<?x?x?xf32>, %filter: memref<?x?x?xf32>, %ou
 // -----
 
 // CHECK-LABEL: func @conv_2d_nhwc_hwcf
-func @conv_2d_nhwc_hwcf(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?xf32>, %init: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
+func.func @conv_2d_nhwc_hwcf(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?xf32>, %init: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
   // CHECK:      %{{.+}} = linalg.conv_2d_nhwc_hwcf
   // CHECK-SAME:   dilations = dense<1> : tensor<2xi64>
   // CHECK-SAME:   strides = dense<1> : tensor<2xi64>
@@ -179,7 +179,7 @@ func @conv_2d_nhwc_hwcf(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?xf32
 // -----
 
 // CHECK-LABEL: func @conv_2d_nhwc_hwcf
-func @conv_2d_nhwc_hwcf(%input: memref<?x?x?x?xf32>, %filter: memref<?x?x?x?xf32>, %output: memref<?x?x?x?xf32>) {
+func.func @conv_2d_nhwc_hwcf(%input: memref<?x?x?x?xf32>, %filter: memref<?x?x?x?xf32>, %output: memref<?x?x?x?xf32>) {
   // CHECK:      linalg.conv_2d_nhwc_hwcf
   // CHECK-SAME:   dilations = dense<1> : tensor<2xi64>
   // CHECK-SAME:   strides = dense<1> : tensor<2xi64>
@@ -195,7 +195,7 @@ func @conv_2d_nhwc_hwcf(%input: memref<?x?x?x?xf32>, %filter: memref<?x?x?x?xf32
 // -----
 
 // CHECK-LABEL: func @conv_3d_ndhwc_dhwcf
-func @conv_3d_ndhwc_dhwcf(%input: tensor<?x?x?x?x?xf32>, %filter: tensor<?x?x?x?x?xf32>, %init: tensor<?x?x?x?x?xf32>) -> tensor<?x?x?x?x?xf32> {
+func.func @conv_3d_ndhwc_dhwcf(%input: tensor<?x?x?x?x?xf32>, %filter: tensor<?x?x?x?x?xf32>, %init: tensor<?x?x?x?x?xf32>) -> tensor<?x?x?x?x?xf32> {
   // CHECK:      %{{.+}} = linalg.conv_3d_ndhwc_dhwcf
   // CHECK-SAME:   dilations = dense<1> : tensor<3xi64>
   // CHECK-SAME:   strides = dense<1> : tensor<3xi64>
@@ -211,7 +211,7 @@ func @conv_3d_ndhwc_dhwcf(%input: tensor<?x?x?x?x?xf32>, %filter: tensor<?x?x?x?
 // -----
 
 // CHECK-LABEL: func @conv_3d_ndhwc_dhwcf
-func @conv_3d_ndhwc_dhwcf(%input: memref<?x?x?x?x?xf32>, %filter: memref<?x?x?x?x?xf32>, %output: memref<?x?x?x?x?xf32>) {
+func.func @conv_3d_ndhwc_dhwcf(%input: memref<?x?x?x?x?xf32>, %filter: memref<?x?x?x?x?xf32>, %output: memref<?x?x?x?x?xf32>) {
   // CHECK:      linalg.conv_3d_ndhwc_dhwcf
   // CHECK-SAME:   dilations = dense<1> : tensor<3xi64>
   // CHECK-SAME:   strides = dense<1> : tensor<3xi64>
@@ -232,7 +232,7 @@ func @conv_3d_ndhwc_dhwcf(%input: memref<?x?x?x?x?xf32>, %filter: memref<?x?x?x?
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xf32>, tensor<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
-func @pooling_nhwc_sum_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
+func.func @pooling_nhwc_sum_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
   %init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -251,7 +251,7 @@ func @pooling_nhwc_sum_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x1xf32>, memref<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x1xf32>)
-func @pooling_nhwc_sum(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %output: memref<1x2x2x1xf32>) {
+func.func @pooling_nhwc_sum(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %output: memref<1x2x2x1xf32>) {
   linalg.pooling_nhwc_sum {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
     ins(%input, %fake: memref<1x4x4x1xf32>, memref<3x3xf32>)
     outs(%output: memref<1x2x2x1xf32>)
@@ -266,7 +266,7 @@ func @pooling_nhwc_sum(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %out
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x1x4x4xf32>, tensor<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x1x2x2xf32>) -> tensor<1x1x2x2xf32>
-func @pooling_nchw_sum_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32> {
+func.func @pooling_nchw_sum_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
   %init = linalg.init_tensor [1, 1, 2, 2] : tensor<1x1x2x2xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -285,7 +285,7 @@ func @pooling_nchw_sum_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x1x4x4xf32>, memref<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x1x2x2xf32>)
-func @pooling_nchw_sum(%input: memref<1x1x4x4xf32>, %fake: memref<3x3xf32>, %output: memref<1x1x2x2xf32>) {
+func.func @pooling_nchw_sum(%input: memref<1x1x4x4xf32>, %fake: memref<3x3xf32>, %output: memref<1x1x2x2xf32>) {
   linalg.pooling_nchw_sum {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
     ins(%input, %fake: memref<1x1x4x4xf32>, memref<3x3xf32>)
     outs(%output: memref<1x1x2x2xf32>)
@@ -300,7 +300,7 @@ func @pooling_nchw_sum(%input: memref<1x1x4x4xf32>, %fake: memref<3x3xf32>, %out
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xf32>, tensor<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
-func @pooling_nhwc_max_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
+func.func @pooling_nhwc_max_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
   %init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -319,7 +319,7 @@ func @pooling_nhwc_max_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x1x4x4xf32>, tensor<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x1x2x2xf32>) -> tensor<1x1x2x2xf32>
 
-func @pooling_nchw_max_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32> {
+func.func @pooling_nchw_max_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
   %init = linalg.init_tensor [1, 1, 2, 2] : tensor<1x1x2x2xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -338,7 +338,7 @@ func @pooling_nchw_max_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x1xf32>, memref<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x1xf32>)
-func @pooling_nhwc_max(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %output: memref<1x2x2x1xf32>) {
+func.func @pooling_nhwc_max(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %output: memref<1x2x2x1xf32>) {
   linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
     ins(%input, %fake: memref<1x4x4x1xf32>, memref<3x3xf32>)
     outs(%output: memref<1x2x2x1xf32>)
@@ -353,7 +353,7 @@ func @pooling_nhwc_max(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %out
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xi8>, tensor<3x3xi8>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x1xi8>) -> tensor<1x2x2x1xi8>
-func @pooling_nhwc_i8_max_tensor(%input: tensor<1x4x4x1xi8>) -> tensor<1x2x2x1xi8> {
+func.func @pooling_nhwc_i8_max_tensor(%input: tensor<1x4x4x1xi8>) -> tensor<1x2x2x1xi8> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xi8>
   %init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xi8>
   %cst = arith.constant 0 : i8
@@ -372,7 +372,7 @@ func @pooling_nhwc_i8_max_tensor(%input: tensor<1x4x4x1xi8>) -> tensor<1x2x2x1xi
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x1xi8>, memref<3x3xi8>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x1xi8>)
-func @pooling_nhwc_i8_max(%input: memref<1x4x4x1xi8>, %fake: memref<3x3xi8>, %output: memref<1x2x2x1xi8>) {
+func.func @pooling_nhwc_i8_max(%input: memref<1x4x4x1xi8>, %fake: memref<3x3xi8>, %output: memref<1x2x2x1xi8>) {
   linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
     ins(%input, %fake: memref<1x4x4x1xi8>, memref<3x3xi8>)
     outs(%output: memref<1x2x2x1xi8>)
@@ -387,7 +387,7 @@ func @pooling_nhwc_i8_max(%input: memref<1x4x4x1xi8>, %fake: memref<3x3xi8>, %ou
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xi16>, tensor<3x3xi16>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x1xi16>) -> tensor<1x2x2x1xi16>
-func @pooling_nhwc_i16_max_tensor(%input: tensor<1x4x4x1xi16>) -> tensor<1x2x2x1xi16> {
+func.func @pooling_nhwc_i16_max_tensor(%input: tensor<1x4x4x1xi16>) -> tensor<1x2x2x1xi16> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xi16>
   %init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xi16>
   %cst = arith.constant 0 : i16
@@ -406,7 +406,7 @@ func @pooling_nhwc_i16_max_tensor(%input: tensor<1x4x4x1xi16>) -> tensor<1x2x2x1
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x1xi16>, memref<3x3xi16>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x1xi16>)
-func @pooling_nhwc_i16_max(%input: memref<1x4x4x1xi16>, %fake: memref<3x3xi16>, %output: memref<1x2x2x1xi16>) {
+func.func @pooling_nhwc_i16_max(%input: memref<1x4x4x1xi16>, %fake: memref<3x3xi16>, %output: memref<1x2x2x1xi16>) {
   linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
     ins(%input, %fake: memref<1x4x4x1xi16>, memref<3x3xi16>)
     outs(%output: memref<1x2x2x1xi16>)
@@ -421,7 +421,7 @@ func @pooling_nhwc_i16_max(%input: memref<1x4x4x1xi16>, %fake: memref<3x3xi16>,
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xi32>, tensor<3x3xi32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x1xi32>) -> tensor<1x2x2x1xi32>
-func @pooling_nhwc_i32_max_tensor(%input: tensor<1x4x4x1xi32>) -> tensor<1x2x2x1xi32> {
+func.func @pooling_nhwc_i32_max_tensor(%input: tensor<1x4x4x1xi32>) -> tensor<1x2x2x1xi32> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xi32>
   %init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xi32>
   %cst = arith.constant 0 : i32
@@ -440,7 +440,7 @@ func @pooling_nhwc_i32_max_tensor(%input: tensor<1x4x4x1xi32>) -> tensor<1x2x2x1
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x1xi32>, memref<3x3xi32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x1xi32>)
-func @pooling_nhwc_i32_max(%input: memref<1x4x4x1xi32>, %fake: memref<3x3xi32>, %output: memref<1x2x2x1xi32>) {
+func.func @pooling_nhwc_i32_max(%input: memref<1x4x4x1xi32>, %fake: memref<3x3xi32>, %output: memref<1x2x2x1xi32>) {
   linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
     ins(%input, %fake: memref<1x4x4x1xi32>, memref<3x3xi32>)
     outs(%output: memref<1x2x2x1xi32>)
@@ -456,7 +456,7 @@ func @pooling_nhwc_i32_max(%input: memref<1x4x4x1xi32>, %fake: memref<3x3xi32>,
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xf32>, tensor<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
-func @pooling_nhwc_min_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
+func.func @pooling_nhwc_min_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
   %fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
   %init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -475,7 +475,7 @@ func @pooling_nhwc_min_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32
 // CHECK-SAME:      strides = dense<1> : tensor<2xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x1xf32>, memref<3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x1xf32>)
-func @pooling_nhwc_min(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %output: memref<1x2x2x1xf32>) {
+func.func @pooling_nhwc_min(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %output: memref<1x2x2x1xf32>) {
   linalg.pooling_nhwc_min {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
     ins(%input, %fake: memref<1x4x4x1xf32>, memref<3x3xf32>)
     outs(%output: memref<1x2x2x1xf32>)
@@ -490,7 +490,7 @@ func @pooling_nhwc_min(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>, %out
 // CHECK-SAME:      strides = dense<1> : tensor<3xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x4x1xf32>, tensor<3x3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
-func @pooling_ndhwc_sum_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
+func.func @pooling_ndhwc_sum_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
   %fake = linalg.init_tensor [3, 3, 3] : tensor<3x3x3xf32>
   %init = linalg.init_tensor [1, 2, 2, 2, 1] : tensor<1x2x2x2x1xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -509,7 +509,7 @@ func @pooling_ndhwc_sum_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x
 // CHECK-SAME:      strides = dense<1> : tensor<3xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x4x1xf32>, memref<3x3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x2x1xf32>)
-func @pooling_ndhwc_sum(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>, %output: memref<1x2x2x2x1xf32>) {
+func.func @pooling_ndhwc_sum(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>, %output: memref<1x2x2x2x1xf32>) {
   linalg.pooling_ndhwc_sum {dilations = dense<1> : tensor<3xi64>, strides = dense<1> : tensor<3xi64>}
     ins(%input, %fake: memref<1x4x4x4x1xf32>, memref<3x3x3xf32>)
     outs(%output: memref<1x2x2x2x1xf32>)
@@ -524,7 +524,7 @@ func @pooling_ndhwc_sum(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>,
 // CHECK-SAME:      strides = dense<1> : tensor<3xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x4x1xf32>, tensor<3x3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
-func @pooling_ndhwc_max_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
+func.func @pooling_ndhwc_max_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
   %fake = linalg.init_tensor [3, 3, 3] : tensor<3x3x3xf32>
   %init = linalg.init_tensor [1, 2, 2, 2, 1] : tensor<1x2x2x2x1xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -543,7 +543,7 @@ func @pooling_ndhwc_max_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x
 // CHECK-SAME:      strides = dense<1> : tensor<3xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x4x1xf32>, memref<3x3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x2x1xf32>)
-func @pooling_ndhwc_max(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>, %output: memref<1x2x2x2x1xf32>) {
+func.func @pooling_ndhwc_max(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>, %output: memref<1x2x2x2x1xf32>) {
   linalg.pooling_ndhwc_max {dilations = dense<1> : tensor<3xi64>, strides = dense<1> : tensor<3xi64>}
     ins(%input, %fake: memref<1x4x4x4x1xf32>, memref<3x3x3xf32>)
     outs(%output: memref<1x2x2x2x1xf32>)
@@ -558,7 +558,7 @@ func @pooling_ndhwc_max(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>,
 // CHECK-SAME:      strides = dense<1> : tensor<3xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : tensor<1x4x4x4x1xf32>, tensor<3x3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
-func @pooling_ndhwc_min_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
+func.func @pooling_ndhwc_min_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
   %fake = linalg.init_tensor [3, 3, 3] : tensor<3x3x3xf32>
   %init = linalg.init_tensor [1, 2, 2, 2, 1] : tensor<1x2x2x2x1xf32>
   %cst = arith.constant 0.000000e+00 : f32
@@ -577,7 +577,7 @@ func @pooling_ndhwc_min_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x
 // CHECK-SAME:      strides = dense<1> : tensor<3xi64>
 // CHECK-SAME:      ins(%{{.+}}, %{{.+}} : memref<1x4x4x4x1xf32>, memref<3x3x3xf32>)
 // CHECK-SAME:      outs(%{{.+}} : memref<1x2x2x2x1xf32>)
-func @pooling_ndhwc_min(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>, %output: memref<1x2x2x2x1xf32>) {
+func.func @pooling_ndhwc_min(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>, %output: memref<1x2x2x2x1xf32>) {
   linalg.pooling_ndhwc_min {dilations = dense<1> : tensor<3xi64>, strides = dense<1> : tensor<3xi64>}
     ins(%input, %fake: memref<1x4x4x4x1xf32>, memref<3x3x3xf32>)
     outs(%output: memref<1x2x2x2x1xf32>)
@@ -589,7 +589,7 @@ func @pooling_ndhwc_min(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3xf32>,
 #map0 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1 * 2, d2 * 2 + d5, d6)>
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d3)>
 #map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3)>
-func @conv_interface_wrong_input_indexing_map(
+func.func @conv_interface_wrong_input_indexing_map(
     %arg0 : tensor<?x?x?x?xf32>, %arg2 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
   // expected-error @+1 {{unexpected input index map for convolutions}}
   %0 = "linalg.conv_2d_nhwc_hwcf"(%arg0, %arg1, %arg2) ({
@@ -606,7 +606,7 @@ func @conv_interface_wrong_input_indexing_map(
 #map0 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1 + d4, d2 + d5, d6)>
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d3, d5 + 1)>
 #map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3)>
-func @conv_interface_wrong_num_operands(
+func.func @conv_interface_wrong_num_operands(
     %arg0 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?x?xf32>, %arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
   // expected-error @+1 {{expected output/filter indexing maps to be projected permutations}}
   %0 = "linalg.conv_2d_nhwc_hwcf"(%arg0, %arg1, %arg2) ({

diff --git a/mlir/test/Dialect/Linalg/namedop_conversion.mlir b/mlir/test/Dialect/Linalg/namedop_conversion.mlir
index 5f33f650930e2..8b779f2e496ba 100644
--- a/mlir/test/Dialect/Linalg/namedop_conversion.mlir
+++ b/mlir/test/Dialect/Linalg/namedop_conversion.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -linalg-named-op-conversion -split-input-file | FileCheck %s
 
 // CHECK-LABEL: @depthwise_conv
-func @depthwise_conv(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<?x?x?x1xf32>, %arg2: tensor<?x?x?x?x1xf32>) -> tensor<?x?x?x?x1xf32> {
+func.func @depthwise_conv(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<?x?x?x1xf32>, %arg2: tensor<?x?x?x?x1xf32>) -> tensor<?x?x?x?x1xf32> {
   // CHECK-DAG: %[[KERNEL:.+]] = tensor.collapse_shape %arg1 {{\[\[}}0], [1], [2, 3]]
   // CHECK-DAG: %[[INIT:.+]] = tensor.collapse_shape %arg2 {{\[\[}}0], [1], [2], [3, 4]]
   // CHECK-DAG: %[[CONV:.+]] = linalg.depthwise_conv_2d_nhwc_hwc {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %[[KERNEL]] : tensor<?x?x?x?xf32>, tensor<?x?x?xf32>) outs(%[[INIT]] : tensor<?x?x?x?xf32>)
@@ -14,7 +14,7 @@ func @depthwise_conv(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<?x?x?x1xf32>, %ar
 // -----
 
 // CHECK-LABEL: @depthwise_conv_q
-func @depthwise_conv_q(%arg0: tensor<?x?x?x?xi8>, %arg1: tensor<?x?x?x1xi8>, %arg2: tensor<?x?x?x?x1xi32>, %arg3 : i32, %arg4 : i32) -> tensor<?x?x?x?x1xi32> {
+func.func @depthwise_conv_q(%arg0: tensor<?x?x?x?xi8>, %arg1: tensor<?x?x?x1xi8>, %arg2: tensor<?x?x?x?x1xi32>, %arg3 : i32, %arg4 : i32) -> tensor<?x?x?x?x1xi32> {
   // CHECK-DAG: %[[KERNEL:.+]] = tensor.collapse_shape %arg1 {{\[\[}}0], [1], [2, 3]]
   // CHECK-DAG: %[[INIT:.+]] = tensor.collapse_shape %arg2 {{\[\[}}0], [1], [2], [3, 4]]
   // CHECK-DAG: %[[CONV:.+]] = linalg.depthwise_conv_2d_nhwc_hwc_q {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %[[KERNEL]], %arg3, %arg4 : tensor<?x?x?x?xi8>, tensor<?x?x?xi8>, i32, i32) outs(%[[INIT]] : tensor<?x?x?x?xi32>)

diff --git a/mlir/test/Dialect/Linalg/one-shot-module-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Linalg/one-shot-module-bufferize-allow-return-allocs.mlir
index 183a07c8c8d63..21cd551eb3ae0 100644
--- a/mlir/test/Dialect/Linalg/one-shot-module-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-module-bufferize-allow-return-allocs.mlir
@@ -15,7 +15,7 @@
 // CHECK-LABEL: func @create_tensor() -> memref<10xf32> {
 //       CHECK:   %[[alloc:.*]] = memref.alloc
 //       CHECK:   return %[[alloc]]
-func @create_tensor() -> tensor<10xf32> {
+func.func @create_tensor() -> tensor<10xf32> {
   %0 = linalg.init_tensor [10] : tensor<10xf32>
   return %0 : tensor<10xf32>
 }
@@ -24,7 +24,7 @@ func @create_tensor() -> tensor<10xf32> {
 // CHECK: %[[call:.*]] = call @create_tensor() : () -> memref<10xf32>
 // CHECK: %[[extracted:.*]] = memref.load %[[call]]
 // CHECK: return %[[extracted]]
-func @caller(%idx: index) -> f32 {
+func.func @caller(%idx: index) -> f32 {
   %0 = call @create_tensor() : () -> (tensor<10xf32>)
   %1 = tensor.extract %0[%idx] : tensor<10xf32>
   return %1 : f32
@@ -40,7 +40,7 @@ func @caller(%idx: index) -> f32 {
 //   CHECK-NOT:   alloc
 //   CHECK-NOT:   copy
 //       CHECK:   memref.subview
-func @return_slice(%t: tensor<?xf32>, %sz: index) -> (tensor<?xf32>) {
+func.func @return_slice(%t: tensor<?xf32>, %sz: index) -> (tensor<?xf32>) {
   %0 = tensor.extract_slice %t[4][%sz][1] : tensor<?xf32> to tensor<?xf32>
   return %0 : tensor<?xf32>
 }
@@ -54,7 +54,7 @@ func @return_slice(%t: tensor<?xf32>, %sz: index) -> (tensor<?xf32>) {
 //       CHECK:   linalg.fill ins({{.*}}) outs(%[[t]]
 //       CHECK:   memref.load %[[call]]
 //       CHECK:   memref.load %[[t]]
-func @main(%t: tensor<?xf32>, %sz: index, %idx: index) -> (f32, f32) {
+func.func @main(%t: tensor<?xf32>, %sz: index, %idx: index) -> (f32, f32) {
   %cst = arith.constant 1.0 : f32
   %0 = call @return_slice(%t, %sz) : (tensor<?xf32>, index) -> (tensor<?xf32>)
   %filled = linalg.fill ins(%cst : f32) outs(%t : tensor<?xf32>) -> tensor<?xf32>

diff --git a/mlir/test/Dialect/Linalg/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-module-bufferize.mlir
index 24fe44ca58a34..b3086e4da8286 100644
--- a/mlir/test/Dialect/Linalg/one-shot-module-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-module-bufferize.mlir
@@ -11,10 +11,10 @@
 // Bufferization of bodiless function with no tensor return value.
 
 // CHECK-LABEL: func private @private_func
-func private @private_func(tensor<?xf32>) -> ()
+func.func private @private_func(tensor<?xf32>) -> ()
 
 // CHECK-LABEL: func @empty_func()
-func @empty_func() -> () {
+func.func @empty_func() -> () {
   return
 }
 
@@ -23,12 +23,12 @@ func @empty_func() -> () {
 // A bodiless function that returns something that is not a tensor.
 
 // CHECK: func private @external_func_with_return_val(memref<4xi32, #{{.*}}>) -> f32
-func private @external_func_with_return_val(tensor<4xi32>) -> f32
+func.func private @external_func_with_return_val(tensor<4xi32>) -> f32
 
 // -----
 
 // CHECK-LABEL: func private @private_func
-func private @private_func(tensor<?xf32>) -> (f32)
+func.func private @private_func(tensor<?xf32>) -> (f32)
 
 // private_func may modify the buffer arg, but that's OK because %t is writable.
 // No alloc/copy should be inserted.
@@ -38,7 +38,7 @@ func private @private_func(tensor<?xf32>) -> (f32)
 //   CHECK-NOT: alloc
 //   CHECK-NOT: copy
 //       CHECK: call @private_func(%[[t]])
-func @main(%t: tensor<?xf32> {linalg.inplaceable = true}) -> (f32) {
+func.func @main(%t: tensor<?xf32> {linalg.inplaceable = true}) -> (f32) {
   %0 = call @private_func(%t) : (tensor<?xf32>) -> (f32)
   return %0 : f32
 }
@@ -46,7 +46,7 @@ func @main(%t: tensor<?xf32> {linalg.inplaceable = true}) -> (f32) {
 // -----
 
 // CHECK-LABEL: func private @private_func
-func private @private_func(tensor<?xf32>) -> (f32)
+func.func private @private_func(tensor<?xf32>) -> (f32)
 
 // private_func may modify the buffer arg, %t is not writable. A copy is needed.
 
@@ -57,7 +57,7 @@ func private @private_func(tensor<?xf32>) -> (f32)
 //   CHECK-DAG: %[[casted:.*]] = memref.cast %[[alloc]]
 //       CHECK: call @private_func(%[[casted]])
 //       CHECK: memref.dealloc %[[alloc]]
-func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
+func.func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
   %0 = call @private_func(%t) : (tensor<?xf32>) -> (f32)
   return %0 : f32
 }
@@ -67,7 +67,7 @@ func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
 // Test bufferization of a function without tensor args.
 
 // CHECK-LABEL: func @func_without_tensor_args
-func @func_without_tensor_args(%v : vector<10xf32>) -> () {
+func.func @func_without_tensor_args(%v : vector<10xf32>) -> () {
   // CHECK: %[[alloc:.*]] = memref.alloc()
   %0 = linalg.init_tensor[10] : tensor<10xf32>
 
@@ -90,7 +90,7 @@ func @func_without_tensor_args(%v : vector<10xf32>) -> () {
 
 // CHECK-LABEL: func @inner_func(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @inner_func(%t: tensor<?xf32>) -> (tensor<?xf32>, f32) {
+func.func @inner_func(%t: tensor<?xf32>) -> (tensor<?xf32>, f32) {
   // CHECK-NOT: copy
   %f = arith.constant 1.0 : f32
   %c0 = arith.constant 0 : index
@@ -105,7 +105,7 @@ func @inner_func(%t: tensor<?xf32>) -> (tensor<?xf32>, f32) {
 
 // CHECK-LABEL: func @call_func_with_non_tensor_return(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @call_func_with_non_tensor_return(
+func.func @call_func_with_non_tensor_return(
     %t0: tensor<?xf32> {linalg.inplaceable = true}) -> (f32, tensor<?xf32>) {
   // CHECK-NOT: alloc
   // CHECK-NOT: copy
@@ -122,7 +122,7 @@ func @call_func_with_non_tensor_return(
 
 // CHECK-LABEL: func @inner_func(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @inner_func(%t: tensor<?xf32>) -> (tensor<?xf32>, f32) {
+func.func @inner_func(%t: tensor<?xf32>) -> (tensor<?xf32>, f32) {
   // CHECK-NOT: copy
   %f = arith.constant 1.0 : f32
   %c0 = arith.constant 0 : index
@@ -137,7 +137,7 @@ func @inner_func(%t: tensor<?xf32>) -> (tensor<?xf32>, f32) {
 
 // CHECK-LABEL: func @call_func_with_non_tensor_return(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @call_func_with_non_tensor_return(
+func.func @call_func_with_non_tensor_return(
     %t0: tensor<?xf32> {linalg.inplaceable = false}) -> (f32, tensor<?xf32>) {
   // CHECK: %[[alloc:.*]] = memref.alloc
   // CHECK-DAG: memref.copy %[[arg0]], %[[alloc]]
@@ -157,13 +157,13 @@ func @call_func_with_non_tensor_return(
 // inserted then. (No copies in the other functions.)
 
 // CHECK-LABEL: func private @f0(
-func private @f0(tensor<?xf32>) -> (f32)
+func.func private @f0(tensor<?xf32>) -> (f32)
 
 // CHECK-LABEL: func @f1(
 //  CHECK-SAME:     %[[t1:.*]]: memref<?xf32
 //       CHECK:   %[[r1:.*]] = call @f0(%[[t1]])
 //       CHECK:   return %[[r1]]
-func @f1(%t: tensor<?xf32>) -> (f32) {
+func.func @f1(%t: tensor<?xf32>) -> (f32) {
   %0 = call @f0(%t) : (tensor<?xf32>) -> (f32)
   return %0 : f32
 }
@@ -172,7 +172,7 @@ func @f1(%t: tensor<?xf32>) -> (f32) {
 //  CHECK-SAME:     %[[t2:.*]]: memref<?xf32
 //       CHECK:   %[[r2:.*]] = call @f1(%[[t2]])
 //       CHECK:   return %[[r2]]
-func @f2(%t: tensor<?xf32>) -> (f32) {
+func.func @f2(%t: tensor<?xf32>) -> (f32) {
   %0 = call @f1(%t) : (tensor<?xf32>) -> (f32)
   return %0 : f32
 }
@@ -184,7 +184,7 @@ func @f2(%t: tensor<?xf32>) -> (f32) {
 //   CHECK-DAG: %[[casted:.*]] = memref.cast %[[alloc]]
 //       CHECK: call @f2(%[[casted]])
 //       CHECK: memref.dealloc %[[alloc]]
-func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
+func.func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
   %0 = call @f2(%t) : (tensor<?xf32>) -> (f32)
   return %0 : f32
 }
@@ -196,7 +196,7 @@ func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
 // CHECK-LABEL: func @does_not_read(
 //   CHECK-NOT:   alloc
 //   CHECK-NOT:   copy
-func @does_not_read(%t: tensor<?xf32>) -> tensor<?xf32> {
+func.func @does_not_read(%t: tensor<?xf32>) -> tensor<?xf32> {
   %f0 = arith.constant 0.0 : f32
   %r = linalg.fill ins(%f0 : f32) outs(%t : tensor<?xf32>) -> tensor<?xf32>
   return %r : tensor<?xf32>
@@ -211,7 +211,7 @@ func @does_not_read(%t: tensor<?xf32>) -> tensor<?xf32> {
 //       CHECK:   call @does_not_read(%[[casted]])
 //       CHECK:   %[[r:.*]] = memref.load %[[alloc]]
 //       CHECK:   memref.dealloc %[[alloc]]
-func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> f32 {
+func.func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> f32 {
   %0 = call @does_not_read(%t) : (tensor<?xf32>) -> (tensor<?xf32>)
   %idx = arith.constant 4 : index
   %r = tensor.extract %0[%idx] : tensor<?xf32>
@@ -226,10 +226,10 @@ func @main(%t: tensor<?xf32> {linalg.inplaceable = false}) -> f32 {
 
 //      CHECK: memref.global "private" constant @__constant_4xi32 : memref<4xi32> = dense<[1, 2, 3, 4]>
 //      CHECK: func private @some_external_func(memref<4xi32, #[[$DYN_1D_MAP]]>)
-func private @some_external_func(tensor<4xi32>)
+func.func private @some_external_func(tensor<4xi32>)
 
 //      CHECK: func @main()
-func @main() {
+func.func @main() {
 //  CHECK-DAG:   %[[A:.*]] = memref.get_global @__constant_4xi32 : memref<4xi32>
   %A = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
 
@@ -252,10 +252,10 @@ func @main() {
 
 //      CHECK: memref.global "private" constant @__constant_4xi32 : memref<4xi32> = dense<[1, 2, 3, 4]>
 //      CHECK: func private @some_external_func_within_scf_execute(memref<4xi32, #[[$DYN_1D_MAP]]>)
-func private @some_external_func_within_scf_execute(tensor<4xi32>)
+func.func private @some_external_func_within_scf_execute(tensor<4xi32>)
 
 //      CHECK: func @main()
-func @main() {
+func.func @main() {
 //  CHECK-DAG:   %[[A:.*]] = memref.get_global @__constant_4xi32 : memref<4xi32>
   %A = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
 
@@ -280,7 +280,7 @@ func @main() {
 
 // CHECK-LABEL: func @execute_region_test(
 //  CHECK-SAME:     %[[m1:.*]]: memref<?xf32
-func @execute_region_test(%t1 : tensor<?xf32>)
+func.func @execute_region_test(%t1 : tensor<?xf32>)
     -> (f32, tensor<?xf32>, f32)
 {
   %f1 = arith.constant 0.0 : f32
@@ -307,13 +307,13 @@ func @execute_region_test(%t1 : tensor<?xf32>)
 //      CHECK: #[[$DYN_1D_MAP:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
 
 //      CHECK:  func private @some_external_func(memref<?xf32, #[[$DYN_1D_MAP]]>)
-func private @some_external_func(tensor<?xf32>)
+func.func private @some_external_func(tensor<?xf32>)
 
 //      CHECK:  func @scf_for_with_tensor_insert_slice(
 // CHECK-SAME:    %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[B:[a-zA-Z0-9]*]]: memref<?xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[C:[a-zA-Z0-9]*]]: memref<4xf32, #[[$DYN_1D_MAP]]>
-func @scf_for_with_tensor_insert_slice(
+func.func @scf_for_with_tensor_insert_slice(
     %A : tensor<?xf32>, %B : tensor<?xf32>, %C : tensor<4xf32>,
     %lb : index, %ub : index, %step : index)
   -> (tensor<?xf32>, tensor<?xf32>)
@@ -343,7 +343,7 @@ func @scf_for_with_tensor_insert_slice(
 // CHECK-SAME:    %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[B:[a-zA-Z0-9]*]]: memref<?xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[C:[a-zA-Z0-9]*]]: memref<4xf32, #[[$DYN_1D_MAP]]>
-func @bar(
+func.func @bar(
     %A : tensor<?xf32> {linalg.inplaceable = true},
     %B : tensor<?xf32> {linalg.inplaceable = true},
     %C : tensor<4xf32> {linalg.inplaceable = true},
@@ -375,7 +375,7 @@ func @bar(
 // CHECK-SAME:    %[[A:[a-zA-Z0-9]*]]: memref<64xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[B:[a-zA-Z0-9]*]]: memref<64xf32, #[[$DYN_1D_MAP]]>
 // CHECK-SAME:    %[[C:[a-zA-Z0-9]*]]: memref<f32, #[[$DYN_0D_MAP]]>
-func @init_and_dot(%a: tensor<64xf32>, %b: tensor<64xf32>, %c: tensor<f32>) -> tensor<f32> {
+func.func @init_and_dot(%a: tensor<64xf32>, %b: tensor<64xf32>, %c: tensor<f32>) -> tensor<f32> {
   // CHECK-NEXT:   %[[C0:.*]] = arith.constant 0{{.*}} : f32
   %v0 = arith.constant 0.0 : f32
 
@@ -391,7 +391,7 @@ func @init_and_dot(%a: tensor<64xf32>, %b: tensor<64xf32>, %c: tensor<f32>) -> t
 }
 
 //      CHECK:  func @main()
-func @main() {
+func.func @main() {
   //  CHECK-DAG:   %[[C0:.*]] = arith.constant 0{{.*}} : f32
   //  CHECK-DAG:   %[[C1:.*]] = arith.constant 1{{.*}} : f32
   //  CHECK-DAG:   %[[C2:.*]] = arith.constant 2{{.*}} : f32
@@ -434,20 +434,20 @@ func @main() {
 }
 
 //     CHECK:   func private @print_memref_f32(memref<*xf32>)
-func private @print_memref_f32(tensor<*xf32>)
+func.func private @print_memref_f32(tensor<*xf32>)
 
 // -----
 
 // CHECK: #[[$DYNAMIC:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
 
 // CHECK: func private @external_func(memref<?xf32, #[[$DYNAMIC]]>)
-func private @external_func(tensor<?xf32>)
+func.func private @external_func(tensor<?xf32>)
 
 //      CHECK: func @callee(
 // CHECK-SAME:   %[[A:[0-9a-zA-Z]*]]: memref<?xf32>
 // CHECK-SAME:   %[[B:[0-9a-zA-Z]*]]: memref<?xf32, #[[$DYNAMIC]]>
 // CHECK-SAME:   %[[C:[0-9a-zA-Z]*]]: memref<?xf32, #[[$DYNAMIC]]>
-func @callee(%A : tensor<?xf32> {linalg.buffer_layout = affine_map<(i)[s0, s1] -> (i)>},
+func.func @callee(%A : tensor<?xf32> {linalg.buffer_layout = affine_map<(i)[s0, s1] -> (i)>},
              %B : tensor<?xf32>,
              %C : tensor<?xf32>) {
 // CHECK-NEXT: %[[CASTED:.*]] = memref.cast %[[A]] : memref<?xf32> to memref<?xf32, #[[$DYNAMIC]]>
@@ -467,7 +467,7 @@ func @callee(%A : tensor<?xf32> {linalg.buffer_layout = affine_map<(i)[s0, s1] -
 // CHECK-SAME:   %[[A:[0-9a-zA-Z]*]]: memref<?xf32>
 // CHECK-SAME:   %[[B:[0-9a-zA-Z]*]]: memref<?xf32>
 // CHECK-SAME:   %[[C:[0-9a-zA-Z]*]]: memref<?xf32, #[[$DYNAMIC]]>
-func @entry(%A : tensor<?xf32> {linalg.buffer_layout = affine_map<(i)[s0, s1] -> (i)>, linalg.inplaceable = false},
+func.func @entry(%A : tensor<?xf32> {linalg.buffer_layout = affine_map<(i)[s0, s1] -> (i)>, linalg.inplaceable = false},
             %B : tensor<?xf32> {linalg.buffer_layout = affine_map<(i)[s0, s1] -> (i)>, linalg.inplaceable = false},
             %C : tensor<?xf32> {linalg.inplaceable = false}) {
 // Note: `callee` does not write to its bbArg directly, but `external_func`
@@ -495,7 +495,7 @@ func @entry(%A : tensor<?xf32> {linalg.buffer_layout = affine_map<(i)[s0, s1] ->
 
 // CHECK-LABEL: func @inner_func(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @inner_func(%t: tensor<?xf32>) -> tensor<?xf32> {
+func.func @inner_func(%t: tensor<?xf32>) -> tensor<?xf32> {
   %f = arith.constant 1.0 : f32
   %c0 = arith.constant 0 : index
   // CHECK: memref.store %{{.*}}, %[[arg0]]
@@ -505,7 +505,7 @@ func @inner_func(%t: tensor<?xf32>) -> tensor<?xf32> {
 
 // CHECK-LABEL: func @equivalent_func_arg(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @equivalent_func_arg(%t0: tensor<?xf32> {linalg.inplaceable = true},
+func.func @equivalent_func_arg(%t0: tensor<?xf32> {linalg.inplaceable = true},
                           %c0: index, %c10: index, %c1: index) -> tensor<?xf32> {
   // CHECK-NOT: alloc
   // CHECK-NOT: copy
@@ -524,7 +524,7 @@ func @equivalent_func_arg(%t0: tensor<?xf32> {linalg.inplaceable = true},
 
 // CHECK-LABEL: func @inner_func_2(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @inner_func_2(%t: tensor<?xf32>) -> tensor<?xf32> {
+func.func @inner_func_2(%t: tensor<?xf32>) -> tensor<?xf32> {
   %f = arith.constant 1.0 : f32
   %c0 = arith.constant 0 : index
   // CHECK: memref.store %{{.*}}, %[[arg0]]
@@ -534,7 +534,7 @@ func @inner_func_2(%t: tensor<?xf32>) -> tensor<?xf32> {
 
 // CHECK-LABEL: func @equivalent_func_arg_2(
 //  CHECK-SAME:     %[[arg0:.*]]: memref<?xf32
-func @equivalent_func_arg_2(%t0: tensor<?xf32> {linalg.inplaceable = true},
+func.func @equivalent_func_arg_2(%t0: tensor<?xf32> {linalg.inplaceable = true},
                             %c0: index, %c10: index, %c1: index) -> tensor<?xf32> {
   // CHECK: scf.for {{.*}} {
   %1 = scf.for %iv = %c0 to %c10 step %c1 iter_args(%t1 = %t0) -> (tensor<?xf32>) {

diff --git a/mlir/test/Dialect/Linalg/pad_fusion.mlir b/mlir/test/Dialect/Linalg/pad_fusion.mlir
index 78f42f9dff559..5d814c31615ce 100644
--- a/mlir/test/Dialect/Linalg/pad_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/pad_fusion.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -test-linalg-pad-fusion -split-input-file %s | FileCheck %s
 
-func @dynamic_pad_fusion(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index,
+func.func @dynamic_pad_fusion(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index,
     %arg3 : index, %arg4 : index, %arg5 : f32) -> tensor<?x?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -51,7 +51,7 @@ func @dynamic_pad_fusion(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : index,
 
 // -----
 
-func @mixed_pad_fusion(%arg0 : tensor<?x42xf32>, %arg1 : index, %arg2 : index,
+func.func @mixed_pad_fusion(%arg0 : tensor<?x42xf32>, %arg1 : index, %arg2 : index,
     %arg3 : f32) -> tensor<49x?xf32> {
   %c0 = arith.constant 0 : index
   %d0 = tensor.dim %arg0, %c0 : tensor<?x42xf32>

diff --git a/mlir/test/Dialect/Linalg/parallel-loops.mlir b/mlir/test/Dialect/Linalg/parallel-loops.mlir
index cf787643626de..5b0f0b936db8d 100644
--- a/mlir/test/Dialect/Linalg/parallel-loops.mlir
+++ b/mlir/test/Dialect/Linalg/parallel-loops.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -convert-linalg-to-parallel-loops -split-input-file | FileCheck %s
 
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
-func @linalg_generic_sum(%lhs: memref<2x2xf32>,
+func.func @linalg_generic_sum(%lhs: memref<2x2xf32>,
                          %rhs: memref<2x2xf32>,
                          %sum: memref<2x2xf32>) {
   linalg.generic {
@@ -38,7 +38,7 @@ func @linalg_generic_sum(%lhs: memref<2x2xf32>,
   indexing_maps = #accesses
 }
 
-func @lower_outer_parallel(%A: memref<?x?x?x?xf32>, %B: memref<?x?x?xf32>) {
+func.func @lower_outer_parallel(%A: memref<?x?x?x?xf32>, %B: memref<?x?x?xf32>) {
   linalg.generic #trait
       ins(%A : memref<?x?x?x?xf32>)
      outs(%B : memref<?x?x?xf32>) {
@@ -71,7 +71,7 @@ func @lower_outer_parallel(%A: memref<?x?x?x?xf32>, %B: memref<?x?x?xf32>) {
   indexing_maps = #accesses
 }
 
-func @lower_mixed_parallel(%A: memref<?x?x?x?x?x?xf32>, %B: memref<?x?x?x?xf32>) {
+func.func @lower_mixed_parallel(%A: memref<?x?x?x?x?x?xf32>, %B: memref<?x?x?x?xf32>) {
   linalg.generic #trait
       ins(%A : memref<?x?x?x?x?x?xf32>)
      outs(%B : memref<?x?x?x?xf32>) {

diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 98e6b069e4754..1f883be0c6e91 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -8,7 +8,7 @@
 
 // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 
-func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
+func.func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c4 = arith.constant 4 : index
   %c3 = arith.constant 3 : index
   %c2 = arith.constant 2 : index
@@ -81,7 +81,7 @@ func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
 
 // -----
 
-func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
+func.func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c4 = arith.constant 4 : index
   %c3 = arith.constant 3 : index
   %c2 = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Linalg/promotion_options.mlir b/mlir/test/Dialect/Linalg/promotion_options.mlir
index 9420ece55a140..063f1cfdb6558 100644
--- a/mlir/test/Dialect/Linalg/promotion_options.mlir
+++ b/mlir/test/Dialect/Linalg/promotion_options.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -test-linalg-transform-patterns=test-linalg-promotion-options -split-input-file | FileCheck %s
 
-func @gemm(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
+func.func @gemm(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
    linalg.matmul {__internal_linalg_transform__ = "START"}
      ins(%a, %b: memref<?x?xf32>, memref<?x?xf32>)

diff --git a/mlir/test/Dialect/Linalg/reshape_control_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_control_fusion.mlir
index c4e7d5552678e..f11c884aa2768 100644
--- a/mlir/test/Dialect/Linalg/reshape_control_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_control_fusion.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -test-linalg-elementwise-fusion-patterns=control-fusion-by-expansion %s -split-input-file | FileCheck %s
 
-func @control_producer_reshape_fusion(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?xf32>) -> tensor<?x?xf32> {
+func.func @control_producer_reshape_fusion(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?xf32>) -> tensor<?x?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %0 = tensor.collapse_shape %arg0 [[0, 1], [2]] : tensor<?x?x?xf32> into tensor<?x?xf32>
@@ -34,7 +34,7 @@ func @control_producer_reshape_fusion(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<
 
 // -----
 
-func @control_consumer_reshape_fusion(%arg0 : tensor<1x?x?xf32>, %arg1 : tensor<1x?x?xf32>) -> tensor<1x?x?xf32> {
+func.func @control_consumer_reshape_fusion(%arg0 : tensor<1x?x?xf32>, %arg1 : tensor<1x?x?xf32>) -> tensor<1x?x?xf32> {
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
   %cst = arith.constant 0.0 : f32

diff --git a/mlir/test/Dialect/Linalg/reshape_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
index 5aebfcadc33e7..ebee7e75ac5a1 100644
--- a/mlir/test/Dialect/Linalg/reshape_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -3,7 +3,7 @@
 #map0 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
 #map1 = affine_map<(d0, d1, d2) -> (d1, d2, d0)>
 #map2 = affine_map<(d0, d1, d2) -> ()>
-func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xf32>,
+func.func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xf32>,
                                          %arg1 : tensor<?x?x?xf32>,
                                          %arg2 : f32) ->
                                          tensor<?x?x?xf32>
@@ -48,7 +48,7 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xf32>,
 
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> ()>
-func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?xf32>,
+func.func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?xf32>,
                                          %arg1 : tensor<?x?xf32>,
                                          %arg2 : f32) ->
                                          tensor<?x4x?x5xf32>
@@ -91,7 +91,7 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?xf32>,
 
 // -----
 
-func @reshape_as_consumer_permutation
+func.func @reshape_as_consumer_permutation
   (%a : tensor<?x?x?xf32>, %b : tensor<?x?xf32>)
     -> tensor<?x2x?x3x4x?xf32> {
   %c = linalg.generic {
@@ -134,7 +134,7 @@ func @reshape_as_consumer_permutation
 #map1 = affine_map<(d0, d1, d2) -> (d0, d1)>
 #map2 = affine_map<(d0, d1, d2) -> (d2)>
 
-func @generic_op_reshape_consumer_static(%arg0: tensor<264x4xf32>)
+func.func @generic_op_reshape_consumer_static(%arg0: tensor<264x4xf32>)
                                             -> tensor<8x33x4xf32> {
   %cst = arith.constant dense<2.000000e+00> : tensor<264x4xf32>
   %0 = linalg.init_tensor [264, 4] : tensor<264x4xf32>
@@ -170,7 +170,7 @@ func @generic_op_reshape_consumer_static(%arg0: tensor<264x4xf32>)
 
 #map0 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
 #map1 = affine_map<(d0, d1, d2) -> (d1, d2, d0)>
-func @indexed_consumer_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xi32>,
+func.func @indexed_consumer_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xi32>,
                                          %arg1 : tensor<?x?x?xi32>) ->
                                          tensor<?x?x?xi32>
 {
@@ -221,7 +221,7 @@ func @indexed_consumer_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xi32>,
 // -----
 
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
-func @indexed_producer_reshape_consumer_fusion(%arg0 : tensor<?x?xi32>,
+func.func @indexed_producer_reshape_consumer_fusion(%arg0 : tensor<?x?xi32>,
                                          %arg1 : tensor<?x?xi32>) ->
                                          tensor<?x?x4x5xi32>
 {
@@ -266,7 +266,7 @@ func @indexed_producer_reshape_consumer_fusion(%arg0 : tensor<?x?xi32>,
 
 // -----
 
-func @reshape_as_consumer_permutation
+func.func @reshape_as_consumer_permutation
   (%a : tensor<210x6x4xi32>, %b : tensor<210x4xi32>)
     -> tensor<2x3x4x5x6x7xi32> {
   %shape = linalg.init_tensor [6, 4, 210] : tensor<6x4x210xi32>
@@ -334,7 +334,7 @@ func @reshape_as_consumer_permutation
 
 // -----
 
-func @reshape_as_producer_projected_permutation(
+func.func @reshape_as_producer_projected_permutation(
     %arg0 : tensor<33x8x?xi32>, %shape : tensor<264x?x4xi32>) -> tensor<264x?x4xi32>
 {
   %0 = tensor.collapse_shape %arg0 [[0, 1], [2]]
@@ -392,7 +392,7 @@ func @reshape_as_producer_projected_permutation(
 
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
 #map1 = affine_map<(d0, d1) -> (d1, d0)>
-func @generic_op_reshape_consumer_fusion_projected(%arg0 : tensor<?x?xf32>,
+func.func @generic_op_reshape_consumer_fusion_projected(%arg0 : tensor<?x?xf32>,
                                                    %arg1 : tensor<?x?xf32>) ->
                                                    tensor<?x?x4x5xf32>
 {
@@ -430,7 +430,7 @@ func @generic_op_reshape_consumer_fusion_projected(%arg0 : tensor<?x?xf32>,
 
 // -----
 
-func @unit_dim_reshape_expansion(%arg0 : tensor<1x5xf32>) -> tensor<5x5xf32> {
+func.func @unit_dim_reshape_expansion(%arg0 : tensor<1x5xf32>) -> tensor<5x5xf32> {
   %0 = tensor.collapse_shape %arg0 [[0, 1]]
       : tensor<1x5xf32> into tensor<5xf32>
   %1 = linalg.init_tensor [5, 5] : tensor<5x5xf32>
@@ -451,7 +451,7 @@ func @unit_dim_reshape_expansion(%arg0 : tensor<1x5xf32>) -> tensor<5x5xf32> {
 
 // -----
 
-func @unit_dim_reshape_collapse(%arg0 : tensor<5xf32>) -> tensor<5x1x5xf32> {
+func.func @unit_dim_reshape_collapse(%arg0 : tensor<5xf32>) -> tensor<5x1x5xf32> {
   %0 = linalg.init_tensor [5, 5] : tensor<5x5xf32>
   %1 = linalg.generic
     {indexing_maps = [affine_map<(d0, d1) -> (d0)>,
@@ -472,7 +472,7 @@ func @unit_dim_reshape_collapse(%arg0 : tensor<5xf32>) -> tensor<5x1x5xf32> {
 
 // -----
 
-func @unit_dim_reshape_expansion_full
+func.func @unit_dim_reshape_expansion_full
   (%arg0 : tensor<1x?x1x2x1x4xf32>, %arg1 : tensor<?x2x4xf32>)
   -> tensor<?x2x4xf32> {
   %c1 = arith.constant 1 : index
@@ -509,7 +509,7 @@ func @unit_dim_reshape_expansion_full
 
 // -----
 
-func @no_fuse_dynamic_dims(%arg0: tensor<?x?xf32>) -> tensor<?xf32> {
+func.func @no_fuse_dynamic_dims(%arg0: tensor<?x?xf32>) -> tensor<?xf32> {
   %c0 = arith.constant 0 : index
   %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<?x?xf32> into tensor<?xf32>
   %1 = tensor.dim %0, %c0 : tensor<?xf32>
@@ -533,7 +533,7 @@ func @no_fuse_dynamic_dims(%arg0: tensor<?x?xf32>) -> tensor<?xf32> {
 
 // -----
 
-func @no_fuse_mismatched_dynamism(%arg0: tensor<2x1xi64>, %arg1: tensor<?xi64>) -> tensor<2xi64> {
+func.func @no_fuse_mismatched_dynamism(%arg0: tensor<2x1xi64>, %arg1: tensor<?xi64>) -> tensor<2xi64> {
   %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<2x1xi64> into tensor<2xi64>
   %1 = linalg.init_tensor [2] : tensor<2xi64>
   %2 = linalg.generic

diff --git a/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
index 37edb04a7f587..089b30694231f 100644
--- a/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
@@ -7,7 +7,7 @@
 // dimensions should be deprecated.
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
+func.func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
   -> tensor<?x?x4x?xi32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] :
     tensor<?x?x?xi32> into tensor<?x?x4x?xi32>
@@ -40,7 +40,7 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
 // -----
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
+func.func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
   -> tensor<?x?xi32> {
   %0 = linalg.generic {
     indexing_maps = [#map0, #map0],
@@ -73,7 +73,7 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
 
 #map2 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-func @generic_op_021_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<3x7x5xf32> {
+func.func @generic_op_021_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<3x7x5xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2]]
       : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [3, 7, 5] : tensor<3x7x5xf32>
@@ -98,7 +98,7 @@ func @generic_op_021_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf3
 
 #map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
-func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x7x3xf32> {
+func.func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x7x3xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2]]
       : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [5, 7, 3] : tensor<5x7x3xf32>
@@ -125,7 +125,7 @@ func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32
 #map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
 #map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-func @generic_op_102_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x3x7xf32> {
+func.func @generic_op_102_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x3x7xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2]]
       : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [5, 3, 7] : tensor<5x3x7xf32>
@@ -153,7 +153,7 @@ func @generic_op_102_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf3
 #map1 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
 #map2 = affine_map<(d0, d1, d2) -> (d0)>
 #map3 = affine_map<(d0, d1, d2) -> (d1, d2)>
-func @generic_op_102_permultation_reshape_consumer_fusion(%arg0 : tensor<3x5x7xf32>) -> tensor<5x21xf32> {
+func.func @generic_op_102_permultation_reshape_consumer_fusion(%arg0 : tensor<3x5x7xf32>) -> tensor<5x21xf32> {
   %0 = linalg.init_tensor [5, 3, 7] : tensor<5x3x7xf32>
   %1 = linalg.generic
     {indexing_maps = [#map0, #map1],
@@ -181,7 +181,7 @@ func @generic_op_102_permultation_reshape_consumer_fusion(%arg0 : tensor<3x5x7xf
 // -----
 
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
+func.func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
                                            %arg1 : tensor<?x?x?x5xf32>) ->
                                            tensor<?x?xf32>
 {
@@ -209,7 +209,7 @@ func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
 
 // -----
 
-func @generic_op_permultation_reshape_consumer_fusion_unused_dim(%arg0 : tensor<6x1xf32>) -> tensor<6xi32> {
+func.func @generic_op_permultation_reshape_consumer_fusion_unused_dim(%arg0 : tensor<6x1xf32>) -> tensor<6xi32> {
   %0 = linalg.init_tensor [6, 1] : tensor<6x1xi32>
   %1 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                                         affine_map<(d0, d1) -> (d0, d1)>],
@@ -238,7 +238,7 @@ func @generic_op_permultation_reshape_consumer_fusion_unused_dim(%arg0 : tensor<
 
 #map0 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d2, d4, d0, d6, d3, d5, d1)>
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>
-func @permuted_dims_fusion_expand_shape(%arg0 : tensor<3x8x7x240xf32>) -> tensor<4x6x3x8x2x5x7xf32> {
+func.func @permuted_dims_fusion_expand_shape(%arg0 : tensor<3x8x7x240xf32>) -> tensor<4x6x3x8x2x5x7xf32> {
   %0 = tensor.expand_shape %arg0 [[0], [1, 2], [3], [4, 5, 6]]
       : tensor<3x8x7x240xf32> into tensor<3x2x4x7x8x5x6xf32>
   %1 = linalg.init_tensor [4, 6, 3, 8, 2, 5, 7] : tensor<4x6x3x8x2x5x7xf32>
@@ -264,7 +264,7 @@ func @permuted_dims_fusion_expand_shape(%arg0 : tensor<3x8x7x240xf32>) -> tensor
 
 #map0 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d2, d4, d0, d6, d3, d5, d1)>
 #map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>
-func @permuted_dims_fusion_collapse_shape(%arg0 : tensor<4x6x3x8x2x5x7xf32>) -> tensor<3x8x7x240xf32> {
+func.func @permuted_dims_fusion_collapse_shape(%arg0 : tensor<4x6x3x8x2x5x7xf32>) -> tensor<3x8x7x240xf32> {
   %0 = linalg.init_tensor [3, 2, 4, 7, 8, 5, 6] : tensor<3x2x4x7x8x5x6xf32>
   %1 = linalg.generic {
       indexing_maps = [#map1, #map0],

diff --git a/mlir/test/Dialect/Linalg/reshape_linearization_fusion_with_unit_dims.mlir b/mlir/test/Dialect/Linalg/reshape_linearization_fusion_with_unit_dims.mlir
index 435be965c1e94..80826057c6bd3 100644
--- a/mlir/test/Dialect/Linalg/reshape_linearization_fusion_with_unit_dims.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_linearization_fusion_with_unit_dims.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -linalg-fold-reshape-ops-by-linearization=allow-folding-unit-dim-reshapes -split-input-file %s | FileCheck %s
 
 #map = affine_map<(d0, d1) -> (d0, d1)>
-func @do_not_fold1(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?x1xf32>
+func.func @do_not_fold1(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?x1xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -27,7 +27,7 @@ func @do_not_fold1(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?
 // -----
 
 #map = affine_map<(d0, d1) -> (d0, d1)>
-func @do_not_fold2(%arg0 : tensor<?x?x1xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32>
+func.func @do_not_fold2(%arg0 : tensor<?x?x1xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32>
 {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index

diff --git a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
index 27f014ed66147..d5c18a49944d4 100644
--- a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
+++ b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -resolve-shaped-type-result-dims -split-input-file %s | FileCheck %s
 
-func @init_tensor_static_dim() -> (index, index) {
+func.func @init_tensor_static_dim() -> (index, index) {
   %c0 = arith.constant 0 : index
   %c2 = arith.constant 2 : index
   %c6 = arith.constant 6 : index
@@ -16,7 +16,7 @@ func @init_tensor_static_dim() -> (index, index) {
 
 // -----
 
-func @init_tensor_dynamic_dim(%arg0 : index) -> (index) {
+func.func @init_tensor_dynamic_dim(%arg0 : index) -> (index) {
   %c2 = arith.constant 2 : index
   %0 = linalg.init_tensor [4, 5, %arg0] : tensor<4x5x?xf32>
   %1 = tensor.dim %0, %c2 : tensor<4x5x?xf32>
@@ -28,7 +28,7 @@ func @init_tensor_dynamic_dim(%arg0 : index) -> (index) {
 
 // -----
 
-func @init_tensor_dynamic_dim2(%arg0 : index, %arg1 : index) -> (index, index) {
+func.func @init_tensor_dynamic_dim2(%arg0 : index, %arg1 : index) -> (index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %0 = linalg.init_tensor [%arg0, %arg1] : tensor<?x?xf32>
@@ -43,7 +43,7 @@ func @init_tensor_dynamic_dim2(%arg0 : index, %arg1 : index) -> (index, index) {
 
 // -----
 
-func @remove_dim_result_uses
+func.func @remove_dim_result_uses
   (%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> (index, index) {
   %c0 = arith.constant 0 : index
@@ -82,7 +82,7 @@ func @remove_dim_result_uses
 
 // -----
 
-func @remove_dim_result_uses_outs
+func.func @remove_dim_result_uses_outs
   (%arg0 : tensor<?xf32>, %arg1 : index) -> (index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -105,7 +105,7 @@ func @remove_dim_result_uses_outs
 
 // -----
 
-func @remove_dim_result_uses_sequence
+func.func @remove_dim_result_uses_sequence
   (%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> (index, index, index, index) {
   %c0 = arith.constant 0 : index
@@ -144,7 +144,7 @@ func @remove_dim_result_uses_sequence
 
 // -----
 
-func @keep_result_dim_uses_sequence2
+func.func @keep_result_dim_uses_sequence2
   (%arg0 : tensor<?xf32>, %arg1 : index) -> (index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -173,7 +173,7 @@ func @keep_result_dim_uses_sequence2
 
 #map = affine_map<(d0) -> (d0)>
 
-func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
+func.func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
     %arg_1: tensor<?xf32>) -> (index, index) {
   %0, %1 = linalg.generic {
     indexing_maps = [#map, #map, #map],
@@ -199,7 +199,7 @@ func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
 
 // -----
 
-func @dim_reshape_expansion(%arg0 : tensor<6x5x?xf32>) -> (index, index, index)
+func.func @dim_reshape_expansion(%arg0 : tensor<6x5x?xf32>) -> (index, index, index)
 {
   %c1 = arith.constant 1 : index
   %c3 = arith.constant 3 : index
@@ -223,7 +223,7 @@ func @dim_reshape_expansion(%arg0 : tensor<6x5x?xf32>) -> (index, index, index)
 
 // -----
 
-func @dim_reshape_collapse(%arg0 : tensor<2x3x5x4x?x7xf32>) -> (index, index)
+func.func @dim_reshape_collapse(%arg0 : tensor<2x3x5x4x?x7xf32>) -> (index, index)
 {
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -244,7 +244,7 @@ func @dim_reshape_collapse(%arg0 : tensor<2x3x5x4x?x7xf32>) -> (index, index)
 
 // -----
 
-func @dim_of_pad_op(%arg0 : tensor<2x?x?xf32>, %arg1 : index, %arg2 : index,
+func.func @dim_of_pad_op(%arg0 : tensor<2x?x?xf32>, %arg1 : index, %arg2 : index,
     %arg3: f32) -> (index, index, index)
 {
    %c0 = arith.constant 0 : index

diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index eeb9ab8c96669..ba21a1481aa64 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -11,7 +11,7 @@
 // CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 // CHECK-DAG: #[[$strided3DT:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
 
-func @views(%arg0: index) {
+func.func @views(%arg0: index) {
   %c0 = arith.constant 0 : index
   %0 = arith.muli %arg0, %arg0 : index
   %1 = memref.alloc (%0) : memref<?xi8>
@@ -31,7 +31,7 @@ func @views(%arg0: index) {
 
 // -----
 
-func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
           %arg1: memref<?xf32, offset: ?, strides: [1]>,
           %arg2: memref<?xf32, offset: ?, strides: [1]>,
           %arg3: memref<f32>) {
@@ -62,7 +62,7 @@ func @ops(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 
 // -----
 
-func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
+func.func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
   linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?xf32, offset: ?, strides: [1]>)
   return
 }
@@ -72,7 +72,7 @@ func @fill_view(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: f32) {
 
 // -----
 
-func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
+func.func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   %0 = memref.transpose %arg0 (i, j, k) -> (k, j, i) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]> to memref<?x?x?xf32, affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>>
   return
 }
@@ -83,7 +83,7 @@ func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
 // -----
 
 
-func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: f32) {
+func.func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: f32) {
   linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>)
   return
 }
@@ -105,7 +105,7 @@ func @fill_view3(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1:
   library_call = "some_external_function_name_1"
 }
 
-func @generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
+func.func @generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
               %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   %cst = arith.constant 0.0 : f32
   linalg.generic #trait_0
@@ -126,7 +126,7 @@ func @generic(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
 //  CHECK-SAME:     outs({{.*}} : memref<?x?x?xf32, #[[$strided3D]]>)
 //  CHECK-SAME:     {foo = 1 : i64}
 
-func @generic_with_tensor_input(%arg0: tensor<?x?xvector<3x4xi4>>,
+func.func @generic_with_tensor_input(%arg0: tensor<?x?xvector<3x4xi4>>,
                                 %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   %cst = arith.constant 0.0 : f32
   linalg.generic #trait_0
@@ -149,7 +149,7 @@ func @generic_with_tensor_input(%arg0: tensor<?x?xvector<3x4xi4>>,
 // -----
 
 #map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-func @generic_without_inputs(%arg0 : memref<?x?x?xf32>) {
+func.func @generic_without_inputs(%arg0 : memref<?x?x?xf32>) {
   linalg.generic  {indexing_maps = [#map0],
                    iterator_types = ["parallel", "parallel", "parallel"]}
                   outs(%arg0 : memref<?x?x?xf32>) {
@@ -178,7 +178,7 @@ func @generic_without_inputs(%arg0 : memref<?x?x?xf32>) {
   library_call = "some_external_function_name_1"
 }
 
-func @generic_with_tensor_input_and_output(
+func.func @generic_with_tensor_input_and_output(
     %arg0: tensor<?x?xvector<3x4xi4>>, %arg1: tensor<?x?x?xf32>)
     -> (tensor<?x?x?xf32>) {
   %0 = linalg.generic #trait_1
@@ -203,7 +203,7 @@ func @generic_with_tensor_input_and_output(
 
 // -----
 
-func @generic_with_multiple_tensor_outputs(
+func.func @generic_with_multiple_tensor_outputs(
     %arg0: tensor<?xi32>, %arg1: tensor<?xi32>, %arg2: i32)
     -> (tensor<i32>, tensor<i32>) {
   %c0 = arith.constant 0 : index
@@ -247,7 +247,7 @@ func @generic_with_multiple_tensor_outputs(
   library_call = "some_broadcast_external_fn"
 }
 
-func @generic_op_zero_rank(%arg0: tensor<f32>, %arg1 : tensor<3x4xf32>) -> (tensor<3x4xf32>)
+func.func @generic_op_zero_rank(%arg0: tensor<f32>, %arg1 : tensor<3x4xf32>) -> (tensor<3x4xf32>)
 {
   %0 = linalg.generic #trait_broadcast
        ins(%arg0 : tensor<f32>)
@@ -272,7 +272,7 @@ func @generic_op_zero_rank(%arg0: tensor<f32>, %arg1 : tensor<3x4xf32>) -> (tens
   library_call = "some_external_function_name_2"
 }
 
-func @generic_region(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
+func.func @generic_region(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>,
                      %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   linalg.generic #trait_3
        ins(%arg0 : memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1]>)
@@ -303,7 +303,7 @@ func @generic_region(%arg0: memref<?x?xvector<3x4xi4>, offset: ?, strides: [?, 1
 // -----
 
 
-func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?x?xf32>, %c3: memref<?x?x?xf32>,
+func.func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?x?xf32>, %c3: memref<?x?x?xf32>,
                 %ta3: tensor<?x?x?xf32>, %tb3: tensor<?x?x?xf32>, %tc3: tensor<?x?x?xf32>)
   -> (tensor<?x?x?xf32>, tensor<?x?x?xf32>)
 {
@@ -330,7 +330,7 @@ func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?x?xf32>, %c3: memref<?x?x
 // -----
 
 #attr = {"foo"}
-func @init_tensor(%arg0 : index, %arg1 : index)
+func.func @init_tensor(%arg0 : index, %arg1 : index)
 {
   %0 = linalg.init_tensor [3, 42] : tensor<3x42xf32>
   %1 = linalg.init_tensor [4, %arg0, %arg1, 5] : tensor<4x?x?x5xf32>
@@ -344,7 +344,7 @@ func @init_tensor(%arg0 : index, %arg1 : index)
 
 // -----
 
-func @fill_tensor(%arg0 : index, %arg1 : index, %arg2 : f32) -> tensor<?x?xf32> {
+func.func @fill_tensor(%arg0 : index, %arg1 : index, %arg2 : f32) -> tensor<?x?xf32> {
   %0 = linalg.init_tensor [%arg0, %arg1] : tensor<?x?xf32>
   %1 = linalg.fill ins(%arg2 : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32>
   return %1 : tensor<?x?xf32>

diff --git a/mlir/test/Dialect/Linalg/split_reduction.mlir b/mlir/test/Dialect/Linalg/split_reduction.mlir
index c95510d43d12f..117d58fad1bac 100644
--- a/mlir/test/Dialect/Linalg/split_reduction.mlir
+++ b/mlir/test/Dialect/Linalg/split_reduction.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -test-linalg-transform-patterns=test-split-reduction  -split-input-file  | FileCheck %s
 
-func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
+func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: tensor<16x32xf32>) -> tensor<16x32xf32> {
   %0 = linalg.matmul ins(%A, %B: tensor<16x256xf32>, tensor<256x32xf32>)
                     outs(%C: tensor<16x32xf32>) -> tensor<16x32xf32>
   return %0: tensor<16x32xf32>
@@ -33,7 +33,7 @@ func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: tensor<1
 
 // -----
 
-func @generic_split_1d(%arg0: tensor<32xf32>, %arg1: tensor<f32>, %out: tensor<f32>) -> tensor<f32> {
+func.func @generic_split_1d(%arg0: tensor<32xf32>, %arg1: tensor<f32>, %out: tensor<f32>) -> tensor<f32> {
   %red = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>,
                                           affine_map<(d0) -> ()>,
                                           affine_map<(d0) -> ()>],
@@ -75,7 +75,7 @@ func @generic_split_1d(%arg0: tensor<32xf32>, %arg1: tensor<f32>, %out: tensor<f
 
 // -----
 
-func @generic_split_3d(%input: tensor<32x2xf32>, %input_2: tensor<5x32xf32>, %output: tensor<5x2xf32>)
+func.func @generic_split_3d(%input: tensor<32x2xf32>, %input_2: tensor<5x32xf32>, %output: tensor<5x2xf32>)
   -> tensor<5x2xf32>
 {
   %0 = linalg.generic {

diff --git a/mlir/test/Dialect/Linalg/standard.mlir b/mlir/test/Dialect/Linalg/standard.mlir
index 83544f69caf30..586588ff27f68 100644
--- a/mlir/test/Dialect/Linalg/standard.mlir
+++ b/mlir/test/Dialect/Linalg/standard.mlir
@@ -4,7 +4,7 @@
 // CHECK-DAG: #[[$map6:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
 // CHECK-DAG: #[[$map7:.*]] = affine_map<()[s0] -> (s0)>
 
-func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
+func.func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
           %arg1: memref<?xf32, offset: ?, strides: [1]>,
           %arg2: memref<f32>) {
   linalg.dot ins(%arg0, %arg1: memref<?xf32, offset: ?, strides: [1]>,
@@ -45,7 +45,7 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
 !matrix_type_B = type memref<?x?x!vector_type_B>
 !matrix_type_C = type memref<?x?x!vector_type_C>
 
-func @matmul_vec_impl(%A: !matrix_type_A, %B: !matrix_type_B, %C: !matrix_type_C) {
+func.func @matmul_vec_impl(%A: !matrix_type_A, %B: !matrix_type_B, %C: !matrix_type_C) {
   linalg.generic #matmul_trait
       ins(%A, %B : !matrix_type_A, !matrix_type_B)
      outs(%C : !matrix_type_C) {

diff --git a/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir b/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
index 64bb9d1ea9eff..b4417641c9f83 100644
--- a/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
+++ b/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
@@ -4,7 +4,7 @@
 //  CHECK-SAME:     %[[ARG0:.*]]: tensor<4x5xf32>
 //       CHECK:   %[[RESULT:.*]] = tensor.extract_slice %[[ARG0]][1, 2] [2, 1] [1, 1] : tensor<4x5xf32> to tensor<2x1xf32>
 //       CHECK:   return %[[RESULT]]
-func @static_data_only(%arg0 : tensor<4x5xf32>, %pad : f32)
+func.func @static_data_only(%arg0 : tensor<4x5xf32>, %pad : f32)
     -> tensor<2x1xf32> {
   %0 = tensor.pad %arg0 low[0, 0] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
@@ -23,7 +23,7 @@ func @static_data_only(%arg0 : tensor<4x5xf32>, %pad : f32)
 //       CHECK:   %[[RESULT:.*]] = tensor.generate
 //       CHECK:     tensor.yield %[[PAD]]
 //       CHECK:   return %[[RESULT]] : tensor<2x4xf32>
-func @static_high_pad_only(%arg0 : tensor<4x5xf32>, %pad : f32)
+func.func @static_high_pad_only(%arg0 : tensor<4x5xf32>, %pad : f32)
     -> tensor<2x4xf32> {
   %0 = tensor.pad %arg0 low[0, 0] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
@@ -42,7 +42,7 @@ func @static_high_pad_only(%arg0 : tensor<4x5xf32>, %pad : f32)
 //       CHECK:   %[[RESULT:.*]] = tensor.generate
 //       CHECK:     tensor.yield %[[PAD]]
 //       CHECK:   return %[[RESULT]] : tensor<2x3xf32>
-func @static_low_pad_only(%arg0 : tensor<4x5xf32>, %pad : f32)
+func.func @static_low_pad_only(%arg0 : tensor<4x5xf32>, %pad : f32)
     -> tensor<2x3xf32> {
   %0 = tensor.pad %arg0 low[3, 7] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
@@ -61,7 +61,7 @@ func @static_low_pad_only(%arg0 : tensor<4x5xf32>, %pad : f32)
 //       CHECK:   %[[RESULT:.*]] = tensor.generate
 //       CHECK:     tensor.yield %[[PAD]]
 //       CHECK:   return %[[RESULT]] : tensor<1x3xf32>
-func @static_low_pad_only_2(%arg0 : tensor<4x5xf32>, %pad : f32)
+func.func @static_low_pad_only_2(%arg0 : tensor<4x5xf32>, %pad : f32)
     -> tensor<1x3xf32> {
   %0 = tensor.pad %arg0 low[3, 7] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
@@ -80,7 +80,7 @@ func @static_low_pad_only_2(%arg0 : tensor<4x5xf32>, %pad : f32)
 //       CHECK:   %[[RESULT:.*]] = tensor.pad %[[SUBTENSOR]] low[0, 0] high[1, 3]
 //       CHECK:     tensor.yield %[[PAD]]
 //       CHECK:   return %[[RESULT]] : tensor<3x4xf32>
-func @static_mixed_data_high_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
+func.func @static_mixed_data_high_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
     -> tensor<3x4xf32> {
   %0 = tensor.pad %arg0 low[0, 0] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
@@ -99,7 +99,7 @@ func @static_mixed_data_high_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
 //       CHECK:   %[[RESULT:.*]] = tensor.pad %[[SUBTENSOR]] low[1, 3] high[0, 0]
 //       CHECK:     tensor.yield %[[PAD]]
 //       CHECK:   return %[[RESULT]] : tensor<3x4xf32>
-func @static_mixed_data_low_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
+func.func @static_mixed_data_low_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
     -> tensor<3x4xf32> {
   %0 = tensor.pad %arg0 low[3, 7] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
@@ -117,7 +117,7 @@ func @static_mixed_data_low_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
 //       CHECK:   %[[RESULT:.*]] = tensor.pad %[[ARG0]] low[1, 1] high[2, 3]
 //       CHECK:     tensor.yield %[[PAD]]
 //       CHECK:   return %[[RESULT]] : tensor<7x9xf32>
-func @static_mixed_data_low_high_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
+func.func @static_mixed_data_low_high_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
     -> tensor<7x9xf32> {
   %0 = tensor.pad %arg0 low[2, 3] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
@@ -143,7 +143,7 @@ func @static_mixed_data_low_high_pad(%arg0 : tensor<4x5xf32>, %pad : f32)
 //       CHECK:     scf.yield %[[PADTENSOR]]
 //       CHECK:   }
 //       CHECK:   return %[[RESULT]]
-func @dynamic_high_pad(%arg0 : tensor<?x5xf32>, %h1: index, %pad : f32) -> tensor<3x4xf32> {
+func.func @dynamic_high_pad(%arg0 : tensor<?x5xf32>, %h1: index, %pad : f32) -> tensor<3x4xf32> {
   %0 = tensor.pad %arg0 low[0, 0] high[%h1, 8] {
     ^bb0(%arg1: index, %arg2: index):
       tensor.yield %pad : f32
@@ -168,7 +168,7 @@ func @dynamic_high_pad(%arg0 : tensor<?x5xf32>, %h1: index, %pad : f32) -> tenso
 //       CHECK:     scf.yield %[[PADTENSOR]]
 //       CHECK:   }
 //       CHECK:   return %[[RESULT]]
-func @dynamic_extract_size(%arg0 : tensor<?x5xf32>, %s1: index, %pad : f32) -> tensor<?x4xf32> {
+func.func @dynamic_extract_size(%arg0 : tensor<?x5xf32>, %s1: index, %pad : f32) -> tensor<?x4xf32> {
   %0 = tensor.pad %arg0 low[0, 0] high[7, 8] {
     ^bb0(%arg1: index, %arg2: index):
       tensor.yield %pad : f32
@@ -185,7 +185,7 @@ func @dynamic_extract_size(%arg0 : tensor<?x5xf32>, %s1: index, %pad : f32) -> t
 //       CHECK:   else
 //       CHECK:     %[[SLICE:.*]] = tensor.extract_slice
 //       CHECK:     tensor.pad %[[SLICE]] low[0, 0]
-func @dynamic_zero_low_padding(%arg0 : tensor<?x?xf32>, %pad : f32,
+func.func @dynamic_zero_low_padding(%arg0 : tensor<?x?xf32>, %pad : f32,
                                %o1 : index, %o2 : index,
                                %s1 : index, %s2 : index)
     -> tensor<?x?xf32> {
@@ -205,7 +205,7 @@ func @dynamic_zero_low_padding(%arg0 : tensor<?x?xf32>, %pad : f32,
 //       CHECK:   else
 //       CHECK:     %[[SLICE:.*]] = tensor.extract_slice
 //       CHECK:     tensor.pad %[[SLICE]] low[%{{.*}}, %{{.*}}] high[0, 0]
-func @dynamic_zero_high_padding(%arg0 : tensor<?x?xf32>, %pad : f32,
+func.func @dynamic_zero_high_padding(%arg0 : tensor<?x?xf32>, %pad : f32,
                                 %o1 : index, %o2 : index,
                                 %s1 : index, %s2 : index)
     -> tensor<?x?xf32> {

diff --git a/mlir/test/Dialect/Linalg/tile-and-distribute.mlir b/mlir/test/Dialect/Linalg/tile-and-distribute.mlir
index d2f47eb5af63b..2f39e998a6824 100644
--- a/mlir/test/Dialect/Linalg/tile-and-distribute.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-distribute.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -test-linalg-transform-patterns=test-tile-and-distribute-options -split-input-file | FileCheck %s
 
-func @gemm1(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
+func.func @gemm1(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
   linalg.matmul {__internal_linalg_transform__ = "distribute1"}
     ins(%a, %b: memref<?x?xf32>, memref<?x?xf32>)
@@ -26,7 +26,7 @@ func @gemm1(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 
 // -----
 
-func @gemm2(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
+func.func @gemm2(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
   linalg.matmul  {__internal_linalg_transform__ = "distribute2"}
     ins(%a, %b: memref<?x?xf32>, memref<?x?xf32>)
@@ -58,7 +58,7 @@ func @gemm2(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 
 // -----
 
-func @gemm3(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
+func.func @gemm3(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
   linalg.matmul {__internal_linalg_transform__ = "distribute3"}
     ins(%a, %b: memref<?x?xf32>, memref<?x?xf32>)
@@ -87,7 +87,7 @@ func @gemm3(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 
 // -----
 
-func @gemm4(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
+func.func @gemm4(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
   linalg.matmul {__internal_linalg_transform__ = "distribute4"}
     ins(%a, %b: memref<?x?xf32>, memref<?x?xf32>)
@@ -116,7 +116,7 @@ func @gemm4(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 
 // -----
 
-func @gemm5(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
+func.func @gemm5(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
   linalg.matmul {__internal_linalg_transform__ = "distribute5"}
     ins(%a, %b: memref<?x?xf32>, memref<?x?xf32>)
@@ -147,7 +147,7 @@ func @gemm5(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 
 // -----
 
-func @gemm6(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
+func.func @gemm6(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 {
   linalg.matmul {__internal_linalg_transform__ = "distribute6"}
     ins(%a, %b: memref<?x?xf32>, memref<?x?xf32>)
@@ -181,7 +181,7 @@ func @gemm6(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
 // CHECK-SAME:    %[[TA:[0-9a-z]+]]: tensor<?x?xf32>
 // CHECK-SAME:    %[[TB:[0-9a-z]+]]: tensor<?x?xf32>
 // CHECK-SAME:    %[[TC:[0-9a-z]+]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
-func @matmul_tensors(
+func.func @matmul_tensors(
   %arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>)
     -> tensor<?x?xf32> {
 //  CHECK-DAG: %[[C8:.*]] = arith.constant 8 : index

diff --git a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
index 41162c9b6397a..6f7c5caf8ccb0 100644
--- a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -test-linalg-greedy-fusion -split-input-file | FileCheck %s
 
-func @matmul_tensors(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @matmul_tensors(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
   %t0 = linalg.matmul ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x?xf32>)
                      outs(%arg2: tensor<?x?xf32>)
     -> tensor<?x?xf32>
@@ -65,7 +65,7 @@ func @matmul_tensors(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tens
 
 // -----
 
-func @conv_tensors_static(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3x32xf32>, %elementwise: tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32> {
+func.func @conv_tensors_static(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3x32xf32>, %elementwise: tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32> {
   %c112 = arith.constant 112 : index
   %c32 = arith.constant 32 : index
   %c16 = arith.constant 16 : index
@@ -141,7 +141,7 @@ func @conv_tensors_static(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3
 
 // -----
 
-func @conv_tensors_dynamic(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?xf32>, %elementwise: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
+func.func @conv_tensors_dynamic(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?xf32>, %elementwise: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
   %cst = arith.constant 0.0 : f32
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -295,7 +295,7 @@ func @conv_tensors_dynamic(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?x
 //     CHECK:       tensor.extract_slice
 //     CHECK:       linalg.generic
 //     CHECK:       tensor.insert_slice
-func @pad_generic_static(%small_input: tensor<58x1xf32>, %large_input: tensor<64x128xf32>) -> tensor<64x128xf32> {
+func.func @pad_generic_static(%small_input: tensor<58x1xf32>, %large_input: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c16 = arith.constant 16 : index

diff --git a/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
index e1b2def4aadee..5a0b2c4b120bf 100644
--- a/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
@@ -45,7 +45,7 @@
 //     CHECK-PEEL-12:       linalg.matmul ins({{.*}} : tensor<?x?xf32>, tensor<?x36xf32>) outs({{.*}} : tensor<?x36xf32>)
 //     CHECK-PEEL-12:     }
 //     CHECK-PEEL-12:   }
-func @matmul_static_tensor(%arg0: tensor<1500x1600xf32>, %arg1: tensor<1600x1700xf32>)
+func.func @matmul_static_tensor(%arg0: tensor<1500x1600xf32>, %arg1: tensor<1600x1700xf32>)
     -> tensor<1500x1700xf32> {
   %out = linalg.init_tensor [1500, 1700] : tensor<1500x1700xf32>
   %r = linalg.matmul {__internal_linalg_transform__ = "tile"}
@@ -96,7 +96,7 @@ func @matmul_static_tensor(%arg0: tensor<1500x1600xf32>, %arg1: tensor<1600x1700
 //     CHECK-PEEL-12:       }
 //     CHECK-PEEL-12:     }
 //     CHECK-PEEL-12:   }
-func @matmul_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>)
+func.func @matmul_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>)
     -> tensor<?x?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index

diff --git a/mlir/test/Dialect/Linalg/tile-conv.mlir b/mlir/test/Dialect/Linalg/tile-conv.mlir
index ebdd97ef080d1..b76d6f0d164f5 100644
--- a/mlir/test/Dialect/Linalg/tile-conv.mlir
+++ b/mlir/test/Dialect/Linalg/tile-conv.mlir
@@ -5,7 +5,7 @@
 //  CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 2)>
 //  CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 3)>
 
-func @conv(%arg0 : memref<?x?xf32>, %arg1 : memref<?x?xf32>, %arg2 : memref<?x?xf32>) {
+func.func @conv(%arg0 : memref<?x?xf32>, %arg1 : memref<?x?xf32>, %arg2 : memref<?x?xf32>) {
   linalg.conv_2d ins(%arg0, %arg1 : memref<?x?xf32>, memref<?x?xf32>) outs(%arg2 : memref<?x?xf32>)
   return
 }

diff --git a/mlir/test/Dialect/Linalg/tile-fuse-and-distribute.mlir b/mlir/test/Dialect/Linalg/tile-fuse-and-distribute.mlir
index a5a5f56495b09..bf0e96d1f25dd 100644
--- a/mlir/test/Dialect/Linalg/tile-fuse-and-distribute.mlir
+++ b/mlir/test/Dialect/Linalg/tile-fuse-and-distribute.mlir
@@ -5,7 +5,7 @@
 //      CHECK: func @fill_matmul_tensors(
 // CHECK-SAME:    %[[TA:[0-9a-z]+]]: tensor<?x?xf32>
 // CHECK-SAME:    %[[TB:[0-9a-z]+]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
-func @fill_matmul_tensors(
+func.func @fill_matmul_tensors(
   %arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>)
     -> tensor<?x?xf32> {
 //  CHECK-DAG: %[[C8:.*]] = arith.constant 8 : index

diff --git a/mlir/test/Dialect/Linalg/tile-indexed.mlir b/mlir/test/Dialect/Linalg/tile-indexed.mlir
index 586cac259c45e..fdca6fbdeec9c 100644
--- a/mlir/test/Dialect/Linalg/tile-indexed.mlir
+++ b/mlir/test/Dialect/Linalg/tile-indexed.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt %s -linalg-tile="tile-sizes=25,0" -split-input-file | FileCheck %s -check-prefix=TILE-25n0
 // RUN: mlir-opt %s -linalg-tile="tile-sizes=0,25" -split-input-file | FileCheck %s -check-prefix=TILE-0n25
 
-func @indexed_vector(%arg0: memref<50xindex>) {
+func.func @indexed_vector(%arg0: memref<50xindex>) {
   linalg.generic {indexing_maps = [affine_map<(i) -> (i)>],
                   iterator_types = ["parallel"]}
      outs(%arg0 : memref<50xindex>) {
@@ -36,7 +36,7 @@ func @indexed_vector(%arg0: memref<50xindex>) {
 
 // -----
 
-func @indexed_matrix(%arg0: memref<50x50xindex>) {
+func.func @indexed_matrix(%arg0: memref<50x50xindex>) {
   linalg.generic {indexing_maps = [affine_map<(i, j) -> (i, j)>],
                   iterator_types = ["parallel", "parallel"]}
     outs(%arg0 : memref<50x50xindex>) {

diff --git a/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir b/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir
index a8dfdd940673a..6295f9106cf11 100644
--- a/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir
+++ b/mlir/test/Dialect/Linalg/tile-pad-tensor-op.mlir
@@ -47,7 +47,7 @@
 //       TILE1:     tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][0, {{.*}}] [%[[DIM0]], {{.*}}] [1, 1]
 //       TILE1:   return %[[RESULT]]
 
-func @dynamic_pad_tensor(%input_tensor: tensor<?x?xf32>,
+func.func @dynamic_pad_tensor(%input_tensor: tensor<?x?xf32>,
                          %pad_value: f32) -> tensor<?x?xf32> {
   %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
     ^bb0(%arg1: index, %arg2: index):
@@ -90,7 +90,7 @@ func @dynamic_pad_tensor(%input_tensor: tensor<?x?xf32>,
 //       TILE1:     tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][0, {{.*}}] [15, {{.*}}] [1, 1]
 //       TILE1:   return %[[RESULT]]
 
-func @static_pad_tensor(%input_tensor: tensor<7x9xf32>,
+func.func @static_pad_tensor(%input_tensor: tensor<7x9xf32>,
                         %pad_value: f32) -> tensor<15x16xf32> {
   %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
     ^bb0(%arg1: index, %arg2: index):
@@ -117,7 +117,7 @@ func @static_pad_tensor(%input_tensor: tensor<7x9xf32>,
 //       TILE1:     %[[R3:.*]] = tensor.insert_slice %[[R2]] into %[[INNER_OUT]][0, %[[IV]]] [14, 3] [1, 1] : tensor<14x3xf32> into tensor<14x15xf32>
 //       TILE1:     scf.yield %[[R3]] : tensor<14x15xf32>
 //       TILE1:   return %[[RESULT]] : tensor<14x15xf32>
-func @static_pad_tile_evenly(%input_tensor: tensor<7x9xf32>,
+func.func @static_pad_tile_evenly(%input_tensor: tensor<7x9xf32>,
                              %output_tensor: tensor<14x15xf32>,
                              %pad_value: f32) -> tensor<14x15xf32> {
   %0 = tensor.pad %input_tensor low[0, 0] high[7, 6] {

diff --git a/mlir/test/Dialect/Linalg/tile-parallel-reduce.mlir b/mlir/test/Dialect/Linalg/tile-parallel-reduce.mlir
index b8d68d9809827..dcad7a046c399 100644
--- a/mlir/test/Dialect/Linalg/tile-parallel-reduce.mlir
+++ b/mlir/test/Dialect/Linalg/tile-parallel-reduce.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt %s -linalg-tile="tile-sizes=2 loop-type=parallel" -split-input-file | FileCheck %s -check-prefix=TILE1
 // RUN: mlir-opt %s -linalg-tile="tile-sizes=2,4 loop-type=parallel" -split-input-file | FileCheck %s -check-prefix=TILE2
 
-func @gemm(%arg0 : memref<?x?xf32>,
+func.func @gemm(%arg0 : memref<?x?xf32>,
            %arg1 : memref<?x?xf32>,
            %arg2 : memref<?x?xf32>)
 {
@@ -55,7 +55,7 @@ func @gemm(%arg0 : memref<?x?xf32>,
   indexing_maps = #accesses
 }
 
-func @reduction(%arg0 : memref<?x?x?xf32>,
+func.func @reduction(%arg0 : memref<?x?x?xf32>,
                 %arg1 : memref<?x?xf32>,
                 %arg2 : memref<?xf32>)
 {

diff --git a/mlir/test/Dialect/Linalg/tile-parallel.mlir b/mlir/test/Dialect/Linalg/tile-parallel.mlir
index e66632e1fcf23..1e479b9423d2b 100644
--- a/mlir/test/Dialect/Linalg/tile-parallel.mlir
+++ b/mlir/test/Dialect/Linalg/tile-parallel.mlir
@@ -11,7 +11,7 @@
   iterator_types = ["parallel", "parallel"]
 }
 
-func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @sum(%lhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
           %rhs: memref<?x?xf32, offset: ?, strides: [?, 1]>,
           %sum: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   linalg.generic #pointwise_2d_trait

diff --git a/mlir/test/Dialect/Linalg/tile-scalarize-dynamic-dims.mlir b/mlir/test/Dialect/Linalg/tile-scalarize-dynamic-dims.mlir
index 4826798d7a441..e83cab3df82a1 100644
--- a/mlir/test/Dialect/Linalg/tile-scalarize-dynamic-dims.mlir
+++ b/mlir/test/Dialect/Linalg/tile-scalarize-dynamic-dims.mlir
@@ -14,7 +14,7 @@
 //       CHECK:       %[[S2:.*]] = tensor.extract_slice %[[ARG1]][%[[IV1]], 0] [1, 2000] [1, 1] : tensor<?x2000xf32> to tensor<1x2000xf32>
 //       CHECK:       %[[S3:.*]] = tensor.extract_slice %{{.*}}[%[[IV0]], 0] [1, 2000] [1, 1] : tensor<?x2000xf32> to tensor<1x2000xf32>
 //       CHECK:       linalg.matmul ins(%[[S1]], %[[S2]] : tensor<1x1xf32>, tensor<1x2000xf32>) outs(%[[S3]] : tensor<1x2000xf32>) -> tensor<1x2000xf32>
-func @matmul_partly_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x2000xf32>)
+func.func @matmul_partly_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x2000xf32>)
     -> tensor<?x2000xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -37,7 +37,7 @@ func @matmul_partly_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x2000x
 #map1 = affine_map<()[s0] -> ((s0 floordiv 32) * 32)>
 #map2 = affine_map<(d0)[s0] -> (d0 - (s0 floordiv 32) * 32)>
 
-func @tiled_and_peeled_matmul(%arg0: tensor<257x259xf32>, %arg1: tensor<259x258xf32>, %arg2: tensor<257x258xf32>) -> tensor<257x258xf32> {
+func.func @tiled_and_peeled_matmul(%arg0: tensor<257x259xf32>, %arg1: tensor<259x258xf32>, %arg2: tensor<257x258xf32>) -> tensor<257x258xf32> {
   %c257 = arith.constant 257 : index
   %c64 = arith.constant 64 : index
   %cst = arith.constant 0.000000e+00 : f32

diff --git a/mlir/test/Dialect/Linalg/tile-tensors.mlir b/mlir/test/Dialect/Linalg/tile-tensors.mlir
index 741ee80e56cfd..7bf8e3062c896 100644
--- a/mlir/test/Dialect/Linalg/tile-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -4,7 +4,7 @@
 // CHECK-SAME:    %[[TA:[0-9a-z]+]]: tensor<?x?xf32>
 // CHECK-SAME:    %[[TB:[0-9a-z]+]]: tensor<?x?xf32>
 // CHECK-SAME:    %[[TC:[0-9a-z]+]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
-func @matmul_tensors(
+func.func @matmul_tensors(
   %arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>)
     -> tensor<?x?xf32> {
 //      CHECK: %[[TD0:.*]] = scf.for {{.*}} to {{.*}} step {{.*}} iter_args(%[[TC0:.*]] = %[[TC]]) -> (tensor<?x?xf32>) {
@@ -29,7 +29,7 @@ func @matmul_tensors(
 
 // -----
 
-func @generic_op_tensors(
+func.func @generic_op_tensors(
   %arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
@@ -83,7 +83,7 @@ func @generic_op_tensors(
 //      CHECK:  fold_extract_slice
 // CHECK-SAME:    %[[ARG0:[0-9a-zA-Z]*]]: tensor<?x128xf32>
 // CHECK-SAME:    %[[ARG1:[0-9a-zA-Z]*]]: tensor<?x42xf32>
-func @fold_extract_slice(
+func.func @fold_extract_slice(
   %arg0 : tensor<?x128xf32>, %arg1 : tensor<?x42xf32>, %arg2 : tensor<?x42x?xf32>) -> tensor<?x42xf32> {
 
   //      CHECK:    %[[C0:.*]] = arith.constant 0

diff --git a/mlir/test/Dialect/Linalg/tile-zero.mlir b/mlir/test/Dialect/Linalg/tile-zero.mlir
index 1a09906e52ce7..147b7c7d377d6 100644
--- a/mlir/test/Dialect/Linalg/tile-zero.mlir
+++ b/mlir/test/Dialect/Linalg/tile-zero.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -test-linalg-transform-patterns=test-tile-pattern %s | FileCheck %s
 
-func @matmul_zero_tile(
+func.func @matmul_zero_tile(
   %arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
   %0 = linalg.matmul {__internal_linalg_transform__ = "tile"}
       ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)

diff --git a/mlir/test/Dialect/Linalg/tile.mlir b/mlir/test/Dialect/Linalg/tile.mlir
index 37f1e2ca2a327..8ec5a929dbba1 100644
--- a/mlir/test/Dialect/Linalg/tile.mlir
+++ b/mlir/test/Dialect/Linalg/tile.mlir
@@ -24,7 +24,7 @@
 //  TILE-02-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
 // TILE-234-DAG: #[[$stride_99_1_layout_map:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 99 + s0 + d1)>
 
-func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %arg2: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   linalg.matmul
@@ -99,7 +99,7 @@ func @matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // the "min" in subview size computation. This test uses buffer sizes divisible
 // by respective tile sizes (M=10 divisible by 2, N=12 divisible by 2 and 3,
 // K=16 divisible by 2 and 4).
-func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>,
+func.func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>,
                     %arg1: memref<16x12xf32, offset: ?, strides: [?, 1]>,
                     %arg2: memref<10x12xf32, offset: ?, strides: [?, 1]>) {
   linalg.matmul
@@ -155,7 +155,7 @@ func @matmul_static(%arg0: memref<10x16xf32, offset: ?, strides: [?, 1]>,
 //
 //       TILE-234:        linalg.matmul ins(%[[sAik]], %[[sBkj]]{{.*}} outs(%[[sCij]]
 
-func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<?xf32, offset: ?, strides: [1]>) {
+func.func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<?xf32, offset: ?, strides: [1]>) {
   linalg.matvec
     ins(%arg0, %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                       memref<?xf32, offset: ?, strides: [1]>)
@@ -219,7 +219,7 @@ func @matvec(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?
 //
 //       TILE-234:      linalg.matvec ins(%[[sAij]], %[[sBj]]{{.*}} outs(%[[sCi]]
 
-func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<f32>) {
+func.func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, offset: ?, strides: [1]>, %arg2: memref<f32>) {
   linalg.dot
     ins(%arg0, %arg1: memref<?xf32, offset: ?, strides: [1]>, memref<?xf32, offset: ?, strides: [1]>)
    outs(%arg2: memref<f32>)
@@ -253,7 +253,7 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: memref<?xf32, of
 //       TILE-234:    %[[sBi:.*]] = memref.subview %{{.*}}[%[[I]]] [%[[szM]]] [1] : memref<?xf32, #[[$strided1D]]> to memref<?xf32, #[[$strided1D]]>
 //       TILE-234:    linalg.dot ins(%[[sAi]], %[[sBi]]{{.*}} outs(
 
-func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
+func.func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
   linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<127x99xf32>)
   return
 }
@@ -281,7 +281,7 @@ func @fill_static(%arg0: memref<127x99xf32>, %arg1: f32) {
 //       TILE-234:       linalg.fill{{.*}} : memref<?x3xf32, #[[$stride_99_1_layout_map]]>
 
 
-func @fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: f32) {
+func.func @fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: f32) {
   linalg.fill ins(%arg1 : f32) outs(%arg0 : memref<?x?xf32, offset: ?, strides: [?, 1]>)
   return
 }
@@ -313,7 +313,7 @@ func @fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: f32) {
   iterator_types = ["parallel", "parallel"]
 }
 
-func @pointwise(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @pointwise(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                 %arg2: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   linalg.generic #pointwise_2d_trait
     ins(%arg0, %arg1 : memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>)

diff --git a/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir b/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
index 9a387cd45c534..05a1946ee8924 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns-matmul-to-vector.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -test-linalg-transform-patterns=test-matmul-to-vector-patterns-tile-1d | FileCheck %s -check-prefix=CHECK-1D
 // RUN: mlir-opt %s -test-linalg-transform-patterns=test-matmul-to-vector-patterns-tile-2d | FileCheck %s -check-prefix=CHECK-2D
 
-func @matmul(%A: memref<1584x1584xf32, offset: 0, strides: [1584, 1]>,
+func.func @matmul(%A: memref<1584x1584xf32, offset: 0, strides: [1584, 1]>,
                   %B: memref<1584x1584xf32, offset: 0, strides: [1584, 1]>,
                   %C: memref<1584x1584xf32, offset: 0, strides: [1584, 1]>) {
   linalg.matmul {__internal_linalg_transform__ = "START"}

diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
index 4cbc3d52486c1..b776a2dc30076 100644
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -9,7 +9,7 @@
 // CHECK-DAG: #[[$nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
 // CHECK-DAG: #[[$km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
 
-func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
+func.func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
           %y: memref<?xf32, offset: ?, strides: [1]>,
           %v: memref<f32>) {
   linalg.dot { __internal_linalg_transform__ = "MEM" }
@@ -32,7 +32,7 @@ func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
 // CHECK:               arith.addf
 // CHECK:               store
 
-func @matvec(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @matvec(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %x: memref<?xf32, offset: ?, strides: [1]>,
              %y: memref<?xf32, offset: ?, strides: [1]>) {
   linalg.matvec
@@ -51,7 +51,7 @@ func @matvec(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK:               ins({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>, memref<?xf32, #[[$STRIDED_1D]]>)
 // CHECK:              outs({{.*}}: memref<?xf32, #[[$STRIDED_1D]]>)
 
-func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %C: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   linalg.matmul { __internal_linalg_transform__ = "MEM" }
@@ -102,7 +102,7 @@ func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
   library_call = "linalg_matmul",
   iterator_types = ["parallel", "parallel", "reduction"]
 }
-func @permute_generic(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @permute_generic(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
            %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
            %C: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   linalg.generic #generic_matmul_trait
@@ -125,7 +125,7 @@ func @permute_generic(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK-SAME:     memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
 // CHECK-SAME:     memref<?x?xf32, #[[$STRIDED_2D_u_1]]>
 
-func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %x: memref<?xf32, offset: ?, strides: [1]>,
              %y: memref<?xf32, offset: ?, strides: [1]>) {
   linalg.matvec {__internal_linalg_transform__ = "__with_perm__"}
@@ -144,7 +144,7 @@ func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK:               ins({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>, memref<?xf32, #[[$STRIDED_1D]]>)
 // CHECK:              outs({{.*}}: memref<?xf32, #[[$STRIDED_1D]]>)
 
-func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
              %C: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   linalg.matmul {__internal_linalg_transform__ = "__with_perm__"}
@@ -177,7 +177,7 @@ func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK:                                  ins({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>, memref<?x?xf32, #[[$STRIDED_2D_u_1]]>)
 // CHECK:                                   outs({{.*}}: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>)
 
-func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              %arg2: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   %c2000 = arith.constant 2000 : index
@@ -236,7 +236,7 @@ func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
 // CHECK-SAME:                 ins(%[[v0]], %[[v1]] : memref<?x?xf32>, memref<?x?xf32>)
 // CHECK-SAME:                outs(%[[v2]] : memref<?x?xf32>)
 
-func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
+func.func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              %arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
                              %arg2: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   %c2000 = arith.constant 2000 : index
@@ -288,7 +288,7 @@ func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?
 // CHECK-SAME:           ins(%[[v0]], %[[s1]] : memref<?x?xf32>, memref<?x?xf32, #[[$STRIDED_2D]]>)
 // CHECK-SAME:          outs(%[[s2]] : memref<?x?xf32, #[[$STRIDED_2D]]>)
 
-func @aligned_promote_fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
+func.func @aligned_promote_fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
   %c2000 = arith.constant 2000 : index
   %c4000 = arith.constant 4000 : index
   %c0 = arith.constant 0 : index
@@ -310,7 +310,7 @@ func @aligned_promote_fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
 // CHECK:         memref.copy %[[s0]], %[[l0]] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
 // CHECK:         linalg.fill ins(%[[cf]] : f32) outs(%[[v0]] : memref<?x?xf32>)
 
-func @aligned_promote_fill_complex(%arg0: memref<?x?xcomplex<f32>, offset: ?, strides: [?, 1]>) {
+func.func @aligned_promote_fill_complex(%arg0: memref<?x?xcomplex<f32>, offset: ?, strides: [?, 1]>) {
   %c2000 = arith.constant 2000 : index
   %c4000 = arith.constant 4000 : index
   %c0 = arith.constant 0 : index
@@ -333,7 +333,7 @@ func @aligned_promote_fill_complex(%arg0: memref<?x?xcomplex<f32>, offset: ?, st
 // CHECK:         memref.copy %[[s0]], %[[l0]] : memref<?x?xcomplex<f32>, #map{{.*}}> to memref<?x?xcomplex<f32>, #map{{.*}}>
 // CHECK:         linalg.fill ins(%[[cc]] : complex<f32>) outs(%[[v0]] : memref<?x?xcomplex<f32>>)
 
-func @tile_permute_parallel_loop(%arg0: memref<?x?xf32>,
+func.func @tile_permute_parallel_loop(%arg0: memref<?x?xf32>,
                                  %arg1: memref<?x?xf32>,
                                  %arg2: memref<?x?xf32>) {
   linalg.matmul {__internal_linalg_transform__ = "par__with_perm__"}

diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index e728013a073e7..3414450ceae95 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -3,7 +3,7 @@
 // -----
 
 // CHECK-LABEL: contraction_dot
-func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32>) {
+func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32>) {
 
 // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584xf32>
 // CHECK: vector.multi_reduction <add>, %{{.*}} [0] : vector<1584xf32> to f32
@@ -16,7 +16,7 @@ func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32
 // -----
 
 // CHECK-LABEL: contraction_matvec
-func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %C: memref<1584xf32>) {
+func.func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %C: memref<1584xf32>) {
 
 // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584xf32>
 // CHECK: vector.multi_reduction <add>, %{{.*}} [1] : vector<1584x1584xf32> to vector<1584xf32>
@@ -29,7 +29,7 @@ func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %C: me
 // -----
 
 // CHECK-LABEL: contraction_matmul
-func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf32>, %C: memref<1584x1584xf32>) {
+func.func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf32>, %C: memref<1584x1584xf32>) {
 // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584xf32>
 // CHECK: vector.multi_reduction <add>, %{{.*}} [2] : vector<1584x1584x1584xf32> to vector<1584x1584xf32>
 // CHECK: arith.addf %{{.*}}, %{{.*}} : vector<1584x1584xf32>
@@ -41,7 +41,7 @@ func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf32>, %
 // -----
 
 // CHECK-LABEL: contraction_batch_matmul
-func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1584x1584x1584xf32>, %C: memref<1584x1584x1584xf32>) {
+func.func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1584x1584x1584xf32>, %C: memref<1584x1584x1584xf32>) {
 // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584x1584xf32>
 // CHECK: vector.multi_reduction <add>, %{{.*}} [3] : vector<1584x1584x1584x1584xf32> to vector<1584x1584x1584xf32>
 // CHECK: arith.addf %{{.*}}, %{{.*}} : vector<1584x1584x1584xf32>
@@ -65,7 +65,7 @@ func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1584x1
 }
 
 // CHECK-LABEL: func @vectorization_test
-func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
+func.func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
                          %C: memref<8x32xf32>) {
   //       CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32>
   //       CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32>
@@ -99,7 +99,7 @@ func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
 }
 
 // CHECK-LABEL: func @generic_output_transpose
-func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
+func.func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
                          %C: memref<32x8xf32>) {
   //       CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32>
   //       CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32>
@@ -133,7 +133,7 @@ func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
 }
 
 // CHECK-LABEL: func @vectorization_test_integer
-func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32>,
+func.func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32>,
                                  %C: memref<8x32xi32>) {
   //       CHECK: vector.transfer_read %{{.*}} : memref<8x16xi32>, vector<8x32x16xi32>
   //       CHECK: vector.transfer_read %{{.*}} : memref<16x32xi32>, vector<8x32x16xi32>
@@ -157,7 +157,7 @@ func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32>,
 // -----
 
 // CHECK-LABEL: func @vectorization_test_2
-func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
+func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
                          %C: memref<8x32xf32>) {
   //       CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
   //       CHECK: vector.multi_reduction <add>, %{{.*}} [2] : vector<8x32x16xf32> to vector<8x32xf32>
@@ -171,7 +171,7 @@ func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
 // -----
 
 // CHECK-LABEL: func @test_vectorize_scalar_input
-func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
+func.func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
   //       CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32>
   //       CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
   linalg.generic {
@@ -188,7 +188,7 @@ func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
 // -----
 
 // CHECK-LABEL: func @test_vectorize_fill
-func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
+func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
   //       CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32>
   //       CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
   linalg.fill ins(%arg0 : f32) outs(%A : memref<8x16xf32>)
@@ -198,7 +198,7 @@ func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) {
 // -----
 
 // CHECK-LABEL: func @test_vectorize_fill
-func @test_vectorize_fill_scalar(%A : memref<f32>, %arg0 : f32) {
+func.func @test_vectorize_fill_scalar(%A : memref<f32>, %arg0 : f32) {
   // CHECK-SAME: (%[[M:.*]]: memref<f32>, %[[val:.*]]: f32)
   //      CHECK:   %[[VEC:.*]] = vector.broadcast %[[val]] : f32 to vector<f32>
   //      CHECK:   vector.transfer_write %[[VEC]], %[[M]][] : vector<f32>, memref<f32>
@@ -209,7 +209,7 @@ func @test_vectorize_fill_scalar(%A : memref<f32>, %arg0 : f32) {
 // -----
 
 // CHECK-LABEL: func @test_vectorize_copy
-func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
+func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
   //       CHECK: %[[V:.*]] = vector.transfer_read {{.*}} : memref<8x16xf32>, vector<8x16xf32>
   //       CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
   memref.copy %A, %B :  memref<8x16xf32> to memref<8x16xf32>
@@ -219,7 +219,7 @@ func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) {
 // -----
 
 // CHECK-LABEL: func @test_vectorize_copy_scalar
-func @test_vectorize_copy_scalar(%A : memref<f32>, %B : memref<f32>) {
+func.func @test_vectorize_copy_scalar(%A : memref<f32>, %B : memref<f32>) {
   //  CHECK-SAME: (%[[A:.*]]: memref<f32>, %[[B:.*]]: memref<f32>)
   //       CHECK:   %[[V:.*]] = vector.transfer_read %[[A]][]{{.*}} : memref<f32>, vector<f32>
   //       CHECK:   %[[val:.*]] = vector.extractelement %[[V]][] : vector<f32>
@@ -233,7 +233,7 @@ func @test_vectorize_copy_scalar(%A : memref<f32>, %B : memref<f32>) {
 
 // CHECK-LABEL: func @test_vectorize_trailing_index
   //  CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>)
-func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) {
+func.func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) {
   //   CHECK-DAG:   %[[CST0:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex>
   //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
   linalg.generic {
@@ -254,7 +254,7 @@ func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) {
 
 // CHECK-LABEL: func @test_vectorize_inner_index
   //  CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>)
-func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) {
+func.func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) {
   //   CHECK-DAG:   %[[CST0:.*]] = arith.constant dense<[0, 1]> : vector<2xindex>
   //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
   linalg.generic {
@@ -277,7 +277,7 @@ func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) {
 // CHECK-LABEL: func @generic_vectorize
   //  CHECK-SAME: (%[[ARG0:.*]]: memref<4x256xf32>, %[[ARG1:.*]]: memref<4x256xf32>,
   //  CHECK-SAME:  %[[ARG2:.*]]: memref<256xf32>, %[[ARG3:.*]]: f32)
-func @generic_vectorize(%arg0: memref<4x256xf32>,
+func.func @generic_vectorize(%arg0: memref<4x256xf32>,
                         %arg1: memref<4x256xf32>,
                         %arg2: memref<256xf32>, %i: f32) {
   //   CHECK-DAG:   %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<4x256xf32>
@@ -355,7 +355,7 @@ func @generic_vectorize(%arg0: memref<4x256xf32>,
 // CHECK-LABEL: func @generic_vectorize_tensor
 //  CHECK-SAME: (%[[ARG0:.*]]: tensor<4x256xf32>, %[[ARG1:.*]]: tensor<4x256xf32>,
 //  CHECK-SAME:  %[[ARG2:.*]]: tensor<256xf32>, %[[ARG3:.*]]: f32)
-func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
+func.func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
   %arg1: tensor<4x256xf32>, %arg2: tensor<256xf32>,
   %i: f32) -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
     tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
@@ -451,7 +451,7 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
 //     CHECK:   %[[ADD0:.*]] = arith.addf %[[V2]], %[[SUB]] : vector<4x4x4x4xf32>
 //     CHECK:   %[[ADD1:.*]] = arith.addf %[[V3]], %[[ADD0]] : vector<4x4x4x4xf32>
 //     CHECK: vector.transfer_write %[[ADD1]], {{.*}} : vector<4x4x4x4xf32>, memref<4x4x4x4xf32>
-func @generic_vectorize_broadcast_transpose(
+func.func @generic_vectorize_broadcast_transpose(
   %A: memref<4xf32>, %B: memref<4x4xf32>, %C: memref<4x4x4x4xf32>) {
   linalg.generic {
   indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d3)>,
@@ -494,7 +494,7 @@ func @generic_vectorize_broadcast_transpose(
 //       CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32>
 //       CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32>
 //       CHECK: vector.transfer_write {{.*}} : vector<7x14x8x16xf32>, memref<7x14x8x16xf32>
-func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>,
+func.func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>,
                          %C: memref<16x14x7x8xf32>, %D: memref<7x14x8x16xf32>) {
   linalg.generic #matmul_trait
     ins(%A, %B, %C : memref<14x7xf32>, memref<16x14xf32>, memref<16x14x7x8xf32>)
@@ -512,7 +512,7 @@ func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>,
 // CHECK-LABEL: func @matmul_tensors
 //  CHECK-SAME: (%[[ARG0:.*]]: tensor<8x4xf32>, %[[ARG1:.*]]: tensor<4x12xf32>,
 //  CHECK-SAME:  %[[ARG2:.*]]: tensor<8x12xf32>) -> tensor<8x12xf32>
-func @matmul_tensors(
+func.func @matmul_tensors(
   %arg0: tensor<8x4xf32>, %arg1: tensor<4x12xf32>, %arg2: tensor<8x12xf32>)
     -> tensor<8x12xf32> {
   //   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
@@ -546,7 +546,7 @@ func @matmul_tensors(
 //       CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %[[PAD]] {in_bounds = [true, false, true]} : tensor<2x?x2xf32>, vector<2x3x2xf32>
 //       CHECK:   %[[RESULT:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x3x2xf32>, tensor<2x3x4xf32>
 //       CHECK:   return %[[RESULT]]
-func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4xf32> {
+func.func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4xf32> {
   %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] {
     ^bb0(%arg1: index, %arg2: index, %arg3: index):
       tensor.yield %pad_value : f32
@@ -567,7 +567,7 @@ func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4xf32>
 //       CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : tensor<2x5x2xf32>, vector<2x5x2xf32>
 //       CHECK:   %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x5x2xf32>, tensor<2x6x4xf32>
 //       CHECK:   return %[[WRITE]]
-func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: f32) -> tensor<2x6x4xf32> {
+func.func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: f32) -> tensor<2x6x4xf32> {
   %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] {
     ^bb0(%arg1: index, %arg2: index, %arg3: index):
       tensor.yield %pad_value : f32
@@ -594,7 +594,7 @@ func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: f32) -> tensor<2x6
 //       CHECK:   %[[SRCDIM:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
 //       CHECK:   %[[RESULT:.*]] = tensor.insert_slice %[[SRC]] into %[[FILL]][2, %[[LOW]], 3, 3] [1, 2, 2, %[[SRCDIM]]] [1, 1, 1, 1] : tensor<1x2x2x?xf32> into tensor<6x?x?x?xf32>
 //       CHECK:   return %[[RESULT]]
-func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
+func.func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
                   %pad_value: f32) -> tensor<6x?x?x?xf32> {
   %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] {
     ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
@@ -612,7 +612,7 @@ func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
 //   CHECK-DAG:   %[[C5:.*]] = arith.constant 5.0
 //       CHECK:   %[[RESULT:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %[[C5]] : tensor<5x6xf32>, vector<7x9xf32>
 //       CHECK:   return %[[RESULT]]
-func @pad_and_transfer_read(%arg0: tensor<5x6xf32>) -> vector<7x9xf32> {
+func.func @pad_and_transfer_read(%arg0: tensor<5x6xf32>) -> vector<7x9xf32> {
   %c0 = arith.constant 0 : index
   %c5 = arith.constant 5.0 : f32
   %c6 = arith.constant 6.0 : f32
@@ -627,7 +627,7 @@ func @pad_and_transfer_read(%arg0: tensor<5x6xf32>) -> vector<7x9xf32> {
 
 // -----
 
-func private @make_vector() -> vector<7x9xf32>
+func.func private @make_vector() -> vector<7x9xf32>
 
 // CHECK-LABEL: func @pad_and_transfer_write_static
 //  CHECK-SAME:     %[[ARG0:.*]]: tensor<5x6xf32>
@@ -636,7 +636,7 @@ func private @make_vector() -> vector<7x9xf32>
 //       CHECK:   %[[VEC0:.*]] = call @make_vector() : () -> vector<7x9xf32>
 //       CHECK:   %[[RESULT:.*]] = vector.transfer_write %[[VEC0]], %[[ARG0]][%[[C0]], %[[C0]]] : vector<7x9xf32>, tensor<5x6xf32>
 //       CHECK:   return %[[RESULT]]
-func @pad_and_transfer_write_static(
+func.func @pad_and_transfer_write_static(
     %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
   %c0 = arith.constant 0 : index
   %c5 = arith.constant 5.0 : f32
@@ -653,7 +653,7 @@ func @pad_and_transfer_write_static(
 
 // -----
 
-func private @make_vector() -> vector<7x9xf32>
+func.func private @make_vector() -> vector<7x9xf32>
 
 // CHECK-LABEL: func @pad_and_transfer_write_dynamic_static
 //  CHECK-SAME:     %[[ARG0:.*]]: tensor<?x?xf32>, %[[SIZE:.*]]: index, %[[PADDING:.*]]: index
@@ -663,7 +663,7 @@ func private @make_vector() -> vector<7x9xf32>
 //       CHECK:   %[[VEC0:.*]] = call @make_vector() : () -> vector<7x9xf32>
 //       CHECK:   %[[RESULT:.*]] = vector.transfer_write %[[VEC0]], %[[SUB]][%[[C0]], %[[C0]]] : vector<7x9xf32>, tensor<?x6xf32>
 //       CHECK:   return %[[RESULT]]
-func @pad_and_transfer_write_dynamic_static(
+func.func @pad_and_transfer_write_dynamic_static(
     %arg0: tensor<?x?xf32>, %size: index, %padding: index) -> tensor<?x6xf32> {
   %c0 = arith.constant 0 : index
   %c5 = arith.constant 5.0 : f32
@@ -682,7 +682,7 @@ func @pad_and_transfer_write_dynamic_static(
 
 // -----
 
-func private @make_vector() -> tensor<12x13xf32>
+func.func private @make_vector() -> tensor<12x13xf32>
 
 // CHECK-LABEL: func @pad_and_insert_slice_source
 //  CHECK-SAME:     %[[ARG0:.*]]: tensor<5x6xf32>
@@ -693,7 +693,7 @@ func private @make_vector() -> tensor<12x13xf32>
 //       CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %[[C5]] : tensor<5x6xf32>, vector<7x9xf32>
 //       CHECK:   %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[VEC0]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<7x9xf32>, tensor<12x13xf32>
 //       CHECK:   return %[[WRITE]]
-func @pad_and_insert_slice_source(
+func.func @pad_and_insert_slice_source(
     %arg0: tensor<5x6xf32>) -> tensor<12x13xf32> {
   %c0 = arith.constant 0 : index
   %c5 = arith.constant 5.0 : f32
@@ -708,13 +708,13 @@ func @pad_and_insert_slice_source(
 
 // -----
 
-func private @make_vector() -> tensor<12x13xf32>
+func.func private @make_vector() -> tensor<12x13xf32>
 
 // CHECK-LABEL: func @pad_and_insert_slice_dest
 // Check the insert slice is not rewritten if the padded result is used by the destination operand.
 //       CHECK:   %[[T1:.*]] = call @make_vector() : () -> tensor<12x13xf32>
 //       CHECK:   = tensor.insert_slice %[[T1]] into
-func @pad_and_insert_slice_dest(
+func.func @pad_and_insert_slice_dest(
     %arg0: tensor<1x5x6xf32>) -> tensor<1x12x13xf32> {
   %c5 = arith.constant 5.0 : f32
   %0 = tensor.pad %arg0 low[0, 0, 0] high[0, 7, 7] {
@@ -740,7 +740,7 @@ func @pad_and_insert_slice_dest(
 //       CHECK:   %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true]} : tensor<5x6xf32>, vector<5x6xf32>
 //       CHECK:   %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C3]], %[[C4]]] {in_bounds = [true, true]} : vector<5x6xf32>, tensor<12x13xf32>
 //       CHECK:   return %[[WRITE]]
-func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x13xf32> {
+func.func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x13xf32> {
   %c0 = arith.constant 0 : index
   %c5 = arith.constant 5.0 : f32
   %0 = tensor.pad %arg0 low[3, 4] high[4, 3] {
@@ -758,7 +758,7 @@ func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x13xf32
 // -----
 
 // CHECK-LABEL: func @sum_exp
-func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
+func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
   -> tensor<4x16xf32>
 {
   // CHECK: vector.transfer_read {{.*}} : tensor<4x16x8xf32>, vector<4x16x8xf32>
@@ -790,7 +790,7 @@ func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
 // CHECK-DAG: #[[$M3:.*]] =  affine_map<(d0, d1) -> (d1, d0)>
 
 // CHECK-LABEL: func @sum_exp_2
-func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: tensor<5x2xf32>)
+func.func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: tensor<5x2xf32>)
   -> tensor<5x2xf32>
 {
   // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M1]]} : tensor<3x2xf32>, vector<2x3x4x5xf32>
@@ -824,7 +824,7 @@ func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: ten
 // -----
 
 // CHECK-LABEL:   func @red_max_2d(
-func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
+func.func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
   // CHECK: %[[CMINF:.+]] = arith.constant dense<-3.402820e+38> : vector<4xf32>
   // CHECK: linalg.init_tensor [4] : tensor<4xf32>
   // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
@@ -848,7 +848,7 @@ func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
 // -----
 
 // CHECK-LABEL:   func @red_min_2d(
-func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
+func.func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
   // CHECK: %[[CMAXF:.+]] = arith.constant dense<3.402820e+38> : vector<4xf32>
   // CHECK: linalg.init_tensor [4] : tensor<4xf32>
   // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
@@ -873,7 +873,7 @@ func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
 // -----
 
 // CHECK-LABEL:   func @red_mul_2d(
-func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
+func.func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
   // CHECK: linalg.init_tensor [4] : tensor<4xf32>
   // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
   // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
@@ -896,7 +896,7 @@ func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
 // -----
 
 // CHECK-LABEL:   func @red_or_2d(
-func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
+func.func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
   // CHECK: linalg.init_tensor [4] : tensor<4xi1>
   // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
   // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
@@ -919,7 +919,7 @@ func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
 // -----
 
 // CHECK-LABEL:   func @red_and_2d(
-func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
+func.func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
   // CHECK: linalg.init_tensor [4] : tensor<4xi1>
   // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
   // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
@@ -942,7 +942,7 @@ func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
 // -----
 
 // CHECK-LABEL:   func @red_xor_2d(
-func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
+func.func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
   // CHECK: linalg.init_tensor [4] : tensor<4xi1>
   // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
   // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
@@ -967,7 +967,7 @@ func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
 // CHECK-DAG: #[[$M5:.*]] = affine_map<(d0, d1) -> (d0, 0)>
 
 // CHECK-LABEL:   func @explicit_broadcast(
-func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4x4xf32> {
+func.func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4x4xf32> {
   // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32>
   // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M5]]} : tensor<4x1xf32>, vector<4x4xf32>
   // CHECK: subf {{.*}} : vector<4x4xf32>
@@ -993,7 +993,7 @@ func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tens
 // CHECK-DAG: #[[$M6:.*]] = affine_map<(d0, d1) -> (d0, 0)>
 
 // CHECK-LABEL:   func @fused_broadcast_red_2d
-func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4xf32> {
+func.func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4xf32> {
   // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32>
   // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M6]]} : tensor<4x1xf32>, vector<4x4xf32>
   // CHECK: subf {{.*}} : vector<4x4xf32>
@@ -1023,7 +1023,7 @@ func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) ->
 
 //  CHECK-LABEL: func @reduce_1d(
 //   CHECK-SAME:   %[[A:.*]]: tensor<32xf32>
-func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
+func.func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
   //  CHECK-DAG: %[[vF0:.*]] = arith.constant dense<0.000000e+00> : vector<f32>
   //  CHECK-DAG: %[[F0:.*]] = arith.constant 0.000000e+00 : f32
   //  CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index

diff --git a/mlir/test/Dialect/Linalg/vectorize-convolution.mlir b/mlir/test/Dialect/Linalg/vectorize-convolution.mlir
index 3168e7f67d515..a4eb9d26e9c8e 100644
--- a/mlir/test/Dialect/Linalg/vectorize-convolution.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-convolution.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -split-input-file -test-linalg-transform-patterns=test-linalg-to-vector-patterns %s | FileCheck %s
 
-func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<1x3x8xf32>, %output: memref<4x2x8xf32>) {
+func.func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<1x3x8xf32>, %output: memref<4x2x8xf32>) {
   linalg.conv_1d_nwc_wcf
     {dilations = dense<1> : tensor<1xi64>, strides = dense<3> : tensor<1xi64>}
     ins(%input, %filter : memref<4x6x3xf32>, memref<1x3x8xf32>)
@@ -61,7 +61,7 @@ func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<1x3x8xf
 
 // -----
 
-func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<2x3x8xf32>, %output: memref<4x2x8xf32>) {
+func.func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<2x3x8xf32>, %output: memref<4x2x8xf32>) {
   linalg.conv_1d_nwc_wcf
     {dilations = dense<2> : tensor<1xi64>, strides = dense<3> : tensor<1xi64>}
     ins(%input, %filter : memref<4x6x3xf32>, memref<2x3x8xf32>)
@@ -138,7 +138,7 @@ func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<2x3x8xf
 
 // -----
 
-func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<2x3x8xf32>, %output: memref<4x2x8xf32>) {
+func.func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<2x3x8xf32>, %output: memref<4x2x8xf32>) {
   linalg.conv_1d_nwc_wcf
     {dilations = dense<2> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
     ins(%input, %filter : memref<4x6x3xf32>, memref<2x3x8xf32>)
@@ -187,7 +187,7 @@ func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<2x3x8xf
 
 // -----
 
-func @depthwise_conv1d_nwc_wc_3x5x4_memref(%input: memref<3x5x4xf32>, %filter: memref<2x4xf32>, %output: memref<3x2x4xf32>) {
+func.func @depthwise_conv1d_nwc_wc_3x5x4_memref(%input: memref<3x5x4xf32>, %filter: memref<2x4xf32>, %output: memref<3x2x4xf32>) {
   linalg.depthwise_conv_1d_nwc_wc
     {dilations = dense<2> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
     ins(%input, %filter : memref<3x5x4xf32>, memref<2x4xf32>)
