[Mlir-commits] [mlir] 227ed2f - [mlir][NFC] Update textual references of `func` to `func.func` in Affine/ tests

River Riddle llvmlistbot at llvm.org
Wed Apr 20 22:24:24 PDT 2022


Author: River Riddle
Date: 2022-04-20T22:17:27-07:00
New Revision: 227ed2f448e26cfc646e30ac17b03eac9578c297

URL: https://github.com/llvm/llvm-project/commit/227ed2f448e26cfc646e30ac17b03eac9578c297
DIFF: https://github.com/llvm/llvm-project/commit/227ed2f448e26cfc646e30ac17b03eac9578c297.diff

LOG: [mlir][NFC] Update textual references of `func` to `func.func` in Affine/ tests

The special case parsing of `func` operations is being removed.

Added: 
    

Modified: 
    mlir/test/Dialect/Affine/SuperVectorize/compose_maps.mlir
    mlir/test/Dialect/Affine/SuperVectorize/uniform_divergent.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vector_utils.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction_2d.mlir
    mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
    mlir/test/Dialect/Affine/affine-data-copy.mlir
    mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
    mlir/test/Dialect/Affine/affine-loop-normalize.mlir
    mlir/test/Dialect/Affine/canonicalize.mlir
    mlir/test/Dialect/Affine/constant-fold.mlir
    mlir/test/Dialect/Affine/dma-generate.mlir
    mlir/test/Dialect/Affine/dma.mlir
    mlir/test/Dialect/Affine/inlining.mlir
    mlir/test/Dialect/Affine/invalid.mlir
    mlir/test/Dialect/Affine/load-store-invalid.mlir
    mlir/test/Dialect/Affine/load-store.mlir
    mlir/test/Dialect/Affine/loop-coalescing.mlir
    mlir/test/Dialect/Affine/loop-permute.mlir
    mlir/test/Dialect/Affine/loop-tiling-parametric.mlir
    mlir/test/Dialect/Affine/loop-tiling-validity.mlir
    mlir/test/Dialect/Affine/loop-tiling.mlir
    mlir/test/Dialect/Affine/loop-unswitch.mlir
    mlir/test/Dialect/Affine/memref-stride-calculation.mlir
    mlir/test/Dialect/Affine/ops.mlir
    mlir/test/Dialect/Affine/parallelize.mlir
    mlir/test/Dialect/Affine/scalrep.mlir
    mlir/test/Dialect/Affine/simplify-structures.mlir
    mlir/test/Dialect/Affine/slicing-utils.mlir
    mlir/test/Dialect/Affine/unroll-jam.mlir
    mlir/test/Dialect/Affine/unroll.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Dialect/Affine/SuperVectorize/compose_maps.mlir b/mlir/test/Dialect/Affine/SuperVectorize/compose_maps.mlir
index 8b3fc8b07bbb4..3b7820cec67a2 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/compose_maps.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/compose_maps.mlir
@@ -9,7 +9,7 @@
 // will produce the sequence of compositions: f, g(f), h(g(f)) and print the
 // AffineMap h(g(f)), which is what FileCheck checks against.
 
-func @simple1() {
+func.func @simple1() {
   // CHECK: Composed map: (d0) -> (d0)
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 1)> } : () -> ()
@@ -18,7 +18,7 @@ func @simple1() {
 
 // -----
 
-func @simple2() {
+func.func @simple2() {
   // CHECK: Composed map: (d0)[s0, s1] -> (d0 - s0 + s1)
   "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 + s0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 - s0 + 1)> } : () -> ()
@@ -27,7 +27,7 @@ func @simple2() {
 
 // -----
 
-func @simple3a() {
+func.func @simple3a() {
   // CHECK: Composed map: (d0, d1)[s0, s1, s2, s3] -> ((d0 ceildiv s2) * s0, (d1 ceildiv s3) * s1)
   "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 ceildiv s0, d1 ceildiv s1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 * s0, d1 * s1)> } : () -> ()
@@ -36,7 +36,7 @@ func @simple3a() {
 
 // -----
 
-func @simple3b() {
+func.func @simple3b() {
   // CHECK: Composed map: (d0, d1)[s0, s1] -> (d0 mod s0, d1 mod s1)
   "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 mod s0, d1 mod s1)> } : () -> ()
   return
@@ -44,7 +44,7 @@ func @simple3b() {
 
 // -----
 
-func @simple3c() {
+func.func @simple3c() {
   // CHECK: Composed map: (d0, d1)[s0, s1, s2, s3, s4, s5] -> ((d0 ceildiv s4) * s4 + d0 mod s2, (d1 ceildiv s5) * s5 + d1 mod s3)
   "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> ((d0 ceildiv s0) * s0, (d1 ceildiv s1) * s1, d0, d1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 + d2 mod s2, d1 + d3 mod s3)> } : () -> ()
@@ -53,7 +53,7 @@ func @simple3c() {
 
 // -----
 
-func @simple4() {
+func.func @simple4() {
   // CHECK: Composed map: (d0, d1)[s0, s1] -> (d1 * s1, d0 ceildiv s0)
   "test_affine_map"() { affine_map = affine_map<(d0, d1) -> (d1, d0)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 * s1, d1 ceildiv s0)> } : () -> ()
@@ -62,7 +62,7 @@ func @simple4() {
 
 // -----
 
-func @simple5a() {
+func.func @simple5a() {
   // CHECK: Composed map: (d0) -> (d0 * 3 + 18)
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
@@ -73,7 +73,7 @@ func @simple5a() {
 
 // -----
 
-func @simple5b() {
+func.func @simple5b() {
   // CHECK: Composed map: (d0) -> ((d0 + 6) ceildiv 2)
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
@@ -84,7 +84,7 @@ func @simple5b() {
 
 // -----
 
-func @simple5c() {
+func.func @simple5c() {
   // CHECK: Composed map: (d0) -> (d0 * 8 + 48)
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
@@ -95,7 +95,7 @@ func @simple5c() {
 
 // -----
 
-func @simple5d() {
+func.func @simple5d() {
   // CHECK: Composed map: (d0) -> ((d0 * 4) floordiv 3 + 8)
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
@@ -106,7 +106,7 @@ func @simple5d() {
 
 // -----
 
-func @simple5e() {
+func.func @simple5e() {
   // CHECK: Composed map: (d0) -> ((d0 + 6) ceildiv 8)
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 + 7)> } : () -> ()
@@ -116,7 +116,7 @@ func @simple5e() {
 
 // -----
 
-func @simple5f() {
+func.func @simple5f() {
   // CHECK: Composed map: (d0) -> ((d0 * 4 - 4) floordiv 3)
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 - 1)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0) -> (d0 * 4)> } : () -> ()
@@ -126,7 +126,7 @@ func @simple5f() {
 
 // -----
 
-func @perm_and_proj() {
+func.func @perm_and_proj() {
   // CHECK: Composed map: (d0, d1, d2, d3) -> (d1, d3, d0)
   "test_affine_map"() { affine_map = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2, d0)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0, d1, d2, d3) -> (d1, d0, d3)> } : () -> ()
@@ -135,7 +135,7 @@ func @perm_and_proj() {
 
 // -----
 
-func @symbols1() {
+func.func @symbols1() {
   // CHECK: Composed map: (d0)[s0] -> (d0 + s0 + 1, d0 - s0 - 1)
   "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 + s0, d0 - s0)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0, d1) -> (d0 + 1, d1 - 1)> } : () -> ()
@@ -144,7 +144,7 @@ func @symbols1() {
 
 // -----
 
-func @drop() {
+func.func @drop() {
   // CHECK: Composed map: (d0, d1, d2)[s0, s1] -> (d0 * 2 + d1 + d2 + s1)
   "test_affine_map"() { affine_map = affine_map<(d0, d1, d2)[s0, s1] -> (d0 + s1, d1 + s0, d0 + d1 + d2)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0, d1, d2) -> (d0 + d2)> } : () -> ()
@@ -153,7 +153,7 @@ func @drop() {
 
 // -----
 
-func @multi_symbols() {
+func.func @multi_symbols() {
   // CHECK: Composed map: (d0)[s0, s1, s2] -> (d0 + s1 + s2 + 1, d0 - s0 - s2 - 1)
   "test_affine_map"() { affine_map = affine_map<(d0)[s0] -> (d0 + s0, d0 - s0)> } : () -> ()
   "test_affine_map"() { affine_map = affine_map<(d0, d1)[s0, s1] -> (d0 + 1 + s1, d1 - 1 - s0)> } : () -> ()

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/uniform_divergent.mlir b/mlir/test/Dialect/Affine/SuperVectorize/uniform_divergent.mlir
index a4637730798b7..545f2d0813484 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/uniform_divergent.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/uniform_divergent.mlir
@@ -5,7 +5,7 @@
 // CHECK-LABEL: @uniform_arg
 // CHECK-SAME:  %[[in:.*]]: memref<512xf32>,
 // CHECK-SAME:  %[[uniform:.*]]: f32
-func @uniform_arg(%in : memref<512xf32>, %uniform : f32) {
+func.func @uniform_arg(%in : memref<512xf32>, %uniform : f32) {
   affine.for %i = 0 to 512 {
     %ld = affine.load %in[%i] : memref<512xf32>
     %add = arith.addf %ld, %uniform : f32
@@ -22,7 +22,7 @@ func @uniform_arg(%in : memref<512xf32>, %uniform : f32) {
 // CHECK-LABEL: @multi_use_uniform_arg
 // CHECK-SAME:  %[[in:.*]]: memref<512xf32>
 // CHECK-SAME:  %[[uniform:.*]]: f32
-func @multi_use_uniform_arg(%in : memref<512xf32>, %uniform : f32) {
+func.func @multi_use_uniform_arg(%in : memref<512xf32>, %uniform : f32) {
   affine.for %i = 0 to 512 {
     %ld = affine.load %in[%i] : memref<512xf32>
     %user0 = arith.addf %ld, %uniform : f32
@@ -40,7 +40,7 @@ func @multi_use_uniform_arg(%in : memref<512xf32>, %uniform : f32) {
 // -----
 
 // CHECK-LABEL: @uniform_load
-func @uniform_load(%A : memref<?x?xf32>, %C : memref<?x?xf32>) {
+func.func @uniform_load(%A : memref<?x?xf32>, %C : memref<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %N = memref.dim %A, %c0 : memref<?x?xf32>
   affine.for %i = 0 to %N {

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vector_utils.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vector_utils.mlir
index 198f840c5f501..79533942c787a 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vector_utils.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vector_utils.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt %s -affine-super-vectorizer-test -vector-shape-ratio 2 -vector-shape-ratio 5 -vector-shape-ratio 2 2>&1 | FileCheck %s -check-prefix=TEST-3x4x5x8
 // RUN: mlir-opt %s -affine-super-vectorizer-test -vectorize-affine-loop-nest 2>&1 | FileCheck %s -check-prefix=VECNEST
 
-func @vector_add_2d(%arg0: index, %arg1: index) -> f32 {
+func.func @vector_add_2d(%arg0: index, %arg1: index) -> f32 {
   // Nothing should be matched in this first block.
   // CHECK-NOT:matched: {{.*}} = memref.alloc{{.*}}
   // CHECK-NOT:matched: {{.*}} = arith.constant 0{{.*}}
@@ -38,7 +38,7 @@ func @vector_add_2d(%arg0: index, %arg1: index) -> f32 {
 }
 
 // VECNEST-LABEL: func @double_loop_nest
-func @double_loop_nest(%a: memref<20x30xf32>, %b: memref<20xf32>) {
+func.func @double_loop_nest(%a: memref<20x30xf32>, %b: memref<20xf32>) {
 
   affine.for %i = 0 to 20 {
     %b_ld = affine.load %b[%i] : memref<20xf32>

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
index b09a1b9316eb3..bc77a3fc2e9f2 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_1d.mlir
@@ -4,7 +4,7 @@
 // CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
 
 // CHECK-LABEL: func @vec1d_1
-func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -32,7 +32,7 @@ func @vec1d_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec1d_2
-func @vec1d_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec1d_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -58,7 +58,7 @@ func @vec1d_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec1d_3
-func @vec1d_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec1d_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -89,7 +89,7 @@ func @vec1d_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vector_add_2d
-func @vector_add_2d(%M : index, %N : index) -> f32 {
+func.func @vector_add_2d(%M : index, %N : index) -> f32 {
   %A = memref.alloc (%M, %N) : memref<?x?xf32, 0>
   %B = memref.alloc (%M, %N) : memref<?x?xf32, 0>
   %C = memref.alloc (%M, %N) : memref<?x?xf32, 0>
@@ -143,7 +143,7 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
 // -----
 
 // CHECK-LABEL: func @vec_constant_with_two_users
-func @vec_constant_with_two_users(%M : index, %N : index) -> (f32, f32) {
+func.func @vec_constant_with_two_users(%M : index, %N : index) -> (f32, f32) {
   %A = memref.alloc (%M, %N) : memref<?x?xf32, 0>
   %B = memref.alloc (%M) : memref<?xf32, 0>
   %f1 = arith.constant 1.0 : f32
@@ -166,7 +166,7 @@ func @vec_constant_with_two_users(%M : index, %N : index) -> (f32, f32) {
 // -----
 
 // CHECK-LABEL: func @vec_block_arg
-func @vec_block_arg(%A : memref<32x512xi32>) {
+func.func @vec_block_arg(%A : memref<32x512xi32>) {
   // CHECK:      affine.for %[[IV0:[arg0-9]+]] = 0 to 512 step 128 {
   // CHECK-NEXT:   affine.for %[[IV1:[arg0-9]+]] = 0 to 32 {
   // CHECK-NEXT:     %[[BROADCAST:.*]] = vector.broadcast %[[IV1]] : index to vector<128xindex>
@@ -186,7 +186,7 @@ func @vec_block_arg(%A : memref<32x512xi32>) {
 // CHECK-DAG: #[[$map0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 2 + d1 - 1)>
 // CHECK-DAG: #[[$map1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d2)>
 // CHECK-LABEL: func @vec_block_arg_2
-func @vec_block_arg_2(%A : memref<?x512xindex>) {
+func.func @vec_block_arg_2(%A : memref<?x512xindex>) {
   %c0 = arith.constant 0 : index
   %N = memref.dim %A, %c0 : memref<?x512xindex>
   // CHECK:      affine.for %[[IV0:[arg0-9]+]] = 0 to %{{.*}} {
@@ -216,7 +216,7 @@ func @vec_block_arg_2(%A : memref<?x512xindex>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_1
-func @vec_rejected_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -240,7 +240,7 @@ func @vec_rejected_1(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_2
-func @vec_rejected_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -264,7 +264,7 @@ func @vec_rejected_2(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_3
-func @vec_rejected_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -293,7 +293,7 @@ func @vec_rejected_3(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_4
-func @vec_rejected_4(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_4(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -320,7 +320,7 @@ func @vec_rejected_4(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_5
-func @vec_rejected_5(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_5(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -348,7 +348,7 @@ func @vec_rejected_5(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_6
-func @vec_rejected_6(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_6(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -378,7 +378,7 @@ func @vec_rejected_6(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_7
-func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -406,7 +406,7 @@ func @vec_rejected_7(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
 
 // CHECK-LABEL: func @vec_rejected_8
-func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -440,7 +440,7 @@ func @vec_rejected_8(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: #[[$map_proj_d0d1_0:map[0-9]+]] = affine_map<(d0, d1) -> (0)>
 
 // CHECK-LABEL: func @vec_rejected_9
-func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -473,7 +473,7 @@ func @vec_rejected_9(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 #set0 = affine_set<(i) : (i >= 0)>
 
 // CHECK-LABEL: func @vec_rejected_10
-func @vec_rejected_10(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_10(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -499,7 +499,7 @@ func @vec_rejected_10(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @vec_rejected_11
-func @vec_rejected_11(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+func.func @vec_rejected_11(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
   // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
   // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
   // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -529,7 +529,7 @@ func @vec_rejected_11(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
 
 // This should not vectorize due to the sequential dependence in the loop.
 // CHECK-LABEL: @vec_rejected_sequential
-func @vec_rejected_sequential(%A : memref<?xf32>) {
+func.func @vec_rejected_sequential(%A : memref<?xf32>) {
   %c0 = arith.constant 0 : index
   %N = memref.dim %A, %c0 : memref<?xf32>
   affine.for %i = 0 to %N {
@@ -544,7 +544,7 @@ func @vec_rejected_sequential(%A : memref<?xf32>) {
 // -----
 
 // CHECK-LABEL: @vec_no_load_store_ops
-func @vec_no_load_store_ops(%a: f32, %b: f32) {
+func.func @vec_no_load_store_ops(%a: f32, %b: f32) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 128 {
    %add = arith.addf %a, %b : f32
@@ -562,7 +562,7 @@ func @vec_no_load_store_ops(%a: f32, %b: f32) {
 // This should not be vectorized due to the unsupported block argument (%i).
 // Support for operands with linear evolution is needed.
 // CHECK-LABEL: @vec_rejected_unsupported_block_arg
-func @vec_rejected_unsupported_block_arg(%A : memref<512xi32>) {
+func.func @vec_rejected_unsupported_block_arg(%A : memref<512xi32>) {
   affine.for %i = 0 to 512 {
     // CHECK-NOT: vector
     %idx = arith.index_cast %i : index to i32
@@ -575,7 +575,7 @@ func @vec_rejected_unsupported_block_arg(%A : memref<512xi32>) {
 
 // '%i' loop is vectorized, including the inner reduction over '%j'.
 
-func @vec_non_vecdim_reduction(%in: memref<128x256xf32>, %out: memref<256xf32>) {
+func.func @vec_non_vecdim_reduction(%in: memref<128x256xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 128 iter_args(%red_iter = %cst) -> (f32) {
@@ -603,7 +603,7 @@ func @vec_non_vecdim_reduction(%in: memref<128x256xf32>, %out: memref<256xf32>)
 
 // '%i' loop is vectorized, including the inner reductions over '%j'.
 
-func @vec_non_vecdim_reductions(%in0: memref<128x256xf32>, %in1: memref<128x256xi32>,
+func.func @vec_non_vecdim_reductions(%in0: memref<128x256xf32>, %in1: memref<128x256xi32>,
                                 %out0: memref<256xf32>, %out1: memref<256xi32>) {
  %zero = arith.constant 0.000000e+00 : f32
  %one = arith.constant 1 : i32
@@ -642,7 +642,7 @@ func @vec_non_vecdim_reductions(%in0: memref<128x256xf32>, %in1: memref<128x256x
 
 // '%i' loop is vectorized, including the inner last value computation over '%j'.
 
-func @vec_no_vecdim_last_value(%in: memref<128x256xf32>, %out: memref<256xf32>) {
+func.func @vec_no_vecdim_last_value(%in: memref<128x256xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %last_val = affine.for %j = 0 to 128 iter_args(%last_iter = %cst) -> (f32) {
@@ -669,7 +669,7 @@ func @vec_no_vecdim_last_value(%in: memref<128x256xf32>, %out: memref<256xf32>)
 // The inner reduction loop '%j' is not vectorized if we do not request
 // reduction vectorization.
 
-func @vec_vecdim_reduction_rejected(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vec_vecdim_reduction_rejected(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
index bfa24aafc204e..b3cd3eed91d6a 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_2d.mlir
@@ -9,7 +9,7 @@
 // VECT-DAG: #[[$map_proj_d0d1_zerod1:map[0-9]+]] = affine_map<(d0, d1) -> (0, d1)>
 // VECT-DAG: #[[$map_proj_d0d1_d0zero:map[0-9]+]] = affine_map<(d0, d1) -> (d0, 0)>
 
-func @vec2d(%A : memref<?x?x?xf32>) {
+func.func @vec2d(%A : memref<?x?x?xf32>) {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c2 = arith.constant 2 : index
@@ -46,7 +46,7 @@ func @vec2d(%A : memref<?x?x?xf32>) {
    return
 }
 
-func @vector_add_2d(%M : index, %N : index) -> f32 {
+func.func @vector_add_2d(%M : index, %N : index) -> f32 {
   %A = memref.alloc (%M, %N) : memref<?x?xf32, 0>
   %B = memref.alloc (%M, %N) : memref<?x?xf32, 0>
   %C = memref.alloc (%M, %N) : memref<?x?xf32, 0>
@@ -99,7 +99,7 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
 }
 
 // VECT-LABEL: func @vectorize_matmul
-func @vectorize_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
+func.func @vectorize_matmul(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %M = memref.dim %arg0, %c0 : memref<?x?xf32>

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
index 4212a6d61b32f..17ff2c38c4eb5 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_3d.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -affine-super-vectorize="virtual-vector-size=32,64,256 test-fastest-varying=2,1,0" | FileCheck %s
 
-func @vec3d(%A : memref<?x?x?xf32>) {
+func.func @vec3d(%A : memref<?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
index 55527ab174cc2..6b8f03ba9c6b5 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_2d.mlir
@@ -3,7 +3,7 @@
 // Permutation maps used in vectorization.
 // CHECK: #[[map_proj_d0d1d2_d0d2:map[0-9]*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
 
-func @vec2d(%A : memref<?x?x?xf32>) {
+func.func @vec2d(%A : memref<?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
index a12dd9b9f67c6..05465d734d0b3 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_outer_loop_transpose_2d.mlir
@@ -3,7 +3,7 @@
 // Permutation maps used in vectorization.
 // CHECK: #[[map_proj_d0d1d2_d2d0:map[0-9]*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
 
-func @vec2d(%A : memref<?x?x?xf32>) {
+func.func @vec2d(%A : memref<?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -36,7 +36,7 @@ func @vec2d(%A : memref<?x?x?xf32>) {
   return
 }
 
-func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
+func.func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
index 7f660b8c0a179..036a7b2674127 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
@@ -2,7 +2,7 @@
 
 // The inner reduction loop '%j' is vectorized.
 
-func @vecdim_reduction(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
@@ -29,7 +29,7 @@ func @vecdim_reduction(%in: memref<256x512xf32>, %out: memref<256xf32>) {
 
 // -----
 
-func @vecdim_reduction_minf(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction_minf(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0x7F800000 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
@@ -56,7 +56,7 @@ func @vecdim_reduction_minf(%in: memref<256x512xf32>, %out: memref<256xf32>) {
 
 // -----
 
-func @vecdim_reduction_maxf(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction_maxf(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0xFF800000 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
@@ -83,7 +83,7 @@ func @vecdim_reduction_maxf(%in: memref<256x512xf32>, %out: memref<256xf32>) {
 
 // -----
 
-func @vecdim_reduction_minsi(%in: memref<256x512xi32>, %out: memref<256xi32>) {
+func.func @vecdim_reduction_minsi(%in: memref<256x512xi32>, %out: memref<256xi32>) {
  %cst = arith.constant 2147483647 : i32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (i32) {
@@ -110,7 +110,7 @@ func @vecdim_reduction_minsi(%in: memref<256x512xi32>, %out: memref<256xi32>) {
 
 // -----
 
-func @vecdim_reduction_maxsi(%in: memref<256x512xi32>, %out: memref<256xi32>) {
+func.func @vecdim_reduction_maxsi(%in: memref<256x512xi32>, %out: memref<256xi32>) {
  %cst = arith.constant -2147483648 : i32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (i32) {
@@ -137,7 +137,7 @@ func @vecdim_reduction_maxsi(%in: memref<256x512xi32>, %out: memref<256xi32>) {
 
 // -----
 
-func @vecdim_reduction_minui(%in: memref<256x512xi32>, %out: memref<256xi32>) {
+func.func @vecdim_reduction_minui(%in: memref<256x512xi32>, %out: memref<256xi32>) {
  %cst = arith.constant -1 : i32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (i32) {
@@ -164,7 +164,7 @@ func @vecdim_reduction_minui(%in: memref<256x512xi32>, %out: memref<256xi32>) {
 
 // -----
 
-func @vecdim_reduction_maxui(%in: memref<256x512xi32>, %out: memref<256xi32>) {
+func.func @vecdim_reduction_maxui(%in: memref<256x512xi32>, %out: memref<256xi32>) {
  %cst = arith.constant 0 : i32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (i32) {
@@ -194,7 +194,7 @@ func @vecdim_reduction_maxui(%in: memref<256x512xi32>, %out: memref<256xi32>) {
 // The inner reduction loop '%j' is vectorized. (The order of addf's operands is
 // different than in the previous test case).
 
-func @vecdim_reduction_comm(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction_comm(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
@@ -224,7 +224,7 @@ func @vecdim_reduction_comm(%in: memref<256x512xf32>, %out: memref<256xf32>) {
 // The inner reduction loop '%j' is vectorized. Transforming the input before
 // performing the accumulation doesn't cause any problem.
 
-func @vecdim_reduction_expsin(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction_expsin(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
@@ -257,7 +257,7 @@ func @vecdim_reduction_expsin(%in: memref<256x512xf32>, %out: memref<256xf32>) {
 
 // Two reductions at the same time. The inner reduction loop '%j' is vectorized.
 
-func @two_vecdim_reductions(%in: memref<256x512xf32>, %out_sum: memref<256xf32>, %out_prod: memref<256xf32>) {
+func.func @two_vecdim_reductions(%in: memref<256x512xf32>, %out_sum: memref<256xf32>, %out_prod: memref<256xf32>) {
  %cst = arith.constant 1.000000e+00 : f32
  affine.for %i = 0 to 256 {
    // Note that we pass the same constant '1.0' as initial values for both
@@ -300,7 +300,7 @@ func @two_vecdim_reductions(%in: memref<256x512xf32>, %out_sum: memref<256xf32>,
 
 // The integer case.
 
-func @two_vecdim_reductions_int(%in: memref<256x512xi64>, %out_sum: memref<256xi64>, %out_prod: memref<256xi64>) {
+func.func @two_vecdim_reductions_int(%in: memref<256x512xi64>, %out_sum: memref<256xi64>, %out_prod: memref<256xi64>) {
  %cst0 = arith.constant 0 : i64
  %cst1 = arith.constant 1 : i64
  affine.for %i = 0 to 256 {
@@ -336,7 +336,7 @@ func @two_vecdim_reductions_int(%in: memref<256x512xi64>, %out_sum: memref<256xi
 
 // The outer reduction loop '%j' is vectorized.
 
-func @vecdim_reduction_nested(%in: memref<256x512xf32>, %out: memref<1xf32>) {
+func.func @vecdim_reduction_nested(%in: memref<256x512xf32>, %out: memref<1xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  %outer_red = affine.for %j = 0 to 512 iter_args(%outer_iter = %cst) -> (f32) {
    %inner_red = affine.for %i = 0 to 256 iter_args(%inner_iter = %cst) -> (f32) {
@@ -371,7 +371,7 @@ func @vecdim_reduction_nested(%in: memref<256x512xf32>, %out: memref<1xf32>) {
 // The inner reduction loop '%j' computes partial sums as a side effect and
 // is not vectorized.
 
-func @vecdim_partial_sums_1_rejected(%in: memref<256x512xf32>, %out_sum: memref<256xf32>, %out_prod: memref<256xf32>, %out_partsum: memref<256x512xf32>) {
+func.func @vecdim_partial_sums_1_rejected(%in: memref<256x512xf32>, %out_sum: memref<256xf32>, %out_prod: memref<256xf32>, %out_partsum: memref<256x512xf32>) {
  %cst = arith.constant 1.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %sum, %prod = affine.for %j = 0 to 512 iter_args(%part_sum = %cst, %part_prod = %cst) -> (f32, f32) {
@@ -395,7 +395,7 @@ func @vecdim_partial_sums_1_rejected(%in: memref<256x512xf32>, %out_sum: memref<
 // The inner reduction loop '%j' computes partial sums as a side effect and
 // is not vectorized.
 
-func @vecdim_partial_sums_2_rejected(%in: memref<256x512xf32>, %out_sum: memref<256xf32>, %out_prod: memref<256xf32>, %out_partsum: memref<256x512xf32>) {
+func.func @vecdim_partial_sums_2_rejected(%in: memref<256x512xf32>, %out_sum: memref<256xf32>, %out_prod: memref<256xf32>, %out_partsum: memref<256x512xf32>) {
  %cst = arith.constant 1.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %sum, %prod = affine.for %j = 0 to 512 iter_args(%part_sum = %cst, %part_prod = %cst) -> (f32, f32) {
@@ -419,7 +419,7 @@ func @vecdim_partial_sums_2_rejected(%in: memref<256x512xf32>, %out_sum: memref<
 // The inner reduction loop '%j' performs an unknown reduction operation and is
 // not vectorized.
 
-func @vecdim_unknown_reduction_rejected(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_unknown_reduction_rejected(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 1.000000e+00 : f32
  %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
    %add = arith.addf %red_iter, %red_iter : f32
@@ -437,7 +437,7 @@ func @vecdim_unknown_reduction_rejected(%in: memref<256x512xf32>, %out: memref<2
 // The inner reduction loop '%j' doesn't perform any operation which is not
 // recognized as a standard reduction.
 
-func @vecdim_none_reduction_rejected(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_none_reduction_rejected(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 1.000000e+00 : f32
  %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) {
    affine.yield %red_iter : f32
@@ -454,7 +454,7 @@ func @vecdim_none_reduction_rejected(%in: memref<256x512xf32>, %out: memref<256x
 // The number of iterations is not divisable by the vector size, so a mask has
 // to be applied to the last update of the accumulator.
 
-func @vecdim_reduction_masked(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction_masked(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to 500 iter_args(%red_iter = %cst) -> (f32) {
@@ -487,7 +487,7 @@ func @vecdim_reduction_masked(%in: memref<256x512xf32>, %out: memref<256xf32>) {
 
 // The number of iteration is not known, so a mask has to be applied.
 
-func @vecdim_reduction_masked_unknown_ub(%in: memref<256x512xf32>, %out: memref<256xf32>, %bnd: index) {
+func.func @vecdim_reduction_masked_unknown_ub(%in: memref<256x512xf32>, %out: memref<256xf32>, %bnd: index) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to %bnd iter_args(%red_iter = %cst) -> (f32) {
@@ -521,7 +521,7 @@ func @vecdim_reduction_masked_unknown_ub(%in: memref<256x512xf32>, %out: memref<
 // The lower bound is nonzero, but the number of iterations is divisible by the
 // vector size, so masking is not needed.
 
-func @vecdim_reduction_nonzero_lb(%in: memref<256x512xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction_nonzero_lb(%in: memref<256x512xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 127 to 511 iter_args(%red_iter = %cst) -> (f32) {
@@ -542,7 +542,7 @@ func @vecdim_reduction_nonzero_lb(%in: memref<256x512xf32>, %out: memref<256xf32
 
 // The lower bound is unknown, so we need to create a mask.
 
-func @vecdim_reduction_masked_unknown_lb(%in: memref<256x512xf32>, %out: memref<256xf32>, %lb: index) {
+func.func @vecdim_reduction_masked_unknown_lb(%in: memref<256x512xf32>, %out: memref<256xf32>, %lb: index) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = %lb to 512 iter_args(%red_iter = %cst) -> (f32) {
@@ -569,7 +569,7 @@ func @vecdim_reduction_masked_unknown_lb(%in: memref<256x512xf32>, %out: memref<
 
 // The upper bound is a minimum expression.
 
-func @vecdim_reduction_complex_ub(%in: memref<256x512xf32>, %out: memref<256xf32>, %M: index, %N: index) {
+func.func @vecdim_reduction_complex_ub(%in: memref<256x512xf32>, %out: memref<256xf32>, %M: index, %N: index) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_red = affine.for %j = 0 to min affine_map<(d0, d1) -> (d0, d1*2)>(%M, %N) iter_args(%red_iter = %cst) -> (f32) {
@@ -598,7 +598,7 @@ func @vecdim_reduction_complex_ub(%in: memref<256x512xf32>, %out: memref<256xf32
 
 // The same mask is applied to both reductions.
 
-func @vecdim_two_reductions_masked(%in: memref<256x512xf32>, %out: memref<512xf32>) {
+func.func @vecdim_two_reductions_masked(%in: memref<256x512xf32>, %out: memref<512xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %final_sum, %final_expsum = affine.for %j = 0 to 500 iter_args(%sum_iter = %cst, %expsum_iter = %cst) -> (f32, f32) {

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction_2d.mlir
index 5f50154bedd7d..9377cf61949b9 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction_2d.mlir
@@ -5,7 +5,7 @@
 //       error message is produced.
 
 // expected-error at +1 {{Vectorizing reductions is supported only for 1-D vectors}}
-func @vecdim_reduction_2d(%in: memref<256x512x1024xf32>, %out: memref<256xf32>) {
+func.func @vecdim_reduction_2d(%in: memref<256x512x1024xf32>, %out: memref<256xf32>) {
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i = 0 to 256 {
    %sum_j = affine.for %j = 0 to 512 iter_args(%red_iter_j = %cst) -> (f32) {

diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
index 5ea7e84e267fb..f1662b78242ed 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_transpose_2d.mlir
@@ -3,7 +3,7 @@
 // Permutation maps used in vectorization.
 // CHECK-DAG: #[[map_proj_d0d1d2_d2d1:map[0-9]*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
 
-func @vec2d(%A : memref<?x?x?xf32>) {
+func.func @vec2d(%A : memref<?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -36,7 +36,7 @@ func @vec2d(%A : memref<?x?x?xf32>) {
   return
 }
 
-func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
+func.func @vec2d_imperfectly_nested(%A : memref<?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index

diff --git a/mlir/test/Dialect/Affine/affine-data-copy.mlir b/mlir/test/Dialect/Affine/affine-data-copy.mlir
index da74f28ce09c5..b845f9db960c2 100644
--- a/mlir/test/Dialect/Affine/affine-data-copy.mlir
+++ b/mlir/test/Dialect/Affine/affine-data-copy.mlir
@@ -23,7 +23,7 @@
 
 // CHECK-LABEL: func @matmul
 // FILTER-LABEL: func @matmul
-func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<4096x4096xf32>) -> memref<4096x4096xf32> {
+func.func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<4096x4096xf32>) -> memref<4096x4096xf32> {
   affine.for %i = 0 to 4096 step 128 {
     affine.for %j = 0 to 4096 step 128 {
       affine.for %k = 0 to 4096 step 128 {
@@ -130,7 +130,7 @@ func @matmul(%A: memref<4096x4096xf32>, %B: memref<4096x4096xf32>, %C: memref<40
 // CHECK-SMALL-LABEL: func @single_elt_buffers
 // FILTER-LABEL: func @single_elt_buffers
 // MEMREF_REGION-LABEL: func @single_elt_buffers
-func @single_elt_buffers(%arg0: memref<1024x1024xf32>, %arg1: memref<1024x1024xf32>, %arg2: memref<1024x1024xf32>) -> memref<1024x1024xf32> {
+func.func @single_elt_buffers(%arg0: memref<1024x1024xf32>, %arg1: memref<1024x1024xf32>, %arg2: memref<1024x1024xf32>) -> memref<1024x1024xf32> {
   affine.for %i = 0 to 1024 {
     affine.for %j = 0 to 1024 {
       affine.for %k = 0 to 1024 {
@@ -205,7 +205,7 @@ func @single_elt_buffers(%arg0: memref<1024x1024xf32>, %arg1: memref<1024x1024xf
 // CHECK-DAG: [[$MAP_MIN_UB2:map[0-9]+]] = affine_map<(d0) -> (4096, d0 + 100)>
 
 // CHECK-LABEL: func @min_upper_bound
-func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
+func.func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
   affine.for %i = 0 to 4096 step 100 {
     affine.for %ii = affine_map<(d0) -> (d0)>(%i) to min #map_ub(%i) {
       %5 = affine.load %A[%ii] : memref<4096xf32>
@@ -248,7 +248,7 @@ func @min_upper_bound(%A: memref<4096xf32>) -> memref<4096xf32> {
 // CHECK-LABEL: max_lower_bound(%{{.*}}: memref<2048x516xf64>,
 // CHECK-SAME: [[i:arg[0-9]+]]
 // CHECK-SAME: [[j:arg[0-9]+]]
-func @max_lower_bound(%M: memref<2048x516xf64>, %i : index, %j : index) {
+func.func @max_lower_bound(%M: memref<2048x516xf64>, %i : index, %j : index) {
   affine.for %ii = 0 to 2048 {
     affine.for %jj = max #lb()[%i, %j] to min #ub()[%i, %j] {
       affine.load %M[%ii, %jj] : memref<2048x516xf64>
@@ -274,7 +274,7 @@ func @max_lower_bound(%M: memref<2048x516xf64>, %i : index, %j : index) {
 // -----
 
 // CHECK-LABEL: func @empty_loops
-func @empty_loops(%arg0: memref<1024x1024xf64>) {
+func.func @empty_loops(%arg0: memref<1024x1024xf64>) {
   // Empty loops - so no copy generation happens.
   affine.for %i = 0 to 0 {
     affine.load %arg0[0, %i] : memref<1024x1024xf64>

diff --git a/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir b/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
index 22e9f5d7d77a1..31c86e4405208 100644
--- a/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
+++ b/mlir/test/Dialect/Affine/affine-loop-invariant-code-motion.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -affine-loop-invariant-code-motion -split-input-file | FileCheck %s
 
-func @nested_loops_both_having_invariant_code() {
+func.func @nested_loops_both_having_invariant_code() {
   %m = memref.alloc() : memref<10xf32>
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
@@ -29,7 +29,7 @@ func @nested_loops_both_having_invariant_code() {
 // The store-load forwarding can see through affine apply's since it relies on
 // dependence information.
 // CHECK-LABEL: func @store_affine_apply
-func @store_affine_apply() -> memref<10xf32> {
+func.func @store_affine_apply() -> memref<10xf32> {
   %cf7 = arith.constant 7.0 : f32
   %m = memref.alloc() : memref<10xf32>
   affine.for %arg0 = 0 to 10 {
@@ -48,7 +48,7 @@ func @store_affine_apply() -> memref<10xf32> {
 
 // -----
 
-func @nested_loops_code_invariant_to_both() {
+func.func @nested_loops_code_invariant_to_both() {
   %m = memref.alloc() : memref<10xf32>
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
@@ -70,7 +70,7 @@ func @nested_loops_code_invariant_to_both() {
 // -----
 
 // CHECK-LABEL: func @nested_loops_inner_loops_invariant_to_outermost_loop
-func @nested_loops_inner_loops_invariant_to_outermost_loop(%m : memref<10xindex>) {
+func.func @nested_loops_inner_loops_invariant_to_outermost_loop(%m : memref<10xindex>) {
   affine.for %arg0 = 0 to 20 {
     affine.for %arg1 = 0 to 30 {
       %v0 = affine.for %arg2 = 0 to 10 iter_args (%prevAccum = %arg1) -> index {
@@ -96,7 +96,7 @@ func @nested_loops_inner_loops_invariant_to_outermost_loop(%m : memref<10xindex>
 
 // -----
 
-func @single_loop_nothing_invariant() {
+func.func @single_loop_nothing_invariant() {
   %m1 = memref.alloc() : memref<10xf32>
   %m2 = memref.alloc() : memref<10xf32>
   affine.for %arg0 = 0 to 10 {
@@ -119,7 +119,7 @@ func @single_loop_nothing_invariant() {
 
 // -----
 
-func @invariant_code_inside_affine_if() {
+func.func @invariant_code_inside_affine_if() {
   %m = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
 
@@ -147,7 +147,7 @@ func @invariant_code_inside_affine_if() {
 
 // -----
 
-func @dependent_stores() {
+func.func @dependent_stores() {
   %m = memref.alloc() : memref<10xf32>
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
@@ -177,7 +177,7 @@ func @dependent_stores() {
 
 // -----
 
-func @independent_stores() {
+func.func @independent_stores() {
   %m = memref.alloc() : memref<10xf32>
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
@@ -207,7 +207,7 @@ func @independent_stores() {
 
 // -----
 
-func @load_dependent_store() {
+func.func @load_dependent_store() {
   %m = memref.alloc() : memref<10xf32>
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
@@ -236,7 +236,7 @@ func @load_dependent_store() {
 
 // -----
 
-func @load_after_load() {
+func.func @load_after_load() {
   %m = memref.alloc() : memref<10xf32>
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
@@ -266,7 +266,7 @@ func @load_after_load() {
 
 // -----
 
-func @invariant_affine_if() {
+func.func @invariant_affine_if() {
   %m = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
   affine.for %arg0 = 0 to 10 {
@@ -295,7 +295,7 @@ func @invariant_affine_if() {
 
 // -----
 
-func @invariant_affine_if2() {
+func.func @invariant_affine_if2() {
   %m = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
   affine.for %arg0 = 0 to 10 {
@@ -324,7 +324,7 @@ func @invariant_affine_if2() {
 
 // -----
 
-func @invariant_affine_nested_if() {
+func.func @invariant_affine_nested_if() {
   %m = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
   affine.for %arg0 = 0 to 10 {
@@ -358,7 +358,7 @@ func @invariant_affine_nested_if() {
 
 // -----
 
-func @invariant_affine_nested_if_else() {
+func.func @invariant_affine_nested_if_else() {
   %m = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
   affine.for %arg0 = 0 to 10 {
@@ -396,7 +396,7 @@ func @invariant_affine_nested_if_else() {
 
 // -----
 
-func @invariant_affine_nested_if_else2() {
+func.func @invariant_affine_nested_if_else2() {
   %m = memref.alloc() : memref<10xf32>
   %m2 = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
@@ -436,7 +436,7 @@ func @invariant_affine_nested_if_else2() {
 
 // -----
 
-func @invariant_affine_nested_if2() {
+func.func @invariant_affine_nested_if2() {
   %m = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
   affine.for %arg0 = 0 to 10 {
@@ -470,7 +470,7 @@ func @invariant_affine_nested_if2() {
 
 // -----
 
-func @invariant_affine_for_inside_affine_if() {
+func.func @invariant_affine_for_inside_affine_if() {
   %m = memref.alloc() : memref<10xf32>
   %cf8 = arith.constant 8.0 : f32
   affine.for %arg0 = 0 to 10 {
@@ -504,7 +504,7 @@ func @invariant_affine_for_inside_affine_if() {
 
 // -----
 
-func @invariant_constant_and_load() {
+func.func @invariant_constant_and_load() {
   %m = memref.alloc() : memref<100xf32>
   %m2 = memref.alloc() : memref<100xf32>
   affine.for %arg0 = 0 to 5 {
@@ -526,7 +526,7 @@ func @invariant_constant_and_load() {
 
 // -----
 
-func @nested_load_store_same_memref() {
+func.func @nested_load_store_same_memref() {
   %m = memref.alloc() : memref<10xf32>
   %cst = arith.constant 8.0 : f32
   %c0 = arith.constant 0 : index
@@ -551,7 +551,7 @@ func @nested_load_store_same_memref() {
 
 // -----
 
-func @nested_load_store_same_memref2() {
+func.func @nested_load_store_same_memref2() {
   %m = memref.alloc() : memref<10xf32>
   %cst = arith.constant 8.0 : f32
   %c0 = arith.constant 0 : index
@@ -578,7 +578,7 @@ func @nested_load_store_same_memref2() {
 // -----
 
 // CHECK-LABEL:   func @do_not_hoist_dependent_side_effect_free_op
-func @do_not_hoist_dependent_side_effect_free_op(%arg0: memref<10x512xf32>) {
+func.func @do_not_hoist_dependent_side_effect_free_op(%arg0: memref<10x512xf32>) {
   %0 = memref.alloca() : memref<1xf32>
   %cst = arith.constant 8.0 : f32
   affine.for %i = 0 to 512 {
@@ -608,7 +608,7 @@ func @do_not_hoist_dependent_side_effect_free_op(%arg0: memref<10x512xf32>) {
 // -----
 
 // CHECK-LABEL: func @vector_loop_nothing_invariant
-func @vector_loop_nothing_invariant() {
+func.func @vector_loop_nothing_invariant() {
   %m1 = memref.alloc() : memref<40xf32>
   %m2 = memref.alloc() : memref<40xf32>
   affine.for %arg0 = 0 to 10 {
@@ -630,7 +630,7 @@ func @vector_loop_nothing_invariant() {
 // -----
 
 // CHECK-LABEL: func @vector_loop_all_invariant
-func @vector_loop_all_invariant() {
+func.func @vector_loop_all_invariant() {
   %m1 = memref.alloc() : memref<4xf32>
   %m2 = memref.alloc() : memref<4xf32>
   %m3 = memref.alloc() : memref<4xf32>
@@ -656,7 +656,7 @@ func @vector_loop_all_invariant() {
 
 #set = affine_set<(d0): (d0 - 10 >= 0)>
 // CHECK-LABEL:   func @affine_if_not_invariant(
-func @affine_if_not_invariant(%buffer: memref<1024xf32>) -> f32 {
+func.func @affine_if_not_invariant(%buffer: memref<1024xf32>) -> f32 {
   %sum_init_0 = arith.constant 0.0 : f32
   %sum_init_1 = arith.constant 1.0 : f32
   %res = affine.for %i = 0 to 10 step 2 iter_args(%sum_iter = %sum_init_0) -> f32 {
@@ -690,7 +690,7 @@ func @affine_if_not_invariant(%buffer: memref<1024xf32>) -> f32 {
 // -----
 
 // CHECK-LABEL:   func @affine_for_not_invariant(
-func @affine_for_not_invariant(%in : memref<30x512xf32, 1>,
+func.func @affine_for_not_invariant(%in : memref<30x512xf32, 1>,
                                %out : memref<30x1xf32, 1>) {
   %sum_0 = arith.constant 0.0 : f32
   %cst_0 = arith.constant 1.1 : f32
@@ -720,7 +720,7 @@ func @affine_for_not_invariant(%in : memref<30x512xf32, 1>,
 // -----
 
 // CHECK-LABEL: func @use_of_iter_operands_invariant
-func @use_of_iter_operands_invariant(%m : memref<10xindex>) {
+func.func @use_of_iter_operands_invariant(%m : memref<10xindex>) {
   %sum_1 = arith.constant 0 : index
   %v0 = affine.for %arg1 = 0 to 11 iter_args (%prevAccum = %sum_1) -> index {
     %prod = arith.muli %sum_1, %sum_1 : index
@@ -739,7 +739,7 @@ func @use_of_iter_operands_invariant(%m : memref<10xindex>) {
 // -----
 
 // CHECK-LABEL: func @use_of_iter_args_not_invariant
-func @use_of_iter_args_not_invariant(%m : memref<10xindex>) {
+func.func @use_of_iter_args_not_invariant(%m : memref<10xindex>) {
   %sum_1 = arith.constant 0 : index
   %v0 = affine.for %arg1 = 0 to 11 iter_args (%prevAccum = %sum_1) -> index {
     %newAccum = arith.addi %prevAccum, %sum_1 : index

diff --git a/mlir/test/Dialect/Affine/affine-loop-normalize.mlir b/mlir/test/Dialect/Affine/affine-loop-normalize.mlir
index 7c4414efa8425..e33b036dea417 100644
--- a/mlir/test/Dialect/Affine/affine-loop-normalize.mlir
+++ b/mlir/test/Dialect/Affine/affine-loop-normalize.mlir
@@ -7,7 +7,7 @@
 // CHECK-DAG: [[$MAP2:#map[0-9]+]] = affine_map<(d0, d1) -> (d0 + d1)>
 
 // CHECK-LABEL: func @normalize_parallel()
-func @normalize_parallel() {
+func.func @normalize_parallel() {
   %cst = arith.constant 1.0 : f32
   %0 = memref.alloc() : memref<2x4xf32>
   // CHECK: affine.parallel (%[[i0:.*]], %[[j0:.*]]) = (0, 0) to (4, 2)
@@ -27,7 +27,7 @@ func @normalize_parallel() {
 // -----
 
 // CHECK-LABEL: func @relative_bounds
-func @relative_bounds(%arg: index) {
+func.func @relative_bounds(%arg: index) {
   // CHECK: affine.for %{{.*}} = 0 to 4
   affine.for %i = affine_map<(d0) -> (d0)>(%arg) to affine_map<(d0) -> (d0 + 4)>(%arg) {
   }
@@ -40,7 +40,7 @@ func @relative_bounds(%arg: index) {
 // parent block.
 
 // CHECK-LABEL: func @single_iteration_loop
-func @single_iteration_loop(%in: memref<1xf32>, %out: memref<1xf32>) {
+func.func @single_iteration_loop(%in: memref<1xf32>, %out: memref<1xf32>) {
   affine.for %i = 0 to 1 {
     %1 = affine.load %in[%i] : memref<1xf32>
     affine.store %1, %out[%i] : memref<1xf32>
@@ -68,7 +68,7 @@ func @single_iteration_loop(%in: memref<1xf32>, %out: memref<1xf32>) {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @simple_loop_nest(){
+func.func @simple_loop_nest(){
   affine.for %i0 = 2 to 32 step 2 {
     affine.for %i1 =  0 to 32 step 3 {
       "test.foo"(%i0, %i1) : (index, index) -> ()
@@ -97,7 +97,7 @@ func @simple_loop_nest(){
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @loop_with_unknown_upper_bound(%arg0: memref<?x?xf32>, %arg1: index) {
+func.func @loop_with_unknown_upper_bound(%arg0: memref<?x?xf32>, %arg1: index) {
   %c0 = arith.constant 0 : index
   %0 = memref.dim %arg0, %c0 : memref<?x?xf32>
   affine.for %i0 = 2 to %0 step 32 {
@@ -128,7 +128,7 @@ func @loop_with_unknown_upper_bound(%arg0: memref<?x?xf32>, %arg1: index) {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @loop_with_multiple_upper_bounds(%arg0: memref<?x?xf32>, %arg1 : index) {
+func.func @loop_with_multiple_upper_bounds(%arg0: memref<?x?xf32>, %arg1 : index) {
   %c0 = arith.constant 0 : index
   %0 = memref.dim %arg0, %c0 : memref<?x?xf32>
   affine.for %i0 = 2 to %0 step 32{
@@ -185,7 +185,7 @@ func @loop_with_multiple_upper_bounds(%arg0: memref<?x?xf32>, %arg1 : index) {
 #map3 = affine_map<() -> (0)>
 #map4 = affine_map<()[s0] -> (s0)>
 
-func @tiled_matmul(%0: memref<1024x1024xf32>, %1: memref<1024x1024xf32>, %2: memref<1024x1024xf32>) {
+func.func @tiled_matmul(%0: memref<1024x1024xf32>, %1: memref<1024x1024xf32>, %2: memref<1024x1024xf32>) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %3 = memref.dim %0, %c0 : memref<1024x1024xf32>

diff --git a/mlir/test/Dialect/Affine/canonicalize.mlir b/mlir/test/Dialect/Affine/canonicalize.mlir
index 7cebf8af9c19b..e6ba4ed8cdb3e 100644
--- a/mlir/test/Dialect/Affine/canonicalize.mlir
+++ b/mlir/test/Dialect/Affine/canonicalize.mlir
@@ -6,7 +6,7 @@
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (d0 + 1)>
 
 // CHECK-LABEL: func @compose_affine_maps_1dto2d_no_symbols() {
-func @compose_affine_maps_1dto2d_no_symbols() {
+func.func @compose_affine_maps_1dto2d_no_symbols() {
   %0 = memref.alloc() : memref<4x4xf32>
 
   affine.for %i0 = 0 to 15 {
@@ -52,7 +52,7 @@ func @compose_affine_maps_1dto2d_no_symbols() {
 // CHECK-DAG: #[[$MAP7a:.*]] = affine_map<(d0) -> (d0 * 2 + 1)>
 
 // CHECK-LABEL: func @compose_affine_maps_1dto2d_with_symbols() {
-func @compose_affine_maps_1dto2d_with_symbols() {
+func.func @compose_affine_maps_1dto2d_with_symbols() {
   %0 = memref.alloc() : memref<4x4xf32>
 
   affine.for %i0 = 0 to 15 {
@@ -94,7 +94,7 @@ func @compose_affine_maps_1dto2d_with_symbols() {
 // CHECK-DAG: #[[$MAP8a:.*]] = affine_map<(d0, d1) -> (d1 + (d0 ceildiv 8) * 8 - (d1 floordiv 8) * 8)>
 
 // CHECK-LABEL: func @compose_affine_maps_2d_tile
-func @compose_affine_maps_2d_tile(%0: memref<16x32xf32>, %1: memref<16x32xf32>) {
+func.func @compose_affine_maps_2d_tile(%0: memref<16x32xf32>, %1: memref<16x32xf32>) {
   %c4 = arith.constant 4 : index
   %c8 = arith.constant 8 : index
 
@@ -134,7 +134,7 @@ func @compose_affine_maps_2d_tile(%0: memref<16x32xf32>, %1: memref<16x32xf32>)
 // CHECK-DAG: #[[$MAP12:.*]] = affine_map<(d0) -> (d0 * 7 - 49)>
 
 // CHECK-LABEL: func @compose_affine_maps_dependent_loads() {
-func @compose_affine_maps_dependent_loads() {
+func.func @compose_affine_maps_dependent_loads() {
   %0 = memref.alloc() : memref<16x32xf32>
   %1 = memref.alloc() : memref<16x32xf32>
 
@@ -186,7 +186,7 @@ func @compose_affine_maps_dependent_loads() {
 // CHECK-DAG: #[[$MAP13B:.*]] = affine_map<(d0) -> ((d0 * 4 - 4) floordiv 3)>
 
 // CHECK-LABEL: func @compose_affine_maps_diamond_dependency
-func @compose_affine_maps_diamond_dependency(%arg0: f32, %arg1: memref<4x4xf32>) {
+func.func @compose_affine_maps_diamond_dependency(%arg0: f32, %arg1: memref<4x4xf32>) {
   affine.for %i0 = 0 to 15 {
     %a = affine.apply affine_map<(d0) -> (d0 - 1)> (%i0)
     %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
@@ -207,7 +207,7 @@ func @compose_affine_maps_diamond_dependency(%arg0: f32, %arg1: memref<4x4xf32>)
 // CHECK-DAG: #[[$MAP14:.*]] = affine_map<()[s0, s1] -> ((s0 * 4 + s1 * 4) floordiv s0)>
 
 // CHECK-LABEL: func @compose_affine_maps_multiple_symbols
-func @compose_affine_maps_multiple_symbols(%arg0: index, %arg1: index) -> index {
+func.func @compose_affine_maps_multiple_symbols(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (s0 + d0)> (%arg0)[%arg1]
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
   %e = affine.apply affine_map<(d0)[s0] -> (d0 floordiv s0)> (%c)[%arg1]
@@ -218,7 +218,7 @@ func @compose_affine_maps_multiple_symbols(%arg0: index, %arg1: index) -> index
 // -----
 
 // CHECK-LABEL: func @arg_used_as_dim_and_symbol
-func @arg_used_as_dim_and_symbol(%arg0: memref<100x100xf32>, %arg1: index, %arg2: f32) -> (memref<100x100xf32, 1>, memref<1xi32>) {
+func.func @arg_used_as_dim_and_symbol(%arg0: memref<100x100xf32>, %arg1: index, %arg2: f32) -> (memref<100x100xf32, 1>, memref<1xi32>) {
   %c9 = arith.constant 9 : index
   %1 = memref.alloc() : memref<100x100xf32, 1>
   %2 = memref.alloc() : memref<1xi32>
@@ -238,7 +238,7 @@ func @arg_used_as_dim_and_symbol(%arg0: memref<100x100xf32>, %arg1: index, %arg2
 // -----
 
 // CHECK-LABEL: func @trivial_maps
-func @trivial_maps() {
+func.func @trivial_maps() {
   // CHECK-NOT: affine.apply
 
   %0 = memref.alloc() : memref<10xf32>
@@ -261,7 +261,7 @@ func @trivial_maps() {
 // CHECK-DAG: #[[$MAP15:.*]] = affine_map<()[s0] -> (s0 - 42)>
 
 // CHECK-LABEL: func @partial_fold_map
-func @partial_fold_map(%arg1: index, %arg2: index) -> index {
+func.func @partial_fold_map(%arg1: index, %arg2: index) -> index {
   // TODO: Constant fold one index into affine.apply
   %c42 = arith.constant 42 : index
   %2 = affine.apply affine_map<(d0, d1) -> (d0 - d1)> (%arg1, %c42)
@@ -274,7 +274,7 @@ func @partial_fold_map(%arg1: index, %arg2: index) -> index {
 // CHECK-DAG: #[[$MAP_symbolic_composition_a:.*]] = affine_map<()[s0] -> (s0 * 512)>
 
 // CHECK-LABEL: func @symbolic_composition_a(%{{.*}}: index, %{{.*}}: index) -> index {
-func @symbolic_composition_a(%arg0: index, %arg1: index) -> index {
+func.func @symbolic_composition_a(%arg0: index, %arg1: index) -> index {
   %0 = affine.apply affine_map<(d0) -> (d0 * 4)>(%arg0)
   %1 = affine.apply affine_map<()[s0, s1] -> (8 * s0)>()[%0, %arg0]
   %2 = affine.apply affine_map<()[s0, s1] -> (16 * s1)>()[%arg1, %1]
@@ -287,7 +287,7 @@ func @symbolic_composition_a(%arg0: index, %arg1: index) -> index {
 // CHECK-DAG: #[[$MAP_symbolic_composition_b:.*]] = affine_map<()[s0] -> (s0 * 4)>
 
 // CHECK-LABEL: func @symbolic_composition_b(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
-func @symbolic_composition_b(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
+func.func @symbolic_composition_b(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
   %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
   %1 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %0]
   // CHECK: %{{.*}} = affine.apply #[[$MAP_symbolic_composition_b]]()[%{{.*}}]
@@ -299,7 +299,7 @@ func @symbolic_composition_b(%arg0: index, %arg1: index, %arg2: index, %arg3: in
 // CHECK-DAG: #[[$MAP_symbolic_composition_c:.*]] = affine_map<()[s0, s1] -> (s0 * 3 + s1)>
 
 // CHECK-LABEL: func @symbolic_composition_c(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
-func @symbolic_composition_c(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
+func.func @symbolic_composition_c(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
   %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
   %1 = affine.apply affine_map<(d0) -> (d0)>(%arg1)
   %2 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %1]
@@ -314,7 +314,7 @@ func @symbolic_composition_c(%arg0: index, %arg1: index, %arg2: index, %arg3: in
 // CHECK-LABEL: func @symbolic_composition_d(
 //  CHECK-SAME:   %[[ARG0:[0-9a-zA-Z]+]]: index
 //  CHECK-SAME:   %[[ARG1:[0-9a-zA-Z]+]]: index
-func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
+func.func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: index) -> index {
   %0 = affine.apply affine_map<(d0) -> (d0)>(%arg0)
   %1 = affine.apply affine_map<()[s0] -> (s0)>()[%arg1]
   %2 = affine.apply affine_map<()[s0, s1, s2, s3] -> (s0 + s1 + s2 + s3)>()[%0, %0, %0, %1]
@@ -327,7 +327,7 @@ func @symbolic_composition_d(%arg0: index, %arg1: index, %arg2: index, %arg3: in
 // CHECK-DAG: #[[$MAP_mix_dims_and_symbols_b:.*]] = affine_map<()[s0, s1] -> (s0 * 42 + s1 + 6)>
 
 // CHECK-LABEL: func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
-func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
+func.func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
   // CHECK: {{.*}} = affine.apply #[[$MAP_mix_dims_and_symbols_b]]()[%{{.*}}, %{{.*}}]
@@ -340,7 +340,7 @@ func @mix_dims_and_symbols_b(%arg0: index, %arg1: index) -> index {
 // CHECK-DAG: #[[$MAP_mix_dims_and_symbols_c:.*]] = affine_map<()[s0, s1] -> (s0 * 168 + s1 * 4 - 4)>
 
 // CHECK-LABEL: func @mix_dims_and_symbols_c(%arg0: index, %arg1: index) -> index {
-func @mix_dims_and_symbols_c(%arg0: index, %arg1: index) -> index {
+func.func @mix_dims_and_symbols_c(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
@@ -353,7 +353,7 @@ func @mix_dims_and_symbols_c(%arg0: index, %arg1: index) -> index {
 // CHECK-DAG: #[[$MAP_mix_dims_and_symbols_d:.*]] = affine_map<()[s0, s1] -> ((s0 * 42 + s1 + 6) ceildiv 8)>
 
 // CHECK-LABEL: func @mix_dims_and_symbols_d(%arg0: index, %arg1: index) -> index {
-func @mix_dims_and_symbols_d(%arg0: index, %arg1: index) -> index {
+func.func @mix_dims_and_symbols_d(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
@@ -367,7 +367,7 @@ func @mix_dims_and_symbols_d(%arg0: index, %arg1: index) -> index {
 // CHECK-DAG: #[[$MAP_mix_dims_and_symbols_e:.*]] = affine_map<()[s0, s1] -> ((s0 * 168 + s1 * 4 - 4) floordiv 3)>
 
 // CHECK-LABEL: func @mix_dims_and_symbols_e(%arg0: index, %arg1: index) -> index {
-func @mix_dims_and_symbols_e(%arg0: index, %arg1: index) -> index {
+func.func @mix_dims_and_symbols_e(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
@@ -380,7 +380,7 @@ func @mix_dims_and_symbols_e(%arg0: index, %arg1: index) -> index {
 // -----
 
 // CHECK-LABEL: func @mix_dims_and_symbols_f(%arg0: index, %arg1: index) -> index {
-func @mix_dims_and_symbols_f(%arg0: index, %arg1: index) -> index {
+func.func @mix_dims_and_symbols_f(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] -> (d0 - 1 + 42 * s0)> (%arg0)[%arg1]
   %b = affine.apply affine_map<(d0) -> (d0 + 7)> (%a)
   %c = affine.apply affine_map<(d0) -> (d0 * 4)> (%a)
@@ -397,7 +397,7 @@ func @mix_dims_and_symbols_f(%arg0: index, %arg1: index) -> index {
 // CHECK-DAG: #[[$MAP_symbolic_composition_b:.*]] = affine_map<()[s0] -> (s0 * 4)>
 
 // CHECK-LABEL: func @mix_dims_and_symbols_g(%arg0: index, %arg1: index) -> (index, index, index) {
-func @mix_dims_and_symbols_g(%M: index, %N: index) -> (index, index, index) {
+func.func @mix_dims_and_symbols_g(%M: index, %N: index) -> (index, index, index) {
   %K = affine.apply affine_map<(d0) -> (4*d0)> (%M)
   %res1 = affine.apply affine_map<()[s0, s1] -> (4 * s0)>()[%N, %K]
   %res2 = affine.apply affine_map<()[s0, s1] -> (s1)>()[%N, %K]
@@ -413,7 +413,7 @@ func @mix_dims_and_symbols_g(%M: index, %N: index) -> (index, index, index) {
 // CHECK-DAG: #[[$symbolic_semi_affine:.*]] = affine_map<(d0)[s0] -> (d0 floordiv (s0 + 1))>
 
 // CHECK-LABEL: func @symbolic_semi_affine(%arg0: index, %arg1: index, %arg2: memref<?xf32>) {
-func @symbolic_semi_affine(%M: index, %N: index, %A: memref<?xf32>) {
+func.func @symbolic_semi_affine(%M: index, %N: index, %A: memref<?xf32>) {
   %f1 = arith.constant 1.0 : f32
   affine.for %i0 = 1 to 100 {
     %1 = affine.apply affine_map<()[s0] -> (s0 + 1)> ()[%M]
@@ -430,7 +430,7 @@ func @symbolic_semi_affine(%M: index, %N: index, %A: memref<?xf32>) {
 // CHECK: #[[$MAP1:.*]] = affine_map<()[s0] -> (100, s0)>
 
 // CHECK-LABEL:  func @constant_fold_bounds(%arg0: index) {
-func @constant_fold_bounds(%N : index) {
+func.func @constant_fold_bounds(%N : index) {
   // CHECK:      arith.constant 3 : index
   // CHECK-NEXT: "foo"() : () -> index
   %c9 = arith.constant 9 : index
@@ -461,7 +461,7 @@ func @constant_fold_bounds(%N : index) {
 // -----
 
 // CHECK-LABEL:  func @fold_empty_loops()
-func @fold_empty_loops() -> index {
+func.func @fold_empty_loops() -> index {
   %c0 = arith.constant 0 : index
   affine.for %i = 0 to 10 {
   }
@@ -476,7 +476,7 @@ func @fold_empty_loops() -> index {
 // -----
 
 // CHECK-LABEL:  func @fold_empty_loop()
-func @fold_empty_loop() -> (index, index) {
+func.func @fold_empty_loop() -> (index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -492,7 +492,7 @@ func @fold_empty_loop() -> (index, index) {
 // -----
 
 // CHECK-LABEL:  func @fold_empty_loops_trip_count_1()
-func @fold_empty_loops_trip_count_1() -> (index, index, index, index) {
+func.func @fold_empty_loops_trip_count_1() -> (index, index, index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -512,7 +512,7 @@ func @fold_empty_loops_trip_count_1() -> (index, index, index, index) {
 // -----
 
 // CHECK-LABEL:  func @fold_empty_loop_trip_count_0()
-func @fold_empty_loop_trip_count_0() -> (index, index) {
+func.func @fold_empty_loop_trip_count_0() -> (index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c2 = arith.constant 2 : index
@@ -528,7 +528,7 @@ func @fold_empty_loop_trip_count_0() -> (index, index) {
 // -----
 
 // CHECK-LABEL:  func @fold_empty_loop_trip_count_unknown
-func @fold_empty_loop_trip_count_unknown(%in : index) -> (index, index) {
+func.func @fold_empty_loop_trip_count_unknown(%in : index) -> (index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %res:2 = affine.for %i = 0 to %in iter_args(%arg0 = %c0, %arg1 = %c1) -> (index, index) {
@@ -543,7 +543,7 @@ func @fold_empty_loop_trip_count_unknown(%in : index) -> (index, index) {
 // -----
 
 // CHECK-LABEL:  func @empty_loops_not_folded_1
-func @empty_loops_not_folded_1(%in : index) -> index {
+func.func @empty_loops_not_folded_1(%in : index) -> index {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   // CHECK: affine.for
@@ -556,7 +556,7 @@ func @empty_loops_not_folded_1(%in : index) -> index {
 // -----
 
 // CHECK-LABEL:  func @empty_loops_not_folded_2
-func @empty_loops_not_folded_2(%in : index) -> (index, index) {
+func.func @empty_loops_not_folded_2(%in : index) -> (index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   // CHECK: affine.for
@@ -569,7 +569,7 @@ func @empty_loops_not_folded_2(%in : index) -> (index, index) {
 // -----
 
 // CHECK-LABEL:  func @empty_loops_not_folded_3
-func @empty_loops_not_folded_3() -> (index, index) {
+func.func @empty_loops_not_folded_3() -> (index, index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   // CHECK: affine.for
@@ -583,7 +583,7 @@ func @empty_loops_not_folded_3() -> (index, index) {
 
 // CHECK-LABEL:  func @fold_zero_iter_loops
 // CHECK-SAME: %[[ARG:.*]]: index
-func @fold_zero_iter_loops(%in : index) -> index {
+func.func @fold_zero_iter_loops(%in : index) -> index {
   %c1 = arith.constant 1 : index
   affine.for %i = 0 to 0 {
     affine.for %j = 0 to -1 {
@@ -604,7 +604,7 @@ func @fold_zero_iter_loops(%in : index) -> index {
 // CHECK-LABEL: func @canonicalize_affine_if
 //  CHECK-SAME:   %[[M:[0-9a-zA-Z]*]]: index,
 //  CHECK-SAME:   %[[N:[0-9a-zA-Z]*]]: index)
-func @canonicalize_affine_if(%M : index, %N : index) {
+func.func @canonicalize_affine_if(%M : index, %N : index) {
   %c1022 = arith.constant 1022 : index
   // Drop unused operand %M, propagate %c1022, and promote %N to symbolic.
   affine.for %i = 0 to 1024 {
@@ -628,7 +628,7 @@ func @canonicalize_affine_if(%M : index, %N : index) {
 // CHECK-LABEL: func @canonicalize_bounds
 // CHECK-SAME: %[[M:.*]]: index,
 // CHECK-SAME: %[[N:.*]]: index)
-func @canonicalize_bounds(%M : index, %N : index) {
+func.func @canonicalize_bounds(%M : index, %N : index) {
   %c0 = arith.constant 0 : index
   %c1024 = arith.constant 1024 : index
   // Drop unused operand %N, drop duplicate operand %M, propagate %c1024, and
@@ -655,7 +655,7 @@ func @canonicalize_bounds(%M : index, %N : index) {
 // Compose maps into affine load and store ops.
 
 // CHECK-LABEL: @compose_into_affine_load_store
-func @compose_into_affine_load_store(%A : memref<1024xf32>, %u : index) {
+func.func @compose_into_affine_load_store(%A : memref<1024xf32>, %u : index) {
   // CHECK: affine.for %[[IV:.*]] = 0 to 1024
   affine.for %i = 0 to 1024 {
     // Make sure the unused operand (%u below) gets dropped as well.
@@ -676,7 +676,7 @@ func @compose_into_affine_load_store(%A : memref<1024xf32>, %u : index) {
 
 // -----
 
-func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
   %c511 = arith.constant 511 : index
   %c1 = arith.constant 0 : index
   %0 = affine.min affine_map<(d0)[s0] -> (1000, d0 + 512, s0 + 1)> (%c1)[%c511]
@@ -689,7 +689,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
   %c3 = arith.constant 3 : index
   %c20 = arith.constant 20 : index
   %0 = affine.min affine_map<(d0)[s0] -> (1000, d0 floordiv 4, (s0 mod 5) + 1)> (%c20)[%c3]
@@ -702,7 +702,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
   %c511 = arith.constant 511 : index
   %c1 = arith.constant 0 : index
   %0 = affine.max affine_map<(d0)[s0] -> (1000, d0 + 512, s0 + 1)> (%c1)[%c511]
@@ -715,7 +715,7 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
   %c3 = arith.constant 3 : index
   %c20 = arith.constant 20 : index
   %0 = affine.max affine_map<(d0)[s0] -> (1000, d0 floordiv 4, (s0 mod 5) + 1)> (%c20)[%c3]
@@ -730,7 +730,7 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // CHECK: #[[$MAP:.*]] = affine_map<(d0, d1) -> (d1 - 2, d0)>
 
-func @affine_min(%arg0: index) {
+func.func @affine_min(%arg0: index) {
   affine.for %i = 0 to %arg0 {
     affine.for %j = 0 to %arg0 {
       %c2 = arith.constant 2 : index
@@ -756,7 +756,7 @@ func @affine_min(%arg0: index) {
 // CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (1024, s0 - s1 * 1024)>
 
 // CHECK: func @rep(%[[ARG0:.*]]: index, %[[ARG1:.*]]: index)
-func @rep(%arg0 : index, %arg1 : index) -> index {
+func.func @rep(%arg0 : index, %arg1 : index) -> index {
   // CHECK-NOT: arith.constant
   %c0 = arith.constant 0 : index
   %c1024 = arith.constant 1024 : index
@@ -772,7 +772,7 @@ func @rep(%arg0 : index, %arg1 : index) -> index {
 
 // CHECK-DAG: #[[ub:.*]] = affine_map<()[s0] -> (s0 + 2)>
 
-func @drop_duplicate_bounds(%N : index) {
+func.func @drop_duplicate_bounds(%N : index) {
   // affine.for %i = max #lb(%arg0) to min #ub(%arg0)
   affine.for %i = max affine_map<(d0) -> (d0, d0)>(%N) to min affine_map<(d0) -> (d0 + 2, d0 + 2)>(%N) {
     "foo"() : () -> ()
@@ -787,7 +787,7 @@ func @drop_duplicate_bounds(%N : index) {
 #map3 = affine_map<(d0) -> (d0 * 5)>
 
 // CHECK-LABEL: func @affine_parallel_const_bounds
-func @affine_parallel_const_bounds() {
+func.func @affine_parallel_const_bounds() {
   %cst = arith.constant 1.0 : f32
   %c0 = arith.constant 0 : index
   %c4 = arith.constant 4 : index
@@ -805,7 +805,7 @@ func @affine_parallel_const_bounds() {
 
 // -----
 
-func @compose_affine_maps_div_symbol(%A : memref<i64>, %i0 : index, %i1 : index) {
+func.func @compose_affine_maps_div_symbol(%A : memref<i64>, %i0 : index, %i1 : index) {
   %0 = affine.apply affine_map<()[s0] -> (2 * s0)> ()[%i0]
   %1 = affine.apply affine_map<()[s0] -> (3 * s0)> ()[%i0]
   %2 = affine.apply affine_map<(d0)[s0, s1] -> (d0 mod s1 + s0 * s1 + s0 * 4)> (%i1)[%0, %1]
@@ -825,7 +825,7 @@ func @compose_affine_maps_div_symbol(%A : memref<i64>, %i0 : index, %i1 : index)
 
 // CHECK: func @deduplicate_affine_min_expressions
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @deduplicate_affine_min_expressions(%i0: index, %i1: index) -> index {
+func.func @deduplicate_affine_min_expressions(%i0: index, %i1: index) -> index {
   // CHECK:  affine.min #[[MAP]]()[%[[I0]], %[[I1]]]
   %0 = affine.min affine_map<()[s0, s1] -> (s0 + s1, s0 * s1, s1 + s0, s0 * s1)> ()[%i0, %i1]
   return %0: index
@@ -837,7 +837,7 @@ func @deduplicate_affine_min_expressions(%i0: index, %i1: index) -> index {
 
 // CHECK: func @deduplicate_affine_max_expressions
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @deduplicate_affine_max_expressions(%i0: index, %i1: index) -> index {
+func.func @deduplicate_affine_max_expressions(%i0: index, %i1: index) -> index {
   // CHECK:  affine.max #[[MAP]]()[%[[I0]], %[[I1]]]
   %0 = affine.max affine_map<()[s0, s1] -> (s0 + s1, s0 * s1, s1 + s0, s0 * s1)> ()[%i0, %i1]
   return %0: index
@@ -850,7 +850,7 @@ func @deduplicate_affine_max_expressions(%i0: index, %i1: index) -> index {
 
 // CHECK: func @merge_affine_min_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index, %[[I3:.+]]: index)
-func @merge_affine_min_ops(%i0: index, %i1: index, %i2: index, %i3: index) -> (index, index) {
+func.func @merge_affine_min_ops(%i0: index, %i1: index, %i2: index, %i3: index) -> (index, index) {
   %0 = affine.min affine_map<(d0)[s0] -> (16, d0 - s0)> (%i0)[%i1]
 
  // CHECK: affine.min #[[MAP0]]()[%[[I2]], %[[I1]], %[[I0]]]
@@ -867,7 +867,7 @@ func @merge_affine_min_ops(%i0: index, %i1: index, %i2: index, %i3: index) -> (i
 
 // CHECK: func @merge_multiple_affine_min_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
-func @merge_multiple_affine_min_ops(%i0: index, %i1: index, %i2: index) -> index {
+func.func @merge_multiple_affine_min_ops(%i0: index, %i1: index, %i2: index) -> index {
   %0 = affine.min affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   %1 = affine.min affine_map<()[s0] -> (s0 + 8, s0 * 4)> ()[%i1]
   // CHECK: affine.min #[[MAP]]()[%[[I2]], %[[I0]], %[[I1]]]
@@ -881,7 +881,7 @@ func @merge_multiple_affine_min_ops(%i0: index, %i1: index, %i2: index) -> index
 
 // CHECK: func @merge_multiple_uses_of_affine_min_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @merge_multiple_uses_of_affine_min_ops(%i0: index, %i1: index) -> index {
+func.func @merge_multiple_uses_of_affine_min_ops(%i0: index, %i1: index) -> index {
   %0 = affine.min affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   // CHECK: affine.min #[[MAP]]()[%[[I1]], %[[I0]]]
   %2 = affine.min affine_map<()[s0, s1, s2] -> (s0, s1, s2 * 2)> ()[%0, %0, %i1]
@@ -895,7 +895,7 @@ func @merge_multiple_uses_of_affine_min_ops(%i0: index, %i1: index) -> index {
 
 // CHECK: func @merge_mixed_uses_of_affine_min_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @merge_mixed_uses_of_affine_min_ops(%i0: index, %i1: index) -> index {
+func.func @merge_mixed_uses_of_affine_min_ops(%i0: index, %i1: index) -> index {
   // CHECK: %[[AFFINE:.+]] = affine.min #[[MAP0]]()[%[[I0]]]
   %0 = affine.min affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   // %0 is bound to a symbol that is both a standalone expression and a part
@@ -908,7 +908,7 @@ func @merge_mixed_uses_of_affine_min_ops(%i0: index, %i1: index) -> index {
 // -----
 
 // CHECK-LABEL: func @dont_merge_affine_min_if_not_single_dim
-func @dont_merge_affine_min_if_not_single_dim(%i0: index, %i1: index, %i2: index) -> index {
+func.func @dont_merge_affine_min_if_not_single_dim(%i0: index, %i1: index, %i2: index) -> index {
   // CHECK-COUNT-2: affine.min
   %0 = affine.min affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   %1 = affine.min affine_map<(d0)[s0] -> (s0 + 4, 7 + d0)> (%0)[%i2]
@@ -918,7 +918,7 @@ func @dont_merge_affine_min_if_not_single_dim(%i0: index, %i1: index, %i2: index
 // -----
 
 // CHECK-LABEL: func @dont_merge_affine_min_if_not_single_sym
-func @dont_merge_affine_min_if_not_single_sym(%i0: index, %i1: index, %i2: index) -> index {
+func.func @dont_merge_affine_min_if_not_single_sym(%i0: index, %i1: index, %i2: index) -> index {
   // CHECK-COUNT-2: affine.min
   %0 = affine.min affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   %1 = affine.min affine_map<()[s0, s1] -> (s0 + 4, 7 + s1)> ()[%0, %i2]
@@ -932,7 +932,7 @@ func @dont_merge_affine_min_if_not_single_sym(%i0: index, %i1: index, %i2: index
 
 // CHECK: func @merge_affine_max_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index, %[[I3:.+]]: index)
-func @merge_affine_max_ops(%i0: index, %i1: index, %i2: index, %i3: index) -> (index, index) {
+func.func @merge_affine_max_ops(%i0: index, %i1: index, %i2: index, %i3: index) -> (index, index) {
   %0 = affine.max affine_map<(d0)[s0] -> (16, d0 - s0)> (%i0)[%i1]
 
  // CHECK: affine.max #[[MAP0]]()[%[[I2]], %[[I1]], %[[I0]]]
@@ -949,7 +949,7 @@ func @merge_affine_max_ops(%i0: index, %i1: index, %i2: index, %i3: index) -> (i
 
 // CHECK: func @merge_multiple_affine_max_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index, %[[I2:.+]]: index)
-func @merge_multiple_affine_max_ops(%i0: index, %i1: index, %i2: index) -> index {
+func.func @merge_multiple_affine_max_ops(%i0: index, %i1: index, %i2: index) -> index {
   %0 = affine.max affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   %1 = affine.max affine_map<()[s0] -> (s0 + 8, s0 * 4)> ()[%i1]
   // CHECK: affine.max #[[MAP]]()[%[[I2]], %[[I0]], %[[I1]]]
@@ -963,7 +963,7 @@ func @merge_multiple_affine_max_ops(%i0: index, %i1: index, %i2: index) -> index
 
 // CHECK: func @merge_multiple_uses_of_affine_max_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @merge_multiple_uses_of_affine_max_ops(%i0: index, %i1: index) -> index {
+func.func @merge_multiple_uses_of_affine_max_ops(%i0: index, %i1: index) -> index {
   %0 = affine.max affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   // CHECK: affine.max #[[MAP]]()[%[[I1]], %[[I0]]]
   %2 = affine.max affine_map<()[s0, s1, s2] -> (s0, s1, s2 * 2)> ()[%0, %0, %i1]
@@ -977,7 +977,7 @@ func @merge_multiple_uses_of_affine_max_ops(%i0: index, %i1: index) -> index {
 
 // CHECK: func @merge_mixed_uses_of_affine_max_ops
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @merge_mixed_uses_of_affine_max_ops(%i0: index, %i1: index) -> index {
+func.func @merge_mixed_uses_of_affine_max_ops(%i0: index, %i1: index) -> index {
   // CHECK: %[[AFFINE:.+]] = affine.max #[[MAP0]]()[%[[I0]]]
   %0 = affine.max affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   // %0 is bound to a symbol that is both a standalone expression and a part
@@ -990,7 +990,7 @@ func @merge_mixed_uses_of_affine_max_ops(%i0: index, %i1: index) -> index {
 // -----
 
 // CHECK-LABEL: func @dont_merge_affine_max_if_not_single_dim
-func @dont_merge_affine_max_if_not_single_dim(%i0: index, %i1: index, %i2: index) -> index {
+func.func @dont_merge_affine_max_if_not_single_dim(%i0: index, %i1: index, %i2: index) -> index {
   // CHECK-COUNT-2: affine.max
   %0 = affine.max affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   %1 = affine.max affine_map<(d0)[s0] -> (s0 + 4, 7 + d0)> (%0)[%i2]
@@ -1000,7 +1000,7 @@ func @dont_merge_affine_max_if_not_single_dim(%i0: index, %i1: index, %i2: index
 // -----
 
 // CHECK-LABEL: func @dont_merge_affine_max_if_not_single_sym
-func @dont_merge_affine_max_if_not_single_sym(%i0: index, %i1: index, %i2: index) -> index {
+func.func @dont_merge_affine_max_if_not_single_sym(%i0: index, %i1: index, %i2: index) -> index {
   // CHECK-COUNT-2: affine.max
   %0 = affine.max affine_map<()[s0] -> (s0 + 16, s0 * 8)> ()[%i0]
   %1 = affine.max affine_map<()[s0, s1] -> (s0 + 4, 7 + s1)> ()[%0, %i2]
@@ -1018,7 +1018,7 @@ func @dont_merge_affine_max_if_not_single_sym(%i0: index, %i1: index, %i2: index
 // CHECK-SAME:   %[[N:.*]]: index)
 // CHECK: affine.for %{{.*}} = #[[$MAP0]]()[%[[N]]] to #[[$MAP1]]()[%[[N]]] {
 
-func @compose_affine_for_bounds(%N: index) {
+func.func @compose_affine_for_bounds(%N: index) {
   %u = affine.apply affine_map<(d0) -> (d0 + 2)>(%N)
   %l = affine.apply affine_map<(d0) -> (d0 - 2)>(%N)
   affine.for %i = %l to %u {
@@ -1036,7 +1036,7 @@ func @compose_affine_for_bounds(%N: index) {
 // CHECK-NEXT: affine.vector_load %{{.*}}[%[[IV]] + 1]
 // CHECK-NEXT: affine.vector_store %{{.*}}, %{{.*}}[%[[IV]] + 1]
 // CHECK-NEXT: affine.vector_load %{{.*}}[%[[IV]]]
-func @compose_into_affine_vector_load_vector_store(%A : memref<1024xf32>, %u : index) {
+func.func @compose_into_affine_vector_load_vector_store(%A : memref<1024xf32>, %u : index) {
   affine.for %i = 0 to 1024 {
     // Make sure the unused operand (%u below) gets dropped as well.
     %idx = affine.apply affine_map<(d0, d1) -> (d0 + 1)> (%i, %u)
@@ -1056,7 +1056,7 @@ func @compose_into_affine_vector_load_vector_store(%A : memref<1024xf32>, %u : i
 // CHECK-LABEL: func @no_fold_of_store
 //  CHECK:   %[[cst:.+]] = memref.cast %arg
 //  CHECK:   affine.store %[[cst]]
-func @no_fold_of_store(%arg : memref<32xi8>, %holder: memref<memref<?xi8>>) {
+func.func @no_fold_of_store(%arg : memref<32xi8>, %holder: memref<memref<?xi8>>) {
   %0 = memref.cast %arg : memref<32xi8> to memref<?xi8>
   affine.store %0, %holder[] : memref<memref<?xi8>>
   return
@@ -1069,7 +1069,7 @@ func @no_fold_of_store(%arg : memref<32xi8>, %holder: memref<memref<?xi8>>) {
 
 // CHECK: func @canonicalize_single_min_max
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @canonicalize_single_min_max(%i0: index, %i1: index) -> (index, index) {
+func.func @canonicalize_single_min_max(%i0: index, %i1: index) -> (index, index) {
   // CHECK-NOT: affine.min
   // CHECK-NEXT: affine.apply #[[$MAP0]]()[%[[I0]]]
   %0 = affine.min affine_map<()[s0] -> (s0 + 16)> ()[%i0]
@@ -1087,7 +1087,7 @@ func @canonicalize_single_min_max(%i0: index, %i1: index) -> (index, index) {
 
 // CHECK-LABEL: func @canonicalize_multi_min_max
 // CHECK-SAME: (%[[I0:.+]]: index, %[[I1:.+]]: index)
-func @canonicalize_multi_min_max(%i0: index, %i1: index) -> (index, index) {
+func.func @canonicalize_multi_min_max(%i0: index, %i1: index) -> (index, index) {
   // CHECK-NEXT: affine.min #[[$MAP]]()[%[[I0]], %[[I1]]]
   %0 = affine.min affine_map<()[s0, s1] -> (s0 + s1, s1 + 16, 32)> ()[%i0, %i1]
 
@@ -1103,7 +1103,7 @@ module {
   memref.global "private" constant @__constant_1x5x1xf32 : memref<1x5x1xf32> = dense<[[[6.250000e-02], [2.500000e-01], [3.750000e-01], [2.500000e-01], [6.250000e-02]]]>
   memref.global "private" constant @__constant_32x64xf32 : memref<32x64xf32> = dense<0.000000e+00>
   // CHECK-LABEL: func @fold_const_init_global_memref
-  func @fold_const_init_global_memref() -> (f32, f32) {
+  func.func @fold_const_init_global_memref() -> (f32, f32) {
     %m = memref.get_global @__constant_1x5x1xf32 : memref<1x5x1xf32>
     %v0 = affine.load %m[0, 0, 0] : memref<1x5x1xf32>
     %v1 = affine.load %m[0, 1, 0] : memref<1x5x1xf32>
@@ -1114,7 +1114,7 @@ module {
   }
 
   // CHECK-LABEL: func @fold_const_splat_global
-  func @fold_const_splat_global() -> memref<32x64xf32> {
+  func.func @fold_const_splat_global() -> memref<32x64xf32> {
     // CHECK-NEXT: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
     %m = memref.get_global @__constant_32x64xf32 : memref<32x64xf32>
     %s = memref.alloc() : memref<32x64xf32>

diff  --git a/mlir/test/Dialect/Affine/constant-fold.mlir b/mlir/test/Dialect/Affine/constant-fold.mlir
index f1328966ed7f5..cdce39855acdf 100644
--- a/mlir/test/Dialect/Affine/constant-fold.mlir
+++ b/mlir/test/Dialect/Affine/constant-fold.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -test-constant-fold -split-input-file %s | FileCheck %s
 
 // CHECK-LABEL: func @affine_apply
-func @affine_apply(%variable : index) -> (index, index, index) {
+func.func @affine_apply(%variable : index) -> (index, index, index) {
   %c177 = arith.constant 177 : index
   %c211 = arith.constant 211 : index
   %N = arith.constant 1075 : index
@@ -24,7 +24,7 @@ func @affine_apply(%variable : index) -> (index, index, index) {
 
 // CHECK: #[[map:.*]] = affine_map<(d0, d1) -> (42, d1)
 
-func @affine_min(%variable: index) -> (index, index) {
+func.func @affine_min(%variable: index) -> (index, index) {
   // CHECK: %[[C42:.*]] = arith.constant 42
   %c42 = arith.constant 42 : index
   %c44 = arith.constant 44 : index
@@ -44,7 +44,7 @@ func @affine_min(%variable: index) -> (index, index) {
 
 // CHECK: #[[map:.*]] = affine_map<(d0, d1) -> (42, d1)
 
-func @affine_min(%variable: index) -> (index, index) {
+func.func @affine_min(%variable: index) -> (index, index) {
   // CHECK: %[[C42:.*]] = arith.constant 42
   %c42 = arith.constant 42 : index
   // CHECK: %[[C44:.*]] = arith.constant 44

diff  --git a/mlir/test/Dialect/Affine/dma-generate.mlir b/mlir/test/Dialect/Affine/dma-generate.mlir
index 45ab21fe2da6e..6c4f8a2235adb 100644
--- a/mlir/test/Dialect/Affine/dma-generate.mlir
+++ b/mlir/test/Dialect/Affine/dma-generate.mlir
@@ -13,7 +13,7 @@
 // -----
 
 // CHECK-LABEL: func @loop_nest_1d() {
-func @loop_nest_1d() {
+func.func @loop_nest_1d() {
   %A = memref.alloc() : memref<256 x f32>
   %B = memref.alloc() : memref<512 x f32>
   %F = memref.alloc() : memref<256 x f32, 2>
@@ -107,7 +107,7 @@ func @loop_nest_1d() {
 // CHECK-NEXT:  dealloc [[BUFB]] : memref<512x32xf32, 2>
 // CHECK-NEXT:  return
 // CHECK-NEXT:}
-func @loop_nest_high_d(%A: memref<512 x 32 x f32>,
+func.func @loop_nest_high_d(%A: memref<512 x 32 x f32>,
     %B: memref<512 x 32 x f32>, %C: memref<512 x 32 x f32>) {
   // DMAs will be performed at this level (jT is the first loop without a stride).
   // A and B are read, while C is both read and written. A total of three new buffers
@@ -160,7 +160,7 @@ func @loop_nest_high_d(%A: memref<512 x 32 x f32>,
 // CHECK-NEXT:      dealloc %{{.*}} : memref<1x2xf32, 2>
 // CHECK-NEXT:    }
 // CHECK-NEXT:    return
-func @loop_nest_modulo() {
+func.func @loop_nest_modulo() {
   %A = memref.alloc() : memref<256 x 8 x f32>
   affine.for %i = 0 to 32 step 4 {
     // DMAs will be performed at this level (%j is the first unit stride loop)
@@ -178,7 +178,7 @@ func @loop_nest_modulo() {
 // DMA on tiled loop nest. This also tests the case where the bounds are
 // dependent on outer loop IVs.
 // CHECK-LABEL: func @loop_nest_tiled() -> memref<256x1024xf32> {
-func @loop_nest_tiled() -> memref<256x1024xf32> {
+func.func @loop_nest_tiled() -> memref<256x1024xf32> {
   %0 = memref.alloc() : memref<256x1024xf32>
   affine.for %i0 = 0 to 256 step 32 {
     affine.for %i1 = 0 to 1024 step 32 {
@@ -203,7 +203,7 @@ func @loop_nest_tiled() -> memref<256x1024xf32> {
 // -----
 
 // CHECK-LABEL: func @dma_constant_dim_access
-func @dma_constant_dim_access(%A : memref<100x100xf32>) {
+func.func @dma_constant_dim_access(%A : memref<100x100xf32>) {
   %one = arith.constant 1 : index
   %N = arith.constant 100 : index
   // CHECK:      memref.alloc() : memref<1x100xf32, 2>
@@ -223,7 +223,7 @@ func @dma_constant_dim_access(%A : memref<100x100xf32>) {
 // -----
 
 // CHECK-LABEL: func @dma_with_symbolic_accesses
-func @dma_with_symbolic_accesses(%A : memref<100x100xf32>, %M : index) {
+func.func @dma_with_symbolic_accesses(%A : memref<100x100xf32>, %M : index) {
   %N = arith.constant 9 : index
   affine.for %i = 0 to 100 {
     affine.for %j = 0 to 100 {
@@ -247,7 +247,7 @@ func @dma_with_symbolic_accesses(%A : memref<100x100xf32>, %M : index) {
 // -----
 
 // CHECK-LABEL: func @dma_with_symbolic_loop_bounds
-func @dma_with_symbolic_loop_bounds(%A : memref<100x100xf32>, %M : index, %N: index) {
+func.func @dma_with_symbolic_loop_bounds(%A : memref<100x100xf32>, %M : index, %N: index) {
   %K = arith.constant 9 : index
 // The buffer size can't be bound by a constant smaller than the original
 // memref size; so the DMA buffer is the entire 100x100.
@@ -267,7 +267,7 @@ func @dma_with_symbolic_loop_bounds(%A : memref<100x100xf32>, %M : index, %N: in
 // -----
 
 // CHECK-LABEL: func @dma_unknown_size
-func @dma_unknown_size(%arg0: memref<?x?xf32>) {
+func.func @dma_unknown_size(%arg0: memref<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %M = memref.dim %arg0, %c0 : memref<? x ? x f32>
   %N = memref.dim %arg0, %c0 : memref<? x ? x f32>
@@ -285,7 +285,7 @@ func @dma_unknown_size(%arg0: memref<?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @dma_memref_3d
-func @dma_memref_3d(%arg0: memref<1024x1024x1024xf32>) {
+func.func @dma_memref_3d(%arg0: memref<1024x1024x1024xf32>) {
   affine.for %i = 0 to 1024 {
     affine.for %j = 0 to 1024 {
       affine.for %k = 0 to 1024 {
@@ -312,7 +312,7 @@ func @dma_memref_3d(%arg0: memref<1024x1024x1024xf32>) {
 // 2), i.e., the window ([2,320), [2,448)) in the original space.
 
 // CHECK-LABEL: func @multi_load_store_union() {
-func @multi_load_store_union() {
+func.func @multi_load_store_union() {
   %A = memref.alloc() : memref<512 x 512 x f32>
   affine.for %i = 0 to 256 {
     affine.for %j = 0 to 256 {
@@ -358,7 +358,7 @@ func @multi_load_store_union() {
 // -----
 
 // CHECK-LABEL: func @dma_loop_straightline_interspersed() {
-func @dma_loop_straightline_interspersed() {
+func.func @dma_loop_straightline_interspersed() {
   %c0 = arith.constant 0 : index
   %c255 = arith.constant 255 : index
   %A = memref.alloc() : memref<256 x f32>
@@ -406,7 +406,7 @@ func @dma_loop_straightline_interspersed() {
 // -----
 
 // CHECK-LABEL: func @dma_mixed_loop_blocks() {
-func @dma_mixed_loop_blocks() {
+func.func @dma_mixed_loop_blocks() {
   %c0 = arith.constant 0 : index
   %A = memref.alloc() : memref<256 x 256 x vector<8 x f32>>
   affine.for %i = 0 to 256 {
@@ -432,7 +432,7 @@ func @dma_mixed_loop_blocks() {
 // -----
 
 // CHECK-LABEL: func @relative_loop_bounds
-func @relative_loop_bounds(%arg0: memref<1027xf32>) {
+func.func @relative_loop_bounds(%arg0: memref<1027xf32>) {
   affine.for %i0 = 0 to 1024 {
     affine.for %i2 = affine_map<(d0) -> (d0)>(%i0) to affine_map<(d0) -> (d0 + 4)>(%i0) {
       %0 = arith.constant 0.0 : f32
@@ -453,7 +453,7 @@ func @relative_loop_bounds(%arg0: memref<1027xf32>) {
 
 // -----
 
-func @test_read_write_region_union() {
+func.func @test_read_write_region_union() {
   %0 = memref.alloc() : memref<256xf32>
   affine.for %i0 = 0 to 10 {
     // memref dims:  [0, 256)
@@ -489,7 +489,7 @@ func @test_read_write_region_union() {
 #map_ub = affine_map<(d0) -> (d0 + 3)>
 #map_acc = affine_map<(d0) -> (d0 floordiv 8)>
 // CHECK-LABEL: func @test_analysis_util
-func @test_analysis_util(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf32>, %arg2: memref<2xf32>) -> (memref<144x9xf32>, memref<2xf32>) {
+func.func @test_analysis_util(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf32>, %arg2: memref<2xf32>) -> (memref<144x9xf32>, memref<2xf32>) {
   %c0 = arith.constant 0 : index
   %0 = memref.alloc() : memref<64x1xf32>
   %1 = memref.alloc() : memref<144x4xf32>
@@ -522,7 +522,7 @@ func @test_analysis_util(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf32>,
 #map16 = affine_map<(d0, d1) -> (((((d0 + d1 * 72) mod 2304) mod 1152) floordiv 9) floordiv 8)>
 // Test for test case in b/128303048 #4.
 // CHECK-LABEL: func @test_memref_bounds
-func @test_memref_bounds(%arg0: memref<4x4x16x1xvector<8x128xf32>>, %arg1: memref<144x9xvector<8x128xf32>>, %arg2: memref<2xvector<8x128xf32>>) -> (memref<144x9xvector<8x128xf32>>, memref<2xvector<8x128xf32>>) {
+func.func @test_memref_bounds(%arg0: memref<4x4x16x1xvector<8x128xf32>>, %arg1: memref<144x9xvector<8x128xf32>>, %arg2: memref<2xvector<8x128xf32>>) -> (memref<144x9xvector<8x128xf32>>, memref<2xvector<8x128xf32>>) {
   %c0 = arith.constant 0 : index
   affine.for %i8 = 0 to 9 step 3 {
     affine.for %i9 = #map3(%i8) to #map12(%i8) {
@@ -548,7 +548,7 @@ func @test_memref_bounds(%arg0: memref<4x4x16x1xvector<8x128xf32>>, %arg1: memre
 // %i0.
 
 // FAST-MEM-16KB-LABEL: func @load_store_same_memref
-func @load_store_same_memref(%arg0: memref<256x1024xf32>) {
+func.func @load_store_same_memref(%arg0: memref<256x1024xf32>) {
   // FAST-MEM-16KB:  affine.for %{{.*}} = 0 to 256 step 4
   affine.for %i0 = 0 to 256 step 4 {
     // FAST-MEM-16KB: [[BUF:%[0-9]+]] = memref.alloc() : memref<4x1024xf32, 2>
@@ -584,7 +584,7 @@ func @load_store_same_memref(%arg0: memref<256x1024xf32>) {
 #map0 = affine_map<(d0) -> (d0)>
 #map1 = affine_map<(d0) -> (d0 + 4)>
 // FAST-MEM-16KB-LABEL: func @simple_matmul
-func @simple_matmul(%arg0: memref<8x8xvector<64xf32>>, %arg1: memref<8x8xvector<64xf32>>, %arg2: memref<8x8xvector<64xf32>>) -> memref<8x8xvector<64xf32>> {
+func.func @simple_matmul(%arg0: memref<8x8xvector<64xf32>>, %arg1: memref<8x8xvector<64xf32>>, %arg2: memref<8x8xvector<64xf32>>) -> memref<8x8xvector<64xf32>> {
   affine.for %i = 0 to 8 step 4 {
     affine.for %j = 0 to 8 step 4 {
       affine.for %k = 0 to 8 step 4 {

diff  --git a/mlir/test/Dialect/Affine/dma.mlir b/mlir/test/Dialect/Affine/dma.mlir
index c623ed86d596d..7a15206bff872 100644
--- a/mlir/test/Dialect/Affine/dma.mlir
+++ b/mlir/test/Dialect/Affine/dma.mlir
@@ -3,7 +3,7 @@
 // -----
 
 // Test with loop IVs.
-func @test0(%arg0 : index, %arg1 : index) {
+func.func @test0(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   %1 = memref.alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
   %2 = memref.alloc() : memref<1xi32>
@@ -24,7 +24,7 @@ func @test0(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with loop IVs and optional stride arguments.
-func @test1(%arg0 : index, %arg1 : index) {
+func.func @test1(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   %1 = memref.alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
   %2 = memref.alloc() : memref<1xi32>
@@ -47,7 +47,7 @@ func @test1(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with loop IVs and symbols (without symbol keyword).
-func @test2(%arg0 : index, %arg1 : index) {
+func.func @test2(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   %1 = memref.alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
   %2 = memref.alloc() : memref<1xi32>
@@ -69,7 +69,7 @@ func @test2(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with loop IVs and symbols (with symbol keyword).
-func @test3(%arg0 : index, %arg1 : index) {
+func.func @test3(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   %1 = memref.alloc() : memref<100x100xf32, affine_map<(d0, d1) -> (d0, d1)>, 2>
   %2 = memref.alloc() : memref<1xi32>
@@ -92,7 +92,7 @@ func @test3(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with loop IVs, symbols and constants in nested affine expressions.
-func @test4(%arg0 : index, %arg1 : index) {
+func.func @test4(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   %1 = memref.alloc() : memref<100x100xf32, 2>
   %2 = memref.alloc() : memref<1xi32>

diff  --git a/mlir/test/Dialect/Affine/inlining.mlir b/mlir/test/Dialect/Affine/inlining.mlir
index ce4fce593540a..f65f0326325cd 100644
--- a/mlir/test/Dialect/Affine/inlining.mlir
+++ b/mlir/test/Dialect/Affine/inlining.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -inline="default-pipeline=''" | FileCheck %s
 
 // Basic test that functions within affine operations are inlined.
-func @func_with_affine_ops(%N: index) {
+func.func @func_with_affine_ops(%N: index) {
   %c = arith.constant 200 : index
   affine.for %i = 1 to 10 {
     affine.if affine_set<(i)[N] : (i - 2 >= 0, 4 - i >= 0)>(%i)[%c]  {
@@ -12,7 +12,7 @@ func @func_with_affine_ops(%N: index) {
 }
 
 // CHECK-LABEL: func @inline_with_affine_ops
-func @inline_with_affine_ops() {
+func.func @inline_with_affine_ops() {
   %c = arith.constant 1 : index
 
   // CHECK: affine.for
@@ -24,7 +24,7 @@ func @inline_with_affine_ops() {
 }
 
 // CHECK-LABEL: func @not_inline_in_affine_op
-func @not_inline_in_affine_op() {
+func.func @not_inline_in_affine_op() {
   %c = arith.constant 1 : index
 
   // CHECK-NOT: affine.if
@@ -38,7 +38,7 @@ func @not_inline_in_affine_op() {
 // -----
 
 // Test when an invalid operation is nested in an affine op.
-func @func_with_invalid_nested_op() {
+func.func @func_with_invalid_nested_op() {
   affine.for %i = 1 to 10 {
     "foo.opaque"() : () -> ()
   }
@@ -46,7 +46,7 @@ func @func_with_invalid_nested_op() {
 }
 
 // CHECK-LABEL: func @not_inline_invalid_nest_op
-func @not_inline_invalid_nest_op() {
+func.func @not_inline_invalid_nest_op() {
   // CHECK: call @func_with_invalid_nested_op
   call @func_with_invalid_nested_op() : () -> ()
   return
@@ -55,12 +55,12 @@ func @not_inline_invalid_nest_op() {
 // -----
 
 // Test that calls are inlined into affine structures.
-func @func_noop() {
+func.func @func_noop() {
   return
 }
 
 // CHECK-LABEL: func @inline_into_affine_ops
-func @inline_into_affine_ops() {
+func.func @inline_into_affine_ops() {
   // CHECK-NOT: call @func_noop
   affine.for %i = 1 to 10 {
     call @func_noop() : () -> ()
@@ -71,14 +71,14 @@ func @inline_into_affine_ops() {
 // -----
 
 // Test that calls with dimension arguments are properly inlined.
-func @func_dim(%arg0: index, %arg1: memref<?xf32>) {
+func.func @func_dim(%arg0: index, %arg1: memref<?xf32>) {
   affine.load %arg1[%arg0] : memref<?xf32>
   return
 }
 
 // CHECK-LABEL: @inline_dimension
 // CHECK: (%[[ARG0:.*]]: memref<?xf32>)
-func @inline_dimension(%arg0: memref<?xf32>) {
+func.func @inline_dimension(%arg0: memref<?xf32>) {
   // CHECK: affine.for %[[IV:.*]] =
   affine.for %i = 1 to 42 {
     // CHECK-NOT: call @func_dim
@@ -91,14 +91,14 @@ func @inline_dimension(%arg0: memref<?xf32>) {
 // -----
 
 // Test that calls with vector operations are also inlined.
-func @func_vector_dim(%arg0: index, %arg1: memref<32xf32>) {
+func.func @func_vector_dim(%arg0: index, %arg1: memref<32xf32>) {
   affine.vector_load %arg1[%arg0] : memref<32xf32>, vector<4xf32>
   return
 }
 
 // CHECK-LABEL: @inline_dimension_vector
 // CHECK: (%[[ARG0:.*]]: memref<32xf32>)
-func @inline_dimension_vector(%arg0: memref<32xf32>) {
+func.func @inline_dimension_vector(%arg0: memref<32xf32>) {
   // CHECK: affine.for %[[IV:.*]] =
   affine.for %i = 1 to 42 {
     // CHECK-NOT: call @func_dim
@@ -112,16 +112,16 @@ func @inline_dimension_vector(%arg0: memref<32xf32>) {
 
 // Test that calls that would result in violation of affine value
 // categorization (top-level value stop being top-level) are not inlined.
-func private @get_index() -> index
+func.func private @get_index() -> index
 
-func @func_top_level(%arg0: memref<?xf32>) {
+func.func @func_top_level(%arg0: memref<?xf32>) {
   %0 = call @get_index() : () -> index
   affine.load %arg0[%0] : memref<?xf32>
   return
 }
 
 // CHECK-LABEL: @no_inline_not_top_level
-func @no_inline_not_top_level(%arg0: memref<?xf32>) {
+func.func @no_inline_not_top_level(%arg0: memref<?xf32>) {
   affine.for %i = 1 to 42 {
     // CHECK: call @func_top_level
     call @func_top_level(%arg0) : (memref<?xf32>) -> ()

diff  --git a/mlir/test/Dialect/Affine/invalid.mlir b/mlir/test/Dialect/Affine/invalid.mlir
index e1ff020b00d79..80c2fd7206f4b 100644
--- a/mlir/test/Dialect/Affine/invalid.mlir
+++ b/mlir/test/Dialect/Affine/invalid.mlir
@@ -2,7 +2,7 @@
 
 // -----
 
-func @affine_apply_operand_non_index(%arg0 : i32) {
+func.func @affine_apply_operand_non_index(%arg0 : i32) {
   // Custom parser automatically assigns all arguments the `index` so we must
   // use the generic syntax here to exercise the verifier.
   // expected-error at +1 {{op operand #0 must be index, but got 'i32'}}
@@ -12,7 +12,7 @@ func @affine_apply_operand_non_index(%arg0 : i32) {
 
 // -----
 
-func @affine_apply_resul_non_index(%arg0 : index) {
+func.func @affine_apply_resul_non_index(%arg0 : index) {
   // Custom parser automatically assigns `index` as the result type so we must
   // use the generic syntax here to exercise the verifier.
   // expected-error at +1 {{op result #0 must be index, but got 'i32'}}
@@ -24,7 +24,7 @@ func @affine_apply_resul_non_index(%arg0 : index) {
 
 #map = affine_map<(d0)[s0] -> (d0 + s0)>
 
-func @affine_for_lower_bound_invalid_dim(%arg : index) {
+func.func @affine_for_lower_bound_invalid_dim(%arg : index) {
   affine.for %n0 = 0 to 7 {
     %dim = arith.addi %arg, %arg : index
 
@@ -39,7 +39,7 @@ func @affine_for_lower_bound_invalid_dim(%arg : index) {
 
 #map = affine_map<(d0)[s0] -> (d0 + s0)>
 
-func @affine_for_upper_bound_invalid_dim(%arg : index) {
+func.func @affine_for_upper_bound_invalid_dim(%arg : index) {
   affine.for %n0 = 0 to 7 {
     %dim = arith.addi %arg, %arg : index
 
@@ -51,7 +51,7 @@ func @affine_for_upper_bound_invalid_dim(%arg : index) {
 }
 
 // -----
-func @affine_load_invalid_dim(%M : memref<10xi32>) {
+func.func @affine_load_invalid_dim(%M : memref<10xi32>) {
   "unknown"() ({
   ^bb0(%arg: index):
     affine.load %M[%arg] : memref<10xi32>
@@ -67,7 +67,7 @@ func @affine_load_invalid_dim(%M : memref<10xi32>) {
 
 #map0 = affine_map<(d0)[s0] -> (d0 + s0)>
 
-func @affine_for_lower_bound_invalid_sym() {
+func.func @affine_for_lower_bound_invalid_sym() {
   affine.for %i0 = 0 to 7 {
     // expected-error at +1 {{operand cannot be used as a symbol}}
     affine.for %n0 = #map0(%i0)[%i0] to 7 {
@@ -80,7 +80,7 @@ func @affine_for_lower_bound_invalid_sym() {
 
 #map0 = affine_map<(d0)[s0] -> (d0 + s0)>
 
-func @affine_for_upper_bound_invalid_sym() {
+func.func @affine_for_upper_bound_invalid_sym() {
   affine.for %i0 = 0 to 7 {
     // expected-error at +1 {{operand cannot be used as a symbol}}
     affine.for %n0 = 0 to #map0(%i0)[%i0] {
@@ -93,7 +93,7 @@ func @affine_for_upper_bound_invalid_sym() {
 
 #set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
 
-func @affine_if_invalid_dim(%arg : index) {
+func.func @affine_if_invalid_dim(%arg : index) {
   affine.for %n0 = 0 to 7 {
     %dim = arith.addi %arg, %arg : index
 
@@ -107,7 +107,7 @@ func @affine_if_invalid_dim(%arg : index) {
 
 #set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
 
-func @affine_if_invalid_sym() {
+func.func @affine_if_invalid_sym() {
   affine.for %i0 = 0 to 7 {
     // expected-error at +1 {{operand cannot be used as a symbol}}
     affine.if #set0(%i0)[%i0] {}
@@ -119,7 +119,7 @@ func @affine_if_invalid_sym() {
 
 #set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>
 
-func @affine_if_invalid_dimop_dim(%arg0: index, %arg1: index, %arg2: index, %arg3: index) {
+func.func @affine_if_invalid_dimop_dim(%arg0: index, %arg1: index, %arg2: index, %arg3: index) {
   affine.for %n0 = 0 to 7 {
     %0 = memref.alloc(%arg0, %arg1, %arg2, %arg3) : memref<?x?x?x?xf32>
     %c0 = arith.constant 0 : index
@@ -133,7 +133,7 @@ func @affine_if_invalid_dimop_dim(%arg0: index, %arg1: index, %arg2: index, %arg
 
 // -----
 
-func @affine_store_missing_l_square(%C: memref<4096x4096xf32>) {
+func.func @affine_store_missing_l_square(%C: memref<4096x4096xf32>) {
   %9 = arith.constant 0.0 : f32
   // expected-error at +1 {{expected '['}}
   affine.store %9, %C : memref<4096x4096xf32>
@@ -142,7 +142,7 @@ func @affine_store_missing_l_square(%C: memref<4096x4096xf32>) {
 
 // -----
 
-func @affine_store_wrong_value_type(%C: memref<f32>) {
+func.func @affine_store_wrong_value_type(%C: memref<f32>) {
   %c0 = arith.constant 0 : i32
   // expected-error at +1 {{value to store must have the same type as memref element type}}
   "affine.store"(%c0, %C) : (i32, memref<f32>) -> ()
@@ -151,7 +151,7 @@ func @affine_store_wrong_value_type(%C: memref<f32>) {
 
 // -----
 
-func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{operand count and affine map dimension and symbol count must match}}
   %0 = affine.min affine_map<(d0) -> (d0)> (%arg0, %arg1)
 
@@ -160,7 +160,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{operand count and affine map dimension and symbol count must match}}
   %0 = affine.min affine_map<()[s0] -> (s0)> (%arg0, %arg1)
 
@@ -169,7 +169,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{operand count and affine map dimension and symbol count must match}}
   %0 = affine.min affine_map<(d0) -> (d0)> ()
 
@@ -178,7 +178,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{operand count and affine map dimension and symbol count must match}}
   %0 = affine.max affine_map<(d0) -> (d0)> (%arg0, %arg1)
 
@@ -187,7 +187,7 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{operand count and affine map dimension and symbol count must match}}
   %0 = affine.max affine_map<()[s0] -> (s0)> (%arg0, %arg1)
 
@@ -196,7 +196,7 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{operand count and affine map dimension and symbol count must match}}
   %0 = affine.max affine_map<(d0) -> (d0)> ()
 
@@ -205,7 +205,7 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{the number of region arguments (1) and the number of map groups for lower (2) and upper bound (2), and the number of steps (2) must all match}}
   affine.parallel (%i) = (0, 0) to (100, 100) step (10, 10) {
   }
@@ -213,7 +213,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{the number of region arguments (2) and the number of map groups for lower (1) and upper bound (2), and the number of steps (2) must all match}}
   affine.parallel (%i, %j) = (0) to (100, 100) step (10, 10) {
   }
@@ -221,7 +221,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{the number of region arguments (2) and the number of map groups for lower (2) and upper bound (1), and the number of steps (2) must all match}}
   affine.parallel (%i, %j) = (0, 0) to (100) step (10, 10) {
   }
@@ -229,7 +229,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   // expected-error at +1 {{the number of region arguments (2) and the number of map groups for lower (2) and upper bound (2), and the number of steps (1) must all match}}
   affine.parallel (%i, %j) = (0, 0) to (100, 100) step (10) {
   }
@@ -237,7 +237,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   affine.for %x = 0 to 7 {
     %y = arith.addi %x, %x : index
     // expected-error at +1 {{operand cannot be used as a dimension id}}
@@ -249,7 +249,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   affine.for %x = 0 to 7 {
     %y = arith.addi %x, %x : index
     // expected-error at +1 {{operand cannot be used as a symbol}}
@@ -261,7 +261,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   //  expected-error at +1 {{reduction must be specified for each output}}
   %1 = affine.parallel (%i, %j) = (0, 0) to (100, 100) step (10, 10) -> (f32) {
@@ -273,7 +273,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   //  expected-error at +1 {{invalid reduction value: "bad"}}
   %1 = affine.parallel (%i, %j) = (0, 0) to (100, 100) step (10, 10) reduce ("bad") -> (f32) {
@@ -285,7 +285,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = memref.alloc() : memref<100x100xi32>
   %1 = affine.parallel (%i, %j) = (0, 0) to (100, 100) step (10, 10) reduce ("minf") -> (f32) {
     %2 = affine.load %0[%i, %j] : memref<100x100xi32>
@@ -297,7 +297,7 @@ func @affine_parallel(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @vector_load_invalid_vector_type() {
+func.func @vector_load_invalid_vector_type() {
   %0 = memref.alloc() : memref<100xf32>
   affine.for %i0 = 0 to 16 step 8 {
     // expected-error at +1 {{requires memref and vector types of the same elemental type}}
@@ -308,7 +308,7 @@ func @vector_load_invalid_vector_type() {
 
 // -----
 
-func @vector_store_invalid_vector_type() {
+func.func @vector_store_invalid_vector_type() {
   %0 = memref.alloc() : memref<100xf32>
   %1 = arith.constant dense<7.0> : vector<8xf64>
   affine.for %i0 = 0 to 16 step 8 {
@@ -320,7 +320,7 @@ func @vector_store_invalid_vector_type() {
 
 // -----
 
-func @vector_load_vector_memref() {
+func.func @vector_load_vector_memref() {
   %0 = memref.alloc() : memref<100xvector<8xf32>>
   affine.for %i0 = 0 to 4 {
     // expected-error at +1 {{requires memref and vector types of the same elemental type}}
@@ -331,7 +331,7 @@ func @vector_load_vector_memref() {
 
 // -----
 
-func @vector_store_vector_memref() {
+func.func @vector_store_vector_memref() {
   %0 = memref.alloc() : memref<100xvector<8xf32>>
   %1 = arith.constant dense<7.0> : vector<8xf32>
   affine.for %i0 = 0 to 4 {
@@ -343,7 +343,7 @@ func @vector_store_vector_memref() {
 
 // -----
 
-func @affine_if_with_then_region_args(%N: index) {
+func.func @affine_if_with_then_region_args(%N: index) {
   %c = arith.constant 200 : index
   %i = arith.constant 20: index
   // expected-error at +1 {{affine.if' op region #0 should have no arguments}}
@@ -356,7 +356,7 @@ func @affine_if_with_then_region_args(%N: index) {
 
 // -----
 
-func @affine_if_with_else_region_args(%N: index) {
+func.func @affine_if_with_else_region_args(%N: index) {
   %c = arith.constant 200 : index
   %i = arith.constant 20: index
   // expected-error at +1 {{affine.if' op region #1 should have no arguments}}
@@ -371,7 +371,7 @@ func @affine_if_with_else_region_args(%N: index) {
 
 // -----
 
-func @affine_for_iter_args_mismatch(%buffer: memref<1024xf32>) -> f32 {
+func.func @affine_for_iter_args_mismatch(%buffer: memref<1024xf32>) -> f32 {
   %sum_0 = arith.constant 0.0 : f32
   // expected-error at +1 {{mismatch between the number of loop-carried values and results}}
   %res = affine.for %i = 0 to 10 step 2 iter_args(%sum_iter = %sum_0) -> (f32, f32) {

diff  --git a/mlir/test/Dialect/Affine/load-store-invalid.mlir b/mlir/test/Dialect/Affine/load-store-invalid.mlir
index f836789aec7d8..4be34d56e443f 100644
--- a/mlir/test/Dialect/Affine/load-store-invalid.mlir
+++ b/mlir/test/Dialect/Affine/load-store-invalid.mlir
@@ -1,13 +1,13 @@
 // RUN: mlir-opt %s -split-input-file -verify-diagnostics
 
-func @load_too_many_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index, %arg3: index) {
+func.func @load_too_many_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index, %arg3: index) {
   // expected-error at +1 {{expects the number of subscripts to be equal to memref rank}}
   "affine.load"(%arg0, %arg1, %arg2, %arg3) : (memref<?x?xf32>, index, index, index) -> f32
 }
 
 // -----
 
-func @load_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index, %arg3: index) {
+func.func @load_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index, %arg3: index) {
   // expected-error at +1 {{op expects as many subscripts as affine map inputs}}
   "affine.load"(%arg0, %arg1, %arg2, %arg3)
     {map = affine_map<(i, j) -> (i, j)> } : (memref<?x?xf32>, index, index, index) -> f32
@@ -15,14 +15,14 @@ func @load_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2:
 
 // -----
 
-func @load_too_few_subscripts(%arg0: memref<?x?xf32>, %arg1: index) {
+func.func @load_too_few_subscripts(%arg0: memref<?x?xf32>, %arg1: index) {
   // expected-error at +1 {{expects the number of subscripts to be equal to memref rank}}
   "affine.load"(%arg0, %arg1) : (memref<?x?xf32>, index) -> f32
 }
 
 // -----
 
-func @load_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index) {
+func.func @load_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index) {
   // expected-error at +1 {{op expects as many subscripts as affine map inputs}}
   "affine.load"(%arg0, %arg1)
     {map = affine_map<(i, j) -> (i, j)> } : (memref<?x?xf32>, index) -> f32
@@ -30,7 +30,7 @@ func @load_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index) {
 
 // -----
 
-func @store_too_many_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index,
+func.func @store_too_many_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index,
                                 %arg3: index, %val: f32) {
   // expected-error at +1 {{expects the number of subscripts to be equal to memref rank}}
   "affine.store"(%val, %arg0, %arg1, %arg2, %arg3) : (f32, memref<?x?xf32>, index, index, index) -> ()
@@ -38,7 +38,7 @@ func @store_too_many_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %arg2: ind
 
 // -----
 
-func @store_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index,
+func.func @store_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2: index,
                                     %arg3: index, %val: f32) {
   // expected-error at +1 {{op expects as many subscripts as affine map inputs}}
   "affine.store"(%val, %arg0, %arg1, %arg2, %arg3)
@@ -47,14 +47,14 @@ func @store_too_many_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %arg2:
 
 // -----
 
-func @store_too_few_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %val: f32) {
+func.func @store_too_few_subscripts(%arg0: memref<?x?xf32>, %arg1: index, %val: f32) {
   // expected-error at +1 {{expects the number of subscripts to be equal to memref rank}}
   "affine.store"(%val, %arg0, %arg1) : (f32, memref<?x?xf32>, index) -> ()
 }
 
 // -----
 
-func @store_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %val: f32) {
+func.func @store_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %val: f32) {
   // expected-error at +1 {{op expects as many subscripts as affine map inputs}}
   "affine.store"(%val, %arg0, %arg1)
     {map = affine_map<(i, j) -> (i, j)> } : (f32, memref<?x?xf32>, index) -> ()
@@ -62,7 +62,7 @@ func @store_too_few_subscripts_map(%arg0: memref<?x?xf32>, %arg1: index, %val: f
 
 // -----
 
-func @load_non_affine_index(%arg0 : index) {
+func.func @load_non_affine_index(%arg0 : index) {
   %0 = memref.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
     %1 = arith.muli %i0, %arg0 : index
@@ -74,7 +74,7 @@ func @load_non_affine_index(%arg0 : index) {
 
 // -----
 
-func @store_non_affine_index(%arg0 : index) {
+func.func @store_non_affine_index(%arg0 : index) {
   %0 = memref.alloc() : memref<10xf32>
   %1 = arith.constant 11.0 : f32
   affine.for %i0 = 0 to 10 {
@@ -87,7 +87,7 @@ func @store_non_affine_index(%arg0 : index) {
 
 // -----
 
-func @invalid_prefetch_rw(%i : index) {
+func.func @invalid_prefetch_rw(%i : index) {
   %0 = memref.alloc() : memref<10xf32>
   // expected-error at +1 {{rw specifier has to be 'read' or 'write'}}
   affine.prefetch %0[%i], rw, locality<0>, data  : memref<10xf32>
@@ -96,7 +96,7 @@ func @invalid_prefetch_rw(%i : index) {
 
 // -----
 
-func @invalid_prefetch_cache_type(%i : index) {
+func.func @invalid_prefetch_cache_type(%i : index) {
   %0 = memref.alloc() : memref<10xf32>
   // expected-error at +1 {{cache type has to be 'data' or 'instr'}}
   affine.prefetch %0[%i], read, locality<0>, false  : memref<10xf32>
@@ -105,7 +105,7 @@ func @invalid_prefetch_cache_type(%i : index) {
 
 // -----
 
-func @dma_start_non_affine_src_index(%arg0 : index) {
+func.func @dma_start_non_affine_src_index(%arg0 : index) {
   %0 = memref.alloc() : memref<100xf32>
   %1 = memref.alloc() : memref<100xf32, 2>
   %2 = memref.alloc() : memref<1xi32, 4>
@@ -122,7 +122,7 @@ func @dma_start_non_affine_src_index(%arg0 : index) {
 
 // -----
 
-func @dma_start_non_affine_dst_index(%arg0 : index) {
+func.func @dma_start_non_affine_dst_index(%arg0 : index) {
   %0 = memref.alloc() : memref<100xf32>
   %1 = memref.alloc() : memref<100xf32, 2>
   %2 = memref.alloc() : memref<1xi32, 4>
@@ -139,7 +139,7 @@ func @dma_start_non_affine_dst_index(%arg0 : index) {
 
 // -----
 
-func @dma_start_non_affine_tag_index(%arg0 : index) {
+func.func @dma_start_non_affine_tag_index(%arg0 : index) {
   %0 = memref.alloc() : memref<100xf32>
   %1 = memref.alloc() : memref<100xf32, 2>
   %2 = memref.alloc() : memref<1xi32, 4>
@@ -156,7 +156,7 @@ func @dma_start_non_affine_tag_index(%arg0 : index) {
 
 // -----
 
-func @dma_wait_non_affine_tag_index(%arg0 : index) {
+func.func @dma_wait_non_affine_tag_index(%arg0 : index) {
   %0 = memref.alloc() : memref<100xf32>
   %1 = memref.alloc() : memref<100xf32, 2>
   %2 = memref.alloc() : memref<1xi32, 4>

diff  --git a/mlir/test/Dialect/Affine/load-store.mlir b/mlir/test/Dialect/Affine/load-store.mlir
index fa667f5f36a1a..e214a0815746d 100644
--- a/mlir/test/Dialect/Affine/load-store.mlir
+++ b/mlir/test/Dialect/Affine/load-store.mlir
@@ -3,7 +3,7 @@
 // -----
 
 // Test with just loop IVs.
-func @test0(%arg0 : index, %arg1 : index) {
+func.func @test0(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -17,7 +17,7 @@ func @test0(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with loop IVs and constants.
-func @test1(%arg0 : index, %arg1 : index) {
+func.func @test1(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -34,7 +34,7 @@ func @test1(%arg0 : index, %arg1 : index) {
 
 // Test with loop IVs and function args without 'symbol' keyword (should
 // be parsed as dim identifiers).
-func @test2(%arg0 : index, %arg1 : index) {
+func.func @test2(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -51,7 +51,7 @@ func @test2(%arg0 : index, %arg1 : index) {
 
 // Test with loop IVs and function args with 'symbol' keyword (should
 // be parsed as symbol identifiers).
-func @test3(%arg0 : index, %arg1 : index) {
+func.func @test3(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -69,7 +69,7 @@ func @test3(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with loop IVs, symbols and constants in nested affine expressions.
-func @test4(%arg0 : index, %arg1 : index) {
+func.func @test4(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -87,7 +87,7 @@ func @test4(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with swizzled loop IVs.
-func @test5(%arg0 : index, %arg1 : index) {
+func.func @test5(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<10x10x10xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -107,7 +107,7 @@ func @test5(%arg0 : index, %arg1 : index) {
 // Test with swizzled loop IVs, duplicate args, and function args used as dims.
 // Dim identifiers are assigned in parse order:
 // d0 = %i2, d1 = %arg0, d2 = %i0, d3 = %i1, d4 = %arg1
-func @test6(%arg0 : index, %arg1 : index) {
+func.func @test6(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<10x10x10xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -130,7 +130,7 @@ func @test6(%arg0 : index, %arg1 : index) {
 // Dim and symbol identifiers are assigned in parse order:
 // d0 = %i2, d1 = %i0, d2 = %i1
 // s0 = %arg0, s1 = %arg1
-func @test6(%arg0 : index, %arg1 : index) {
+func.func @test6(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<10x10x10xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -156,7 +156,7 @@ func @test6(%arg0 : index, %arg1 : index) {
 // CHECK: [[MAP0:#map[0-9]*]] = affine_map<(d0) -> (d0 + 1)>
 
 // Test with operands without special SSA name.
-func @test7() {
+func.func @test7() {
   %0 = memref.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
     %1 = affine.apply affine_map<(d1) -> (d1 + 1)>(%i0)
@@ -171,7 +171,7 @@ func @test7() {
 // -----
 
 // Test with zero-dimensional operands.
-func @zero_dim(%arg0 : memref<i32>, %arg1 : memref<i32>) {
+func.func @zero_dim(%arg0 : memref<i32>, %arg1 : memref<i32>) {
   %0 = affine.load %arg0[] : memref<i32>
   affine.store %0, %arg1[] : memref<i32>
   // CHECK: affine.load %{{.*}}[] : memref<i32>
@@ -182,7 +182,7 @@ func @zero_dim(%arg0 : memref<i32>, %arg1 : memref<i32>) {
 // -----
 
 // Test with loop IVs and constants.
-func @test_prefetch(%arg0 : index, %arg1 : index) {
+func.func @test_prefetch(%arg0 : index, %arg1 : index) {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
@@ -199,7 +199,7 @@ func @test_prefetch(%arg0 : index, %arg1 : index) {
 // -----
 
 // Test with just loop IVs.
-func @vector_load_vector_store_iv() {
+func.func @vector_load_vector_store_iv() {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 16 {
     affine.for %i1 = 0 to 16 step 8 {
@@ -218,7 +218,7 @@ func @vector_load_vector_store_iv() {
 // -----
 
 // Test with loop IVs and constants.
-func @vector_load_vector_store_iv_constant() {
+func.func @vector_load_vector_store_iv_constant() {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 16 step 4 {
@@ -236,7 +236,7 @@ func @vector_load_vector_store_iv_constant() {
 
 // -----
 
-func @vector_load_vector_store_2d() {
+func.func @vector_load_vector_store_2d() {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 16 step 2{
     affine.for %i1 = 0 to 16 step 8 {

diff  --git a/mlir/test/Dialect/Affine/loop-coalescing.mlir b/mlir/test/Dialect/Affine/loop-coalescing.mlir
index 93423fff670e4..1f191a14ed25d 100644
--- a/mlir/test/Dialect/Affine/loop-coalescing.mlir
+++ b/mlir/test/Dialect/Affine/loop-coalescing.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -split-input-file -allow-unregistered-dialect -affine-loop-coalescing %s | FileCheck %s
 
 // CHECK-LABEL: @one_3d_nest
-func @one_3d_nest() {
+func.func @one_3d_nest() {
   // Capture original bounds.  Note that for zero-based step-one loops, the
   // upper bound is also the number of iterations.
   // CHECK: %[[orig_lb:.*]] = arith.constant 0
@@ -44,7 +44,7 @@ func @one_3d_nest() {
 // multiple uses of loop induction variables get rewritten to the same values.
 
 // CHECK-LABEL: @multi_use
-func @multi_use() {
+func.func @multi_use() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c10 = arith.constant 10 : index
@@ -72,7 +72,7 @@ func @multi_use() {
   return
 }
 
-func @unnormalized_loops() {
+func.func @unnormalized_loops() {
   // CHECK: %[[orig_step_i:.*]] = arith.constant 2
   // CHECK: %[[orig_step_j:.*]] = arith.constant 3
   // CHECK: %[[orig_lb_i:.*]] = arith.constant 5
@@ -130,7 +130,7 @@ func @unnormalized_loops() {
 // CHECK-SAME: %[[orig_lb2:[A-Za-z0-9]+]]:
 // CHECK-SAME: %[[orig_ub2:[A-Za-z0-9]+]]:
 // CHECK-SAME: %[[orig_step2:[A-Za-z0-9]+]]:
-func @parametric(%lb1 : index, %ub1 : index, %step1 : index,
+func.func @parametric(%lb1 : index, %ub1 : index, %step1 : index,
                  %lb2 : index, %ub2 : index, %step2 : index) {
   // Compute the number of iterations for each of the loops and the total
   // number of iterations.
@@ -166,7 +166,7 @@ func @parametric(%lb1 : index, %ub1 : index, %step1 : index,
 }
 
 // CHECK-LABEL: @two_bands
-func @two_bands() {
+func.func @two_bands() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %c10 = arith.constant 10 : index
@@ -201,7 +201,7 @@ func @two_bands() {
 // CHECK-DAG: #[[EIGHT:.*]] = affine_map<() -> (8)>
 // CHECK-DAG: #[[MOD:.*]] = affine_map<(d0)[s0] -> (d0 mod s0)>
 // CHECK-DAG: #[[DIV:.*]] = affine_map<(d0)[s0] -> (d0 floordiv s0)>
-func @coalesce_affine_for() {
+func.func @coalesce_affine_for() {
   affine.for %i = 0 to 16 {
     affine.for %j = 0 to 64 {
       affine.for %k = 0 to 8 {
@@ -232,7 +232,7 @@ func @coalesce_affine_for() {
 // CHECK-DAG: #[[PRODUCT:.*]] = affine_map<(d0)[s0] -> (d0 * s0)>
 // CHECK-DAG: #[[MOD:.*]] = affine_map<(d0)[s0] -> (d0 mod s0)>
 // CHECK-DAG: #[[FLOOR:.*]] = affine_map<(d0)[s0] -> (d0 floordiv s0)>
-func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
+func.func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %M = memref.dim %arg0, %c0 : memref<?x?xf32>
   %N = memref.dim %arg0, %c0 : memref<?x?xf32>
@@ -271,7 +271,7 @@ func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
 // CHECK-DAG: #[[SIXTY_FOUR:.*]] = affine_map<() -> (64)>
 // CHECK-DAG: #[[MOD:.*]] = affine_map<(d0)[s0] -> (d0 mod s0)>
 // CHECK-DAG: #[[DIV:.*]] = affine_map<(d0)[s0] -> (d0 floordiv s0)>
-func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
+func.func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
   %c0 = arith.constant 0 : index
   %M = memref.dim %arg0, %c0 : memref<?x?xf32>
   %N = memref.dim %arg0, %c0 : memref<?x?xf32>
@@ -309,7 +309,7 @@ func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
 // CHECK-DAG: #[[MOD:.*]] = affine_map<(d0)[s0] -> (d0 mod s0)>
 // CHECK-DAG: #[[DIV:.*]] = affine_map<(d0)[s0] -> (d0 floordiv s0)>
 #myMap = affine_map<()[s1] -> (s1, -s1)>
-func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
+func.func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
  %c0 = arith.constant 0 : index
  %M = memref.dim %arg0, %c0 : memref<?x?xf32>
  %N = memref.dim %arg0, %c0 : memref<?x?xf32>
@@ -346,7 +346,7 @@ func @coalesce_affine_for(%arg0: memref<?x?xf32>) {
 // CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0) -> (696, d0 * 110 + 110)>
 #map0 = affine_map<(d0) -> (d0 * 110)>
 #map1 = affine_map<(d0) -> (696, d0 * 110 + 110)>
-func @test_loops_do_not_get_coalesced() {
+func.func @test_loops_do_not_get_coalesced() {
   affine.for %i = 0 to 7 {
     affine.for %j = #map0(%i) to min #map1(%i) {
     }

diff  --git a/mlir/test/Dialect/Affine/loop-permute.mlir b/mlir/test/Dialect/Affine/loop-permute.mlir
index 98eadfaabc9e4..118165b2fb2a2 100644
--- a/mlir/test/Dialect/Affine/loop-permute.mlir
+++ b/mlir/test/Dialect/Affine/loop-permute.mlir
@@ -6,7 +6,7 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -test-loop-permutation="permutation-map=2,1,0" | FileCheck %s --check-prefix=CHECK-210
 
 // CHECK-120-LABEL: func @permute
-func @permute(%U0 : index, %U1 : index, %U2 : index) {
+func.func @permute(%U0 : index, %U1 : index, %U2 : index) {
   "abc"() : () -> ()
   affine.for %arg0 = 0 to %U0 {
     affine.for %arg1 = 0 to %U1 {

diff  --git a/mlir/test/Dialect/Affine/loop-tiling-parametric.mlir b/mlir/test/Dialect/Affine/loop-tiling-parametric.mlir
index 4636d4fc42af5..372a7a0a462fd 100644
--- a/mlir/test/Dialect/Affine/loop-tiling-parametric.mlir
+++ b/mlir/test/Dialect/Affine/loop-tiling-parametric.mlir
@@ -23,7 +23,7 @@
 // CHECK-NEXT:          affine.for %[[J:.*]] = [[LBI]]{{.*}}[[ARG4]]{{.*}}[[ARG1]]{{.*}} to min [[UBI1]]{{.*}}[[ARG4]]{{.*}}[[ARG1]]
 // CHECK-NEXT:            affine.for %[[K:.*]] = [[LBI]]{{.*}}[[ARG5]]{{.*}}[[ARG2]]{{.*}} to min [[UBI2]]{{.*}}[[ARG5]]{{.*}}[[ARG2]]
 // CHECK-NEXT:              "test.foo"(%[[I]], %[[J]], %[[K]])
-func @loop_tiling_3d(%t0 : index, %t1 : index, %t2 : index) {
+func.func @loop_tiling_3d(%t0 : index, %t1 : index, %t2 : index) {
   affine.for %i = 0 to 256 {
     affine.for %j = 0 to 512 {
       affine.for %k = 0 to 1024 {
@@ -52,7 +52,7 @@ func @loop_tiling_3d(%t0 : index, %t1 : index, %t2 : index) {
 // CHECK-NEXT:          affine.for %[[J:.*]] = [[LBI]]{{.*}}[[ARG4]]{{.*}}[[ARG1]]{{.*}} to min [[UBI1]]{{.*}}[[ARG4]]{{.*}}[[ARG1]]{{.*}} step 3
 // CHECK-NEXT:            affine.for %[[K:.*]] = [[LBI]]{{.*}}[[ARG5]]{{.*}}[[ARG2]]{{.*}} to min [[UBI2]]{{.*}}[[ARG5]]{{.*}}[[ARG2]]{{.*}} step 2
 // CHECK-NEXT:              "test.foo"(%[[I]], %[[J]], %[[K]])
-func @loop_tiling_non_unit_step(%t0: index, %t1: index, %t2: index){
+func.func @loop_tiling_non_unit_step(%t0: index, %t1: index, %t2: index){
   affine.for %i = 0 to 256 step 4 {
     affine.for %j = 0 to 512  step 3 {
       affine.for %k = 0 to 1024 step 2 {
@@ -71,7 +71,7 @@ func @loop_tiling_non_unit_step(%t0: index, %t1: index, %t2: index){
 
 // CHECK: func @tile_loop_with_div_in_upper_bound([[ARG0:%arg[0-9]+]]: index, %{{.*}}: memref<?xi32>, %{{.*}}: index, %{{.*}}: index)
 #ub = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
-func @tile_loop_with_div_in_upper_bound(%t5 : index, %A : memref<? x i32>, %L : index, %U : index) {
+func.func @tile_loop_with_div_in_upper_bound(%t5 : index, %A : memref<? x i32>, %L : index, %U : index) {
   %c0 = arith.constant 0 : index
   %M = memref.dim %A, %c0 : memref<? x i32>
   affine.for %i = 0 to min #ub()[%M, %U] {
@@ -91,7 +91,7 @@ func @tile_loop_with_div_in_upper_bound(%t5 : index, %A : memref<? x i32>, %L :
 
 // CHECK: func @tile_loop_with_div_in_upper_bound_non_unit_step([[ARG0:%arg[0-9]+]]: index, %{{.*}}: memref<?xi32>, %{{.*}}: index, %{{.*}}: index)
 #ub = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
-func @tile_loop_with_div_in_upper_bound_non_unit_step(%t5 : index, %A : memref<? x i32>, %L : index, %U : index) {
+func.func @tile_loop_with_div_in_upper_bound_non_unit_step(%t5 : index, %A : memref<? x i32>, %L : index, %U : index) {
   %c0 = arith.constant 0 : index
   %M = memref.dim %A, %c0 : memref<? x i32>
   affine.for %i = 0 to min #ub()[%M, %U] step 4 {
@@ -121,7 +121,7 @@ func @tile_loop_with_div_in_upper_bound_non_unit_step(%t5 : index, %A : memref<?
 // CHECK-NEXT:            affine.for %[[K:.*]] = [[LBI0]]([[ARG5]]){{.*}}[[ARG2]]{{.*}} to min [[UBI2]]([[ARG5]]){{.*}}[[ARG2]]{{.*}}step 4{{.*}}
 // CHECK-NEXT:              "test.foo"(%[[I]], %[[J]], %[[K]]) : (index, index, index) -> ()
 #ubi = affine_map<()[s0] -> (s0 + 16)>
-func @tile_loop_with_non_zero_lb(%t0: index, %t1: index, %t2: index, %U: index){
+func.func @tile_loop_with_non_zero_lb(%t0: index, %t1: index, %t2: index, %U: index){
   affine.for %i = 8 to 256 {
     affine.for %j = 8 to #ubi()[%U] {
       affine.for %k = 8 to #ubi()[%U] step 4 {
@@ -153,7 +153,7 @@ func @tile_loop_with_non_zero_lb(%t0: index, %t1: index, %t2: index, %U: index){
 // CHECK-NEXT:                 arith.mulf %{{.*}}
 // CHECK-NEXT:                 arith.addf %{{.*}}
 // CHECK-NEXT:                 affine.store %{{.*}}[%[[I]], %[[J]]]
-func @simple_matmul(%t6 : index, %t7 : index, %t8 : index, %arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256xvector<64xf32>>, %arg2: memref<256x256xvector<64xf32>>) -> memref<256x256xvector<64xf32>> {
+func.func @simple_matmul(%t6 : index, %t7 : index, %t8 : index, %arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256xvector<64xf32>>, %arg2: memref<256x256xvector<64xf32>>) -> memref<256x256xvector<64xf32>> {
   affine.for %i = 0 to 256 {
     affine.for %j = 0 to 256 {
       affine.for %k = 0 to 250 {
@@ -188,7 +188,7 @@ func @simple_matmul(%t6 : index, %t7 : index, %t8 : index, %arg0: memref<256x256
 // CHECK-NEXT:             affine.load %{{.*}}%[[I0]], %[[I1]]
 // CHECK-NEXT:             arith.addf
 // CHECK-NEXT:             affine.store %{{.*}}%[[I0]], %[[I1]]
-func @tile_with_symbolic_loop_upper_bounds(%t9 : index, %t10: index, %arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
+func.func @tile_with_symbolic_loop_upper_bounds(%t9 : index, %t10: index, %arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %cst = arith.constant 0.000000e+00 : f32
   %c0 = arith.constant 0 : index
   %0 = memref.dim %arg0, %c0 : memref<?x?xf32>
@@ -215,7 +215,7 @@ func @tile_with_symbolic_loop_upper_bounds(%t9 : index, %t10: index, %arg0: memr
 // CHECK-DAG: [[UBO0:#map[0-9]+]] = affine_map<()[s0, s1, s2] -> ((s0 + s1) ceildiv s2)>
 
 // CHECK: func @tile_with_loop_upper_bounds_in_two_symbols([[ARG0:%arg[0-9]+]]: index{{.*}}){{.*}}
-func @tile_with_loop_upper_bounds_in_two_symbols(%t11 : index, %arg0: memref<?xf32>, %limit: index) {
+func.func @tile_with_loop_upper_bounds_in_two_symbols(%t11 : index, %arg0: memref<?xf32>, %limit: index) {
   %c0 = arith.constant 0 : index
   %dim0 = memref.dim %arg0, %c0 : memref<?xf32>
   affine.for %i0 = 0 to affine_map<()[s0, s1] -> (s0 + s1)> ()[%dim0, %limit] {
@@ -240,7 +240,7 @@ func @tile_with_loop_upper_bounds_in_two_symbols(%t11 : index, %arg0: memref<?xf
 // CHECK-NEXT:   affine.for [[ARG5:%arg[0-9]+]] = 0 to [[UBO1]]({{.*}}){{.*}}[[ARG1]]
 // CHECK-NEXT:     affine.for {{.*}} = [[LBI0]]([[ARG4]]){{.*}}[[ARG0]]{{.*}} to min [[UBI0]]({{.*}}, [[ARG4]]){{.*}}[[ARG0]]{{.*}}
 // CHECK-NEXT:       affine.for {{.*}} = [[LBI0]]([[ARG5]]){{.*}}[[ARG1]]{{.*}} to min [[UBI1]]({{.*}}, [[ARG5]]){{.*}}[[ARG1]]{{.*}}
-func @tile_with_upper_bounds_in_dimensions_and_symbols(%t12 : index, %t13 :index, %M: index, %N:  index, %K: index) {
+func.func @tile_with_upper_bounds_in_dimensions_and_symbols(%t12 : index, %t13 :index, %M: index, %N:  index, %K: index) {
   affine.for %i = 0 to affine_map<(d0)[s0] -> (d0 + s0 + 2)>(%M)[%K] {
     affine.for %j = 0 to affine_map<(d0)[s0] -> (d0 + s0 + 4)>(%N)[%K] {
       "test.foo" () : () -> ()
@@ -263,7 +263,7 @@ func @tile_with_upper_bounds_in_dimensions_and_symbols(%t12 : index, %t13 :index
 // CHECK-NEXT:   affine.for [[ARG5:%arg[0-9]+]] = 0 to [[UBO1]]({{.*}}){{.*}}[[ARG1]]{{.*}} step 4{{.*}}
 // CHECK-NEXT:     affine.for {{.*}} = [[LBI0]]([[ARG4]]){{.*}}[[ARG0]]{{.*}} to min [[UBI0]]({{.*}}, [[ARG4]]){{.*}}[[ARG0]]{{.*}} step 2{{.*}}
 // CHECK-NEXT:       affine.for {{.*}} = [[LBI0]]([[ARG5]]){{.*}}[[ARG1]]{{.*}} to min [[UBI1]]({{.*}}, [[ARG5]]){{.*}}[[ARG1]]{{.*}} step 4{{.*}}
-func @tile_with_upper_bounds_in_dimensions_and_symbols_non_unit_steps(%t12 : index, %t13 :index, %M: index, %N :  index, %K: index) {
+func.func @tile_with_upper_bounds_in_dimensions_and_symbols_non_unit_steps(%t12 : index, %t13 :index, %M: index, %N :  index, %K: index) {
   affine.for %i = 0 to affine_map<(d0)[s0] -> (d0 + s0 + 2)>(%M)[%K] step 2 {
     affine.for %j = 0 to affine_map<(d0)[s0] -> (d0 + s0 + 4)>(%N)[%K] step 4 {
       "test.foo" () : () -> ()

diff  --git a/mlir/test/Dialect/Affine/loop-tiling-validity.mlir b/mlir/test/Dialect/Affine/loop-tiling-validity.mlir
index 2b67f223f01c2..7fe940c8430f4 100644
--- a/mlir/test/Dialect/Affine/loop-tiling-validity.mlir
+++ b/mlir/test/Dialect/Affine/loop-tiling-validity.mlir
@@ -8,7 +8,7 @@
 // CHECK-DAG: [[$UB:#map[0-9]+]] = affine_map<(d0) -> (d0 + 32)>
 
 // CHECK-LABEL: func @legal_loop()
-func @legal_loop() {
+func.func @legal_loop() {
   %0 = memref.alloc() : memref<64xf32>
 
   affine.for %i = 0 to 64 {
@@ -30,7 +30,7 @@ func @legal_loop() {
 // The default tiling method (hyper-rect) will violate tiling legality.
 // We expect a remark that points that issue out to be emitted.
 
-func @illegal_loop_with_diag_dependence() {
+func.func @illegal_loop_with_diag_dependence() {
   %A = memref.alloc() : memref<64x64xf32>
 
   affine.for %i = 0 to 64 {

diff  --git a/mlir/test/Dialect/Affine/loop-tiling.mlir b/mlir/test/Dialect/Affine/loop-tiling.mlir
index 8ff91ecff7038..04c1cf334d140 100644
--- a/mlir/test/Dialect/Affine/loop-tiling.mlir
+++ b/mlir/test/Dialect/Affine/loop-tiling.mlir
@@ -34,7 +34,7 @@
 // CHECK-NEXT:   }
 // CHECK-NEXT: }
 // CHECK-NEXT:  return
-func @loop_tiling() {
+func.func @loop_tiling() {
   affine.for %i = 0 to 256 {
     affine.for %j = 0 to 512 {
       affine.for %k = 0 to 1024 {
@@ -65,7 +65,7 @@ func @loop_tiling() {
 #lb = affine_map<()[s0] -> (0, s0)>
 #ub = affine_map<()[s0, s1] -> (s0, 4096 floordiv s1)>
 // CHECK-LABEL: func @loop_max_min_bound(%{{.*}}: memref<?xi32>, %{{.*}}: index, %{{.*}}: index) {
-func @loop_max_min_bound(%A : memref<? x i32>, %L : index, %U : index) {
+func.func @loop_max_min_bound(%A : memref<? x i32>, %L : index, %U : index) {
   %c0 = arith.constant 0 : index
   %M = memref.dim %A, %c0 : memref<? x i32>
   affine.for %i = max #lb()[%L] to min #ub()[%M, %U] {
@@ -86,7 +86,7 @@ func @loop_max_min_bound(%A : memref<? x i32>, %L : index, %U : index) {
 // possible here, they are adjusted to 4 x 4 x 5.
 
 // MODEL-LABEL: func @simple_matmul
-func @simple_matmul(%arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256xvector<64xf32>>, %arg2: memref<256x256xvector<64xf32>>) -> memref<256x256xvector<64xf32>> {
+func.func @simple_matmul(%arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256xvector<64xf32>>, %arg2: memref<256x256xvector<64xf32>>) -> memref<256x256xvector<64xf32>> {
   affine.for %i = 0 to 256 {
     affine.for %j = 0 to 256 {
       affine.for %k = 0 to 250 {
@@ -110,7 +110,7 @@ func @simple_matmul(%arg0: memref<256x256xvector<64xf32>>, %arg1: memref<256x256
 
 // CHECK-DAG: [[$UBMAP:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + 32, s0)>
 
-func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
+func.func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %cst = arith.constant 0.000000e+00 : f32
   %c0 = arith.constant 0 : index
   %0 = memref.dim %arg0, %c0 : memref<?x?xf32>
@@ -156,7 +156,7 @@ func @tile_with_symbolic_loop_upper_bounds(%arg0: memref<?x?xf32>, %arg1: memref
 // CHECK-DAG: [[MAP1:#map[0-9]+]] = affine_map<()[s0, s1] -> (s0 + s1)>
 // CHECK-DAG: [[$UBMAP:#map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 + 32, s0 + s1)>
 
-func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: index) {
+func.func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: index) {
   %c0 = arith.constant 0 : index
   %dim0 = memref.dim %arg0, %c0 : memref<?xf32>
   affine.for %i0 = 0 to affine_map<()[s0, s1] -> (s0 + s1)> ()[%dim0, %limit] {
@@ -177,7 +177,7 @@ func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: i
 // CHECK-DAG:  #[[$ID:.*]] = affine_map<(d0) -> (d0)>
 // CHECK-DAG:  [[$UBMAP:#map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + 160, s0)>
 
-func @tile_loop_with_non_unit_step(%arg0 : memref<50xf32>, %arg1 : index) {
+func.func @tile_loop_with_non_unit_step(%arg0 : memref<50xf32>, %arg1 : index) {
   affine.for %i = 0 to %arg1 step 5 {
     affine.load %arg0[%i] : memref<50xf32>
   }
@@ -192,7 +192,7 @@ func @tile_loop_with_non_unit_step(%arg0 : memref<50xf32>, %arg1 : index) {
 
 // -----
 
-func @tile_size_larger_than_trip_count_symbolic_bound(%M: index, %N :  index) {
+func.func @tile_size_larger_than_trip_count_symbolic_bound(%M: index, %N :  index) {
   affine.for %i = affine_map<(d0) -> (d0)>(%M) to affine_map<(d0) -> (d0 + 2)>(%M) {
     affine.for %j = affine_map<(d0) -> (d0)>(%N) to affine_map<(d0) -> (d0 + 4)>(%N) {
       "test.foo" () : () -> ()
@@ -215,7 +215,7 @@ func @tile_size_larger_than_trip_count_symbolic_bound(%M: index, %N :  index) {
 
 // CHECK-LABEL: func @trip_count_one
 // SEPARATE-LABEL: func @trip_count_one
-func @trip_count_one(%arg0: memref<196608x1xf32>, %arg1: memref<196608x1xf32>)
+func.func @trip_count_one(%arg0: memref<196608x1xf32>, %arg1: memref<196608x1xf32>)
     -> memref<196608x1xf32> {
   affine.for %i1 = 0 to 196608 {
     affine.for %i3 = 0 to 1 {
@@ -231,7 +231,7 @@ func @trip_count_one(%arg0: memref<196608x1xf32>, %arg1: memref<196608x1xf32>)
 
 // -----
 
-func @separate_full_tile_2d(%M : index, %N : index) {
+func.func @separate_full_tile_2d(%M : index, %N : index) {
   affine.for %i = 0 to %M {
     affine.for %j = 0 to %N {
       "test.foo"() : () -> ()
@@ -244,7 +244,7 @@ func @separate_full_tile_2d(%M : index, %N : index) {
 
 #ub = affine_map<(d0)[s0] -> (d0, s0)>
 // CHECK-LABEL: func @non_hyperrectangular_loop
-func @non_hyperrectangular_loop() {
+func.func @non_hyperrectangular_loop() {
   %N = arith.constant 128 : index
   affine.for %i = 0 to %N {
     affine.for %j = 0 to min #ub(%i)[%N] {
@@ -264,7 +264,7 @@ func @non_hyperrectangular_loop() {
 // No tiling supported on loops with yield values.
 
 // CHECK-LABEL: func @yield_values
-func @yield_values(%init : index) {
+func.func @yield_values(%init : index) {
   %r = affine.for %i = 0 to 10 iter_args(%s = %init) -> index {
     "test.foo"() : () -> ()
     affine.yield %s : index
@@ -306,7 +306,7 @@ func @yield_values(%init : index) {
 
 // -----
 
-func @separate_full_tile_1d_max_min(%M : index, %N : index, %P : index, %Q : index) {
+func.func @separate_full_tile_1d_max_min(%M : index, %N : index, %P : index, %Q : index) {
   affine.for %i0 = max affine_map<(d0, d1) -> (d0, d1)>  (%M, %N) to min affine_map< (d0, d1) -> (d0, d1)> (%P, %Q) {
   }
   return

diff  --git a/mlir/test/Dialect/Affine/loop-unswitch.mlir b/mlir/test/Dialect/Affine/loop-unswitch.mlir
index 1ba3d30bdc6ba..084632ec28a14 100644
--- a/mlir/test/Dialect/Affine/loop-unswitch.mlir
+++ b/mlir/test/Dialect/Affine/loop-unswitch.mlir
@@ -3,7 +3,7 @@
 // CHECK-DAG: #[[$SET:.*]] = affine_set<(d0) : (d0 - 2 >= 0)>
 
 // CHECK-LABEL: func @if_else_imperfect
-func @if_else_imperfect(%A : memref<100xi32>, %B : memref<100xi32>, %v : i32) {
+func.func @if_else_imperfect(%A : memref<100xi32>, %B : memref<100xi32>, %v : i32) {
 // CHECK: %[[A:.*]]: memref<100xi32>, %[[B:.*]]: memref
   affine.for %i = 0 to 100 {
     affine.store %v, %A[%i] : memref<100xi32>
@@ -18,7 +18,7 @@ func @if_else_imperfect(%A : memref<100xi32>, %B : memref<100xi32>, %v : i32) {
   }
   return
 }
-func private @external()
+func.func private @external()
 
 // CHECK:       affine.for %[[I:.*]] = 0 to 100 {
 // CHECK-NEXT:    affine.store %{{.*}}, %[[A]][%[[I]]]
@@ -40,13 +40,13 @@ func private @external()
 
 // -----
 
-func private @foo()
-func private @bar()
-func private @abc()
-func private @xyz()
+func.func private @foo()
+func.func private @bar()
+func.func private @abc()
+func.func private @xyz()
 
 // CHECK-LABEL: func @if_then_perfect
-func @if_then_perfect(%A : memref<100xi32>, %v : i32) {
+func.func @if_then_perfect(%A : memref<100xi32>, %v : i32) {
   affine.for %i = 0 to 100 {
     affine.for %j = 0 to 100 {
       affine.for %k = 0 to 100 {
@@ -66,7 +66,7 @@ func @if_then_perfect(%A : memref<100xi32>, %v : i32) {
 
 
 // CHECK-LABEL: func @if_else_perfect
-func @if_else_perfect(%A : memref<100xi32>, %v : i32) {
+func.func @if_else_perfect(%A : memref<100xi32>, %v : i32) {
   affine.for %i = 0 to 99 {
     affine.for %j = 0 to 100 {
       affine.for %k = 0 to 100 {
@@ -105,7 +105,7 @@ func @if_else_perfect(%A : memref<100xi32>, %v : i32) {
 // CHECK-NEXT: }
 
 // CHECK-LABEL: func @if_then_imperfect
-func @if_then_imperfect(%A : memref<100xi32>, %N : index, %v: i32) {
+func.func @if_then_imperfect(%A : memref<100xi32>, %N : index, %v: i32) {
   affine.for %i = 0 to 100 {
     affine.store %v, %A[0] : memref<100xi32>
     affine.if affine_set<(d0) : (d0 - 2 >= 0)>(%N) {
@@ -128,7 +128,7 @@ func @if_then_imperfect(%A : memref<100xi32>, %N : index, %v: i32) {
 
 // Check if unused operands are dropped: hence, hoisting is possible.
 // CHECK-LABEL: func @hoist_after_canonicalize
-func @hoist_after_canonicalize() {
+func.func @hoist_after_canonicalize() {
   affine.for %i = 0 to 100 {
     affine.for %j = 0 to 100 {
       affine.if affine_set<(d0) : (d0 - 2 >= 0)>(%j)  {
@@ -153,7 +153,7 @@ func @hoist_after_canonicalize() {
 // CHECK-NEXT: return
 
 // CHECK-LABEL: func @handle_dead_if
-func @handle_dead_if(%N : index) {
+func.func @handle_dead_if(%N : index) {
   affine.for %i = 0 to 100 {
       affine.if affine_set<(d0) : (d0 - 1 >= 0, -d0 + 99 >= 0)>(%N)  {
       }
@@ -182,7 +182,7 @@ func @handle_dead_if(%N : index) {
 #set0 = affine_set<(d0, d1)[s0, s1] : (d0 * -16 + s0 - 16 >= 0, d1 * -3 + s1 - 3 >= 0)>
 
 // CHECK-LABEL: func @perfect_if_else
-func @perfect_if_else(%arg0 : memref<?x?xf64>, %arg1 : memref<?x?xf64>, %v : f64,
+func.func @perfect_if_else(%arg0 : memref<?x?xf64>, %arg1 : memref<?x?xf64>, %v : f64,
             %arg4 : index, %arg5 : index, %arg6 : index, %sym : index) {
   affine.for %arg7 = #lb0(%arg5) to min #ub0(%arg5)[%sym] {
     affine.parallel (%i0, %j0) = (0, 0) to (symbol(%sym), 100) step (10, 10) {
@@ -234,7 +234,7 @@ func @perfect_if_else(%arg0 : memref<?x?xf64>, %arg1 : memref<?x?xf64>, %v : f64
 // op that it is able to successfully hoist.
 
 // CHECK-LABEL: func @multiple_if
-func @multiple_if(%N : index) {
+func.func @multiple_if(%N : index) {
   affine.if affine_set<() : (0 == 0)>() {
     call @external() : () -> ()
   }
@@ -253,4 +253,4 @@ func @multiple_if(%N : index) {
 // CHECK-NEXT: }
 // CHECK-NEXT: return
 
-func private @external()
+func.func private @external()

diff  --git a/mlir/test/Dialect/Affine/memref-stride-calculation.mlir b/mlir/test/Dialect/Affine/memref-stride-calculation.mlir
index 1b39756746bc8..8a0fc4e251869 100644
--- a/mlir/test/Dialect/Affine/memref-stride-calculation.mlir
+++ b/mlir/test/Dialect/Affine/memref-stride-calculation.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -pass-pipeline="func.func(test-memref-stride-calculation)" -o /dev/null | FileCheck %s
 
-func @f(%0: index) {
+func.func @f(%0: index) {
 // CHECK-LABEL: Testing: f
   %1 = memref.alloc() : memref<3x4x5xf32>
 // CHECK: MemRefType offset: 0 strides: 20, 5, 1

diff  --git a/mlir/test/Dialect/Affine/ops.mlir b/mlir/test/Dialect/Affine/ops.mlir
index f792a87281b8e..ad6f3651c1b21 100644
--- a/mlir/test/Dialect/Affine/ops.mlir
+++ b/mlir/test/Dialect/Affine/ops.mlir
@@ -4,7 +4,7 @@
 // Check that the attributes for the affine operations are round-tripped.
 // Check that `affine.yield` is visible in the generic form.
 // CHECK-LABEL: @empty
-func @empty() {
+func.func @empty() {
   // CHECK: affine.for
   // CHECK-NEXT: } {some_attr = true}
   //
@@ -45,7 +45,7 @@ func @empty() {
 // Check that an explicit affine.yield is not printed in custom format.
 // Check that no extra terminator is introduced.
 // CHECK-LABEL: @affine.yield
-func @affine.yield() {
+func.func @affine.yield() {
   // CHECK: affine.for
   // CHECK-NEXT: }
   //
@@ -67,7 +67,7 @@ func @affine.yield() {
 // CHECK-DAG: #[[$MAP3:map[0-9]+]] = affine_map<() -> (77, 78, 79)>
 
 // CHECK-LABEL: @affine_min
-func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK: affine.min #[[$MAP0]](%arg0)[%arg1]
   %0 = affine.min affine_map<(d0)[s0] -> (1000, d0 + 512, s0)> (%arg0)[%arg1]
   // CHECK: affine.min #[[$MAP1]](%arg0, %arg1)[%arg2]
@@ -80,7 +80,7 @@ func @affine_min(%arg0 : index, %arg1 : index, %arg2 : index) {
 }
 
 // CHECK-LABEL: @affine_max
-func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK: affine.max #[[$MAP0]](%arg0)[%arg1]
   %0 = affine.max affine_map<(d0)[s0] -> (1000, d0 + 512, s0)> (%arg0)[%arg1]
   // CHECK: affine.max #[[$MAP1]](%arg0, %arg1)[%arg2]
@@ -94,7 +94,7 @@ func @affine_max(%arg0 : index, %arg1 : index, %arg2 : index) {
 
 // -----
 
-func @valid_symbols(%arg0: index, %arg1: index, %arg2: index) {
+func.func @valid_symbols(%arg0: index, %arg1: index, %arg2: index) {
   %c1 = arith.constant 1 : index
   %c0 = arith.constant 0 : index
   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
@@ -118,7 +118,7 @@ func @valid_symbols(%arg0: index, %arg1: index, %arg2: index) {
 // Test symbol constraints for ops with AffineScope trait.
 
 // CHECK-LABEL: func @valid_symbol_affine_scope
-func @valid_symbol_affine_scope(%n : index, %A : memref<?xf32>) {
+func.func @valid_symbol_affine_scope(%n : index, %A : memref<?xf32>) {
   test.affine_scope {
     %c1 = arith.constant 1 : index
     %l = arith.subi %n, %c1 : index
@@ -155,7 +155,7 @@ func @valid_symbol_affine_scope(%n : index, %A : memref<?xf32>) {
 
 // CHECK-LABEL: func @parallel
 // CHECK-SAME: (%[[A:.*]]: memref<100x100xf32>, %[[N:.*]]: index)
-func @parallel(%A : memref<100x100xf32>, %N : index) {
+func.func @parallel(%A : memref<100x100xf32>, %N : index) {
   // CHECK: affine.parallel (%[[I0:.*]], %[[J0:.*]]) = (0, 0) to (symbol(%[[N]]), 100) step (10, 10)
   affine.parallel (%i0, %j0) = (0, 0) to (symbol(%N), 100) step (10, 10) {
     // CHECK: affine.parallel (%{{.*}}, %{{.*}}) = (%[[I0]], %[[J0]]) to (%[[I0]] + 10, %[[J0]] + 10) reduce ("minf", "maxf") -> (f32, f32)
@@ -171,7 +171,7 @@ func @parallel(%A : memref<100x100xf32>, %N : index) {
 
 // CHECK-LABEL: @parallel_min_max
 // CHECK: %[[A:.*]]: index, %[[B:.*]]: index, %[[C:.*]]: index, %[[D:.*]]: index
-func @parallel_min_max(%a: index, %b: index, %c: index, %d: index) {
+func.func @parallel_min_max(%a: index, %b: index, %c: index, %d: index) {
   // CHECK: affine.parallel (%{{.*}}, %{{.*}}, %{{.*}}) =
   // CHECK:                 (max(%[[A]], %[[B]])
   // CHECK:              to (%[[C]], min(%[[C]], %[[D]]), %[[B]])
@@ -185,7 +185,7 @@ func @parallel_min_max(%a: index, %b: index, %c: index, %d: index) {
 // -----
 
 // CHECK-LABEL: @parallel_no_ivs
-func @parallel_no_ivs() {
+func.func @parallel_no_ivs() {
   // CHECK: affine.parallel () = () to ()
   affine.parallel () = () to () {
     affine.yield
@@ -196,7 +196,7 @@ func @parallel_no_ivs() {
 // -----
 
 // CHECK-LABEL: func @affine_if
-func @affine_if() -> f32 {
+func.func @affine_if() -> f32 {
   // CHECK: %[[ZERO:.*]] = arith.constant {{.*}} : f32
   %zero = arith.constant 0.0 : f32
   // CHECK: %[[OUT:.*]] = affine.if {{.*}}() -> f32 {
@@ -218,7 +218,7 @@ func @affine_if() -> f32 {
 #set = affine_set<(d0): (d0 - 10 >= 0)>
 
 // CHECK-LABEL: func @yield_loop
-func @yield_loop(%buffer: memref<1024xf32>) -> f32 {
+func.func @yield_loop(%buffer: memref<1024xf32>) -> f32 {
   %sum_init_0 = arith.constant 0.0 : f32
   %res = affine.for %i = 0 to 10 step 2 iter_args(%sum_iter = %sum_init_0) -> f32 {
     %t = affine.load %buffer[%i] : memref<1024xf32>
@@ -244,7 +244,7 @@ func @yield_loop(%buffer: memref<1024xf32>) -> f32 {
 // CHECK-NEXT: return %[[output]] : f32
 
 // CHECK-LABEL: func @affine_for_multiple_yield
-func @affine_for_multiple_yield(%buffer: memref<1024xf32>) -> (f32, f32) {
+func.func @affine_for_multiple_yield(%buffer: memref<1024xf32>) -> (f32, f32) {
   %init_0 = arith.constant 0.0 : f32
   %res1, %res2 = affine.for %i = 0 to 10 step 2 iter_args(%iter_arg1 = %init_0, %iter_arg2 = %init_0) -> (f32, f32) {
     %t = affine.load %buffer[%i] : memref<1024xf32>

diff  --git a/mlir/test/Dialect/Affine/parallelize.mlir b/mlir/test/Dialect/Affine/parallelize.mlir
index c1852f3f57b58..b0e121543f2d6 100644
--- a/mlir/test/Dialect/Affine/parallelize.mlir
+++ b/mlir/test/Dialect/Affine/parallelize.mlir
@@ -3,7 +3,7 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect -affine-parallelize='parallel-reductions=1' | FileCheck --check-prefix=REDUCE %s
 
 // CHECK-LABEL:    func @reduce_window_max() {
-func @reduce_window_max() {
+func.func @reduce_window_max() {
   %cst = arith.constant 0.000000e+00 : f32
   %0 = memref.alloc() : memref<1x8x8x64xf32>
   %1 = memref.alloc() : memref<1x18x18x64xf32>
@@ -75,7 +75,7 @@ func @reduce_window_max() {
 // CHECK:        }
 // CHECK:      }
 
-func @loop_nest_3d_outer_two_parallel(%N : index) {
+func.func @loop_nest_3d_outer_two_parallel(%N : index) {
   %0 = memref.alloc() : memref<1024 x 1024 x vector<64xf32>>
   %1 = memref.alloc() : memref<1024 x 1024 x vector<64xf32>>
   %2 = memref.alloc() : memref<1024 x 1024 x vector<64xf32>>
@@ -99,7 +99,7 @@ func @loop_nest_3d_outer_two_parallel(%N : index) {
 // CHECK:          affine.for %[[arg3:.*]] = 0 to %arg0 {
 
 // CHECK-LABEL: unknown_op_conservative
-func @unknown_op_conservative() {
+func.func @unknown_op_conservative() {
   affine.for %i = 0 to 10 {
 // CHECK:  affine.for %[[arg1:.*]] = 0 to 10 {
     "unknown"() : () -> ()
@@ -108,7 +108,7 @@ func @unknown_op_conservative() {
 }
 
 // CHECK-LABEL: non_affine_load
-func @non_affine_load() {
+func.func @non_affine_load() {
   %0 = memref.alloc() : memref<100 x f32>
   affine.for %i = 0 to 100 {
 // CHECK:  affine.for %{{.*}} = 0 to 100 {
@@ -118,7 +118,7 @@ func @non_affine_load() {
 }
 
 // CHECK-LABEL: for_with_minmax
-func @for_with_minmax(%m: memref<?xf32>, %lb0: index, %lb1: index,
+func.func @for_with_minmax(%m: memref<?xf32>, %lb0: index, %lb1: index,
                       %ub0: index, %ub1: index) {
   // CHECK: affine.parallel (%{{.*}}) = (max(%{{.*}}, %{{.*}})) to (min(%{{.*}}, %{{.*}}))
   affine.for %i = max affine_map<(d0, d1) -> (d0, d1)>(%lb0, %lb1)
@@ -129,7 +129,7 @@ func @for_with_minmax(%m: memref<?xf32>, %lb0: index, %lb1: index,
 }
 
 // CHECK-LABEL: nested_for_with_minmax
-func @nested_for_with_minmax(%m: memref<?xf32>, %lb0: index,
+func.func @nested_for_with_minmax(%m: memref<?xf32>, %lb0: index,
                              %ub0: index, %ub1: index) {
   // CHECK: affine.parallel (%[[I:.*]]) =
   affine.for %j = 0 to 10 {
@@ -143,7 +143,7 @@ func @nested_for_with_minmax(%m: memref<?xf32>, %lb0: index,
 }
 
 // MAX-NESTED-LABEL: @max_nested
-func @max_nested(%m: memref<?x?xf32>, %lb0: index, %lb1: index,
+func.func @max_nested(%m: memref<?x?xf32>, %lb0: index, %lb1: index,
                  %ub0: index, %ub1: index) {
   // MAX-NESTED: affine.parallel
   affine.for %i = affine_map<(d0) -> (d0)>(%lb0) to affine_map<(d0) -> (d0)>(%ub0) {
@@ -156,7 +156,7 @@ func @max_nested(%m: memref<?x?xf32>, %lb0: index, %lb1: index,
 }
 
 // MAX-NESTED-LABEL: @max_nested_1
-func @max_nested_1(%arg0: memref<4096x4096xf32>, %arg1: memref<4096x4096xf32>, %arg2: memref<4096x4096xf32>) {
+func.func @max_nested_1(%arg0: memref<4096x4096xf32>, %arg1: memref<4096x4096xf32>, %arg2: memref<4096x4096xf32>) {
   %0 = memref.alloc() : memref<4096x4096xf32>
   // MAX-NESTED: affine.parallel
   affine.for %arg3 = 0 to 4096 {
@@ -178,7 +178,7 @@ func @max_nested_1(%arg0: memref<4096x4096xf32>, %arg1: memref<4096x4096xf32>, %
 
 // CHECK-LABEL: @iter_args
 // REDUCE-LABEL: @iter_args
-func @iter_args(%in: memref<10xf32>) {
+func.func @iter_args(%in: memref<10xf32>) {
   // REDUCE: %[[init:.*]] = arith.constant
   %cst = arith.constant 0.000000e+00 : f32
   // CHECK-NOT: affine.parallel
@@ -197,7 +197,7 @@ func @iter_args(%in: memref<10xf32>) {
 
 // CHECK-LABEL: @nested_iter_args
 // REDUCE-LABEL: @nested_iter_args
-func @nested_iter_args(%in: memref<20x10xf32>) {
+func.func @nested_iter_args(%in: memref<20x10xf32>) {
   %cst = arith.constant 0.000000e+00 : f32
   // CHECK: affine.parallel
   affine.for %i = 0 to 20 {
@@ -214,7 +214,7 @@ func @nested_iter_args(%in: memref<20x10xf32>) {
 }
 
 // REDUCE-LABEL: @strange_butterfly
-func @strange_butterfly() {
+func.func @strange_butterfly() {
   %cst1 = arith.constant 0.0 : f32
   %cst2 = arith.constant 1.0 : f32
   // REDUCE-NOT: affine.parallel
@@ -228,7 +228,7 @@ func @strange_butterfly() {
 // An iter arg is used more than once. This is not a simple reduction and
 // should not be parallelized.
 // REDUCE-LABEL: @repeated_use
-func @repeated_use() {
+func.func @repeated_use() {
   %cst1 = arith.constant 0.0 : f32
   // REDUCE-NOT: affine.parallel
   affine.for %i = 0 to 10 iter_args(%it1 = %cst1) -> (f32) {
@@ -241,7 +241,7 @@ func @repeated_use() {
 // An iter arg is used in the chain of operations defining the value being
 // reduced, this is not a simple reduction and should not be parallelized.
 // REDUCE-LABEL: @use_in_backward_slice
-func @use_in_backward_slice() {
+func.func @use_in_backward_slice() {
   %cst1 = arith.constant 0.0 : f32
   %cst2 = arith.constant 1.0 : f32
   // REDUCE-NOT: affine.parallel
@@ -256,7 +256,7 @@ func @use_in_backward_slice() {
 // REDUCE-LABEL: @nested_min_max
 // CHECK-LABEL: @nested_min_max
 // CHECK: (%{{.*}}, %[[LB0:.*]]: index, %[[UB0:.*]]: index, %[[UB1:.*]]: index)
-func @nested_min_max(%m: memref<?xf32>, %lb0: index,
+func.func @nested_min_max(%m: memref<?xf32>, %lb0: index,
                      %ub0: index, %ub1: index) {
   // CHECK: affine.parallel (%[[J:.*]]) =
   affine.for %j = 0 to 10 {
@@ -273,7 +273,7 @@ func @nested_min_max(%m: memref<?xf32>, %lb0: index,
 // Test in the presence of locally allocated memrefs.
 
 // CHECK: func @local_alloc
-func @local_alloc() {
+func.func @local_alloc() {
   %cst = arith.constant 0.0 : f32
   affine.for %i = 0 to 100 {
     %m = memref.alloc() : memref<1xf32>
@@ -285,7 +285,7 @@ func @local_alloc() {
 }
 
 // CHECK: func @local_alloc_cast
-func @local_alloc_cast() {
+func.func @local_alloc_cast() {
   %cst = arith.constant 0.0 : f32
   affine.for %i = 0 to 100 {
     %m = memref.alloc() : memref<128xf32>
@@ -314,7 +314,7 @@ func @local_alloc_cast() {
 }
 
 // CHECK-LABEL: @iter_arg_memrefs
-func @iter_arg_memrefs(%in: memref<10xf32>) {
+func.func @iter_arg_memrefs(%in: memref<10xf32>) {
   %mi = memref.alloc() : memref<f32>
   // Loop-carried memrefs are treated as serializing the loop.
   // CHECK: affine.for

diff  --git a/mlir/test/Dialect/Affine/scalrep.mlir b/mlir/test/Dialect/Affine/scalrep.mlir
index 7a204745591b1..4c09e3e27fbde 100644
--- a/mlir/test/Dialect/Affine/scalrep.mlir
+++ b/mlir/test/Dialect/Affine/scalrep.mlir
@@ -7,7 +7,7 @@
 // CHECK-DAG: [[$MAP4:#map[0-9]+]] = affine_map<(d0) -> (d0 + 1)>
 
 // CHECK-LABEL: func @simple_store_load() {
-func @simple_store_load() {
+func.func @simple_store_load() {
   %cf7 = arith.constant 7.0 : f32
   %m = memref.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
@@ -24,7 +24,7 @@ func @simple_store_load() {
 }
 
 // CHECK-LABEL: func @multi_store_load() {
-func @multi_store_load() {
+func.func @multi_store_load() {
   %c0 = arith.constant 0 : index
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
@@ -56,7 +56,7 @@ func @multi_store_load() {
 // The store-load forwarding can see through affine apply's since it relies on
 // dependence information.
 // CHECK-LABEL: func @store_load_affine_apply
-func @store_load_affine_apply() -> memref<10x10xf32> {
+func.func @store_load_affine_apply() -> memref<10x10xf32> {
   %cf7 = arith.constant 7.0 : f32
   %m = memref.alloc() : memref<10x10xf32>
   affine.for %i0 = 0 to 10 {
@@ -89,7 +89,7 @@ func @store_load_affine_apply() -> memref<10x10xf32> {
 }
 
 // CHECK-LABEL: func @store_load_nested
-func @store_load_nested(%N : index) {
+func.func @store_load_nested(%N : index) {
   %cf7 = arith.constant 7.0 : f32
   %m = memref.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
@@ -113,7 +113,7 @@ func @store_load_nested(%N : index) {
 // writer; store/load forwarding will however be possible here once loop live
 // out SSA scalars are available.
 // CHECK-LABEL: func @multi_store_load_nested_no_fwd
-func @multi_store_load_nested_no_fwd(%N : index) {
+func.func @multi_store_load_nested_no_fwd(%N : index) {
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
   %m = memref.alloc() : memref<10xf32>
@@ -134,7 +134,7 @@ func @multi_store_load_nested_no_fwd(%N : index) {
 // No forwarding happens here since both stores have a value going into
 // the load.
 // CHECK-LABEL: func @store_load_store_nested_no_fwd
-func @store_load_store_nested_no_fwd(%N : index) {
+func.func @store_load_store_nested_no_fwd(%N : index) {
   %cf7 = arith.constant 7.0 : f32
   %cf9 = arith.constant 9.0 : f32
   %m = memref.alloc() : memref<10xf32>
@@ -153,7 +153,7 @@ func @store_load_store_nested_no_fwd(%N : index) {
 // Forwarding happens here since the last store postdominates all other stores
 // and other forwarding criteria are satisfied.
 // CHECK-LABEL: func @multi_store_load_nested_fwd
-func @multi_store_load_nested_fwd(%N : index) {
+func.func @multi_store_load_nested_fwd(%N : index) {
   %cf7 = arith.constant 7.0 : f32
   %cf8 = arith.constant 8.0 : f32
   %cf9 = arith.constant 9.0 : f32
@@ -179,7 +179,7 @@ func @multi_store_load_nested_fwd(%N : index) {
 
 // There is no unique load location for the store to forward to.
 // CHECK-LABEL: func @store_load_no_fwd
-func @store_load_no_fwd() {
+func.func @store_load_no_fwd() {
   %cf7 = arith.constant 7.0 : f32
   %m = memref.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
@@ -197,7 +197,7 @@ func @store_load_no_fwd() {
 
 // Forwarding happens here as there is a one-to-one store-load correspondence.
 // CHECK-LABEL: func @store_load_fwd
-func @store_load_fwd() {
+func.func @store_load_fwd() {
   %cf7 = arith.constant 7.0 : f32
   %c0 = arith.constant 0 : index
   %m = memref.alloc() : memref<10xf32>
@@ -217,7 +217,7 @@ func @store_load_fwd() {
 // Although there is a dependence from the second store to the load, it is
 // satisfied by the outer surrounding loop, and does not prevent the first
 // store to be forwarded to the load.
-func @store_load_store_nested_fwd(%N : index) -> f32 {
+func.func @store_load_store_nested_fwd(%N : index) -> f32 {
   %cf7 = arith.constant 7.0 : f32
   %cf9 = arith.constant 9.0 : f32
   %c0 = arith.constant 0 : index
@@ -249,7 +249,7 @@ func @store_load_store_nested_fwd(%N : index) -> f32 {
 }
 
 // CHECK-LABEL: func @should_not_fwd
-func @should_not_fwd(%A: memref<100xf32>, %M : index, %N : index) -> f32 {
+func.func @should_not_fwd(%A: memref<100xf32>, %M : index, %N : index) -> f32 {
   %cf = arith.constant 0.0 : f32
   affine.store %cf, %A[%M] : memref<100xf32>
   // CHECK: affine.load %{{.*}}[%{{.*}}]
@@ -259,7 +259,7 @@ func @should_not_fwd(%A: memref<100xf32>, %M : index, %N : index) -> f32 {
 
 // Can store forward to A[%j, %i], but no forwarding to load on %A[%i, %j]
 // CHECK-LABEL: func @refs_not_known_to_be_equal
-func @refs_not_known_to_be_equal(%A : memref<100 x 100 x f32>, %M : index) {
+func.func @refs_not_known_to_be_equal(%A : memref<100 x 100 x f32>, %M : index) {
   %N = affine.apply affine_map<(d0) -> (d0 + 1)> (%M)
   %cf1 = arith.constant 1.0 : f32
   affine.for %i = 0 to 100 {
@@ -284,7 +284,7 @@ func @refs_not_known_to_be_equal(%A : memref<100 x 100 x f32>, %M : index) {
 // The test checks for value forwarding from vector stores to vector loads.
 // The value loaded from %in can directly be stored to %out by eliminating
 // store and load from %tmp.
-func @vector_forwarding(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @vector_forwarding(%in : memref<512xf32>, %out : memref<512xf32>) {
   %tmp = memref.alloc() : memref<512xf32>
   affine.for %i = 0 to 16 {
     %ld0 = affine.vector_load %in[32*%i] : memref<512xf32>, vector<32xf32>
@@ -301,7 +301,7 @@ func @vector_forwarding(%in : memref<512xf32>, %out : memref<512xf32>) {
 // CHECK-NEXT:   affine.vector_store %[[LDVAL]],{{.*}}
 // CHECK-NEXT: }
 
-func @vector_no_forwarding(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @vector_no_forwarding(%in : memref<512xf32>, %out : memref<512xf32>) {
   %tmp = memref.alloc() : memref<512xf32>
   affine.for %i = 0 to 16 {
     %ld0 = affine.vector_load %in[32*%i] : memref<512xf32>, vector<32xf32>
@@ -321,7 +321,7 @@ func @vector_no_forwarding(%in : memref<512xf32>, %out : memref<512xf32>) {
 // CHECK-NEXT: }
 
 // CHECK-LABEL: func @simple_three_loads
-func @simple_three_loads(%in : memref<10xf32>) {
+func.func @simple_three_loads(%in : memref<10xf32>) {
   affine.for %i0 = 0 to 10 {
     // CHECK:       affine.load
     %v0 = affine.load %in[%i0] : memref<10xf32>
@@ -335,7 +335,7 @@ func @simple_three_loads(%in : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @nested_loads_const_index
-func @nested_loads_const_index(%in : memref<10xf32>) {
+func.func @nested_loads_const_index(%in : memref<10xf32>) {
   %c0 = arith.constant 0 : index
   // CHECK:       affine.load
   %v0 = affine.load %in[%c0] : memref<10xf32>
@@ -352,7 +352,7 @@ func @nested_loads_const_index(%in : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @nested_loads
-func @nested_loads(%N : index, %in : memref<10xf32>) {
+func.func @nested_loads(%N : index, %in : memref<10xf32>) {
   affine.for %i0 = 0 to 10 {
     // CHECK:       affine.load
     %v0 = affine.load %in[%i0] : memref<10xf32>
@@ -366,7 +366,7 @@ func @nested_loads(%N : index, %in : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @nested_loads_
diff erent_memref_accesses_no_cse
-func @nested_loads_
diff erent_memref_accesses_no_cse(%in : memref<10xf32>) {
+func.func @nested_loads_
diff erent_memref_accesses_no_cse(%in : memref<10xf32>) {
   affine.for %i0 = 0 to 10 {
     // CHECK:       affine.load
     %v0 = affine.load %in[%i0] : memref<10xf32>
@@ -380,7 +380,7 @@ func @nested_loads_
diff erent_memref_accesses_no_cse(%in : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @load_load_store
-func @load_load_store(%m : memref<10xf32>) {
+func.func @load_load_store(%m : memref<10xf32>) {
   affine.for %i0 = 0 to 10 {
     // CHECK:       affine.load
     %v0 = affine.load %m[%i0] : memref<10xf32>
@@ -393,7 +393,7 @@ func @load_load_store(%m : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @load_load_store_2_loops_no_cse
-func @load_load_store_2_loops_no_cse(%N : index, %m : memref<10xf32>) {
+func.func @load_load_store_2_loops_no_cse(%N : index, %m : memref<10xf32>) {
   affine.for %i0 = 0 to 10 {
     // CHECK:       affine.load
     %v0 = affine.load %m[%i0] : memref<10xf32>
@@ -408,7 +408,7 @@ func @load_load_store_2_loops_no_cse(%N : index, %m : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @load_load_store_3_loops_no_cse
-func @load_load_store_3_loops_no_cse(%m : memref<10xf32>) {
+func.func @load_load_store_3_loops_no_cse(%m : memref<10xf32>) {
 %cf1 = arith.constant 1.0 : f32
   affine.for %i0 = 0 to 10 {
     // CHECK:       affine.load
@@ -426,7 +426,7 @@ func @load_load_store_3_loops_no_cse(%m : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @load_load_store_3_loops
-func @load_load_store_3_loops(%m : memref<10xf32>) {
+func.func @load_load_store_3_loops(%m : memref<10xf32>) {
 %cf1 = arith.constant 1.0 : f32
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 20 {
@@ -444,7 +444,7 @@ func @load_load_store_3_loops(%m : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @loads_in_sibling_loops_const_index_no_cse
-func @loads_in_sibling_loops_const_index_no_cse(%m : memref<10xf32>) {
+func.func @loads_in_sibling_loops_const_index_no_cse(%m : memref<10xf32>) {
   %c0 = arith.constant 0 : index
   affine.for %i0 = 0 to 10 {
     // CHECK:       affine.load
@@ -459,7 +459,7 @@ func @loads_in_sibling_loops_const_index_no_cse(%m : memref<10xf32>) {
 }
 
 // CHECK-LABEL: func @load_load_affine_apply
-func @load_load_affine_apply(%in : memref<10x10xf32>) {
+func.func @load_load_affine_apply(%in : memref<10x10xf32>) {
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       %t0 = affine.apply affine_map<(d0, d1) -> (d1 + 1)>(%i0, %i1)
@@ -477,7 +477,7 @@ func @load_load_affine_apply(%in : memref<10x10xf32>) {
 }
 
 // CHECK-LABEL: func @vector_loads
-func @vector_loads(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @vector_loads(%in : memref<512xf32>, %out : memref<512xf32>) {
   affine.for %i = 0 to 16 {
     // CHECK:       affine.vector_load
     %ld0 = affine.vector_load %in[32*%i] : memref<512xf32>, vector<32xf32>
@@ -490,7 +490,7 @@ func @vector_loads(%in : memref<512xf32>, %out : memref<512xf32>) {
 }
 
 // CHECK-LABEL: func @vector_loads_no_cse
-func @vector_loads_no_cse(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @vector_loads_no_cse(%in : memref<512xf32>, %out : memref<512xf32>) {
   affine.for %i = 0 to 16 {
     // CHECK:       affine.vector_load
     %ld0 = affine.vector_load %in[32*%i] : memref<512xf32>, vector<32xf32>
@@ -502,7 +502,7 @@ func @vector_loads_no_cse(%in : memref<512xf32>, %out : memref<512xf32>) {
 }
 
 // CHECK-LABEL: func @vector_load_store_load_no_cse
-func @vector_load_store_load_no_cse(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @vector_load_store_load_no_cse(%in : memref<512xf32>, %out : memref<512xf32>) {
   affine.for %i = 0 to 16 {
     // CHECK:       affine.vector_load
     %ld0 = affine.vector_load %in[32*%i] : memref<512xf32>, vector<32xf32>
@@ -516,7 +516,7 @@ func @vector_load_store_load_no_cse(%in : memref<512xf32>, %out : memref<512xf32
 }
 
 // CHECK-LABEL: func @reduction_multi_store
-func @reduction_multi_store() -> memref<1xf32> {
+func.func @reduction_multi_store() -> memref<1xf32> {
   %A = memref.alloc() : memref<1xf32>
   %cf0 = arith.constant 0.0 : f32
   %cf5 = arith.constant 5.0 : f32
@@ -540,7 +540,7 @@ func @reduction_multi_store() -> memref<1xf32> {
 }
 
 // CHECK-LABEL: func @vector_load_affine_apply_store_load
-func @vector_load_affine_apply_store_load(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @vector_load_affine_apply_store_load(%in : memref<512xf32>, %out : memref<512xf32>) {
   %cf1 = arith.constant 1: index
   affine.for %i = 0 to 15 {
     // CHECK:       affine.vector_load
@@ -557,7 +557,7 @@ func @vector_load_affine_apply_store_load(%in : memref<512xf32>, %out : memref<5
 
 // CHECK-LABEL: func @external_no_forward_load
 
-func @external_no_forward_load(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @external_no_forward_load(%in : memref<512xf32>, %out : memref<512xf32>) {
   affine.for %i = 0 to 16 {
     %ld0 = affine.load %in[32*%i] : memref<512xf32>
     affine.store %ld0, %out[32*%i] : memref<512xf32>
@@ -574,7 +574,7 @@ func @external_no_forward_load(%in : memref<512xf32>, %out : memref<512xf32>) {
 
 // CHECK-LABEL: func @external_no_forward_store
 
-func @external_no_forward_store(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @external_no_forward_store(%in : memref<512xf32>, %out : memref<512xf32>) {
   %cf1 = arith.constant 1.0 : f32
   affine.for %i = 0 to 16 {
     affine.store %cf1, %in[32*%i] : memref<512xf32>
@@ -590,7 +590,7 @@ func @external_no_forward_store(%in : memref<512xf32>, %out : memref<512xf32>) {
 
 // CHECK-LABEL: func @no_forward_cast
 
-func @no_forward_cast(%in : memref<512xf32>, %out : memref<512xf32>) {
+func.func @no_forward_cast(%in : memref<512xf32>, %out : memref<512xf32>) {
   %cf1 = arith.constant 1.0 : f32
   %cf2 = arith.constant 2.0 : f32
   %m2 = memref.cast %in : memref<512xf32> to memref<?xf32>
@@ -612,7 +612,7 @@ func @no_forward_cast(%in : memref<512xf32>, %out : memref<512xf32>) {
 // store to be forwarded to the load.
 
 // CHECK-LABEL: func @overlap_no_fwd
-func @overlap_no_fwd(%N : index) -> f32 {
+func.func @overlap_no_fwd(%N : index) -> f32 {
   %cf7 = arith.constant 7.0 : f32
   %cf9 = arith.constant 9.0 : f32
   %c0 = arith.constant 0 : index
@@ -644,7 +644,7 @@ func @overlap_no_fwd(%N : index) -> f32 {
 
 // CHECK-LABEL: func @redundant_store_elim
 
-func @redundant_store_elim(%out : memref<512xf32>) {
+func.func @redundant_store_elim(%out : memref<512xf32>) {
   %cf1 = arith.constant 1.0 : f32
   %cf2 = arith.constant 2.0 : f32
   affine.for %i = 0 to 16 {
@@ -660,7 +660,7 @@ func @redundant_store_elim(%out : memref<512xf32>) {
 
 // CHECK-LABEL: func @redundant_store_elim_fail
 
-func @redundant_store_elim_fail(%out : memref<512xf32>) {
+func.func @redundant_store_elim_fail(%out : memref<512xf32>) {
   %cf1 = arith.constant 1.0 : f32
   %cf2 = arith.constant 2.0 : f32
   affine.for %i = 0 to 16 {
@@ -677,7 +677,7 @@ func @redundant_store_elim_fail(%out : memref<512xf32>) {
 // CHECK-NEXT: }
 
 // CHECK-LABEL: @with_inner_ops
-func @with_inner_ops(%arg0: memref<?xf64>, %arg1: memref<?xf64>, %arg2: i1) {
+func.func @with_inner_ops(%arg0: memref<?xf64>, %arg1: memref<?xf64>, %arg2: i1) {
   %cst = arith.constant 0.000000e+00 : f64
   %cst_0 = arith.constant 3.140000e+00 : f64
   %cst_1 = arith.constant 1.000000e+00 : f64

diff  --git a/mlir/test/Dialect/Affine/simplify-structures.mlir b/mlir/test/Dialect/Affine/simplify-structures.mlir
index 826f98f0c043c..1cb5eb26cb3f9 100644
--- a/mlir/test/Dialect/Affine/simplify-structures.mlir
+++ b/mlir/test/Dialect/Affine/simplify-structures.mlir
@@ -4,10 +4,10 @@
 // CHECK-DAG: #[[$SET_7_11:.*]] = affine_set<(d0, d1) : (d0 * 7 + d1 * 5 + 88 == 0, d0 * 5 - d1 * 11 + 60 == 0, d0 * 11 + d1 * 7 - 24 == 0, d0 * 7 + d1 * 5 + 88 == 0)>
 
 // An external function that we will use in bodies to avoid DCE.
-func private @external() -> ()
+func.func private @external() -> ()
 
 // CHECK-LABEL: func @test_gaussian_elimination_empty_set0() {
-func @test_gaussian_elimination_empty_set0() {
+func.func @test_gaussian_elimination_empty_set0() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
       // CHECK-NOT: affine.if
@@ -20,7 +20,7 @@ func @test_gaussian_elimination_empty_set0() {
 }
 
 // CHECK-LABEL: func @test_gaussian_elimination_empty_set1() {
-func @test_gaussian_elimination_empty_set1() {
+func.func @test_gaussian_elimination_empty_set1() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
       // CHECK-NOT: affine.if
@@ -33,7 +33,7 @@ func @test_gaussian_elimination_empty_set1() {
 }
 
 // CHECK-LABEL: func @test_gaussian_elimination_non_empty_set2() {
-func @test_gaussian_elimination_non_empty_set2() {
+func.func @test_gaussian_elimination_non_empty_set2() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
       // CHECK: #[[$SET_2D]](%arg0, %arg1)
@@ -46,7 +46,7 @@ func @test_gaussian_elimination_non_empty_set2() {
 }
 
 // CHECK-LABEL: func @test_gaussian_elimination_empty_set3() {
-func @test_gaussian_elimination_empty_set3() {
+func.func @test_gaussian_elimination_empty_set3() {
   %c7 = arith.constant 7 : index
   %c11 = arith.constant 11 : index
   affine.for %arg0 = 1 to 10 {
@@ -67,7 +67,7 @@ func @test_gaussian_elimination_empty_set3() {
                                        d0 * 7 + d1 * 5 + s0 * 11 + s1 == 0)>
 
 // CHECK-LABEL: func @test_gaussian_elimination_non_empty_set4() {
-func @test_gaussian_elimination_non_empty_set4() {
+func.func @test_gaussian_elimination_non_empty_set4() {
   %c7 = arith.constant 7 : index
   %c11 = arith.constant 11 : index
   affine.for %arg0 = 1 to 10 {
@@ -89,7 +89,7 @@ func @test_gaussian_elimination_non_empty_set4() {
                                        d0 - 1 == 0, d0 + 2 == 0)>
 
 // CHECK-LABEL: func @test_gaussian_elimination_empty_set5() {
-func @test_gaussian_elimination_empty_set5() {
+func.func @test_gaussian_elimination_empty_set5() {
   %c7 = arith.constant 7 : index
   %c11 = arith.constant 11 : index
   affine.for %arg0 = 1 to 10 {
@@ -146,7 +146,7 @@ func @test_gaussian_elimination_empty_set5() {
 )>
 
 // CHECK-LABEL: func @test_fuzz_explosion
-func @test_fuzz_explosion(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index) {
+func.func @test_fuzz_explosion(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index) {
   affine.for %arg4 = 1 to 10 {
     affine.for %arg5 = 1 to 100 {
       affine.if #set_fuzz_virus(%arg4, %arg5, %arg0, %arg1, %arg2, %arg3) {
@@ -158,7 +158,7 @@ func @test_fuzz_explosion(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : i
 }
 
 // CHECK-LABEL: func @test_empty_set(%arg0: index) {
-func @test_empty_set(%N : index) {
+func.func @test_empty_set(%N : index) {
   affine.for %i = 0 to 10 {
     affine.for %j = 0 to 10 {
       // CHECK-NOT: affine.if
@@ -235,12 +235,12 @@ func @test_empty_set(%N : index) {
 // -----
 
 // An external function that we will use in bodies to avoid DCE.
-func private @external() -> ()
+func.func private @external() -> ()
 
 // CHECK-DAG: #[[$SET:.*]] = affine_set<()[s0] : (s0 >= 0, -s0 + 50 >= 0)
 
 // CHECK-LABEL: func @simplify_set
-func @simplify_set(%a : index, %b : index) {
+func.func @simplify_set(%a : index, %b : index) {
   // CHECK: affine.if #[[$SET]]
   affine.if affine_set<(d0, d1) : (d0 - d1 + d1 + d0 >= 0, 2 >= 0, d0 >= 0, -d0 + 50 >= 0, -d0 + 100 >= 0)>(%a, %b) {
     call @external() : () -> ()
@@ -261,7 +261,7 @@ func @simplify_set(%a : index, %b : index) {
 // CHECK-DAG: -> (s0 * 2 + 1)
 
 // Test "op local" simplification on affine.apply. DCE on arith.addi will not happen.
-func @affine.apply(%N : index) -> index {
+func.func @affine.apply(%N : index) -> index {
   %v = affine.apply affine_map<(d0, d1) -> (d0 + d1 + 1)>(%N, %N)
   %res = arith.addi %v, %v : index
   // CHECK: affine.apply #map{{.*}}()[%arg0]
@@ -272,7 +272,7 @@ func @affine.apply(%N : index) -> index {
 // -----
 
 // CHECK-LABEL: func @simplify_zero_dim_map
-func @simplify_zero_dim_map(%in : memref<f32>) -> f32 {
+func.func @simplify_zero_dim_map(%in : memref<f32>) -> f32 {
   %out = affine.load %in[] : memref<f32>
   return %out : f32
 }
@@ -285,7 +285,7 @@ func @simplify_zero_dim_map(%in : memref<f32>) -> f32 {
 
 // Tests the simplification of a semi-affine expression with a modulo operation on a floordiv and multiplication.
 // CHECK-LABEL: func @semiaffine_mod
-func @semiaffine_mod(%arg0: index, %arg1: index) -> index {
+func.func @semiaffine_mod(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] ->((-((d0 floordiv s0) * s0) + s0 * s0) mod s0)> (%arg0)[%arg1]
   // CHECK:       %[[CST:.*]] = arith.constant 0
   return %a : index
@@ -293,7 +293,7 @@ func @semiaffine_mod(%arg0: index, %arg1: index) -> index {
 
 // Tests the simplification of a semi-affine expression with a nested floordiv and a floordiv on modulo operation.
 // CHECK-LABEL: func @semiaffine_floordiv
-func @semiaffine_floordiv(%arg0: index, %arg1: index) -> index {
+func.func @semiaffine_floordiv(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] ->((-((d0 floordiv s0) * s0) + ((2 * s0) mod (3 * s0))) floordiv s0)> (%arg0)[%arg1]
   // CHECK: affine.apply #[[$map0]]()[%arg1, %arg0]
   return %a : index
@@ -301,7 +301,7 @@ func @semiaffine_floordiv(%arg0: index, %arg1: index) -> index {
 
 // Tests the simplification of a semi-affine expression with a ceildiv operation and a division of arith.constant 0 by a symbol.
 // CHECK-LABEL: func @semiaffine_ceildiv
-func @semiaffine_ceildiv(%arg0: index, %arg1: index) -> index {
+func.func @semiaffine_ceildiv(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] ->((-((d0 floordiv s0) * s0) + s0 * 42 + ((5-5) floordiv s0)) ceildiv  s0)> (%arg0)[%arg1]
   // CHECK: affine.apply #[[$map1]]()[%arg1, %arg0]
   return %a : index
@@ -309,7 +309,7 @@ func @semiaffine_ceildiv(%arg0: index, %arg1: index) -> index {
 
 // Tests the simplification of a semi-affine expression with a nested ceildiv operation and further simplifications after performing ceildiv.
 // CHECK-LABEL: func @semiaffine_composite_floor
-func @semiaffine_composite_floor(%arg0: index, %arg1: index) -> index {
+func.func @semiaffine_composite_floor(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] ->(((((s0 * 2) ceildiv 4) * 5) + s0 * 42) ceildiv s0)> (%arg0)[%arg1]
   // CHECK:       %[[CST:.*]] = arith.constant 47
   return %a : index
@@ -317,7 +317,7 @@ func @semiaffine_composite_floor(%arg0: index, %arg1: index) -> index {
 
 // Tests the simplification of a semi-affine expression with a modulo operation with a second operand that simplifies to symbol.
 // CHECK-LABEL: func @semiaffine_unsimplified_symbol
-func @semiaffine_unsimplified_symbol(%arg0: index, %arg1: index) -> index {
+func.func @semiaffine_unsimplified_symbol(%arg0: index, %arg1: index) -> index {
   %a = affine.apply affine_map<(d0)[s0] ->(s0 mod (2 * s0 - s0))> (%arg0)[%arg1]
   // CHECK:       %[[CST:.*]] = arith.constant 0
   return %a : index
@@ -326,11 +326,11 @@ func @semiaffine_unsimplified_symbol(%arg0: index, %arg1: index) -> index {
 // -----
 
 // Two external functions that we will use in bodies to avoid DCE.
-func private @external() -> ()
-func private @external1() -> ()
+func.func private @external() -> ()
+func.func private @external1() -> ()
 
 // CHECK-LABEL: func @test_always_true_if_elimination() {
-func @test_always_true_if_elimination() {
+func.func @test_always_true_if_elimination() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
       affine.if affine_set<(d0, d1) : (1 >= 0)> (%arg0, %arg1) {
@@ -350,7 +350,7 @@ func @test_always_true_if_elimination() {
 // CHECK-NEXT: }
 
 // CHECK-LABEL: func @test_always_false_if_elimination() {
-func @test_always_false_if_elimination() {
+func.func @test_always_false_if_elimination() {
   // CHECK: affine.for
   affine.for %arg0 = 1 to 10 {
     // CHECK: affine.for
@@ -370,7 +370,7 @@ func @test_always_false_if_elimination() {
 
 // Testing: affine.if is not trivially true or false, nothing happens.
 // CHECK-LABEL: func @test_dimensional_if_elimination() {
-func @test_dimensional_if_elimination() {
+func.func @test_dimensional_if_elimination() {
   affine.for %arg0 = 1 to 10 {
     affine.for %arg1 = 1 to 100 {
       // CHECK: affine.if
@@ -387,7 +387,7 @@ func @test_dimensional_if_elimination() {
 
 // Testing: affine.if gets removed.
 // CHECK-LABEL: func @test_num_results_if_elimination
-func @test_num_results_if_elimination() -> index {
+func.func @test_num_results_if_elimination() -> index {
   // CHECK: %[[zero:.*]] = arith.constant 0 : index
   %zero = arith.constant 0 : index
   %0 = affine.if affine_set<() : ()> () -> index {
@@ -406,7 +406,7 @@ func @test_num_results_if_elimination() -> index {
 // Testing: affine.if gets removed. `Else` block get promoted.
 // CHECK-LABEL: func @test_trivially_false_returning_two_results
 // CHECK-SAME: (%[[arg0:.*]]: index)
-func @test_trivially_false_returning_two_results(%arg0: index) -> (index, index) {
+func.func @test_trivially_false_returning_two_results(%arg0: index) -> (index, index) {
   // CHECK: %[[c7:.*]] = arith.constant 7 : index
   // CHECK: %[[c13:.*]] = arith.constant 13 : index
   %c7 = arith.constant 7 : index
@@ -428,7 +428,7 @@ func @test_trivially_false_returning_two_results(%arg0: index) -> (index, index)
 
 // Testing: affine.if gets removed. `Then` block get promoted.
 // CHECK-LABEL: func @test_trivially_true_returning_five_results
-func @test_trivially_true_returning_five_results() -> (index, index, index, index, index) {
+func.func @test_trivially_true_returning_five_results() -> (index, index, index, index, index) {
   // CHECK: %[[c12:.*]] = arith.constant 12 : index
   // CHECK: %[[c13:.*]] = arith.constant 13 : index
   %c12 = arith.constant 12 : index
@@ -459,7 +459,7 @@ func @test_trivially_true_returning_five_results() -> (index, index, index, inde
 
 // Testing: affine.if doesn't get removed.
 // CHECK-LABEL: func @test_not_trivially_true_or_false_returning_three_results
-func @test_not_trivially_true_or_false_returning_three_results() -> (index, index, index) {
+func.func @test_not_trivially_true_or_false_returning_three_results() -> (index, index, index) {
   // CHECK: %[[c8:.*]] = arith.constant 8 : index
   // CHECK: %[[c13:.*]] = arith.constant 13 : index
   %c8 = arith.constant 8 : index
@@ -488,7 +488,7 @@ func @test_not_trivially_true_or_false_returning_three_results() -> (index, inde
 // CHECK-DAG:   #[[$MODULO_AND_PRODUCT:.*]] = affine_map<()[s0, s1, s2, s3] -> (s0 * s1 + s3 - (-s0 + s3) mod s2)>
 // CHECK-LABEL: func @semiaffine_simplification_mod
 // CHECK-SAME:  (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index, %[[ARG5:.*]]: index)
-func @semiaffine_simplification_mod(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) {
+func.func @semiaffine_simplification_mod(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index, index) {
   %a = affine.apply affine_map<(d0, d1)[s0, s1, s2, s3] -> ((-(d1 * s0 - (s0 - s1) mod s2) + s3) + (d0 * s1 + d1 * s0))>(%arg0, %arg1)[%arg2, %arg3, %arg4, %arg5]
   %b = affine.apply affine_map<(d0)[s0, s1, s2, s3] -> (d0 mod (s0 - s1 * s2 + s3 - s0))>(%arg0)[%arg0, %arg1, %arg2, %arg3]
   %c = affine.apply affine_map<(d0)[s0, s1, s2] -> (d0 + (d0 + s0) mod s2 + s0 * s1 - (d0 + s0) mod s2 - (d0 - s0) mod s2)>(%arg0)[%arg1, %arg2, %arg3]
@@ -507,7 +507,7 @@ func @semiaffine_simplification_mod(%arg0: index, %arg1: index, %arg2: index, %a
 // CHECK-DAG:   #[[$SIMPLIFIED_CEILDIV_RHS:.*]] = affine_map<()[s0, s1, s2, s3] -> (s3 ceildiv (s2 - s0 * s1))>
 // CHECK-LABEL: func @semiaffine_simplification_floordiv_and_ceildiv
 // CHECK-SAME:  (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index)
-func @semiaffine_simplification_floordiv_and_ceildiv(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
+func.func @semiaffine_simplification_floordiv_and_ceildiv(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index) -> (index, index, index) {
   %a = affine.apply affine_map<(d0)[s0, s1, s2, s3] -> (d0 floordiv (s0 - s1 * s2 + s3 - s0))>(%arg0)[%arg0, %arg1, %arg2, %arg3]
   %b = affine.apply affine_map<(d0)[s0, s1, s2, s3] -> ((-(d0 * s1 - (s0 - s1) floordiv s2) + s3) + (d0 * s1 + s0))>(%arg0)[%arg1, %arg2, %arg3, %arg4]
   %c = affine.apply affine_map<(d0)[s0, s1, s2, s3] -> (d0 ceildiv (s0 - s1 * s2 + s3 - s0))>(%arg0)[%arg0, %arg1, %arg2, %arg3]
@@ -525,7 +525,7 @@ func @semiaffine_simplification_floordiv_and_ceildiv(%arg0: index, %arg1: index,
 // CHECK-DAG:   #[[$SUM_OF_PRODUCTS:.*]] = affine_map<()[s0, s1, s2, s3, s4] -> (s2 * s0 + s2 + s3 * s0 + s3 * s1 + s3 + s4 * s1 + s4)>
 // CHECK-LABEL: func @semiaffine_simplification_product
 // CHECK-SAME:  (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index, %[[ARG5:.*]]: index)
-func @semiaffine_simplification_product(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index) {
+func.func @semiaffine_simplification_product(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index) {
   %a = affine.apply affine_map<(d0)[s0, s1, s2, s3] -> ((-(s0 - (s0 - s1) * s2) + s3) + (d0 + s0))>(%arg0)[%arg1, %arg2, %arg3, %arg4]
   %b = affine.apply affine_map<(d0, d1, d2)[s0, s1] -> (d0 + d1 * s1 + d1 + d0 * s0 + d1 * s0 + d2 * s1 + d2)>(%arg0, %arg1, %arg2)[%arg3, %arg4]
   return %a, %b : index, index
@@ -539,7 +539,7 @@ func @semiaffine_simplification_product(%arg0: index, %arg1: index, %arg2: index
 // CHECK-DAG: #[[$SIMPLIFIED_MAP:.*]] = affine_map<()[s0, s1, s2, s3] -> ((-s0 + s2 + s3) mod (s0 + s1))>
 // CHECK-LABEL: func @semi_affine_simplification_euclidean_lemma
 // CHECK-SAME: (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index, %[[ARG5:.*]]: index)
-func @semi_affine_simplification_euclidean_lemma(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index) {
+func.func @semi_affine_simplification_euclidean_lemma(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) -> (index, index) {
   %a = affine.apply affine_map<(d0, d1)[s0, s1] -> ((d0 + d1) - ((d0 + d1) floordiv (s0 - s1)) * (s0 - s1) - (d0 + d1) mod (s0 - s1))>(%arg0, %arg1)[%arg2, %arg3]
   %b = affine.apply affine_map<(d0, d1)[s0, s1] -> ((d0 + d1 - s0) - ((d0 + d1 - s0) floordiv (s0 + s1)) * (s0 + s1))>(%arg0, %arg1)[%arg2, %arg3]
   return %a, %b : index, index

diff  --git a/mlir/test/Dialect/Affine/slicing-utils.mlir b/mlir/test/Dialect/Affine/slicing-utils.mlir
index 110576a5a76b4..71bd8ad4ef9da 100644
--- a/mlir/test/Dialect/Affine/slicing-utils.mlir
+++ b/mlir/test/Dialect/Affine/slicing-utils.mlir
@@ -15,7 +15,7 @@
 // FWD-LABEL: slicing_test
 // BWD-LABEL: slicing_test
 // FWDBWD-LABEL: slicing_test
-func @slicing_test() {
+func.func @slicing_test() {
   // Fake 0 to align on 1 and match ASCII art.
   %0 = memref.alloc() : memref<1xi32>
 
@@ -222,7 +222,7 @@ func @slicing_test() {
 // FWD-LABEL: slicing_test_2
 // BWD-LABEL: slicing_test_2
 // FWDBWD-LABEL: slicing_test_2
-func @slicing_test_2() {
+func.func @slicing_test_2() {
   %c0 = arith.constant 0 : index
   %c2 = arith.constant 2 : index
   %c16 = arith.constant 16 : index
@@ -257,7 +257,7 @@ func @slicing_test_2() {
 // FWD-LABEL: slicing_test_3
 // BWD-LABEL: slicing_test_3
 // FWDBWD-LABEL: slicing_test_3
-func @slicing_test_3() {
+func.func @slicing_test_3() {
   %f = arith.constant 1.0 : f32
   %c = "slicing-test-op"(%f): (f32) -> index
   // FWD: matched: {{.*}} (f32) -> index forward static slice:
@@ -274,7 +274,7 @@ func @slicing_test_3() {
 // FWD-LABEL: slicing_test_function_argument
 // BWD-LABEL: slicing_test_function_argument
 // FWDBWD-LABEL: slicing_test_function_argument
-func @slicing_test_function_argument(%arg0: index) -> index {
+func.func @slicing_test_function_argument(%arg0: index) -> index {
   // BWD: matched: {{.*}} (index, index) -> index backward static slice:
   %0 = "slicing-test-op"(%arg0, %arg0): (index, index) -> index
   return %0 : index
@@ -285,7 +285,7 @@ func @slicing_test_function_argument(%arg0: index) -> index {
 // FWD-LABEL: slicing_test_multiple_return
 // BWD-LABEL: slicing_test_multiple_return
 // FWDBWD-LABEL: slicing_test_multiple_return
-func @slicing_test_multiple_return(%arg0: index) -> (index, index) {
+func.func @slicing_test_multiple_return(%arg0: index) -> (index, index) {
   // BWD: matched: {{.*}} (index, index) -> (index, index) backward static slice:
   // FWD: matched: %{{.*}}:2 = "slicing-test-op"(%arg0, %arg0) : (index, index) -> (index, index) forward static slice:
   // FWD: return %{{.*}}#0, %{{.*}}#1 : index, index

diff  --git a/mlir/test/Dialect/Affine/unroll-jam.mlir b/mlir/test/Dialect/Affine/unroll-jam.mlir
index 36c2bc483f4e7..01fbc977363f1 100644
--- a/mlir/test/Dialect/Affine/unroll-jam.mlir
+++ b/mlir/test/Dialect/Affine/unroll-jam.mlir
@@ -11,7 +11,7 @@
 // UJAM-FOUR-DAG: [[$MAP_PLUS_3:#map[0-9]+]] = affine_map<(d0) -> (d0 + 3)>
 
 // CHECK-LABEL: func @unroll_jam_imperfect_nest() {
-func @unroll_jam_imperfect_nest() {
+func.func @unroll_jam_imperfect_nest() {
   affine.for %i = 0 to 101 {
     %x = "addi32"(%i, %i) : (index, index) -> i32
     affine.for %j = 0 to 17 {
@@ -48,7 +48,7 @@ func @unroll_jam_imperfect_nest() {
 
 // CHECK-LABEL: func @loop_nest_unknown_count_1
 // CHECK-SAME: [[N:arg[0-9]+]]: index
-func @loop_nest_unknown_count_1(%N : index) {
+func.func @loop_nest_unknown_count_1(%N : index) {
   // CHECK-NEXT: affine.for %{{.*}} = 1 to [[$MAP_DIV_OFFSET]]()[%[[N]]] step 2 {
   // CHECK-NEXT:   affine.for %{{.*}} = 1 to 100 {
   // CHECK-NEXT:     "foo"() : () -> i32
@@ -71,7 +71,7 @@ func @loop_nest_unknown_count_1(%N : index) {
 
 // UJAM-FOUR-LABEL: func @loop_nest_unknown_count_2
 // UJAM-FOUR-SAME: %[[N:arg[0-9]+]]: index
-func @loop_nest_unknown_count_2(%N : index) {
+func.func @loop_nest_unknown_count_2(%N : index) {
   // UJAM-FOUR-NEXT: affine.for [[IV0:%arg[0-9]+]] = %[[N]] to  [[$UBMAP]]()[%[[N]]] step 4 {
   // UJAM-FOUR-NEXT:   affine.for [[IV1:%arg[0-9]+]] = 1 to 100 {
   // UJAM-FOUR-NEXT:     "foo"([[IV0]])
@@ -100,7 +100,7 @@ func @loop_nest_unknown_count_2(%N : index) {
 // CHECK-SAME: [[M:arg[0-9]+]]: index
 // CHECK-SAME: [[N:arg[0-9]+]]: index
 // CHECK-SAME: [[K:arg[0-9]+]]: index
-func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index) {
+func.func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index) {
   affine.for %i = 0 to min affine_map<()[s0, s1] -> (s0, s1, 1024)>()[%M, %N] {
     affine.for %j = 0 to %K {
       "test.foo"(%i, %j) : (index, index) -> ()
@@ -120,7 +120,7 @@ func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index)
 // The inner loop trip count changes each iteration of outer loop.
 // Do no unroll-and-jam.
 // CHECK-LABEL: func @no_unroll_jam_dependent_ubound
-func @no_unroll_jam_dependent_ubound(%in0: memref<?xf32, 1>) {
+func.func @no_unroll_jam_dependent_ubound(%in0: memref<?xf32, 1>) {
   affine.for %i = 0 to 100 {
     affine.for %k = 0 to affine_map<(d0) -> (d0 + 1)>(%i) {
       %y = "addi32"(%k, %k) : (index, index) -> i32
@@ -137,7 +137,7 @@ func @no_unroll_jam_dependent_ubound(%in0: memref<?xf32, 1>) {
 
 // Inner loop with one iter_arg.
 // CHECK-LABEL: func @unroll_jam_one_iter_arg
-func @unroll_jam_one_iter_arg() {
+func.func @unroll_jam_one_iter_arg() {
   affine.for %i = 0 to 101 {
     %cst = arith.constant 1 : i32
     %x = "addi32"(%i, %i) : (index, index) -> i32
@@ -177,7 +177,7 @@ func @unroll_jam_one_iter_arg() {
 
 // Inner loop with multiple iter_args.
 // CHECK-LABEL: func @unroll_jam_iter_args
-func @unroll_jam_iter_args() {
+func.func @unroll_jam_iter_args() {
   affine.for %i = 0 to 101 {
     %cst = arith.constant 0 : i32
     %cst1 = arith.constant 1 : i32
@@ -228,7 +228,7 @@ func @unroll_jam_iter_args() {
 // operand .
 // CHECK-LABEL: func @unroll_jam_iter_args_func_arg
 // CHECK-SAME:  [[INIT:%arg[0-9]+]]: i32
-func @unroll_jam_iter_args_func_arg(%in: i32) {
+func.func @unroll_jam_iter_args_func_arg(%in: i32) {
   affine.for %i = 0 to 101 {
     %x = "addi32"(%i, %i) : (index, index) -> i32
     %red = affine.for %j = 0 to 17 iter_args(%acc = %in) -> (i32) {
@@ -265,7 +265,7 @@ func @unroll_jam_iter_args_func_arg(%in: i32) {
 // Nested inner loops, each with one iter_arg. The inner most loop uses its
 // outer loop's iter_arg as its iter operand.
 // CHECK-LABEL: func @unroll_jam_iter_args_nested
-func @unroll_jam_iter_args_nested() {
+func.func @unroll_jam_iter_args_nested() {
   affine.for %i = 0 to 101 {
     %cst = arith.constant 1 : i32
     %x = "addi32"(%i, %i) : (index, index) -> i32
@@ -315,7 +315,7 @@ func @unroll_jam_iter_args_nested() {
 // Nested inner loops, each with one iter_arg. One loop uses its sibling loop's
 // result as its iter operand.
 // CHECK-LABEL: func @unroll_jam_iter_args_nested_affine_for_result
-func @unroll_jam_iter_args_nested_affine_for_result() {
+func.func @unroll_jam_iter_args_nested_affine_for_result() {
   affine.for %i = 0 to 101 {
     %cst = arith.constant 1 : i32
     %x = "addi32"(%i, %i) : (index, index) -> i32
@@ -379,7 +379,7 @@ func @unroll_jam_iter_args_nested_affine_for_result() {
 // Nested inner loops, each with one or more iter_args. Yeild the same value
 // multiple times.
 // CHECK-LABEL: func @unroll_jam_iter_args_nested_yield
-func @unroll_jam_iter_args_nested_yield() {
+func.func @unroll_jam_iter_args_nested_yield() {
   affine.for %i = 0 to 101 {
     %cst = arith.constant 1 : i32
     %x = "addi32"(%i, %i) : (index, index) -> i32
@@ -444,7 +444,7 @@ func @unroll_jam_iter_args_nested_yield() {
 
 // CHECK-LABEL: func @unroll_jam_nested_iter_args_mulf
 // CHECK-SAME:  [[INIT0:%arg[0-9]+]]: f32, [[INIT1:%arg[0-9]+]]: f32
-func @unroll_jam_nested_iter_args_mulf(%arg0: memref<21x30xf32, 1>, %init : f32, %init1 : f32) {
+func.func @unroll_jam_nested_iter_args_mulf(%arg0: memref<21x30xf32, 1>, %init : f32, %init1 : f32) {
   %0 = affine.for %arg3 = 0 to 21 iter_args(%arg4 = %init) -> (f32) {
     %1 = affine.for %arg5 = 0 to 30 iter_args(%arg6 = %init1) -> (f32) {
       %3 = affine.load %arg0[%arg3, %arg5] : memref<21x30xf32, 1>
@@ -485,7 +485,7 @@ func @unroll_jam_nested_iter_args_mulf(%arg0: memref<21x30xf32, 1>, %init : f32,
 
 // CHECK-LABEL: func @unroll_jam_iter_args_addi
 // CHECK-SAME:  [[INIT0:%arg[0-9]+]]: i32
-func @unroll_jam_iter_args_addi(%arg0: memref<21xi32, 1>, %init : i32) {
+func.func @unroll_jam_iter_args_addi(%arg0: memref<21xi32, 1>, %init : i32) {
   %0 = affine.for %arg3 = 0 to 21 iter_args(%arg4 = %init) -> (i32) {
     %1 = affine.load %arg0[%arg3] : memref<21xi32, 1>
     %2 = arith.addi %arg4, %1 : i32

diff  --git a/mlir/test/Dialect/Affine/unroll.mlir b/mlir/test/Dialect/Affine/unroll.mlir
index 336e3313abab8..022d9021af0fa 100644
--- a/mlir/test/Dialect/Affine/unroll.mlir
+++ b/mlir/test/Dialect/Affine/unroll.mlir
@@ -23,7 +23,7 @@
 // UNROLL-BY-4-DAG: [[$MAP11:#map[0-9]+]] = affine_map<(d0) -> (d0)>
 
 // UNROLL-FULL-LABEL: func @loop_nest_simplest() {
-func @loop_nest_simplest() {
+func.func @loop_nest_simplest() {
   // UNROLL-FULL: affine.for %arg0 = 0 to 100 step 2 {
   affine.for %i = 0 to 100 step 2 {
     // UNROLL-FULL: %c1_i32 = arith.constant 1 : i32
@@ -38,7 +38,7 @@ func @loop_nest_simplest() {
 }         // UNROLL-FULL }
 
 // UNROLL-FULL-LABEL: func @loop_nest_simple_iv_use() {
-func @loop_nest_simple_iv_use() {
+func.func @loop_nest_simple_iv_use() {
   // UNROLL-FULL: %c0 = arith.constant 0 : index
   // UNROLL-FULL-NEXT: affine.for %arg0 = 0 to 100 step 2 {
   affine.for %i = 0 to 100 step 2 {
@@ -58,7 +58,7 @@ func @loop_nest_simple_iv_use() {
 
 // Operations in the loop body have results that are used therein.
 // UNROLL-FULL-LABEL: func @loop_nest_body_def_use() {
-func @loop_nest_body_def_use() {
+func.func @loop_nest_body_def_use() {
   // UNROLL-FULL: %c0 = arith.constant 0 : index
   // UNROLL-FULL-NEXT: affine.for %arg0 = 0 to 100 step 2 {
   affine.for %i = 0 to 100 step 2 {
@@ -85,7 +85,7 @@ func @loop_nest_body_def_use() {
 }         // UNROLL-FULL }
 
 // UNROLL-FULL-LABEL: func @loop_nest_strided() {
-func @loop_nest_strided() {
+func.func @loop_nest_strided() {
   // UNROLL-FULL: %c2 = arith.constant 2 : index
   // UNROLL-FULL-NEXT: %c2_0 = arith.constant 2 : index
   // UNROLL-FULL-NEXT: affine.for %arg0 = 0 to 100 {
@@ -118,7 +118,7 @@ func @loop_nest_strided() {
 }         // UNROLL-FULL }
 
 // UNROLL-FULL-LABEL: func @loop_nest_multiple_results() {
-func @loop_nest_multiple_results() {
+func.func @loop_nest_multiple_results() {
   // UNROLL-FULL: %c0 = arith.constant 0 : index
   // UNROLL-FULL-NEXT: affine.for %arg0 = 0 to 100 {
   affine.for %i = 0 to 100 {
@@ -144,7 +144,7 @@ func @loop_nest_multiple_results() {
 
 // Imperfect loop nest. Unrolling innermost here yields a perfect nest.
 // UNROLL-FULL-LABEL: func @loop_nest_seq_imperfect(%arg0: memref<128x128xf32>) {
-func @loop_nest_seq_imperfect(%a : memref<128x128xf32>) {
+func.func @loop_nest_seq_imperfect(%a : memref<128x128xf32>) {
   // UNROLL-FULL: %c0 = arith.constant 0 : index
   // UNROLL-FULL-NEXT: %c128 = arith.constant 128 : index
   %c128 = arith.constant 128 : index
@@ -182,7 +182,7 @@ func @loop_nest_seq_imperfect(%a : memref<128x128xf32>) {
 }
 
 // UNROLL-FULL-LABEL: func @loop_nest_seq_multiple() {
-func @loop_nest_seq_multiple() {
+func.func @loop_nest_seq_multiple() {
   // UNROLL-FULL: c0 = arith.constant 0 : index
   // UNROLL-FULL-NEXT: %c0_0 = arith.constant 0 : index
   // UNROLL-FULL-NEXT: %0 = affine.apply [[$MAP0]](%c0_0)
@@ -228,7 +228,7 @@ func @loop_nest_seq_multiple() {
 }         // UNROLL-FULL }
 
 // UNROLL-FULL-LABEL: func @loop_nest_unroll_full() {
-func @loop_nest_unroll_full() {
+func.func @loop_nest_unroll_full() {
   // UNROLL-FULL-NEXT: %0 = "foo"() : () -> i32
   // UNROLL-FULL-NEXT: %1 = "bar"() : () -> i32
   // UNROLL-FULL-NEXT:  return
@@ -240,7 +240,7 @@ func @loop_nest_unroll_full() {
 } // UNROLL-FULL }
 
 // SHORT-LABEL: func @loop_nest_outer_unroll() {
-func @loop_nest_outer_unroll() {
+func.func @loop_nest_outer_unroll() {
   // SHORT:      affine.for %arg0 = 0 to 4 {
   // SHORT-NEXT:   %0 = affine.apply [[$MAP0]](%arg0)
   // SHORT-NEXT:   %1 = "addi32"(%0, %0) : (index, index) -> index
@@ -263,7 +263,7 @@ func @loop_nest_outer_unroll() {
 // successfully run. Both %x and %y will get unrolled here as the min trip
 // count threshold set to 2.
 // SHORT-LABEL: func @loop_nest_seq_long() -> i32 {
-func @loop_nest_seq_long() -> i32 {
+func.func @loop_nest_seq_long() -> i32 {
   %A = memref.alloc() : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
   %B = memref.alloc() : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
   %C = memref.alloc() : memref<512 x 512 x i32, affine_map<(d0, d1) -> (d0, d1)>, 2>
@@ -316,7 +316,7 @@ func @loop_nest_seq_long() -> i32 {
 }
 
 // UNROLL-BY-4-LABEL: func @unroll_unit_stride_no_cleanup() {
-func @unroll_unit_stride_no_cleanup() {
+func.func @unroll_unit_stride_no_cleanup() {
   // UNROLL-BY-4: affine.for %arg0 = 0 to 100 {
   affine.for %i = 0 to 100 {
     // UNROLL-BY-4: for [[L1:%arg[0-9]+]] = 0 to 8 step 4 {
@@ -345,7 +345,7 @@ func @unroll_unit_stride_no_cleanup() {
 }
 
 // UNROLL-BY-4-LABEL: func @unroll_unit_stride_cleanup() {
-func @unroll_unit_stride_cleanup() {
+func.func @unroll_unit_stride_cleanup() {
   // UNROLL-BY-4: affine.for %arg0 = 0 to 100 {
   affine.for %i = 0 to 100 {
     // UNROLL-BY-4: for [[L1:%arg[0-9]+]] = 0 to 8 step 4 {
@@ -374,7 +374,7 @@ func @unroll_unit_stride_cleanup() {
 }
 
 // UNROLL-BY-4-LABEL: func @unroll_non_unit_stride_cleanup() {
-func @unroll_non_unit_stride_cleanup() {
+func.func @unroll_non_unit_stride_cleanup() {
   // UNROLL-BY-4: affine.for %arg0 = 0 to 100 {
   affine.for %i = 0 to 100 {
     // UNROLL-BY-4: for [[L1:%arg[0-9]+]] = 2 to 42 step 20 {
@@ -404,7 +404,7 @@ func @unroll_non_unit_stride_cleanup() {
 
 // Both the unrolled loop and the cleanup loop are single iteration loops.
 // UNROLL-BY-4-LABEL: func @loop_nest_single_iteration_after_unroll
-func @loop_nest_single_iteration_after_unroll(%N: index) {
+func.func @loop_nest_single_iteration_after_unroll(%N: index) {
   // UNROLL-BY-4: %c0 = arith.constant 0 : index
   // UNROLL-BY-4: %c4 = arith.constant 4 : index
   // UNROLL-BY-4: affine.for %arg1 = 0 to %arg0 {
@@ -429,7 +429,7 @@ func @loop_nest_single_iteration_after_unroll(%N: index) {
 
 // No cleanup will be generated here.
 // UNROLL-BY-4-LABEL: func @loop_nest_operand1() {
-func @loop_nest_operand1() {
+func.func @loop_nest_operand1() {
 // UNROLL-BY-4:      affine.for %arg0 = 0 to 100 step 2 {
 // UNROLL-BY-4-NEXT:   affine.for %arg1 = 0 to #map{{[0-9]+}}(%arg0) step 4
 // UNROLL-BY-4-NEXT:      %0 = "foo"() : () -> i32
@@ -449,7 +449,7 @@ func @loop_nest_operand1() {
 
 // No cleanup will be generated here.
 // UNROLL-BY-4-LABEL: func @loop_nest_operand2() {
-func @loop_nest_operand2() {
+func.func @loop_nest_operand2() {
 // UNROLL-BY-4:      affine.for %arg0 = 0 to 100 step 2 {
 // UNROLL-BY-4-NEXT:   affine.for %arg1 = [[$MAP11]](%arg0) to #map{{[0-9]+}}(%arg0) step 4 {
 // UNROLL-BY-4-NEXT:     %0 = "foo"() : () -> i32
@@ -468,7 +468,7 @@ func @loop_nest_operand2() {
 }
 
 // UNROLL-BY-4-LABEL: func @floordiv_mod_ub
-func @floordiv_mod_ub(%M : index, %N : index) {
+func.func @floordiv_mod_ub(%M : index, %N : index) {
   affine.for %i = 0 to %N step 4 {
     // A cleanup should be generated here.
     affine.for %j = 0 to min affine_map<(d0)[s0] -> ((16 * d0) floordiv (4 * s0))>(%i)[%N] {
@@ -494,7 +494,7 @@ func @floordiv_mod_ub(%M : index, %N : index) {
 // Difference between loop bounds is constant, but not a multiple of unroll
 // factor. The cleanup loop happens to be a single iteration one and is promoted.
 // UNROLL-BY-4-LABEL: func @loop_nest_operand3() {
-func @loop_nest_operand3() {
+func.func @loop_nest_operand3() {
   // UNROLL-BY-4: affine.for %arg0 = 0 to 100 step 2 {
   affine.for %i = 0 to 100 step 2 {
     // UNROLL-BY-4: affine.for %arg1 = [[$MAP11]](%arg0) to #map{{[0-9]+}}(%arg0) step 4 {
@@ -512,7 +512,7 @@ func @loop_nest_operand3() {
 }
 
 // UNROLL-BY-4-LABEL: func @loop_nest_symbolic_bound(%arg0: index) {
-func @loop_nest_symbolic_bound(%N : index) {
+func.func @loop_nest_symbolic_bound(%N : index) {
   // UNROLL-BY-4: affine.for %arg1 = 0 to 100 {
   affine.for %i = 0 to 100 {
     // UNROLL-BY-4: affine.for %arg2 = 0 to #map{{[0-9]+}}()[%arg0] step 4 {
@@ -534,7 +534,7 @@ func @loop_nest_symbolic_bound(%N : index) {
 
 // UNROLL-BY-4-LABEL: func @loop_nest_symbolic_bound_with_step
 // UNROLL-BY-4-SAME: %[[N:.*]]: index
-func @loop_nest_symbolic_bound_with_step(%N : index) {
+func.func @loop_nest_symbolic_bound_with_step(%N : index) {
   // UNROLL-BY-4: affine.for %arg1 = 0 to 100 {
   affine.for %i = 0 to 100 {
     affine.for %j = 0 to %N step 3 {
@@ -555,7 +555,7 @@ func @loop_nest_symbolic_bound_with_step(%N : index) {
 }
 
 // UNROLL-BY-4-LABEL: func @loop_nest_symbolic_and_min_upper_bound
-func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index) {
+func.func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index) {
   affine.for %i = %M to min affine_map<()[s0, s1] -> (s0, s1, 1024)>()[%N, %K] {
     "test.foo"() : () -> ()
   }
@@ -570,7 +570,7 @@ func @loop_nest_symbolic_and_min_upper_bound(%M : index, %N : index, %K : index)
 // The trip count here is a multiple of four, but this can be inferred only
 // through composition. Check for no cleanup scf.
 // UNROLL-BY-4-LABEL: func @loop_nest_non_trivial_multiple_upper_bound
-func @loop_nest_non_trivial_multiple_upper_bound(%M : index, %N : index) {
+func.func @loop_nest_non_trivial_multiple_upper_bound(%M : index, %N : index) {
   %T = affine.apply affine_map<(d0) -> (4*d0 + 1)>(%M)
   %K = affine.apply affine_map<(d0) -> (d0 - 1)> (%T)
   affine.for %i = 0 to min affine_map<(d0, d1) -> (4 * d0, d1, 1024)>(%N, %K) {
@@ -583,7 +583,7 @@ func @loop_nest_non_trivial_multiple_upper_bound(%M : index, %N : index) {
 // UNROLL-BY-4: return
 
 // UNROLL-BY-4-LABEL: func @multi_upper_bound
-func @multi_upper_bound(%arg0: index) {
+func.func @multi_upper_bound(%arg0: index) {
   affine.for %i = 0 to min affine_map<()[s0] -> (8 * s0, 12 * s0)>()[%arg0] {
     "test.foo"() : () -> ()
   }
@@ -593,7 +593,7 @@ func @multi_upper_bound(%arg0: index) {
 }
 
 // UNROLL-BY-4-LABEL: func @multi_lower_bound
-func @multi_lower_bound(%arg0: index) {
+func.func @multi_lower_bound(%arg0: index) {
   affine.for %i = max affine_map<()[s0] -> (8 * s0, 12 * s0)>()[%arg0] to 100 {
     "test.foo"() : () -> ()
   }
@@ -605,7 +605,7 @@ func @multi_lower_bound(%arg0: index) {
 }
 
 // UNROLL-BY-4-LABEL: func @loop_nest_non_trivial_multiple_upper_bound_alt
-func @loop_nest_non_trivial_multiple_upper_bound_alt(%M : index, %N : index) {
+func.func @loop_nest_non_trivial_multiple_upper_bound_alt(%M : index, %N : index) {
   %K = affine.apply affine_map<(d0) -> (4*d0)> (%M)
   affine.for %i = 0 to min affine_map<()[s0, s1] -> (4 * s0, s1, 1024)>()[%N, %K] {
     "foo"() : () -> ()
@@ -621,7 +621,7 @@ func @loop_nest_non_trivial_multiple_upper_bound_alt(%M : index, %N : index) {
 }
 
 // UNROLL-BY-1-LABEL: func @unroll_by_one_should_promote_single_iteration_loop()
-func @unroll_by_one_should_promote_single_iteration_loop() {
+func.func @unroll_by_one_should_promote_single_iteration_loop() {
   affine.for %i = 0 to 1 {
     %x = "foo"(%i) : (index) -> i32
   }
@@ -634,7 +634,7 @@ func @unroll_by_one_should_promote_single_iteration_loop() {
 // Test unrolling with affine.for iter_args.
 
 // UNROLL-BY-4-LABEL: loop_unroll_with_iter_args_and_cleanup
-func @loop_unroll_with_iter_args_and_cleanup(%arg0 : f32, %arg1 : f32, %n : index) -> (f32,f32) {
+func.func @loop_unroll_with_iter_args_and_cleanup(%arg0 : f32, %arg1 : f32, %n : index) -> (f32,f32) {
   %cf1 = arith.constant 1.0 : f32
   %cf2 = arith.constant 2.0 : f32
   %sum:2 = affine.for %iv = 0 to 10 iter_args(%i0 = %arg0, %i1 = %arg1) -> (f32, f32) {
@@ -662,7 +662,7 @@ func @loop_unroll_with_iter_args_and_cleanup(%arg0 : f32, %arg1 : f32, %n : inde
 // The epilogue being a single iteration loop gets promoted here.
 
 // UNROLL-BY-4-LABEL: unroll_with_iter_args_and_promotion
-func @unroll_with_iter_args_and_promotion(%arg0 : f32, %arg1 : f32) -> f32 {
+func.func @unroll_with_iter_args_and_promotion(%arg0 : f32, %arg1 : f32) -> f32 {
   %from = arith.constant 0 : index
   %to = arith.constant 10 : index
   %step = arith.constant 1 : index
@@ -683,7 +683,7 @@ func @unroll_with_iter_args_and_promotion(%arg0 : f32, %arg1 : f32) -> f32 {
 }
 
 // UNROLL-FULL: func @unroll_zero_trip_count_case
-func @unroll_zero_trip_count_case() {
+func.func @unroll_zero_trip_count_case() {
   // CHECK-NEXT: affine.for %{{.*}} = 0 to 0
   affine.for %i = 0 to 0 {
   }


        


More information about the Mlir-commits mailing list