[Mlir-commits] [mlir] 3028bf7 - [mlir][NFC] Update textual references of `func` to `func.func` in Conversion/ tests

River Riddle <llvmlistbot at llvm.org>
Wed Apr 20 22:24:22 PDT 2022


Author: River Riddle
Date: 2022-04-20T22:17:27-07:00
New Revision: 3028bf740e094a911c214c461b00c068e2aa8db5

URL: https://github.com/llvm/llvm-project/commit/3028bf740e094a911c214c461b00c068e2aa8db5
DIFF: https://github.com/llvm/llvm-project/commit/3028bf740e094a911c214c461b00c068e2aa8db5.diff

LOG: [mlir][NFC] Update textual references of `func` to `func.func` in Conversion/ tests

The special-case parsing of `func` operations, which allowed the `func.` dialect prefix to be omitted when writing `func.func` ops in textual IR, is being removed.
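
A minimal sketch of the mechanical update (the `@example` function below is
hypothetical, not taken from this commit): a test that previously relied on
the special-case parser and wrote

    // CHECK-LABEL: func @example
    func @example(%arg0: index) {
      return
    }

must now spell out the dialect-qualified operation name:

    // CHECK-LABEL: func @example
    func.func @example(%arg0: index) {
      return
    }

The CHECK lines themselves can stay unchanged, since `func @example` is still
a substring of the printed `func.func @example`.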

Added: 
    

Modified: 
    mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir
    mlir/test/Conversion/AffineToStandard/lower-affine.mlir
    mlir/test/Conversion/AffineToStandard/no-memref.mlir
    mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
    mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
    mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
    mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
    mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
    mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
    mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
    mlir/test/Conversion/ComplexToLLVM/convert-to-llvm.mlir
    mlir/test/Conversion/ComplexToLLVM/full-conversion.mlir
    mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
    mlir/test/Conversion/ComplexToStandard/full-conversion.mlir
    mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
    mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
    mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir
    mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
    mlir/test/Conversion/FuncToLLVM/convert-types.mlir
    mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
    mlir/test/Conversion/FuncToLLVM/func-memref.mlir
    mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
    mlir/test/Conversion/FuncToLLVM/invalid.mlir
    mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
    mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
    mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir
    mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
    mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir
    mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir
    mlir/test/Conversion/GPUCommon/lower-wait-to-gpu-runtime-calls.mlir
    mlir/test/Conversion/GPUToSPIRV/builtins.mlir
    mlir/test/Conversion/GPUToSPIRV/load-store.mlir
    mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
    mlir/test/Conversion/GPUToSPIRV/simple.mlir
    mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
    mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
    mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
    mlir/test/Conversion/MathToLibm/convert-to-libm.mlir
    mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
    mlir/test/Conversion/MathToSPIRV/math-to-glsl-spirv.mlir
    mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
    mlir/test/Conversion/MemRefToLLVM/convert-alloca-scope.mlir
    mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
    mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
    mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
    mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
    mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
    mlir/test/Conversion/NVGPUToNVVM/mma-sync-to-nvvm.mlir
    mlir/test/Conversion/OpenACCToLLVM/convert-data-operands-to-llvmir.mlir
    mlir/test/Conversion/OpenACCToSCF/convert-openacc-to-scf.mlir
    mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
    mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir
    mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
    mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
    mlir/test/Conversion/SCFToGPU/step_one.mlir
    mlir/test/Conversion/SCFToGPU/step_positive.mlir
    mlir/test/Conversion/SCFToOpenMP/reductions.mlir
    mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
    mlir/test/Conversion/SCFToSPIRV/for.mlir
    mlir/test/Conversion/SCFToSPIRV/if.mlir
    mlir/test/Conversion/SCFToSPIRV/while.mlir
    mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
    mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
    mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
    mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-callers.mlir
    mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-functions.mlir
    mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
    mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir
    mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
    mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
    mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir
    mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
    mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir
    mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
    mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
    mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
    mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir
    mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir
    mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir
    mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir
    mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
    mlir/test/Conversion/VectorToSPIRV/simple.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir
index aea26a3d534b7..58580a194df0c 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -lower-affine --split-input-file %s | FileCheck %s
 
 // CHECK-LABEL: func @affine_vector_load
-func @affine_vector_load(%arg0 : index) {
+func.func @affine_vector_load(%arg0 : index) {
   %0 = memref.alloc() : memref<100xf32>
   affine.for %i0 = 0 to 16 {
     %1 = affine.vector_load %0[%i0 + symbol(%arg0) + 7] : memref<100xf32>, vector<8xf32>
@@ -17,7 +17,7 @@ func @affine_vector_load(%arg0 : index) {
 // -----
 
 // CHECK-LABEL: func @affine_vector_store
-func @affine_vector_store(%arg0 : index) {
+func.func @affine_vector_store(%arg0 : index) {
   %0 = memref.alloc() : memref<100xf32>
   %1 = arith.constant dense<11.0> : vector<4xf32>
   affine.for %i0 = 0 to 16 {
@@ -37,7 +37,7 @@ func @affine_vector_store(%arg0 : index) {
 // -----
 
 // CHECK-LABEL: func @vector_load_2d
-func @vector_load_2d() {
+func.func @vector_load_2d() {
   %0 = memref.alloc() : memref<100x100xf32>
   affine.for %i0 = 0 to 16 step 2{
     affine.for %i1 = 0 to 16 step 8 {
@@ -54,7 +54,7 @@ func @vector_load_2d() {
 // -----
 
 // CHECK-LABEL: func @vector_store_2d
-func @vector_store_2d() {
+func.func @vector_store_2d() {
   %0 = memref.alloc() : memref<100x100xf32>
   %1 = arith.constant dense<11.0> : vector<2x8xf32>
   affine.for %i0 = 0 to 16 step 2{

diff  --git a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
index c1fcf864034eb..46a8106484726 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
@@ -1,11 +1,11 @@
 // RUN: mlir-opt -lower-affine %s | FileCheck %s
 
 // CHECK-LABEL: func @empty() {
-func @empty() {
+func.func @empty() {
   return     // CHECK:  return
 }            // CHECK: }
 
-func private @body(index) -> ()
+func.func private @body(index) -> ()
 
 // Simple loops are properly converted.
 // CHECK-LABEL: func @simple_loop
@@ -17,7 +17,7 @@ func private @body(index) -> ()
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @simple_loop() {
+func.func @simple_loop() {
   affine.for %i = 1 to 42 {
     call @body(%i) : (index) -> ()
   }
@@ -26,7 +26,7 @@ func @simple_loop() {
 
 /////////////////////////////////////////////////////////////////////
 
-func @for_with_yield(%buffer: memref<1024xf32>) -> (f32) {
+func.func @for_with_yield(%buffer: memref<1024xf32>) -> (f32) {
   %sum_0 = arith.constant 0.0 : f32
   %sum = affine.for %i = 0 to 10 step 2 iter_args(%sum_iter = %sum_0) -> (f32) {
     %t = affine.load %buffer[%i] : memref<1024xf32>
@@ -50,9 +50,9 @@ func @for_with_yield(%buffer: memref<1024xf32>) -> (f32) {
 
 /////////////////////////////////////////////////////////////////////
 
-func private @pre(index) -> ()
-func private @body2(index, index) -> ()
-func private @post(index) -> ()
+func.func private @pre(index) -> ()
+func.func private @body2(index, index) -> ()
+func.func private @post(index) -> ()
 
 // CHECK-LABEL: func @imperfectly_nested_loops
 // CHECK-NEXT:   %[[c0:.*]] = arith.constant 0 : index
@@ -70,7 +70,7 @@ func private @post(index) -> ()
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @imperfectly_nested_loops() {
+func.func @imperfectly_nested_loops() {
   affine.for %i = 0 to 42 {
     call @pre(%i) : (index) -> ()
     affine.for %j = 7 to 56 step 2 {
@@ -83,8 +83,8 @@ func @imperfectly_nested_loops() {
 
 /////////////////////////////////////////////////////////////////////
 
-func private @mid(index) -> ()
-func private @body3(index, index) -> ()
+func.func private @mid(index) -> ()
+func.func private @body3(index, index) -> ()
 
 // CHECK-LABEL: func @more_imperfectly_nested_loops
 // CHECK-NEXT:   %[[c0:.*]] = arith.constant 0 : index
@@ -109,7 +109,7 @@ func private @body3(index, index) -> ()
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @more_imperfectly_nested_loops() {
+func.func @more_imperfectly_nested_loops() {
   affine.for %i = 0 to 42 {
     call @pre(%i) : (index) -> ()
     affine.for %j = 7 to 56 step 2 {
@@ -136,7 +136,7 @@ func @more_imperfectly_nested_loops() {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @affine_apply_loops_shorthand(%N : index) {
+func.func @affine_apply_loops_shorthand(%N : index) {
   affine.for %i = 0 to %N {
     affine.for %j = affine_map<(d0)[]->(d0)>(%i)[] to 42 {
       call @body2(%i, %j) : (index, index) -> ()
@@ -147,7 +147,7 @@ func @affine_apply_loops_shorthand(%N : index) {
 
 /////////////////////////////////////////////////////////////////////
 
-func private @get_idx() -> (index)
+func.func private @get_idx() -> (index)
 
 #set1 = affine_set<(d0) : (20 - d0 >= 0)>
 #set2 = affine_set<(d0) : (d0 - 10 >= 0)>
@@ -165,7 +165,7 @@ func private @get_idx() -> (index)
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @if_only() {
+func.func @if_only() {
   %i = call @get_idx() : () -> (index)
   affine.if #set1(%i) {
     call @body(%i) : (index) -> ()
@@ -188,7 +188,7 @@ func @if_only() {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @if_else() {
+func.func @if_else() {
   %i = call @get_idx() : () -> (index)
   affine.if #set1(%i) {
     call @body(%i) : (index) -> ()
@@ -225,7 +225,7 @@ func @if_else() {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @nested_ifs() {
+func.func @nested_ifs() {
   %i = call @get_idx() : () -> (index)
   affine.if #set1(%i) {
     affine.if #set2(%i) {
@@ -254,7 +254,7 @@ func @nested_ifs() {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return %[[v3]] : i64
 // CHECK-NEXT: }
-func @if_with_yield() -> (i64) {
+func.func @if_with_yield() -> (i64) {
   %cst0 = arith.constant 0 : i64
   %cst1 = arith.constant 1 : i64
   %i = call @get_idx() : () -> (index)
@@ -300,7 +300,7 @@ func @if_with_yield() -> (i64) {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @multi_cond(%N : index, %M : index, %K : index, %L : index) {
+func.func @multi_cond(%N : index, %M : index, %K : index, %L : index) {
   %i = call @get_idx() : () -> (index)
   affine.if #setN(%i)[%N,%M,%K,%L] {
     call @body(%i) : (index) -> ()
@@ -311,7 +311,7 @@ func @multi_cond(%N : index, %M : index, %K : index, %L : index) {
 }
 
 // CHECK-LABEL: func @if_for
-func @if_for() {
+func.func @if_for() {
 // CHECK-NEXT:   %[[v0:.*]] = call @get_idx() : () -> index
   %i = call @get_idx() : () -> (index)
 // CHECK-NEXT:   %[[c0:.*]] = arith.constant 0 : index
@@ -386,7 +386,7 @@ func @if_for() {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @loop_min_max(%N : index) {
+func.func @loop_min_max(%N : index) {
   affine.for %i = 0 to 42 {
     affine.for %j = max #lbMultiMap(%i)[%N] to min #ubMultiMap(%i)[%N] {
       call @body2(%i, %j) : (index, index) -> ()
@@ -420,7 +420,7 @@ func @loop_min_max(%N : index) {
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
 // CHECK-NEXT: }
-func @min_reduction_tree(%v1 : index, %v2 : index, %v3 : index, %v4 : index, %v5 : index, %v6 : index, %v7 : index) {
+func.func @min_reduction_tree(%v1 : index, %v2 : index, %v3 : index, %v4 : index, %v5 : index, %v6 : index, %v7 : index) {
   affine.for %i = 0 to min #map_7_values(%v1, %v2, %v3, %v4, %v5, %v6, %v7)[] {
     call @body(%i) : (index) -> ()
   }
@@ -438,7 +438,7 @@ func @min_reduction_tree(%v1 : index, %v2 : index, %v3 : index, %v4 : index, %v5
 #map6 = affine_map<(d0,d1,d2) -> (d0 + d1 + d2)>
 
 // CHECK-LABEL: func @affine_applies(
-func @affine_applies(%arg0 : index) {
+func.func @affine_applies(%arg0 : index) {
 // CHECK: %[[c0:.*]] = arith.constant 0 : index
   %zero = affine.apply #map0()
 
@@ -478,7 +478,7 @@ func @affine_applies(%arg0 : index) {
 }
 
 // CHECK-LABEL: func @args_ret_affine_apply(
-func @args_ret_affine_apply(index, index) -> (index, index) {
+func.func @args_ret_affine_apply(index, index) -> (index, index) {
 ^bb0(%0 : index, %1 : index):
 // CHECK-NEXT: return %{{.*}}, %{{.*}} : index, index
   %00 = affine.apply #map2 (%0)
@@ -501,7 +501,7 @@ func @args_ret_affine_apply(index, index) -> (index, index) {
 // affine.apply lowering.
 // --------------------------------------------------------------------------//
 // CHECK-LABEL: func @affine_apply_mod
-func @affine_apply_mod(%arg0 : index) -> (index) {
+func.func @affine_apply_mod(%arg0 : index) -> (index) {
 // CHECK-NEXT: %[[c42:.*]] = arith.constant 42 : index
 // CHECK-NEXT: %[[v0:.*]] = arith.remsi %{{.*}}, %[[c42]] : index
 // CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
@@ -520,7 +520,7 @@ func @affine_apply_mod(%arg0 : index) -> (index) {
 // affine.apply lowering.
 // --------------------------------------------------------------------------//
 // CHECK-LABEL: func @affine_apply_floordiv
-func @affine_apply_floordiv(%arg0 : index) -> (index) {
+func.func @affine_apply_floordiv(%arg0 : index) -> (index) {
 // CHECK-NEXT: %[[c42:.*]] = arith.constant 42 : index
 // CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
 // CHECK-NEXT: %[[cm1:.*]] = arith.constant -1 : index
@@ -542,7 +542,7 @@ func @affine_apply_floordiv(%arg0 : index) -> (index) {
 // affine.apply lowering.
 // --------------------------------------------------------------------------//
 // CHECK-LABEL: func @affine_apply_ceildiv
-func @affine_apply_ceildiv(%arg0 : index) -> (index) {
+func.func @affine_apply_ceildiv(%arg0 : index) -> (index) {
 // CHECK-NEXT:  %[[c42:.*]] = arith.constant 42 : index
 // CHECK-NEXT:  %[[c0:.*]] = arith.constant 0 : index
 // CHECK-NEXT:  %[[c1:.*]] = arith.constant 1 : index
@@ -559,7 +559,7 @@ func @affine_apply_ceildiv(%arg0 : index) -> (index) {
 }
 
 // CHECK-LABEL: func @affine_load
-func @affine_load(%arg0 : index) {
+func.func @affine_load(%arg0 : index) {
   %0 = memref.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
     %1 = affine.load %0[%i0 + symbol(%arg0) + 7] : memref<10xf32>
@@ -572,7 +572,7 @@ func @affine_load(%arg0 : index) {
 }
 
 // CHECK-LABEL: func @affine_store
-func @affine_store(%arg0 : index) {
+func.func @affine_store(%arg0 : index) {
   %0 = memref.alloc() : memref<10xf32>
   %1 = arith.constant 11.0 : f32
   affine.for %i0 = 0 to 10 {
@@ -588,7 +588,7 @@ func @affine_store(%arg0 : index) {
 }
 
 // CHECK-LABEL: func @affine_load_store_zero_dim
-func @affine_load_store_zero_dim(%arg0 : memref<i32>, %arg1 : memref<i32>) {
+func.func @affine_load_store_zero_dim(%arg0 : memref<i32>, %arg1 : memref<i32>) {
   %0 = affine.load %arg0[] : memref<i32>
   affine.store %0, %arg1[] : memref<i32>
 // CHECK: %[[x:.*]] = memref.load %arg0[] : memref<i32>
@@ -597,7 +597,7 @@ func @affine_load_store_zero_dim(%arg0 : memref<i32>, %arg1 : memref<i32>) {
 }
 
 // CHECK-LABEL: func @affine_prefetch
-func @affine_prefetch(%arg0 : index) {
+func.func @affine_prefetch(%arg0 : index) {
   %0 = memref.alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
     affine.prefetch %0[%i0 + symbol(%arg0) + 7], read, locality<3>, data : memref<10xf32>
@@ -610,7 +610,7 @@ func @affine_prefetch(%arg0 : index) {
 }
 
 // CHECK-LABEL: func @affine_dma_start
-func @affine_dma_start(%arg0 : index) {
+func.func @affine_dma_start(%arg0 : index) {
   %0 = memref.alloc() : memref<100xf32>
   %1 = memref.alloc() : memref<100xf32, 2>
   %2 = memref.alloc() : memref<1xi32>
@@ -629,7 +629,7 @@ func @affine_dma_start(%arg0 : index) {
 }
 
 // CHECK-LABEL: func @affine_dma_wait
-func @affine_dma_wait(%arg0 : index) {
+func.func @affine_dma_wait(%arg0 : index) {
   %2 = memref.alloc() : memref<1xi32>
   %c64 = arith.constant 64 : index
   affine.for %i0 = 0 to 10 {
@@ -644,7 +644,7 @@ func @affine_dma_wait(%arg0 : index) {
 
 // CHECK-LABEL: func @affine_min
 // CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
-func @affine_min(%arg0: index, %arg1: index) -> index{
+func.func @affine_min(%arg0: index, %arg1: index) -> index{
   // CHECK: %[[Cm1:.*]] = arith.constant -1
   // CHECK: %[[neg1:.*]] = arith.muli %[[ARG1]], %[[Cm1:.*]]
   // CHECK: %[[first:.*]] = arith.addi %[[ARG0]], %[[neg1]]
@@ -659,7 +659,7 @@ func @affine_min(%arg0: index, %arg1: index) -> index{
 
 // CHECK-LABEL: func @affine_max
 // CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
-func @affine_max(%arg0: index, %arg1: index) -> index{
+func.func @affine_max(%arg0: index, %arg1: index) -> index{
   // CHECK: %[[Cm1:.*]] = arith.constant -1
   // CHECK: %[[neg1:.*]] = arith.muli %[[ARG1]], %[[Cm1:.*]]
   // CHECK: %[[first:.*]] = arith.addi %[[ARG0]], %[[neg1]]
@@ -674,7 +674,7 @@ func @affine_max(%arg0: index, %arg1: index) -> index{
 
 // CHECK-LABEL: func @affine_parallel(
 // CHECK-SAME: %[[ARG0:.*]]: memref<100x100xf32>, %[[ARG1:.*]]: memref<100x100xf32>) {
-func @affine_parallel(%o: memref<100x100xf32>, %a: memref<100x100xf32>) {
+func.func @affine_parallel(%o: memref<100x100xf32>, %a: memref<100x100xf32>) {
   affine.parallel (%i, %j) = (0, 0) to (100, 100) {
   }
   return
@@ -690,7 +690,7 @@ func @affine_parallel(%o: memref<100x100xf32>, %a: memref<100x100xf32>) {
 
 // CHECK-LABEL: func @affine_parallel_tiled(
 // CHECK-SAME: %[[ARG0:.*]]: memref<100x100xf32>, %[[ARG1:.*]]: memref<100x100xf32>, %[[ARG2:.*]]: memref<100x100xf32>) {
-func @affine_parallel_tiled(%o: memref<100x100xf32>, %a: memref<100x100xf32>, %b: memref<100x100xf32>) {
+func.func @affine_parallel_tiled(%o: memref<100x100xf32>, %a: memref<100x100xf32>, %b: memref<100x100xf32>) {
   affine.parallel (%i0, %j0, %k0) = (0, 0, 0) to (100, 100, 100) step (10, 10, 10) {
     affine.parallel (%i1, %j1, %k1) = (%i0, %j0, %k0) to (%i0 + 10, %j0 + 10, %k0 + 10) {
       %0 = affine.load %a[%i1, %k1] : memref<100x100xf32>
@@ -728,7 +728,7 @@ func @affine_parallel_tiled(%o: memref<100x100xf32>, %a: memref<100x100xf32>, %b
 
 /////////////////////////////////////////////////////////////////////
 
-func @affine_parallel_simple(%arg0: memref<3x3xf32>, %arg1: memref<3x3xf32>) -> (memref<3x3xf32>) {
+func.func @affine_parallel_simple(%arg0: memref<3x3xf32>, %arg1: memref<3x3xf32>) -> (memref<3x3xf32>) {
   %O = memref.alloc() : memref<3x3xf32>
   affine.parallel (%kx, %ky) = (0, 0) to (2, 2) {
       %1 = affine.load %arg0[%kx, %ky] : memref<3x3xf32>
@@ -757,7 +757,7 @@ func @affine_parallel_simple(%arg0: memref<3x3xf32>, %arg1: memref<3x3xf32>) ->
 
 /////////////////////////////////////////////////////////////////////
 
-func @affine_parallel_simple_dynamic_bounds(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
+func.func @affine_parallel_simple_dynamic_bounds(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
   %c_0 = arith.constant 0 : index
   %output_dim = memref.dim %arg0, %c_0 : memref<?x?xf32>
   affine.parallel (%kx, %ky) = (%c_0, %c_0) to (%output_dim, %output_dim) {
@@ -788,7 +788,7 @@ func @affine_parallel_simple_dynamic_bounds(%arg0: memref<?x?xf32>, %arg1: memre
 
 /////////////////////////////////////////////////////////////////////
 
-func @affine_parallel_with_reductions(%arg0: memref<3x3xf32>, %arg1: memref<3x3xf32>) -> (f32, f32) {
+func.func @affine_parallel_with_reductions(%arg0: memref<3x3xf32>, %arg1: memref<3x3xf32>) -> (f32, f32) {
   %0:2 = affine.parallel (%kx, %ky) = (0, 0) to (2, 2) reduce ("addf", "mulf") -> (f32, f32) {
             %1 = affine.load %arg0[%kx, %ky] : memref<3x3xf32>
             %2 = affine.load %arg1[%kx, %ky] : memref<3x3xf32>
@@ -829,7 +829,7 @@ func @affine_parallel_with_reductions(%arg0: memref<3x3xf32>, %arg1: memref<3x3x
 
 /////////////////////////////////////////////////////////////////////
 
-func @affine_parallel_with_reductions_f64(%arg0: memref<3x3xf64>, %arg1: memref<3x3xf64>) -> (f64, f64) {
+func.func @affine_parallel_with_reductions_f64(%arg0: memref<3x3xf64>, %arg1: memref<3x3xf64>) -> (f64, f64) {
   %0:2 = affine.parallel (%kx, %ky) = (0, 0) to (2, 2) reduce ("addf", "mulf") -> (f64, f64) {
             %1 = affine.load %arg0[%kx, %ky] : memref<3x3xf64>
             %2 = affine.load %arg1[%kx, %ky] : memref<3x3xf64>
@@ -868,7 +868,7 @@ func @affine_parallel_with_reductions_f64(%arg0: memref<3x3xf64>, %arg1: memref<
 
 /////////////////////////////////////////////////////////////////////
 
-func @affine_parallel_with_reductions_i64(%arg0: memref<3x3xi64>, %arg1: memref<3x3xi64>) -> (i64, i64) {
+func.func @affine_parallel_with_reductions_i64(%arg0: memref<3x3xi64>, %arg1: memref<3x3xi64>) -> (i64, i64) {
   %0:2 = affine.parallel (%kx, %ky) = (0, 0) to (2, 2) reduce ("addi", "muli") -> (i64, i64) {
             %1 = affine.load %arg0[%kx, %ky] : memref<3x3xi64>
             %2 = affine.load %arg1[%kx, %ky] : memref<3x3xi64>

diff  --git a/mlir/test/Conversion/AffineToStandard/no-memref.mlir b/mlir/test/Conversion/AffineToStandard/no-memref.mlir
index 4b5bee174e84f..b5af765024e1b 100644
--- a/mlir/test/Conversion/AffineToStandard/no-memref.mlir
+++ b/mlir/test/Conversion/AffineToStandard/no-memref.mlir
@@ -4,7 +4,7 @@
 // the lowering pass. We shouldn't fail trying to create memref.load here.
 
 // CHECK-LABEL: @no_memref_op
-func @no_memref_op(%arg0: memref<f32>) {
+func.func @no_memref_op(%arg0: memref<f32>) {
   // CHECK: memref.load
   affine.load %arg0[] : memref<f32>
   return

diff  --git a/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir b/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
index fb25534e62b52..db56d5f405f9e 100644
--- a/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
+++ b/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -pass-pipeline="func.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s
 
 // CHECK-LABEL: @vector_ops
-func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>, %arg3: vector<4xi64>) -> vector<4xf32> {
+func.func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>, %arg3: vector<4xi64>) -> vector<4xf32> {
 // CHECK-NEXT:  %0 = llvm.mlir.constant(dense<4.200000e+01> : vector<4xf32>) : vector<4xf32>
   %0 = arith.constant dense<42.> : vector<4xf32>
 // CHECK-NEXT:  %1 = llvm.fadd %arg0, %0 : vector<4xf32>
@@ -34,7 +34,7 @@ func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>
 }
 
 // CHECK-LABEL: @ops
-func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
+func.func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
 ^bb0(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64):
 // CHECK:  = llvm.fsub %arg0, %arg1 : f32
   %0 = arith.subf %arg0, %arg1: f32
@@ -75,7 +75,7 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
 // system would have a 1-bit address space.  Otherwise, we would have had to
 // make this test dependent on the pointer size on the target system.
 // CHECK-LABEL: @index_cast
-func @index_cast(%arg0: index, %arg1: i1) {
+func.func @index_cast(%arg0: index, %arg1: i1) {
 // CHECK: = llvm.trunc %0 : i{{.*}} to i1
   %0 = arith.index_cast %arg0: index to i1
 // CHECK-NEXT: = llvm.sext %arg1 : i1 to i{{.*}}
@@ -84,7 +84,7 @@ func @index_cast(%arg0: index, %arg1: i1) {
 }
 
 // CHECK-LABEL: @vector_index_cast
-func @vector_index_cast(%arg0: vector<2xindex>, %arg1: vector<2xi1>) {
+func.func @vector_index_cast(%arg0: vector<2xindex>, %arg1: vector<2xi1>) {
 // CHECK: = llvm.trunc %{{.*}} : vector<2xi{{.*}}> to vector<2xi1>
   %0 = arith.index_cast %arg0: vector<2xindex> to vector<2xi1>
 // CHECK-NEXT: = llvm.sext %{{.*}} : vector<2xi1> to vector<2xi{{.*}}>
@@ -94,7 +94,7 @@ func @vector_index_cast(%arg0: vector<2xindex>, %arg1: vector<2xi1>) {
 
 // Checking conversion of signed integer types to floating point.
 // CHECK-LABEL: @sitofp
-func @sitofp(%arg0 : i32, %arg1 : i64) {
+func.func @sitofp(%arg0 : i32, %arg1 : i64) {
 // CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to f32
   %0 = arith.sitofp %arg0: i32 to f32
 // CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to f64
@@ -108,7 +108,7 @@ func @sitofp(%arg0 : i32, %arg1 : i64) {
 
 // Checking conversion of integer vectors to floating point vector types.
 // CHECK-LABEL: @sitofp_vector
-func @sitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector<2xi64>) {
+func.func @sitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector<2xi64>) {
 // CHECK-NEXT: = llvm.sitofp {{.*}} : vector<2xi16> to vector<2xf32>
   %0 = arith.sitofp %arg0: vector<2xi16> to vector<2xf32>
 // CHECK-NEXT: = llvm.sitofp {{.*}} : vector<2xi16> to vector<2xf64>
@@ -126,7 +126,7 @@ func @sitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector
 
 // Checking conversion of unsigned integer types to floating point.
 // CHECK-LABEL: @uitofp
-func @uitofp(%arg0 : i32, %arg1 : i64) {
+func.func @uitofp(%arg0 : i32, %arg1 : i64) {
 // CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to f32
   %0 = arith.uitofp %arg0: i32 to f32
 // CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to f64
@@ -140,7 +140,7 @@ func @uitofp(%arg0 : i32, %arg1 : i64) {
 
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fpext
-func @fpext(%arg0 : f16, %arg1 : f32) {
+func.func @fpext(%arg0 : f16, %arg1 : f32) {
 // CHECK-NEXT: = llvm.fpext {{.*}} : f16 to f32
   %0 = arith.extf %arg0: f16 to f32
 // CHECK-NEXT: = llvm.fpext {{.*}} : f16 to f64
@@ -152,7 +152,7 @@ func @fpext(%arg0 : f16, %arg1 : f32) {
 
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fpext
-func @fpext_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>) {
+func.func @fpext_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>) {
 // CHECK-NEXT: = llvm.fpext {{.*}} : vector<2xf16> to vector<2xf32>
   %0 = arith.extf %arg0: vector<2xf16> to vector<2xf32>
 // CHECK-NEXT: = llvm.fpext {{.*}} : vector<2xf16> to vector<2xf64>
@@ -164,7 +164,7 @@ func @fpext_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>) {
 
 // Checking conversion of floating point to integer types.
 // CHECK-LABEL: @fptosi
-func @fptosi(%arg0 : f32, %arg1 : f64) {
+func.func @fptosi(%arg0 : f32, %arg1 : f64) {
 // CHECK-NEXT: = llvm.fptosi {{.*}} : f32 to i32
   %0 = arith.fptosi %arg0: f32 to i32
 // CHECK-NEXT: = llvm.fptosi {{.*}} : f32 to i64
@@ -178,7 +178,7 @@ func @fptosi(%arg0 : f32, %arg1 : f64) {
 
 // Checking conversion of floating point vectors to integer vector types.
 // CHECK-LABEL: @fptosi_vector
-func @fptosi_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector<2xf64>) {
+func.func @fptosi_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector<2xf64>) {
 // CHECK-NEXT: = llvm.fptosi {{.*}} : vector<2xf16> to vector<2xi32>
   %0 = arith.fptosi %arg0: vector<2xf16> to vector<2xi32>
 // CHECK-NEXT: = llvm.fptosi {{.*}} : vector<2xf16> to vector<2xi64>
@@ -196,7 +196,7 @@ func @fptosi_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector
 
 // Checking conversion of floating point to integer types.
 // CHECK-LABEL: @fptoui
-func @fptoui(%arg0 : f32, %arg1 : f64) {
+func.func @fptoui(%arg0 : f32, %arg1 : f64) {
 // CHECK-NEXT: = llvm.fptoui {{.*}} : f32 to i32
   %0 = arith.fptoui %arg0: f32 to i32
 // CHECK-NEXT: = llvm.fptoui {{.*}} : f32 to i64
@@ -210,7 +210,7 @@ func @fptoui(%arg0 : f32, %arg1 : f64) {
 
 // Checking conversion of floating point vectors to integer vector types.
 // CHECK-LABEL: @fptoui_vector
-func @fptoui_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector<2xf64>) {
+func.func @fptoui_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector<2xf64>) {
 // CHECK-NEXT: = llvm.fptoui {{.*}} : vector<2xf16> to vector<2xi32>
   %0 = arith.fptoui %arg0: vector<2xf16> to vector<2xi32>
 // CHECK-NEXT: = llvm.fptoui {{.*}} : vector<2xf16> to vector<2xi64>
@@ -228,7 +228,7 @@ func @fptoui_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector
 
 // Checking conversion of integer vectors to floating point vector types.
 // CHECK-LABEL: @uitofp_vector
-func @uitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector<2xi64>) {
+func.func @uitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector<2xi64>) {
 // CHECK-NEXT: = llvm.uitofp {{.*}} : vector<2xi16> to vector<2xf32>
   %0 = arith.uitofp %arg0: vector<2xi16> to vector<2xf32>
 // CHECK-NEXT: = llvm.uitofp {{.*}} : vector<2xi16> to vector<2xf64>
@@ -246,7 +246,7 @@ func @uitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector
 
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fptrunc
-func @fptrunc(%arg0 : f32, %arg1 : f64) {
+func.func @fptrunc(%arg0 : f32, %arg1 : f64) {
 // CHECK-NEXT: = llvm.fptrunc {{.*}} : f32 to f16
   %0 = arith.truncf %arg0: f32 to f16
 // CHECK-NEXT: = llvm.fptrunc {{.*}} : f64 to f16
@@ -258,7 +258,7 @@ func @fptrunc(%arg0 : f32, %arg1 : f64) {
 
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fptrunc
-func @fptrunc_vector(%arg0 : vector<2xf32>, %arg1 : vector<2xf64>) {
+func.func @fptrunc_vector(%arg0 : vector<2xf32>, %arg1 : vector<2xf64>) {
 // CHECK-NEXT: = llvm.fptrunc {{.*}} : vector<2xf32> to vector<2xf16>
   %0 = arith.truncf %arg0: vector<2xf32> to vector<2xf16>
 // CHECK-NEXT: = llvm.fptrunc {{.*}} : vector<2xf64> to vector<2xf16>
@@ -270,7 +270,7 @@ func @fptrunc_vector(%arg0 : vector<2xf32>, %arg1 : vector<2xf64>) {
 
 // Check sign and zero extension and truncation of integers.
 // CHECK-LABEL: @integer_extension_and_truncation
-func @integer_extension_and_truncation(%arg0 : i3) {
+func.func @integer_extension_and_truncation(%arg0 : i3) {
 // CHECK-NEXT: = llvm.sext %arg0 : i3 to i6
   %0 = arith.extsi %arg0 : i3 to i6
 // CHECK-NEXT: = llvm.zext %arg0 : i3 to i6
@@ -281,7 +281,7 @@ func @integer_extension_and_truncation(%arg0 : i3) {
 }
 
 // CHECK-LABEL: func @fcmp(%arg0: f32, %arg1: f32) {
-func @fcmp(f32, f32) -> () {
+func.func @fcmp(f32, f32) -> () {
 ^bb0(%arg0: f32, %arg1: f32):
   // CHECK:      llvm.fcmp "oeq" %arg0, %arg1 : f32
   // CHECK-NEXT: llvm.fcmp "ogt" %arg0, %arg1 : f32
@@ -319,7 +319,7 @@ func @fcmp(f32, f32) -> () {
 // -----
 
 // CHECK-LABEL: @index_vector
-func @index_vector(%arg0: vector<4xindex>) {
+func.func @index_vector(%arg0: vector<4xindex>) {
   // CHECK: %[[CST:.*]] = llvm.mlir.constant(dense<[0, 1, 2, 3]> : vector<4xindex>) : vector<4xi64>
   %0 = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex>
   // CHECK: %[[V:.*]] = llvm.add %{{.*}}, %[[CST]] : vector<4xi64>
@@ -330,7 +330,7 @@ func @index_vector(%arg0: vector<4xindex>) {
 // -----
 
 // CHECK-LABEL: @bitcast_1d
-func @bitcast_1d(%arg0: vector<2xf32>) {
+func.func @bitcast_1d(%arg0: vector<2xf32>) {
   // CHECK: llvm.bitcast %{{.*}} : vector<2xf32> to vector<2xi32>
   arith.bitcast %arg0 : vector<2xf32> to vector<2xi32>
   return
@@ -339,7 +339,7 @@ func @bitcast_1d(%arg0: vector<2xf32>) {
 // -----
 
 // CHECK-LABEL: func @cmpf_2dvector(
-func @cmpf_2dvector(%arg0 : vector<4x3xf32>, %arg1 : vector<4x3xf32>) {
+func.func @cmpf_2dvector(%arg0 : vector<4x3xf32>, %arg1 : vector<4x3xf32>) {
   // CHECK: %[[ARG0:.*]] = builtin.unrealized_conversion_cast
   // CHECK: %[[ARG1:.*]] = builtin.unrealized_conversion_cast
   // CHECK: %[[EXTRACT1:.*]] = llvm.extractvalue %[[ARG0]][0] : !llvm.array<4 x vector<3xf32>>
@@ -353,7 +353,7 @@ func @cmpf_2dvector(%arg0 : vector<4x3xf32>, %arg1 : vector<4x3xf32>) {
 // -----
 
 // CHECK-LABEL: func @cmpi_0dvector(
-func @cmpi_0dvector(%arg0 : vector<i32>, %arg1 : vector<i32>) {
+func.func @cmpi_0dvector(%arg0 : vector<i32>, %arg1 : vector<i32>) {
   // CHECK: %[[ARG0:.*]] = builtin.unrealized_conversion_cast
   // CHECK: %[[ARG1:.*]] = builtin.unrealized_conversion_cast
   // CHECK: %[[CMP:.*]] = llvm.icmp "ult" %[[ARG0]], %[[ARG1]] : vector<1xi32>
@@ -364,7 +364,7 @@ func @cmpi_0dvector(%arg0 : vector<i32>, %arg1 : vector<i32>) {
 // -----
 
 // CHECK-LABEL: func @cmpi_2dvector(
-func @cmpi_2dvector(%arg0 : vector<4x3xi32>, %arg1 : vector<4x3xi32>) {
+func.func @cmpi_2dvector(%arg0 : vector<4x3xi32>, %arg1 : vector<4x3xi32>) {
   // CHECK: %[[ARG0:.*]] = builtin.unrealized_conversion_cast
   // CHECK: %[[ARG1:.*]] = builtin.unrealized_conversion_cast
   // CHECK: %[[EXTRACT1:.*]] = llvm.extractvalue %[[ARG0]][0] : !llvm.array<4 x vector<3xi32>>
@@ -378,7 +378,7 @@ func @cmpi_2dvector(%arg0 : vector<4x3xi32>, %arg1 : vector<4x3xi32>) {
 // -----
 
 // CHECK-LABEL: @select
-func @select(%arg0 : i1, %arg1 : i32, %arg2 : i32) -> i32 {
+func.func @select(%arg0 : i1, %arg1 : i32, %arg2 : i32) -> i32 {
   // CHECK: = llvm.select %arg0, %arg1, %arg2 : i1, i32
   %0 = arith.select %arg0, %arg1, %arg2 : i32
   return %0 : i32

diff  --git a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
index 5d324bafed249..0df07b5d80532 100644
--- a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
+++ b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -pass-pipeline="func.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s
 
 // CHECK-LABEL: @vec_bin
-func @vec_bin(%arg0: vector<2x2x2xf32>) -> vector<2x2x2xf32> {
+func.func @vec_bin(%arg0: vector<2x2x2xf32>) -> vector<2x2x2xf32> {
   // CHECK: llvm.mlir.undef : !llvm.array<2 x array<2 x vector<2xf32>>>
 
   // This block appears 2x2 times
@@ -22,7 +22,7 @@ func @vec_bin(%arg0: vector<2x2x2xf32>) -> vector<2x2x2xf32> {
 }
 
 // CHECK-LABEL: @sexti
-func @sexti_vector(%arg0 : vector<1x2x3xi32>, %arg1 : vector<1x2x3xi64>) {
+func.func @sexti_vector(%arg0 : vector<1x2x3xi32>, %arg1 : vector<1x2x3xi64>) {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi32>>>
   // CHECK: llvm.sext %{{.*}} : vector<3xi32> to vector<3xi64>
@@ -35,7 +35,7 @@ func @sexti_vector(%arg0 : vector<1x2x3xi32>, %arg1 : vector<1x2x3xi64>) {
 }
 
 // CHECK-LABEL: @zexti
-func @zexti_vector(%arg0 : vector<1x2x3xi32>, %arg1 : vector<1x2x3xi64>) {
+func.func @zexti_vector(%arg0 : vector<1x2x3xi32>, %arg1 : vector<1x2x3xi64>) {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi32>>>
   // CHECK: llvm.zext %{{.*}} : vector<3xi32> to vector<3xi64>
@@ -48,7 +48,7 @@ func @zexti_vector(%arg0 : vector<1x2x3xi32>, %arg1 : vector<1x2x3xi64>) {
 }
 
 // CHECK-LABEL: @sitofp
-func @sitofp_vector(%arg0 : vector<1x2x3xi32>) -> vector<1x2x3xf32> {
+func.func @sitofp_vector(%arg0 : vector<1x2x3xi32>) -> vector<1x2x3xf32> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xf32>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi32>>>
   // CHECK: llvm.sitofp %{{.*}} : vector<3xi32> to vector<3xf32>
@@ -61,7 +61,7 @@ func @sitofp_vector(%arg0 : vector<1x2x3xi32>) -> vector<1x2x3xf32> {
 }
 
 // CHECK-LABEL: @uitofp
-func @uitofp_vector(%arg0 : vector<1x2x3xi32>) -> vector<1x2x3xf32> {
+func.func @uitofp_vector(%arg0 : vector<1x2x3xi32>) -> vector<1x2x3xf32> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xf32>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi32>>>
   // CHECK: llvm.uitofp %{{.*}} : vector<3xi32> to vector<3xf32>
@@ -74,7 +74,7 @@ func @uitofp_vector(%arg0 : vector<1x2x3xi32>) -> vector<1x2x3xf32> {
 }
 
 // CHECK-LABEL: @fptosi
-func @fptosi_vector(%arg0 : vector<1x2x3xf32>) -> vector<1x2x3xi32> {
+func.func @fptosi_vector(%arg0 : vector<1x2x3xf32>) -> vector<1x2x3xi32> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi32>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xf32>>>
   // CHECK: llvm.fptosi %{{.*}} : vector<3xf32> to vector<3xi32>
@@ -87,7 +87,7 @@ func @fptosi_vector(%arg0 : vector<1x2x3xf32>) -> vector<1x2x3xi32> {
 }
 
 // CHECK-LABEL: @fptoui
-func @fptoui_vector(%arg0 : vector<1x2x3xf32>) -> vector<1x2x3xi32> {
+func.func @fptoui_vector(%arg0 : vector<1x2x3xf32>) -> vector<1x2x3xi32> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi32>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xf32>>>
   // CHECK: llvm.fptoui %{{.*}} : vector<3xf32> to vector<3xi32>
@@ -100,7 +100,7 @@ func @fptoui_vector(%arg0 : vector<1x2x3xf32>) -> vector<1x2x3xi32> {
 }
 
 // CHECK-LABEL: @fpext
-func @fpext_vector(%arg0 : vector<1x2x3xf16>) -> vector<1x2x3xf64> {
+func.func @fpext_vector(%arg0 : vector<1x2x3xf16>) -> vector<1x2x3xf64> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xf64>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xf16>>>
   // CHECK: llvm.fpext %{{.*}} : vector<3xf16> to vector<3xf64>
@@ -113,7 +113,7 @@ func @fpext_vector(%arg0 : vector<1x2x3xf16>) -> vector<1x2x3xf64> {
 }
 
 // CHECK-LABEL: @fptrunc
-func @fptrunc_vector(%arg0 : vector<1x2x3xf64>) -> vector<1x2x3xf16> {
+func.func @fptrunc_vector(%arg0 : vector<1x2x3xf64>) -> vector<1x2x3xf16> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xf16>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xf64>>>
   // CHECK: llvm.fptrunc %{{.*}} : vector<3xf64> to vector<3xf16>
@@ -126,7 +126,7 @@ func @fptrunc_vector(%arg0 : vector<1x2x3xf64>) -> vector<1x2x3xf16> {
 }
 
 // CHECK-LABEL: @trunci
-func @trunci_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi16> {
+func.func @trunci_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi16> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi16>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.trunc %{{.*}} : vector<3xi64> to vector<3xi16>
@@ -139,7 +139,7 @@ func @trunci_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi16> {
 }
 
 // CHECK-LABEL: @shl
-func @shl_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
+func.func @shl_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.shl %{{.*}}, %{{.*}} : vector<3xi64>
@@ -153,7 +153,7 @@ func @shl_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
 }
 
 // CHECK-LABEL: @shrs
-func @shrs_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
+func.func @shrs_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.ashr %{{.*}}, %{{.*}} : vector<3xi64>
@@ -167,7 +167,7 @@ func @shrs_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
 }
 
 // CHECK-LABEL: @shru
-func @shru_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
+func.func @shru_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
   // CHECK: llvm.mlir.undef : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<1 x array<2 x vector<3xi64>>>
   // CHECK: llvm.lshr %{{.*}}, %{{.*}} : vector<3xi64>
@@ -183,7 +183,7 @@ func @shru_vector(%arg0 : vector<1x2x3xi64>) -> vector<1x2x3xi64> {
 // -----
 
 // CHECK-LABEL: @bitcast_2d
-func @bitcast_2d(%arg0: vector<2x4xf32>) {
+func.func @bitcast_2d(%arg0: vector<2x4xf32>) {
   // CHECK: llvm.mlir.undef
   // CHECK: llvm.extractvalue %{{.*}}[0]
   // CHECK: llvm.bitcast %{{.*}} : vector<4xf32> to vector<4xi32>
@@ -198,7 +198,7 @@ func @bitcast_2d(%arg0: vector<2x4xf32>) {
 // -----
 
 // CHECK-LABEL: func @select_2d(
-func @select_2d(%arg0 : vector<4x3xi1>, %arg1 : vector<4x3xi32>, %arg2 : vector<4x3xi32>) {
+func.func @select_2d(%arg0 : vector<4x3xi1>, %arg1 : vector<4x3xi32>, %arg2 : vector<4x3xi32>) {
   // CHECK: %[[ARG0:.*]] = builtin.unrealized_conversion_cast %arg0
   // CHECK: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %arg1
   // CHECK: %[[ARG2:.*]] = builtin.unrealized_conversion_cast %arg2

diff  --git a/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir b/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
index b097ce4922ccb..8da0ae19bb38c 100644
--- a/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
+++ b/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
@@ -11,7 +11,7 @@ module attributes {
 
 // Check integer operation conversions.
 // CHECK-LABEL: @int32_scalar
-func @int32_scalar(%lhs: i32, %rhs: i32) {
+func.func @int32_scalar(%lhs: i32, %rhs: i32) {
   // CHECK: spv.IAdd %{{.*}}, %{{.*}}: i32
   %0 = arith.addi %lhs, %rhs: i32
   // CHECK: spv.ISub %{{.*}}, %{{.*}}: i32
@@ -29,7 +29,7 @@ func @int32_scalar(%lhs: i32, %rhs: i32) {
 
 // CHECK-LABEL: @scalar_srem
 // CHECK-SAME: (%[[LHS:.+]]: i32, %[[RHS:.+]]: i32)
-func @scalar_srem(%lhs: i32, %rhs: i32) {
+func.func @scalar_srem(%lhs: i32, %rhs: i32) {
   // CHECK: %[[LABS:.+]] = spv.GLSL.SAbs %[[LHS]] : i32
   // CHECK: %[[RABS:.+]] = spv.GLSL.SAbs %[[RHS]] : i32
   // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : i32
@@ -42,7 +42,7 @@ func @scalar_srem(%lhs: i32, %rhs: i32) {
 
 // Check float unary operation conversions.
 // CHECK-LABEL: @float32_unary_scalar
-func @float32_unary_scalar(%arg0: f32) {
+func.func @float32_unary_scalar(%arg0: f32) {
   // CHECK: spv.FNegate %{{.*}}: f32
   %0 = arith.negf %arg0 : f32
   return
@@ -50,7 +50,7 @@ func @float32_unary_scalar(%arg0: f32) {
 
 // Check float binary operation conversions.
 // CHECK-LABEL: @float32_binary_scalar
-func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
+func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
   // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %lhs, %rhs: f32
   // CHECK: spv.FSub %{{.*}}, %{{.*}}: f32
@@ -66,7 +66,7 @@ func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
 
 // Check int vector types.
 // CHECK-LABEL: @int_vector234
-func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
+func.func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
   // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi8>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
   // CHECK: spv.UDiv %{{.*}}, %{{.*}}: vector<4xi64>
@@ -76,7 +76,7 @@ func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
 
 // CHECK-LABEL: @vector_srem
 // CHECK-SAME: (%[[LHS:.+]]: vector<3xi16>, %[[RHS:.+]]: vector<3xi16>)
-func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
+func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
   // CHECK: %[[LABS:.+]] = spv.GLSL.SAbs %[[LHS]] : vector<3xi16>
   // CHECK: %[[RABS:.+]] = spv.GLSL.SAbs %[[RHS]] : vector<3xi16>
   // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
@@ -89,7 +89,7 @@ func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
 
 // Check float vector types.
 // CHECK-LABEL: @float_vector234
-func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
+func.func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
   // CHECK: spv.FAdd %{{.*}}, %{{.*}}: vector<2xf16>
   %0 = arith.addf %arg0, %arg0: vector<2xf16>
   // CHECK: spv.FMul %{{.*}}, %{{.*}}: vector<3xf64>
@@ -98,21 +98,21 @@ func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
 }
 
 // CHECK-LABEL: @one_elem_vector
-func @one_elem_vector(%arg0: vector<1xi32>) {
+func.func @one_elem_vector(%arg0: vector<1xi32>) {
   // CHECK: spv.IAdd %{{.+}}, %{{.+}}: i32
   %0 = arith.addi %arg0, %arg0: vector<1xi32>
   return
 }
 
 // CHECK-LABEL: @unsupported_5elem_vector
-func @unsupported_5elem_vector(%arg0: vector<5xi32>) {
+func.func @unsupported_5elem_vector(%arg0: vector<5xi32>) {
   // CHECK: arith.subi
   %1 = arith.subi %arg0, %arg0: vector<5xi32>
   return
 }
 
 // CHECK-LABEL: @unsupported_2x2elem_vector
-func @unsupported_2x2elem_vector(%arg0: vector<2x2xi32>) {
+func.func @unsupported_2x2elem_vector(%arg0: vector<2x2xi32>) {
   // CHECK: arith.muli
   %2 = arith.muli %arg0, %arg0: vector<2x2xi32>
   return
@@ -128,7 +128,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @int_vector23
-func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
+func.func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
   // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi32>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
   // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<3xi32>
@@ -137,7 +137,7 @@ func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
 }
 
 // CHECK-LABEL: @float_scalar
-func @float_scalar(%arg0: f16, %arg1: f64) {
+func.func @float_scalar(%arg0: f16, %arg1: f64) {
   // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %arg0, %arg0: f16
   // CHECK: spv.FMul %{{.*}}, %{{.*}}: f32
@@ -156,7 +156,7 @@ module attributes {
 } {
 
 // expected-error @+1 {{failed to materialize conversion for block argument #0 that remained live after conversion, type was 'vector<4xi64>', with target type 'vector<4xi32>'}}
-func @int_vector4_invalid(%arg0: vector<4xi64>) {
+func.func @int_vector4_invalid(%arg0: vector<4xi64>) {
   // expected-error @+2 {{bitwidth emulation is not implemented yet on unsigned op}}
   // expected-note @+1 {{see existing live user here}}
   %0 = arith.divui %arg0, %arg0: vector<4xi64>
@@ -176,7 +176,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @bitwise_scalar
-func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
+func.func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
   // CHECK: spv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : i32
   // CHECK: spv.BitwiseOr
@@ -187,7 +187,7 @@ func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
 }
 
 // CHECK-LABEL: @bitwise_vector
-func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
+func.func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
   // CHECK: spv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi32>
   // CHECK: spv.BitwiseOr
@@ -198,7 +198,7 @@ func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
 }
 
 // CHECK-LABEL: @logical_scalar
-func @logical_scalar(%arg0 : i1, %arg1 : i1) {
+func.func @logical_scalar(%arg0 : i1, %arg1 : i1) {
   // CHECK: spv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : i1
   // CHECK: spv.LogicalOr
@@ -209,7 +209,7 @@ func @logical_scalar(%arg0 : i1, %arg1 : i1) {
 }
 
 // CHECK-LABEL: @logical_vector
-func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
+func.func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
   // CHECK: spv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi1>
   // CHECK: spv.LogicalOr
@@ -220,7 +220,7 @@ func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
 }
 
 // CHECK-LABEL: @shift_scalar
-func @shift_scalar(%arg0 : i32, %arg1 : i32) {
+func.func @shift_scalar(%arg0 : i32, %arg1 : i32) {
   // CHECK: spv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : i32
   // CHECK: spv.ShiftRightArithmetic
@@ -231,7 +231,7 @@ func @shift_scalar(%arg0 : i32, %arg1 : i32) {
 }
 
 // CHECK-LABEL: @shift_vector
-func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
+func.func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
   // CHECK: spv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : vector<4xi32>
   // CHECK: spv.ShiftRightArithmetic
@@ -254,7 +254,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @cmpf
-func @cmpf(%arg0 : f32, %arg1 : f32) {
+func.func @cmpf(%arg0 : f32, %arg1 : f32) {
   // CHECK: spv.FOrdEqual
   %1 = arith.cmpf oeq, %arg0, %arg1 : f32
   // CHECK: spv.FOrdGreaterThan
@@ -292,7 +292,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @cmpf
-func @cmpf(%arg0 : f32, %arg1 : f32) {
+func.func @cmpf(%arg0 : f32, %arg1 : f32) {
   // CHECK: spv.Ordered
   %0 = arith.cmpf ord, %arg0, %arg1 : f32
   // CHECK: spv.Unordered
@@ -311,7 +311,7 @@ module attributes {
 
 // CHECK-LABEL: @cmpf
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
-func @cmpf(%arg0 : f32, %arg1 : f32) {
+func.func @cmpf(%arg0 : f32, %arg1 : f32) {
   // CHECK:      %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
   // CHECK-NEXT: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
   // CHECK-NEXT: %[[OR:.+]] = spv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
@@ -338,7 +338,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @cmpi
-func @cmpi(%arg0 : i32, %arg1 : i32) {
+func.func @cmpi(%arg0 : i32, %arg1 : i32) {
   // CHECK: spv.IEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i32
   // CHECK: spv.INotEqual
@@ -363,7 +363,7 @@ func @cmpi(%arg0 : i32, %arg1 : i32) {
 }
 
 // CHECK-LABEL: @boolcmpi
-func @boolcmpi(%arg0 : i1, %arg1 : i1) {
+func.func @boolcmpi(%arg0 : i1, %arg1 : i1) {
   // CHECK: spv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i1
   // CHECK: spv.LogicalNotEqual
@@ -372,7 +372,7 @@ func @boolcmpi(%arg0 : i1, %arg1 : i1) {
 }
 
 // CHECK-LABEL: @vecboolcmpi
-func @vecboolcmpi(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
+func.func @vecboolcmpi(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
   // CHECK: spv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : vector<4xi1>
   // CHECK: spv.LogicalNotEqual
@@ -394,7 +394,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @constant
-func @constant() {
+func.func @constant() {
   // CHECK: spv.Constant true
   %0 = arith.constant true
   // CHECK: spv.Constant 42 : i32
@@ -421,7 +421,7 @@ func @constant() {
 }
 
 // CHECK-LABEL: @constant_16bit
-func @constant_16bit() {
+func.func @constant_16bit() {
   // CHECK: spv.Constant 4 : i16
   %0 = arith.constant 4 : i16
   // CHECK: spv.Constant 5.000000e+00 : f16
@@ -434,7 +434,7 @@ func @constant_16bit() {
 }
 
 // CHECK-LABEL: @constant_64bit
-func @constant_64bit() {
+func.func @constant_64bit() {
   // CHECK: spv.Constant 4 : i64
   %0 = arith.constant 4 : i64
   // CHECK: spv.Constant 5.000000e+00 : f64
@@ -447,7 +447,7 @@ func @constant_64bit() {
 }
 
 // CHECK-LABEL: @constant_size1
-func @constant_size1() {
+func.func @constant_size1() {
   // CHECK: spv.Constant true
   %0 = arith.constant dense<true> : tensor<1xi1>
   // CHECK: spv.Constant 4 : i64
@@ -467,7 +467,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @constant_16bit
-func @constant_16bit() {
+func.func @constant_16bit() {
   // CHECK: spv.Constant 4 : i32
   %0 = arith.constant 4 : i16
   // CHECK: spv.Constant 5.000000e+00 : f32
@@ -482,7 +482,7 @@ func @constant_16bit() {
 }
 
 // CHECK-LABEL: @constant_64bit
-func @constant_64bit() {
+func.func @constant_64bit() {
   // CHECK: spv.Constant 4 : i32
   %0 = arith.constant 4 : i64
   // CHECK: spv.Constant 5.000000e+00 : f32
@@ -497,7 +497,7 @@ func @constant_64bit() {
 }
 
 // CHECK-LABEL: @constant_size1
-func @constant_size1() {
+func.func @constant_size1() {
   // CHECK: spv.Constant 4 : i32
   %0 = arith.constant dense<4> : vector<1xi64>
   // CHECK: spv.Constant 5.000000e+00 : f32
@@ -506,7 +506,7 @@ func @constant_size1() {
 }
 
 // CHECK-LABEL: @corner_cases
-func @corner_cases() {
+func.func @corner_cases() {
   // CHECK: %{{.*}} = spv.Constant -1 : i32
   %0 = arith.constant 4294967295  : i64 // 2^32 - 1
   // CHECK: %{{.*}} = spv.Constant 2147483647 : i32
@@ -535,7 +535,7 @@ func @corner_cases() {
 }
 
 // CHECK-LABEL: @unsupported_cases
-func @unsupported_cases() {
+func.func @unsupported_cases() {
   // CHECK: %{{.*}} = arith.constant 4294967296 : i64
   %0 = arith.constant 4294967296 : i64 // 2^32
   // CHECK: %{{.*}} = arith.constant -2147483649 : i64
@@ -559,35 +559,35 @@ module attributes {
 } {
 
 // CHECK-LABEL: index_cast1
-func @index_cast1(%arg0: i16) {
+func.func @index_cast1(%arg0: i16) {
   // CHECK: spv.SConvert %{{.+}} : i16 to i32
   %0 = arith.index_cast %arg0 : i16 to index
   return
 }
 
 // CHECK-LABEL: index_cast2
-func @index_cast2(%arg0: index) {
+func.func @index_cast2(%arg0: index) {
   // CHECK: spv.SConvert %{{.+}} : i32 to i16
   %0 = arith.index_cast %arg0 : index to i16
   return
 }
 
 // CHECK-LABEL: index_cast3
-func @index_cast3(%arg0: i32) {
+func.func @index_cast3(%arg0: i32) {
   // CHECK-NOT: spv.SConvert
   %0 = arith.index_cast %arg0 : i32 to index
   return
 }
 
 // CHECK-LABEL: index_cast4
-func @index_cast4(%arg0: index) {
+func.func @index_cast4(%arg0: index) {
   // CHECK-NOT: spv.SConvert
   %0 = arith.index_cast %arg0 : index to i32
   return
 }
 
 // CHECK-LABEL: @bit_cast
-func @bit_cast(%arg0: vector<2xf32>, %arg1: i64) {
+func.func @bit_cast(%arg0: vector<2xf32>, %arg1: i64) {
   // CHECK: spv.Bitcast %{{.+}} : vector<2xf32> to vector<2xi32>
   %0 = arith.bitcast %arg0 : vector<2xf32> to vector<2xi32>
   // CHECK: spv.Bitcast %{{.+}} : i64 to f64
@@ -596,63 +596,63 @@ func @bit_cast(%arg0: vector<2xf32>, %arg1: i64) {
 }
 
 // CHECK-LABEL: @fpext1
-func @fpext1(%arg0: f16) -> f64 {
+func.func @fpext1(%arg0: f16) -> f64 {
   // CHECK: spv.FConvert %{{.*}} : f16 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fpext2
-func @fpext2(%arg0 : f32) -> f64 {
+func.func @fpext2(%arg0 : f32) -> f64 {
   // CHECK: spv.FConvert %{{.*}} : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fptrunc1
-func @fptrunc1(%arg0 : f64) -> f16 {
+func.func @fptrunc1(%arg0 : f64) -> f16 {
   // CHECK: spv.FConvert %{{.*}} : f64 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @fptrunc2
-func @fptrunc2(%arg0: f32) -> f16 {
+func.func @fptrunc2(%arg0: f32) -> f16 {
   // CHECK: spv.FConvert %{{.*}} : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @sitofp1
-func @sitofp1(%arg0 : i32) -> f32 {
+func.func @sitofp1(%arg0 : i32) -> f32 {
   // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @sitofp2
-func @sitofp2(%arg0 : i64) -> f64 {
+func.func @sitofp2(%arg0 : i64) -> f64 {
   // CHECK: spv.ConvertSToF %{{.*}} : i64 to f64
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @uitofp_i16_f32
-func @uitofp_i16_f32(%arg0: i16) -> f32 {
+func.func @uitofp_i16_f32(%arg0: i16) -> f32 {
   // CHECK: spv.ConvertUToF %{{.*}} : i16 to f32
   %0 = arith.uitofp %arg0 : i16 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i32_f32
-func @uitofp_i32_f32(%arg0 : i32) -> f32 {
+func.func @uitofp_i32_f32(%arg0 : i32) -> f32 {
   // CHECK: spv.ConvertUToF %{{.*}} : i32 to f32
   %0 = arith.uitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i1_f32
-func @uitofp_i1_f32(%arg0 : i1) -> f32 {
+func.func @uitofp_i1_f32(%arg0 : i1) -> f32 {
   // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f32
   // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f32
@@ -661,7 +661,7 @@ func @uitofp_i1_f32(%arg0 : i1) -> f32 {
 }
 
 // CHECK-LABEL: @uitofp_i1_f64
-func @uitofp_i1_f64(%arg0 : i1) -> f64 {
+func.func @uitofp_i1_f64(%arg0 : i1) -> f64 {
   // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f64
   // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f64
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f64
@@ -670,7 +670,7 @@ func @uitofp_i1_f64(%arg0 : i1) -> f64 {
 }
 
 // CHECK-LABEL: @uitofp_vec_i1_f32
-func @uitofp_vec_i1_f32(%arg0 : vector<4xi1>) -> vector<4xf32> {
+func.func @uitofp_vec_i1_f32(%arg0 : vector<4xi1>) -> vector<4xf32> {
   // CHECK: %[[ZERO:.+]] = spv.Constant dense<0.000000e+00> : vector<4xf32>
   // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<4xf32>
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf32>
@@ -690,35 +690,35 @@ spv.func @uitofp_vec_i1_f64(%arg0: vector<4xi1>) -> vector<4xf64> "None" {
 }
 
 // CHECK-LABEL: @sexti1
-func @sexti1(%arg0: i16) -> i64 {
+func.func @sexti1(%arg0: i16) -> i64 {
   // CHECK: spv.SConvert %{{.*}} : i16 to i64
   %0 = arith.extsi %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @sexti2
-func @sexti2(%arg0 : i32) -> i64 {
+func.func @sexti2(%arg0 : i32) -> i64 {
   // CHECK: spv.SConvert %{{.*}} : i32 to i64
   %0 = arith.extsi %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti1
-func @zexti1(%arg0: i16) -> i64 {
+func.func @zexti1(%arg0: i16) -> i64 {
   // CHECK: spv.UConvert %{{.*}} : i16 to i64
   %0 = arith.extui %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti2
-func @zexti2(%arg0 : i32) -> i64 {
+func.func @zexti2(%arg0 : i32) -> i64 {
   // CHECK: spv.UConvert %{{.*}} : i32 to i64
   %0 = arith.extui %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti3
-func @zexti3(%arg0 : i1) -> i32 {
+func.func @zexti3(%arg0 : i1) -> i32 {
   // CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
   // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, i32
@@ -727,7 +727,7 @@ func @zexti3(%arg0 : i1) -> i32 {
 }
 
 // CHECK-LABEL: @zexti4
-func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
+func.func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
   // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi32>
   // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi32>
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi32>
@@ -736,7 +736,7 @@ func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
 }
 
 // CHECK-LABEL: @zexti5
-func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
+func.func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
   // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi64>
   // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi64>
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi64>
@@ -745,21 +745,21 @@ func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
 }
 
 // CHECK-LABEL: @trunci1
-func @trunci1(%arg0 : i64) -> i16 {
+func.func @trunci1(%arg0 : i64) -> i16 {
   // CHECK: spv.SConvert %{{.*}} : i64 to i16
   %0 = arith.trunci %arg0 : i64 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunci2
-func @trunci2(%arg0: i32) -> i16 {
+func.func @trunci2(%arg0: i32) -> i16 {
   // CHECK: spv.SConvert %{{.*}} : i32 to i16
   %0 = arith.trunci %arg0 : i32 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunc_to_i1
-func @trunc_to_i1(%arg0: i32) -> i1 {
+func.func @trunc_to_i1(%arg0: i32) -> i1 {
   // CHECK: %[[MASK:.*]] = spv.Constant 1 : i32
   // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : i32
   // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : i32
@@ -771,7 +771,7 @@ func @trunc_to_i1(%arg0: i32) -> i1 {
 }
 
 // CHECK-LABEL: @trunc_to_veci1
-func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
+func.func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
   // CHECK: %[[MASK:.*]] = spv.Constant dense<1> : vector<4xi32>
   // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : vector<4xi32>
   // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : vector<4xi32>
@@ -783,14 +783,14 @@ func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
 }
 
 // CHECK-LABEL: @fptosi1
-func @fptosi1(%arg0 : f32) -> i32 {
+func.func @fptosi1(%arg0 : f32) -> i32 {
   // CHECK: spv.ConvertFToS %{{.*}} : f32 to i32
   %0 = arith.fptosi %arg0 : f32 to i32
   return %0 : i32
 }
 
 // CHECK-LABEL: @fptosi2
-func @fptosi2(%arg0 : f16) -> i16 {
+func.func @fptosi2(%arg0 : f16) -> i16 {
   // CHECK: spv.ConvertFToS %{{.*}} : f16 to i16
   %0 = arith.fptosi %arg0 : f16 to i16
   return %0 : i16
@@ -808,7 +808,7 @@ module attributes {
 
 // CHECK-LABEL: @fpext1
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fpext1(%arg0: f16) -> f64 {
+func.func @fpext1(%arg0: f16) -> f64 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0: f64
@@ -816,7 +816,7 @@ func @fpext1(%arg0: f16) -> f64 {
 
 // CHECK-LABEL: @fpext2
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fpext2(%arg0 : f32) -> f64 {
+func.func @fpext2(%arg0 : f32) -> f64 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0: f64
@@ -834,7 +834,7 @@ module attributes {
 
 // CHECK-LABEL: @fptrunc1
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fptrunc1(%arg0 : f64) -> f16 {
+func.func @fptrunc1(%arg0 : f64) -> f16 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0: f16
@@ -842,14 +842,14 @@ func @fptrunc1(%arg0 : f64) -> f16 {
 
 // CHECK-LABEL: @fptrunc2
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fptrunc2(%arg0: f32) -> f16 {
+func.func @fptrunc2(%arg0: f32) -> f16 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0: f16
 }
 
 // CHECK-LABEL: @sitofp
-func @sitofp(%arg0 : i64) -> f64 {
+func.func @sitofp(%arg0 : i64) -> f64 {
   // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0: f64
@@ -867,7 +867,7 @@ module attributes {
 
 // CHECK-LABEL: @scalar_srem
 // CHECK-SAME: (%[[LHS:.+]]: i32, %[[RHS:.+]]: i32)
-func @scalar_srem(%lhs: i32, %rhs: i32) {
+func.func @scalar_srem(%lhs: i32, %rhs: i32) {
   // CHECK: %[[LABS:.+]] = spv.OCL.s_abs %[[LHS]] : i32
   // CHECK: %[[RABS:.+]] = spv.OCL.s_abs %[[RHS]] : i32
   // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : i32
@@ -880,7 +880,7 @@ func @scalar_srem(%lhs: i32, %rhs: i32) {
 
 // CHECK-LABEL: @vector_srem
 // CHECK-SAME: (%[[LHS:.+]]: vector<3xi16>, %[[RHS:.+]]: vector<3xi16>)
-func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
+func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
   // CHECK: %[[LABS:.+]] = spv.OCL.s_abs %[[LHS]] : vector<3xi16>
   // CHECK: %[[RABS:.+]] = spv.OCL.s_abs %[[RHS]] : vector<3xi16>
   // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
@@ -902,7 +902,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @select
-func @select(%arg0 : i32, %arg1 : i32) {
+func.func @select(%arg0 : i32, %arg1 : i32) {
   %0 = arith.cmpi sle, %arg0, %arg1 : i32
   // CHECK: spv.Select
   %1 = arith.select %0, %arg0, %arg1 : i32
@@ -924,7 +924,7 @@ module attributes {
 
 // Check integer operation conversions.
 // CHECK-LABEL: @int32_scalar
-func @int32_scalar(%lhs: i32, %rhs: i32) {
+func.func @int32_scalar(%lhs: i32, %rhs: i32) {
   // CHECK: spv.IAdd %{{.*}}, %{{.*}}: i32
   %0 = arith.addi %lhs, %rhs: i32
   // CHECK: spv.ISub %{{.*}}, %{{.*}}: i32
@@ -950,7 +950,7 @@ func @int32_scalar(%lhs: i32, %rhs: i32) {
 
 // CHECK-LABEL: @scalar_srem
 // CHECK-SAME: (%[[LHS:.+]]: i32, %[[RHS:.+]]: i32)
-func @scalar_srem(%lhs: i32, %rhs: i32) {
+func.func @scalar_srem(%lhs: i32, %rhs: i32) {
   // CHECK: %[[LABS:.+]] = spv.GLSL.SAbs %[[LHS]] : i32
   // CHECK: %[[RABS:.+]] = spv.GLSL.SAbs %[[RHS]] : i32
   // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : i32
@@ -963,7 +963,7 @@ func @scalar_srem(%lhs: i32, %rhs: i32) {
 
 // Check float unary operation conversions.
 // CHECK-LABEL: @float32_unary_scalar
-func @float32_unary_scalar(%arg0: f32) {
+func.func @float32_unary_scalar(%arg0: f32) {
   // CHECK: spv.FNegate %{{.*}}: f32
   %5 = arith.negf %arg0 : f32
   return
@@ -971,7 +971,7 @@ func @float32_unary_scalar(%arg0: f32) {
 
 // Check float binary operation conversions.
 // CHECK-LABEL: @float32_binary_scalar
-func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
+func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
   // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %lhs, %rhs: f32
   // CHECK: spv.FSub %{{.*}}, %{{.*}}: f32
@@ -991,7 +991,7 @@ func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
 
 // Check int vector types.
 // CHECK-LABEL: @int_vector234
-func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
+func.func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
   // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi8>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
   // CHECK: spv.UDiv %{{.*}}, %{{.*}}: vector<4xi64>
@@ -1001,7 +1001,7 @@ func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
 
 // CHECK-LABEL: @vector_srem
 // CHECK-SAME: (%[[LHS:.+]]: vector<3xi16>, %[[RHS:.+]]: vector<3xi16>)
-func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
+func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
   // CHECK: %[[LABS:.+]] = spv.GLSL.SAbs %[[LHS]] : vector<3xi16>
   // CHECK: %[[RABS:.+]] = spv.GLSL.SAbs %[[RHS]] : vector<3xi16>
   // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
@@ -1014,7 +1014,7 @@ func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
 
 // Check float vector types.
 // CHECK-LABEL: @float_vector234
-func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
+func.func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
   // CHECK: spv.FAdd %{{.*}}, %{{.*}}: vector<2xf16>
   %0 = arith.addf %arg0, %arg0: vector<2xf16>
   // CHECK: spv.FMul %{{.*}}, %{{.*}}: vector<3xf64>
@@ -1023,21 +1023,21 @@ func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
 }
 
 // CHECK-LABEL: @one_elem_vector
-func @one_elem_vector(%arg0: vector<1xi32>) {
+func.func @one_elem_vector(%arg0: vector<1xi32>) {
   // CHECK: spv.IAdd %{{.+}}, %{{.+}}: i32
   %0 = arith.addi %arg0, %arg0: vector<1xi32>
   return
 }
 
 // CHECK-LABEL: @unsupported_5elem_vector
-func @unsupported_5elem_vector(%arg0: vector<5xi32>) {
+func.func @unsupported_5elem_vector(%arg0: vector<5xi32>) {
   // CHECK: subi
   %1 = arith.subi %arg0, %arg0: vector<5xi32>
   return
 }
 
 // CHECK-LABEL: @unsupported_2x2elem_vector
-func @unsupported_2x2elem_vector(%arg0: vector<2x2xi32>) {
+func.func @unsupported_2x2elem_vector(%arg0: vector<2x2xi32>) {
   // CHECK: muli
   %2 = arith.muli %arg0, %arg0: vector<2x2xi32>
   return
@@ -1053,7 +1053,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @int_vector23
-func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
+func.func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
   // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi32>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
   // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<3xi32>
@@ -1062,7 +1062,7 @@ func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
 }
 
 // CHECK-LABEL: @float_scalar
-func @float_scalar(%arg0: f16, %arg1: f64) {
+func.func @float_scalar(%arg0: f16, %arg1: f64) {
   // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %arg0, %arg0: f16
   // CHECK: spv.FMul %{{.*}}, %{{.*}}: f32
@@ -1081,7 +1081,7 @@ module attributes {
 } {
 
 // expected-error@below {{failed to materialize conversion for block argument #0 that remained live after conversion}}
-func @int_vector4_invalid(%arg0: vector<4xi64>) {
+func.func @int_vector4_invalid(%arg0: vector<4xi64>) {
   // expected-error@below {{bitwidth emulation is not implemented yet on unsigned op}}
   // expected-note@below {{see existing live user here}}
   %0 = arith.divui %arg0, %arg0: vector<4xi64>
@@ -1101,7 +1101,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @bitwise_scalar
-func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
+func.func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
   // CHECK: spv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : i32
   // CHECK: spv.BitwiseOr
@@ -1112,7 +1112,7 @@ func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
 }
 
 // CHECK-LABEL: @bitwise_vector
-func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
+func.func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
   // CHECK: spv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi32>
   // CHECK: spv.BitwiseOr
@@ -1123,7 +1123,7 @@ func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
 }
 
 // CHECK-LABEL: @logical_scalar
-func @logical_scalar(%arg0 : i1, %arg1 : i1) {
+func.func @logical_scalar(%arg0 : i1, %arg1 : i1) {
   // CHECK: spv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : i1
   // CHECK: spv.LogicalOr
@@ -1134,7 +1134,7 @@ func @logical_scalar(%arg0 : i1, %arg1 : i1) {
 }
 
 // CHECK-LABEL: @logical_vector
-func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
+func.func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
   // CHECK: spv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi1>
   // CHECK: spv.LogicalOr
@@ -1145,7 +1145,7 @@ func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
 }
 
 // CHECK-LABEL: @shift_scalar
-func @shift_scalar(%arg0 : i32, %arg1 : i32) {
+func.func @shift_scalar(%arg0 : i32, %arg1 : i32) {
   // CHECK: spv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : i32
   // CHECK: spv.ShiftRightArithmetic
@@ -1156,7 +1156,7 @@ func @shift_scalar(%arg0 : i32, %arg1 : i32) {
 }
 
 // CHECK-LABEL: @shift_vector
-func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
+func.func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
   // CHECK: spv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : vector<4xi32>
   // CHECK: spv.ShiftRightArithmetic
@@ -1179,7 +1179,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @cmpf
-func @cmpf(%arg0 : f32, %arg1 : f32) {
+func.func @cmpf(%arg0 : f32, %arg1 : f32) {
   // CHECK: spv.FOrdEqual
   %1 = arith.cmpf oeq, %arg0, %arg1 : f32
   // CHECK: spv.FOrdGreaterThan
@@ -1217,7 +1217,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @cmpf
-func @cmpf(%arg0 : f32, %arg1 : f32) {
+func.func @cmpf(%arg0 : f32, %arg1 : f32) {
   // CHECK: spv.Ordered
   %0 = arith.cmpf ord, %arg0, %arg1 : f32
   // CHECK: spv.Unordered
@@ -1236,7 +1236,7 @@ module attributes {
 
 // CHECK-LABEL: @cmpf
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
-func @cmpf(%arg0 : f32, %arg1 : f32) {
+func.func @cmpf(%arg0 : f32, %arg1 : f32) {
   // CHECK:      %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
   // CHECK-NEXT: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
   // CHECK-NEXT: %[[OR:.+]] = spv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
@@ -1263,7 +1263,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @cmpi
-func @cmpi(%arg0 : i32, %arg1 : i32) {
+func.func @cmpi(%arg0 : i32, %arg1 : i32) {
   // CHECK: spv.IEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i32
   // CHECK: spv.INotEqual
@@ -1288,7 +1288,7 @@ func @cmpi(%arg0 : i32, %arg1 : i32) {
 }
 
 // CHECK-LABEL: @boolcmpi
-func @boolcmpi(%arg0 : i1, %arg1 : i1) {
+func.func @boolcmpi(%arg0 : i1, %arg1 : i1) {
   // CHECK: spv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i1
   // CHECK: spv.LogicalNotEqual
@@ -1297,7 +1297,7 @@ func @boolcmpi(%arg0 : i1, %arg1 : i1) {
 }
 
 // CHECK-LABEL: @vecboolcmpi
-func @vecboolcmpi(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
+func.func @vecboolcmpi(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
   // CHECK: spv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : vector<4xi1>
   // CHECK: spv.LogicalNotEqual
@@ -1319,7 +1319,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @constant
-func @constant() {
+func.func @constant() {
   // CHECK: spv.Constant true
   %0 = arith.constant true
   // CHECK: spv.Constant 42 : i32
@@ -1346,7 +1346,7 @@ func @constant() {
 }
 
 // CHECK-LABEL: @constant_16bit
-func @constant_16bit() {
+func.func @constant_16bit() {
   // CHECK: spv.Constant 4 : i16
   %0 = arith.constant 4 : i16
   // CHECK: spv.Constant 5.000000e+00 : f16
@@ -1359,7 +1359,7 @@ func @constant_16bit() {
 }
 
 // CHECK-LABEL: @constant_64bit
-func @constant_64bit() {
+func.func @constant_64bit() {
   // CHECK: spv.Constant 4 : i64
   %0 = arith.constant 4 : i64
   // CHECK: spv.Constant 5.000000e+00 : f64
@@ -1381,7 +1381,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @constant_16bit
-func @constant_16bit() {
+func.func @constant_16bit() {
   // CHECK: spv.Constant 4 : i32
   %0 = arith.constant 4 : i16
   // CHECK: spv.Constant 5.000000e+00 : f32
@@ -1396,7 +1396,7 @@ func @constant_16bit() {
 }
 
 // CHECK-LABEL: @constant_64bit
-func @constant_64bit() {
+func.func @constant_64bit() {
   // CHECK: spv.Constant 4 : i32
   %0 = arith.constant 4 : i64
   // CHECK: spv.Constant 5.000000e+00 : f32
@@ -1411,7 +1411,7 @@ func @constant_64bit() {
 }
 
 // CHECK-LABEL: @corner_cases
-func @corner_cases() {
+func.func @corner_cases() {
   // CHECK: %{{.*}} = spv.Constant -1 : i32
   %0 = arith.constant 4294967295  : i64 // 2^32 - 1
   // CHECK: %{{.*}} = spv.Constant 2147483647 : i32
@@ -1440,7 +1440,7 @@ func @corner_cases() {
 }
 
 // CHECK-LABEL: @unsupported_cases
-func @unsupported_cases() {
+func.func @unsupported_cases() {
   // CHECK: %{{.*}} = arith.constant 4294967296 : i64
   %0 = arith.constant 4294967296 : i64 // 2^32
   // CHECK: %{{.*}} = arith.constant -2147483649 : i64
@@ -1464,91 +1464,91 @@ module attributes {
 } {
 
 // CHECK-LABEL: index_cast1
-func @index_cast1(%arg0: i16) {
+func.func @index_cast1(%arg0: i16) {
   // CHECK: spv.SConvert %{{.+}} : i16 to i32
   %0 = arith.index_cast %arg0 : i16 to index
   return
 }
 
 // CHECK-LABEL: index_cast2
-func @index_cast2(%arg0: index) {
+func.func @index_cast2(%arg0: index) {
   // CHECK: spv.SConvert %{{.+}} : i32 to i16
   %0 = arith.index_cast %arg0 : index to i16
   return
 }
 
 // CHECK-LABEL: index_cast3
-func @index_cast3(%arg0: i32) {
+func.func @index_cast3(%arg0: i32) {
   // CHECK-NOT: spv.SConvert
   %0 = arith.index_cast %arg0 : i32 to index
   return
 }
 
 // CHECK-LABEL: index_cast4
-func @index_cast4(%arg0: index) {
+func.func @index_cast4(%arg0: index) {
   // CHECK-NOT: spv.SConvert
   %0 = arith.index_cast %arg0 : index to i32
   return
 }
 
 // CHECK-LABEL: @fpext1
-func @fpext1(%arg0: f16) -> f64 {
+func.func @fpext1(%arg0: f16) -> f64 {
   // CHECK: spv.FConvert %{{.*}} : f16 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fpext2
-func @fpext2(%arg0 : f32) -> f64 {
+func.func @fpext2(%arg0 : f32) -> f64 {
   // CHECK: spv.FConvert %{{.*}} : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fptrunc1
-func @fptrunc1(%arg0 : f64) -> f16 {
+func.func @fptrunc1(%arg0 : f64) -> f16 {
   // CHECK: spv.FConvert %{{.*}} : f64 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @fptrunc2
-func @fptrunc2(%arg0: f32) -> f16 {
+func.func @fptrunc2(%arg0: f32) -> f16 {
   // CHECK: spv.FConvert %{{.*}} : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @sitofp1
-func @sitofp1(%arg0 : i32) -> f32 {
+func.func @sitofp1(%arg0 : i32) -> f32 {
   // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @sitofp2
-func @sitofp2(%arg0 : i64) -> f64 {
+func.func @sitofp2(%arg0 : i64) -> f64 {
   // CHECK: spv.ConvertSToF %{{.*}} : i64 to f64
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @uitofp_i16_f32
-func @uitofp_i16_f32(%arg0: i16) -> f32 {
+func.func @uitofp_i16_f32(%arg0: i16) -> f32 {
   // CHECK: spv.ConvertUToF %{{.*}} : i16 to f32
   %0 = arith.uitofp %arg0 : i16 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i32_f32
-func @uitofp_i32_f32(%arg0 : i32) -> f32 {
+func.func @uitofp_i32_f32(%arg0 : i32) -> f32 {
   // CHECK: spv.ConvertUToF %{{.*}} : i32 to f32
   %0 = arith.uitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i1_f32
-func @uitofp_i1_f32(%arg0 : i1) -> f32 {
+func.func @uitofp_i1_f32(%arg0 : i1) -> f32 {
   // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f32
   // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f32
@@ -1557,7 +1557,7 @@ func @uitofp_i1_f32(%arg0 : i1) -> f32 {
 }
 
 // CHECK-LABEL: @uitofp_i1_f64
-func @uitofp_i1_f64(%arg0 : i1) -> f64 {
+func.func @uitofp_i1_f64(%arg0 : i1) -> f64 {
   // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f64
   // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f64
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f64
@@ -1566,7 +1566,7 @@ func @uitofp_i1_f64(%arg0 : i1) -> f64 {
 }
 
 // CHECK-LABEL: @uitofp_vec_i1_f32
-func @uitofp_vec_i1_f32(%arg0 : vector<4xi1>) -> vector<4xf32> {
+func.func @uitofp_vec_i1_f32(%arg0 : vector<4xi1>) -> vector<4xf32> {
   // CHECK: %[[ZERO:.+]] = spv.Constant dense<0.000000e+00> : vector<4xf32>
   // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<4xf32>
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf32>
@@ -1586,35 +1586,35 @@ spv.func @uitofp_vec_i1_f64(%arg0: vector<4xi1>) -> vector<4xf64> "None" {
 }
 
 // CHECK-LABEL: @sexti1
-func @sexti1(%arg0: i16) -> i64 {
+func.func @sexti1(%arg0: i16) -> i64 {
   // CHECK: spv.SConvert %{{.*}} : i16 to i64
   %0 = arith.extsi %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @sexti2
-func @sexti2(%arg0 : i32) -> i64 {
+func.func @sexti2(%arg0 : i32) -> i64 {
   // CHECK: spv.SConvert %{{.*}} : i32 to i64
   %0 = arith.extsi %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti1
-func @zexti1(%arg0: i16) -> i64 {
+func.func @zexti1(%arg0: i16) -> i64 {
   // CHECK: spv.UConvert %{{.*}} : i16 to i64
   %0 = arith.extui %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti2
-func @zexti2(%arg0 : i32) -> i64 {
+func.func @zexti2(%arg0 : i32) -> i64 {
   // CHECK: spv.UConvert %{{.*}} : i32 to i64
   %0 = arith.extui %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti3
-func @zexti3(%arg0 : i1) -> i32 {
+func.func @zexti3(%arg0 : i1) -> i32 {
   // CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
   // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, i32
@@ -1623,7 +1623,7 @@ func @zexti3(%arg0 : i1) -> i32 {
 }
 
 // CHECK-LABEL: @zexti4
-func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
+func.func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
   // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi32>
   // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi32>
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi32>
@@ -1632,7 +1632,7 @@ func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
 }
 
 // CHECK-LABEL: @zexti5
-func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
+func.func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
   // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi64>
   // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi64>
   // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi64>
@@ -1641,21 +1641,21 @@ func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
 }
 
 // CHECK-LABEL: @trunci1
-func @trunci1(%arg0 : i64) -> i16 {
+func.func @trunci1(%arg0 : i64) -> i16 {
   // CHECK: spv.SConvert %{{.*}} : i64 to i16
   %0 = arith.trunci %arg0 : i64 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunci2
-func @trunci2(%arg0: i32) -> i16 {
+func.func @trunci2(%arg0: i32) -> i16 {
   // CHECK: spv.SConvert %{{.*}} : i32 to i16
   %0 = arith.trunci %arg0 : i32 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunc_to_i1
-func @trunc_to_i1(%arg0: i32) -> i1 {
+func.func @trunc_to_i1(%arg0: i32) -> i1 {
   // CHECK: %[[MASK:.*]] = spv.Constant 1 : i32
   // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : i32
   // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : i32
@@ -1667,7 +1667,7 @@ func @trunc_to_i1(%arg0: i32) -> i1 {
 }
 
 // CHECK-LABEL: @trunc_to_veci1
-func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
+func.func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
   // CHECK: %[[MASK:.*]] = spv.Constant dense<1> : vector<4xi32>
   // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : vector<4xi32>
   // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : vector<4xi32>
@@ -1679,14 +1679,14 @@ func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
 }
 
 // CHECK-LABEL: @fptosi1
-func @fptosi1(%arg0 : f32) -> i32 {
+func.func @fptosi1(%arg0 : f32) -> i32 {
   // CHECK: spv.ConvertFToS %{{.*}} : f32 to i32
   %0 = arith.fptosi %arg0 : f32 to i32
   return %0 : i32
 }
 
 // CHECK-LABEL: @fptosi2
-func @fptosi2(%arg0 : f16) -> i16 {
+func.func @fptosi2(%arg0 : f16) -> i16 {
   // CHECK: spv.ConvertFToS %{{.*}} : f16 to i16
   %0 = arith.fptosi %arg0 : f16 to i16
   return %0 : i16
@@ -1704,7 +1704,7 @@ module attributes {
 
 // CHECK-LABEL: @fpext1
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fpext1(%arg0: f16) -> f64 {
+func.func @fpext1(%arg0: f16) -> f64 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0: f64
@@ -1712,7 +1712,7 @@ func @fpext1(%arg0: f16) -> f64 {
 
 // CHECK-LABEL: @fpext2
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fpext2(%arg0 : f32) -> f64 {
+func.func @fpext2(%arg0 : f32) -> f64 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0: f64
@@ -1730,7 +1730,7 @@ module attributes {
 
 // CHECK-LABEL: @fptrunc1
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fptrunc1(%arg0 : f64) -> f16 {
+func.func @fptrunc1(%arg0 : f64) -> f16 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0: f16
@@ -1738,14 +1738,14 @@ func @fptrunc1(%arg0 : f64) -> f16 {
 
 // CHECK-LABEL: @fptrunc2
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @fptrunc2(%arg0: f32) -> f16 {
+func.func @fptrunc2(%arg0: f32) -> f16 {
   // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0: f16
 }
 
 // CHECK-LABEL: @sitofp
-func @sitofp(%arg0 : i64) -> f64 {
+func.func @sitofp(%arg0 : i64) -> f64 {
   // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0: f64

diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
index f84c5960c4200..404be5c7b3710 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -convert-async-to-llvm | FileCheck %s
 
 // CHECK-LABEL: @coro_id
-func @coro_id() {
+func.func @coro_id() {
   // CHECK: %0 = llvm.mlir.constant(0 : i32) : i32
   // CHECK: %1 = llvm.mlir.null : !llvm.ptr<i8>
   // CHECK: %2 = llvm.intr.coro.id %0, %1, %1, %1 : !llvm.token
@@ -10,7 +10,7 @@ func @coro_id() {
 }
 
 // CHECK-LABEL: @coro_begin
-func @coro_begin() {
+func.func @coro_begin() {
   // CHECK: %[[ID:.*]] = llvm.intr.coro.id
   %0 = async.coro.id
   // CHECK: %[[SIZE:.*]] = llvm.intr.coro.size : i64
@@ -28,7 +28,7 @@ func @coro_begin() {
 }
 
 // CHECK-LABEL: @coro_free
-func @coro_free() {
+func.func @coro_free() {
   // CHECK: %[[ID:.*]] = llvm.intr.coro.id
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
@@ -40,7 +40,7 @@ func @coro_free() {
 }
 
 // CHECK-LABEL: @coro_end
-func @coro_end() {
+func.func @coro_end() {
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
   %1 = async.coro.begin %0
@@ -51,7 +51,7 @@ func @coro_end() {
 }
 
 // CHECK-LABEL: @coro_save
-func @coro_save() {
+func.func @coro_save() {
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
   %1 = async.coro.begin %0
@@ -61,7 +61,7 @@ func @coro_save() {
 }
 
 // CHECK-LABEL: @coro_suspend
-func @coro_suspend() {
+func.func @coro_suspend() {
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
   %1 = async.coro.begin %0

diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
index 4b598c474a946..8784397fa1f5c 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
@@ -1,14 +1,14 @@
 // RUN: mlir-opt %s -convert-async-to-llvm | FileCheck %s --dump-input=always
 
 // CHECK-LABEL: @create_token
-func @create_token() {
+func.func @create_token() {
   // CHECK: %[[TOKEN:.*]] = call @mlirAsyncRuntimeCreateToken
   %0 = async.runtime.create : !async.token
   return
 }
 
 // CHECK-LABEL: @create_value
-func @create_value() {
+func.func @create_value() {
   // CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<f32>
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
   // CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][%[[ONE]]]
@@ -19,7 +19,7 @@ func @create_value() {
 }
 
 // CHECK-LABEL: @create_group
-func @create_group() {
+func.func @create_group() {
   // CHECK: %[[C:.*]] = arith.constant 1 : index
   // CHECK: %[[S:.*]] = builtin.unrealized_conversion_cast %[[C]] : index to i64
   %c = arith.constant 1 : index
@@ -29,7 +29,7 @@ func @create_group() {
 }
 
 // CHECK-LABEL: @set_token_available
-func @set_token_available() {
+func.func @set_token_available() {
   // CHECK: %[[TOKEN:.*]] = call @mlirAsyncRuntimeCreateToken
   %0 = async.runtime.create : !async.token
   // CHECK: call @mlirAsyncRuntimeEmplaceToken(%[[TOKEN]])
@@ -38,7 +38,7 @@ func @set_token_available() {
 }
 
 // CHECK-LABEL: @set_value_available
-func @set_value_available() {
+func.func @set_value_available() {
   // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
   %0 = async.runtime.create : !async.value<f32>
   // CHECK: call @mlirAsyncRuntimeEmplaceValue(%[[VALUE]])
@@ -47,7 +47,7 @@ func @set_value_available() {
 }
 
 // CHECK-LABEL: @is_token_error
-func @is_token_error() -> i1 {
+func.func @is_token_error() -> i1 {
   // CHECK: %[[TOKEN:.*]] = call @mlirAsyncRuntimeCreateToken
   %0 = async.runtime.create : !async.token
   // CHECK: %[[ERR:.*]] = call @mlirAsyncRuntimeIsTokenError(%[[TOKEN]])
@@ -56,7 +56,7 @@ func @is_token_error() -> i1 {
 }
 
 // CHECK-LABEL: @is_value_error
-func @is_value_error() -> i1 {
+func.func @is_value_error() -> i1 {
   // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
   %0 = async.runtime.create : !async.value<f32>
   // CHECK: %[[ERR:.*]] = call @mlirAsyncRuntimeIsValueError(%[[VALUE]])
@@ -65,7 +65,7 @@ func @is_value_error() -> i1 {
 }
 
 // CHECK-LABEL: @await_token
-func @await_token() {
+func.func @await_token() {
   // CHECK: %[[TOKEN:.*]] = call @mlirAsyncRuntimeCreateToken
   %0 = async.runtime.create : !async.token
   // CHECK: call @mlirAsyncRuntimeAwaitToken(%[[TOKEN]])
@@ -74,7 +74,7 @@ func @await_token() {
 }
 
 // CHECK-LABEL: @await_value
-func @await_value() {
+func.func @await_value() {
   // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
   %0 = async.runtime.create : !async.value<f32>
   // CHECK: call @mlirAsyncRuntimeAwaitValue(%[[VALUE]])
@@ -83,7 +83,7 @@ func @await_value() {
 }
 
 // CHECK-LABEL: @await_group
-func @await_group() {
+func.func @await_group() {
   %c = arith.constant 1 : index
   // CHECK: %[[GROUP:.*]] = call @mlirAsyncRuntimeCreateGroup
   %0 = async.runtime.create_group %c: !async.group
@@ -93,7 +93,7 @@ func @await_group() {
 }
 
 // CHECK-LABEL: @await_and_resume_token
-func @await_and_resume_token() {
+func.func @await_and_resume_token() {
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
   %1 = async.coro.begin %0
@@ -107,7 +107,7 @@ func @await_and_resume_token() {
 }
 
 // CHECK-LABEL: @await_and_resume_value
-func @await_and_resume_value() {
+func.func @await_and_resume_value() {
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
   %1 = async.coro.begin %0
@@ -121,7 +121,7 @@ func @await_and_resume_value() {
 }
 
 // CHECK-LABEL: @await_and_resume_group
-func @await_and_resume_group() {
+func.func @await_and_resume_group() {
   %c = arith.constant 1 : index
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
@@ -136,7 +136,7 @@ func @await_and_resume_group() {
 }
 
 // CHECK-LABEL: @resume
-func @resume() {
+func.func @resume() {
   %0 = async.coro.id
   // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
   %1 = async.coro.begin %0
@@ -147,7 +147,7 @@ func @resume() {
 }
 
 // CHECK-LABEL: @store
-func @store() {
+func.func @store() {
   // CHECK: %[[CST:.*]] = arith.constant 1.0
   %0 = arith.constant 1.0 : f32
   // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
@@ -160,7 +160,7 @@ func @store() {
 }
 
 // CHECK-LABEL: @load
-func @load() -> f32 {
+func.func @load() -> f32 {
   // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
   %0 = async.runtime.create : !async.value<f32>
   // CHECK: %[[P0:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
@@ -172,7 +172,7 @@ func @load() -> f32 {
 }
 
 // CHECK-LABEL: @add_token_to_group
-func @add_token_to_group() {
+func.func @add_token_to_group() {
   %c = arith.constant 1 : index
   // CHECK: %[[TOKEN:.*]] = call @mlirAsyncRuntimeCreateToken
   %0 = async.runtime.create : !async.token

diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
index 73aa0d3d671b0..05bffc6967bd3 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -split-input-file -async-to-async-runtime -convert-async-to-llvm | FileCheck %s
 
 // CHECK-LABEL: reference_counting
-func @reference_counting(%arg0: !async.token) {
+func.func @reference_counting(%arg0: !async.token) {
   // CHECK: %[[C2:.*]] = arith.constant 2 : i64
   // CHECK: call @mlirAsyncRuntimeAddRef(%arg0, %[[C2]])
   async.runtime.add_ref %arg0 {count = 2 : i64} : !async.token
@@ -16,7 +16,7 @@ func @reference_counting(%arg0: !async.token) {
 // -----
 
 // CHECK-LABEL: execute_no_async_args
-func @execute_no_async_args(%arg0: f32, %arg1: memref<1xf32>) {
+func.func @execute_no_async_args(%arg0: f32, %arg1: memref<1xf32>) {
   // CHECK: %[[TOKEN:.*]] = call @async_execute_fn(%arg0, %arg1)
   %token = async.execute {
     %c0 = arith.constant 0 : index
@@ -71,7 +71,7 @@ func @execute_no_async_args(%arg0: f32, %arg1: memref<1xf32>) {
 // -----
 
 // CHECK-LABEL: nested_async_execute
-func @nested_async_execute(%arg0: f32, %arg1: f32, %arg2: memref<1xf32>) {
+func.func @nested_async_execute(%arg0: f32, %arg1: f32, %arg2: memref<1xf32>) {
   // CHECK: %[[TOKEN:.*]] = call @async_execute_fn_0(%arg0, %arg2, %arg1)
   %token0 = async.execute {
     %c0 = arith.constant 0 : index
@@ -129,7 +129,7 @@ func @nested_async_execute(%arg0: f32, %arg1: f32, %arg2: memref<1xf32>) {
 // -----
 
 // CHECK-LABEL: async_execute_token_dependency
-func @async_execute_token_dependency(%arg0: f32, %arg1: memref<1xf32>) {
+func.func @async_execute_token_dependency(%arg0: f32, %arg1: memref<1xf32>) {
   // CHECK: %0 = call @async_execute_fn(%arg0, %arg1)
   %token = async.execute {
     %c0 = arith.constant 0 : index
@@ -177,7 +177,7 @@ func @async_execute_token_dependency(%arg0: f32, %arg1: memref<1xf32>) {
 // -----
 
 // CHECK-LABEL: async_group_await_all
-func @async_group_await_all(%arg0: f32, %arg1: memref<1xf32>) {
+func.func @async_group_await_all(%arg0: f32, %arg1: memref<1xf32>) {
   %c = arith.constant 1 : index
   // CHECK: %[[GROUP:.*]] = call @mlirAsyncRuntimeCreateGroup
   %0 = async.create_group %c : !async.group
@@ -219,7 +219,7 @@ func @async_group_await_all(%arg0: f32, %arg1: memref<1xf32>) {
 // -----
 
 // CHECK-LABEL: execute_and_return_f32
-func @execute_and_return_f32() -> f32 {
+func.func @execute_and_return_f32() -> f32 {
  // CHECK: %[[RET:.*]]:2 = call @async_execute_fn
   %token, %result = async.execute -> !async.value<f32> {
     %c0 = arith.constant 123.0 : f32
@@ -257,7 +257,7 @@ func @execute_and_return_f32() -> f32 {
 // -----
 
 // CHECK-LABEL: @async_value_operands
-func @async_value_operands() {
+func.func @async_value_operands() {
   // CHECK: %[[RET:.*]]:2 = call @async_execute_fn
   %token, %result = async.execute -> !async.value<f32> {
     %c0 = arith.constant 123.0 : f32

diff --git a/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir b/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
index 8dbed159a2dab..6190aad34d9bf 100644
--- a/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
+++ b/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -verify-diagnostics -convert-bufferization-to-memref -split-input-file %s | FileCheck %s
 
 // CHECK-LABEL: @conversion_static
-func @conversion_static(%arg0 : memref<2xf32>) -> memref<2xf32> {
+func.func @conversion_static(%arg0 : memref<2xf32>) -> memref<2xf32> {
   %0 = bufferization.clone %arg0 : memref<2xf32> to memref<2xf32>
   memref.dealloc %arg0 : memref<2xf32>
   return %0 : memref<2xf32>
@@ -15,7 +15,7 @@ func @conversion_static(%arg0 : memref<2xf32>) -> memref<2xf32> {
 // -----
 
 // CHECK-LABEL: @conversion_dynamic
-func @conversion_dynamic(%arg0 : memref<?xf32>) -> memref<?xf32> {
+func.func @conversion_dynamic(%arg0 : memref<?xf32>) -> memref<?xf32> {
   %1 = bufferization.clone %arg0 : memref<?xf32> to memref<?xf32>
   memref.dealloc %arg0 : memref<?xf32>
   return %1 : memref<?xf32>
@@ -30,7 +30,7 @@ func @conversion_dynamic(%arg0 : memref<?xf32>) -> memref<?xf32> {
 
 // -----
 
-func @conversion_unknown(%arg0 : memref<*xf32>) -> memref<*xf32> {
+func.func @conversion_unknown(%arg0 : memref<*xf32>) -> memref<*xf32> {
 // expected-error@+1 {{failed to legalize operation 'bufferization.clone' that was explicitly marked illegal}}
   %1 = bufferization.clone %arg0 : memref<*xf32> to memref<*xf32>
   memref.dealloc %arg0 : memref<*xf32>
@@ -50,7 +50,7 @@ func @conversion_unknown(%arg0 : memref<*xf32>) -> memref<*xf32> {
 //       CHECK:   memref.copy
 //       CHECK:   memref.dealloc
 //       CHECK:   return %[[CASTED]]
-func @conversion_with_layout_map(%arg0 : memref<?xf32, #map>) -> memref<?xf32, #map> {
+func.func @conversion_with_layout_map(%arg0 : memref<?xf32, #map>) -> memref<?xf32, #map> {
   %1 = bufferization.clone %arg0 : memref<?xf32, #map> to memref<?xf32, #map>
   memref.dealloc %arg0 : memref<?xf32, #map>
   return %1 : memref<?xf32, #map>
@@ -62,7 +62,7 @@ func @conversion_with_layout_map(%arg0 : memref<?xf32, #map>) -> memref<?xf32, #
 // map cannot be allocated (or casted to).
 
 #map2 = affine_map<(d0)[s0] -> (d0 * 10 + s0)>
-func @conversion_with_invalid_layout_map(%arg0 : memref<?xf32, #map2>)
+func.func @conversion_with_invalid_layout_map(%arg0 : memref<?xf32, #map2>)
     -> memref<?xf32, #map2> {
 // expected-error@+1 {{failed to legalize operation 'bufferization.clone' that was explicitly marked illegal}}
   %1 = bufferization.clone %arg0 : memref<?xf32, #map2> to memref<?xf32, #map2>

diff --git a/mlir/test/Conversion/ComplexToLLVM/convert-to-llvm.mlir b/mlir/test/Conversion/ComplexToLLVM/convert-to-llvm.mlir
index 0abc375f4ed6a..32409ca242c28 100644
--- a/mlir/test/Conversion/ComplexToLLVM/convert-to-llvm.mlir
+++ b/mlir/test/Conversion/ComplexToLLVM/convert-to-llvm.mlir
@@ -5,14 +5,14 @@
 // CHECK-NEXT:    %[[CPLX0:.*]] = llvm.mlir.undef : !llvm.struct<(f32, f32)>
 // CHECK-NEXT:    %[[CPLX1:.*]] = llvm.insertvalue %[[REAL0]], %[[CPLX0]][0] : !llvm.struct<(f32, f32)>
 // CHECK-NEXT:    %[[CPLX2:.*]] = llvm.insertvalue %[[IMAG0]], %[[CPLX1]][1] : !llvm.struct<(f32, f32)>
-func @complex_create(%real: f32, %imag: f32) -> complex<f32> {
+func.func @complex_create(%real: f32, %imag: f32) -> complex<f32> {
   %cplx2 = complex.create %real, %imag : complex<f32>
   return %cplx2 : complex<f32>
 }
 
 // CHECK-LABEL: func @complex_constant
 // CHECK-NEXT:    llvm.mlir.constant([1.000000e+00, 2.000000e+00]) : !llvm.struct<(f64, f64)>
-func @complex_constant() -> complex<f64> {
+func.func @complex_constant() -> complex<f64> {
   %cplx2 = complex.constant [1.000000e+00, 2.000000e+00] : complex<f64>
   return %cplx2 : complex<f64>
 }
@@ -22,7 +22,7 @@ func @complex_constant() -> complex<f64> {
 // CHECK-NEXT:    %[[CAST0:.*]] = builtin.unrealized_conversion_cast %[[CPLX]] : complex<f32> to !llvm.struct<(f32, f32)>
 // CHECK-NEXT:    %[[REAL:.*]] = llvm.extractvalue %[[CAST0]][0] : !llvm.struct<(f32, f32)>
 // CHECK-NEXT:    %[[IMAG:.*]] = llvm.extractvalue %[[CAST0]][1] : !llvm.struct<(f32, f32)>
-func @complex_extract(%cplx: complex<f32>) {
+func.func @complex_extract(%cplx: complex<f32>) {
   %real1 = complex.re %cplx : complex<f32>
   %imag1 = complex.im %cplx : complex<f32>
   return
@@ -38,7 +38,7 @@ func @complex_extract(%cplx: complex<f32>) {
 // CHECK-DAG:     %[[C_IMAG:.*]] = llvm.fadd %[[A_IMAG]], %[[B_IMAG]] : f64
 // CHECK:         %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(f64, f64)>
 // CHECK:         %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(f64, f64)>
-func @complex_addition() {
+func.func @complex_addition() {
   %a_re = arith.constant 1.2 : f64
   %a_im = arith.constant 3.4 : f64
   %a = complex.create %a_re, %a_im : complex<f64>
@@ -59,7 +59,7 @@ func @complex_addition() {
 // CHECK-DAG:     %[[C_IMAG:.*]] = llvm.fsub %[[A_IMAG]], %[[B_IMAG]] : f64
 // CHECK:         %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(f64, f64)>
 // CHECK:         %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(f64, f64)>
-func @complex_substraction() {
+func.func @complex_substraction() {
   %a_re = arith.constant 1.2 : f64
   %a_im = arith.constant 3.4 : f64
   %a = complex.create %a_re, %a_im : complex<f64>
@@ -72,7 +72,7 @@ func @complex_substraction() {
 
 // CHECK-LABEL: func @complex_div
 // CHECK-SAME:    %[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>
-func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %div = complex.div %lhs, %rhs : complex<f32>
   return %div : complex<f32>
 }
@@ -108,7 +108,7 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_mul
 // CHECK-SAME:    %[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>
-func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %mul = complex.mul %lhs, %rhs : complex<f32>
   return %mul : complex<f32>
 }
@@ -137,7 +137,7 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_abs
 // CHECK-SAME: %[[ARG:.*]]: complex<f32>
-func @complex_abs(%arg: complex<f32>) -> f32 {
+func.func @complex_abs(%arg: complex<f32>) -> f32 {
   %abs = complex.abs %arg: complex<f32>
   return %abs : f32
 }

diff --git a/mlir/test/Conversion/ComplexToLLVM/full-conversion.mlir b/mlir/test/Conversion/ComplexToLLVM/full-conversion.mlir
index 6e27c23349b99..d8bf45d752669 100644
--- a/mlir/test/Conversion/ComplexToLLVM/full-conversion.mlir
+++ b/mlir/test/Conversion/ComplexToLLVM/full-conversion.mlir
@@ -2,7 +2,7 @@
 
 // CHECK-LABEL: llvm.func @complex_div
 // CHECK-SAME:    %[[LHS:.*]]: ![[C_TY:.*>]], %[[RHS:.*]]: ![[C_TY]]) -> ![[C_TY]]
-func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %div = complex.div %lhs, %rhs : complex<f32>
   return %div : complex<f32>
 }
@@ -33,7 +33,7 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: llvm.func @complex_mul
 // CHECK-SAME:    %[[LHS:.*]]: ![[C_TY:.*>]], %[[RHS:.*]]: ![[C_TY]]) -> ![[C_TY]]
-func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %mul = complex.mul %lhs, %rhs : complex<f32>
   return %mul : complex<f32>
 }
@@ -57,7 +57,7 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: llvm.func @complex_abs
 // CHECK-SAME: %[[ARG:.*]]: ![[C_TY:.*]])
-func @complex_abs(%arg: complex<f32>) -> f32 {
+func.func @complex_abs(%arg: complex<f32>) -> f32 {
   %abs = complex.abs %arg: complex<f32>
   return %abs : f32
 }

diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
index 4cb67167adf55..03e80fc67f9b1 100644
--- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
+++ b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
@@ -2,7 +2,7 @@
 
 // CHECK-LABEL: func @complex_abs
 // CHECK-SAME: %[[ARG:.*]]: complex<f32>
-func @complex_abs(%arg: complex<f32>) -> f32 {
+func.func @complex_abs(%arg: complex<f32>) -> f32 {
   %abs = complex.abs %arg: complex<f32>
   return %abs : f32
 }
@@ -16,7 +16,7 @@ func @complex_abs(%arg: complex<f32>) -> f32 {
 
 // CHECK-LABEL: func @complex_add
 // CHECK-SAME: (%[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>)
-func @complex_add(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_add(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %add = complex.add %lhs, %rhs: complex<f32>
   return %add : complex<f32>
 }
@@ -31,7 +31,7 @@ func @complex_add(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_div
 // CHECK-SAME: (%[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>)
-func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %div = complex.div %lhs, %rhs : complex<f32>
   return %div : complex<f32>
 }
@@ -140,7 +140,7 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_eq
 // CHECK-SAME: %[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>
-func @complex_eq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
+func.func @complex_eq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
   %eq = complex.eq %lhs, %rhs: complex<f32>
   return %eq : i1
 }
@@ -155,7 +155,7 @@ func @complex_eq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
 
 // CHECK-LABEL: func @complex_exp
 // CHECK-SAME: %[[ARG:.*]]: complex<f32>
-func @complex_exp(%arg: complex<f32>) -> complex<f32> {
+func.func @complex_exp(%arg: complex<f32>) -> complex<f32> {
   %exp = complex.exp %arg: complex<f32>
   return %exp : complex<f32>
 }
@@ -171,7 +171,7 @@ func @complex_exp(%arg: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_log
 // CHECK-SAME: %[[ARG:.*]]: complex<f32>
-func @complex_log(%arg: complex<f32>) -> complex<f32> {
+func.func @complex_log(%arg: complex<f32>) -> complex<f32> {
   %log = complex.log %arg: complex<f32>
   return %log : complex<f32>
 }
@@ -190,7 +190,7 @@ func @complex_log(%arg: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_log1p
 // CHECK-SAME: %[[ARG:.*]]: complex<f32>
-func @complex_log1p(%arg: complex<f32>) -> complex<f32> {
+func.func @complex_log1p(%arg: complex<f32>) -> complex<f32> {
   %log1p = complex.log1p %arg: complex<f32>
   return %log1p : complex<f32>
 }
@@ -214,7 +214,7 @@ func @complex_log1p(%arg: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_mul
 // CHECK-SAME: (%[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>)
-func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %mul = complex.mul %lhs, %rhs : complex<f32>
   return %mul : complex<f32>
 }
@@ -332,7 +332,7 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_neg
 // CHECK-SAME: %[[ARG:.*]]: complex<f32>
-func @complex_neg(%arg: complex<f32>) -> complex<f32> {
+func.func @complex_neg(%arg: complex<f32>) -> complex<f32> {
   %neg = complex.neg %arg: complex<f32>
   return %neg : complex<f32>
 }
@@ -345,7 +345,7 @@ func @complex_neg(%arg: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_neq
 // CHECK-SAME: %[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>
-func @complex_neq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
+func.func @complex_neq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
   %neq = complex.neq %lhs, %rhs: complex<f32>
   return %neq : i1
 }
@@ -360,7 +360,7 @@ func @complex_neq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
 
 // CHECK-LABEL: func @complex_sign
 // CHECK-SAME: %[[ARG:.*]]: complex<f32>
-func @complex_sign(%arg: complex<f32>) -> complex<f32> {
+func.func @complex_sign(%arg: complex<f32>) -> complex<f32> {
   %sign = complex.sign %arg: complex<f32>
   return %sign : complex<f32>
 }
@@ -384,7 +384,7 @@ func @complex_sign(%arg: complex<f32>) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_sub
 // CHECK-SAME: (%[[LHS:.*]]: complex<f32>, %[[RHS:.*]]: complex<f32>)
-func @complex_sub(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
+func.func @complex_sub(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %sub = complex.sub %lhs, %rhs: complex<f32>
   return %sub : complex<f32>
 }

diff --git a/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir b/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir
index b155ccbfea8db..5ff58240358e7 100644
--- a/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir
+++ b/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir
@@ -2,7 +2,7 @@
 
 // CHECK-LABEL: llvm.func @complex_abs
 // CHECK-SAME: %[[ARG:.*]]: ![[C_TY:.*]])
-func @complex_abs(%arg: complex<f32>) -> f32 {
+func.func @complex_abs(%arg: complex<f32>) -> f32 {
   %abs = complex.abs %arg: complex<f32>
   return %abs : f32
 }
@@ -16,7 +16,7 @@ func @complex_abs(%arg: complex<f32>) -> f32 {
 
 // CHECK-LABEL: llvm.func @complex_eq
 // CHECK-SAME: %[[LHS:.*]]: ![[C_TY:.*]], %[[RHS:.*]]: ![[C_TY:.*]])
-func @complex_eq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
+func.func @complex_eq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
   %eq = complex.eq %lhs, %rhs: complex<f32>
   return %eq : i1
 }
@@ -31,7 +31,7 @@ func @complex_eq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
 
 // CHECK-LABEL: llvm.func @complex_neq
 // CHECK-SAME: %[[LHS:.*]]: ![[C_TY:.*]], %[[RHS:.*]]: ![[C_TY:.*]])
-func @complex_neq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
+func.func @complex_neq(%lhs: complex<f32>, %rhs: complex<f32>) -> i1 {
   %neq = complex.neq %lhs, %rhs: complex<f32>
   return %neq : i1
 }

diff --git a/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir b/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
index 96d14381a2194..a41edbbb4bab8 100644
--- a/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
@@ -9,7 +9,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: func @simple_loop
-func @simple_loop(%begin: i32, %end: i32, %step: i32) {
+func.func @simple_loop(%begin: i32, %end: i32, %step: i32) {
 // CHECK-NEXT:  spv.Branch ^bb1
   cf.br ^bb1
 

diff --git a/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir b/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
index d8dc4d28c32db..7e32ef5a45d2a 100644
--- a/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
@@ -10,7 +10,7 @@
 // CHECK-LABEL: @external
 // CHECK: %[[ALLOC0:.*]]: !llvm.ptr<f32>, %[[ALIGN0:.*]]: !llvm.ptr<f32>, %[[OFFSET0:.*]]: i64, %[[SIZE00:.*]]: i64, %[[SIZE01:.*]]: i64, %[[STRIDE00:.*]]: i64, %[[STRIDE01:.*]]: i64,
 // CHECK: %[[ALLOC1:.*]]: !llvm.ptr<f32>, %[[ALIGN1:.*]]: !llvm.ptr<f32>, %[[OFFSET1:.*]]: i64)
-func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
+func.func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
   // Populate the descriptor for arg0.
   // CHECK: %[[DESC00:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESC01:.*]] = llvm.insertvalue %arg0, %[[DESC00]][0]
@@ -47,10 +47,10 @@ func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
 // Verify that the return value is not affected.
 // CHECK-LABEL: @returner
 // CHECK: -> !llvm.struct<(struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>, struct<(ptr<f32>, ptr<f32>, i64)>)>
-func private @returner() -> (memref<?x?xf32>, memref<f32>)
+func.func private @returner() -> (memref<?x?xf32>, memref<f32>)
 
 // CHECK-LABEL: @caller
-func @caller() {
+func.func @caller() {
   %0:2 = call @returner() : () -> (memref<?x?xf32>, memref<f32>)
   // Extract individual values from the descriptor for the first memref.
   // CHECK: %[[ALLOC0:.*]] = llvm.extractvalue %[[DESC0:.*]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -74,7 +74,7 @@ func @caller() {
 
 // CHECK-LABEL: @callee
 // EMIT_C_ATTRIBUTE-LABEL: @callee
-func @callee(%arg0: memref<?xf32>, %arg1: index) {
+func.func @callee(%arg0: memref<?xf32>, %arg1: index) {
   %0 = memref.load %arg0[%arg1] : memref<?xf32>
   return
 }
@@ -99,7 +99,7 @@ func @callee(%arg0: memref<?xf32>, %arg1: index) {
 
 // CHECK-LABEL: @other_callee
 // EMIT_C_ATTRIBUTE-LABEL: @other_callee
-func @other_callee(%arg0: memref<?xf32>, %arg1: index) attributes { llvm.emit_c_interface } {
+func.func @other_callee(%arg0: memref<?xf32>, %arg1: index) attributes { llvm.emit_c_interface } {
   %0 = memref.load %arg0[%arg1] : memref<?xf32>
   return
 }
@@ -115,7 +115,7 @@ func @other_callee(%arg0: memref<?xf32>, %arg1: index) attributes { llvm.emit_c_
 //===========================================================================//
 
 // CHECK-LABEL: llvm.func @return_var_memref_caller
-func @return_var_memref_caller(%arg0: memref<4x3xf32>) {
+func.func @return_var_memref_caller(%arg0: memref<4x3xf32>) {
   // CHECK: %[[CALL_RES:.*]] = llvm.call @return_var_memref
   %0 = call @return_var_memref(%arg0) : (memref<4x3xf32>) -> memref<*xf32>
 
@@ -144,7 +144,7 @@ func @return_var_memref_caller(%arg0: memref<4x3xf32>) {
 }
 
 // CHECK-LABEL: llvm.func @return_var_memref
-func @return_var_memref(%arg0: memref<4x3xf32>) -> memref<*xf32> attributes { llvm.emit_c_interface } {
+func.func @return_var_memref(%arg0: memref<4x3xf32>) -> memref<*xf32> attributes { llvm.emit_c_interface } {
   // Match the construction of the unranked descriptor.
   // CHECK: %[[ALLOCA:.*]] = llvm.alloca
   // CHECK: %[[MEMORY:.*]] = llvm.bitcast %[[ALLOCA]]
@@ -181,7 +181,7 @@ func @return_var_memref(%arg0: memref<4x3xf32>) -> memref<*xf32> attributes { ll
 // CHECK-SAME: (%{{.*}}: !llvm.ptr<struct<(i64, ptr<i8>)>>, %{{.*}}: !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>)
 
 // CHECK-LABEL: llvm.func @return_two_var_memref_caller
-func @return_two_var_memref_caller(%arg0: memref<4x3xf32>) {
+func.func @return_two_var_memref_caller(%arg0: memref<4x3xf32>) {
   // Only check that we create two different descriptors using different
   // memory, and deallocate both sources. The size computation is same as for
   // the single result.
@@ -209,7 +209,7 @@ func @return_two_var_memref_caller(%arg0: memref<4x3xf32>) {
 }
 
 // CHECK-LABEL: llvm.func @return_two_var_memref
-func @return_two_var_memref(%arg0: memref<4x3xf32>) -> (memref<*xf32>, memref<*xf32>) attributes { llvm.emit_c_interface } {
+func.func @return_two_var_memref(%arg0: memref<4x3xf32>) -> (memref<*xf32>, memref<*xf32>) attributes { llvm.emit_c_interface } {
   // Match the construction of the unranked descriptor.
   // CHECK: %[[ALLOCA:.*]] = llvm.alloca
   // CHECK: %[[MEMORY:.*]] = llvm.bitcast %[[ALLOCA]]

diff --git a/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir b/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir
index 01e3562190647..0e63e628ebf8d 100644
--- a/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir
@@ -3,7 +3,7 @@
 // CHECK-LABEL: func @check_attributes
 // When expanding the memref to multiple arguments, argument attributes are replicated.
 // CHECK-COUNT-7: {dialect.a = true, dialect.b = 4 : i64}
-func @check_attributes(%static: memref<10x20xf32> {dialect.a = true, dialect.b = 4 : i64 }) {
+func.func @check_attributes(%static: memref<10x20xf32> {dialect.a = true, dialect.b = 4 : i64 }) {
   return
 }
 
@@ -12,6 +12,6 @@ func @check_attributes(%static: memref<10x20xf32> {dialect.a = true, dialect.b =
 // commas in the argument list for this purpose.
 // CHECK: %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: !llvm{{.*}} {first.arg = true}, %{{.*}}: i{{.*}} {first.arg = true},
 // CHECK-SAME: %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: !llvm{{.*}} {second.arg = 42 : i32}, %{{.*}}: i{{.*}} {second.arg = 42 : i32})
-func @check_multiple(%first: memref<f32> {first.arg = true}, %second: memref<f32> {second.arg = 42 : i32}) {
+func.func @check_multiple(%first: memref<f32> {first.arg = true}, %second: memref<f32> {second.arg = 42 : i32}) {
   return
 }
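
A sketch of the rule these checks exercise (illustrative only; the names are
hypothetical, not test content): a ranked memref<?xf32> argument expands to
five LLVM-level values (allocated pointer, aligned pointer, offset, size,
stride), and the conversion replicates the original argument attributes onto
each of them:

    func.func @example(%m: memref<?xf32> {dialect.a = true}) { return }

    // Expected shape of the lowering:
    // llvm.func @example(%alloc: !llvm.ptr<f32> {dialect.a = true},
    //                    %align: !llvm.ptr<f32> {dialect.a = true},
    //                    %offset: i64 {dialect.a = true},
    //                    %size: i64 {dialect.a = true},
    //                    %stride: i64 {dialect.a = true})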

diff --git a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
index 7c42f8e3e1958..223f2fdc23a77 100644
--- a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
@@ -1,33 +1,33 @@
 // RUN: mlir-opt -convert-func-to-llvm -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //CHECK: llvm.func @second_order_arg(!llvm.ptr<func<void ()>>)
-func private @second_order_arg(%arg0 : () -> ())
+func.func private @second_order_arg(%arg0 : () -> ())
 
 //CHECK: llvm.func @second_order_result() -> !llvm.ptr<func<void ()>>
-func private @second_order_result() -> (() -> ())
+func.func private @second_order_result() -> (() -> ())
 
 //CHECK: llvm.func @second_order_multi_result() -> !llvm.struct<(ptr<func<i32 ()>>, ptr<func<i64 ()>>, ptr<func<f32 ()>>)>
-func private @second_order_multi_result() -> (() -> (i32), () -> (i64), () -> (f32))
+func.func private @second_order_multi_result() -> (() -> (i32), () -> (i64), () -> (f32))
 
 //CHECK: llvm.func @third_order(!llvm.ptr<func<ptr<func<void ()>> (ptr<func<void ()>>)>>) -> !llvm.ptr<func<ptr<func<void ()>> (ptr<func<void ()>>)>>
-func private @third_order(%arg0 : (() -> ()) -> (() -> ())) -> ((() -> ()) -> (() -> ()))
+func.func private @third_order(%arg0 : (() -> ()) -> (() -> ())) -> ((() -> ()) -> (() -> ()))
 
 //CHECK: llvm.func @fifth_order_left(!llvm.ptr<func<void (ptr<func<void (ptr<func<void (ptr<func<void ()>>)>>)>>)>>)
-func private @fifth_order_left(%arg0: (((() -> ()) -> ()) -> ()) -> ())
+func.func private @fifth_order_left(%arg0: (((() -> ()) -> ()) -> ()) -> ())
 
 //CHECK: llvm.func @fifth_order_right(!llvm.ptr<func<ptr<func<ptr<func<ptr<func<void ()>> ()>> ()>> ()>>)
-func private @fifth_order_right(%arg0: () -> (() -> (() -> (() -> ()))))
+func.func private @fifth_order_right(%arg0: () -> (() -> (() -> (() -> ()))))
 
 // Check that memrefs are converted to argument packs if they appear as function arguments.
 // CHECK: llvm.func @memref_call_conv(!llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64)
-func private @memref_call_conv(%arg0: memref<?xf32>)
+func.func private @memref_call_conv(%arg0: memref<?xf32>)
 
 // Same in nested functions.
 // CHECK: llvm.func @memref_call_conv_nested(!llvm.ptr<func<void (ptr<f32>, ptr<f32>, i64, i64, i64)>>)
-func private @memref_call_conv_nested(%arg0: (memref<?xf32>) -> ())
+func.func private @memref_call_conv_nested(%arg0: (memref<?xf32>) -> ())
 
 //CHECK-LABEL: llvm.func @pass_through(%arg0: !llvm.ptr<func<void ()>>) -> !llvm.ptr<func<void ()>> {
-func @pass_through(%arg0: () -> ()) -> (() -> ()) {
+func.func @pass_through(%arg0: () -> ()) -> (() -> ()) {
 // CHECK-NEXT:  llvm.br ^bb1(%arg0 : !llvm.ptr<func<void ()>>)
   cf.br ^bb1(%arg0 : () -> ())
 
@@ -38,14 +38,14 @@ func @pass_through(%arg0: () -> ()) -> (() -> ()) {
 }
 
 // CHECK-LABEL: llvm.func extern_weak @llvmlinkage(i32)
-func private @llvmlinkage(i32) attributes { "llvm.linkage" = #llvm.linkage<extern_weak> }
+func.func private @llvmlinkage(i32) attributes { "llvm.linkage" = #llvm.linkage<extern_weak> }
 
 // CHECK-LABEL: llvm.func @body(i32)
-func private @body(i32)
+func.func private @body(i32)
 
 // CHECK-LABEL: llvm.func @indirect_const_call
 // CHECK-SAME: (%[[ARG0:.*]]: i32) {
-func @indirect_const_call(%arg0: i32) {
+func.func @indirect_const_call(%arg0: i32) {
 // CHECK-NEXT: %[[ADDR:.*]] = llvm.mlir.addressof @body : !llvm.ptr<func<void (i32)>>
   %0 = constant @body : (i32) -> ()
 // CHECK-NEXT:  llvm.call %[[ADDR]](%[[ARG0:.*]]) : (i32) -> ()
@@ -55,7 +55,7 @@ func @indirect_const_call(%arg0: i32) {
 }
 
 // CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (f32)>>, %arg1: f32) -> i32 {
-func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
+func.func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
 // CHECK-NEXT:  %0 = llvm.call %arg0(%arg1) : (f32) -> i32
   %0 = call_indirect %arg0(%arg1) : (f32) -> i32
 // CHECK-NEXT:  llvm.return %0 : i32
@@ -64,4 +64,4 @@ func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
 
 // -----
 
-func private @badllvmlinkage(i32) attributes { "llvm.linkage" = 3 : i64 } // expected-error {{Contains llvm.linkage attribute not of type LLVM::LinkageAttr}}
+func.func private @badllvmlinkage(i32) attributes { "llvm.linkage" = 3 : i64 } // expected-error {{Contains llvm.linkage attribute not of type LLVM::LinkageAttr}}
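
A sketch of the type rule behind the second/third/fifth-order checks above
(illustrative, not test content): function-typed values lower to pointers to
LLVM function types, applied recursively:

    func.func private @example(%f: (i32) -> f32)

    // The argument type converts as:
    //   (i32) -> f32        becomes  !llvm.ptr<func<f32 (i32)>>
    //   () -> (() -> ())    becomes  !llvm.ptr<func<ptr<func<void ()>> ()>>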

diff --git a/mlir/test/Conversion/FuncToLLVM/convert-types.mlir b/mlir/test/Conversion/FuncToLLVM/convert-types.mlir
index 8c214a8f03e9c..9cdc38730f357 100644
--- a/mlir/test/Conversion/FuncToLLVM/convert-types.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-types.mlir
@@ -2,34 +2,34 @@
 
 // CHECK-LABEL: @ptr
 // CHECK: !llvm.ptr<i42>
-func private @ptr() -> !llvm.ptr<!test.smpla>
+func.func private @ptr() -> !llvm.ptr<!test.smpla>
 
 // CHECK-LABEL: @ptr_ptr()
 // CHECK: !llvm.ptr<ptr<i42>> 
-func private @ptr_ptr() -> !llvm.ptr<!llvm.ptr<!test.smpla>>
+func.func private @ptr_ptr() -> !llvm.ptr<!llvm.ptr<!test.smpla>>
 
 // CHECK-LABEL: @struct_ptr()
 // CHECK: !llvm.struct<(ptr<i42>)> 
-func private @struct_ptr() -> !llvm.struct<(ptr<!test.smpla>)>
+func.func private @struct_ptr() -> !llvm.struct<(ptr<!test.smpla>)>
 
 // CHECK-LABEL: @named_struct_ptr()
 // CHECK: !llvm.struct<"_Converted_named", (ptr<i42>)>
-func private @named_struct_ptr() -> !llvm.struct<"named", (ptr<!test.smpla>)>
+func.func private @named_struct_ptr() -> !llvm.struct<"named", (ptr<!test.smpla>)>
 
 // CHECK-LABEL: @named_no_convert
 // CHECK: !llvm.struct<"no_convert", (ptr<struct<"no_convert">>)>
-func private @named_no_convert() -> !llvm.struct<"no_convert", (ptr<struct<"no_convert">>)>
+func.func private @named_no_convert() -> !llvm.struct<"no_convert", (ptr<struct<"no_convert">>)>
 
 // CHECK-LABEL: @array_ptr()
 // CHECK: !llvm.array<10 x ptr<i42>> 
-func private @array_ptr() -> !llvm.array<10 x ptr<!test.smpla>>
+func.func private @array_ptr() -> !llvm.array<10 x ptr<!test.smpla>>
 
 // CHECK-LABEL: @func()
 // CHECK: !llvm.ptr<func<i42 (i42)>>
-func private @func() -> !llvm.ptr<!llvm.func<!test.smpla (!test.smpla)>>
+func.func private @func() -> !llvm.ptr<!llvm.func<!test.smpla (!test.smpla)>>
 
 // TODO: support conversion of recursive types in the conversion infra.
 // CHECK-LABEL: @named_recursive()
 // CHECK: !llvm.struct<"_Converted_recursive", (ptr<i42>, ptr<struct<"_Converted_recursive">>)>
-func private @named_recursive() -> !llvm.struct<"recursive", (ptr<!test.smpla>, ptr<struct<"recursive">>)>
+func.func private @named_recursive() -> !llvm.struct<"recursive", (ptr<!test.smpla>, ptr<struct<"recursive">>)>
 

diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
index 2e9bfefb3e466..abccf9f5117e5 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
@@ -11,7 +11,7 @@
 // CHECK-SAME: -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-LABEL: func @check_static_return
 // BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<f32>) -> !llvm.ptr<f32> {
-func @check_static_return(%static : memref<32x18xf32>) -> memref<32x18xf32> {
+func.func @check_static_return(%static : memref<32x18xf32>) -> memref<32x18xf32> {
 // CHECK:  llvm.return %{{.*}} : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 
 // BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -40,7 +40,7 @@ func @check_static_return(%static : memref<32x18xf32>) -> memref<32x18xf32> {
 // CHECK-SAME: -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-LABEL: func @check_static_return_with_offset
 // BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<f32>) -> !llvm.ptr<f32> {
-func @check_static_return_with_offset(%static : memref<32x18xf32, offset:7, strides:[22,1]>) -> memref<32x18xf32, offset:7, strides:[22,1]> {
+func.func @check_static_return_with_offset(%static : memref<32x18xf32, offset:7, strides:[22,1]>) -> memref<32x18xf32, offset:7, strides:[22,1]> {
 // CHECK:  llvm.return %{{.*}} : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 
 // BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -64,11 +64,11 @@ func @check_static_return_with_offset(%static : memref<32x18xf32, offset:7, stri
 // -----
 
 // BAREPTR: llvm.func @foo(!llvm.ptr<i8>) -> !llvm.ptr<i8>
-func private @foo(memref<10xi8>) -> memref<20xi8>
+func.func private @foo(memref<10xi8>) -> memref<20xi8>
 
 // BAREPTR-LABEL: func @check_memref_func_call
 // BAREPTR-SAME:    %[[in:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
-func @check_memref_func_call(%in : memref<10xi8>) -> memref<20xi8> {
+func.func @check_memref_func_call(%in : memref<10xi8>) -> memref<20xi8> {
   // BAREPTR:         %[[inDesc:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 0]
   // BAREPTR-NEXT:    %[[barePtr:.*]] = llvm.extractvalue %[[inDesc]][1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   // BAREPTR-NEXT:    %[[call:.*]] = llvm.call @foo(%[[barePtr]]) : (!llvm.ptr<i8>) -> !llvm.ptr<i8>
@@ -91,7 +91,7 @@ func @check_memref_func_call(%in : memref<10xi8>) -> memref<20xi8> {
 
 // BAREPTR-LABEL: func @check_return(
 // BAREPTR-SAME: %{{.*}}: memref<?xi8>) -> memref<?xi8> 
-func @check_return(%in : memref<?xi8>) -> memref<?xi8> {
+func.func @check_return(%in : memref<?xi8>) -> memref<?xi8> {
   // BAREPTR: llvm.return {{.*}} : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   return %in : memref<?xi8>
 }
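
A sketch of the descriptor rebuild that the BAREPTR %[[udf]] sequences above
match (an illustrative 1-D case with hypothetical value names): the callee
receives only the aligned pointer and reconstitutes the full descriptor from
the static type:

    %udf = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
    %d0 = llvm.insertvalue %arg, %udf[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
    %d1 = llvm.insertvalue %arg, %d0[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
    // The offset, sizes, and strides are then inserted as llvm.mlir.constant
    // values derived from the static shape.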

diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir
index d4da48d04c849..d43c9ac74f823 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir
@@ -3,7 +3,7 @@
 
 // BAREPTR-LABEL: func @check_noalias
 // BAREPTR-SAME: %{{.*}}: !llvm.ptr<f32> {llvm.noalias}, %{{.*}}: !llvm.ptr<f32> {llvm.noalias}
-func @check_noalias(%static : memref<2xf32> {llvm.noalias}, %other : memref<2xf32> {llvm.noalias}) {
+func.func @check_noalias(%static : memref<2xf32> {llvm.noalias}, %other : memref<2xf32> {llvm.noalias}) {
     return
 }
 
@@ -16,7 +16,7 @@ func @check_noalias(%static : memref<2xf32> {llvm.noalias}, %other : memref<2xf3
 // CHECK-COUNT-5: i64
 // CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)->(20 * i + j + 1)>>,
+func.func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)->(20 * i + j + 1)>>,
                                      %dynamic : memref<?x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>,
                                      %mixed : memref<10x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>) {
   return
@@ -32,7 +32,7 @@ func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)
 // CHECK32-SAME: %arg0: !llvm.ptr<i32>, %arg1: !llvm.ptr<i32>,
 // CHECK32-SAME: %arg2: i32, %arg3: i32, %arg4: i32)
 // CHECK32-SAME: -> !llvm.struct<(ptr<i32>, ptr<i32>, i32, array<1 x i32>, array<1 x i32>)>
-func @memref_index(%arg0: memref<32xindex>) -> memref<32xindex> {
+func.func @memref_index(%arg0: memref<32xindex>) -> memref<32xindex> {
   return %arg0 : memref<32xindex>
 }
 
@@ -45,7 +45,7 @@ func @memref_index(%arg0: memref<32xindex>) -> memref<32xindex> {
 // CHECK-COUNT-5: i64
 // CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref<?x?xf32>, %mixed : memref<10x?xf32>) {
+func.func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref<?x?xf32>, %mixed : memref<10x?xf32>) {
   return
 }
 
@@ -56,10 +56,10 @@ func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref<?x?xf32>, %m
 // in the presence of unranked memrefs when using such a calling convention.
 
 // BAREPTR: func private @hoo(memref<*xi8>) -> memref<*xi8>
-func private @hoo(memref<*xi8>) -> memref<*xi8>
+func.func private @hoo(memref<*xi8>) -> memref<*xi8>
 
 // BAREPTR-LABEL: func @check_unranked_memref_func_call(%{{.*}}: memref<*xi8>) -> memref<*xi8>
-func @check_unranked_memref_func_call(%in: memref<*xi8>) -> memref<*xi8> {
+func.func @check_unranked_memref_func_call(%in: memref<*xi8>) -> memref<*xi8> {
   // BAREPTR-NEXT: call @hoo(%{{.*}}) : (memref<*xi8>) -> memref<*xi8>
   %res = call @hoo(%in) : (memref<*xi8>) -> memref<*xi8>
   // BAREPTR-NEXT: return %{{.*}} : memref<*xi8>
@@ -76,7 +76,7 @@ func @check_unranked_memref_func_call(%in: memref<*xi8>) -> memref<*xi8> {
 // BAREPTR: @unsupported_memref_element_type
 // BAREPTR-SAME: memref<
 // BAREPTR-NOT: !llvm.ptr
-func private @unsupported_memref_element_type() -> memref<42 x !test.memref_element>
+func.func private @unsupported_memref_element_type() -> memref<42 x !test.memref_element>
 
 // CHECK: @unsupported_unranked_memref_element_type
 // CHECK-SAME: memref<
@@ -84,16 +84,16 @@ func private @unsupported_memref_element_type() -> memref<42 x !test.memref_elem
 // BAREPTR: @unsupported_unranked_memref_element_type
 // BAREPTR-SAME: memref<
 // BAREPTR-NOT: !llvm.ptr
-func private @unsupported_unranked_memref_element_type() -> memref<* x !test.memref_element>
+func.func private @unsupported_unranked_memref_element_type() -> memref<* x !test.memref_element>
 
 // -----
 
 // BAREPTR: llvm.func @goo(f32) -> f32
-func private @goo(f32) -> f32
+func.func private @goo(f32) -> f32
 
 // BAREPTR-LABEL: func @check_scalar_func_call
 // BAREPTR-SAME:    %[[in:.*]]: f32)
-func @check_scalar_func_call(%in : f32) {
+func.func @check_scalar_func_call(%in : f32) {
   // BAREPTR-NEXT:    %[[call:.*]] = llvm.call @goo(%[[in]]) : (f32) -> f32
   %res = call @goo(%in) : (f32) -> (f32)
   return
@@ -105,7 +105,7 @@ func @check_scalar_func_call(%in : f32) {
 
 // CHECK-LABEL: func @loop_carried
 // BAREPTR-LABEL: func @loop_carried
-func @loop_carried(%arg0 : index, %arg1 : index, %arg2 : index, %base0 : !base_type, %base1 : !base_type) -> (!base_type, !base_type) {
+func.func @loop_carried(%arg0 : index, %arg1 : index, %arg2 : index, %base0 : !base_type, %base1 : !base_type) -> (!base_type, !base_type) {
   // This test checks that in the BAREPTR case, the branch arguments only forward the descriptor.
   // This test was lowered from a simple scf.for that swaps 2 memref iter_args.
   //      BAREPTR: llvm.br ^bb1(%{{.*}}, %{{.*}}, %{{.*}} : i64, !llvm.struct<(ptr<i32, 201>, ptr<i32, 201>, i64, array<1 x i64>, array<1 x i64>)>, !llvm.struct<(ptr<i32, 201>, ptr<i32, 201>, i64, array<1 x i64>, array<1 x i64>)>)
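
That is, the bare-pointer convention applies only at function boundaries;
internal control flow still forwards whole descriptors, as in the branch the
check above matches (value names hypothetical):

    llvm.br ^bb1(%iv, %d0, %d1 : i64,
        !llvm.struct<(ptr<i32, 201>, ptr<i32, 201>, i64, array<1 x i64>, array<1 x i64>)>,
        !llvm.struct<(ptr<i32, 201>, ptr<i32, 201>, i64, array<1 x i64>, array<1 x i64>)>)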

diff --git a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
index 4a5089efe9403..7f4396b7b6d79 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
@@ -4,17 +4,17 @@
 // CHECK-LABEL: func @empty() {
 // CHECK-NEXT:  llvm.return
 // CHECK-NEXT: }
-func @empty() {
+func.func @empty() {
 ^bb0:
   return
 }
 
 // CHECK-LABEL: llvm.func @body(i64)
-func private @body(index)
+func.func private @body(index)
 
 // CHECK-LABEL: func @simple_loop() {
 // CHECK32-LABEL: func @simple_loop() {
-func @simple_loop() {
+func.func @simple_loop() {
 ^bb0:
 // CHECK-NEXT:  llvm.br ^bb1
 // CHECK32-NEXT:  llvm.br ^bb1
@@ -69,7 +69,7 @@ func @simple_loop() {
 // CHECK-NEXT:  llvm.call @simple_loop() : () -> ()
 // CHECK-NEXT:  llvm.return
 // CHECK-NEXT: }
-func @simple_caller() {
+func.func @simple_caller() {
 ^bb0:
   call @simple_loop() : () -> ()
   return
@@ -77,7 +77,7 @@ func @simple_caller() {
 
 // Check that function call attributes persist during conversion.
 // CHECK-LABEL: @call_with_attributes
-func @call_with_attributes() {
+func.func @call_with_attributes() {
   // CHECK: llvm.call @simple_loop() {baz = [1, 2, 3, 4], foo = "bar"} : () -> ()
   call @simple_loop() {foo="bar", baz=[1,2,3,4]} : () -> ()
   return
@@ -88,7 +88,7 @@ func @call_with_attributes() {
 // CHECK-NEXT:  llvm.call @more_imperfectly_nested_loops() : () -> ()
 // CHECK-NEXT:  llvm.return
 // CHECK-NEXT: }
-func @ml_caller() {
+func.func @ml_caller() {
 ^bb0:
   call @simple_loop() : () -> ()
   call @more_imperfectly_nested_loops() : () -> ()
@@ -97,10 +97,10 @@ func @ml_caller() {
 
 // CHECK-LABEL: llvm.func @body_args(i64) -> i64
 // CHECK32-LABEL: llvm.func @body_args(i32) -> i32
-func private @body_args(index) -> index
+func.func private @body_args(index) -> index
 // CHECK-LABEL: llvm.func @other(i64, i32) -> i32
 // CHECK32-LABEL: llvm.func @other(i32, i32) -> i32
-func private @other(index, i32) -> i32
+func.func private @other(index, i32) -> i32
 
 // CHECK-LABEL: func @func_args(%arg0: i32, %arg1: i32) -> i32 {
 // CHECK-NEXT:  {{.*}} = llvm.mlir.constant(0 : i32) : i32
@@ -108,7 +108,7 @@ func private @other(index, i32) -> i32
 // CHECK32-LABEL: func @func_args(%arg0: i32, %arg1: i32) -> i32 {
 // CHECK32-NEXT:  {{.*}} = llvm.mlir.constant(0 : i32) : i32
 // CHECK32-NEXT:  llvm.br ^bb1
-func @func_args(i32, i32) -> i32 {
+func.func @func_args(i32, i32) -> i32 {
 ^bb0(%arg0: i32, %arg1: i32):
   %c0_i32 = arith.constant 0 : i32
   cf.br ^bb1
@@ -177,19 +177,19 @@ func @func_args(i32, i32) -> i32 {
 
 // CHECK-LABEL: llvm.func @pre(i64)
 // CHECK32-LABEL: llvm.func @pre(i32)
-func private @pre(index)
+func.func private @pre(index)
 
 // CHECK-LABEL: llvm.func @body2(i64, i64)
 // CHECK32-LABEL: llvm.func @body2(i32, i32)
-func private @body2(index, index)
+func.func private @body2(index, index)
 
 // CHECK-LABEL: llvm.func @post(i64)
 // CHECK32-LABEL: llvm.func @post(i32)
-func private @post(index)
+func.func private @post(index)
 
 // CHECK-LABEL: func @imperfectly_nested_loops() {
 // CHECK-NEXT:  llvm.br ^bb1
-func @imperfectly_nested_loops() {
+func.func @imperfectly_nested_loops() {
 ^bb0:
   cf.br ^bb1
 
@@ -261,10 +261,10 @@ func @imperfectly_nested_loops() {
 }
 
 // CHECK-LABEL: llvm.func @mid(i64)
-func private @mid(index)
+func.func private @mid(index)
 
 // CHECK-LABEL: llvm.func @body3(i64, i64)
-func private @body3(index, index)
+func.func private @body3(index, index)
 
 // A complete function transformation check.
 // CHECK-LABEL: func @more_imperfectly_nested_loops() {
@@ -314,7 +314,7 @@ func private @body3(index, index)
 // CHECK-NEXT:^bb12:	// pred: ^bb2
 // CHECK-NEXT:  llvm.return
 // CHECK-NEXT: }
-func @more_imperfectly_nested_loops() {
+func.func @more_imperfectly_nested_loops() {
 ^bb0:
   cf.br ^bb1
 ^bb1:	// pred: ^bb0
@@ -364,22 +364,22 @@ func @more_imperfectly_nested_loops() {
 }
 
 // CHECK-LABEL: llvm.func @get_i64() -> i64
-func private @get_i64() -> (i64)
+func.func private @get_i64() -> (i64)
 // CHECK-LABEL: llvm.func @get_f32() -> f32
-func private @get_f32() -> (f32)
+func.func private @get_f32() -> (f32)
 // CHECK-LABEL: llvm.func @get_c16() -> !llvm.struct<(f16, f16)>
-func private @get_c16() -> (complex<f16>)
+func.func private @get_c16() -> (complex<f16>)
 // CHECK-LABEL: llvm.func @get_c32() -> !llvm.struct<(f32, f32)>
-func private @get_c32() -> (complex<f32>)
+func.func private @get_c32() -> (complex<f32>)
 // CHECK-LABEL: llvm.func @get_c64() -> !llvm.struct<(f64, f64)>
-func private @get_c64() -> (complex<f64>)
+func.func private @get_c64() -> (complex<f64>)
 // CHECK-LABEL: llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>
 // CHECK32-LABEL: llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>
-func private @get_memref() -> (memref<42x?x10x?xf32>)
+func.func private @get_memref() -> (memref<42x?x10x?xf32>)
 
 // CHECK-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)> {
 // CHECK32-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)> {
-func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
+func.func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
 ^bb0:
 // CHECK-NEXT:  {{.*}} = llvm.call @get_i64() : () -> i64
 // CHECK-NEXT:  {{.*}} = llvm.call @get_f32() : () -> f32
@@ -406,7 +406,7 @@ func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
 
 // CHECK-LABEL: llvm.func @multireturn_caller() {
 // CHECK32-LABEL: llvm.func @multireturn_caller() {
-func @multireturn_caller() {
+func.func @multireturn_caller() {
 ^bb0:
 // CHECK-NEXT:  {{.*}} = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
 // CHECK-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
@@ -428,7 +428,7 @@ func @multireturn_caller() {
 }
 
 // CHECK-LABEL: @dfs_block_order
-func @dfs_block_order(%arg0: i32) -> (i32) {
+func.func @dfs_block_order(%arg0: i32) -> (i32) {
 // CHECK-NEXT:  %[[CST:.*]] = llvm.mlir.constant(42 : i32) : i32
   %0 = arith.constant 42 : i32
 // CHECK-NEXT:  llvm.br ^bb2
@@ -451,7 +451,7 @@ func @dfs_block_order(%arg0: i32) -> (i32) {
 
 // CHECK-LABEL: func @ceilf(
 // CHECK-SAME: f32
-func @ceilf(%arg0 : f32) {
+func.func @ceilf(%arg0 : f32) {
   // CHECK: "llvm.intr.ceil"(%arg0) : (f32) -> f32
   %0 = math.ceil %arg0 : f32
   func.return
@@ -461,7 +461,7 @@ func @ceilf(%arg0 : f32) {
 
 // CHECK-LABEL: func @floorf(
 // CHECK-SAME: f32
-func @floorf(%arg0 : f32) {
+func.func @floorf(%arg0 : f32) {
   // CHECK: "llvm.intr.floor"(%arg0) : (f32) -> f32
   %0 = math.floor %arg0 : f32
   func.return
@@ -473,7 +473,7 @@ func @floorf(%arg0 : f32) {
 // CHECK: llvm.func @abort()
 // CHECK-LABEL: @assert_test_function
 // CHECK-SAME:  (%[[ARG:.*]]: i1)
-func @assert_test_function(%arg : i1) {
+func.func @assert_test_function(%arg : i1) {
   // CHECK: llvm.cond_br %[[ARG]], ^[[CONTINUATION_BLOCK:.*]], ^[[FAILURE_BLOCK:.*]]
   // CHECK: ^[[CONTINUATION_BLOCK]]:
   // CHECK: llvm.return
@@ -490,19 +490,19 @@ func @assert_test_function(%arg : i1) {
 // nullptr result type.
 
 // CHECK-LABEL: @call_zero_result_func
-func @call_zero_result_func() {
+func.func @call_zero_result_func() {
   // CHECK: call @zero_result_func
   call @zero_result_func() : () -> ()
   return
 }
-func private @zero_result_func()
+func.func private @zero_result_func()
 
 // -----
 
 // CHECK-LABEL: func @fmaf(
 // CHECK-SAME: %[[ARG0:.*]]: f32
 // CHECK-SAME: %[[ARG1:.*]]: vector<4xf32>
-func @fmaf(%arg0: f32, %arg1: vector<4xf32>) {
+func.func @fmaf(%arg0: f32, %arg1: vector<4xf32>) {
   // CHECK: %[[S:.*]] = "llvm.intr.fma"(%[[ARG0]], %[[ARG0]], %[[ARG0]]) : (f32, f32, f32) -> f32
   %0 = math.fma %arg0, %arg0, %arg0 : f32
   // CHECK: %[[V:.*]] = "llvm.intr.fma"(%[[ARG1]], %[[ARG1]], %[[ARG1]]) : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
@@ -513,7 +513,7 @@ func @fmaf(%arg0: f32, %arg1: vector<4xf32>) {
 // -----
 
 // CHECK-LABEL: func @switchi8(
-func @switchi8(%arg0 : i8) -> i32 {
+func.func @switchi8(%arg0 : i8) -> i32 {
   cf.switch %arg0 : i8, [
     default: ^bb1,
     42: ^bb1,
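
For reference, the packing rule behind the multireturn checks earlier in this
file (a sketch with hypothetical names): multi-result functions return a
single LLVM struct, which callers unpack with llvm.extractvalue:

    func.func @pair(%x: i64, %y: f32) -> (i64, f32) {
      return %x, %y : i64, f32
    }

    // lowers to roughly:
    // llvm.func @pair(%x: i64, %y: f32) -> !llvm.struct<(i64, f32)>
    // and a caller unpacks the result:
    // %r = llvm.call @pair(%a, %b) : (i64, f32) -> !llvm.struct<(i64, f32)>
    // %v0 = llvm.extractvalue %r[0] : !llvm.struct<(i64, f32)>
    // %v1 = llvm.extractvalue %r[1] : !llvm.struct<(i64, f32)>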

diff --git a/mlir/test/Conversion/FuncToLLVM/invalid.mlir b/mlir/test/Conversion/FuncToLLVM/invalid.mlir
index 1e1469ff2dfb7..e70252ff87ed1 100644
--- a/mlir/test/Conversion/FuncToLLVM/invalid.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/invalid.mlir
@@ -1,9 +1,9 @@
 // RUN: mlir-opt %s -convert-func-to-llvm -verify-diagnostics -split-input-file
 
 // Should not crash on unsupported types in function signatures.
-func private @unsupported_signature() -> tensor<10 x i32>
+func.func private @unsupported_signature() -> tensor<10 x i32>
 
 // -----
 
-func private @partially_supported_signature() -> (vector<10 x i32>, tensor<10 x i32>)
+func.func private @partially_supported_signature() -> (vector<10 x i32>, tensor<10 x i32>)
 

diff --git a/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir b/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
index 17329e16f40c9..889048e6fcbeb 100644
--- a/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
@@ -9,35 +9,35 @@ module attributes {
 } {
 
 // CHECK-LABEL: spv.func @return_none_val
-func @return_none_val() {
+func.func @return_none_val() {
   // CHECK: spv.Return
   return
 }
 
 // CHECK-LABEL: spv.func @return_one_val
 //  CHECK-SAME: (%[[ARG:.+]]: f32)
-func @return_one_val(%arg0: f32) -> f32 {
+func.func @return_one_val(%arg0: f32) -> f32 {
   // CHECK: spv.ReturnValue %[[ARG]] : f32
   return %arg0: f32
 }
 
 // Check that multiple-return functions are not converted.
 // CHECK-LABEL: func @return_multi_val
-func @return_multi_val(%arg0: f32) -> (f32, f32) {
+func.func @return_multi_val(%arg0: f32) -> (f32, f32) {
   // CHECK: return
   return %arg0, %arg0: f32, f32
 }
 
 // CHECK-LABEL: spv.func @return_one_index
 //  CHECK-SAME: (%[[ARG:.+]]: i32)
-func @return_one_index(%arg0: index) -> index {
+func.func @return_one_index(%arg0: index) -> index {
   // CHECK: spv.ReturnValue %[[ARG]] : i32
   return %arg0: index
 }
 
 // CHECK-LABEL: spv.func @call_functions
 //  CHECK-SAME: (%[[ARG:.+]]: i32)
-func @call_functions(%arg0: index) -> index {
+func.func @call_functions(%arg0: index) -> index {
   // CHECK: spv.FunctionCall @return_none_val() : () -> ()
   call @return_none_val(): () -> ()
   // CHECK: {{%.*}} = spv.FunctionCall @return_one_index(%[[ARG]]) : (i32) -> i32
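
Two conversion rules are at work in these checks: index narrows to i32 under
the SPIR-V type converter, and only single-result functions are converted,
since spv.ReturnValue returns exactly one value (hence @return_multi_val is
left as-is). Restating the single-index case from the checks above:

    func.func @return_one_index(%arg0: index) -> index {
      return %arg0 : index
    }

    // becomes, per the CHECK lines:
    // spv.func @return_one_index(%arg0: i32) -> i32 {
    //   spv.ReturnValue %arg0 : i32
    // }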

diff --git a/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir b/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
index 3ab856eb0c75b..3fb5d72547040 100644
--- a/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
+++ b/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
@@ -19,7 +19,7 @@ module attributes {
 // NOEMU-SAME: i8
 // NOEMU-SAME: si8
 // NOEMU-SAME: ui8
-func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
+func.func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 
 // CHECK-LABEL: spv.func @integer16
 // CHECK-SAME: i32
@@ -29,7 +29,7 @@ func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 // NOEMU-SAME: i16
 // NOEMU-SAME: si16
 // NOEMU-SAME: ui16
-func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
+func.func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 
 // CHECK-LABEL: spv.func @integer64
 // CHECK-SAME: i32
@@ -39,7 +39,7 @@ func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 // NOEMU-SAME: i64
 // NOEMU-SAME: si64
 // NOEMU-SAME: ui64
-func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
+func.func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
 
 } // end module
 
@@ -59,7 +59,7 @@ module attributes {
 // NOEMU-SAME: i8
 // NOEMU-SAME: si8
 // NOEMU-SAME: ui8
-func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
+func.func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 
 // CHECK-LABEL: spv.func @integer16
 // CHECK-SAME: i16
@@ -69,7 +69,7 @@ func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 // NOEMU-SAME: i16
 // NOEMU-SAME: si16
 // NOEMU-SAME: ui16
-func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
+func.func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 
 // CHECK-LABEL: spv.func @integer64
 // CHECK-SAME: i64
@@ -79,7 +79,7 @@ func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 // NOEMU-SAME: i64
 // NOEMU-SAME: si64
 // NOEMU-SAME: ui64
-func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
+func.func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
 
 } // end module
 
@@ -91,13 +91,13 @@ module attributes {
 } {
 
 // CHECK-NOT: spv.func @integer4
-func @integer4(%arg0: i4) { return }
+func.func @integer4(%arg0: i4) { return }
 
 // CHECK-NOT: spv.func @integer128
-func @integer128(%arg0: i128) { return }
+func.func @integer128(%arg0: i128) { return }
 
 // CHECK-NOT: spv.func @integer42
-func @integer42(%arg0: i42) { return }
+func.func @integer42(%arg0: i42) { return }
 
 } // end module
 // -----
@@ -113,7 +113,7 @@ module attributes {
 
 // CHECK-LABEL: spv.func @index_type
 // CHECK-SAME: %{{.*}}: i32
-func @index_type(%arg0: index) { return }
+func.func @index_type(%arg0: index) { return }
 
 } // end module
 
@@ -133,13 +133,13 @@ module attributes {
 // CHECK-SAME: f32
 // NOEMU-LABEL: func @float16
 // NOEMU-SAME: f16
-func @float16(%arg0: f16) { return }
+func.func @float16(%arg0: f16) { return }
 
 // CHECK-LABEL: spv.func @float64
 // CHECK-SAME: f32
 // NOEMU-LABEL: func @float64
 // NOEMU-SAME: f64
-func @float64(%arg0: f64) { return }
+func.func @float64(%arg0: f64) { return }
 
 } // end module
 
@@ -155,13 +155,13 @@ module attributes {
 // CHECK-SAME: f16
 // NOEMU-LABEL: spv.func @float16
 // NOEMU-SAME: f16
-func @float16(%arg0: f16) { return }
+func.func @float16(%arg0: f16) { return }
 
 // CHECK-LABEL: spv.func @float64
 // CHECK-SAME: f64
 // NOEMU-LABEL: spv.func @float64
 // NOEMU-SAME: f64
-func @float64(%arg0: f64) { return }
+func.func @float64(%arg0: f64) { return }
 
 } // end module
 
@@ -173,7 +173,7 @@ module attributes {
 } {
 
 // CHECK-NOT: spv.func @bf16_type
-func @bf16_type(%arg0: bf16) { return }
+func.func @bf16_type(%arg0: bf16) { return }
 
 } // end module
 
@@ -193,7 +193,7 @@ module attributes {
 // CHECK-SAME: vector<2xi32>
 // CHECK-SAME: vector<3xsi32>
 // CHECK-SAME: vector<4xui32>
-func @int_vector(
+func.func @int_vector(
   %arg0: vector<2xi8>,
   %arg1: vector<3xsi16>,
   %arg2: vector<4xui64>
@@ -202,7 +202,7 @@ func @int_vector(
 // CHECK-LABEL: spv.func @float_vector
 // CHECK-SAME: vector<2xf32>
 // CHECK-SAME: vector<3xf32>
-func @float_vector(
+func.func @float_vector(
   %arg0: vector<2xf16>,
   %arg1: vector<3xf64>
 ) { return }
@@ -222,7 +222,7 @@ module attributes {
 // CHECK-SAME: vector<2xi8>
 // CHECK-SAME: vector<3xsi16>
 // CHECK-SAME: vector<4xui64>
-func @int_vector(
+func.func @int_vector(
   %arg0: vector<2xi8>,
   %arg1: vector<3xsi16>,
   %arg2: vector<4xui64>
@@ -231,14 +231,14 @@ func @int_vector(
 // CHECK-LABEL: spv.func @float_vector
 // CHECK-SAME: vector<2xf16>
 // CHECK-SAME: vector<3xf64>
-func @float_vector(
+func.func @float_vector(
   %arg0: vector<2xf16>,
   %arg1: vector<3xf64>
 ) { return }
 
 // CHECK-LABEL: spv.func @one_element_vector
 // CHECK-SAME: %{{.+}}: i32
-func @one_element_vector(%arg0: vector<1xi32>) { return }
+func.func @one_element_vector(%arg0: vector<1xi32>) { return }
 
 } // end module
 
@@ -250,7 +250,7 @@ module attributes {
 } {
 
 // CHECK-NOT: spv.func @large_vector
-func @large_vector(%arg0: vector<1024xi32>) { return }
+func.func @large_vector(%arg0: vector<1024xi32>) { return }
 
 } // end module
 
@@ -273,7 +273,7 @@ module attributes {
 // CHECK-SAME: PushConstant
 // CHECK-SAME: Private
 // CHECK-SAME: Function
-func @memref_mem_space(
+func.func @memref_mem_space(
     %arg0: memref<4xf32, 0>,
     %arg1: memref<4xf32, 4>,
     %arg2: memref<4xf32, 3>,
@@ -298,55 +298,55 @@ module attributes {
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<2 x i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_1bit_type
 // NOEMU-SAME: memref<5xi1>
-func @memref_1bit_type(%arg0: memref<5xi1>) { return }
+func.func @memref_1bit_type(%arg0: memref<5xi1>) { return }
 
 // CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_8bit_StorageBuffer
 // NOEMU-SAME: memref<16xi8>
-func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, 0>) { return }
+func.func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_8bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_8bit_Uniform
 // NOEMU-SAME: memref<16xsi8, 4>
-func @memref_8bit_Uniform(%arg0: memref<16xsi8, 4>) { return }
+func.func @memref_8bit_Uniform(%arg0: memref<16xsi8, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_8bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_8bit_PushConstant
 // NOEMU-SAME: memref<16xui8, 7>
-func @memref_8bit_PushConstant(%arg0: memref<16xui8, 7>) { return }
+func.func @memref_8bit_PushConstant(%arg0: memref<16xui8, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_16bit_StorageBuffer
 // NOEMU-SAME: memref<16xi16>
-func @memref_16bit_StorageBuffer(%arg0: memref<16xi16, 0>) { return }
+func.func @memref_16bit_StorageBuffer(%arg0: memref<16xi16, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_16bit_Uniform
 // NOEMU-SAME: memref<16xsi16, 4>
-func @memref_16bit_Uniform(%arg0: memref<16xsi16, 4>) { return }
+func.func @memref_16bit_Uniform(%arg0: memref<16xsi16, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_16bit_PushConstant
 // NOEMU-SAME: memref<16xui16, 7>
-func @memref_16bit_PushConstant(%arg0: memref<16xui16, 7>) { return }
+func.func @memref_16bit_PushConstant(%arg0: memref<16xui16, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Input
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f32, stride=4>)>, Input>
 // NOEMU-LABEL: func @memref_16bit_Input
 // NOEMU-SAME: memref<16xf16, 9>
-func @memref_16bit_Input(%arg3: memref<16xf16, 9>) { return }
+func.func @memref_16bit_Input(%arg3: memref<16xf16, 9>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Output
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f32, stride=4>)>, Output>
 // NOEMU-LABEL: func @memref_16bit_Output
 // NOEMU-SAME: memref<16xf16, 10>
-func @memref_16bit_Output(%arg4: memref<16xf16, 10>) { return }
+func.func @memref_16bit_Output(%arg4: memref<16xf16, 10>) { return }
 
 } // end module
 
@@ -365,7 +365,7 @@ module attributes {
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, PushConstant>
 // NOEMU-LABEL: spv.func @memref_8bit_PushConstant
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, PushConstant>
-func @memref_8bit_PushConstant(%arg0: memref<16xi8, 7>) { return }
+func.func @memref_8bit_PushConstant(%arg0: memref<16xi8, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, PushConstant>
@@ -373,7 +373,7 @@ func @memref_8bit_PushConstant(%arg0: memref<16xi8, 7>) { return }
 // NOEMU-LABEL: spv.func @memref_16bit_PushConstant
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, PushConstant>
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, PushConstant>
-func @memref_16bit_PushConstant(
+func.func @memref_16bit_PushConstant(
   %arg0: memref<16xi16, 7>,
   %arg1: memref<16xf16, 7>
 ) { return }
@@ -395,7 +395,7 @@ module attributes {
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, StorageBuffer>
 // NOEMU-LABEL: spv.func @memref_8bit_StorageBuffer
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, StorageBuffer>
-func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, 0>) { return }
+func.func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, StorageBuffer>
@@ -403,7 +403,7 @@ func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, 0>) { return }
 // NOEMU-LABEL: spv.func @memref_16bit_StorageBuffer
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, StorageBuffer>
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, StorageBuffer>
-func @memref_16bit_StorageBuffer(
+func.func @memref_16bit_StorageBuffer(
   %arg0: memref<16xi16, 0>,
   %arg1: memref<16xf16, 0>
 ) { return }
@@ -425,7 +425,7 @@ module attributes {
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, Uniform>
 // NOEMU-LABEL: spv.func @memref_8bit_Uniform
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, Uniform>
-func @memref_8bit_Uniform(%arg0: memref<16xi8, 4>) { return }
+func.func @memref_8bit_Uniform(%arg0: memref<16xi8, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Uniform>
@@ -433,7 +433,7 @@ func @memref_8bit_Uniform(%arg0: memref<16xi8, 4>) { return }
 // NOEMU-LABEL: spv.func @memref_16bit_Uniform
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Uniform>
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, Uniform>
-func @memref_16bit_Uniform(
+func.func @memref_16bit_Uniform(
   %arg0: memref<16xi16, 4>,
   %arg1: memref<16xf16, 4>
 ) { return }
@@ -454,13 +454,13 @@ module attributes {
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2>)>, Input>
 // NOEMU-LABEL: spv.func @memref_16bit_Input
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2>)>, Input>
-func @memref_16bit_Input(%arg3: memref<16xf16, 9>) { return }
+func.func @memref_16bit_Input(%arg3: memref<16xf16, 9>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Output
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2>)>, Output>
 // NOEMU-LABEL: spv.func @memref_16bit_Output
 // NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2>)>, Output>
-func @memref_16bit_Output(%arg4: memref<16xi16, 10>) { return }
+func.func @memref_16bit_Output(%arg4: memref<16xi16, 10>) { return }
 
 } // end module
 
@@ -473,7 +473,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: spv.func @memref_offset_strides
-func @memref_offset_strides(
+func.func @memref_offset_strides(
 // CHECK-SAME: !spv.array<64 x f32, stride=4> [0])>, StorageBuffer>
 // CHECK-SAME: !spv.array<72 x f32, stride=4> [0])>, StorageBuffer>
 // CHECK-SAME: !spv.array<256 x f32, stride=4> [0])>, StorageBuffer>
@@ -509,18 +509,18 @@ module attributes {
 // Check that unranked shapes are not supported.
 // CHECK-LABEL: func @unranked_memref
 // CHECK-SAME: memref<*xi32>
-func @unranked_memref(%arg0: memref<*xi32>) { return }
+func.func @unranked_memref(%arg0: memref<*xi32>) { return }
 
 // CHECK-LABEL: func @memref_1bit_type
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_1bit_type
 // NOEMU-SAME: memref<?xi1>
-func @memref_1bit_type(%arg0: memref<?xi1>) { return }
+func.func @memref_1bit_type(%arg0: memref<?xi1>) { return }
 
 // CHECK-LABEL: func @dynamic_dim_memref
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<f32, stride=4> [0])>, StorageBuffer>
-func @dynamic_dim_memref(%arg0: memref<8x?xi32>,
+func.func @dynamic_dim_memref(%arg0: memref<8x?xi32>,
                          %arg1: memref<?x?xf32>) { return }
 
 // Check that using non-32-bit scalar types in interface storage classes
@@ -531,49 +531,49 @@ func @dynamic_dim_memref(%arg0: memref<8x?xi32>,
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_8bit_StorageBuffer
 // NOEMU-SAME: memref<?xi8>
-func @memref_8bit_StorageBuffer(%arg0: memref<?xi8, 0>) { return }
+func.func @memref_8bit_StorageBuffer(%arg0: memref<?xi8, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_8bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_8bit_Uniform
 // NOEMU-SAME: memref<?xsi8, 4>
-func @memref_8bit_Uniform(%arg0: memref<?xsi8, 4>) { return }
+func.func @memref_8bit_Uniform(%arg0: memref<?xsi8, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_8bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_8bit_PushConstant
 // NOEMU-SAME: memref<?xui8, 7>
-func @memref_8bit_PushConstant(%arg0: memref<?xui8, 7>) { return }
+func.func @memref_8bit_PushConstant(%arg0: memref<?xui8, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_16bit_StorageBuffer
 // NOEMU-SAME: memref<?xi16>
-func @memref_16bit_StorageBuffer(%arg0: memref<?xi16, 0>) { return }
+func.func @memref_16bit_StorageBuffer(%arg0: memref<?xi16, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_16bit_Uniform
 // NOEMU-SAME: memref<?xsi16, 4>
-func @memref_16bit_Uniform(%arg0: memref<?xsi16, 4>) { return }
+func.func @memref_16bit_Uniform(%arg0: memref<?xsi16, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_16bit_PushConstant
 // NOEMU-SAME: memref<?xui16, 7>
-func @memref_16bit_PushConstant(%arg0: memref<?xui16, 7>) { return }
+func.func @memref_16bit_PushConstant(%arg0: memref<?xui16, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Input
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<f32, stride=4>)>, Input>
 // NOEMU-LABEL: func @memref_16bit_Input
 // NOEMU-SAME: memref<?xf16, 9>
-func @memref_16bit_Input(%arg3: memref<?xf16, 9>) { return }
+func.func @memref_16bit_Input(%arg3: memref<?xf16, 9>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Output
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<f32, stride=4>)>, Output>
 // NOEMU-LABEL: func @memref_16bit_Output
 // NOEMU-SAME: memref<?xf16, 10>
-func @memref_16bit_Output(%arg4: memref<?xf16, 10>) { return }
+func.func @memref_16bit_Output(%arg4: memref<?xf16, 10>) { return }
 
 } // end module
 
@@ -587,7 +587,7 @@ module attributes {
 // CHECK-LABEL: func @memref_vector
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<4 x vector<2xf32>, stride=8> [0])>, StorageBuffer>
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<4 x vector<4xf32>, stride=16> [0])>, Uniform>
-func @memref_vector(
+func.func @memref_vector(
     %arg0: memref<4xvector<2xf32>, 0>,
     %arg1: memref<4xvector<4xf32>, 4>)
 { return }
@@ -595,7 +595,7 @@ func @memref_vector(
 // CHECK-LABEL: func @dynamic_dim_memref_vector
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<vector<4xi32>, stride=16> [0])>, StorageBuffer>
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<vector<2xf32>, stride=8> [0])>, StorageBuffer>
-func @dynamic_dim_memref_vector(%arg0: memref<8x?xvector<4xi32>>,
+func.func @dynamic_dim_memref_vector(%arg0: memref<8x?xvector<4xi32>>,
                          %arg1: memref<?x?xvector<2xf32>>)
 { return }
 
@@ -610,7 +610,7 @@ module attributes {
 
 // CHECK-LABEL: func @memref_vector_wrong_size
 // CHECK-SAME: memref<4xvector<5xf32>>
-func @memref_vector_wrong_size(
+func.func @memref_vector_wrong_size(
     %arg0: memref<4xvector<5xf32>, 0>)
 { return }
 
@@ -633,7 +633,7 @@ module attributes {
 // CHECK-SAME: !spv.array<32 x i32, stride=4>
 // CHECK-SAME: !spv.array<32 x i16, stride=2>
 // CHECK-SAME: !spv.array<32 x i8, stride=1>
-func @int_tensor_types(
+func.func @int_tensor_types(
   %arg0: tensor<8x4xi64>,
   %arg1: tensor<8x4xi32>,
   %arg2: tensor<8x4xi16>,
@@ -644,7 +644,7 @@ func @int_tensor_types(
 // CHECK-SAME: !spv.array<32 x f64, stride=8>
 // CHECK-SAME: !spv.array<32 x f32, stride=4>
 // CHECK-SAME: !spv.array<32 x f16, stride=2>
-func @float_tensor_types(
+func.func @float_tensor_types(
   %arg0: tensor<8x4xf64>,
   %arg1: tensor<8x4xf32>,
   %arg2: tensor<8x4xf16>
@@ -664,7 +664,7 @@ module attributes {
 // CHECK-SAME: !spv.array<32 x i32, stride=4>
 // CHECK-SAME: !spv.array<32 x i32, stride=4>
 // CHECK-SAME: !spv.array<32 x i32, stride=4>
-func @int_tensor_types(
+func.func @int_tensor_types(
   %arg0: tensor<8x4xi64>,
   %arg1: tensor<8x4xi32>,
   %arg2: tensor<8x4xi16>,
@@ -675,7 +675,7 @@ func @int_tensor_types(
 // CHECK-SAME: !spv.array<32 x f32, stride=4>
 // CHECK-SAME: !spv.array<32 x f32, stride=4>
 // CHECK-SAME: !spv.array<32 x f32, stride=4>
-func @float_tensor_types(
+func.func @float_tensor_types(
   %arg0: tensor<8x4xf64>,
   %arg1: tensor<8x4xf32>,
   %arg2: tensor<8x4xf16>
@@ -692,10 +692,10 @@ module attributes {
 
 // CHECK-LABEL: func @unranked_tensor
 // CHECK-SAME: tensor<*xi32>
-func @unranked_tensor(%arg0: tensor<*xi32>) { return }
+func.func @unranked_tensor(%arg0: tensor<*xi32>) { return }
 
 // CHECK-LABEL: func @dynamic_dim_tensor
 // CHECK-SAME: tensor<8x?xi32>
-func @dynamic_dim_tensor(%arg0: tensor<8x?xi32>) { return }
+func.func @dynamic_dim_tensor(%arg0: tensor<8x?xi32>) { return }
 
 } // end module
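
In brief, the memref rules exercised above: a ranked memref maps to a
!spv.ptr to a struct wrapping an explicitly strided array (an !spv.rtarray
when a dimension is dynamic), with the storage class chosen from the memory
space attribute; when the Int8/Int16/Float16 capabilities are unavailable,
the element type is widened to its 32-bit equivalent. One pairing restated
from the checks:

    func.func @memref_8bit_StorageBuffer(%arg0: memref<16xi8>) { return }

    // With 8-bit capabilities available:
    //   !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, StorageBuffer>
    // Without them (emulated):
    //   !spv.ptr<!spv.struct<(!spv.array<16 x i32, stride=4> [0])>, StorageBuffer>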

diff --git a/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir
index 4169f0e8191dc..a779913ab3253 100644
--- a/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir
@@ -3,7 +3,7 @@
 module attributes {gpu.container_module} {
   // CHECK-LABEL: llvm.func @main
   // CHECK-SAME: %[[size:.*]]: i64
-  func @main(%size : index) {
+  func.func @main(%size : index) {
     // CHECK: %[[stream:.*]] = llvm.call @mgpuStreamCreate()
     %0 = gpu.wait async
     // CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}}[%[[size]]]

diff --git a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
index 6eb5aaad77cb2..b11d15712e83e 100644
--- a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
@@ -17,7 +17,7 @@ module attributes {gpu.container_module} {
     }
   }
 
-  func @foo(%buffer: memref<?xf32>) {
+  func.func @foo(%buffer: memref<?xf32>) {
     %c8 = arith.constant 8 : index
     %c32 = arith.constant 32 : i32
     %c256 = arith.constant 256 : i32

diff --git a/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir
index 162c2f4e838a0..df10b317aa499 100644
--- a/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir
@@ -3,7 +3,7 @@
 module attributes {gpu.container_module} {
 
   // CHECK: func @foo
-  func @foo(%dst : memref<7xf32, 1>, %src : memref<7xf32>) {
+  func.func @foo(%dst : memref<7xf32, 1>, %src : memref<7xf32>) {
     // CHECK: %[[t0:.*]] = llvm.call @mgpuStreamCreate
     %t0 = gpu.wait async
     // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint

diff --git a/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir
index 1b786ed0f6e5b..ef5b6ef2c7bb6 100644
--- a/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir
@@ -3,7 +3,7 @@
 module attributes {gpu.container_module} {
 
   // CHECK: func @foo
-  func @foo(%dst : memref<7xf32, 1>, %value : f32) {
+  func.func @foo(%dst : memref<7xf32, 1>, %value : f32) {
     // CHECK: %[[t0:.*]] = llvm.call @mgpuStreamCreate
     %t0 = gpu.wait async
     // CHECK: %[[size_bytes:.*]] = llvm.mlir.constant

diff --git a/mlir/test/Conversion/GPUCommon/lower-wait-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-wait-to-gpu-runtime-calls.mlir
index 2ee85ecd78aeb..d15efe354cfa8 100644
--- a/mlir/test/Conversion/GPUCommon/lower-wait-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-wait-to-gpu-runtime-calls.mlir
@@ -2,7 +2,7 @@
 
 module attributes {gpu.container_module} {
 
-  func @foo() {
+  func.func @foo() {
     // CHECK: %[[t0:.*]] = llvm.call @mgpuStreamCreate
     // CHECK: %[[e0:.*]] = llvm.call @mgpuEventCreate
     // CHECK: llvm.call @mgpuEventRecord(%[[e0]], %[[t0]])

diff --git a/mlir/test/Conversion/GPUToSPIRV/builtins.mlir b/mlir/test/Conversion/GPUToSPIRV/builtins.mlir
index 1481c5e25482e..16518c8b95f4e 100644
--- a/mlir/test/Conversion/GPUToSPIRV/builtins.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/builtins.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -split-input-file -convert-gpu-to-spirv %s -o - | FileCheck %s
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_id_x
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -25,7 +25,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     %c256 = arith.constant 256 : i32
     gpu.launch_func @kernels::@builtin_workgroup_id_y
@@ -51,7 +51,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_id_z
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -75,7 +75,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_size_x
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -100,7 +100,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_size_y
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -122,7 +122,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_size_z
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -144,7 +144,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_local_id_x
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -168,7 +168,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_num_workgroups_x
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -224,7 +224,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_size_x
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -248,7 +248,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_size_y
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -272,7 +272,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_workgroup_size_z
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -296,7 +296,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_global_id_x
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -320,7 +320,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_global_id_y
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)
@@ -344,7 +344,7 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  func @builtin() {
+  func.func @builtin() {
     %c0 = arith.constant 1 : index
     gpu.launch_func @kernels::@builtin_global_id_z
         blocks in (%c0, %c0, %c0) threads in (%c0, %c0, %c0)

diff --git a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
index fd0ed46733ee8..54acfe90a34cb 100644
--- a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
@@ -5,7 +5,7 @@ module attributes {
   spv.target_env = #spv.target_env<
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
 } {
-  func @load_store(%arg0: memref<12x4xf32>, %arg1: memref<12x4xf32>, %arg2: memref<12x4xf32>) {
+  func.func @load_store(%arg0: memref<12x4xf32>, %arg1: memref<12x4xf32>, %arg2: memref<12x4xf32>) {
     %c0 = arith.constant 0 : index
     %c12 = arith.constant 12 : index
     %0 = arith.subi %c12, %c0 : index

diff --git a/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir b/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
index a539f878a48db..6d022d70250b0 100644
--- a/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
@@ -18,7 +18,7 @@ module attributes {
     }
   }
 
-  func @main() {
+  func.func @main() {
     %0 = "op"() : () -> (f32)
     %1 = "op"() : () -> (memref<12xf32, 11>)
     %cst = arith.constant 1 : index

diff --git a/mlir/test/Conversion/GPUToSPIRV/simple.mlir b/mlir/test/Conversion/GPUToSPIRV/simple.mlir
index b590d41c358b5..ceeaf46370856 100644
--- a/mlir/test/Conversion/GPUToSPIRV/simple.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/simple.mlir
@@ -14,7 +14,7 @@ module attributes {gpu.container_module} {
     }
   }
 
-  func @main() {
+  func.func @main() {
     %0 = "op"() : () -> (f32)
     %1 = "op"() : () -> (memref<12xf32>)
     %cst = arith.constant 1 : index
@@ -60,7 +60,7 @@ module attributes {gpu.container_module} {
     }
   }
 
-  func @main() {
+  func.func @main() {
     %0 = "op"() : () -> (f32)
     %1 = "op"() : () -> (memref<12xf32>)
     %cst = arith.constant 1 : index
@@ -118,7 +118,7 @@ module attributes {gpu.container_module} {
     }
   }
 
-  func @main() {
+  func.func @main() {
     %0 = "op"() : () -> (f32)
     %1 = "op"() : () -> (memref<12xf32>)
     %cst = arith.constant 1 : index

diff --git a/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir b/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
index 94b9b0ec1c2f1..8056074cf3b0f 100644
--- a/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
+++ b/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
@@ -23,7 +23,7 @@ module attributes {gpu.container_module} {
       gpu.return
     }
   }
-  func @foo() {
+  func.func @foo() {
     %0 = memref.alloc() : memref<12xf32>
     %c1 = arith.constant 1 : index
     gpu.launch_func @kernels::@kernel

diff --git a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
index 770dd4ab35e2a..375b5c4117630 100644
--- a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
+++ b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
@@ -44,7 +44,7 @@ module attributes {
 // CHECK:        }
 // CHECK:        spv.Return
 
-func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) attributes {
+func.func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) attributes {
   spv.entry_point_abi = {local_size = dense<[16, 1, 1]>: vector<3xi32>}
 } {
   linalg.generic #single_workgroup_reduction_trait
@@ -74,7 +74,7 @@ module attributes {
   spv.target_env = #spv.target_env<
     #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
 } {
-func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) {
+func.func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) {
   // expected-error @+1 {{failed to legalize operation 'linalg.generic'}}
   linalg.generic #single_workgroup_reduction_trait
       ins(%input : memref<16xi32>)
@@ -103,7 +103,7 @@ module attributes {
   spv.target_env = #spv.target_env<
     #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
 } {
-func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) attributes {
+func.func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) attributes {
   spv.entry_point_abi = {local_size = dense<[32, 1, 1]>: vector<3xi32>}
 } {
   // expected-error @+1 {{failed to legalize operation 'linalg.generic'}}
@@ -134,7 +134,7 @@ module attributes {
   spv.target_env = #spv.target_env<
     #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
 } {
-func @single_workgroup_reduction(%input: memref<16x8xi32>, %output: memref<16xi32>) attributes {
+func.func @single_workgroup_reduction(%input: memref<16x8xi32>, %output: memref<16xi32>) attributes {
   spv.entry_point_abi = {local_size = dense<[16, 8, 1]>: vector<3xi32>}
 } {
   // expected-error @+1 {{failed to legalize operation 'linalg.generic'}}

diff --git a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
index 78e31c32bb434..af32271ace9df 100644
--- a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
+++ b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(convert-math-to-llvm)" | FileCheck %s
 
 // CHECK-LABEL: @ops
-func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) {
+func.func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) {
 // CHECK: = "llvm.intr.exp"(%{{.*}}) : (f32) -> f32
   %13 = math.exp %arg0 : f32
 // CHECK: = "llvm.intr.exp2"(%{{.*}}) : (f32) -> f32
@@ -17,7 +17,7 @@ func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) {
 
 // CHECK-LABEL: func @log1p(
 // CHECK-SAME: f32
-func @log1p(%arg0 : f32) {
+func.func @log1p(%arg0 : f32) {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
   // CHECK: %[[ADD:.*]] = llvm.fadd %[[ONE]], %arg0 : f32
   // CHECK: %[[LOG:.*]] = "llvm.intr.log"(%[[ADD]]) : (f32) -> f32
@@ -28,7 +28,7 @@ func @log1p(%arg0 : f32) {
 // -----
 
 // CHECK-LABEL: func @log1p_2dvector(
-func @log1p_2dvector(%arg0 : vector<4x3xf32>) {
+func.func @log1p_2dvector(%arg0 : vector<4x3xf32>) {
   // CHECK: %[[EXTRACT:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<4 x vector<3xf32>>
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<3xf32>) : vector<3xf32>
   // CHECK: %[[ADD:.*]] = llvm.fadd %[[ONE]], %[[EXTRACT]] : vector<3xf32>
@@ -42,7 +42,7 @@ func @log1p_2dvector(%arg0 : vector<4x3xf32>) {
 
 // CHECK-LABEL: func @expm1(
 // CHECK-SAME: f32
-func @expm1(%arg0 : f32) {
+func.func @expm1(%arg0 : f32) {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
   // CHECK: %[[EXP:.*]] = "llvm.intr.exp"(%arg0) : (f32) -> f32
   // CHECK: %[[SUB:.*]] = llvm.fsub %[[EXP]], %[[ONE]] : f32
@@ -54,7 +54,7 @@ func @expm1(%arg0 : f32) {
 
 // CHECK-LABEL: func @rsqrt(
 // CHECK-SAME: f32
-func @rsqrt(%arg0 : f32) {
+func.func @rsqrt(%arg0 : f32) {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
   // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (f32) -> f32
   // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : f32
@@ -66,7 +66,7 @@ func @rsqrt(%arg0 : f32) {
 
 // CHECK-LABEL: func @sine(
 // CHECK-SAME: f32
-func @sine(%arg0 : f32) {
+func.func @sine(%arg0 : f32) {
   // CHECK: "llvm.intr.sin"(%arg0) : (f32) -> f32
   %0 = math.sin %arg0 : f32
   func.return
@@ -76,7 +76,7 @@ func @sine(%arg0 : f32) {
 
 // CHECK-LABEL: func @ctlz(
 // CHECK-SAME: i32
-func @ctlz(%arg0 : i32) {
+func.func @ctlz(%arg0 : i32) {
   // CHECK: %[[ZERO:.+]] = llvm.mlir.constant(false) : i1
   // CHECK: "llvm.intr.ctlz"(%arg0, %[[ZERO]]) : (i32, i1) -> i32
   %0 = math.ctlz %arg0 : i32
@@ -87,7 +87,7 @@ func @ctlz(%arg0 : i32) {
 
 // CHECK-LABEL: func @cttz(
 // CHECK-SAME: i32
-func @cttz(%arg0 : i32) {
+func.func @cttz(%arg0 : i32) {
   // CHECK: %[[ZERO:.+]] = llvm.mlir.constant(false) : i1
   // CHECK: "llvm.intr.cttz"(%arg0, %[[ZERO]]) : (i32, i1) -> i32
   %0 = math.cttz %arg0 : i32
@@ -98,7 +98,7 @@ func @cttz(%arg0 : i32) {
 
 // CHECK-LABEL: func @cttz_vec(
 // CHECK-SAME: i32
-func @cttz_vec(%arg0 : vector<4xi32>) {
+func.func @cttz_vec(%arg0 : vector<4xi32>) {
   // CHECK: %[[ZERO:.+]] = llvm.mlir.constant(false) : i1
   // CHECK: "llvm.intr.cttz"(%arg0, %[[ZERO]]) : (vector<4xi32>, i1) -> vector<4xi32>
   %0 = math.cttz %arg0 : vector<4xi32>
@@ -109,7 +109,7 @@ func @cttz_vec(%arg0 : vector<4xi32>) {
 
 // CHECK-LABEL: func @ctpop(
 // CHECK-SAME: i32
-func @ctpop(%arg0 : i32) {
+func.func @ctpop(%arg0 : i32) {
   // CHECK: "llvm.intr.ctpop"(%arg0) : (i32) -> i32
   %0 = math.ctpop %arg0 : i32
   func.return
@@ -119,7 +119,7 @@ func @ctpop(%arg0 : i32) {
 
 // CHECK-LABEL: func @ctpop_vector(
 // CHECK-SAME: vector<3xi32>
-func @ctpop_vector(%arg0 : vector<3xi32>) {
+func.func @ctpop_vector(%arg0 : vector<3xi32>) {
   // CHECK: "llvm.intr.ctpop"(%arg0) : (vector<3xi32>) -> vector<3xi32>
   %0 = math.ctpop %arg0 : vector<3xi32>
   func.return
@@ -129,7 +129,7 @@ func @ctpop_vector(%arg0 : vector<3xi32>) {
 
 // CHECK-LABEL: func @rsqrt_double(
 // CHECK-SAME: f64
-func @rsqrt_double(%arg0 : f64) {
+func.func @rsqrt_double(%arg0 : f64) {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f64) : f64
   // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (f64) -> f64
   // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : f64
@@ -141,7 +141,7 @@ func @rsqrt_double(%arg0 : f64) {
 
 // CHECK-LABEL: func @rsqrt_vector(
 // CHECK-SAME: vector<4xf32>
-func @rsqrt_vector(%arg0 : vector<4xf32>) {
+func.func @rsqrt_vector(%arg0 : vector<4xf32>) {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : vector<4xf32>
   // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (vector<4xf32>) -> vector<4xf32>
   // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : vector<4xf32>
@@ -152,7 +152,7 @@ func @rsqrt_vector(%arg0 : vector<4xf32>) {
 // -----
 
 // CHECK-LABEL: func @rsqrt_multidim_vector(
-func @rsqrt_multidim_vector(%arg0 : vector<4x3xf32>) {
+func.func @rsqrt_multidim_vector(%arg0 : vector<4x3xf32>) {
   // CHECK: %[[EXTRACT:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<4 x vector<3xf32>>
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<3xf32>) : vector<3xf32>
   // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%[[EXTRACT]]) : (vector<3xf32>) -> vector<3xf32>
@@ -166,7 +166,7 @@ func @rsqrt_multidim_vector(%arg0 : vector<4x3xf32>) {
 
 // CHECK-LABEL: func @powf(
 // CHECK-SAME: f64
-func @powf(%arg0 : f64) {
+func.func @powf(%arg0 : f64) {
   // CHECK: %[[POWF:.*]] = "llvm.intr.pow"(%arg0, %arg0) : (f64, f64) -> f64
   %0 = math.powf %arg0, %arg0 : f64
   func.return

diff --git a/mlir/test/Conversion/MathToLibm/convert-to-libm.mlir b/mlir/test/Conversion/MathToLibm/convert-to-libm.mlir
index 003155b0fe44a..57af89badd635 100644
--- a/mlir/test/Conversion/MathToLibm/convert-to-libm.mlir
+++ b/mlir/test/Conversion/MathToLibm/convert-to-libm.mlir
@@ -12,7 +12,7 @@
 // CHECK-LABEL: func @tanh_caller
 // CHECK-SAME: %[[FLOAT:.*]]: f32
 // CHECK-SAME: %[[DOUBLE:.*]]: f64
-func @tanh_caller(%float: f32, %double: f64) -> (f32, f64)  {
+func.func @tanh_caller(%float: f32, %double: f64) -> (f32, f64)  {
   // CHECK-DAG: %[[FLOAT_RESULT:.*]] = call @tanhf(%[[FLOAT]]) : (f32) -> f32
   %float_result = math.tanh %float : f32
   // CHECK-DAG: %[[DOUBLE_RESULT:.*]] = call @tanh(%[[DOUBLE]]) : (f64) -> f64
@@ -25,7 +25,7 @@ func @tanh_caller(%float: f32, %double: f64) -> (f32, f64)  {
 // CHECK-LABEL: func @atan2_caller
 // CHECK-SAME: %[[FLOAT:.*]]: f32
 // CHECK-SAME: %[[DOUBLE:.*]]: f64
-func @atan2_caller(%float: f32, %double: f64) -> (f32, f64) {
+func.func @atan2_caller(%float: f32, %double: f64) -> (f32, f64) {
   // CHECK-DAG: %[[FLOAT_RESULT:.*]] = call @atan2f(%[[FLOAT]], %[[FLOAT]]) : (f32, f32) -> f32
   %float_result = math.atan2 %float, %float : f32
   // CHECK-DAG: %[[DOUBLE_RESULT:.*]] = call @atan2(%[[DOUBLE]], %[[DOUBLE]]) : (f64, f64) -> f64
@@ -37,7 +37,7 @@ func @atan2_caller(%float: f32, %double: f64) -> (f32, f64) {
 // CHECK-LABEL: func @erf_caller
 // CHECK-SAME: %[[FLOAT:.*]]: f32
 // CHECK-SAME: %[[DOUBLE:.*]]: f64
-func @erf_caller(%float: f32, %double: f64) -> (f32, f64)  {
+func.func @erf_caller(%float: f32, %double: f64) -> (f32, f64)  {
   // CHECK-DAG: %[[FLOAT_RESULT:.*]] = call @erff(%[[FLOAT]]) : (f32) -> f32
   %float_result = math.erf %float : f32
   // CHECK-DAG: %[[DOUBLE_RESULT:.*]] = call @erf(%[[DOUBLE]]) : (f64) -> f64
@@ -49,7 +49,7 @@ func @erf_caller(%float: f32, %double: f64) -> (f32, f64)  {
 // CHECK-LABEL: func @expm1_caller
 // CHECK-SAME: %[[FLOAT:.*]]: f32
 // CHECK-SAME: %[[DOUBLE:.*]]: f64
-func @expm1_caller(%float: f32, %double: f64) -> (f32, f64) {
+func.func @expm1_caller(%float: f32, %double: f64) -> (f32, f64) {
   // CHECK-DAG: %[[FLOAT_RESULT:.*]] = call @expm1f(%[[FLOAT]]) : (f32) -> f32
   %float_result = math.expm1 %float : f32
   // CHECK-DAG: %[[DOUBLE_RESULT:.*]] = call @expm1(%[[DOUBLE]]) : (f64) -> f64
@@ -58,7 +58,7 @@ func @expm1_caller(%float: f32, %double: f64) -> (f32, f64) {
   return %float_result, %double_result : f32, f64
 }
 
-func @expm1_vec_caller(%float: vector<2xf32>, %double: vector<2xf64>) -> (vector<2xf32>, vector<2xf64>) {
+func.func @expm1_vec_caller(%float: vector<2xf32>, %double: vector<2xf64>) -> (vector<2xf32>, vector<2xf64>) {
   %float_result = math.expm1 %float : vector<2xf32>
   %double_result = math.expm1 %double : vector<2xf64>
   return %float_result, %double_result : vector<2xf32>, vector<2xf64>
@@ -83,7 +83,7 @@ func @expm1_vec_caller(%float: vector<2xf32>, %double: vector<2xf64>) -> (vector
 // CHECK:           return %[[VAL_11]], %[[VAL_17]] : vector<2xf32>, vector<2xf64>
 // CHECK:         }
 
-func @expm1_multidim_vec_caller(%float: vector<2x2xf32>) -> (vector<2x2xf32>) {
+func.func @expm1_multidim_vec_caller(%float: vector<2x2xf32>) -> (vector<2x2xf32>) {
   %result = math.expm1 %float : vector<2x2xf32>
   return %result : vector<2x2xf32>
 }

diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
index f2379f2cab5cd..42b5a7fc05d91 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -split-input-file -convert-math-to-spirv -verify-diagnostics %s -o - | FileCheck %s
 
-func @copy_sign_scalar(%value: f32, %sign: f32) -> f32 {
+func.func @copy_sign_scalar(%value: f32, %sign: f32) -> f32 {
   %0 = math.copysign %value, %sign : f32
   return %0: f32
 }
@@ -21,7 +21,7 @@ func @copy_sign_scalar(%value: f32, %sign: f32) -> f32 {
 
 module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16, Int16], []>, {}> } {
 
-func @copy_sign_vector(%value: vector<3xf16>, %sign: vector<3xf16>) -> vector<3xf16> {
+func.func @copy_sign_vector(%value: vector<3xf16>, %sign: vector<3xf16>) -> vector<3xf16> {
   %0 = math.copysign %value, %sign : vector<3xf16>
   return %0: vector<3xf16>
 }

diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-glsl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-glsl-spirv.mlir
index e172114f22eac..b03333644e4f5 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-glsl-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-glsl-spirv.mlir
@@ -3,7 +3,7 @@
 module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Shader], []>, {}> } {
 
 // CHECK-LABEL: @float32_unary_scalar
-func @float32_unary_scalar(%arg0: f32) {
+func.func @float32_unary_scalar(%arg0: f32) {
   // CHECK: spv.GLSL.Cos %{{.*}}: f32
   %0 = math.cos %arg0 : f32
   // CHECK: spv.GLSL.Exp %{{.*}}: f32
@@ -36,7 +36,7 @@ func @float32_unary_scalar(%arg0: f32) {
 }
 
 // CHECK-LABEL: @float32_unary_vector
-func @float32_unary_vector(%arg0: vector<3xf32>) {
+func.func @float32_unary_vector(%arg0: vector<3xf32>) {
   // CHECK: spv.GLSL.Cos %{{.*}}: vector<3xf32>
   %0 = math.cos %arg0 : vector<3xf32>
   // CHECK: spv.GLSL.Exp %{{.*}}: vector<3xf32>
@@ -63,28 +63,28 @@ func @float32_unary_vector(%arg0: vector<3xf32>) {
 }
 
 // CHECK-LABEL: @float32_binary_scalar
-func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
+func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
   // CHECK: spv.GLSL.Pow %{{.*}}: f32
   %0 = math.powf %lhs, %rhs : f32
   return
 }
 
 // CHECK-LABEL: @float32_binary_vector
-func @float32_binary_vector(%lhs: vector<4xf32>, %rhs: vector<4xf32>) {
+func.func @float32_binary_vector(%lhs: vector<4xf32>, %rhs: vector<4xf32>) {
   // CHECK: spv.GLSL.Pow %{{.*}}: vector<4xf32>
   %0 = math.powf %lhs, %rhs : vector<4xf32>
   return
 }
 
 // CHECK-LABEL: @float32_ternary_scalar
-func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
+func.func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
   // CHECK: spv.GLSL.Fma %{{.*}}: f32
   %0 = math.fma %a, %b, %c : f32
   return
 }
 
 // CHECK-LABEL: @float32_ternary_vector
-func @float32_ternary_vector(%a: vector<4xf32>, %b: vector<4xf32>,
+func.func @float32_ternary_vector(%a: vector<4xf32>, %b: vector<4xf32>,
                             %c: vector<4xf32>) {
   // CHECK: spv.GLSL.Fma %{{.*}}: vector<4xf32>
   %0 = math.fma %a, %b, %c : vector<4xf32>

diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
index 8a248edc9902f..852d52778207b 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
@@ -3,7 +3,7 @@
 module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel], []>, {}> } {
 
 // CHECK-LABEL: @float32_unary_scalar
-func @float32_unary_scalar(%arg0: f32) {
+func.func @float32_unary_scalar(%arg0: f32) {
   // CHECK: spv.OCL.cos %{{.*}}: f32
   %0 = math.cos %arg0 : f32
   // CHECK: spv.OCL.exp %{{.*}}: f32
@@ -38,7 +38,7 @@ func @float32_unary_scalar(%arg0: f32) {
 }
 
 // CHECK-LABEL: @float32_unary_vector
-func @float32_unary_vector(%arg0: vector<3xf32>) {
+func.func @float32_unary_vector(%arg0: vector<3xf32>) {
   // CHECK: spv.OCL.cos %{{.*}}: vector<3xf32>
   %0 = math.cos %arg0 : vector<3xf32>
   // CHECK: spv.OCL.exp %{{.*}}: vector<3xf32>
@@ -65,28 +65,28 @@ func @float32_unary_vector(%arg0: vector<3xf32>) {
 }
 
 // CHECK-LABEL: @float32_binary_scalar
-func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
+func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
   // CHECK: spv.OCL.pow %{{.*}}: f32
   %0 = math.powf %lhs, %rhs : f32
   return
 }
 
 // CHECK-LABEL: @float32_binary_vector
-func @float32_binary_vector(%lhs: vector<4xf32>, %rhs: vector<4xf32>) {
+func.func @float32_binary_vector(%lhs: vector<4xf32>, %rhs: vector<4xf32>) {
   // CHECK: spv.OCL.pow %{{.*}}: vector<4xf32>
   %0 = math.powf %lhs, %rhs : vector<4xf32>
   return
 }
 
 // CHECK-LABEL: @float32_ternary_scalar
-func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
+func.func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
   // CHECK: spv.OCL.fma %{{.*}}: f32
   %0 = math.fma %a, %b, %c : f32
   return
 }
 
 // CHECK-LABEL: @float32_ternary_vector
-func @float32_ternary_vector(%a: vector<4xf32>, %b: vector<4xf32>,
+func.func @float32_ternary_vector(%a: vector<4xf32>, %b: vector<4xf32>,
                             %c: vector<4xf32>) {
   // CHECK: spv.OCL.fma %{{.*}}: vector<4xf32>
   %0 = math.fma %a, %b, %c : vector<4xf32>

diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-alloca-scope.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-alloca-scope.mlir
index 0f4fb4ce7c238..c859fc16a4103 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-alloca-scope.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-alloca-scope.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -convert-memref-to-llvm %s | FileCheck %s
 
 // CHECK-LABEL: @empty
-func @empty() {
+func.func @empty() {
   // CHECK: llvm.intr.stacksave 
   // CHECK: llvm.br
   memref.alloca_scope {
@@ -13,7 +13,7 @@ func @empty() {
 }
 
 // CHECK-LABEL: @returns_nothing
-func @returns_nothing(%b: f32) {
+func.func @returns_nothing(%b: f32) {
   %a = arith.constant 10.0 : f32
   // CHECK: llvm.intr.stacksave 
   memref.alloca_scope {
@@ -25,7 +25,7 @@ func @returns_nothing(%b: f32) {
 }
 
 // CHECK-LABEL: @returns_one_value
-func @returns_one_value(%b: f32) -> f32 {
+func.func @returns_one_value(%b: f32) -> f32 {
   %a = arith.constant 10.0 : f32
   // CHECK: llvm.intr.stacksave 
   %result = memref.alloca_scope -> f32 {
@@ -37,7 +37,7 @@ func @returns_one_value(%b: f32) -> f32 {
 }
 
 // CHECK-LABEL: @returns_multiple_values
-func @returns_multiple_values(%b: f32) -> f32 {
+func.func @returns_multiple_values(%b: f32) -> f32 {
   %a = arith.constant 10.0 : f32
   // CHECK: llvm.intr.stacksave 
   %result1, %result2 = memref.alloca_scope -> (f32, f32) {

diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
index c10dc3590d008..639d89976baf4 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
@@ -4,7 +4,7 @@
 
 // CHECK-LABEL: func @mixed_alloc(
 //       CHECK:   %[[Marg:.*]]: index, %[[Narg:.*]]: index)
-func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
+func.func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
 //   CHECK-DAG:  %[[M:.*]] = builtin.unrealized_conversion_cast %[[Marg]]
 //   CHECK-DAG:  %[[N:.*]] = builtin.unrealized_conversion_cast %[[Narg]]
 //       CHECK:  %[[c42:.*]] = llvm.mlir.constant(42 : index) : i64
@@ -34,7 +34,7 @@ func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
 // -----
 
 // CHECK-LABEL: func @mixed_dealloc
-func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
+func.func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
 //      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 // CHECK-NEXT:  %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK-NEXT:  llvm.call @free(%[[ptri8]]) : (!llvm.ptr<i8>) -> ()
@@ -46,7 +46,7 @@ func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
 
 // CHECK-LABEL: func @dynamic_alloc(
 //       CHECK:   %[[Marg:.*]]: index, %[[Narg:.*]]: index)
-func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
+func.func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 //   CHECK-DAG:  %[[M:.*]] = builtin.unrealized_conversion_cast %[[Marg]]
 //   CHECK-DAG:  %[[N:.*]] = builtin.unrealized_conversion_cast %[[Narg]]
 //  CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
@@ -73,7 +73,7 @@ func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 
 // CHECK-LABEL: func @dynamic_alloca
 // CHECK: %[[Marg:.*]]: index, %[[Narg:.*]]: index)
-func @dynamic_alloca(%arg0: index, %arg1: index) -> memref<?x?xf32> {
+func.func @dynamic_alloca(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 //   CHECK-DAG:  %[[M:.*]] = builtin.unrealized_conversion_cast %[[Marg]]
 //   CHECK-DAG:  %[[N:.*]] = builtin.unrealized_conversion_cast %[[Narg]]
 //  CHECK-NEXT:  %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
@@ -107,7 +107,7 @@ func @dynamic_alloca(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 // -----
 
 // CHECK-LABEL: func @dynamic_dealloc
-func @dynamic_dealloc(%arg0: memref<?x?xf32>) {
+func.func @dynamic_dealloc(%arg0: memref<?x?xf32>) {
 //      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT:  %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK-NEXT:  llvm.call @free(%[[ptri8]]) : (!llvm.ptr<i8>) -> ()
@@ -119,7 +119,7 @@ func @dynamic_dealloc(%arg0: memref<?x?xf32>) {
 
 // CHECK-LABEL: func @stdlib_aligned_alloc({{.*}})
 // ALIGNED-ALLOC-LABEL: func @stdlib_aligned_alloc({{.*}})
-func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
+func.func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
 // ALIGNED-ALLOC:       %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
@@ -167,7 +167,7 @@ func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
 
 // CHECK-LABEL: func @mixed_load(
 // CHECK:         %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index)
-func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
+func.func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
 //   CHECK-DAG:  %[[I:.*]] = builtin.unrealized_conversion_cast %[[Iarg]]
 //   CHECK-DAG:  %[[J:.*]] = builtin.unrealized_conversion_cast %[[Jarg]]
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -184,7 +184,7 @@ func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
 
 // CHECK-LABEL: func @dynamic_load(
 // CHECK:         %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index)
-func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
+func.func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
 //   CHECK-DAG:  %[[I:.*]] = builtin.unrealized_conversion_cast %[[Iarg]]
 //   CHECK-DAG:  %[[J:.*]] = builtin.unrealized_conversion_cast %[[Jarg]]
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -201,7 +201,7 @@ func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
 
 // CHECK-LABEL: func @prefetch
 // CHECK:         %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index)
-func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
+func.func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
 //      CHECK-DAG:  %[[I:.*]] = builtin.unrealized_conversion_cast %[[Iarg]]
 //      CHECK-DAG:  %[[J:.*]] = builtin.unrealized_conversion_cast %[[Jarg]]
 //      CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -231,7 +231,7 @@ func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
 
 // CHECK-LABEL: func @dynamic_store
 // CHECK:         %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index
-func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f32) {
+func.func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f32) {
 //   CHECK-DAG:  %[[I:.*]] = builtin.unrealized_conversion_cast %[[Iarg]]
 //   CHECK-DAG:  %[[J:.*]] = builtin.unrealized_conversion_cast %[[Jarg]]
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -248,7 +248,7 @@ func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f
 
 // CHECK-LABEL: func @mixed_store
 // CHECK:         %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index
-func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) {
+func.func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) {
 //   CHECK-DAG:  %[[I:.*]] = builtin.unrealized_conversion_cast %[[Iarg]]
 //   CHECK-DAG:  %[[J:.*]] = builtin.unrealized_conversion_cast %[[Jarg]]
 //       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -264,7 +264,7 @@ func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32)
 // -----
 
 // CHECK-LABEL: func @memref_cast_static_to_dynamic
-func @memref_cast_static_to_dynamic(%static : memref<10x42xf32>) {
+func.func @memref_cast_static_to_dynamic(%static : memref<10x42xf32>) {
 // CHECK-NOT: llvm.bitcast
   %0 = memref.cast %static : memref<10x42xf32> to memref<?x?xf32>
   return
@@ -273,7 +273,7 @@ func @memref_cast_static_to_dynamic(%static : memref<10x42xf32>) {
 // -----
 
 // CHECK-LABEL: func @memref_cast_static_to_mixed
-func @memref_cast_static_to_mixed(%static : memref<10x42xf32>) {
+func.func @memref_cast_static_to_mixed(%static : memref<10x42xf32>) {
 // CHECK-NOT: llvm.bitcast
   %0 = memref.cast %static : memref<10x42xf32> to memref<?x42xf32>
   return
@@ -282,7 +282,7 @@ func @memref_cast_static_to_mixed(%static : memref<10x42xf32>) {
 // -----
 
 // CHECK-LABEL: func @memref_cast_dynamic_to_static
-func @memref_cast_dynamic_to_static(%dynamic : memref<?x?xf32>) {
+func.func @memref_cast_dynamic_to_static(%dynamic : memref<?x?xf32>) {
 // CHECK-NOT: llvm.bitcast
   %0 = memref.cast %dynamic : memref<?x?xf32> to memref<10x12xf32>
   return
@@ -291,7 +291,7 @@ func @memref_cast_dynamic_to_static(%dynamic : memref<?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @memref_cast_dynamic_to_mixed
-func @memref_cast_dynamic_to_mixed(%dynamic : memref<?x?xf32>) {
+func.func @memref_cast_dynamic_to_mixed(%dynamic : memref<?x?xf32>) {
 // CHECK-NOT: llvm.bitcast
   %0 = memref.cast %dynamic : memref<?x?xf32> to memref<?x12xf32>
   return
@@ -300,7 +300,7 @@ func @memref_cast_dynamic_to_mixed(%dynamic : memref<?x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @memref_cast_mixed_to_dynamic
-func @memref_cast_mixed_to_dynamic(%mixed : memref<42x?xf32>) {
+func.func @memref_cast_mixed_to_dynamic(%mixed : memref<42x?xf32>) {
 // CHECK-NOT: llvm.bitcast
   %0 = memref.cast %mixed : memref<42x?xf32> to memref<?x?xf32>
   return
@@ -309,7 +309,7 @@ func @memref_cast_mixed_to_dynamic(%mixed : memref<42x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @memref_cast_mixed_to_static
-func @memref_cast_mixed_to_static(%mixed : memref<42x?xf32>) {
+func.func @memref_cast_mixed_to_static(%mixed : memref<42x?xf32>) {
 // CHECK-NOT: llvm.bitcast
   %0 = memref.cast %mixed : memref<42x?xf32> to memref<42x1xf32>
   return
@@ -318,7 +318,7 @@ func @memref_cast_mixed_to_static(%mixed : memref<42x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @memref_cast_mixed_to_mixed
-func @memref_cast_mixed_to_mixed(%mixed : memref<42x?xf32>) {
+func.func @memref_cast_mixed_to_mixed(%mixed : memref<42x?xf32>) {
 // CHECK-NOT: llvm.bitcast
   %0 = memref.cast %mixed : memref<42x?xf32> to memref<?x1xf32>
   return
@@ -328,7 +328,7 @@ func @memref_cast_mixed_to_mixed(%mixed : memref<42x?xf32>) {
 
 // CHECK-LABEL: func @memref_cast_ranked_to_unranked
 // CHECK32-LABEL: func @memref_cast_ranked_to_unranked
-func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
+func.func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
 // CHECK-DAG:  %[[c:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK-DAG:  %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>>
 // CHECK-DAG:  llvm.store %{{.*}}, %[[p]] : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>>
@@ -352,7 +352,7 @@ func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @memref_cast_unranked_to_ranked
-func @memref_cast_unranked_to_ranked(%arg : memref<*xf32>) {
+func.func @memref_cast_unranked_to_ranked(%arg : memref<*xf32>) {
 //      CHECK: %[[p:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(i64, ptr<i8>)>
 // CHECK-NEXT: llvm.bitcast %[[p]] : !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>>
   %0 = memref.cast %arg : memref<*xf32> to memref<?x?x10x2xf32>
@@ -362,7 +362,7 @@ func @memref_cast_unranked_to_ranked(%arg : memref<*xf32>) {
 // -----
 
 // CHECK-LABEL: func @mixed_memref_dim
-func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
+func.func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
 // CHECK: llvm.mlir.constant(42 : index) : i64
   %c0 = arith.constant 0 : index
   %0 = memref.dim %mixed, %c0 : memref<42x?x?x13x?xf32>
@@ -385,7 +385,7 @@ func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
 
 // CHECK-LABEL: @memref_dim_with_dyn_index
 // CHECK: %{{.*}}, %[[IDXarg:.*]]: index
-func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
+func.func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
   // CHECK-DAG: %[[IDX:.*]] = builtin.unrealized_conversion_cast %[[IDXarg]]
   // CHECK-DAG: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
   // CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64
@@ -401,7 +401,7 @@ func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
 // -----
 
 // CHECK-LABEL: @memref_reinterpret_cast_ranked_to_static_shape
-func @memref_reinterpret_cast_ranked_to_static_shape(%input : memref<2x3xf32>) {
+func.func @memref_reinterpret_cast_ranked_to_static_shape(%input : memref<2x3xf32>) {
   %output = memref.reinterpret_cast %input to
            offset: [0], sizes: [6, 1], strides: [1, 1]
            : memref<2x3xf32> to memref<6x1xf32>
@@ -428,7 +428,7 @@ func @memref_reinterpret_cast_ranked_to_static_shape(%input : memref<2x3xf32>) {
 // -----
 
 // CHECK-LABEL: @memref_reinterpret_cast_unranked_to_dynamic_shape
-func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
+func.func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
                                                         %size_0 : index,
                                                         %size_1 : index,
                                                         %stride_0 : index,
@@ -469,7 +469,7 @@ func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
 // -----
 
 // CHECK-LABEL: @memref_reshape
-func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
+func.func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
   %output = memref.reshape %input(%shape)
                 : (memref<2x3xf32>, memref<?xindex>) -> memref<*xf32>
   return
@@ -540,7 +540,7 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
 // -----
 
 // ALIGNED-ALLOC-LABEL: @memref_of_memref
-func @memref_of_memref() {
+func.func @memref_of_memref() {
   // Sizeof computation is as usual.
   // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null
   // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
@@ -565,7 +565,7 @@ func @memref_of_memref() {
 
 module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
   // ALIGNED-ALLOC-LABEL: @memref_of_memref_32
-  func @memref_of_memref_32() {
+  func.func @memref_of_memref_32() {
     // Sizeof computation is as usual.
     // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null
     // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
@@ -591,7 +591,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
 // -----
 
 // ALIGNED-ALLOC-LABEL: @memref_of_memref_of_memref
-func @memref_of_memref_of_memref() {
+func.func @memref_of_memref_of_memref() {
   // Sizeof computation is as usual, also check the type.
   // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<
   // ALIGNED-ALLOC-SAME:   struct<(
@@ -614,7 +614,7 @@ func @memref_of_memref_of_memref() {
 // -----
 
 // ALIGNED-ALLOC-LABEL: @ranked_unranked
-func @ranked_unranked() {
+func.func @ranked_unranked() {
   // ALIGNED-ALLOC: llvm.mlir.null
   // ALIGNED-ALLOC-SAME: !llvm.[[INNER:ptr<struct<\(i64, ptr<i8>\)>>]]
   // ALIGNED-ALLOC: llvm.getelementptr

diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
index 63448ef30fc4b..93f1449002902 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -convert-memref-to-llvm -split-input-file %s | FileCheck %s
 
 // CHECK-LABEL: func @zero_d_alloc()
-func @zero_d_alloc() -> memref<f32> {
+func.func @zero_d_alloc() -> memref<f32> {
 // CHECK: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
 // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
@@ -22,7 +22,7 @@ func @zero_d_alloc() -> memref<f32> {
 // -----
 
 // CHECK-LABEL: func @zero_d_dealloc
-func @zero_d_dealloc(%arg0: memref<f32>) {
+func.func @zero_d_dealloc(%arg0: memref<f32>) {
 // CHECK: unrealized_conversion_cast
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 // CHECK: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
@@ -35,7 +35,7 @@ func @zero_d_dealloc(%arg0: memref<f32>) {
 // -----
 
 // CHECK-LABEL: func @aligned_1d_alloc(
-func @aligned_1d_alloc() -> memref<42xf32> {
+func.func @aligned_1d_alloc() -> memref<42xf32> {
 // CHECK: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64
 // CHECK: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
@@ -64,7 +64,7 @@ func @aligned_1d_alloc() -> memref<42xf32> {
 // -----
 
 // CHECK-LABEL: func @static_alloc()
-func @static_alloc() -> memref<32x18xf32> {
+func.func @static_alloc() -> memref<32x18xf32> {
 // CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
 // CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
 // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
@@ -78,7 +78,7 @@ func @static_alloc() -> memref<32x18xf32> {
 // -----
 
 // CHECK-LABEL: func @static_alloca()
-func @static_alloca() -> memref<32x18xf32> {
+func.func @static_alloca() -> memref<32x18xf32> {
 // CHECK: %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64
 // CHECK: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64
 // CHECK: %[[st2:.*]] = llvm.mlir.constant(1 : index) : i64
@@ -103,7 +103,7 @@ func @static_alloca() -> memref<32x18xf32> {
 // -----
 
 // CHECK-LABEL: func @static_dealloc
-func @static_dealloc(%static: memref<10x8xf32>) {
+func.func @static_dealloc(%static: memref<10x8xf32>) {
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK: llvm.call @free(%[[bc]]) : (!llvm.ptr<i8>) -> ()
@@ -114,7 +114,7 @@ func @static_dealloc(%static: memref<10x8xf32>) {
 // -----
 
 // CHECK-LABEL: func @zero_d_load
-func @zero_d_load(%arg0: memref<f32>) -> f32 {
+func.func @zero_d_load(%arg0: memref<f32>) -> f32 {
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 // CHECK: %{{.*}} = llvm.load %[[ptr]] : !llvm.ptr<f32>
   %0 = memref.load %arg0[] : memref<f32>
@@ -127,7 +127,7 @@ func @zero_d_load(%arg0: memref<f32>) -> f32 {
 // CHECK:         %[[MEMREF:.*]]: memref<10x42xf32>,
 // CHECK:         %[[I:.*]]: index,
 // CHECK:         %[[J:.*]]: index)
-func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
+func.func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
 // CHECK:  %[[II:.*]] = builtin.unrealized_conversion_cast %[[I]]
 // CHECK:  %[[JJ:.*]] = builtin.unrealized_conversion_cast %[[J]]
 // CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -143,7 +143,7 @@ func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
 // -----
 
 // CHECK-LABEL: func @zero_d_store
-func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
+func.func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 // CHECK: llvm.store %{{.*}}, %[[ptr]] : !llvm.ptr<f32>
   memref.store %arg1, %arg0[] : memref<f32>
@@ -155,7 +155,7 @@ func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
 // CHECK-LABEL: func @static_store
 // CHECK:         %[[MEMREF:.*]]: memref<10x42xf32>,
 // CHECK-SAME:    %[[I:.*]]: index, %[[J:.*]]: index,
-func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f32) {
+func.func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f32) {
 // CHECK: %[[II:.*]] = builtin.unrealized_conversion_cast %[[I]]
 // CHECK: %[[JJ:.*]] = builtin.unrealized_conversion_cast %[[J]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -172,7 +172,7 @@ func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f
 // -----
 
 // CHECK-LABEL: func @static_memref_dim
-func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) {
+func.func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) {
 // CHECK:  llvm.mlir.constant(42 : index) : i64
   %c0 = arith.constant 0 : index
   %0 = memref.dim %static, %c0 : memref<42x32x15x13x27xf32>
@@ -196,7 +196,7 @@ func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) {
 // Check that consistent types are emitted in address arithmetic in the presence of
 // a data layout specification.
 module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
-  func @address() {
+  func.func @address() {
     %c1 = arith.constant 1 : index
     %0 = memref.alloc(%c1) : memref<? x vector<2xf32>>
     // CHECK: %[[CST_S:.*]] = arith.constant 1 : index

diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 32f9c2fef0585..9b03c18a58802 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -3,7 +3,7 @@
 
 // CHECK-LABEL: func @view(
 // CHECK: %[[ARG0F:.*]]: index, %[[ARG1F:.*]]: index, %[[ARG2F:.*]]: index
-func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK: %[[ARG2:.*]] = builtin.unrealized_conversion_cast %[[ARG2F:.*]]
   // CHECK: %[[ARG0:.*]] = builtin.unrealized_conversion_cast %[[ARG0F:.*]]
   // CHECK: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1F:.*]]
@@ -99,7 +99,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
 // CHECK32:         %[[ARG0f:[a-zA-Z0-9]*]]: index,
 // CHECK32:         %[[ARG1f:[a-zA-Z0-9]*]]: index,
 // CHECK32:         %[[ARG2f:.*]]: index)
-func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK-DAG: %[[MEMREF:.*]] = builtin.unrealized_conversion_cast %[[MEM]]
   // CHECK-DAG: %[[ARG0:.*]] = builtin.unrealized_conversion_cast %[[ARG0f]]
   // CHECK-DAG: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1f]]
@@ -164,7 +164,7 @@ func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index,
 // CHECK32:         %[[ARG0f:[a-zA-Z0-9]*]]: index,
 // CHECK32:         %[[ARG1f:[a-zA-Z0-9]*]]: index,
 // CHECK32:         %[[ARG2f:.*]]: index)
-func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1], 3>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1], 3>, %arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK-DAG: %[[MEMREF:.*]] = builtin.unrealized_conversion_cast %[[MEM]]
   // CHECK-DAG: %[[ARG0:.*]] = builtin.unrealized_conversion_cast %[[ARG0f]]
   // CHECK-DAG: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1f]]
@@ -228,7 +228,7 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
 // CHECK32-SAME:         %[[ARG0f:[a-zA-Z0-9]*]]: index
 // CHECK32-SAME:         %[[ARG1f:[a-zA-Z0-9]*]]: index
 // CHECK32-SAME:         %[[ARG2f:[a-zA-Z0-9]*]]: index
-func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK-DAG: %[[MEMREF:.*]] = builtin.unrealized_conversion_cast %[[MEM]]
   // CHECK-DAG: %[[ARG0:.*]] = builtin.unrealized_conversion_cast %[[ARG0f]]
   // CHECK-DAG: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1f]]
@@ -296,7 +296,7 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
 // CHECK32-SAME:         %[[ARG0f:[a-zA-Z0-9]*]]: index
 // CHECK32-SAME:         %[[ARG1f:[a-zA-Z0-9]*]]: index
 // CHECK32-SAME:         %[[ARG2f:[a-zA-Z0-9]*]]: index
-func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK-DAG: %[[MEMREF:.*]] = builtin.unrealized_conversion_cast %[[MEM]]
   // CHECK-DAG: %[[ARG0:.*]] = builtin.unrealized_conversion_cast %[[ARG0f]]
   // CHECK-DAG: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1f]]
@@ -352,7 +352,7 @@ func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %a
 
 // CHECK-LABEL: func @subview_const_stride_and_offset(
 // CHECK32-LABEL: func @subview_const_stride_and_offset(
-func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>) {
+func.func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>) {
   // The last "insertvalue" that populates the memref descriptor from the function arguments.
   // CHECK: %[[MEMREF:.*]] = builtin.unrealized_conversion_cast
   // CHECK32: %[[MEMREF:.*]] = builtin.unrealized_conversion_cast
@@ -392,7 +392,7 @@ func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides:
 // CHECK32:         %[[ARG0f:[a-zA-Z0-9]*]]: index,
 // CHECK32:         %[[ARG1f:[a-zA-Z0-9]*]]: index,
 // CHECK32:         %[[ARG2f:.*]]: index)
-func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK32-DAG: %[[MEMREF:.*]] = builtin.unrealized_conversion_cast %[[MEM]]
   // CHECK32-DAG: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1f]]
   // CHECK32-DAG: %[[ARG2:.*]] = builtin.unrealized_conversion_cast %[[ARG2f]]
@@ -429,7 +429,7 @@ func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4,
 // -----
 
 // CHECK-LABEL: func @subview_leading_operands(
-func @subview_leading_operands(%0 : memref<5x3xf32>, %1: memref<5x?xf32>) {
+func.func @subview_leading_operands(%0 : memref<5x3xf32>, %1: memref<5x?xf32>) {
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // Alloc ptr
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -456,7 +456,7 @@ func @subview_leading_operands(%0 : memref<5x3xf32>, %1: memref<5x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @subview_leading_operands_dynamic(
-func @subview_leading_operands_dynamic(%0 : memref<5x?xf32>) {
+func.func @subview_leading_operands_dynamic(%0 : memref<5x?xf32>) {
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // Alloc ptr
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
@@ -493,7 +493,7 @@ func @subview_leading_operands_dynamic(%0 : memref<5x?xf32>) {
 // -----
 
 // CHECK-LABEL: func @subview_rank_reducing_leading_operands(
-func @subview_rank_reducing_leading_operands(%0 : memref<5x3xf32>) {
+func.func @subview_rank_reducing_leading_operands(%0 : memref<5x3xf32>) {
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
   // Alloc ptr
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
@@ -518,7 +518,7 @@ func @subview_rank_reducing_leading_operands(%0 : memref<5x3xf32>) {
 // -----
 
 // CHECK-LABEL: func @assume_alignment
-func @assume_alignment(%0 : memref<4x4xf16>) {
+func.func @assume_alignment(%0 : memref<4x4xf16>) {
   // CHECK: %[[PTR:.*]] = llvm.extractvalue %[[MEMREF:.*]][1] : !llvm.struct<(ptr<f16>, ptr<f16>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK-NEXT: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
   // CHECK-NEXT: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : i64
@@ -534,7 +534,7 @@ func @assume_alignment(%0 : memref<4x4xf16>) {
 
 // CHECK-LABEL: func @dim_of_unranked
 // CHECK32-LABEL: func @dim_of_unranked
-func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
+func.func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
   %c0 = arith.constant 0 : index
   %dim = memref.dim %unranked, %c0 : memref<*xi32>
   return %dim : index
@@ -566,7 +566,7 @@ func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
 // -----
 
 // CHECK-LABEL: func @address_space(
-func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
+func.func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
   %0 = memref.alloc() : memref<32xf32, affine_map<(d0) -> (d0)>, 5>
   %1 = arith.constant 7 : index
   // CHECK: llvm.load %{{.*}} : !llvm.ptr<f32, 5>
@@ -587,7 +587,7 @@ func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
 //       CHECK:    llvm.insertvalue {{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:   llvm.extractvalue {{.*}}[3, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:    llvm.insertvalue {{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
-func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
+func.func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   %0 = memref.transpose %arg0 (i, j, k) -> (k, i, j) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]> to memref<?x?x?xf32, affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d0 * s2 + d1)>>
   return
 }
@@ -608,7 +608,7 @@ memref.global @gv2 : memref<2x3xf32> = dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]>
 
 // Test 1D memref.
 // CHECK-LABEL: func @get_gv0_memref
-func @get_gv0_memref() {
+func.func @get_gv0_memref() {
   %0 = memref.get_global @gv0 : memref<2xf32>
   // CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : i64
   // CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : i64
@@ -629,7 +629,7 @@ func @get_gv0_memref() {
 
 // Test 2D memref.
 // CHECK-LABEL: func @get_gv2_memref
-func @get_gv2_memref() {
+func.func @get_gv2_memref() {
   // CHECK: %[[DIM0:.*]] = llvm.mlir.constant(2 : index) : i64
   // CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : i64
   // CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : i64
@@ -657,7 +657,7 @@ func @get_gv2_memref() {
 memref.global @gv3 : memref<f32> = dense<1.0>
 
 // CHECK-LABEL: func @get_gv3_memref
-func @get_gv3_memref() {
+func.func @get_gv3_memref() {
   // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr<f32>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
   // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
@@ -678,7 +678,7 @@ memref.global "private" @gv4 : memref<f32> = dense<1.0> {alignment = 64}
 
 // -----
 
-func @collapse_shape_static(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> {
+func.func @collapse_shape_static(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> {
   %0 = memref.collapse_shape %arg0 [[0, 1], [2], [3, 4]] :
     memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
   return %0 : memref<3x4x5xf32>
@@ -706,7 +706,7 @@ func @collapse_shape_static(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> {
 
 // -----
 
-func @expand_shape_static(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
+func.func @expand_shape_static(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
   // Reshapes that expand a contiguous tensor with some 1's.
   %0 = memref.expand_shape %arg0 [[0, 1], [2], [3, 4]]
       : memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
@@ -744,7 +744,7 @@ func @expand_shape_static(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
 
 // -----
 
-func @collapse_shape_fold_zero_dim(%arg0 : memref<1x1xf32>) -> memref<f32> {
+func.func @collapse_shape_fold_zero_dim(%arg0 : memref<1x1xf32>) -> memref<f32> {
   %0 = memref.collapse_shape %arg0 [] : memref<1x1xf32> into memref<f32>
   return %0 : memref<f32>
 }
@@ -759,7 +759,7 @@ func @collapse_shape_fold_zero_dim(%arg0 : memref<1x1xf32>) -> memref<f32> {
 
 // -----
 
-func @expand_shape_zero_dim(%arg0 : memref<f32>) -> memref<1x1xf32> {
+func.func @expand_shape_zero_dim(%arg0 : memref<f32>) -> memref<1x1xf32> {
   %0 = memref.expand_shape %arg0 [] : memref<f32> into memref<1x1xf32>
   return %0 : memref<1x1xf32>
 }
@@ -782,7 +782,7 @@ func @expand_shape_zero_dim(%arg0 : memref<f32>) -> memref<1x1xf32> {
 
 // -----
 
-func @collapse_shape_dynamic(%arg0 : memref<1x2x?xf32>) -> memref<1x?xf32> {
+func.func @collapse_shape_dynamic(%arg0 : memref<1x2x?xf32>) -> memref<1x?xf32> {
   %0 = memref.collapse_shape %arg0 [[0], [1, 2]]:  memref<1x2x?xf32> into memref<1x?xf32>
   return %0 : memref<1x?xf32>
 }
@@ -810,7 +810,7 @@ func @collapse_shape_dynamic(%arg0 : memref<1x2x?xf32>) -> memref<1x?xf32> {
 
 // -----
 
-func @expand_shape_dynamic(%arg0 : memref<1x?xf32>) -> memref<1x2x?xf32> {
+func.func @expand_shape_dynamic(%arg0 : memref<1x?xf32>) -> memref<1x2x?xf32> {
   %0 = memref.expand_shape %arg0 [[0], [1, 2]]: memref<1x?xf32> into memref<1x2x?xf32>
   return %0 : memref<1x2x?xf32>
 }
@@ -842,7 +842,7 @@ func @expand_shape_dynamic(%arg0 : memref<1x?xf32>) -> memref<1x2x?xf32> {
 
 // CHECK-LABEL: func @rank_of_unranked
 // CHECK32-LABEL: func @rank_of_unranked
-func @rank_of_unranked(%unranked: memref<*xi32>) {
+func.func @rank_of_unranked(%unranked: memref<*xi32>) {
   %rank = memref.rank %unranked : memref<*xi32>
   return
 }
@@ -852,7 +852,7 @@ func @rank_of_unranked(%unranked: memref<*xi32>) {
 
 // CHECK-LABEL: func @rank_of_ranked
 // CHECK32-LABEL: func @rank_of_ranked
-func @rank_of_ranked(%ranked: memref<?xi32>) {
+func.func @rank_of_ranked(%ranked: memref<?xi32>) {
   %rank = memref.rank %ranked : memref<?xi32>
   return
 }
@@ -862,7 +862,7 @@ func @rank_of_ranked(%ranked: memref<?xi32>) {
 // -----
 
 // CHECK-LABEL: func @atomic_rmw
-func @atomic_rmw(%I : memref<10xi32>, %ival : i32, %F : memref<10xf32>, %fval : f32, %i : index) {
+func.func @atomic_rmw(%I : memref<10xi32>, %ival : i32, %F : memref<10xf32>, %fval : f32, %i : index) {
   memref.atomic_rmw assign %fval, %F[%i] : (f32, memref<10xf32>) -> f32
   // CHECK: llvm.atomicrmw xchg %{{.*}}, %{{.*}} acq_rel
   memref.atomic_rmw addi %ival, %I[%i] : (i32, memref<10xi32>) -> i32
@@ -887,7 +887,7 @@ func @atomic_rmw(%I : memref<10xi32>, %ival : i32, %F : memref<10xf32>, %fval :
 // -----
 
 // CHECK-LABEL: func @collapse_static_shape_with_non_identity_layout
-func @collapse_static_shape_with_non_identity_layout(%arg: memref<1x1x8x8xf32, affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 64 + s0 + d1 * 64 + d2 * 8 + d3)>>) -> memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> {
+func.func @collapse_static_shape_with_non_identity_layout(%arg: memref<1x1x8x8xf32, affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 64 + s0 + d1 * 64 + d2 * 8 + d3)>>) -> memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> {
 // CHECK-NOT: memref.collapse_shape
   %1 = memref.collapse_shape %arg [[0, 1, 2, 3]] : memref<1x1x8x8xf32, affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 64 + s0 + d1 * 64 + d2 * 8 + d3)>> into memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
   return %1 : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
@@ -896,7 +896,7 @@ func @collapse_static_shape_with_non_identity_layout(%arg: memref<1x1x8x8xf32, a
 // -----
 
 // CHECK-LABEL: func @generic_atomic_rmw
-func @generic_atomic_rmw(%I : memref<10xi32>, %i : index) {
+func.func @generic_atomic_rmw(%I : memref<10xi32>, %i : index) {
   %x = memref.generic_atomic_rmw %I[%i] : memref<10xi32> {
     ^bb0(%old_value : i32):
       memref.atomic_yield %old_value : i32
@@ -915,7 +915,7 @@ func @generic_atomic_rmw(%I : memref<10xi32>, %i : index) {
 // -----
 
 // CHECK-LABEL: func @memref_copy_ranked
-func @memref_copy_ranked() {
+func.func @memref_copy_ranked() {
   %0 = memref.alloc() : memref<2xf32>
   // CHECK: llvm.mlir.constant(2 : index) : i64
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
@@ -949,7 +949,7 @@ func @memref_copy_ranked() {
 
 // CHECK-LABEL: func @memref_copy_contiguous
 #map = affine_map<(d0, d1)[s0] -> (d0 * 2 + s0 + d1)>
-func @memref_copy_contiguous(%in: memref<16x2xi32>, %offset: index) {
+func.func @memref_copy_contiguous(%in: memref<16x2xi32>, %offset: index) {
   %buf = memref.alloc() : memref<1x2xi32>
   %sub = memref.subview %in[%offset, 0] [1, 2] [1, 1] : memref<16x2xi32> to memref<1x2xi32, #map>
   memref.copy %sub, %buf : memref<1x2xi32, #map> to memref<1x2xi32>
@@ -977,7 +977,7 @@ func @memref_copy_contiguous(%in: memref<16x2xi32>, %offset: index) {
 
 // CHECK-LABEL: func @memref_copy_noncontiguous
 #map = affine_map<(d0, d1)[s0] -> (d0 * 2 + s0 + d1)>
-func @memref_copy_noncontiguous(%in: memref<16x2xi32>, %offset: index) {
+func.func @memref_copy_noncontiguous(%in: memref<16x2xi32>, %offset: index) {
   %buf = memref.alloc() : memref<2x1xi32>
   %sub = memref.subview %in[%offset, 0] [2, 1] [1, 1] : memref<16x2xi32> to memref<2x1xi32, #map>
   memref.copy %sub, %buf : memref<2x1xi32, #map> to memref<2x1xi32>
@@ -988,7 +988,7 @@ func @memref_copy_noncontiguous(%in: memref<16x2xi32>, %offset: index) {
 // -----
 
 // CHECK-LABEL: func @memref_copy_unranked
-func @memref_copy_unranked() {
+func.func @memref_copy_unranked() {
   %0 = memref.alloc() : memref<2xi1>
   // CHECK: llvm.mlir.constant(2 : index) : i64
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<i1>, ptr<i1>, i64, array<1 x i64>, array<1 x i64>)>

diff --git a/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir b/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
index f2aec7bbd842c..adaed0ea3a910 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
@@ -5,7 +5,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
+  func.func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
     %0 = memref.alloc() : memref<4x5xf32, 3>
     %1 = memref.load %0[%arg0, %arg1] : memref<4x5xf32, 3>
     memref.store %1, %0[%arg0, %arg1] : memref<4x5xf32, 3>
@@ -30,7 +30,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
+  func.func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
     %0 = memref.alloc() : memref<4x5xi16, 3>
     %1 = memref.load %0[%arg0, %arg1] : memref<4x5xi16, 3>
     memref.store %1, %0[%arg0, %arg1] : memref<4x5xi16, 3>
@@ -59,7 +59,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @two_allocs() {
+  func.func @two_allocs() {
     %0 = memref.alloc() : memref<4x5xf32, 3>
     %1 = memref.alloc() : memref<2x3xi32, 3>
     return
@@ -79,7 +79,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @two_allocs_vector() {
+  func.func @two_allocs_vector() {
     %0 = memref.alloc() : memref<4xvector<4xf32>, 3>
     %1 = memref.alloc() : memref<2xvector<2xi32>, 3>
     return
@@ -100,7 +100,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : index) {
+  func.func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : index) {
     // expected-error @+1 {{unhandled allocation type}}
     %0 = memref.alloc(%arg0) : memref<4x?xf32, 3>
     return
@@ -114,7 +114,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @alloc_dealloc_mem() {
+  func.func @alloc_dealloc_mem() {
     // expected-error @+1 {{unhandled allocation type}}
     %0 = memref.alloc() : memref<4x5xf32>
     return
@@ -129,7 +129,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : memref<4x?xf32, 3>) {
+  func.func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : memref<4x?xf32, 3>) {
     // expected-error @+1 {{unhandled deallocation type}}
     memref.dealloc %arg0 : memref<4x?xf32, 3>
     return
@@ -143,7 +143,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
   }
 {
-  func @alloc_dealloc_mem(%arg0 : memref<4x5xf32>) {
+  func.func @alloc_dealloc_mem(%arg0 : memref<4x5xf32>) {
     // expected-error @+1 {{unhandled deallocation type}}
     memref.dealloc %arg0 : memref<4x5xf32>
     return

diff --git a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
index 90492ff14247d..162cb026c1e45 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
@@ -15,7 +15,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @load_store_zero_rank_float
-func @load_store_zero_rank_float(%arg0: memref<f32>, %arg1: memref<f32>) {
+func.func @load_store_zero_rank_float(%arg0: memref<f32>, %arg1: memref<f32>) {
   //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32> to !spv.ptr<!spv.struct<(!spv.array<1 x f32, stride=4> [0])>, StorageBuffer>
   //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32> to !spv.ptr<!spv.struct<(!spv.array<1 x f32, stride=4> [0])>, StorageBuffer>
   //      CHECK: [[ZERO1:%.*]] = spv.Constant 0 : i32
@@ -34,7 +34,7 @@ func @load_store_zero_rank_float(%arg0: memref<f32>, %arg1: memref<f32>) {
 }
 
 // CHECK-LABEL: @load_store_zero_rank_int
-func @load_store_zero_rank_int(%arg0: memref<i32>, %arg1: memref<i32>) {
+func.func @load_store_zero_rank_int(%arg0: memref<i32>, %arg1: memref<i32>) {
   //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32> to !spv.ptr<!spv.struct<(!spv.array<1 x i32, stride=4> [0])>, StorageBuffer>
   //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32> to !spv.ptr<!spv.struct<(!spv.array<1 x i32, stride=4> [0])>, StorageBuffer>
   //      CHECK: [[ZERO1:%.*]] = spv.Constant 0 : i32
@@ -53,7 +53,7 @@ func @load_store_zero_rank_int(%arg0: memref<i32>, %arg1: memref<i32>) {
 }
 
 // CHECK-LABEL: func @load_store_unknown_dim
-func @load_store_unknown_dim(%i: index, %source: memref<?xi32>, %dest: memref<?xi32>) {
+func.func @load_store_unknown_dim(%i: index, %source: memref<?xi32>, %dest: memref<?xi32>) {
   // CHECK: %[[SRC:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32> to !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
   // CHECK: %[[DST:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32> to !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
   // CHECK: %[[AC0:.+]] = spv.AccessChain %[[SRC]]
@@ -67,7 +67,7 @@ func @load_store_unknown_dim(%i: index, %source: memref<?xi32>, %dest: memref<?x
 
 // CHECK-LABEL: func @load_i1
 //  CHECK-SAME: (%[[SRC:.+]]: memref<4xi1>, %[[IDX:.+]]: index)
-func @load_i1(%src: memref<4xi1>, %i : index) -> i1 {
+func.func @load_i1(%src: memref<4xi1>, %i : index) -> i1 {
   // CHECK-DAG: %[[SRC_CAST:.+]] = builtin.unrealized_conversion_cast %[[SRC]] : memref<4xi1> to !spv.ptr<!spv.struct<(!spv.array<4 x i8, stride=1> [0])>, StorageBuffer>
   // CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
   // CHECK: %[[ZERO_0:.+]] = spv.Constant 0 : i32
@@ -87,7 +87,7 @@ func @load_i1(%src: memref<4xi1>, %i : index) -> i1 {
 // CHECK-LABEL: func @store_i1
 //  CHECK-SAME: %[[DST:.+]]: memref<4xi1>,
 //  CHECK-SAME: %[[IDX:.+]]: index
-func @store_i1(%dst: memref<4xi1>, %i: index) {
+func.func @store_i1(%dst: memref<4xi1>, %i: index) {
   %true = arith.constant true
   // CHECK-DAG: %[[DST_CAST:.+]] = builtin.unrealized_conversion_cast %[[DST]] : memref<4xi1> to !spv.ptr<!spv.struct<(!spv.array<4 x i8, stride=1> [0])>, StorageBuffer>
   // CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
@@ -118,7 +118,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @load_i1
-func @load_i1(%arg0: memref<i1>) -> i1 {
+func.func @load_i1(%arg0: memref<i1>) -> i1 {
   //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
   //     CHECK: %[[FOUR1:.+]] = spv.Constant 4 : i32
   //     CHECK: %[[QUOTIENT:.+]] = spv.SDiv %[[ZERO]], %[[FOUR1]] : i32
@@ -143,7 +143,7 @@ func @load_i1(%arg0: memref<i1>) -> i1 {
 }
 
 // CHECK-LABEL: @load_i8
-func @load_i8(%arg0: memref<i8>) {
+func.func @load_i8(%arg0: memref<i8>) {
   //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
   //     CHECK: %[[FOUR1:.+]] = spv.Constant 4 : i32
   //     CHECK: %[[QUOTIENT:.+]] = spv.SDiv %[[ZERO]], %[[FOUR1]] : i32
@@ -165,7 +165,7 @@ func @load_i8(%arg0: memref<i8>) {
 
 // CHECK-LABEL: @load_i16
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: index)
-func @load_i16(%arg0: memref<10xi16>, %index : index) {
+func.func @load_i16(%arg0: memref<10xi16>, %index : index) {
   //     CHECK: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : index to i32
   //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
   //     CHECK: %[[OFFSET:.+]] = spv.Constant 0 : i32
@@ -191,7 +191,7 @@ func @load_i16(%arg0: memref<10xi16>, %index : index) {
 }
 
 // CHECK-LABEL: @load_i32
-func @load_i32(%arg0: memref<i32>) {
+func.func @load_i32(%arg0: memref<i32>) {
   // CHECK-NOT: spv.SDiv
   //     CHECK: spv.Load
   // CHECK-NOT: spv.ShiftRightArithmetic
@@ -200,7 +200,7 @@ func @load_i32(%arg0: memref<i32>) {
 }
 
 // CHECK-LABEL: @load_f32
-func @load_f32(%arg0: memref<f32>) {
+func.func @load_f32(%arg0: memref<f32>) {
   // CHECK-NOT: spv.SDiv
   //     CHECK: spv.Load
   // CHECK-NOT: spv.ShiftRightArithmetic
@@ -210,7 +210,7 @@ func @load_f32(%arg0: memref<f32>) {
 
 // CHECK-LABEL: @store_i1
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: i1)
-func @store_i1(%arg0: memref<i1>, %value: i1) {
+func.func @store_i1(%arg0: memref<i1>, %value: i1) {
   //     CHECK: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
   //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
   //     CHECK: %[[FOUR:.+]] = spv.Constant 4 : i32
@@ -236,7 +236,7 @@ func @store_i1(%arg0: memref<i1>, %value: i1) {
 
 // CHECK-LABEL: @store_i8
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: i8)
-func @store_i8(%arg0: memref<i8>, %value: i8) {
+func.func @store_i8(%arg0: memref<i8>, %value: i8) {
   //     CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : i8 to i32
   //     CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
   //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
@@ -260,7 +260,7 @@ func @store_i8(%arg0: memref<i8>, %value: i8) {
 
 // CHECK-LABEL: @store_i16
 //       CHECK: (%[[ARG0:.+]]: memref<10xi16>, %[[ARG1:.+]]: index, %[[ARG2:.+]]: i16)
-func @store_i16(%arg0: memref<10xi16>, %index: index, %value: i16) {
+func.func @store_i16(%arg0: memref<10xi16>, %index: index, %value: i16) {
   //     CHECK-DAG: %[[ARG2_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG2]] : i16 to i32
   //     CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
   //     CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : index to i32
@@ -288,7 +288,7 @@ func @store_i16(%arg0: memref<10xi16>, %index: index, %value: i16) {
 }
 
 // CHECK-LABEL: @store_i32
-func @store_i32(%arg0: memref<i32>, %value: i32) {
+func.func @store_i32(%arg0: memref<i32>, %value: i32) {
   //     CHECK: spv.Store
   // CHECK-NOT: spv.AtomicAnd
   // CHECK-NOT: spv.AtomicOr
@@ -297,7 +297,7 @@ func @store_i32(%arg0: memref<i32>, %value: i32) {
 }
 
 // CHECK-LABEL: @store_f32
-func @store_f32(%arg0: memref<f32>, %value: f32) {
+func.func @store_f32(%arg0: memref<f32>, %value: f32) {
   //     CHECK: spv.Store
   // CHECK-NOT: spv.AtomicAnd
   // CHECK-NOT: spv.AtomicOr
@@ -318,7 +318,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @load_i8
-func @load_i8(%arg0: memref<i8>) {
+func.func @load_i8(%arg0: memref<i8>) {
   //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
   //     CHECK: %[[FOUR1:.+]] = spv.Constant 4 : i32
   //     CHECK: %[[QUOTIENT:.+]] = spv.SDiv %[[ZERO]], %[[FOUR1]] : i32
@@ -339,7 +339,7 @@ func @load_i8(%arg0: memref<i8>) {
 }
 
 // CHECK-LABEL: @load_i16
-func @load_i16(%arg0: memref<i16>) {
+func.func @load_i16(%arg0: memref<i16>) {
   // CHECK-NOT: spv.SDiv
   //     CHECK: spv.Load
   // CHECK-NOT: spv.ShiftRightArithmetic
@@ -349,7 +349,7 @@ func @load_i16(%arg0: memref<i16>) {
 
 // CHECK-LABEL: @store_i8
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: i8)
-func @store_i8(%arg0: memref<i8>, %value: i8) {
+func.func @store_i8(%arg0: memref<i8>, %value: i8) {
   //     CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : i8 to i32
   //     CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
   //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
@@ -372,7 +372,7 @@ func @store_i8(%arg0: memref<i8>, %value: i8) {
 }
 
 // CHECK-LABEL: @store_i16
-func @store_i16(%arg0: memref<10xi16>, %index: index, %value: i16) {
+func.func @store_i16(%arg0: memref<10xi16>, %index: index, %value: i16) {
   //     CHECK: spv.Store
   // CHECK-NOT: spv.AtomicAnd
   // CHECK-NOT: spv.AtomicOr

diff --git a/mlir/test/Conversion/NVGPUToNVVM/mma-sync-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/mma-sync-to-nvvm.mlir
index e095fecdfa87e..dfc31f1da8560 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/mma-sync-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/mma-sync-to-nvvm.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt --convert-nvgpu-to-nvvm --split-input-file %s | FileCheck %s
 
 // CHECK-LABEL: @m16n8k16_fp16
-func @m16n8k16_fp16(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>, %arg2: vector<2x2xf16>) -> vector<2x2xf16> {
+func.func @m16n8k16_fp16(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>, %arg2: vector<2x2xf16>) -> vector<2x2xf16> {
   // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<4 x vector<2xf16>>
   // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<4 x vector<2xf16>>
   // CHECK: llvm.extractvalue %{{.*}}[2] : !llvm.array<4 x vector<2xf16>>
@@ -25,7 +25,7 @@ func @m16n8k16_fp16(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>, %arg2: vecto
 // -----
 
 // CHECK-LABEL: @m16n8k8_fp16
-func @m16n8k8_fp16(%arg0: vector<2x2xf16>, %arg1: vector<1x2xf16>, %arg2: vector<2x2xf16>) -> vector<2x2xf16> {
+func.func @m16n8k8_fp16(%arg0: vector<2x2xf16>, %arg1: vector<1x2xf16>, %arg2: vector<2x2xf16>) -> vector<2x2xf16> {
   // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
   // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
   // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<1 x vector<2xf16>>
@@ -48,7 +48,7 @@ func @m16n8k8_fp16(%arg0: vector<2x2xf16>, %arg1: vector<1x2xf16>, %arg2: vector
 
 
 // CHECK-LABEL: @m16n8k32_int8
-func @m16n8k32_int8(%arg0: vector<4x4xi8>, %arg1: vector<2x4xi8>, %arg2: vector<2x2xi32>) -> vector<2x2xi32> {
+func.func @m16n8k32_int8(%arg0: vector<4x4xi8>, %arg1: vector<2x4xi8>, %arg2: vector<2x2xi32>) -> vector<2x2xi32> {
   // CHECK: [[el:%.+]] = llvm.extractvalue %{{.*}}[{{.*}}] : !llvm.array<4 x vector<4xi8>>
   // CHECK: llvm.bitcast [[el]] : vector<4xi8> to i32
   // CHECK: [[el:%.+]] = llvm.extractvalue %{{.*}}[{{.*}}] : !llvm.array<4 x vector<4xi8>>
@@ -75,7 +75,7 @@ func @m16n8k32_int8(%arg0: vector<4x4xi8>, %arg1: vector<2x4xi8>, %arg2: vector<
 // -----
 
 // CHECK-LABEL: @m8n8k4_f64
-func @m8n8k4_f64(%arg0: vector<1x1xf64>, %arg1: vector<1x1xf64>, %arg2: vector<1x2xf64>) -> vector<1x2xf64> {
+func.func @m8n8k4_f64(%arg0: vector<1x1xf64>, %arg1: vector<1x1xf64>, %arg2: vector<1x2xf64>) -> vector<1x2xf64> {
   // CHECK: llvm.extractvalue
   // CHECK: llvm.extractvalue
   // CHECK: llvm.extractvalue
@@ -95,7 +95,7 @@ func @m8n8k4_f64(%arg0: vector<1x1xf64>, %arg1: vector<1x1xf64>, %arg2: vector<1
 
 
 // CHECK-LABEL: @ldmatrix_x4
-func @ldmatrix_x4(%arg0: memref<128x128xf16, 3>) ->  vector<4x2xf16> {
+func.func @ldmatrix_x4(%arg0: memref<128x128xf16, 3>) ->  vector<4x2xf16> {
   %c0  = arith.constant 0 : index
   // CHECK: nvvm.ldmatrix {{%.+}} {layout = #nvvm.mma_layout<row>, num = 4 : i32} {{.*}} -> !llvm.struct<(i32, i32, i32, i32)
   %a = nvgpu.ldmatrix %arg0[%c0, %c0] {transpose = false, numTiles = 4 : i32} : memref<128x128xf16, 3> -> vector<4x2xf16>
@@ -117,7 +117,7 @@ func @ldmatrix_x4(%arg0: memref<128x128xf16, 3>) ->  vector<4x2xf16> {
 // -----
 
 // CHECK-LABEL: @ldmatrix_x1
-func @ldmatrix_x1(%arg0: memref<128x128xf16, 3>) ->  vector<1x2xf16> {
+func.func @ldmatrix_x1(%arg0: memref<128x128xf16, 3>) ->  vector<1x2xf16> {
   %c0  = arith.constant 0 : index
   // CHECK: nvvm.ldmatrix {{%.+}} {layout = #nvvm.mma_layout<row>, num = 1 : i32} {{.*}} -> i32
   %a = nvgpu.ldmatrix %arg0[%c0, %c0] {transpose = false, numTiles = 1 : i32} : memref<128x128xf16, 3> -> vector<1x2xf16>

diff --git a/mlir/test/Conversion/OpenACCToLLVM/convert-data-operands-to-llvmir.mlir b/mlir/test/Conversion/OpenACCToLLVM/convert-data-operands-to-llvmir.mlir
index e774a21de25a8..b37ccbbdf6194 100644
--- a/mlir/test/Conversion/OpenACCToLLVM/convert-data-operands-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenACCToLLVM/convert-data-operands-to-llvmir.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt -convert-openacc-to-llvm -split-input-file %s | FileCheck %s
 
-func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.enter_data copyin(%b : memref<10xf32>) create(%a : memref<10xf32>)
   return
 }
@@ -9,7 +9,7 @@ func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testenterdataop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
+func.func @testenterdataop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
   acc.enter_data copyin(%b : memref<10xf32>) create(%a : !llvm.ptr<f32>)
   return
 }
@@ -18,7 +18,7 @@ func @testenterdataop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testenterdataop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
+func.func @testenterdataop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
   acc.enter_data copyin(%b : memref<10xf32>) create_zero(%a : memref<10xi64>) attributes {async}
   return
 }
@@ -27,7 +27,7 @@ func @testenterdataop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   %ifCond = arith.constant true
   acc.enter_data if(%ifCond) copyin(%b : memref<10xf32>) create(%a : memref<10xf32>)
   return
@@ -37,7 +37,7 @@ func @testenterdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.exit_data copyout(%b : memref<10xf32>) delete(%a : memref<10xf32>)
   return
 }
@@ -46,7 +46,7 @@ func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testexitdataop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
+func.func @testexitdataop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
   acc.exit_data copyout(%b : memref<10xf32>) delete(%a : !llvm.ptr<f32>)
   return
 }
@@ -55,7 +55,7 @@ func @testexitdataop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testexitdataop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
+func.func @testexitdataop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
   acc.exit_data copyout(%b : memref<10xf32>) delete(%a : memref<10xi64>) attributes {async}
   return
 }
@@ -64,7 +64,7 @@ func @testexitdataop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   %ifCond = arith.constant true
   acc.exit_data if(%ifCond) copyout(%b : memref<10xf32>) delete(%a : memref<10xf32>)
   return
@@ -74,7 +74,7 @@ func @testexitdataop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.update host(%b : memref<10xf32>) device(%a : memref<10xf32>)
   return
 }
@@ -83,7 +83,7 @@ func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testupdateop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
+func.func @testupdateop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
   acc.update host(%b : memref<10xf32>) device(%a : !llvm.ptr<f32>)
   return
 }
@@ -92,7 +92,7 @@ func @testupdateop(%a: !llvm.ptr<f32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testupdateop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
+func.func @testupdateop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
   acc.update host(%b : memref<10xf32>) device(%a : memref<10xi64>) attributes {async}
   return
 }
@@ -101,7 +101,7 @@ func @testupdateop(%a: memref<10xi64>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   %ifCond = arith.constant true
   acc.update if(%ifCond) host(%b : memref<10xf32>) device(%a : memref<10xf32>)
   return
@@ -111,7 +111,7 @@ func @testupdateop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.data copy(%b : memref<10xf32>) copyout(%a : memref<10xf32>) {
   }
   return
@@ -121,7 +121,7 @@ func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testdataregion(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
+func.func @testdataregion(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
   acc.data copyin(%b : memref<10xf32>) deviceptr(%c: !llvm.ptr<f32>) attach(%a : !llvm.ptr<f32>) {
   }
   return
@@ -131,7 +131,7 @@ func @testdataregion(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>)
 
 // -----
 
-func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   %ifCond = arith.constant true
   acc.data if(%ifCond) copyin_readonly(%b : memref<10xf32>) copyout_zero(%a : memref<10xf32>) {
   }
@@ -142,7 +142,7 @@ func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testdataregion(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
+func.func @testdataregion(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
   acc.data create(%b : memref<10xf32>) create_zero(%c: !llvm.ptr<f32>) no_create(%a : !llvm.ptr<f32>) {
   }
   return
@@ -152,7 +152,7 @@ func @testdataregion(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>)
 
 // -----
 
-func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.data present(%a, %b : memref<10xf32>, memref<10xf32>) {
   }
   return
@@ -162,7 +162,7 @@ func @testdataregion(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.parallel copy(%b : memref<10xf32>) copyout(%a : memref<10xf32>) {
   }
   return
@@ -172,7 +172,7 @@ func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testparallelop(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
+func.func @testparallelop(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
   acc.parallel copyin(%b : memref<10xf32>) deviceptr(%c: !llvm.ptr<f32>) attach(%a : !llvm.ptr<f32>) {
   }
   return
@@ -182,7 +182,7 @@ func @testparallelop(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>)
 
 // -----
 
-func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   %ifCond = arith.constant true
   acc.parallel if(%ifCond) copyin_readonly(%b : memref<10xf32>) copyout_zero(%a : memref<10xf32>) {
   }
@@ -193,7 +193,7 @@ func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testparallelop(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
+func.func @testparallelop(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>) -> () {
   acc.parallel create(%b : memref<10xf32>) create_zero(%c: !llvm.ptr<f32>) no_create(%a : !llvm.ptr<f32>) {
   }
   return
@@ -203,7 +203,7 @@ func @testparallelop(%a: !llvm.ptr<f32>, %b: memref<10xf32>, %c: !llvm.ptr<f32>)
 
 // -----
 
-func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.parallel present(%a: memref<10xf32>, %b: memref<10xf32>) {
   }
   return
@@ -213,7 +213,7 @@ func @testparallelop(%a: memref<10xf32>, %b: memref<10xf32>) -> () {
 
 // -----
 
-func @testparallelop(%i: i64, %a: memref<10xf32>, %b: memref<10xf32>) -> () {
+func.func @testparallelop(%i: i64, %a: memref<10xf32>, %b: memref<10xf32>) -> () {
   acc.parallel num_gangs(%i: i64) present(%a: memref<10xf32>, %b: memref<10xf32>) {
   } attributes {async}
   return

diff --git a/mlir/test/Conversion/OpenACCToSCF/convert-openacc-to-scf.mlir b/mlir/test/Conversion/OpenACCToSCF/convert-openacc-to-scf.mlir
index 29d2bb66e7289..7a189ecd6007d 100644
--- a/mlir/test/Conversion/OpenACCToSCF/convert-openacc-to-scf.mlir
+++ b/mlir/test/Conversion/OpenACCToSCF/convert-openacc-to-scf.mlir
@@ -1,6 +1,6 @@
 // RUN: mlir-opt %s -convert-openacc-to-scf -split-input-file | FileCheck %s
 
-func @testenterdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
+func.func @testenterdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
   acc.enter_data if(%ifCond) create(%a: memref<10xf32>)
   return
 }
@@ -12,7 +12,7 @@ func @testenterdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
 
 // -----
 
-func @testexitdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
+func.func @testexitdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
   acc.exit_data if(%ifCond) delete(%a: memref<10xf32>)
   return
 }
@@ -24,7 +24,7 @@ func @testexitdataop(%a: memref<10xf32>, %ifCond: i1) -> () {
 
 // -----
 
-func @testupdateop(%a: memref<10xf32>, %ifCond: i1) -> () {
+func.func @testupdateop(%a: memref<10xf32>, %ifCond: i1) -> () {
   acc.update if(%ifCond) host(%a: memref<10xf32>)
   return
 }

diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
index dcfdf18393a12..cb1e2411e8091 100644
--- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -convert-openmp-to-llvm %s  -split-input-file | FileCheck %s
 
 // CHECK-LABEL: llvm.func @master_block_arg
-func @master_block_arg() {
+func.func @master_block_arg() {
   // CHECK: omp.master
   omp.master {
   // CHECK-NEXT: ^[[BB0:.*]](%[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64):
@@ -16,7 +16,7 @@ func @master_block_arg() {
 }
 
 // CHECK-LABEL: llvm.func @branch_loop
-func @branch_loop() {
+func.func @branch_loop() {
   %start = arith.constant 0 : index
   %end = arith.constant 0 : index
   // CHECK: omp.parallel
@@ -46,7 +46,7 @@ func @branch_loop() {
 
 // CHECK-LABEL: @wsloop
 // CHECK: (%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64, %[[ARG3:.*]]: i64, %[[ARG4:.*]]: i64, %[[ARG5:.*]]: i64)
-func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) {
+func.func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: omp.parallel
   omp.parallel {
     // CHECK: omp.wsloop for (%[[ARG6:.*]], %[[ARG7:.*]]) : i64 = (%[[ARG0]], %[[ARG1]]) to (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[ARG5]]) {

diff --git a/mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir b/mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir
index 159c557cac259..4c4079143df0f 100644
--- a/mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir
+++ b/mlir/test/Conversion/SCFToControlFlow/convert-to-cfg.mlir
@@ -11,7 +11,7 @@
 //  CHECK-NEXT:    cf.br ^bb1(%[[iv]] : index)
 //  CHECK-NEXT:  ^bb3:   // pred: ^bb1
 //  CHECK-NEXT:    return
-func @simple_std_for_loop(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @simple_std_for_loop(%arg0 : index, %arg1 : index, %arg2 : index) {
   scf.for %i0 = %arg0 to %arg1 step %arg2 {
     %c1 = arith.constant 1 : index
   }
@@ -38,7 +38,7 @@ func @simple_std_for_loop(%arg0 : index, %arg1 : index, %arg2 : index) {
 //  CHECK-NEXT:    cf.br ^bb1(%[[iv0]] : index)
 //  CHECK-NEXT:  ^bb6:   // pred: ^bb1
 //  CHECK-NEXT:    return
-func @simple_std_2_for_loops(%arg0 : index, %arg1 : index, %arg2 : index) {
+func.func @simple_std_2_for_loops(%arg0 : index, %arg1 : index, %arg2 : index) {
   scf.for %i0 = %arg0 to %arg1 step %arg2 {
     %c1 = arith.constant 1 : index
     scf.for %i1 = %arg0 to %arg1 step %arg2 {
@@ -55,7 +55,7 @@ func @simple_std_2_for_loops(%arg0 : index, %arg1 : index, %arg2 : index) {
 //  CHECK-NEXT:     cf.br ^bb2
 //  CHECK-NEXT:   ^bb2:   // 2 preds: ^bb0, ^bb1
 //  CHECK-NEXT:     return
-func @simple_std_if(%arg0: i1) {
+func.func @simple_std_if(%arg0: i1) {
   scf.if %arg0 {
     %c1 = arith.constant 1 : index
   }
@@ -72,7 +72,7 @@ func @simple_std_if(%arg0: i1) {
 //  CHECK-NEXT:     cf.br ^bb3
 //  CHECK-NEXT:   ^bb3:   // 2 preds: ^bb1, ^bb2
 //  CHECK-NEXT:     return
-func @simple_std_if_else(%arg0: i1) {
+func.func @simple_std_if_else(%arg0: i1) {
   scf.if %arg0 {
     %c1 = arith.constant 1 : index
   } else {
@@ -96,7 +96,7 @@ func @simple_std_if_else(%arg0: i1) {
 //  CHECK-NEXT:   cf.br ^bb5
 //  CHECK-NEXT: ^bb5:   // 2 preds: ^bb0, ^bb4
 //  CHECK-NEXT:   return
-func @simple_std_2_ifs(%arg0: i1) {
+func.func @simple_std_2_ifs(%arg0: i1) {
   scf.if %arg0 {
     %c1 = arith.constant 1 : index
     scf.if %arg0 {
@@ -133,7 +133,7 @@ func @simple_std_2_ifs(%arg0: i1) {
 //  CHECK-NEXT:   ^bb8:   // pred: ^bb1
 //  CHECK-NEXT:     return
 //  CHECK-NEXT: }
-func @simple_std_for_loop_with_2_ifs(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : i1) {
+func.func @simple_std_for_loop_with_2_ifs(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : i1) {
   scf.for %i0 = %arg0 to %arg1 step %arg2 {
     %c1 = arith.constant 1 : index
     scf.if %arg3 {
@@ -149,7 +149,7 @@ func @simple_std_for_loop_with_2_ifs(%arg0 : index, %arg1 : index, %arg2 : index
 }
 
 // CHECK-LABEL: func @simple_if_yield
-func @simple_if_yield(%arg0: i1) -> (i1, i1) {
+func.func @simple_if_yield(%arg0: i1) -> (i1, i1) {
 // CHECK:   cf.cond_br %{{.*}}, ^[[then:.*]], ^[[else:.*]]
   %0:2 = scf.if %arg0 -> (i1, i1) {
 // CHECK: ^[[then]]:
@@ -176,7 +176,7 @@ func @simple_if_yield(%arg0: i1) -> (i1, i1) {
 }
 
 // CHECK-LABEL: func @nested_if_yield
-func @nested_if_yield(%arg0: i1) -> (index) {
+func.func @nested_if_yield(%arg0: i1) -> (index) {
 // CHECK:   cf.cond_br %{{.*}}, ^[[first_then:.*]], ^[[first_else:.*]]
   %0 = scf.if %arg0 -> i1 {
 // CHECK: ^[[first_then]]:
@@ -248,7 +248,7 @@ func @nested_if_yield(%arg0: i1) -> (index) {
 // CHECK:           return
 // CHECK:         }
 
-func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
                         %arg3 : index, %arg4 : index) {
   %step = arith.constant 1 : index
   scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
@@ -275,7 +275,7 @@ func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
 //
 // CHECK:      ^[[CONTINUE]]:
 // CHECK:        return %[[ITER_ARG0]], %[[ITER_ARG1]] : f32, f32
-func @for_yield(%arg0 : index, %arg1 : index, %arg2 : index) -> (f32, f32) {
+func.func @for_yield(%arg0 : index, %arg1 : index, %arg2 : index) -> (f32, f32) {
   %s0 = arith.constant 0.0 : f32
   %s1 = arith.constant 1.0 : f32
   %result:2 = scf.for %i0 = %arg0 to %arg1 step %arg2 iter_args(%si = %s0, %sj = %s1) -> (f32, f32) {
@@ -302,7 +302,7 @@ func @for_yield(%arg0 : index, %arg1 : index, %arg2 : index) -> (f32, f32) {
 // CHECK:         cf.br ^[[COND_OUT]](%{{.*}}, %[[ARG_IN]] : index, f32)
 // CHECK:       ^[[CONT_OUT]]:
 // CHECK:         return %[[ARG_OUT]] : f32
-func @nested_for_yield(%arg0 : index, %arg1 : index, %arg2 : index) -> f32 {
+func.func @nested_for_yield(%arg0 : index, %arg1 : index, %arg2 : index) -> f32 {
   %s0 = arith.constant 1.0 : f32
   %r = scf.for %i0 = %arg0 to %arg1 step %arg2 iter_args(%iter = %s0) -> (f32) {
     %result = scf.for %i1 = %arg0 to %arg1 step %arg2 iter_args(%si = %iter) -> (f32) {
@@ -314,11 +314,11 @@ func @nested_for_yield(%arg0 : index, %arg1 : index, %arg2 : index) -> f32 {
   return %r : f32
 }
 
-func private @generate() -> i64
+func.func private @generate() -> i64
 
 // CHECK-LABEL: @simple_parallel_reduce_loop
 // CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index, %[[STEP:.*]]: index, %[[INIT:.*]]: f32
-func @simple_parallel_reduce_loop(%arg0: index, %arg1: index,
+func.func @simple_parallel_reduce_loop(%arg0: index, %arg1: index,
                                   %arg2: index, %arg3: f32) -> f32 {
   // A parallel loop with reduction is converted through sequential loops with
   // reductions into a CFG of blocks where the partially reduced value is
@@ -358,7 +358,7 @@ func @simple_parallel_reduce_loop(%arg0: index, %arg1: index,
 
 // CHECK-LABEL: parallel_reduce_loop
 // CHECK-SAME: %[[INIT1:[0-9A-Za-z_]*]]: f32)
-func @parallel_reduce_loop(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_reduce_loop(%arg0 : index, %arg1 : index, %arg2 : index,
                            %arg3 : index, %arg4 : index, %arg5 : f32) -> (f32, i64) {
   // Multiple reduction blocks should be folded in the same body, and the
   // reduction value must be forwarded through block structures.
@@ -403,7 +403,7 @@ func @parallel_reduce_loop(%arg0 : index, %arg1 : index, %arg2 : index,
 // that the presence of unknown ops does not prevent the conversion from being
 // applied.
 // CHECK-LABEL: @unknown_op_inside_loop
-func @unknown_op_inside_loop(%arg0: index, %arg1: index, %arg2: index) {
+func.func @unknown_op_inside_loop(%arg0: index, %arg1: index, %arg2: index) {
   // CHECK-NOT: scf.for
   scf.for %i = %arg0 to %arg1 step %arg2 {
     // CHECK: unknown.op
@@ -414,7 +414,7 @@ func @unknown_op_inside_loop(%arg0: index, %arg1: index, %arg2: index) {
 }
 
 // CHECK-LABEL: @minimal_while
-func @minimal_while() {
+func.func @minimal_while() {
   // CHECK:   %[[COND:.*]] = "test.make_condition"() : () -> i1
   // CHECK:   cf.br ^[[BEFORE:.*]]
   %0 = "test.make_condition"() : () -> i1
@@ -435,7 +435,7 @@ func @minimal_while() {
 }
 
 // CHECK-LABEL: @do_while
-func @do_while(%arg0: f32) {
+func.func @do_while(%arg0: f32) {
   // CHECK:   cf.br ^[[BEFORE:.*]]({{.*}}: f32)
   scf.while (%arg1 = %arg0) : (f32) -> (f32) {
   // CHECK: ^[[BEFORE]](%[[VAL:.*]]: f32):
@@ -455,7 +455,7 @@ func @do_while(%arg0: f32) {
 
 // CHECK-LABEL: @while_values
 // CHECK-SAME: (%[[ARG0:.*]]: i32, %[[ARG1:.*]]: f32)
-func @while_values(%arg0: i32, %arg1: f32) {
+func.func @while_values(%arg0: i32, %arg1: f32) {
   // CHECK:     %[[COND:.*]] = "test.make_condition"() : () -> i1
   %0 = "test.make_condition"() : () -> i1
   %c0_i32 = arith.constant 0 : i32
@@ -483,7 +483,7 @@ func @while_values(%arg0: i32, %arg1: f32) {
 }
 
 // CHECK-LABEL: @nested_while_ops
-func @nested_while_ops(%arg0: f32) -> i64 {
+func.func @nested_while_ops(%arg0: f32) -> i64 {
   // CHECK:       cf.br ^[[OUTER_BEFORE:.*]](%{{.*}} : f32)
   %0 = scf.while(%outer = %arg0) : (f32) -> i64 {
     // CHECK:   ^[[OUTER_BEFORE]](%{{.*}}: f32):
@@ -548,7 +548,7 @@ func @nested_while_ops(%arg0: f32) -> i64 {
 
 // CHECK-LABEL: @ifs_in_parallel
 // CHECK: (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: i1, %[[ARG4:.*]]: i1)
-func @ifs_in_parallel(%arg1: index, %arg2: index, %arg3: index, %arg4: i1, %arg5: i1) {
+func.func @ifs_in_parallel(%arg1: index, %arg2: index, %arg3: index, %arg4: i1, %arg5: i1) {
   // CHECK:   cf.br ^[[LOOP_LATCH:.*]](%[[ARG0]] : index)
   // CHECK: ^[[LOOP_LATCH]](%[[LOOP_IV:.*]]: index):
   // CHECK:   %[[LOOP_COND:.*]] = arith.cmpi slt, %[[LOOP_IV]], %[[ARG1]] : index
@@ -589,7 +589,7 @@ func @ifs_in_parallel(%arg1: index, %arg2: index, %arg3: index, %arg4: i1, %arg5
 }
 
 // CHECK-LABEL: func @func_execute_region_elim_multi_yield
-func @func_execute_region_elim_multi_yield() {
+func.func @func_execute_region_elim_multi_yield() {
     "test.foo"() : () -> ()
     %v = scf.execute_region -> i64 {
       %c = "test.cmp"() : () -> i1

diff --git a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
index 1d6de6fe56acd..aaa063d9fc4b0 100644
--- a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
+++ b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
@@ -3,7 +3,7 @@
 
 // CHECK-THREADS-LABEL: @one_d_loop
 // CHECK-BLOCKS-LABEL: @one_d_loop
-func @one_d_loop(%A : memref<?xf32>, %B : memref<?xf32>) {
+func.func @one_d_loop(%A : memref<?xf32>, %B : memref<?xf32>) {
   // Bounds of the loop, its range and step.
   // CHECK-THREADS-NEXT: %{{.*}} = arith.constant 0 : index
   // CHECK-THREADS-NEXT: %{{.*}} = arith.constant 42 : index

diff --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
index 6630cacc43bf4..9beb6ed093f88 100644
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -2,7 +2,7 @@
 
 // 2-d parallel loop mapped to block.y and block.x
 
-func @parallel_loop_bidy_bidx(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop_bidy_bidx(%arg0 : index, %arg1 : index, %arg2 : index,
                               %arg3 : index, %arg4 : index,
                               %buf : memref<?x?xf32>,
                               %res : memref<?x?xf32>) {
@@ -40,7 +40,7 @@ func @parallel_loop_bidy_bidx(%arg0 : index, %arg1 : index, %arg2 : index,
 
 // tiled 2-d parallel loop mapped to block.y and block.x and thread.y and thread.x.
 
-func @parallel_loop_tiled(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop_tiled(%arg0 : index, %arg1 : index, %arg2 : index,
                         %arg3 : index,
                         %buf : memref<?x?xf32>,
                         %res : memref<?x?xf32>) {
@@ -99,7 +99,7 @@ func @parallel_loop_tiled(%arg0 : index, %arg1 : index, %arg2 : index,
 
 // 2-d parallel loop mapped to block.y and sequential
 
-func @parallel_loop_bidy_seq(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop_bidy_seq(%arg0 : index, %arg1 : index, %arg2 : index,
                              %arg3 : index, %arg4 : index,
                              %buf : memref<?x?xf32>,
                              %res : memref<?x?xf32>) {
@@ -140,7 +140,7 @@ func @parallel_loop_bidy_seq(%arg0 : index, %arg1 : index, %arg2 : index,
 
 // tiled 2-d parallel loop mapped to block.y and seq. and thread.y and seq.
 
-func @parallel_loop_tiled_seq(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop_tiled_seq(%arg0 : index, %arg1 : index, %arg2 : index,
                               %arg3 : index,
                               %buf : memref<?x?xf32>,
                               %res : memref<?x?xf32>) {
@@ -203,7 +203,7 @@ func @parallel_loop_tiled_seq(%arg0 : index, %arg1 : index, %arg2 : index,
 #map3 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 
 module {
-  func @sum(%arg0: memref<?x?xf32, #map0>, %arg1: memref<?x?xf32, #map0>, %arg2: memref<?x?xf32, #map0>) {
+  func.func @sum(%arg0: memref<?x?xf32, #map0>, %arg1: memref<?x?xf32, #map0>, %arg2: memref<?x?xf32, #map0>) {
     %c1 = arith.constant 1 : index
     %c0 = arith.constant 0 : index
     %c3 = arith.constant 3 : index
@@ -306,7 +306,7 @@ module {
 
 // Optional attribute lowering test
 
-func @parallel_loop_optional_attr() {
+func.func @parallel_loop_optional_attr() {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   scf.parallel (%i0) = (%c0) to (%c1) step (%c1) {
@@ -319,7 +319,7 @@ func @parallel_loop_optional_attr() {
 
 // Mapping to the same processor twice. Cannot be mapped.
 
-func @parallel_double_map(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_double_map(%arg0 : index, %arg1 : index, %arg2 : index,
                           %arg3 : index,
                           %buf : memref<?x?xf32>,
                           %res : memref<?x?xf32>) {
@@ -340,7 +340,7 @@ func @parallel_double_map(%arg0 : index, %arg1 : index, %arg2 : index,
 
 // Loop with loop-variant upper bound. Cannot be mapped.
 
-func @parallel_loop_loop_variant_bound(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_loop_loop_variant_bound(%arg0 : index, %arg1 : index, %arg2 : index,
                                        %arg3 : index,
                                        %buf : memref<?x?xf32>,
                                        %res : memref<?x?xf32>) {
@@ -374,7 +374,7 @@ func @parallel_loop_loop_variant_bound(%arg0 : index, %arg1 : index, %arg2 : ind
 
 // Loop without annotations. Cannot be mapped.
 
-func @parallel_no_annotations(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @parallel_no_annotations(%arg0 : index, %arg1 : index, %arg2 : index,
                               %arg3 : index,
                               %buf : memref<?x?xf32>,
                               %res : memref<?x?xf32>) {

diff --git a/mlir/test/Conversion/SCFToGPU/step_one.mlir b/mlir/test/Conversion/SCFToGPU/step_one.mlir
index 5a571401cf07c..c4668bfeba747 100644
--- a/mlir/test/Conversion/SCFToGPU/step_one.mlir
+++ b/mlir/test/Conversion/SCFToGPU/step_one.mlir
@@ -3,7 +3,7 @@
 
 // CHECK-11-LABEL: @step_1
 // CHECK-22-LABEL: @step_1
-func @step_1(%A : memref<?x?x?x?xf32>, %B : memref<?x?x?x?xf32>) {
+func.func @step_1(%A : memref<?x?x?x?xf32>, %B : memref<?x?x?x?xf32>) {
   // Bounds of the loop, its range and step.
   // CHECK-11-NEXT: %{{.*}} = arith.constant 0 : index
   // CHECK-11-NEXT: %{{.*}} = arith.constant 42 : index

diff --git a/mlir/test/Conversion/SCFToGPU/step_positive.mlir b/mlir/test/Conversion/SCFToGPU/step_positive.mlir
index 87e59c6264dc0..65d16046382d6 100644
--- a/mlir/test/Conversion/SCFToGPU/step_positive.mlir
+++ b/mlir/test/Conversion/SCFToGPU/step_positive.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck %s
 
 // CHECK-LABEL: @step_var
-func @step_var(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
+func.func @step_var(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
   // Check that we divide by step.
   // CHECK:  %[[range_i:.*]] = arith.divsi {{.*}}, %{{.*}}
   // CHECK:  %[[range_j:.*]] = arith.divsi {{.*}}, %{{.*}}

diff --git a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
index ee76868da88d8..d71f7578804d9 100644
--- a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
@@ -17,7 +17,7 @@
 // CHECK: llvm.atomicrmw fadd %[[ARG0]], %[[RHS]] monotonic
 
 // CHECK-LABEL: @reduction1
-func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
                  %arg3 : index, %arg4 : index) {
   // CHECK: %[[CST:.*]] = arith.constant 0.0
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1
@@ -63,7 +63,7 @@ func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
 // CHECK-NOT: atomic
 
 // CHECK-LABEL: @reduction2
-func @reduction2(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @reduction2(%arg0 : index, %arg1 : index, %arg2 : index,
                  %arg3 : index, %arg4 : index) {
   %step = arith.constant 1 : index
   %zero = arith.constant 0.0 : f32
@@ -97,7 +97,7 @@ func @reduction2(%arg0 : index, %arg1 : index, %arg2 : index,
 // CHECK-NOT: atomic
 
 // CHECK-LABEL: @reduction3
-func @reduction3(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @reduction3(%arg0 : index, %arg1 : index, %arg2 : index,
                  %arg3 : index, %arg4 : index) {
   %step = arith.constant 1 : index
   %zero = arith.constant 0.0 : f32
@@ -148,7 +148,7 @@ func @reduction3(%arg0 : index, %arg1 : index, %arg2 : index,
 // CHECK: llvm.atomicrmw max %[[ARG0]], %[[RHS]] monotonic
 
 // CHECK-LABEL: @reduction4
-func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
+func.func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
                  %arg3 : index, %arg4 : index) -> (f32, i64) {
   %step = arith.constant 1 : index
   // CHECK: %[[ZERO:.*]] = arith.constant 0.0

diff --git a/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir b/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
index e9c2c85e69d1f..e0fdcae1b896b 100644
--- a/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt -convert-scf-to-openmp %s | FileCheck %s
 
 // CHECK-LABEL: @parallel
-func @parallel(%arg0: index, %arg1: index, %arg2: index,
+func.func @parallel(%arg0: index, %arg1: index, %arg2: index,
           %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: omp.parallel {
   // CHECK: omp.wsloop for (%[[LVAR1:.*]], %[[LVAR2:.*]]) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
@@ -18,7 +18,7 @@ func @parallel(%arg0: index, %arg1: index, %arg2: index,
 }
 
 // CHECK-LABEL: @nested_loops
-func @nested_loops(%arg0: index, %arg1: index, %arg2: index,
+func.func @nested_loops(%arg0: index, %arg1: index, %arg2: index,
                    %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: omp.parallel {
   // CHECK: omp.wsloop for (%[[LVAR_OUT1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {
@@ -41,7 +41,7 @@ func @nested_loops(%arg0: index, %arg1: index, %arg2: index,
 }
 
 // CHECK-LABEL: @adjacent_loops
-func @adjacent_loops(%arg0: index, %arg1: index, %arg2: index,
+func.func @adjacent_loops(%arg0: index, %arg1: index, %arg2: index,
                      %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: omp.parallel {
   // CHECK: omp.wsloop for (%[[LVAR_AL1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {

diff --git a/mlir/test/Conversion/SCFToSPIRV/for.mlir b/mlir/test/Conversion/SCFToSPIRV/for.mlir
index c998d39825a28..f46c45a286d04 100644
--- a/mlir/test/Conversion/SCFToSPIRV/for.mlir
+++ b/mlir/test/Conversion/SCFToSPIRV/for.mlir
@@ -5,7 +5,7 @@ module attributes {
     #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
 } {
 
-func @loop_kernel(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>) {
+func.func @loop_kernel(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>) {
   // CHECK: %[[LB:.*]] = spv.Constant 4 : i32
   %lb = arith.constant 4 : index
   // CHECK: %[[UB:.*]] = spv.Constant 42 : i32
@@ -43,7 +43,7 @@ func @loop_kernel(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>) {
 }
 
 // CHECK-LABEL: @loop_yield
-func @loop_yield(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>) {
+func.func @loop_yield(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>) {
   // CHECK: %[[LB:.*]] = spv.Constant 4 : i32
   %lb = arith.constant 4 : index
   // CHECK: %[[UB:.*]] = spv.Constant 42 : i32

diff --git a/mlir/test/Conversion/SCFToSPIRV/if.mlir b/mlir/test/Conversion/SCFToSPIRV/if.mlir
index 224549b5391ea..b52ae2b65fbdd 100644
--- a/mlir/test/Conversion/SCFToSPIRV/if.mlir
+++ b/mlir/test/Conversion/SCFToSPIRV/if.mlir
@@ -6,7 +6,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @kernel_simple_selection
-func @kernel_simple_selection(%arg2 : memref<10xf32>, %arg3 : i1) {
+func.func @kernel_simple_selection(%arg2 : memref<10xf32>, %arg3 : i1) {
   %value = arith.constant 0.0 : f32
   %i = arith.constant 0 : index
 
@@ -26,7 +26,7 @@ func @kernel_simple_selection(%arg2 : memref<10xf32>, %arg3 : i1) {
 }
 
 // CHECK-LABEL: @kernel_nested_selection
-func @kernel_nested_selection(%arg3 : memref<10xf32>, %arg4 : memref<10xf32>, %arg5 : i1, %arg6 : i1) {
+func.func @kernel_nested_selection(%arg3 : memref<10xf32>, %arg4 : memref<10xf32>, %arg5 : i1, %arg6 : i1) {
   %i = arith.constant 0 : index
   %j = arith.constant 9 : index
 
@@ -80,7 +80,7 @@ func @kernel_nested_selection(%arg3 : memref<10xf32>, %arg4 : memref<10xf32>, %a
 }
 
 // CHECK-LABEL: @simple_if_yield
-func @simple_if_yield(%arg2 : memref<10xf32>, %arg3 : i1) {
+func.func @simple_if_yield(%arg2 : memref<10xf32>, %arg3 : i1) {
   // CHECK: %[[VAR1:.*]] = spv.Variable : !spv.ptr<f32, Function>
   // CHECK: %[[VAR2:.*]] = spv.Variable : !spv.ptr<f32, Function>
   // CHECK:       spv.mlir.selection {
@@ -124,7 +124,7 @@ func @simple_if_yield(%arg2 : memref<10xf32>, %arg3 : i1) {
 // TODO: The transformation should only be legal if VariablePointer capability
 // is supported. This test is still useful to make sure we can handle scf op
 // result with type change.
-func @simple_if_yield_type_change(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>, %arg4 : i1) {
+func.func @simple_if_yield_type_change(%arg2 : memref<10xf32>, %arg3 : memref<10xf32>, %arg4 : i1) {
   // CHECK-LABEL: @simple_if_yield_type_change
   // CHECK:       %[[VAR:.*]] = spv.Variable : !spv.ptr<!spv.ptr<!spv.struct<(!spv.array<10 x f32, stride=4> [0])>, StorageBuffer>, Function>
   // CHECK:       spv.mlir.selection {

diff --git a/mlir/test/Conversion/SCFToSPIRV/while.mlir b/mlir/test/Conversion/SCFToSPIRV/while.mlir
index d44ea1e98404e..11a1daa132577 100644
--- a/mlir/test/Conversion/SCFToSPIRV/while.mlir
+++ b/mlir/test/Conversion/SCFToSPIRV/while.mlir
@@ -6,7 +6,7 @@ module attributes {
 } {
 
 // CHECK-LABEL: @while_loop1
-func @while_loop1(%arg0: i32, %arg1: i32) -> i32 {
+func.func @while_loop1(%arg0: i32, %arg1: i32) -> i32 {
   // CHECK-SAME: (%[[ARG1:.*]]: i32, %[[ARG2:.*]]: i32)
   // CHECK: %[[INITVAR:.*]] = spv.Constant 2 : i32
   // CHECK: %[[VAR1:.*]] = spv.Variable : !spv.ptr<i32, Function>
@@ -39,7 +39,7 @@ func @while_loop1(%arg0: i32, %arg1: i32) -> i32 {
 // -----
 
 // CHECK-LABEL: @while_loop2
-func @while_loop2(%arg0: f32) -> i64 {
+func.func @while_loop2(%arg0: f32) -> i64 {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   // CHECK: %[[VAR:.*]] = spv.Variable : !spv.ptr<i64, Function>
   // CHECK: spv.mlir.loop {

diff --git a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
index c7cf3060dee09..3e94e40232ff2 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
@@ -37,7 +37,7 @@ module attributes {gpu.container_module, spv.target_env = #spv.target_env<#spv.v
     }
   }
 
-  func @main() {
+  func.func @main() {
     %buffer = memref.alloc() : memref<6xi32>
     %one = arith.constant 1 : index
     gpu.launch_func @foo::@bar blocks in (%one, %one, %one)

diff --git a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
index 7624ab8119b9f..0c6aca5e6a5d7 100644
--- a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
+++ b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
@@ -9,7 +9,7 @@
 // CHECK:           cf.assert %[[BROADCAST_IS_VALID]], "required broadcastable shapes"
 // CHECK:           return %[[RET]] : !shape.witness
 // CHECK:         }
-func @cstr_broadcastable(%arg0: tensor<?xindex>, %arg1: tensor<?xindex>) -> !shape.witness {
+func.func @cstr_broadcastable(%arg0: tensor<?xindex>, %arg1: tensor<?xindex>) -> !shape.witness {
   %witness = shape.cstr_broadcastable %arg0, %arg1 : tensor<?xindex>, tensor<?xindex>
   return %witness : !shape.witness
 }
@@ -22,13 +22,13 @@ func @cstr_broadcastable(%arg0: tensor<?xindex>, %arg1: tensor<?xindex>) -> !sha
 // CHECK:           cf.assert %[[EQUAL_IS_VALID]], "required equal shapes"
 // CHECK:           return %[[RET]] : !shape.witness
 // CHECK:         }
-func @cstr_eq(%arg0: tensor<?xindex>, %arg1: tensor<?xindex>) -> !shape.witness {
+func.func @cstr_eq(%arg0: tensor<?xindex>, %arg1: tensor<?xindex>) -> !shape.witness {
   %witness = shape.cstr_eq %arg0, %arg1 : tensor<?xindex>, tensor<?xindex>
   return %witness : !shape.witness
 }
 
 // CHECK-LABEL: func @cstr_require
-func @cstr_require(%arg0: i1) -> !shape.witness {
+func.func @cstr_require(%arg0: i1) -> !shape.witness {
   // CHECK: %[[RET:.*]] = shape.const_witness true
   // CHECK: cf.assert %arg0, "msg"
   // CHECK: return %[[RET]]

diff --git a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
index 5ebee9a4d5e25..5c77a68445d27 100644
--- a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
+++ b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
@@ -3,7 +3,7 @@
 // Lower binary ops.
 // CHECK-LABEL: @binary_ops
 // CHECK-SAME: (%[[LHS:.*]]: index, %[[RHS:.*]]: index)
-func @binary_ops(%lhs : index, %rhs : index) {
+func.func @binary_ops(%lhs : index, %rhs : index) {
   // CHECK: arith.addi %[[LHS]], %[[RHS]] : index
   %sum = shape.add %lhs, %rhs : index, index -> index
   // CHECK: arith.muli %[[LHS]], %[[RHS]] : index
@@ -16,7 +16,7 @@ func @binary_ops(%lhs : index, %rhs : index) {
 // Don't lower binary ops when they operate on `shape.size`.
 // CHECK-LABEL: @binary_ops_on_size
 // CHECK-SAME: (%[[LHS:.*]]: !shape.size, %[[RHS:.*]]: !shape.size)
-func @binary_ops_on_size(%lhs : !shape.size, %rhs : !shape.size) {
+func.func @binary_ops_on_size(%lhs : !shape.size, %rhs : !shape.size) {
   // CHECK: shape.add %[[LHS]], %[[RHS]] : !shape.size, !shape.size -> !shape.size
   // CHECK: shape.mul %[[LHS]], %[[RHS]] : !shape.size, !shape.size -> !shape.size
   %sum = shape.add %lhs, %rhs : !shape.size, !shape.size -> !shape.size
@@ -29,7 +29,7 @@ func @binary_ops_on_size(%lhs : !shape.size, %rhs : !shape.size) {
 // Convert `rank` to `dim` of the first dimension.
 // CHECK-LABEL: @rank
 // CHECK-SAME: (%[[SHAPE:.*]]: tensor<?xindex>) -> index
-func @rank(%shape : tensor<?xindex>) -> index {
+func.func @rank(%shape : tensor<?xindex>) -> index {
   // CHECK: %[[C0:.*]] = arith.constant 0 : index
   // CHECK: %[[RESULT:.*]] = tensor.dim %[[SHAPE]], %[[C0]]
   // CHECK: return %[[RESULT]] : index
@@ -41,7 +41,7 @@ func @rank(%shape : tensor<?xindex>) -> index {
 
 // Don't lower `get_extent` if it is of type `shape.size`.
 // CHECK-LABEL: @get_extent
-func @get_extent(%shape : tensor<?xindex>, %idx : !shape.size) -> !shape.size {
+func.func @get_extent(%shape : tensor<?xindex>, %idx : !shape.size) -> !shape.size {
   // CHECK: shape.get_extent
   %result = shape.get_extent %shape, %idx
       : tensor<?xindex>, !shape.size -> !shape.size
@@ -52,7 +52,7 @@ func @get_extent(%shape : tensor<?xindex>, %idx : !shape.size) -> !shape.size {
 
 // Don't lower `rank` if type is not error-free.
 // CHECK-LABEL: @rank
-func @rank(%shape : !shape.shape) {
+func.func @rank(%shape : !shape.shape) {
   // CHECK: shape.rank
   %rank = shape.rank %shape : !shape.shape -> !shape.size
   return
@@ -64,7 +64,7 @@ func @rank(%shape : !shape.shape) {
 // `shape_of` operation.
 // CHECK-LABEL: @get_extent_shape_of
 // CHECK-SAME:  (%[[ARG:.*]]: tensor<2x3xf32>, %[[IDX:.*]]: index) -> index
-func @get_extent_shape_of(%arg : tensor<2x3xf32>, %idx : index) -> index {
+func.func @get_extent_shape_of(%arg : tensor<2x3xf32>, %idx : index) -> index {
   // CHECK: %[[RESULT:.*]] = tensor.dim %[[ARG]], %[[IDX]] : tensor<2x3xf32>
   // CHECK: return %[[RESULT]] : index
   %shape = shape.shape_of %arg : tensor<2x3xf32> -> tensor<?xindex>
@@ -77,7 +77,7 @@ func @get_extent_shape_of(%arg : tensor<2x3xf32>, %idx : index) -> index {
 // Express `get_extent` as `tensor.extract`.
 // CHECK-LABEL: @get_extent_from_extent_tensor
 // CHECK-SAME: (%[[EXTENTS:.*]]: tensor<?xindex>, %[[IDX:.*]]: index) -> index
-func @get_extent_from_extent_tensor(%extents : tensor<?xindex>, %idx : index)
+func.func @get_extent_from_extent_tensor(%extents : tensor<?xindex>, %idx : index)
     -> index {
   // CHECK: %[[RESULT:.*]] = tensor.extract %[[EXTENTS]][%[[IDX]]] : tensor<?xindex>
   // CHECK: return %[[RESULT]] : index
@@ -90,7 +90,7 @@ func @get_extent_from_extent_tensor(%extents : tensor<?xindex>, %idx : index)
 // Lower `const_shape` to `tensor.from_elements`.
 // CHECK-LABEL: @const_shape
 // CHECK-SAME: () -> tensor<3xindex>
-func @const_shape() -> tensor<3xindex> {
+func.func @const_shape() -> tensor<3xindex> {
   // CHECK: %[[C1:.*]] = arith.constant 1 : index
   // CHECK: %[[C2:.*]] = arith.constant 2 : index
   // CHECK: %[[C3:.*]] = arith.constant 3 : index
@@ -106,7 +106,7 @@ func @const_shape() -> tensor<3xindex> {
 // Lower `const_shape` in the case of rank 0.
 // CHECK-LABEL: func @const_shape_zero_elements
 // CHECK-SAME: () -> tensor<0xindex>
-func @const_shape_zero_elements() -> tensor<0xindex> {
+func.func @const_shape_zero_elements() -> tensor<0xindex> {
   // CHECK: %[[TENSOR:.*]] = tensor.from_elements : tensor<0xindex>
   // CHECK: %[[RESULT:.*]] = tensor.cast %[[TENSOR]] : tensor<0xindex> to tensor<0xindex>
   // CHECK: return %[[RESULT]] : tensor<0xindex>
@@ -119,7 +119,7 @@ func @const_shape_zero_elements() -> tensor<0xindex> {
 // Lower `any` to its first operand.
 // CHECK-LABEL: @any_of_three
 // CHECK-SAME:  (%[[A:.*]]: tensor<?xindex>, %[[B:.*]]: tensor<?xindex>, %[[C:.*]]: tensor<?xindex>) -> tensor<?xindex>
-func @any_of_three(%a : tensor<?xindex>,
+func.func @any_of_three(%a : tensor<?xindex>,
                    %b : tensor<?xindex>,
                    %c : tensor<?xindex>) -> tensor<?xindex> {
   // CHECK: return %[[A]] : tensor<?xindex>
@@ -132,7 +132,7 @@ func @any_of_three(%a : tensor<?xindex>,
 // Lower `any` to its first operand.
 // CHECK-LABEL: @any_of_one
 // CHECK-SAME:  (%[[A:.*]]: tensor<?xindex>) -> tensor<?xindex>
-func @any_of_one(%a : tensor<?xindex>) -> tensor<?xindex> {
+func.func @any_of_one(%a : tensor<?xindex>) -> tensor<?xindex> {
   // CHECK: return %[[A]] : tensor<?xindex>
   %result = "shape.any"(%a) : (tensor<?xindex>) -> tensor<?xindex>
   return %result : tensor<?xindex>
@@ -142,7 +142,7 @@ func @any_of_one(%a : tensor<?xindex>) -> tensor<?xindex> {
 
 // Lower 'const_size` to `arith.constant`
 // CHECK-LABEL: @const_size
-func @const_size() -> index {
+func.func @const_size() -> index {
   // CHECK: %[[RES:.*]] = arith.constant 42 : index
   %size = shape.const_size 42
   %result = shape.size_to_index %size : !shape.size
@@ -156,7 +156,7 @@ func @const_size() -> index {
 // Fold to_extent_tensor when already on tensor.
 // CHECK-LABEL: @to_extent_tensor
 // CHECK-SAME: (%[[ARG:.*]]: tensor<?xindex>
-func @to_extent_tensor(%arg: tensor<?xindex>) -> tensor<3xindex> {
+func.func @to_extent_tensor(%arg: tensor<?xindex>) -> tensor<3xindex> {
   // CHECK-NOT: to_extent_tensor
   // CHECK: %[[RES:.*]] = tensor.cast %[[ARG]] : tensor<?xindex> to tensor<3xindex
   %casted = shape.to_extent_tensor %arg : tensor<?xindex> -> tensor<3xindex>
@@ -166,7 +166,7 @@ func @to_extent_tensor(%arg: tensor<?xindex>) -> tensor<3xindex> {
 
 // CHECK-LABEL: @shape_reduce
 // CHECK-SAME:  (%[[SHAPE:.*]]: tensor<?xindex>) -> index
-func @shape_reduce(%shape : tensor<?xindex>) -> index {
+func.func @shape_reduce(%shape : tensor<?xindex>) -> index {
   %init = arith.constant 1 : index
   %num_elements = shape.reduce(%shape, %init) : tensor<?xindex> -> index {
     ^bb0(%index : index, %extent : index, %acc: index):
@@ -191,7 +191,7 @@ func @shape_reduce(%shape : tensor<?xindex>) -> index {
 // Don't lower `shape_of` for result type of `shape.shape`.
 // CHECK-LABEL: @shape_of
 // CHECK-SAME: (%[[ARG:.*]]: tensor<*xf32>)
-func @shape_of(%arg : tensor<*xf32>) {
+func.func @shape_of(%arg : tensor<*xf32>) {
   // CHECK: shape.shape
   %shape = shape.shape_of %arg : tensor<*xf32> -> !shape.shape
   return
@@ -202,7 +202,7 @@ func @shape_of(%arg : tensor<*xf32>) {
 // Lower `shape_of` for unranked tensors.
 // CHECK-LABEL: @shape_of_unranked
 // CHECK-SAME: (%[[ARG:.*]]: tensor<*xf32>)
-func @shape_of_unranked(%arg : tensor<*xf32>) {
+func.func @shape_of_unranked(%arg : tensor<*xf32>) {
   // CHECK: %[[RANK:.*]] = tensor.rank %[[ARG]] : tensor<*xf32>
   // CHECK: %[[SHAPE:.*]] = tensor.generate %[[RANK]] {
   // CHECK: ^bb0(%[[I:.*]]: index):
@@ -218,7 +218,7 @@ func @shape_of_unranked(%arg : tensor<*xf32>) {
 // Don't lower `shape_of` with `shape.shape` type.
 // CHECK-LABEL: @shape_of
 // CHECK-SAME: (%[[ARG:.*]]: tensor<1x2x3xf32>)
-func @shape_of_stat(%arg : tensor<1x2x3xf32>) {
+func.func @shape_of_stat(%arg : tensor<1x2x3xf32>) {
   // CHECK: shape.shape_of %[[ARG]] : tensor<1x2x3xf32> -> !shape.shape
   %shape = shape.shape_of %arg : tensor<1x2x3xf32> -> !shape.shape
   return
@@ -229,7 +229,7 @@ func @shape_of_stat(%arg : tensor<1x2x3xf32>) {
 // Lower `shape_of` for statically shaped tensor.
 // CHECK-LABEL: @shape_of_stat
 // CHECK-SAME: (%[[ARG:.*]]: tensor<1x2x3xf32>)
-func @shape_of_stat(%arg : tensor<1x2x3xf32>) {
+func.func @shape_of_stat(%arg : tensor<1x2x3xf32>) {
   // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
   // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
   // CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
@@ -243,7 +243,7 @@ func @shape_of_stat(%arg : tensor<1x2x3xf32>) {
 // Lower `shape_of` for 0-D tensor.
 // CHECK-LABEL: @shape_of_zero_d
 // CHECK-SAME: (%[[ARG:.*]]: tensor<f32>)
-func @shape_of_zero_d(%arg : tensor<f32>) {
+func.func @shape_of_zero_d(%arg : tensor<f32>) {
   // CHECK-DAG: %[[SHAPE_UNCASTED:.*]] = tensor.from_elements : tensor<0xindex>
   %shape = shape.shape_of %arg : tensor<f32> -> tensor<?xindex>
   return
@@ -254,7 +254,7 @@ func @shape_of_zero_d(%arg : tensor<f32>) {
 // Lower `shape_of` for dynamically shaped tensor.
 // CHECK-LABEL: @shape_of_dyn
 // CHECK-SAME: (%[[ARG:.*]]: tensor<1x5x?xf32>)
-func @shape_of_dyn(%arg : tensor<1x5x?xf32>) {
+func.func @shape_of_dyn(%arg : tensor<1x5x?xf32>) {
   // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
   // CHECK-DAG: %[[C5:.*]] = arith.constant 5 : index
   // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -268,7 +268,7 @@ func @shape_of_dyn(%arg : tensor<1x5x?xf32>) {
 
 // CHECK-LABEL:  @shape_eq
 // CHECK-SAME:   (%[[A:.*]]: tensor<?xindex>, %[[B:.*]]: tensor<?xindex>) -> i1
-func @shape_eq(%a : tensor<?xindex>, %b : tensor<?xindex>) -> i1 {
+func.func @shape_eq(%a : tensor<?xindex>, %b : tensor<?xindex>) -> i1 {
   // CHECK: %[[C0:.*]] = arith.constant 0 : index
   // CHECK: %[[RANK_A:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xindex>
   // CHECK: %[[RANK_B:.*]] = tensor.dim %[[B]], %[[C0]] : tensor<?xindex>
@@ -297,7 +297,7 @@ func @shape_eq(%a : tensor<?xindex>, %b : tensor<?xindex>) -> i1 {
 
 // CHECK-LABEL:  @shape_eq
 // CHECK-SAME:   (%[[A:.*]]: tensor<?xindex>, %[[B:.*]]: tensor<?xindex>, %[[C:.*]]: tensor<?xindex>) -> i1
-func @shape_eq(%a : tensor<?xindex>, %b : tensor<?xindex>, %c : tensor<?xindex>) -> i1 {
+func.func @shape_eq(%a : tensor<?xindex>, %b : tensor<?xindex>, %c : tensor<?xindex>) -> i1 {
   // CHECK: %[[C0:.*]] = arith.constant 0 : index
   // CHECK: %[[RANK_A:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xindex>
   // CHECK: %[[RANK_B:.*]] = tensor.dim %[[B]], %[[C0]] : tensor<?xindex>
@@ -344,7 +344,7 @@ func @shape_eq(%a : tensor<?xindex>, %b : tensor<?xindex>, %c : tensor<?xindex>)
 
 // Don't lower `shape.broadcast` if a `shape.shape` type is involved.
 // CHECK-LABEL: @broadcast
-func @broadcast(%a : tensor<?xindex>, %b : !shape.shape) -> !shape.shape {
+func.func @broadcast(%a : tensor<?xindex>, %b : !shape.shape) -> !shape.shape {
   // CHECK: shape.broadcast
   %c = shape.broadcast %a, %b : tensor<?xindex>, !shape.shape -> !shape.shape
   return %c : !shape.shape
@@ -352,7 +352,7 @@ func @broadcast(%a : tensor<?xindex>, %b : !shape.shape) -> !shape.shape {
 
 // -----
 
-func @try_is_broadcastable (%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>) -> i1 {
+func.func @try_is_broadcastable (%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>) -> i1 {
   %0 = shape.is_broadcastable %a, %b, %c : tensor<2xindex>, tensor<3xindex>, tensor<2xindex>
   return %0 : i1
 }
@@ -442,7 +442,7 @@ func @try_is_broadcastable (%a : tensor<2xindex>, %b : tensor<3xindex>, %c : ten
 
 // -----
 
-func @broadcast(%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>) -> !shape.witness {
+func.func @broadcast(%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>) -> !shape.witness {
   %0 = shape.cstr_broadcastable %a, %b, %c : tensor<2xindex>, tensor<3xindex>, tensor<2xindex>
   return %0 : !shape.witness
 }
@@ -536,7 +536,7 @@ func @broadcast(%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>
 
 // -----
 
-func @broadcast_3_shapes_different_extents(%a : tensor<2xindex>,
+func.func @broadcast_3_shapes_different_extents(%a : tensor<2xindex>,
                                            %b : tensor<3xindex>,
                                            %c : tensor<2xindex>) {
 // CHECK-LABEL:   func @broadcast_3_shapes_different_extents(
@@ -596,7 +596,7 @@ func @broadcast_3_shapes_different_extents(%a : tensor<2xindex>,
 // -----
 
 // CHECK-LABEL: @broadcast_to_known_rank
-func @broadcast_to_known_rank(%a : tensor<1xindex>, %b : tensor<3xindex>)
+func.func @broadcast_to_known_rank(%a : tensor<1xindex>, %b : tensor<3xindex>)
     -> tensor<3xindex> {
   // CHECK: %[[RES:.*]] = tensor.cast %{{.*}} : tensor<?xindex> to tensor<3xindex>
   // CHECK: return %[[RES]] : tensor<3xindex>
@@ -609,7 +609,7 @@ func @broadcast_to_known_rank(%a : tensor<1xindex>, %b : tensor<3xindex>)
 // Lower `split_at`
 // CHECK-LABEL: @split_at
 // CHECK-SAME: %[[SHAPE:.*]]: tensor<?xindex>, %[[INDEX:.*]]: index
-func @split_at(%shape: tensor<?xindex>, %index: index) -> (tensor<?xindex>, tensor<?xindex>) {
+func.func @split_at(%shape: tensor<?xindex>, %index: index) -> (tensor<?xindex>, tensor<?xindex>) {
   // CHECK-NEXT: %[[C0:.*]] = arith.constant 0 : index
   // CHECK-NEXT: %[[RANK:.*]] = tensor.dim %[[SHAPE]], %[[C0]] : tensor<?xindex>
   // CHECK-NEXT: %[[POSINDEX:.*]] = arith.addi %[[INDEX]], %[[RANK]] : index

diff --git a/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-callers.mlir b/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-callers.mlir
index d18bb6f692ed5..dd9fc39236562 100644
--- a/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-callers.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-callers.mlir
@@ -3,7 +3,7 @@
 // CHECK: llvm.func @res_attrs_with_memref_return() -> (!llvm.struct{{.*}} {test.returnOne})
 // CHECK-LABEL: llvm.func @_mlir_ciface_res_attrs_with_memref_return
 // CHECK: %{{.*}}: !llvm.ptr{{.*}} {test.returnOne}
-func @res_attrs_with_memref_return() -> (memref<f32> {test.returnOne}) {
+func.func @res_attrs_with_memref_return() -> (memref<f32> {test.returnOne}) {
   %0 = memref.alloc() : memref<f32>
   return %0 : memref<f32>
 }
@@ -11,7 +11,7 @@ func @res_attrs_with_memref_return() -> (memref<f32> {test.returnOne}) {
 // CHECK: llvm.func @res_attrs_with_value_return() -> (f32 {test.returnOne = 1 : i64})
 // CHECK-LABEL: llvm.func @_mlir_ciface_res_attrs_with_value_return
 // CHECK: -> (f32 {test.returnOne = 1 : i64})
-func @res_attrs_with_value_return() -> (f32 {test.returnOne = 1}) {
+func.func @res_attrs_with_value_return() -> (f32 {test.returnOne = 1}) {
   %0 = arith.constant 1.00 : f32
   return %0 : f32
 }
@@ -19,7 +19,7 @@ func @res_attrs_with_value_return() -> (f32 {test.returnOne = 1}) {
 // CHECK: llvm.func @multiple_return() -> (!llvm.struct<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
 // CHECK-LABEL: llvm.func @_mlir_ciface_multiple_return
 // CHECK: (%{{.*}}: !llvm.ptr<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
-func @multiple_return() -> (memref<f32> {test.returnOne = 1}, f32 {test.returnTwo = 2, test.returnThree = 3}) {
+func.func @multiple_return() -> (memref<f32> {test.returnOne = 1}, f32 {test.returnTwo = 2, test.returnThree = 3}) {
   %0 = memref.alloc() : memref<f32>
   %1 = arith.constant 1.00 : f32
   return %0, %1 : memref<f32>, f32
@@ -28,7 +28,7 @@ func @multiple_return() -> (memref<f32> {test.returnOne = 1}, f32 {test.returnTw
 // CHECK: llvm.func @multiple_return_missing_res_attr() -> (!llvm.struct<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
 // CHECK-LABEL: llvm.func @_mlir_ciface_multiple_return_missing_res_attr
 // CHECK: (%{{.*}}: !llvm.ptr<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
-func @multiple_return_missing_res_attr() -> (memref<f32> {test.returnOne = 1}, i64, f32 {test.returnTwo = 2, test.returnThree = 3}) {
+func.func @multiple_return_missing_res_attr() -> (memref<f32> {test.returnOne = 1}, i64, f32 {test.returnTwo = 2, test.returnThree = 3}) {
   %0 = memref.alloc() : memref<f32>
   %1 = arith.constant 2 : i64
   %2 = arith.constant 1.00 : f32
@@ -38,7 +38,7 @@ func @multiple_return_missing_res_attr() -> (memref<f32> {test.returnOne = 1}, i
 // CHECK: llvm.func @one_arg_attr_no_res_attrs_with_memref_return({{.*}}) -> !llvm.struct{{.*}}
 // CHECK-LABEL: llvm.func @_mlir_ciface_one_arg_attr_no_res_attrs_with_memref_return
 // CHECK: %{{.*}}: !llvm.ptr<{{.*}}>, %{{.*}}: !llvm.ptr<{{.*}}> {test.argOne = 1 : i64}
-func @one_arg_attr_no_res_attrs_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> memref<f32> {
+func.func @one_arg_attr_no_res_attrs_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> memref<f32> {
   %0 = memref.alloc() : memref<f32>
   return %0 : memref<f32>
 }
@@ -46,7 +46,7 @@ func @one_arg_attr_no_res_attrs_with_memref_return(%arg0: memref<f32> {test.argO
 // CHECK: llvm.func @one_arg_attr_one_res_attr_with_memref_return({{.*}}) -> (!llvm.struct<{{.*}}> {test.returnOne = 1 : i64})
 // CHECK-LABEL: llvm.func @_mlir_ciface_one_arg_attr_one_res_attr_with_memref_return
 // CHECK: (%{{.*}}: !llvm.ptr<{{.*}}> {test.returnOne = 1 : i64}, %{{.*}}: !llvm.ptr<{{.*}}> {test.argOne = 1 : i64}
-func @one_arg_attr_one_res_attr_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> (memref<f32> {test.returnOne = 1}) {
+func.func @one_arg_attr_one_res_attr_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> (memref<f32> {test.returnOne = 1}) {
   %0 = memref.alloc() : memref<f32>
   return %0 : memref<f32>
 }
@@ -54,7 +54,7 @@ func @one_arg_attr_one_res_attr_with_memref_return(%arg0: memref<f32> {test.argO
 // CHECK: llvm.func @one_arg_attr_one_res_attr_with_value_return({{.*}}) -> (f32 {test.returnOne = 1 : i64})
 // CHECK-LABEL: llvm.func @_mlir_ciface_one_arg_attr_one_res_attr_with_value_return
 // CHECK: (%{{.*}}: !llvm.ptr<{{.*}}> {test.argOne = 1 : i64}) -> (f32 {test.returnOne = 1 : i64})
-func @one_arg_attr_one_res_attr_with_value_return(%arg0: memref<f32> {test.argOne = 1}) -> (f32 {test.returnOne = 1}) {
+func.func @one_arg_attr_one_res_attr_with_value_return(%arg0: memref<f32> {test.argOne = 1}) -> (f32 {test.returnOne = 1}) {
   %0 = arith.constant 1.00 : f32
   return %0 : f32
 }
@@ -62,7 +62,7 @@ func @one_arg_attr_one_res_attr_with_value_return(%arg0: memref<f32> {test.argOn
 // CHECK: llvm.func @multiple_arg_attr_multiple_res_attr({{.*}}) -> (!llvm.struct<{{.*}}> {llvm.struct_attrs = [{}, {test.returnOne = 1 : i64}, {test.returnTwo = 2 : i64}]})
 // CHECK-LABEL: llvm.func @_mlir_ciface_multiple_arg_attr_multiple_res_attr
 // CHECK: (%{{.*}}: !llvm.ptr<{{.*}}> {llvm.struct_attrs = [{}, {test.returnOne = 1 : i64}, {test.returnTwo = 2 : i64}]}, %{{.*}}: !llvm.ptr<{{.*}}> {test.argZero = 0 : i64}, %{{.*}}: f32, %{{.*}}: i32 {test.argTwo = 2 : i64}
-func @multiple_arg_attr_multiple_res_attr(%arg0: memref<f32> {test.argZero = 0}, %arg1: f32, %arg2: i32 {test.argTwo = 2}) -> (f32, memref<i32> {test.returnOne = 1}, i32 {test.returnTwo = 2}) {
+func.func @multiple_arg_attr_multiple_res_attr(%arg0: memref<f32> {test.argZero = 0}, %arg1: f32, %arg2: i32 {test.argTwo = 2}) -> (f32, memref<i32> {test.returnOne = 1}, i32 {test.returnTwo = 2}) {
   %0 = arith.constant 1.00 : f32
   %1 = memref.alloc() : memref<i32>
   %2 = arith.constant 2 : i32

diff --git a/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-functions.mlir b/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-functions.mlir
index ca136a8993d42..1e97d9548da44 100644
--- a/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-functions.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/emit-c-wrappers-for-external-functions.mlir
@@ -3,39 +3,39 @@
 // CHECK: llvm.func @res_attrs_with_memref_return() -> (!llvm.struct{{.*}} {test.returnOne})
 // CHECK-LABEL: llvm.func @_mlir_ciface_res_attrs_with_memref_return
 // CHECK: !llvm.ptr{{.*}} {test.returnOne}
-func private @res_attrs_with_memref_return() -> (memref<f32> {test.returnOne})
+func.func private @res_attrs_with_memref_return() -> (memref<f32> {test.returnOne})
 
 // CHECK: llvm.func @res_attrs_with_value_return() -> (f32 {test.returnOne = 1 : i64})
 // CHECK-LABEL: llvm.func @_mlir_ciface_res_attrs_with_value_return
 // CHECK: -> (f32 {test.returnOne = 1 : i64})
-func private @res_attrs_with_value_return() -> (f32 {test.returnOne = 1})
+func.func private @res_attrs_with_value_return() -> (f32 {test.returnOne = 1})
 
 // CHECK: llvm.func @multiple_return() -> (!llvm.struct<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
 // CHECK-LABEL: llvm.func @_mlir_ciface_multiple_return
 // CHECK: (!llvm.ptr<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
-func private @multiple_return() -> (memref<f32> {test.returnOne = 1}, f32 {test.returnTwo = 2, test.returnThree = 3})
+func.func private @multiple_return() -> (memref<f32> {test.returnOne = 1}, f32 {test.returnTwo = 2, test.returnThree = 3})
 
 // CHECK: llvm.func @multiple_return_missing_res_attr() -> (!llvm.struct<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
 // CHECK-LABEL: llvm.func @_mlir_ciface_multiple_return_missing_res_attr
 // CHECK: (!llvm.ptr<{{.*}}> {llvm.struct_attrs = [{test.returnOne = 1 : i64}, {}, {test.returnThree = 3 : i64, test.returnTwo = 2 : i64}]})
-func private @multiple_return_missing_res_attr() -> (memref<f32> {test.returnOne = 1}, i64, f32 {test.returnTwo = 2, test.returnThree = 3})
+func.func private @multiple_return_missing_res_attr() -> (memref<f32> {test.returnOne = 1}, i64, f32 {test.returnTwo = 2, test.returnThree = 3})
 
 // CHECK: llvm.func @one_arg_attr_no_res_attrs_with_memref_return({{.*}}) -> !llvm.struct{{.*}}
 // CHECK-LABEL: llvm.func @_mlir_ciface_one_arg_attr_no_res_attrs_with_memref_return
 // CHECK: !llvm.ptr<{{.*}}>, !llvm.ptr<{{.*}}> {test.argOne = 1 : i64}
-func private @one_arg_attr_no_res_attrs_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> memref<f32>
+func.func private @one_arg_attr_no_res_attrs_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> memref<f32>
 
 // CHECK: llvm.func @one_arg_attr_one_res_attr_with_memref_return({{.*}}) -> (!llvm.struct<{{.*}}> {test.returnOne = 1 : i64})
 // CHECK-LABEL: llvm.func @_mlir_ciface_one_arg_attr_one_res_attr_with_memref_return
 // CHECK: (!llvm.ptr<{{.*}}> {test.returnOne = 1 : i64}, !llvm.ptr<{{.*}}> {test.argOne = 1 : i64}
-func private @one_arg_attr_one_res_attr_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> (memref<f32> {test.returnOne = 1})
+func.func private @one_arg_attr_one_res_attr_with_memref_return(%arg0: memref<f32> {test.argOne = 1}) -> (memref<f32> {test.returnOne = 1})
 
 // CHECK: llvm.func @one_arg_attr_one_res_attr_with_value_return({{.*}}) -> (f32 {test.returnOne = 1 : i64})
 // CHECK-LABEL: llvm.func @_mlir_ciface_one_arg_attr_one_res_attr_with_value_return
 // CHECK: (!llvm.ptr<{{.*}}> {test.argOne = 1 : i64}) -> (f32 {test.returnOne = 1 : i64})
-func private @one_arg_attr_one_res_attr_with_value_return(%arg0: memref<f32> {test.argOne = 1}) -> (f32 {test.returnOne = 1})
+func.func private @one_arg_attr_one_res_attr_with_value_return(%arg0: memref<f32> {test.argOne = 1}) -> (f32 {test.returnOne = 1})
 
 // CHECK: llvm.func @multiple_arg_attr_multiple_res_attr({{.*}}) -> (!llvm.struct<{{.*}}> {llvm.struct_attrs = [{}, {test.returnOne = 1 : i64}, {test.returnTwo = 2 : i64}]})
 // CHECK-LABEL: llvm.func @_mlir_ciface_multiple_arg_attr_multiple_res_attr
 // CHECK: (!llvm.ptr<{{.*}}> {llvm.struct_attrs = [{}, {test.returnOne = 1 : i64}, {test.returnTwo = 2 : i64}]}, !llvm.ptr<{{.*}}> {test.argZero = 0 : i64}, f32, i32 {test.argTwo = 2 : i64}
-func private @multiple_arg_attr_multiple_res_attr(%arg0: memref<f32> {test.argZero = 0}, %arg1: f32, %arg2: i32 {test.argTwo = 2}) -> (f32, memref<i32> {test.returnOne = 1}, i32 {test.returnTwo = 2})
+func.func private @multiple_arg_attr_multiple_res_attr(%arg0: memref<f32> {test.argZero = 0}, %arg1: f32, %arg2: i32 {test.argTwo = 2}) -> (f32, memref<i32> {test.returnOne = 1}, i32 {test.returnTwo = 2})

diff --git a/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir b/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
index 737da471ee03a..68963d2f61e1c 100644
--- a/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
@@ -6,7 +6,7 @@
 
 // CHECK-LABEL: func @tensor_extract_constant
 // CHECK-SAME: (%[[A:.+]]: i32, %[[B:.+]]: i32, %[[C:.+]]: i32)
-func @tensor_extract_constant(%a : index, %b: index, %c: index) -> i32 {
+func.func @tensor_extract_constant(%a : index, %b: index, %c: index) -> i32 {
   // CHECK: %[[CST:.+]] = spv.Constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]>
   %cst = arith.constant dense<[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]> : tensor<2x2x3xi32>
   // CHECK: %[[VAR:.+]] = spv.Variable init(%[[CST]]) : !spv.ptr<!spv.array<12 x i32, stride=4>, Function>

diff --git a/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir b/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir
index 21bb255bcb210..9e0c72f1686ae 100644
--- a/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir
+++ b/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt --split-input-file --tosa-to-arith="include-apply-rescale=false" %s -verify-diagnostics -o -| FileCheck --check-prefix="SCALE" %s
 
 // CHECK-LABEL: func @const_test
-func @const_test() -> (tensor<i32>) {
+func.func @const_test() -> (tensor<i32>) {
   // CHECK: [[C3:%.+]] = arith.constant dense<3> : tensor<i32>
   %0 = "tosa.const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32>
 
@@ -13,7 +13,7 @@ func @const_test() -> (tensor<i32>) {
 // -----
 
 // CHECK-LABEL: @apply_scale_test_i32
-func @apply_scale_test_i32(%arg0 : i32, %arg1 : i32, %arg2 : i8) -> (i32) {
+func.func @apply_scale_test_i32(%arg0 : i32, %arg1 : i32, %arg2 : i8) -> (i32) {
   // CHECK-DAG: [[C1_8:%.+]] = arith.constant 1 : i8
   // CHECK-DAG: [[C1_32:%.+]] = arith.constant 1 : i32
   // CHECK-DAG: [[C1_64:%.+]] = arith.constant 1 : i64
@@ -51,7 +51,7 @@ func @apply_scale_test_i32(%arg0 : i32, %arg1 : i32, %arg2 : i8) -> (i32) {
 // -----
 
 // CHECK-LABEL: @apply_scale_test_vector
-func @apply_scale_test_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>, %arg2 : vector<4xi8>) -> (vector<4xi32>) {
+func.func @apply_scale_test_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>, %arg2 : vector<4xi8>) -> (vector<4xi32>) {
   // CHECK-DAG: [[C1_8:%.+]] = arith.constant dense<1> : vector<4xi8>
   // CHECK-DAG: [[C1_32:%.+]] = arith.constant dense<1> : vector<4xi32>
   // CHECK-DAG: [[C1_64:%.+]] = arith.constant dense<1> : vector<4xi64>
@@ -88,7 +88,7 @@ func @apply_scale_test_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>, %arg
 // -----
 
 // CHECK-LABEL: @apply_scale_test_i48
-func @apply_scale_test_i48(%arg0 : i48, %arg1 : i32, %arg2 : i8) -> (i32) {
+func.func @apply_scale_test_i48(%arg0 : i48, %arg1 : i32, %arg2 : i8) -> (i32) {
   // CHECK-DAG: [[C1_8:%.+]] = arith.constant 1 : i8
   // CHECK-DAG: [[C1_32:%.+]] = arith.constant 1 : i32
   // CHECK-DAG: [[C1_64:%.+]] = arith.constant 1 : i64

diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
index 91e74358e4d3d..b6060c1fa03be 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt --split-input-file -pass-pipeline="func.func(tosa-to-linalg-named)" %s -verify-diagnostics -o -| FileCheck %s
 
 // CHECK-LABEL: @matmul
-func @matmul(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x6xf32>) -> (tensor<1x5x6xf32>) {
+func.func @matmul(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x6xf32>) -> (tensor<1x5x6xf32>) {
   // CHECK: [[C0:%.+]] = arith.constant 0
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [1, 5, 6]
   // CHECK: [[FILLED:%.+]] = linalg.fill ins([[C0]] : f32) outs([[INIT]] : tensor<1x5x6xf32>) -> tensor<1x5x6xf32>
@@ -14,7 +14,7 @@ func @matmul(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x6xf32>) -> (tensor<1x5x
 
 
 // CHECK-LABEL: @matmul_quantized
-func @matmul_quantized(%arg0: tensor<1x5x3xi8>, %arg1: tensor<1x3x6xi8>) -> (tensor<1x5x6xi32>) {
+func.func @matmul_quantized(%arg0: tensor<1x5x3xi8>, %arg1: tensor<1x3x6xi8>) -> (tensor<1x5x6xi32>) {
   // CHECK: [[C0:%.+]] = arith.constant 0
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [1, 5, 6]
   // CHECK: [[FILLED:%.+]] = linalg.fill ins([[C0]] : i32) outs([[INIT]] : tensor<1x5x6xi32>) -> tensor<1x5x6xi32>
@@ -28,7 +28,7 @@ func @matmul_quantized(%arg0: tensor<1x5x3xi8>, %arg1: tensor<1x3x6xi8>) -> (ten
 // -----
 
 // CHECK-LABEL: @matmul_dyn_batch
-func @matmul_dyn_batch(%arg0: tensor<?x5x3xf32>, %arg1: tensor<?x3x6xf32>) -> (tensor<?x5x6xf32>) {
+func.func @matmul_dyn_batch(%arg0: tensor<?x5x3xf32>, %arg1: tensor<?x3x6xf32>) -> (tensor<?x5x6xf32>) {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[C0_0:.+]] = arith.constant 0
@@ -42,7 +42,7 @@ func @matmul_dyn_batch(%arg0: tensor<?x5x3xf32>, %arg1: tensor<?x3x6xf32>) -> (t
 // -----
 
 // CHECK-LABEL: @matmul_dyn_independent_dim
-func @matmul_dyn_independent_dim(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x?xf32>) -> (tensor<1x5x?xf32>) {
+func.func @matmul_dyn_independent_dim(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x?xf32>) -> (tensor<1x5x?xf32>) {
   // CHECK: %[[C2:.+]] = arith.constant 2
   // CHECK: %[[DIM:.+]] = tensor.dim %arg1, %[[C2]]
   // CHECK: %[[C0:.+]] = arith.constant 0
@@ -56,7 +56,7 @@ func @matmul_dyn_independent_dim(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x?xf
 // -----
 
 // CHECK-LABEL: @matmul_dyn_independent_dim
-func @matmul_dyn_independent_dim(%arg0: tensor<1x5x?xf32>, %arg1: tensor<1x?x6xf32>) -> (tensor<1x5x6xf32>) {
+func.func @matmul_dyn_independent_dim(%arg0: tensor<1x5x?xf32>, %arg1: tensor<1x?x6xf32>) -> (tensor<1x5x6xf32>) {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [1, 5, 6]
   // CHECK: %[[FILLED:.+]] = linalg.fill ins(%[[C0]] : f32) outs(%[[INIT]] : tensor<1x5x6xf32>) -> tensor<1x5x6xf32>
@@ -71,7 +71,7 @@ func @matmul_dyn_independent_dim(%arg0: tensor<1x5x?xf32>, %arg1: tensor<1x?x6xf
 // CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @fully_connected
-func @fully_connected(%arg0: tensor<5x3xf32>, %arg1: tensor<6x3xf32>, %arg2: tensor<6xf32>) -> (tensor<5x6xf32>) {
+func.func @fully_connected(%arg0: tensor<5x3xf32>, %arg1: tensor<6x3xf32>, %arg2: tensor<6xf32>) -> (tensor<5x6xf32>) {
   // CHECK: [[INITT:%.+]] = linalg.init_tensor [5, 6]
   // CHECK: [[ZERO:%.+]] = arith.constant 0
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[ZERO]]{{.*}}outs([[INITT]]
@@ -94,7 +94,7 @@ func @fully_connected(%arg0: tensor<5x3xf32>, %arg1: tensor<6x3xf32>, %arg2: ten
 // CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @quantized_fully_connected
-func @quantized_fully_connected(%arg0: tensor<5x3xi8>, %arg1: tensor<6x3xi8>, %arg2: tensor<6xi32>) -> (tensor<5x6xi32>) {
+func.func @quantized_fully_connected(%arg0: tensor<5x3xi8>, %arg1: tensor<6x3xi8>, %arg2: tensor<6xi32>) -> (tensor<5x6xi32>) {
   // CHECK: [[INITT:%.+]] = linalg.init_tensor [5, 6]
   // CHECK: [[ZERO:%.+]] = arith.constant 0
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[ZERO]]{{.*}}outs([[INITT]]
@@ -118,7 +118,7 @@ func @quantized_fully_connected(%arg0: tensor<5x3xi8>, %arg1: tensor<6x3xi8>, %a
 // CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @fully_connected_dyn
-func @fully_connected_dyn(%arg0: tensor<?x3xf32>, %arg1: tensor<6x3xf32>, %arg2: tensor<6xf32>) -> (tensor<?x6xf32>) {
+func.func @fully_connected_dyn(%arg0: tensor<?x3xf32>, %arg1: tensor<6x3xf32>, %arg2: tensor<6xf32>) -> (tensor<?x6xf32>) {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INITT:.+]] = linalg.init_tensor [%[[DIM]], 6]
@@ -140,7 +140,7 @@ func @fully_connected_dyn(%arg0: tensor<?x3xf32>, %arg1: tensor<6x3xf32>, %arg2:
 // -----
 
 // CHECK-LABEL: @max_pool
-func @max_pool(%arg0: tensor<1x6x34x62xf32>) -> () {
+func.func @max_pool(%arg0: tensor<1x6x34x62xf32>) -> () {
   // CHECK-DAG: [[CONST:%.+]] = arith.constant -3.40282347E+38
   // CHECK-DAG: [[INIT:%.+]] = linalg.init_tensor [1, 4, 32, 62]
   // CHECK-DAG: [[FILL:%.+]] = linalg.fill ins([[CONST]]{{.*}}outs([[INIT]]
@@ -151,7 +151,7 @@ func @max_pool(%arg0: tensor<1x6x34x62xf32>) -> () {
 }
 
 // CHECK-LABEL: @max_pool_padded
-func @max_pool_padded(%arg0: tensor<1x6x34x62xf32>) -> () {
+func.func @max_pool_padded(%arg0: tensor<1x6x34x62xf32>) -> () {
   // CHECK-DAG: [[CONST:%.+]] = arith.constant -3.40282347E+38 : f32
   // CHECK-DAG: [[PAD:%.+]] = tensor.pad %arg0 low[0, 0, 0, 0] high[0, 0, 1, 0]
   // CHECK-DAG:   tensor.yield [[CONST]]
@@ -165,7 +165,7 @@ func @max_pool_padded(%arg0: tensor<1x6x34x62xf32>) -> () {
 }
 
 // CHECK-LABEL: @max_pool_dyn
-func @max_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> () {
+func.func @max_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[CONST:.+]] = arith.constant -3.40282347E+38
@@ -178,7 +178,7 @@ func @max_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> () {
 }
 
 // CHECK-LABEL: @max_pool_i8
-func @max_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> () {
+func.func @max_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> () {
   // CHECK: arith.constant -128
   // CHECK: linalg.pooling_nhwc_max
   %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xi8>)  -> (tensor<1x4x32x62xi8>)
@@ -186,7 +186,7 @@ func @max_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> () {
 }
 
 // CHECK-LABEL: @max_pool_i16
-func @max_pool_i16(%arg0: tensor<1x6x34x62xi16>) -> () {
+func.func @max_pool_i16(%arg0: tensor<1x6x34x62xi16>) -> () {
   // CHECK: arith.constant -32768
   // CHECK: linalg.pooling_nhwc_max
   %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xi16>)  -> (tensor<1x4x32x62xi16>)
@@ -194,7 +194,7 @@ func @max_pool_i16(%arg0: tensor<1x6x34x62xi16>) -> () {
 }
 
 // CHECK-LABEL: @max_pool_i32
-func @max_pool_i32(%arg0: tensor<1x6x34x62xi32>) -> () {
+func.func @max_pool_i32(%arg0: tensor<1x6x34x62xi32>) -> () {
   // CHECK: arith.constant -2147483648
   // CHECK: linalg.pooling_nhwc_max
   %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xi32>)  -> (tensor<1x4x32x62xi32>)
@@ -203,7 +203,7 @@ func @max_pool_i32(%arg0: tensor<1x6x34x62xi32>) -> () {
 // -----
 
 // CHECK-LABEL: @avg_pool
-func @avg_pool(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>) {
+func.func @avg_pool(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>) {
   // Initial piece computes the sum of the pooling region, with appropriate padding.
   // CHECK: [[CONST:%.+]] = arith.constant 0
   // CHECK: [[PAD:%.+]] = tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
@@ -264,7 +264,7 @@ func @avg_pool(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>) {
 // -----
 
 // CHECK-LABEL: @avg_pool_dyn
-func @avg_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> (tensor<?x5x33x62xf32>) {
+func.func @avg_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> (tensor<?x5x33x62xf32>) {
   // The calculations remain the same as above, only testing for dyn behavior
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
@@ -282,7 +282,7 @@ func @avg_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> (tensor<?x5x33x62xf32>) {
 // -----
 
 // CHECK-LABEL: @avg_pool_i8
-func @avg_pool_i8(%arg0 : tensor<1x128x128x2xi8>) -> () {
+func.func @avg_pool_i8(%arg0 : tensor<1x128x128x2xi8>) -> () {
 
   // CHECK: linalg.pooling_nhwc_sum
   // CHECK: linalg.generic
@@ -311,7 +311,7 @@ func @avg_pool_i8(%arg0 : tensor<1x128x128x2xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @avg_pool_i16
-func @avg_pool_i16(%arg0 : tensor<1x128x128x2xi16>) -> () {
+func.func @avg_pool_i16(%arg0 : tensor<1x128x128x2xi16>) -> () {
 
   // CHECK: linalg.pooling_nhwc_sum
   // CHECK: linalg.generic
@@ -343,7 +343,7 @@ func @avg_pool_i16(%arg0 : tensor<1x128x128x2xi16>) -> () {
 // CHECK: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @conv2d_f32
-func @conv2d_f32(%input: tensor<1x49x42x27xf32>, %weights: tensor<28x3x3x27xf32>, %bias: tensor<28xf32>) -> () {
+func.func @conv2d_f32(%input: tensor<1x49x42x27xf32>, %weights: tensor<28x3x3x27xf32>, %bias: tensor<28xf32>) -> () {
   // CHECK: %[[PERM:.+]] = arith.constant dense<[1, 2, 3, 0]>
   // CHECK: %[[W:.+]] = "tosa.transpose"(%arg1, %[[PERM]])
   // CHECK: %[[M_IN:.+]] = linalg.init_tensor [1, 45, 40, 28]
@@ -364,7 +364,7 @@ func @conv2d_f32(%input: tensor<1x49x42x27xf32>, %weights: tensor<28x3x3x27xf32>
 // CHECK: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @conv2d_dyn
-func @conv2d_dyn(%input: tensor<?x49x42x27xf32>, %weights: tensor<28x3x3x27xf32>, %bias: tensor<28xf32>) -> () {
+func.func @conv2d_dyn(%input: tensor<?x49x42x27xf32>, %weights: tensor<28x3x3x27xf32>, %bias: tensor<28xf32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[PERM:.+]] = arith.constant dense<[1, 2, 3, 0]>
@@ -387,7 +387,7 @@ func @conv2d_dyn(%input: tensor<?x49x42x27xf32>, %weights: tensor<28x3x3x27xf32>
 // CHECK: #[[$MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @conv2d_dyn_w_h
-func @conv2d_dyn_w_h(%input: tensor<1x?x?x27xf32>, %weights: tensor<28x3x3x27xf32>, %bias: tensor<28xf32>) -> () {
+func.func @conv2d_dyn_w_h(%input: tensor<1x?x?x27xf32>, %weights: tensor<28x3x3x27xf32>, %bias: tensor<28xf32>) -> () {
   // Computing output height
   // CHECK: %[[C1:.+]] = arith.constant 1
   // CHECK: %[[H:.+]] = tensor.dim %arg0, %[[C1]]
@@ -444,7 +444,7 @@ func @conv2d_dyn_w_h(%input: tensor<1x?x?x27xf32>, %weights: tensor<28x3x3x27xf3
 // -----
 
 // CHECK-LABEL: @conv2d_padded_f32
-func @conv2d_padded_f32(%input: tensor<1x47x40x28xf32>, %weights: tensor<28x3x3x28xf32>, %bias: tensor<28xf32>) -> () {
+func.func @conv2d_padded_f32(%input: tensor<1x47x40x28xf32>, %weights: tensor<28x3x3x28xf32>, %bias: tensor<28xf32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
   // CHECK:   tensor.yield %[[C0]]
@@ -456,7 +456,7 @@ func @conv2d_padded_f32(%input: tensor<1x47x40x28xf32>, %weights: tensor<28x3x3x
 // -----
 
 // CHECK-LABEL: @conv2d_quant
-func @conv2d_quant(%arg0 : tensor<1x12x12x1xi8>, %arg1 : tensor<1024x3x3x1xi8>, %arg2 : tensor<1024xi32>) -> () {
+func.func @conv2d_quant(%arg0 : tensor<1x12x12x1xi8>, %arg1 : tensor<1024x3x3x1xi8>, %arg2 : tensor<1024xi32>) -> () {
   // CHECK:   %[[C22:.+]] = arith.constant -22
   // CHECK: tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
   // CHECK:   tensor.yield %[[C22]]
@@ -471,7 +471,7 @@ func @conv2d_quant(%arg0 : tensor<1x12x12x1xi8>, %arg1 : tensor<1024x3x3x1xi8>,
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @depthwise_conv
-func @depthwise_conv(%arg0 : tensor<1x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf32>, %arg2 : tensor<33xf32>) -> () {
+func.func @depthwise_conv(%arg0 : tensor<1x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf32>, %arg2 : tensor<33xf32>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [1, 5, 5, 3, 11]
   // CHECK: [[CST0:%.+]] = arith.constant 0
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[CST0]]{{.*}}outs([[INIT]]
@@ -493,7 +493,7 @@ func @depthwise_conv(%arg0 : tensor<1x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf32>,
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @depthwise_conv_dyn
-func @depthwise_conv_dyn(%arg0 : tensor<?x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf32>, %arg2 : tensor<33xf32>) -> () {
+func.func @depthwise_conv_dyn(%arg0 : tensor<?x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf32>, %arg2 : tensor<33xf32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[BATCH]], 5, 5, 3, 11]
@@ -517,7 +517,7 @@ func @depthwise_conv_dyn(%arg0 : tensor<?x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf3
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @depthwise_conv_strides
-func @depthwise_conv_strides(%arg0 : tensor<1x11x9x3xf32>, %arg1 : tensor<3x1x3x11xf32>, %arg2 : tensor<33xf32>) -> () {
+func.func @depthwise_conv_strides(%arg0 : tensor<1x11x9x3xf32>, %arg1 : tensor<3x1x3x11xf32>, %arg2 : tensor<33xf32>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [1, 5, 5, 3, 11]
   // CHECK: [[CST0:%.+]] = arith.constant 0
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[CST0]]{{.*}}outs([[INIT]]
@@ -539,7 +539,7 @@ func @depthwise_conv_strides(%arg0 : tensor<1x11x9x3xf32>, %arg1 : tensor<3x1x3x
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @depthwise_conv_quant
-func @depthwise_conv_quant(%arg0 : tensor<1x12x12x4xi8>, %arg1 : tensor<3x3x4x128xi8>, %arg2 : tensor<512xi32>) -> () {
+func.func @depthwise_conv_quant(%arg0 : tensor<1x12x12x4xi8>, %arg1 : tensor<3x3x4x128xi8>, %arg2 : tensor<512xi32>) -> () {
   // CHECK: [[PADV:%.+]] = arith.constant -128
   // CHECK: [[PAD:%.+]] = tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
   // CHECK:   tensor.yield [[PADV]]
@@ -567,7 +567,7 @@ func @depthwise_conv_quant(%arg0 : tensor<1x12x12x4xi8>, %arg1 : tensor<3x3x4x12
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @depthwise_conv_quant_dilations
-func @depthwise_conv_quant_dilations(%arg0 : tensor<1x14x14x4xi8>, %arg1 : tensor<3x3x4x128xi8>, %arg2 : tensor<512xi32>) -> () {
+func.func @depthwise_conv_quant_dilations(%arg0 : tensor<1x14x14x4xi8>, %arg1 : tensor<3x3x4x128xi8>, %arg2 : tensor<512xi32>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [1, 10, 10, 4, 128]
   // CHECK: [[CST0:%.+]] = arith.constant 0
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[CST0]]{{.*}}outs([[INIT]]
@@ -586,7 +586,7 @@ func @depthwise_conv_quant_dilations(%arg0 : tensor<1x14x14x4xi8>, %arg1 : tenso
 }
 
 // CHECK-LABEL: @depthwise_conv2d_dyn_w_h
-func @depthwise_conv2d_dyn_w_h(%arg0: tensor<2x?x?x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) {
+func.func @depthwise_conv2d_dyn_w_h(%arg0: tensor<2x?x?x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) {
   // CHECK: arith.addi
   // CHECK: arith.subi
   // CHECK: arith.muli

diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
index 9ce5377f1eb19..8864a85af6f3e 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -3,7 +3,7 @@
 // CHECK: #[[$MAP0:.*]] = affine_map<() -> ()>
 
 // CHECK-LABEL: @test_abs
-func @test_abs(%arg0: tensor<f32>) -> tensor<f32> {
+func.func @test_abs(%arg0: tensor<f32>) -> tensor<f32> {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [] : tensor<f32>
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = []} ins(%arg0 : tensor<f32>) outs([[INIT]] : tensor<f32>) {
   // CHECK: ^bb0(%arg1: f32, %arg2: f32):
@@ -22,7 +22,7 @@ func @test_abs(%arg0: tensor<f32>) -> tensor<f32> {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
 
 // CHECK-LABEL: @test_abs
-func @test_abs(%arg0: tensor<2xf32>) -> tensor<2xf32> {
+func.func @test_abs(%arg0: tensor<2xf32>) -> tensor<2xf32> {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2] : tensor<2xf32>
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins(%arg0 : tensor<2xf32>) outs([[INIT]] : tensor<2xf32>) {
   // CHECK: ^bb0(%arg1: f32, %arg2: f32):
@@ -40,7 +40,7 @@ func @test_abs(%arg0: tensor<2xf32>) -> tensor<2xf32> {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @test_abs
-func @test_abs(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
+func.func @test_abs(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 3] : tensor<2x3xf32>
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins(%arg0 : tensor<2x3xf32>) outs([[INIT]] : tensor<2x3xf32>) {
   // CHECK: ^bb0(%arg1: f32, %arg2: f32):
@@ -56,7 +56,7 @@ func @test_abs(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
 // -----
 
 // CHECK-LABEL: @test_abs
-func @test_abs(%arg0: tensor<?xf32>) -> tensor<?xf32> {
+func.func @test_abs(%arg0: tensor<?xf32>) -> tensor<?xf32> {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[DIM]]]
@@ -71,7 +71,7 @@ func @test_abs(%arg0: tensor<?xf32>) -> tensor<?xf32> {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @test_abs_dyn
-func @test_abs_dyn(%arg0: tensor<2x?xf32>) -> tensor<2x?xf32> {
+func.func @test_abs_dyn(%arg0: tensor<2x?xf32>) -> tensor<2x?xf32> {
   // CHECK: %[[C1:.+]] = arith.constant 1
   // CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C1]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [2, %[[DIM]]]
@@ -87,7 +87,7 @@ func @test_abs_dyn(%arg0: tensor<2x?xf32>) -> tensor<2x?xf32> {
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0) -> (d0)>
 
 // CHECK-LABEL: @test_broadcast
-func @test_broadcast(%arg0: tensor<1xf32>, %arg1: tensor<2xf32>) -> tensor<2xf32> {
+func.func @test_broadcast(%arg0: tensor<1xf32>, %arg1: tensor<2xf32>) -> tensor<2xf32> {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2] : tensor<2xf32>
   // CHECK: [[RESHAPE:%.+]] = tensor.collapse_shape %arg0
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel"]} ins([[RESHAPE]], %arg1 : tensor<f32>, tensor<2xf32>) outs([[INIT]] : tensor<2xf32>) {
@@ -105,7 +105,7 @@ func @test_broadcast(%arg0: tensor<1xf32>, %arg1: tensor<2xf32>) -> tensor<2xf32
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0) -> ()>
 
 // CHECK-LABEL: @test_broadcast_swapped_args
-func @test_broadcast_swapped_args(%arg0: tensor<2xf32>, %arg1: tensor<1xf32>) -> tensor<2xf32> {
+func.func @test_broadcast_swapped_args(%arg0: tensor<2xf32>, %arg1: tensor<1xf32>) -> tensor<2xf32> {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2] : tensor<2xf32>
   // CHECK: [[RESHAPE:%.+]] = tensor.collapse_shape %arg1
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP0]]], iterator_types = ["parallel"]} ins(%arg0, [[RESHAPE]] : tensor<2xf32>, tensor<f32>) outs([[INIT]] : tensor<2xf32>) {
@@ -124,7 +124,7 @@ func @test_broadcast_swapped_args(%arg0: tensor<2xf32>, %arg1: tensor<1xf32>) ->
 // CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)>
 
 // CHECK-LABEL: @test_multibroadcast
-func @test_multibroadcast(%arg0: tensor<1x3xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x3xf32> {
+func.func @test_multibroadcast(%arg0: tensor<1x3xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x3xf32> {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 3] : tensor<2x3xf32>
   // CHECK: [[RESHAPE1:%.+]] = tensor.collapse_shape %arg0 {{\[}}[0, 1]]
   // CHECK: [[RESHAPE2:%.+]] = tensor.collapse_shape %arg1 {{\[}}[0, 1]]
@@ -140,7 +140,7 @@ func @test_multibroadcast(%arg0: tensor<1x3xf32>, %arg1: tensor<2x1xf32>) -> ten
 // -----
 
 // CHECK-LABEL: @test_simple_f32
-func @test_simple_f32(%arg0: tensor<1xf32>) -> () {
+func.func @test_simple_f32(%arg0: tensor<1xf32>) -> () {
   // CHECK: linalg.generic
   // CHECK: tanh
   %0 = "tosa.tanh"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
@@ -267,7 +267,7 @@ func @test_simple_f32(%arg0: tensor<1xf32>) -> () {
 // -----
 
 // CHECK-LABEL: @test_simple_f16
-func @test_simple_f16(%arg0: tensor<1xf16>) -> () {
+func.func @test_simple_f16(%arg0: tensor<1xf16>) -> () {
 
   // CHECK: linalg.generic
   // CHECK: arith.extf
@@ -279,7 +279,7 @@ func @test_simple_f16(%arg0: tensor<1xf16>) -> () {
 // -----
 
 // CHECK-LABEL: @test_simple_i16
-func @test_simple_i16(%arg0: tensor<1xi16>) -> () {
+func.func @test_simple_i16(%arg0: tensor<1xi16>) -> () {
   // CHECK: linalg.generic
   // CHECK: arith.extsi
   // CHECK: arith.extsi
@@ -292,7 +292,7 @@ func @test_simple_i16(%arg0: tensor<1xi16>) -> () {
 // -----
 
 // CHECK-LABEL: @test_simple_ui8
-func @test_simple_ui8(%arg0: tensor<1xui8>) -> () {
+func.func @test_simple_ui8(%arg0: tensor<1xui8>) -> () {
   // CHECK: arith.uitofp
   %0 = "tosa.cast"(%arg0) : (tensor<1xui8>) -> tensor<1xf32>
   return
@@ -301,7 +301,7 @@ func @test_simple_ui8(%arg0: tensor<1xui8>) -> () {
 // -----
 
 // CHECK-LABEL: @test_simple_i32
-func @test_simple_i32(%arg0: tensor<1xi32>) -> () {
+func.func @test_simple_i32(%arg0: tensor<1xi32>) -> () {
   // CHECK: linalg.generic
   // CHECK: arith.addi
   %0 = "tosa.add"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32>
@@ -442,7 +442,7 @@ func @test_simple_i32(%arg0: tensor<1xi32>) -> () {
 // -----
 
 // CHECK-LABEL: @test_simple_ui8
-func @test_simple_ui8(%arg0: tensor<1xi8>) -> () {
+func.func @test_simple_ui8(%arg0: tensor<1xi8>) -> () {
 
   // CHECK: linalg.generic
   // CHECK: sitofp
@@ -454,7 +454,7 @@ func @test_simple_ui8(%arg0: tensor<1xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @test_i8
-func @test_i8(%arg0: tensor<1xi8>) -> () {
+func.func @test_i8(%arg0: tensor<1xi8>) -> () {
   // CHECK: linalg.generic
   // CHECK-DAG: %[[C127:.+]] = arith.constant -127
   // CHECK-DAG: %[[C126:.+]] = arith.constant 126
@@ -479,7 +479,7 @@ func @test_i8(%arg0: tensor<1xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @test_bool
-func @test_bool(%arg0: tensor<1xi1>, %arg1: tensor<1xi1>) -> () {
+func.func @test_bool(%arg0: tensor<1xi1>, %arg1: tensor<1xi1>) -> () {
   // CHECK: linalg.generic
   // CHECK: and
   %0 = "tosa.logical_and"(%arg0, %arg1) : (tensor<1xi1>, tensor<1xi1>) -> tensor<1xi1>
@@ -503,7 +503,7 @@ func @test_bool(%arg0: tensor<1xi1>, %arg1: tensor<1xi1>) -> () {
 // -----
 
 // CHECK-LABEL: @test_negate_quantized
-func @test_negate_quantized(%arg0: tensor<1xi8>) -> () {
+func.func @test_negate_quantized(%arg0: tensor<1xi8>) -> () {
   // CHECK: linalg.generic
   // CHECK: [[ZERO:%.+]] = arith.constant 0
   // CHECK: [[EXT:%.+]] = arith.extsi %arg1 : i8 to i16
@@ -532,7 +532,7 @@ func @test_negate_quantized(%arg0: tensor<1xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @test_reshape_downrank
-func @test_reshape_downrank(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
+func.func @test_reshape_downrank(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
   // CHECK: [[RESHAPE:%.+]] = tensor.collapse_shape %arg0 {{\[}}[0, 1]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [6]} : (tensor<2x3xf32>) -> tensor<6xf32>
   // CHECK: return [[RESHAPE]]
@@ -542,7 +542,7 @@ func @test_reshape_downrank(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
 // -----
 
 // CHECK-LABEL: @test_reshape_downrank_dyn
-func @test_reshape_downrank_dyn(%arg0: tensor<2x?xf32>) -> tensor<?xf32> {
+func.func @test_reshape_downrank_dyn(%arg0: tensor<2x?xf32>) -> tensor<?xf32> {
   // CHECK: [[RESHAPE:%.+]] = tensor.collapse_shape %arg0 {{\[}}[0, 1]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [-1]} : (tensor<2x?xf32>) -> tensor<?xf32>
   // CHECK: return [[RESHAPE]]
@@ -552,7 +552,7 @@ func @test_reshape_downrank_dyn(%arg0: tensor<2x?xf32>) -> tensor<?xf32> {
 // -----
 
 // CHECK-LABEL: @test_reshape_uprank
-func @test_reshape_uprank(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
+func.func @test_reshape_uprank(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
   // CHECK: [[RESHAPE:%.+]] = tensor.expand_shape %arg0 {{\[}}[0, 1]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [2, 3]} : (tensor<6xf32>) -> tensor<2x3xf32>
   // CHECK: return [[RESHAPE]]
@@ -562,7 +562,7 @@ func @test_reshape_uprank(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
 // -----
 
 // CHECK-LABEL: @test_reshape_uprank_dyn
-func @test_reshape_uprank_dyn(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
+func.func @test_reshape_uprank_dyn(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
   // CHECK: [[RESHAPE:%.+]] = tensor.expand_shape %arg0 {{\[}}[0, 1]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [2, -1]} : (tensor<?xf32>) -> tensor<2x?xf32>
   // CHECK: return [[RESHAPE]]
@@ -572,7 +572,7 @@ func @test_reshape_uprank_dyn(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
 // -----
 
 // CHECK-LABEL: @test_reshape_samerank
-func @test_reshape_samerank(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> {
+func.func @test_reshape_samerank(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> {
   // CHECK-SAME: (%[[ARG0:.*]]: tensor<3x2xf32>)
   // CHECK-NEXT: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
   // CHECK-NEXT: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]]
@@ -584,7 +584,7 @@ func @test_reshape_samerank(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> {
 // -----
 
 // CHECK-LABEL: @test_reshape_samerank_dyn
-func @test_reshape_samerank_dyn(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
+func.func @test_reshape_samerank_dyn(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
   // CHECK-SAME: (%[[ARG0:.*]]: tensor<?x2xf32>)
   // CHECK-NEXT: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
   // CHECK-NEXT: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]]
@@ -596,7 +596,7 @@ func @test_reshape_samerank_dyn(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
 // -----
 
 // CHECK-LABEL: @test_reshape_downrank_6D
-func @test_reshape_downrank_6D(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
+func.func @test_reshape_downrank_6D(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
   // CHECK: tensor.collapse_shape %arg0 {{\[}}[0, 1, 2], [3], [4, 5]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [6, 5, 77]} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32>
   return %0 : tensor<6x5x77xf32>
@@ -605,7 +605,7 @@ func @test_reshape_downrank_6D(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77
 // -----
 
 // CHECK-LABEL: @test_reshape_downrank_6D_dyn
-func @test_reshape_downrank_6D_dyn(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {
+func.func @test_reshape_downrank_6D_dyn(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {
   // CHECK: tensor.collapse_shape %arg0 {{\[}}[0, 1, 2, 3, 4, 5]]
   // CHECK: tensor.expand_shape %0 {{\[}}[0, 1, 2]]
   %0 = "tosa.reshape"(%arg0) {new_shape = [-1, 5, 77]} : (tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32>
@@ -615,7 +615,7 @@ func @test_reshape_downrank_6D_dyn(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x
 // -----
 
 // CHECK-LABEL: @test_identity
-func @test_identity(%arg0: tensor<1xf32>, %arg1: tensor<1xi32>) -> (tensor<1xf32>, tensor<1xi32>) {
+func.func @test_identity(%arg0: tensor<1xf32>, %arg1: tensor<1xi32>) -> (tensor<1xf32>, tensor<1xi32>) {
   %0 = "tosa.identity"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
   %1 = "tosa.identity"(%arg1) : (tensor<1xi32>) -> tensor<1xi32>
 
@@ -630,7 +630,7 @@ func @test_identity(%arg0: tensor<1xf32>, %arg1: tensor<1xi32>) -> (tensor<1xf32
 
 // CHECK-LABEL: @test_transpose
 // CHECK-SAME: ([[ARG0:%.+]]: tensor<1x2x3xi32>)
-func @test_transpose(%arg0: tensor<1x2x3xi32>) -> () {
+func.func @test_transpose(%arg0: tensor<1x2x3xi32>) -> () {
   %0 = arith.constant dense<[1, 2, 0]> : tensor<3xi32>
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 3, 1]
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel"]} ins([[ARG0]] : tensor<1x2x3xi32>) outs([[OUT:%.+]] : tensor<2x3x1xi32>)
@@ -648,7 +648,7 @@ func @test_transpose(%arg0: tensor<1x2x3xi32>) -> () {
 
 // CHECK-LABEL: @test_transpose_dyn
 // CHECK-SAME: (%[[ARG0:.+]]: tensor<1x?x3x4xi32>)
-func @test_transpose_dyn(%arg0: tensor<1x?x3x4xi32>) -> () {
+func.func @test_transpose_dyn(%arg0: tensor<1x?x3x4xi32>) -> () {
   %0 = arith.constant dense<[1, 3, 0, 2]> : tensor<4xi32>
   // CHECK: %[[C1:.+]] = arith.constant 1
   // CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C1]]
@@ -668,7 +668,7 @@ func @test_transpose_dyn(%arg0: tensor<1x?x3x4xi32>) -> () {
 
 // CHECK-LABEL: @test_transpose_dyn
 // CHECK-SAME: (%[[ARG0:.+]]: tensor<?x?xf32>)
-func @test_transpose_dyn_multiple(%arg0: tensor<?x?xf32>) -> () {
+func.func @test_transpose_dyn_multiple(%arg0: tensor<?x?xf32>) -> () {
   %0 = arith.constant dense<[1, 0]> : tensor<2xi32>
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[DIM0:.+]] = tensor.dim %arg0, %[[C0]]
@@ -691,7 +691,7 @@ func @test_transpose_dyn_multiple(%arg0: tensor<?x?xf32>) -> () {
 
 // CHECK-LABEL: @reduce_float
 // CHECK-SAME: [[ARG0:%.+]]: tensor<5x4xf32>
-func @reduce_float(%arg0: tensor<5x4xf32>) -> () {
+func.func @reduce_float(%arg0: tensor<5x4xf32>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [4]
   // CHECK: [[CST0:%.+]] = arith.constant 0.0
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[CST0]]{{.*}}outs([[INIT]]
@@ -740,7 +740,7 @@ func @reduce_float(%arg0: tensor<5x4xf32>) -> () {
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
 
 // CHECK-LABEL: @reduce_float_dyn
-func @reduce_float_dyn(%arg0: tensor<?x5x4xf32>) -> () {
+func.func @reduce_float_dyn(%arg0: tensor<?x5x4xf32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[DYN:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[DYN]], 4]
@@ -762,7 +762,7 @@ func @reduce_float_dyn(%arg0: tensor<?x5x4xf32>) -> () {
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
 
 // CHECK-LABEL: @reduce_float_dyn_nonzero_batch
-func @reduce_float_dyn_nonzero_batch(%arg0: tensor<5x?x4xf32>) -> () {
+func.func @reduce_float_dyn_nonzero_batch(%arg0: tensor<5x?x4xf32>) -> () {
   // CHECK: %[[C1:.+]] = arith.constant 1
   // CHECK: %[[DYN:.+]] = tensor.dim %arg0, %[[C1]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [5, %[[DYN]]]
@@ -784,7 +784,7 @@ func @reduce_float_dyn_nonzero_batch(%arg0: tensor<5x?x4xf32>) -> () {
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
 
 // CHECK-LABEL: @reduce_float_dyn_multiple
-func @reduce_float_dyn_multiple(%arg0: tensor<?x?xf32>) -> () {
+func.func @reduce_float_dyn_multiple(%arg0: tensor<?x?xf32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[DYN:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[DYN]]]
@@ -808,7 +808,7 @@ func @reduce_float_dyn_multiple(%arg0: tensor<?x?xf32>) -> () {
 
 // CHECK-LABEL: @reduce_int
 // CHECK-SAME: [[ARG0:%.+]]: tensor<5x4xi32>
-func @reduce_int(%arg0: tensor<5x4xi32>) -> () {
+func.func @reduce_int(%arg0: tensor<5x4xi32>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [4]
   // CHECK: [[CST0:%.+]] = arith.constant 0
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[CST0]]{{.*}}outs([[INIT]]
@@ -858,7 +858,7 @@ func @reduce_int(%arg0: tensor<5x4xi32>) -> () {
 
 // CHECK-LABEL: @reduce_bool
 // CHECK-SAME: [[ARG0:%.+]]: tensor<5x4xi1>
-func @reduce_bool(%arg0: tensor<5x4xi1>) -> () {
+func.func @reduce_bool(%arg0: tensor<5x4xi1>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [4]
   // CHECK: [[CST0:%.+]] = arith.constant true
   // CHECK: [[FILL:%.+]] = linalg.fill ins([[CST0]]{{.*}}outs([[INIT]]
@@ -881,7 +881,7 @@ func @reduce_bool(%arg0: tensor<5x4xi1>) -> () {
 // -----
 
 // CHECK-LABEL: @concat
-func @concat(%arg0: tensor<5x1xf32>, %arg1: tensor<6x1xf32>) -> () {
+func.func @concat(%arg0: tensor<5x1xf32>, %arg1: tensor<6x1xf32>) -> () {
   // CHECK: [[AXIS:%.+]] = arith.constant 0
   // CHECK: [[STRIDE:%.+]]   = arith.constant 1
   // CHECK: [[OFFSET:%.+]] = arith.constant 0 : index
@@ -911,7 +911,7 @@ func @concat(%arg0: tensor<5x1xf32>, %arg1: tensor<6x1xf32>) -> () {
 // -----
 
 // CHECK-LABEL: @concat_non_axis_dyn
-func @concat_non_axis_dyn(%arg0: tensor<5x?xf32>, %arg1: tensor<6x?xf32>) -> () {
+func.func @concat_non_axis_dyn(%arg0: tensor<5x?xf32>, %arg1: tensor<6x?xf32>) -> () {
   // CHECK: %[[AXIS:.+]] = arith.constant 0
   // CHECK: %[[STRIDE:.+]]   = arith.constant 1
   // CHECK: %[[OFFSET:.+]] = arith.constant 0 : index
@@ -932,7 +932,7 @@ func @concat_non_axis_dyn(%arg0: tensor<5x?xf32>, %arg1: tensor<6x?xf32>) -> ()
 // -----
 
 // CHECK-LABEL: @concat_axis_dyn
-func @concat_axis_dyn(%arg0: tensor<?x3xf32>, %arg1: tensor<?x3xf32>) -> () {
+func.func @concat_axis_dyn(%arg0: tensor<?x3xf32>, %arg1: tensor<?x3xf32>) -> () {
   // CHECK: %[[AXIS:.+]] = arith.constant 0
   // CHECK: %[[STRIDE:.+]]   = arith.constant 1
   // CHECK: %[[OFFSET:.+]] = arith.constant 0 : index
@@ -957,7 +957,7 @@ func @concat_axis_dyn(%arg0: tensor<?x3xf32>, %arg1: tensor<?x3xf32>) -> () {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
 
 // CHECK-LABEL: @rescale_i8
-func @rescale_i8(%arg0 : tensor<2xi8>) -> () {
+func.func @rescale_i8(%arg0 : tensor<2xi8>) -> () {
   // CHECK: [[C0:%.+]] = arith.constant 19689
   // CHECK: [[C1:%.+]] = arith.constant 15
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2]
@@ -1010,7 +1010,7 @@ func @rescale_i8(%arg0 : tensor<2xi8>) -> () {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @rescale_i8_dyn
-func @rescale_i8_dyn(%arg0 : tensor<?x2xi8>) -> () {
+func.func @rescale_i8_dyn(%arg0 : tensor<?x2xi8>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[BATCH]], 2]
@@ -1030,7 +1030,7 @@ func @rescale_i8_dyn(%arg0 : tensor<?x2xi8>) -> () {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
 
 // CHECK-LABEL: @rescale_ui8
-func @rescale_ui8(%arg0 : tensor<2xui8>) -> () {
+func.func @rescale_ui8(%arg0 : tensor<2xui8>) -> () {
   // CHECK: [[C0:%.+]] = arith.constant 19689
   // CHECK: [[C1:%.+]] = arith.constant 15
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2]
@@ -1061,7 +1061,7 @@ func @rescale_ui8(%arg0 : tensor<2xui8>) -> () {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
 
 // CHECK-LABEL: @rescale_per_channel
-func @rescale_per_channel(%arg0 : tensor<3xi8>) -> (tensor<3xi8>) {
+func.func @rescale_per_channel(%arg0 : tensor<3xi8>) -> (tensor<3xi8>) {
   // CHECK: [[MULTIPLIERS:%.+]] = arith.constant dense<[42, 43, 0]>
   // CHECK: [[SHIFTS:%.+]] = arith.constant dense<[14, 15, 0]>
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [3]
@@ -1091,7 +1091,7 @@ func @rescale_per_channel(%arg0 : tensor<3xi8>) -> (tensor<3xi8>) {
 // -----
 
 // CHECK-LABEL: @rescaleDoubleRound
-func @rescaleDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) {
+func.func @rescaleDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) {
   // CHECK: linalg.generic
   // CHECK: "tosa.apply_scale"
   // CHECK-SAME:  {double_round = true}
@@ -1100,7 +1100,7 @@ func @rescaleDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) {
 }
 
 // CHECK-LABEL: @rescaleUnnecessaryDoubleRound
-func @rescaleUnnecessaryDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) {
+func.func @rescaleUnnecessaryDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) {
   // CHECK: linalg.generic
   // CHECK: "tosa.apply_scale"
   // CHECK-SAME:  {double_round = false}
@@ -1113,7 +1113,7 @@ func @rescaleUnnecessaryDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 
 // CHECK-LABEL: @reverse
-func @reverse(%arg0: tensor<5x4xi32>) -> () {
+func.func @reverse(%arg0: tensor<5x4xi32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[RDIM:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [5, 4]
@@ -1147,7 +1147,7 @@ func @reverse(%arg0: tensor<5x4xi32>) -> () {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
 
 // CHECK-LABEL: @reverse_dyn
-func @reverse_dyn(%arg0: tensor<?xi32>) -> () {
+func.func @reverse_dyn(%arg0: tensor<?xi32>) -> () {
   // CHECK: %[[C0_1:.+]] = arith.constant 0
   // CHECK: %[[D0_1:.+]] = tensor.dim %arg0, %[[C0_1]]
   // CHECK: %[[C0_2:.+]] = arith.constant 0
@@ -1170,7 +1170,7 @@ func @reverse_dyn(%arg0: tensor<?xi32>) -> () {
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @tile
-func @tile(%arg0 : tensor<2x3xi8>) -> () {
+func.func @tile(%arg0 : tensor<2x3xi8>) -> () {
   // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 2, 1, 3]
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<2x3xi8>) outs([[INIT]] : tensor<2x2x1x3xi8>)
   // CHECK:   linalg.yield %arg1 : i8
@@ -1198,7 +1198,7 @@ func @tile(%arg0 : tensor<2x3xi8>) -> () {
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @tile_dyn_input
-func @tile_dyn_input(%arg0 : tensor<?x3xi8>) -> () {
+func.func @tile_dyn_input(%arg0 : tensor<?x3xi8>) -> () {
   // CHECK: %[[CST0:.+]] = arith.constant 0
   // CHECK: %[[DYN:.+]] = tensor.dim %arg0, %[[CST0]] : tensor<?x3xi8>
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [2, %[[DYN]], 1, 3]
@@ -1217,7 +1217,7 @@ func @tile_dyn_input(%arg0 : tensor<?x3xi8>) -> () {
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 
 // CHECK-LABEL: @tile_dyn_multiples
-func @tile_dyn_multiples(%arg0 : tensor<2x3xi8>) -> () {
+func.func @tile_dyn_multiples(%arg0 : tensor<2x3xi8>) -> () {
   // CHECK: %[[CST1:.+]] = arith.constant 1
   // CHECK: %[[DYN:.+]] = tensor.dim %arg0, %[[CST1]] : tensor<2x3xi8>
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [2, 2, %[[DYN]], 3]
@@ -1232,7 +1232,7 @@ func @tile_dyn_multiples(%arg0 : tensor<2x3xi8>) -> () {
 
 // -----
 
-func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
+func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
   %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
@@ -1248,7 +1248,7 @@ func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
   return %1 : tensor<4x9xf32>
 }
 
-func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
+func.func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
   %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // CHECK: [[CST:%.+]] = arith.constant 0 : i32
   // CHECK: tensor.pad
@@ -1257,7 +1257,7 @@ func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
   return %1 : tensor<4x9xi32>
 }
 
-func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
+func.func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
   %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // CHECK: [[CST:%.+]] = arith.constant 42 : i32
   // CHECK: tensor.pad
@@ -1268,7 +1268,7 @@ func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
 
 // -----
 
-func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
+func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
   %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
@@ -1287,7 +1287,7 @@ func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
 
 // -----
 
-func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
+func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
   %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
@@ -1303,7 +1303,7 @@ func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
   return %1 : tensor<?x9xf32>
 }
 
-func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
+func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
   %0 = arith.constant dense<[[-1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
@@ -1327,7 +1327,7 @@ func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
 // CHECK: #[[$MAP3:.*]] = affine_map<(d0) -> (d0)>
 // CHECK: #[[$MAP4:.*]] = affine_map<(d0) -> ()>
 
-func @argmax(%arg0 : tensor<3x2xi32>, %arg1 : tensor<6xf32>) -> () {
+func.func @argmax(%arg0 : tensor<3x2xi32>, %arg1 : tensor<6xf32>) -> () {
   // CHECK: [[IDX_INIT:%.+]] = linalg.init_tensor [2]
   // CHECK: [[IDX_MIN:%.+]] = arith.constant 0 : i32
   // CHECK: [[IDX_FILL:%.+]] = linalg.fill ins([[IDX_MIN]]{{.*}}outs([[IDX_INIT]]
@@ -1375,7 +1375,7 @@ func @argmax(%arg0 : tensor<3x2xi32>, %arg1 : tensor<6xf32>) -> () {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
 
-func @argmax_dyn_non_axis(%arg0 : tensor<3x?xi32>) -> () {
+func.func @argmax_dyn_non_axis(%arg0 : tensor<3x?xi32>) -> () {
   // CHECK: %[[CST1:.+]] = arith.constant 1
   // CHECK: %[[DYN:.+]] = tensor.dim %arg0, %[[CST1]]
   // CHECK: %[[IDX_INIT:.+]] = linalg.init_tensor [%[[DYN]]]
@@ -1400,7 +1400,7 @@ func @argmax_dyn_non_axis(%arg0 : tensor<3x?xi32>) -> () {
 // CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
 
-func @argmax_dyn_axis(%arg0 : tensor<3x?xi32>) -> () {
+func.func @argmax_dyn_axis(%arg0 : tensor<3x?xi32>) -> () {
   // CHECK: %[[IDX_INIT:.+]] = linalg.init_tensor [3]
   // CHECK: %[[IDX_MIN:.+]] = arith.constant 0 : i32
   // CHECK: %[[IDX_FILL:.+]] = linalg.fill ins(%[[IDX_MIN]]{{.*}}outs(%[[IDX_INIT]]
@@ -1421,7 +1421,7 @@ func @argmax_dyn_axis(%arg0 : tensor<3x?xi32>) -> () {
 // -----
 
 // CHECK-LABEL: @gather_float
-func @gather_float(%arg0: tensor<2x3x2xf32>, %arg1: tensor<2x3xi32>) -> () {
+func.func @gather_float(%arg0: tensor<2x3x2xf32>, %arg1: tensor<2x3xi32>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [2, 3, 2]
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<2x3xi32>) outs(%[[INIT]] : tensor<2x3x2xf32>)
   // CHECK: ^bb0(%[[ARG0:.+]]: i32, %[[ARG1:.+]]: f32)
@@ -1435,7 +1435,7 @@ func @gather_float(%arg0: tensor<2x3x2xf32>, %arg1: tensor<2x3xi32>) -> () {
 }
 
 // CHECK-LABEL: @gather_float_dyn
-func @gather_float_dyn(%arg0: tensor<?x3x2xf32>, %arg1: tensor<?x3xi32>) -> () {
+func.func @gather_float_dyn(%arg0: tensor<?x3x2xf32>, %arg1: tensor<?x3xi32>) -> () {
   // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[BATCH]], 3, 2]
@@ -1451,7 +1451,7 @@ func @gather_float_dyn(%arg0: tensor<?x3x2xf32>, %arg1: tensor<?x3xi32>) -> () {
 }
 
 // CHECK-LABEL: @gather_int
-func @gather_int(%arg0: tensor<2x3x2xi32>, %arg1: tensor<2x3xi32>) -> () {
+func.func @gather_int(%arg0: tensor<2x3x2xi32>, %arg1: tensor<2x3xi32>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [2, 3, 2]
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg1 : tensor<2x3xi32>) outs(%[[INIT]] : tensor<2x3x2xi32>)
   // CHECK: ^bb0(%[[ARG0:.+]]: i32, %[[ARG1:.+]]: i32)
@@ -1467,7 +1467,7 @@ func @gather_int(%arg0: tensor<2x3x2xi32>, %arg1: tensor<2x3xi32>) -> () {
 // -----
 
 // CHECK-LABEL: @table8
-func @table8(%arg0: tensor<6xi8>, %arg1: tensor<512xi8>) -> () {
+func.func @table8(%arg0: tensor<6xi8>, %arg1: tensor<512xi8>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [6]
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} ins(%arg0 : tensor<6xi8>) outs(%[[INIT]] : tensor<6xi8>)
   // CHECK: ^bb0(%[[ARG_IN:.+]]: i8, %[[ARG_INIT:.+]]: i8)
@@ -1483,7 +1483,7 @@ func @table8(%arg0: tensor<6xi8>, %arg1: tensor<512xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @table16
-func @table16(%arg0: tensor<6xi16>, %arg1: tensor<513xi16>) -> () {
+func.func @table16(%arg0: tensor<6xi16>, %arg1: tensor<513xi16>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [6]
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} ins(%arg0 : tensor<6xi16>) outs(%[[INIT]] : tensor<6xi32>)
   // CHECK: ^bb0(%arg2: i16, %arg3: i32)
@@ -1514,7 +1514,7 @@ func @table16(%arg0: tensor<6xi16>, %arg1: tensor<513xi16>) -> () {
 // -----
 
 // CHECK-LABEL: @table8_dyn
-func @table8_dyn(%arg0: tensor<?xi8>, %arg1: tensor<512xi8>) -> () {
+func.func @table8_dyn(%arg0: tensor<?xi8>, %arg1: tensor<512xi8>) -> () {
   // CHECK: %[[CST0:.+]] = arith.constant 0
   // CHECK: %[[DYN:.+]] = tensor.dim %arg0, %[[CST0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[DYN]]]
@@ -1532,7 +1532,7 @@ func @table8_dyn(%arg0: tensor<?xi8>, %arg1: tensor<512xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @table8_dyn_table
-func @table8_dyn_table(%arg0: tensor<6xi8>, %arg1: tensor<?xi8>) -> () {
+func.func @table8_dyn_table(%arg0: tensor<6xi8>, %arg1: tensor<?xi8>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [6]
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} ins(%arg0 : tensor<6xi8>) outs(%[[INIT]] : tensor<6xi8>)
   // CHECK: ^bb0(%[[ARG_IN:.+]]: i8, %[[ARG_INIT:.+]]: i8)
@@ -1548,7 +1548,7 @@ func @table8_dyn_table(%arg0: tensor<6xi8>, %arg1: tensor<?xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @resize_nearest
-func @resize_nearest(%input: tensor<1x2x2x1xf32>) -> () {
+func.func @resize_nearest(%input: tensor<1x2x2x1xf32>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [1, 4, 4, 1]
   // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK: %[[IDX0:.+]] = linalg.index 0
@@ -1617,7 +1617,7 @@ func @resize_nearest(%input: tensor<1x2x2x1xf32>) -> () {
 // -----
 
 // CHECK-LABEL: @resize_bilinear
-func @resize_bilinear(%input: tensor<1x2x2x1xf32>) -> () {
+func.func @resize_bilinear(%input: tensor<1x2x2x1xf32>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [1, 4, 4, 1]
   // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK: %[[IDX0:.+]] = linalg.index 0
@@ -1699,7 +1699,7 @@ func @resize_bilinear(%input: tensor<1x2x2x1xf32>) -> () {
 // -----
 
 // CHECK-LABEL: @resize_nearest_int
-func @resize_nearest_int(%input: tensor<1x2x2x1xi32>) -> () {
+func.func @resize_nearest_int(%input: tensor<1x2x2x1xi32>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [1, 4, 4, 1]
   // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK: %[[IDX0:.+]] = linalg.index 0
@@ -1767,7 +1767,7 @@ func @resize_nearest_int(%input: tensor<1x2x2x1xi32>) -> () {
 // -----
 
 // CHECK-LABEL: @resize_bilinear_int
-func @resize_bilinear_int(%input: tensor<1x2x2x1xi8>) -> () {
+func.func @resize_bilinear_int(%input: tensor<1x2x2x1xi8>) -> () {
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [1, 4, 4, 1]
   // CHECK: %[[GENERIC:.+]] = linalg.generic
 
@@ -1852,7 +1852,7 @@ func @resize_bilinear_int(%input: tensor<1x2x2x1xi8>) -> () {
 // -----
 
 // CHECK-LABEL: @resize_dyn
-func @resize_dyn(%input: tensor<?x2x2x1xi8>) -> () {
+func.func @resize_dyn(%input: tensor<?x2x2x1xi8>) -> () {
  // CHECK: %[[C0:.+]] = arith.constant 0
   // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[BATCH]], 4, 4, 1]

diff --git a/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir b/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir
index 59a252918d877..59931137cdf5b 100644
--- a/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir
+++ b/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir
@@ -2,7 +2,7 @@
 
 // CHECK-LABEL: func @while_test
 // CHECK-SAME: ([[ARG0:%.+]]: tensor<i32>)
-func @while_test(%arg0 : tensor<i32>) -> (tensor<i32>) {
+func.func @while_test(%arg0 : tensor<i32>) -> (tensor<i32>) {
   // CHECK: [[WHILE:%.+]] = scf.while ([[ARG1:%.+]] = [[ARG0]])
   %1 = "tosa.while_loop"(%arg0) ({
   ^bb0(%arg2: tensor<i32>):
@@ -34,7 +34,7 @@ func @while_test(%arg0 : tensor<i32>) -> (tensor<i32>) {
 
 // CHECK-LABEL: func @if_test
 // CHECK-SAME: ([[ARG0:%.+]]: tensor<f32>, [[ARG1:%.+]]: tensor<f32>, [[ARG2:%.+]]: tensor<i1>)
-func @if_test(%arg0 : tensor<f32>, %arg1 : tensor<f32>, %arg2 : tensor<i1>) -> (tensor<f32>) {
+func.func @if_test(%arg0 : tensor<f32>, %arg1 : tensor<f32>, %arg2 : tensor<i1>) -> (tensor<f32>) {
   // CHECK: [[EX:%.+]] = tensor.extract [[ARG2]]
   // CHECK: [[IF:%.+]] = scf.if [[EX]] -> (tensor<f32>) {
   %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({

diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
index 2290937b180ac..12eb51f58abed 100644
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt --split-input-file --tosa-to-tensor %s -o -| FileCheck %s
 
 // CHECK-LABEL: func @slice
-func @slice(%arg0: tensor<6xf32>) -> () {
+func.func @slice(%arg0: tensor<6xf32>) -> () {
   // CHECK: [[SLICE:%.+]] = tensor.extract_slice %arg0[2] [1] [1]
   %0 = "tosa.slice"(%arg0) {start = [2], size = [1]} : (tensor<6xf32>)  -> (tensor<1xf32>)
   return

diff --git a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir
index 4fe710b796963..b5d08e0817100 100644
--- a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir
+++ b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir
@@ -12,7 +12,7 @@
 //   CHECK-DAG:   %[[C:.+]] = gpu.subgroup_mma_load_matrix %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 16 : index} : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   %[[D:.+]] = gpu.subgroup_mma_compute %[[A]], %[[B]], %[[C]] : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   gpu.subgroup_mma_store_matrix %[[D]], %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 16 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<16x16xf16>
-func @matmul(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>) {
+func.func @matmul(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>) {
   %cst_0 = arith.constant dense<0.000000e+00> : vector<16x16xf16>
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f16
@@ -31,7 +31,7 @@ func @matmul(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<1
 //   CHECK-DAG:   %[[C:.+]] = gpu.subgroup_mma_constant_matrix %[[CST]] : !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   %[[D:.+]] = gpu.subgroup_mma_compute %[[A]], %[[B]], %[[C]] : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   gpu.subgroup_mma_store_matrix %[[D]], %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 16 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<16x16xf16>
-func @matmul_cst(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>) {
+func.func @matmul_cst(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>) {
   %cst_0 = arith.constant dense<0.000000e+00> : vector<16x16xf16>
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f16
@@ -49,7 +49,7 @@ func @matmul_cst(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memr
 //   CHECK-DAG:   %[[B:.+]] = gpu.subgroup_mma_load_matrix %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 16 : index} : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "BOp">
 //       CHECK:   %[[D:.+]] = gpu.subgroup_mma_compute %[[A]], %[[B]], %[[C]] : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   gpu.subgroup_mma_store_matrix %[[D]], %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 16 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<16x16xf16>
-func @matmul_broadcast(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>, %f: f16) {
+func.func @matmul_broadcast(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>, %f: f16) {
   %C = vector.broadcast %f : f16 to vector<16x16xf16>
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f16
@@ -69,7 +69,7 @@ func @matmul_broadcast(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2
 //  CHECK-NEXT:     scf.yield %[[D]] : !gpu.mma_matrix<16x16xf16, "COp">
 //  CHECK-NEXT:   }
 //  CHECK-NEXT:   gpu.subgroup_mma_store_matrix %[[ACC]], %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 128 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<128x128xf16>
-func @matmul_loop(%arg0: memref<128x128xf16>, %arg1: memref<128x128xf16>, %arg2: memref<128x128xf16>) {
+func.func @matmul_loop(%arg0: memref<128x128xf16>, %arg1: memref<128x128xf16>, %arg2: memref<128x128xf16>) {
   %c0 = arith.constant 0 : index
   %c128 = arith.constant 128 : index
   %c32 = arith.constant 32 : index
@@ -95,7 +95,7 @@ func @matmul_loop(%arg0: memref<128x128xf16>, %arg1: memref<128x128xf16>, %arg2:
 //       CHECK:   %[[D:.+]] = gpu.subgroup_mma_compute %[[A]], %[[B]], %[[C0]] : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   %[[E:.+]] = gpu.subgroup_mma_elementwise addf %[[D]], %[[C1]] : (!gpu.mma_matrix<16x16xf16, "COp">, !gpu.mma_matrix<16x16xf16, "COp">) -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   gpu.subgroup_mma_store_matrix %[[E]], %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 16 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<16x16xf16>
-func @matmul_fused_elementwise(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>) {
+func.func @matmul_fused_elementwise(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<16x16xf16>) {
   %cst_0 = arith.constant dense<0.000000e+00> : vector<16x16xf16>
   %cst_1 = arith.constant dense<1.000000e+00> : vector<16x16xf16>
   %c0 = arith.constant 0 : index
@@ -117,7 +117,7 @@ func @matmul_fused_elementwise(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16
 //       CHECK:   %[[E:.+]] = gpu.subgroup_mma_load_matrix %{{.*}}[%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}] {leadDimension = 0 : index} : memref<16x16x16x16xf16> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   %[[F:.+]] = gpu.subgroup_mma_elementwise divf %[[D]], %[[E]] : (!gpu.mma_matrix<16x16xf16, "COp">, !gpu.mma_matrix<16x16xf16, "COp">) -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   gpu.subgroup_mma_store_matrix %[[F]], %{{.*}}[%{{.*}}, %{{.*}}] {leadDimension = 16 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<16x16xf16>
-func @matmul_fused_broadcast(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>,
+func.func @matmul_fused_broadcast(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>,
   %arg2: memref<16x16xf16>, %arg3: memref<16x16x16x16xf16>) {
   %cst_0 = arith.constant dense<0.000000e+00> : vector<16x16xf16>
   %c0 = arith.constant 0 : index
@@ -140,7 +140,7 @@ func @matmul_fused_broadcast(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>,
 //   CHECK-DAG:   %[[C:.+]] = gpu.subgroup_mma_load_matrix %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] {leadDimension = 16 : index} : memref<2x16x16xf16> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   %[[D:.+]] = gpu.subgroup_mma_compute %[[A]], %[[B]], %[[C]] : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   gpu.subgroup_mma_store_matrix %[[D]], %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] {leadDimension = 16 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<2x16x16xf16>
-func @matmul_3Dmemref(%arg0: memref<2x16x16xf16>, %arg1: memref<16xf16>, %arg2: memref<2x16x16xf16>) {
+func.func @matmul_3Dmemref(%arg0: memref<2x16x16xf16>, %arg1: memref<16xf16>, %arg2: memref<2x16x16xf16>) {
   %cst_0 = arith.constant dense<0.000000e+00> : vector<16x16xf16>
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f16
@@ -159,7 +159,7 @@ func @matmul_3Dmemref(%arg0: memref<2x16x16xf16>, %arg1: memref<16xf16>, %arg2:
 //   CHECK-DAG:   %[[C:.+]] = gpu.subgroup_mma_load_matrix %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] {leadDimension = 16 : index} : memref<2x16x16xf16> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   %[[D:.+]] = gpu.subgroup_mma_compute %[[A]], %[[B]], %[[C]] : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp">
 //       CHECK:   gpu.subgroup_mma_store_matrix %[[D]], %{{.*}}[%[[C0]], %[[C0]], %[[C0]]] {leadDimension = 16 : index} : !gpu.mma_matrix<16x16xf16, "COp">, memref<2x16x16xf16>
-func @matmul_memref_strided(%arg0: memref<2x16x16xf16, affine_map<(d0, d1, d2) -> (d0 * 512 + d1 * 32 + d2)>>, %arg1: memref<16xf16>, %arg2: memref<2x16x16xf16>) {
+func.func @matmul_memref_strided(%arg0: memref<2x16x16xf16, affine_map<(d0, d1, d2) -> (d0 * 512 + d1 * 32 + d2)>>, %arg1: memref<16xf16>, %arg2: memref<2x16x16xf16>) {
   %cst_0 = arith.constant dense<0.000000e+00> : vector<16x16xf16>
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f16

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
index 0c44cea440faa..10da1d7c5ae0e 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
@@ -19,7 +19,7 @@
 // CMP64: %[[T4:.*]] = arith.cmpi slt, %[[T0]], %[[T3]] : vector<11xi64>
 // CMP64: return %[[T4]] : vector<11xi1>
 
-func @genbool_var_1d(%arg0: index) -> vector<11xi1> {
+func.func @genbool_var_1d(%arg0: index) -> vector<11xi1> {
   %0 = vector.create_mask %arg0 : vector<11xi1>
   return %0 : vector<11xi1>
 }
@@ -42,7 +42,7 @@ func @genbool_var_1d(%arg0: index) -> vector<11xi1> {
 // CMP64: %[[T4:.*]] = arith.cmpi slt, %[[T0]], %[[T3]] : vector<[11]xi64>
 // CMP64: return %[[T4]] : vector<[11]xi1>
 
-func @genbool_var_1d_scalable(%arg0: index) -> vector<[11]xi1> {
+func.func @genbool_var_1d_scalable(%arg0: index) -> vector<[11]xi1> {
   %0 = vector.create_mask %arg0 : vector<[11]xi1>
   return %0 : vector<[11]xi1>
 }
@@ -71,7 +71,7 @@ func @genbool_var_1d_scalable(%arg0: index) -> vector<[11]xi1> {
 // CMP64: %[[L:.*]] = llvm.intr.masked.load %{{.*}}, %[[M]], %{{.*}}
 // CMP64: return %[[L]] : vector<16xf32>
 
-func @transfer_read_1d(%A : memref<?xf32>, %i: index) -> vector<16xf32> {
+func.func @transfer_read_1d(%A : memref<?xf32>, %i: index) -> vector<16xf32> {
   %d = arith.constant -1.0: f32
   %f = vector.transfer_read %A[%i], %d {permutation_map = affine_map<(d0) -> (d0)>} : memref<?xf32>, vector<16xf32>
   return %f : vector<16xf32>

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
index a717c7dc500fb..55cfec5c6ff11 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
@@ -16,7 +16,7 @@
 // REASSOC-SAME: {reassoc = true} : (f32, vector<16xf32>) -> f32
 //      REASSOC: return %[[V]] : f32
 //
-func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
+func.func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.reduction <add>, %arg0 : vector<16xf32> into f32
   return %0 : f32
 }
@@ -36,7 +36,7 @@ func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
 // REASSOC-SAME: {reassoc = true} : (f32, vector<16xf32>) -> f32
 //      REASSOC: return %[[V]] : f32
 //
-func @reduce_mul_f32(%arg0: vector<16xf32>) -> f32 {
+func.func @reduce_mul_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.reduction <mul>, %arg0 : vector<16xf32> into f32
   return %0 : f32
 }

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 42901f07af463..adad1ea016ad2 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
 
 
-func @bitcast_f32_to_i32_vector_0d(%input: vector<f32>) -> vector<i32> {
+func.func @bitcast_f32_to_i32_vector_0d(%input: vector<f32>) -> vector<i32> {
   %0 = vector.bitcast %input : vector<f32> to vector<i32>
   return %0 : vector<i32>
 }
@@ -15,7 +15,7 @@ func @bitcast_f32_to_i32_vector_0d(%input: vector<f32>) -> vector<i32> {
 
 // -----
 
-func @bitcast_f32_to_i32_vector(%input: vector<16xf32>) -> vector<16xi32> {
+func.func @bitcast_f32_to_i32_vector(%input: vector<16xf32>) -> vector<16xi32> {
   %0 = vector.bitcast %input : vector<16xf32> to vector<16xi32>
   return %0 : vector<16xi32>
 }
@@ -26,7 +26,7 @@ func @bitcast_f32_to_i32_vector(%input: vector<16xf32>) -> vector<16xi32> {
 
 // -----
 
-func @bitcast_i8_to_f32_vector(%input: vector<64xi8>) -> vector<16xf32> {
+func.func @bitcast_i8_to_f32_vector(%input: vector<64xi8>) -> vector<16xf32> {
   %0 = vector.bitcast %input : vector<64xi8> to vector<16xf32>
   return %0 : vector<16xf32>
 }
@@ -37,7 +37,7 @@ func @bitcast_i8_to_f32_vector(%input: vector<64xi8>) -> vector<16xf32> {
 
 // -----
 
-func @bitcast_index_to_i8_vector(%input: vector<16xindex>) -> vector<128xi8> {
+func.func @bitcast_index_to_i8_vector(%input: vector<16xindex>) -> vector<128xi8> {
   %0 = vector.bitcast %input : vector<16xindex> to vector<128xi8>
   return %0 : vector<128xi8>
 }
@@ -49,7 +49,7 @@ func @bitcast_index_to_i8_vector(%input: vector<16xindex>) -> vector<128xi8> {
 
 // -----
 
-func @broadcast_vec0d_from_f32(%arg0: f32) -> vector<f32> {
+func.func @broadcast_vec0d_from_f32(%arg0: f32) -> vector<f32> {
   %0 = vector.broadcast %arg0 : f32 to vector<f32>
   return %0 : vector<f32>
 }
@@ -61,7 +61,7 @@ func @broadcast_vec0d_from_f32(%arg0: f32) -> vector<f32> {
 
 // -----
 
-func @broadcast_vec0d_from_vec0d(%arg0: vector<f32>) -> vector<f32> {
+func.func @broadcast_vec0d_from_vec0d(%arg0: vector<f32>) -> vector<f32> {
   %0 = vector.broadcast %arg0 : vector<f32> to vector<f32>
   return %0 : vector<f32>
 }
@@ -71,7 +71,7 @@ func @broadcast_vec0d_from_vec0d(%arg0: vector<f32>) -> vector<f32> {
 
 // -----
 
-func @broadcast_vec1d_from_f32(%arg0: f32) -> vector<2xf32> {
+func.func @broadcast_vec1d_from_f32(%arg0: f32) -> vector<2xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<2xf32>
   return %0 : vector<2xf32>
 }
@@ -83,7 +83,7 @@ func @broadcast_vec1d_from_f32(%arg0: f32) -> vector<2xf32> {
 
 // -----
 
-func @broadcast_vec1d_from_index(%arg0: index) -> vector<2xindex> {
+func.func @broadcast_vec1d_from_index(%arg0: index) -> vector<2xindex> {
   %0 = vector.broadcast %arg0 : index to vector<2xindex>
   return %0 : vector<2xindex>
 }
@@ -97,7 +97,7 @@ func @broadcast_vec1d_from_index(%arg0: index) -> vector<2xindex> {
 
 // -----
 
-func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
+func.func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<2x3xf32>
   return %0 : vector<2x3xf32>
 }
@@ -112,7 +112,7 @@ func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
 
 // -----
 
-func @broadcast_vec3d_from_scalar(%arg0: f32) -> vector<2x3x4xf32> {
+func.func @broadcast_vec3d_from_scalar(%arg0: f32) -> vector<2x3x4xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<2x3x4xf32>
   return %0 : vector<2x3x4xf32>
 }
@@ -128,7 +128,7 @@ func @broadcast_vec3d_from_scalar(%arg0: f32) -> vector<2x3x4xf32> {
 
 // -----
 
-func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> {
+func.func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> {
   %0 = vector.broadcast %arg0 : vector<2xf32> to vector<2xf32>
   return %0 : vector<2xf32>
 }
@@ -138,7 +138,7 @@ func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> {
 
 // -----
 
-func @broadcast_vec2d_from_vec0d(%arg0: vector<f32>) -> vector<3x2xf32> {
+func.func @broadcast_vec2d_from_vec0d(%arg0: vector<f32>) -> vector<3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<f32> to vector<3x2xf32>
   return %0 : vector<3x2xf32>
 }
@@ -159,7 +159,7 @@ func @broadcast_vec2d_from_vec0d(%arg0: vector<f32>) -> vector<3x2xf32> {
 
 // -----
 
-func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> {
+func.func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<2xf32> to vector<3x2xf32>
   return %0 : vector<3x2xf32>
 }
@@ -175,7 +175,7 @@ func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> {
 
 // -----
 
-func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x2xindex> {
+func.func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x2xindex> {
   %0 = vector.broadcast %arg0 : vector<2xindex> to vector<3x2xindex>
   return %0 : vector<3x2xindex>
 }
@@ -191,7 +191,7 @@ func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x2xind
 
 // -----
 
-func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> {
+func.func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<2xf32> to vector<4x3x2xf32>
   return %0 : vector<4x3x2xf32>
 }
@@ -216,7 +216,7 @@ func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> {
 
 // -----
 
-func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf32> {
+func.func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<3x2xf32> to vector<4x3x2xf32>
   return %0 : vector<4x3x2xf32>
 }
@@ -235,7 +235,7 @@ func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf32> {
 
 // -----
 
-func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> {
+func.func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> {
   %0 = vector.broadcast %arg0 : vector<1xf32> to vector<4xf32>
   return %0 : vector<4xf32>
 }
@@ -249,7 +249,7 @@ func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> {
 
 // -----
 
-func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> {
+func.func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> {
   %0 = vector.broadcast %arg0 : vector<1x4xf32> to vector<3x4xf32>
   return %0 : vector<3x4xf32>
 }
@@ -267,7 +267,7 @@ func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> {
 
 // -----
 
-func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> {
+func.func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> {
   %0 = vector.broadcast %arg0 : vector<4x1xf32> to vector<4x3xf32>
   return %0 : vector<4x3xf32>
 }
@@ -305,7 +305,7 @@ func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> {
 
 // -----
 
-func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2xf32> {
+func.func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<4x1x2xf32> to vector<4x3x2xf32>
   return %0 : vector<4x3x2xf32>
 }
@@ -341,7 +341,7 @@ func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2xf32>
 
 // -----
 
-func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32> {
+func.func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32> {
   %2 = vector.outerproduct %arg0, %arg1 : vector<2xf32>, vector<3xf32>
   return %2 : vector<2x3xf32>
 }
@@ -367,7 +367,7 @@ func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32
 
 // -----
 
-func @outerproduct_index(%arg0: vector<2xindex>, %arg1: vector<3xindex>) -> vector<2x3xindex> {
+func.func @outerproduct_index(%arg0: vector<2xindex>, %arg1: vector<3xindex>) -> vector<2x3xindex> {
   %2 = vector.outerproduct %arg0, %arg1 : vector<2xindex>, vector<3xindex>
   return %2 : vector<2x3xindex>
 }
@@ -388,7 +388,7 @@ func @outerproduct_index(%arg0: vector<2xindex>, %arg1: vector<3xindex>) -> vect
 
 // -----
 
-func @outerproduct_add(%arg0: vector<2xf32>, %arg1: vector<3xf32>, %arg2: vector<2x3xf32>) -> vector<2x3xf32> {
+func.func @outerproduct_add(%arg0: vector<2xf32>, %arg1: vector<3xf32>, %arg2: vector<2x3xf32>) -> vector<2x3xf32> {
   %2 = vector.outerproduct %arg0, %arg1, %arg2 : vector<2xf32>, vector<3xf32>
   return %2 : vector<2x3xf32>
 }
@@ -418,7 +418,7 @@ func @outerproduct_add(%arg0: vector<2xf32>, %arg1: vector<3xf32>, %arg2: vector
 
 // -----
 
-func @shuffle_1D_direct(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<2xf32> {
+func.func @shuffle_1D_direct(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<2xf32> {
   %1 = vector.shuffle %arg0, %arg1 [0, 1] : vector<2xf32>, vector<2xf32>
   return %1 : vector<2xf32>
 }
@@ -429,7 +429,7 @@ func @shuffle_1D_direct(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<2x
 
 // -----
 
-func @shuffle_1D_index_direct(%arg0: vector<2xindex>, %arg1: vector<2xindex>) -> vector<2xindex> {
+func.func @shuffle_1D_index_direct(%arg0: vector<2xindex>, %arg1: vector<2xindex>) -> vector<2xindex> {
   %1 = vector.shuffle %arg0, %arg1 [0, 1] : vector<2xindex>, vector<2xindex>
   return %1 : vector<2xindex>
 }
@@ -440,7 +440,7 @@ func @shuffle_1D_index_direct(%arg0: vector<2xindex>, %arg1: vector<2xindex>) ->
 
 // -----
 
-func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> {
+func.func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> {
   %1 = vector.shuffle %arg0, %arg1 [4, 3, 2, 1, 0] : vector<2xf32>, vector<3xf32>
   return %1 : vector<5xf32>
 }
@@ -472,7 +472,7 @@ func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> {
 
 // -----
 
-func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
+func.func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
   %1 = vector.shuffle %a, %b[1, 0, 2] : vector<1x4xf32>, vector<2x4xf32>
   return %1 : vector<3x4xf32>
 }
@@ -494,7 +494,7 @@ func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
 // -----
 
 // CHECK-LABEL: @extract_element_0d
-func @extract_element_0d(%a: vector<f32>) -> f32 {
+func.func @extract_element_0d(%a: vector<f32>) -> f32 {
   // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
   // CHECK: llvm.extractelement %{{.*}}[%[[C0]] : {{.*}}] : vector<1xf32>
   %1 = vector.extractelement %a[] : vector<f32>
@@ -503,7 +503,7 @@ func @extract_element_0d(%a: vector<f32>) -> f32 {
 
 // -----
 
-func @extract_element(%arg0: vector<16xf32>) -> f32 {
+func.func @extract_element(%arg0: vector<16xf32>) -> f32 {
   %0 = arith.constant 15 : i32
   %1 = vector.extractelement %arg0[%0 : i32]: vector<16xf32>
   return %1 : f32
@@ -516,7 +516,7 @@ func @extract_element(%arg0: vector<16xf32>) -> f32 {
 
 // -----
 
-func @extract_element_index(%arg0: vector<16xf32>) -> f32 {
+func.func @extract_element_index(%arg0: vector<16xf32>) -> f32 {
   %0 = arith.constant 15 : index
   %1 = vector.extractelement %arg0[%0 : index]: vector<16xf32>
   return %1 : f32
@@ -530,7 +530,7 @@ func @extract_element_index(%arg0: vector<16xf32>) -> f32 {
 
 // -----
 
-func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
+func.func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.extract %arg0[15]: vector<16xf32>
   return %0 : f32
 }
@@ -541,7 +541,7 @@ func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
 
 // -----
 
-func @extract_index_element_from_vec_1d(%arg0: vector<16xindex>) -> index {
+func.func @extract_index_element_from_vec_1d(%arg0: vector<16xindex>) -> index {
   %0 = vector.extract %arg0[15]: vector<16xindex>
   return %0 : index
 }
@@ -555,7 +555,7 @@ func @extract_index_element_from_vec_1d(%arg0: vector<16xindex>) -> index {
 
 // -----
 
-func @extract_vec_2d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> {
+func.func @extract_vec_2d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> {
   %0 = vector.extract %arg0[0]: vector<4x3x16xf32>
   return %0 : vector<3x16xf32>
 }
@@ -565,7 +565,7 @@ func @extract_vec_2d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32>
 
 // -----
 
-func @extract_vec_1d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<16xf32> {
+func.func @extract_vec_1d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<16xf32> {
   %0 = vector.extract %arg0[0, 0]: vector<4x3x16xf32>
   return %0 : vector<16xf32>
 }
@@ -575,7 +575,7 @@ func @extract_vec_1d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<16xf32> {
 
 // -----
 
-func @extract_element_from_vec_3d(%arg0: vector<4x3x16xf32>) -> f32 {
+func.func @extract_element_from_vec_3d(%arg0: vector<4x3x16xf32>) -> f32 {
   %0 = vector.extract %arg0[0, 0, 0]: vector<4x3x16xf32>
   return %0 : f32
 }
@@ -589,7 +589,7 @@ func @extract_element_from_vec_3d(%arg0: vector<4x3x16xf32>) -> f32 {
 
 // CHECK-LABEL: @insert_element_0d
 // CHECK-SAME: %[[A:.*]]: f32,
-func @insert_element_0d(%a: f32, %b: vector<f32>) -> vector<f32> {
+func.func @insert_element_0d(%a: f32, %b: vector<f32>) -> vector<f32> {
   // CHECK: %[[B:.*]] =  builtin.unrealized_conversion_cast %{{.*}} :
   // CHECK:   vector<f32> to vector<1xf32>
   // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -600,7 +600,7 @@ func @insert_element_0d(%a: f32, %b: vector<f32>) -> vector<f32> {
 
 // -----
 
-func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
+func.func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = arith.constant 3 : i32
   %1 = vector.insertelement %arg0, %arg1[%0 : i32] : vector<4xf32>
   return %1 : vector<4xf32>
@@ -614,7 +614,7 @@ func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
 
 // -----
 
-func @insert_element_index(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
+func.func @insert_element_index(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = arith.constant 3 : index
   %1 = vector.insertelement %arg0, %arg1[%0 : index] : vector<4xf32>
   return %1 : vector<4xf32>
@@ -629,7 +629,7 @@ func @insert_element_index(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
 
 // -----
 
-func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
+func.func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.insert %arg0, %arg1[3] : f32 into vector<4xf32>
   return %0 : vector<4xf32>
 }
@@ -640,7 +640,7 @@ func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf
 
 // -----
 
-func @insert_index_element_into_vec_1d(%arg0: index, %arg1: vector<4xindex>) -> vector<4xindex> {
+func.func @insert_index_element_into_vec_1d(%arg0: index, %arg1: vector<4xindex>) -> vector<4xindex> {
   %0 = vector.insert %arg0, %arg1[3] : index into vector<4xindex>
   return %0 : vector<4xindex>
 }
@@ -656,7 +656,7 @@ func @insert_index_element_into_vec_1d(%arg0: index, %arg1: vector<4xindex>) ->
 
 // -----
 
-func @insert_vec_2d_into_vec_3d(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
+func.func @insert_vec_2d_into_vec_3d(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
   %0 = vector.insert %arg0, %arg1[3] : vector<8x16xf32> into vector<4x8x16xf32>
   return %0 : vector<4x8x16xf32>
 }
@@ -666,7 +666,7 @@ func @insert_vec_2d_into_vec_3d(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf3
 
 // -----
 
-func @insert_vec_1d_into_vec_3d(%arg0: vector<16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
+func.func @insert_vec_1d_into_vec_3d(%arg0: vector<16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
   %0 = vector.insert %arg0, %arg1[3, 7] : vector<16xf32> into vector<4x8x16xf32>
   return %0 : vector<4x8x16xf32>
 }
@@ -676,7 +676,7 @@ func @insert_vec_1d_into_vec_3d(%arg0: vector<16xf32>, %arg1: vector<4x8x16xf32>
 
 // -----
 
-func @insert_element_into_vec_3d(%arg0: f32, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
+func.func @insert_element_into_vec_3d(%arg0: f32, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
   %0 = vector.insert %arg0, %arg1[3, 7, 15] : f32 into vector<4x8x16xf32>
   return %0 : vector<4x8x16xf32>
 }
@@ -689,7 +689,7 @@ func @insert_element_into_vec_3d(%arg0: f32, %arg1: vector<4x8x16xf32>) -> vecto
 
 // -----
 
-func @vector_type_cast(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
+func.func @vector_type_cast(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>>
   return %0 : memref<vector<8x8x8xf32>>
 }
@@ -706,7 +706,7 @@ func @vector_type_cast(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
 
 // -----
 
-func @vector_index_type_cast(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8xindex>> {
+func.func @vector_index_type_cast(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8xindex>> {
   %0 = vector.type_cast %arg0: memref<8x8x8xindex> to memref<vector<8x8x8xindex>>
   return %0 : memref<vector<8x8x8xindex>>
 }
@@ -718,7 +718,7 @@ func @vector_index_type_cast(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8x
 
 // -----
 
-func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
+func.func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref<vector<8x8x8xf32>, 3>
   return %0 : memref<vector<8x8x8xf32>, 3>
 }
@@ -735,7 +735,7 @@ func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref
 
 // -----
 
-func @vector_print_scalar_i1(%arg0: i1) {
+func.func @vector_print_scalar_i1(%arg0: i1) {
   vector.print %arg0 : i1
   return
 }
@@ -750,7 +750,7 @@ func @vector_print_scalar_i1(%arg0: i1) {
 
 // -----
 
-func @vector_print_scalar_i4(%arg0: i4) {
+func.func @vector_print_scalar_i4(%arg0: i4) {
   vector.print %arg0 : i4
   return
 }
@@ -762,7 +762,7 @@ func @vector_print_scalar_i4(%arg0: i4) {
 
 // -----
 
-func @vector_print_scalar_si4(%arg0: si4) {
+func.func @vector_print_scalar_si4(%arg0: si4) {
   vector.print %arg0 : si4
   return
 }
@@ -775,7 +775,7 @@ func @vector_print_scalar_si4(%arg0: si4) {
 
 // -----
 
-func @vector_print_scalar_ui4(%arg0: ui4) {
+func.func @vector_print_scalar_ui4(%arg0: ui4) {
   vector.print %arg0 : ui4
   return
 }
@@ -788,7 +788,7 @@ func @vector_print_scalar_ui4(%arg0: ui4) {
 
 // -----
 
-func @vector_print_scalar_i32(%arg0: i32) {
+func.func @vector_print_scalar_i32(%arg0: i32) {
   vector.print %arg0 : i32
   return
 }
@@ -800,7 +800,7 @@ func @vector_print_scalar_i32(%arg0: i32) {
 
 // -----
 
-func @vector_print_scalar_ui32(%arg0: ui32) {
+func.func @vector_print_scalar_ui32(%arg0: ui32) {
   vector.print %arg0 : ui32
   return
 }
@@ -812,7 +812,7 @@ func @vector_print_scalar_ui32(%arg0: ui32) {
 
 // -----
 
-func @vector_print_scalar_i40(%arg0: i40) {
+func.func @vector_print_scalar_i40(%arg0: i40) {
   vector.print %arg0 : i40
   return
 }
@@ -824,7 +824,7 @@ func @vector_print_scalar_i40(%arg0: i40) {
 
 // -----
 
-func @vector_print_scalar_si40(%arg0: si40) {
+func.func @vector_print_scalar_si40(%arg0: si40) {
   vector.print %arg0 : si40
   return
 }
@@ -837,7 +837,7 @@ func @vector_print_scalar_si40(%arg0: si40) {
 
 // -----
 
-func @vector_print_scalar_ui40(%arg0: ui40) {
+func.func @vector_print_scalar_ui40(%arg0: ui40) {
   vector.print %arg0 : ui40
   return
 }
@@ -850,7 +850,7 @@ func @vector_print_scalar_ui40(%arg0: ui40) {
 
 // -----
 
-func @vector_print_scalar_i64(%arg0: i64) {
+func.func @vector_print_scalar_i64(%arg0: i64) {
   vector.print %arg0 : i64
   return
 }
@@ -861,7 +861,7 @@ func @vector_print_scalar_i64(%arg0: i64) {
 
 // -----
 
-func @vector_print_scalar_ui64(%arg0: ui64) {
+func.func @vector_print_scalar_ui64(%arg0: ui64) {
   vector.print %arg0 : ui64
   return
 }
@@ -873,7 +873,7 @@ func @vector_print_scalar_ui64(%arg0: ui64) {
 
 // -----
 
-func @vector_print_scalar_index(%arg0: index) {
+func.func @vector_print_scalar_index(%arg0: index) {
   vector.print %arg0 : index
   return
 }
@@ -885,7 +885,7 @@ func @vector_print_scalar_index(%arg0: index) {
 
 // -----
 
-func @vector_print_scalar_f32(%arg0: f32) {
+func.func @vector_print_scalar_f32(%arg0: f32) {
   vector.print %arg0 : f32
   return
 }
@@ -896,7 +896,7 @@ func @vector_print_scalar_f32(%arg0: f32) {
 
 // -----
 
-func @vector_print_scalar_f64(%arg0: f64) {
+func.func @vector_print_scalar_f64(%arg0: f64) {
   vector.print %arg0 : f64
   return
 }
@@ -907,7 +907,7 @@ func @vector_print_scalar_f64(%arg0: f64) {
 
 // -----
 
-func @vector_print_vector_0d(%arg0: vector<f32>) {
+func.func @vector_print_vector_0d(%arg0: vector<f32>) {
   vector.print %arg0 : vector<f32>
   return
 }
@@ -924,7 +924,7 @@ func @vector_print_vector_0d(%arg0: vector<f32>) {
 
 // -----
 
-func @vector_print_vector(%arg0: vector<2x2xf32>) {
+func.func @vector_print_vector(%arg0: vector<2x2xf32>) {
   vector.print %arg0 : vector<2x2xf32>
   return
 }
@@ -958,7 +958,7 @@ func @vector_print_vector(%arg0: vector<2x2xf32>) {
 
 // -----
 
-func @extract_strided_slice1(%arg0: vector<4xf32>) -> vector<2xf32> {
+func.func @extract_strided_slice1(%arg0: vector<4xf32>) -> vector<2xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32>
   return %0 : vector<2xf32>
 }
@@ -969,7 +969,7 @@ func @extract_strided_slice1(%arg0: vector<4xf32>) -> vector<2xf32> {
 
 // -----
 
-func @extract_strided_index_slice1(%arg0: vector<4xindex>) -> vector<2xindex> {
+func.func @extract_strided_index_slice1(%arg0: vector<4xindex>) -> vector<2xindex> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xindex> to vector<2xindex>
   return %0 : vector<2xindex>
 }
@@ -982,7 +982,7 @@ func @extract_strided_index_slice1(%arg0: vector<4xindex>) -> vector<2xindex> {
 
 // -----
 
-func @extract_strided_slice2(%arg0: vector<4x8xf32>) -> vector<2x8xf32> {
+func.func @extract_strided_slice2(%arg0: vector<4x8xf32>) -> vector<2x8xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4x8xf32> to vector<2x8xf32>
   return %0 : vector<2x8xf32>
 }
@@ -999,7 +999,7 @@ func @extract_strided_slice2(%arg0: vector<4x8xf32>) -> vector<2x8xf32> {
 
 // -----
 
-func @extract_strided_slice3(%arg0: vector<4x8xf32>) -> vector<2x2xf32> {
+func.func @extract_strided_slice3(%arg0: vector<4x8xf32>) -> vector<2x2xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x8xf32> to vector<2x2xf32>
   return %0 : vector<2x2xf32>
 }
@@ -1019,7 +1019,7 @@ func @extract_strided_slice3(%arg0: vector<4x8xf32>) -> vector<2x2xf32> {
 
 // -----
 
-func @insert_strided_slice1(%b: vector<4x4xf32>, %c: vector<4x4x4xf32>) -> vector<4x4x4xf32> {
+func.func @insert_strided_slice1(%b: vector<4x4xf32>, %c: vector<4x4x4xf32>) -> vector<4x4x4xf32> {
   %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x4xf32> into vector<4x4x4xf32>
   return %0 : vector<4x4x4xf32>
 }
@@ -1029,7 +1029,7 @@ func @insert_strided_slice1(%b: vector<4x4xf32>, %c: vector<4x4x4xf32>) -> vecto
 
 // -----
 
-func @insert_strided_index_slice1(%b: vector<4x4xindex>, %c: vector<4x4x4xindex>) -> vector<4x4x4xindex> {
+func.func @insert_strided_index_slice1(%b: vector<4x4xindex>, %c: vector<4x4x4xindex>) -> vector<4x4x4xindex> {
   %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x4xindex> into vector<4x4x4xindex>
   return %0 : vector<4x4x4xindex>
 }
@@ -1039,7 +1039,7 @@ func @insert_strided_index_slice1(%b: vector<4x4xindex>, %c: vector<4x4x4xindex>
 
 // -----
 
-func @insert_strided_slice2(%a: vector<2x2xf32>, %b: vector<4x4xf32>) -> vector<4x4xf32> {
+func.func @insert_strided_slice2(%a: vector<2x2xf32>, %b: vector<4x4xf32>) -> vector<4x4xf32> {
   %0 = vector.insert_strided_slice %a, %b {offsets = [2, 2], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
   return %0 : vector<4x4xf32>
 }
@@ -1064,7 +1064,7 @@ func @insert_strided_slice2(%a: vector<2x2xf32>, %b: vector<4x4xf32>) -> vector<
 
 // -----
 
-func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -> vector<16x4x8xf32> {
+func.func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -> vector<16x4x8xf32> {
   %0 = vector.insert_strided_slice %arg0, %arg1 {offsets = [0, 0, 2], strides = [1, 1]}:
         vector<2x4xf32> into vector<16x4x8xf32>
   return %0 : vector<16x4x8xf32>
@@ -1084,7 +1084,7 @@ func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -
 
 // -----
 
-func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>) {
+func.func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>) {
   // CHECK-LABEL: @vector_fma
   //  CHECK-SAME: %[[A:.*]]: vector<8xf32>
   //  CHECK-SAME: %[[B:.*]]: vector<2x4xf32>
@@ -1117,7 +1117,7 @@ func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>)
 
 // -----
 
-func @reduce_f16(%arg0: vector<16xf16>) -> f16 {
+func.func @reduce_f16(%arg0: vector<16xf16>) -> f16 {
   %0 = vector.reduction <add>, %arg0 : vector<16xf16> into f16
   return %0 : f16
 }
@@ -1130,7 +1130,7 @@ func @reduce_f16(%arg0: vector<16xf16>) -> f16 {
 
 // -----
 
-func @reduce_f32(%arg0: vector<16xf32>) -> f32 {
+func.func @reduce_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.reduction <add>, %arg0 : vector<16xf32> into f32
   return %0 : f32
 }
@@ -1143,7 +1143,7 @@ func @reduce_f32(%arg0: vector<16xf32>) -> f32 {
 
 // -----
 
-func @reduce_f64(%arg0: vector<16xf64>) -> f64 {
+func.func @reduce_f64(%arg0: vector<16xf64>) -> f64 {
   %0 = vector.reduction <add>, %arg0 : vector<16xf64> into f64
   return %0 : f64
 }
@@ -1156,7 +1156,7 @@ func @reduce_f64(%arg0: vector<16xf64>) -> f64 {
 
 // -----
 
-func @reduce_i8(%arg0: vector<16xi8>) -> i8 {
+func.func @reduce_i8(%arg0: vector<16xi8>) -> i8 {
   %0 = vector.reduction <add>, %arg0 : vector<16xi8> into i8
   return %0 : i8
 }
@@ -1167,7 +1167,7 @@ func @reduce_i8(%arg0: vector<16xi8>) -> i8 {
 
 // -----
 
-func @reduce_i32(%arg0: vector<16xi32>) -> i32 {
+func.func @reduce_i32(%arg0: vector<16xi32>) -> i32 {
   %0 = vector.reduction <add>, %arg0 : vector<16xi32> into i32
   return %0 : i32
 }
@@ -1178,7 +1178,7 @@ func @reduce_i32(%arg0: vector<16xi32>) -> i32 {
 
 // -----
 
-func @reduce_i64(%arg0: vector<16xi64>) -> i64 {
+func.func @reduce_i64(%arg0: vector<16xi64>) -> i64 {
   %0 = vector.reduction <add>, %arg0 : vector<16xi64> into i64
   return %0 : i64
 }
@@ -1189,7 +1189,7 @@ func @reduce_i64(%arg0: vector<16xi64>) -> i64 {
 
 // -----
 
-func @reduce_index(%arg0: vector<16xindex>) -> index {
+func.func @reduce_index(%arg0: vector<16xindex>) -> index {
   %0 = vector.reduction <add>, %arg0 : vector<16xindex> into index
   return %0 : index
 }
@@ -1203,7 +1203,7 @@ func @reduce_index(%arg0: vector<16xindex>) -> index {
 //                          4x16                16x3               4x3
 // -----
 
-func @matrix_ops(%A: vector<64xf64>, %B: vector<48xf64>) -> vector<12xf64> {
+func.func @matrix_ops(%A: vector<64xf64>, %B: vector<48xf64>) -> vector<12xf64> {
   %C = vector.matrix_multiply %A, %B
     { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32 } :
     (vector<64xf64>, vector<48xf64>) -> vector<12xf64>
@@ -1216,7 +1216,7 @@ func @matrix_ops(%A: vector<64xf64>, %B: vector<48xf64>) -> vector<12xf64> {
 
 // -----
 
-func @matrix_ops_index(%A: vector<64xindex>, %B: vector<48xindex>) -> vector<12xindex> {
+func.func @matrix_ops_index(%A: vector<64xindex>, %B: vector<48xindex>) -> vector<12xindex> {
   %C = vector.matrix_multiply %A, %B
     { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32 } :
     (vector<64xindex>, vector<48xindex>) -> vector<12xindex>
@@ -1229,7 +1229,7 @@ func @matrix_ops_index(%A: vector<64xindex>, %B: vector<48xindex>) -> vector<12x
 
 // -----
 
-func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
+func.func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
   %f7 = arith.constant 7.0: f32
   %f = vector.transfer_read %A[%base], %f7
       {permutation_map = affine_map<(d0) -> (d0)>} :
@@ -1306,7 +1306,7 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
 
 // -----
 
-func @transfer_read_index_1d(%A : memref<?xindex>, %base: index) -> vector<17xindex> {
+func.func @transfer_read_index_1d(%A : memref<?xindex>, %base: index) -> vector<17xindex> {
   %f7 = arith.constant 7: index
   %f = vector.transfer_read %A[%base], %f7
       {permutation_map = affine_map<(d0) -> (d0)>} :
@@ -1329,7 +1329,7 @@ func @transfer_read_index_1d(%A : memref<?xindex>, %base: index) -> vector<17xin
 
 // -----
 
-func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index) -> vector<17xf32> {
+func.func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index) -> vector<17xf32> {
   %f7 = arith.constant 7.0: f32
   %f = vector.transfer_read %A[%base0, %base1], %f7
       {permutation_map = affine_map<(d0, d1) -> (d1)>} :
@@ -1358,7 +1358,7 @@ func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index)
 
 // -----
 
-func @transfer_read_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -> vector<17xf32> {
+func.func @transfer_read_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -> vector<17xf32> {
   %f7 = arith.constant 7.0: f32
   %f = vector.transfer_read %A[%base], %f7
       {permutation_map = affine_map<(d0) -> (d0)>} :
@@ -1389,7 +1389,7 @@ func @transfer_read_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -
 
 // -----
 
-func @transfer_read_1d_inbounds(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
+func.func @transfer_read_1d_inbounds(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
   %f7 = arith.constant 7.0: f32
   %f = vector.transfer_read %A[%base], %f7 {in_bounds = [true]} :
     memref<?xf32>, vector<17xf32>
@@ -1415,7 +1415,7 @@ func @transfer_read_1d_inbounds(%A : memref<?xf32>, %base: index) -> vector<17xf
 // CHECK: %[[mask2:.*]] = arith.andi %[[cmpi]], %[[mask1]]
 // CHECK: %[[r:.*]] = llvm.intr.masked.load %{{.*}}, %[[mask2]]
 // CHECK: return %[[r]]
-func @transfer_read_1d_mask(%A : memref<?xf32>, %base : index) -> vector<5xf32> {
+func.func @transfer_read_1d_mask(%A : memref<?xf32>, %base : index) -> vector<5xf32> {
   %m = arith.constant dense<[0, 0, 1, 0, 1]> : vector<5xi1>
   %f7 = arith.constant 7.0: f32
   %f = vector.transfer_read %A[%base], %f7, %m : memref<?xf32>, vector<5xf32>
@@ -1424,7 +1424,7 @@ func @transfer_read_1d_mask(%A : memref<?xf32>, %base : index) -> vector<5xf32>
 
 // -----
 
-func @genbool_0d_f() -> vector<i1> {
+func.func @genbool_0d_f() -> vector<i1> {
   %0 = vector.constant_mask [0] : vector<i1>
   return %0 : vector<i1>
 }
@@ -1434,7 +1434,7 @@ func @genbool_0d_f() -> vector<i1> {
 
 // -----
 
-func @genbool_0d_t() -> vector<i1> {
+func.func @genbool_0d_t() -> vector<i1> {
   %0 = vector.constant_mask [1] : vector<i1>
   return %0 : vector<i1>
 }
@@ -1444,7 +1444,7 @@ func @genbool_0d_t() -> vector<i1> {
 
 // -----
 
-func @genbool_1d() -> vector<8xi1> {
+func.func @genbool_1d() -> vector<8xi1> {
   %0 = vector.constant_mask [4] : vector<8xi1>
   return %0 : vector<8xi1>
 }
@@ -1454,7 +1454,7 @@ func @genbool_1d() -> vector<8xi1> {
 
 // -----
 
-func @genbool_1d_scalable() -> vector<[8]xi1> {
+func.func @genbool_1d_scalable() -> vector<[8]xi1> {
   %0 = vector.constant_mask [0] : vector<[8]xi1>
   return %0 : vector<[8]xi1>
 }
@@ -1464,7 +1464,7 @@ func @genbool_1d_scalable() -> vector<[8]xi1> {
 
 // -----
 
-func @genbool_2d() -> vector<4x4xi1> {
+func.func @genbool_2d() -> vector<4x4xi1> {
   %v = vector.constant_mask [2, 2] : vector<4x4xi1>
   return %v: vector<4x4xi1>
 }
@@ -1480,7 +1480,7 @@ func @genbool_2d() -> vector<4x4xi1> {
 
 // -----
 
-func @create_mask_0d(%a : index) -> vector<i1> {
+func.func @create_mask_0d(%a : index) -> vector<i1> {
   %v = vector.create_mask %a : vector<i1>
   return %v: vector<i1>
 }
@@ -1496,7 +1496,7 @@ func @create_mask_0d(%a : index) -> vector<i1> {
 
 // -----
 
-func @create_mask_1d(%a : index) -> vector<4xi1> {
+func.func @create_mask_1d(%a : index) -> vector<4xi1> {
   %v = vector.create_mask %a : vector<4xi1>
   return %v: vector<4xi1>
 }
@@ -1510,7 +1510,7 @@ func @create_mask_1d(%a : index) -> vector<4xi1> {
 // CHECK:  %[[result:.*]] = arith.cmpi slt, %[[indices]], %[[bounds]] : vector<4xi32>
 // CHECK:  return %[[result]] : vector<4xi1>
 
-func @create_mask_1d_scalable(%a : index) -> vector<[4]xi1> {
+func.func @create_mask_1d_scalable(%a : index) -> vector<[4]xi1> {
   %v = vector.create_mask %a : vector<[4]xi1>
   return %v: vector<[4]xi1>
 }
@@ -1526,7 +1526,7 @@ func @create_mask_1d_scalable(%a : index) -> vector<[4]xi1> {
 
 // -----
 
-func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
+func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
   %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 }
      : vector<16xf32> -> vector<16xf32>
   return %0 : vector<16xf32>
@@ -1541,7 +1541,7 @@ func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
 
 // -----
 
-func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> {
+func.func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> {
   %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 }
      : vector<16xindex> -> vector<16xindex>
   return %0 : vector<16xindex>
@@ -1557,7 +1557,7 @@ func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> {
 
 // -----
 
-func @vector_load_op(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+func.func @vector_load_op(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<8xf32>
   return %0 : vector<8xf32>
 }
@@ -1572,7 +1572,7 @@ func @vector_load_op(%memref : memref<200x100xf32>, %i : index, %j : index) -> v
 
 // -----
 
-func @vector_load_op_index(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<8xindex> {
+func.func @vector_load_op_index(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<8xindex> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xindex>, vector<8xindex>
   return %0 : vector<8xindex>
 }
@@ -1583,7 +1583,7 @@ func @vector_load_op_index(%memref : memref<200x100xindex>, %i : index, %j : ind
 
 // -----
 
-func @vector_store_op(%memref : memref<200x100xf32>, %i : index, %j : index) {
+func.func @vector_store_op(%memref : memref<200x100xf32>, %i : index, %j : index) {
   %val = arith.constant dense<11.0> : vector<4xf32>
   vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<4xf32>
   return
@@ -1599,7 +1599,7 @@ func @vector_store_op(%memref : memref<200x100xf32>, %i : index, %j : index) {
 
 // -----
 
-func @vector_store_op_index(%memref : memref<200x100xindex>, %i : index, %j : index) {
+func.func @vector_store_op_index(%memref : memref<200x100xindex>, %i : index, %j : index) {
   %val = arith.constant dense<11> : vector<4xindex>
   vector.store %val, %memref[%i, %j] : memref<200x100xindex>, vector<4xindex>
   return
@@ -1609,7 +1609,7 @@ func @vector_store_op_index(%memref : memref<200x100xindex>, %i : index, %j : in
 
 // -----
 
-func @masked_load_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
+func.func @masked_load_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
   %c0 = arith.constant 0: index
   %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return %0 : vector<16xf32>
@@ -1625,7 +1625,7 @@ func @masked_load_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<1
 
 // -----
 
-func @masked_load_op_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2: vector<16xindex>) -> vector<16xindex> {
+func.func @masked_load_op_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2: vector<16xindex>) -> vector<16xindex> {
   %c0 = arith.constant 0: index
   %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<16xi1>, vector<16xindex> into vector<16xindex>
   return %0 : vector<16xindex>
@@ -1635,7 +1635,7 @@ func @masked_load_op_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2:
 
 // -----
 
-func @masked_store_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
+func.func @masked_store_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
   %c0 = arith.constant 0: index
   vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32>
   return
@@ -1650,7 +1650,7 @@ func @masked_store_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<
 
 // -----
 
-func @masked_store_op_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2: vector<16xindex>) {
+func.func @masked_store_op_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2: vector<16xindex>) {
   %c0 = arith.constant 0: index
   vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<16xi1>, vector<16xindex>
   return
@@ -1660,7 +1660,7 @@ func @masked_store_op_index(%arg0: memref<?xindex>, %arg1: vector<16xi1>, %arg2:
 
 // -----
 
-func @gather_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> {
+func.func @gather_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> {
   %0 = arith.constant 0: index
   %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref<?xf32>, vector<3xi32>, vector<3xi1>, vector<3xf32> into vector<3xf32>
   return %1 : vector<3xf32>
@@ -1673,7 +1673,7 @@ func @gather_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>,
 
 // -----
 
-func @gather_op_index(%arg0: memref<?xindex>, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) -> vector<3xindex> {
+func.func @gather_op_index(%arg0: memref<?xindex>, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) -> vector<3xindex> {
   %0 = arith.constant 0: index
   %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref<?xindex>, vector<3xindex>, vector<3xi1>, vector<3xindex> into vector<3xindex>
   return %1 : vector<3xindex>
@@ -1686,7 +1686,7 @@ func @gather_op_index(%arg0: memref<?xindex>, %arg1: vector<3xindex>, %arg2: vec
 
 // -----
 
-func @gather_2d_op(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) -> vector<4xf32> {
+func.func @gather_2d_op(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) -> vector<4xf32> {
   %0 = arith.constant 3 : index
   %1 = vector.gather %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x4xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32> into vector<4xf32>
   return %1 : vector<4xf32>
@@ -1700,7 +1700,7 @@ func @gather_2d_op(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4
 
 // -----
 
-func @scatter_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) {
+func.func @scatter_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) {
   %0 = arith.constant 0: index
   vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref<?xf32>, vector<3xi32>, vector<3xi1>, vector<3xf32>
   return
@@ -1712,7 +1712,7 @@ func @scatter_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>
 
 // -----
 
-func @scatter_op_index(%arg0: memref<?xindex>, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) {
+func.func @scatter_op_index(%arg0: memref<?xindex>, %arg1: vector<3xindex>, %arg2: vector<3xi1>, %arg3: vector<3xindex>) {
   %0 = arith.constant 0: index
   vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref<?xindex>, vector<3xindex>, vector<3xi1>, vector<3xindex>
   return
@@ -1724,7 +1724,7 @@ func @scatter_op_index(%arg0: memref<?xindex>, %arg1: vector<3xindex>, %arg2: ve
 
 // -----
 
-func @scatter_2d_op(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) {
+func.func @scatter_2d_op(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<4xi1>, %arg3: vector<4xf32>) {
   %0 = arith.constant 3 : index
   vector.scatter %arg0[%0, %0][%arg1], %arg2, %arg3 : memref<4x4xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32>
   return
@@ -1737,7 +1737,7 @@ func @scatter_2d_op(%arg0: memref<4x4xf32>, %arg1: vector<4xi32>, %arg2: vector<
 
 // -----
 
-func @expand_load_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) -> vector<11xf32> {
+func.func @expand_load_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) -> vector<11xf32> {
   %c0 = arith.constant 0: index
   %0 = vector.expandload %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<11xi1>, vector<11xf32> into vector<11xf32>
   return %0 : vector<11xf32>
@@ -1752,7 +1752,7 @@ func @expand_load_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<1
 
 // -----
 
-func @expand_load_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>, %arg2: vector<11xindex>) -> vector<11xindex> {
+func.func @expand_load_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>, %arg2: vector<11xindex>) -> vector<11xindex> {
   %c0 = arith.constant 0: index
   %0 = vector.expandload %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<11xi1>, vector<11xindex> into vector<11xindex>
   return %0 : vector<11xindex>
@@ -1762,7 +1762,7 @@ func @expand_load_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>, %arg2:
 
 // -----
 
-func @compress_store_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) {
+func.func @compress_store_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) {
   %c0 = arith.constant 0: index
   vector.compressstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<11xi1>, vector<11xf32>
   return
@@ -1776,7 +1776,7 @@ func @compress_store_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vecto
 
 // -----
 
-func @compress_store_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>, %arg2: vector<11xindex>) {
+func.func @compress_store_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>, %arg2: vector<11xindex>) {
   %c0 = arith.constant 0: index
   vector.compressstore %arg0[%c0], %arg1, %arg2 : memref<?xindex>, vector<11xi1>, vector<11xindex>
   return
@@ -1788,7 +1788,7 @@ func @compress_store_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>, %arg
 
 // CHECK-LABEL: @splat_0d
 // CHECK-SAME: %[[ARG:.*]]: f32
-func @splat_0d(%a: f32) -> vector<f32> {
+func.func @splat_0d(%a: f32) -> vector<f32> {
   %v = vector.splat %a : vector<f32>
   return %v : vector<f32>
 }
@@ -1803,7 +1803,7 @@ func @splat_0d(%a: f32) -> vector<f32> {
 // CHECK-LABEL: @splat
 // CHECK-SAME: %[[A:arg[0-9]+]]: vector<4xf32>
 // CHECK-SAME: %[[ELT:arg[0-9]+]]: f32
-func @splat(%a: vector<4xf32>, %b: f32) -> vector<4xf32> {
+func.func @splat(%a: vector<4xf32>, %b: f32) -> vector<4xf32> {
   %vb = vector.splat %b : vector<4xf32>
   %r = arith.mulf %a, %vb : vector<4xf32>
   return %r : vector<4xf32>

diff --git a/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir b/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir
index 539828d1bbcb1..1a91e8b9fbc30 100644
--- a/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir
@@ -9,7 +9,7 @@
 //       CHECK: }
 //       CHECK: %[[LOADED:.*]] = memref.load %[[ALLOC]][] : memref<vector<4x9xf32>>
 //       CHECK: return %[[LOADED]] : vector<4x9xf32>
-func @transfer_read_2d(%A : tensor<?x?xf32>, %base1 : index, %base2 : index)
+func.func @transfer_read_2d(%A : tensor<?x?xf32>, %base1 : index, %base2 : index)
     -> (vector<4x9xf32>){
   %p = arith.constant -42.0: f32
   %f = vector.transfer_read %A[%base1, %base2], %p {in_bounds = [true, true]}
@@ -29,7 +29,7 @@ func @transfer_read_2d(%A : tensor<?x?xf32>, %base1 : index, %base2 : index)
 //       CHECK:   scf.yield %[[WRITE]] : tensor<?x?xf32>
 //       CHECK: }
 //       CHECK: return %[[RESULT]] : tensor<?x?xf32>
-func @transfer_write_2d(%A : tensor<?x?xf32>, %vec : vector<2x3xf32>,
+func.func @transfer_write_2d(%A : tensor<?x?xf32>, %vec : vector<2x3xf32>,
                         %base1 : index, %base2 : index) -> (tensor<?x?xf32>) {
   %t = vector.transfer_write %vec, %A[%base1, %base2] {in_bounds = [true, true]}
       : vector<2x3xf32>, tensor<?x?xf32>

diff --git a/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir b/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir
index c03253d4cba6a..4da63a90c5c1a 100644
--- a/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir
@@ -11,7 +11,7 @@
 //       CHECK: %[[V3:.*]] = vector.transfer_read %{{.*}}[{{.*}}], %{{.*}} {in_bounds = [true]} : tensor<?x?xf32>, vector<9xf32>
 //       CHECK: %[[I3:.*]] = vector.insert %[[V3]], %[[I2]] [3] : vector<9xf32> into vector<4x9xf32>
 //       CHECK: return %[[I3]] : vector<4x9xf32>
-func @transfer_read_2d(%A : tensor<?x?xf32>, %base1 : index, %base2 : index)
+func.func @transfer_read_2d(%A : tensor<?x?xf32>, %base1 : index, %base2 : index)
     -> (vector<4x9xf32>){
   %p = arith.constant -42.0: f32
   %f = vector.transfer_read %A[%base1, %base2], %p {in_bounds = [true, true]}
@@ -27,7 +27,7 @@ func @transfer_read_2d(%A : tensor<?x?xf32>, %base1 : index, %base2 : index)
 //       CHECK:   %[[V1:.*]] = vector.extract %{{.*}}[1] : vector<2x3xf32>
 //       CHECK:   %[[T1:.*]] = vector.transfer_write %[[V1]], %[[T0]][{{.*}}] {in_bounds = [true]} : vector<3xf32>, tensor<?x?xf32>
 //       CHECK:   return %[[T1]] : tensor<?x?xf32>
-func @transfer_write_2d(%A : tensor<?x?xf32>, %vec : vector<2x3xf32>,
+func.func @transfer_write_2d(%A : tensor<?x?xf32>, %vec : vector<2x3xf32>,
                         %base1 : index, %base2 : index) -> (tensor<?x?xf32>) {
   %t = vector.transfer_write %vec, %A[%base1, %base2] {in_bounds = [true, true]}
       : vector<2x3xf32>, tensor<?x?xf32>

diff --git a/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir
index e17588be0592b..7cf8cc1b05d41 100644
--- a/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s
 
 // CHECK-LABEL: func @transfer_read_inbounds
-func @transfer_read_inbounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>) {
+func.func @transfer_read_inbounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>) {
   %f0 = arith.constant 0.0: f32
   %c0 = arith.constant 0: index
 
@@ -26,7 +26,7 @@ func @transfer_read_inbounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>) {
 // -----
 
 // CHECK-LABEL: func @transfer_read_out_of_bounds
-func @transfer_read_out_of_bounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>) {
+func.func @transfer_read_out_of_bounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>) {
   %f0 = arith.constant 0.0: f32
   %c0 = arith.constant 0: index
 
@@ -57,7 +57,7 @@ func @transfer_read_out_of_bounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>)
 
 // -----
 
-func @transfer_read_mask(%A : memref<?x?x?xf32>, %mask : vector<2x3x4xi1>) -> (vector<2x3x4xf32>) {
+func.func @transfer_read_mask(%A : memref<?x?x?xf32>, %mask : vector<2x3x4xi1>) -> (vector<2x3x4xf32>) {
   %f0 = arith.constant 0.0: f32
   %c0 = arith.constant 0: index
 

diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir
index 035814b11c6cb..d4b5b34a96bad 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir
@@ -22,7 +22,7 @@
 //       CHECK:   return %[[RESULT_T]] : vector<9x4xf32>
 
 // Vector load with mask + transpose.
-func @transfer_read_2d_mask_transposed(
+func.func @transfer_read_2d_mask_transposed(
     %A : memref<?x?xf32>, %base1: index, %base2: index) -> (vector<9x4xf32>) {
   %fm42 = arith.constant -42.0: f32
   %mask = arith.constant dense<[[1, 0, 1, 0], [0, 0, 1, 0],

diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
index ddf6128c63a5a..c5db82cb073f2 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL
 
 // CHECK-LABEL: func @vector_transfer_ops_0d(
-func @vector_transfer_ops_0d(%M: memref<f32>) {
+func.func @vector_transfer_ops_0d(%M: memref<f32>) {
   %f0 = arith.constant 0.0 : f32
 
   // 0-d transfers are left untouched by vector-to-scf.
@@ -21,7 +21,7 @@ func @vector_transfer_ops_0d(%M: memref<f32>) {
 // -----
 
 // CHECK-LABEL: func @materialize_read_1d() {
-func @materialize_read_1d() {
+func.func @materialize_read_1d() {
   %f0 = arith.constant 0.0: f32
   %A = memref.alloc () : memref<7x42xf32>
   affine.for %i0 = 0 to 7 step 4 {
@@ -51,7 +51,7 @@ func @materialize_read_1d() {
 // -----
 
 // CHECK-LABEL: func @materialize_read_1d_partially_specialized
-func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %dyn4 : index) {
+func.func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %dyn4 : index) {
   %f0 = arith.constant 0.0: f32
   %A = memref.alloc (%dyn1, %dyn2, %dyn4) : memref<7x?x?x42x?xf32>
   affine.for %i0 = 0 to 7 {
@@ -81,7 +81,7 @@ func @materialize_read_1d_partially_specialized(%dyn1 : index, %dyn2 : index, %d
 // CHECK: #[[$ADD:map.*]] = affine_map<(d0, d1) -> (d0 + d1)>
 
 // CHECK-LABEL: func @materialize_read(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
-func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
+func.func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
   %f0 = arith.constant 0.0: f32
   // CHECK-DAG:  %[[C0:.*]] = arith.constant 0 : index
   // CHECK-DAG:  %[[C1:.*]] = arith.constant 1 : index
@@ -148,7 +148,7 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
 // CHECK: #[[$ADD:map.*]] = affine_map<(d0, d1) -> (d0 + d1)>
 
 // CHECK-LABEL:func @materialize_write(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
-func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
+func.func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
   // CHECK-DAG:  %{{.*}} = arith.constant dense<1.000000e+00> : vector<5x4x3xf32>
   // CHECK-DAG:  %[[C0:.*]] = arith.constant 0 : index
   // CHECK-DAG:  %[[C1:.*]] = arith.constant 1 : index
@@ -222,7 +222,7 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
 //  FULL-UNROLL-SAME:   %[[A:[a-zA-Z0-9]+]]: memref<?x?xf32>,
 //  FULL-UNROLL-SAME:   %[[base:[a-zA-Z0-9]+]]: index
 
-func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x15xf32> {
+func.func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x15xf32> {
   %f7 = arith.constant 7.0: f32
   // CHECK-DAG: %[[C7:.*]] = arith.constant 7.000000e+00 : f32
   // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -296,7 +296,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
 //  FULL-UNROLL-SAME:   %[[A:[a-zA-Z0-9]+]]: memref<?x?xf32>,
 //  FULL-UNROLL-SAME:   %[[base:[a-zA-Z0-9]+]]: index,
 //  FULL-UNROLL-SAME:   %[[vec:[a-zA-Z0-9]+]]: vector<3x15xf32>
-func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vector<3x15xf32>) {
+func.func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vector<3x15xf32>) {
   // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
   // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
   // CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
@@ -353,7 +353,7 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
 //  FULL-UNROLL-SAME:   %[[A:[a-zA-Z0-9]+]]: memref<?x?xf32>,
 //  FULL-UNROLL-SAME:   %[[base:[a-zA-Z0-9]+]]: index,
 //  FULL-UNROLL-SAME:   %[[vec:[a-zA-Z0-9]+]]: vector<3x15xf32>
-func @transfer_write_progressive_inbounds(%A : memref<?x?xf32>, %base: index, %vec: vector<3x15xf32>) {
+func.func @transfer_write_progressive_inbounds(%A : memref<?x?xf32>, %base: index, %vec: vector<3x15xf32>) {
   // CHECK-NOT:    scf.if
   // CHECK-DAG:  %[[C0:.*]] = arith.constant 0 : index
   // CHECK-DAG:  %[[C3:.*]] = arith.constant 3 : index
@@ -381,7 +381,7 @@ func @transfer_write_progressive_inbounds(%A : memref<?x?xf32>, %base: index, %v
 // -----
 
 // FULL-UNROLL-LABEL: transfer_read_simple
-func @transfer_read_simple(%A : memref<2x2xf32>) -> vector<2x2xf32> {
+func.func @transfer_read_simple(%A : memref<2x2xf32>) -> vector<2x2xf32> {
   %c0 = arith.constant 0 : index
   %f0 = arith.constant 0.0 : f32
   // FULL-UNROLL-DAG: %[[VC0:.*]] = arith.constant dense<0.000000e+00> : vector<2x2xf32>
@@ -395,7 +395,7 @@ func @transfer_read_simple(%A : memref<2x2xf32>) -> vector<2x2xf32> {
   return %0 : vector<2x2xf32>
 }
 
-func @transfer_read_minor_identity(%A : memref<?x?x?x?xf32>) -> vector<3x3xf32> {
+func.func @transfer_read_minor_identity(%A : memref<?x?x?x?xf32>) -> vector<3x3xf32> {
   %c0 = arith.constant 0 : index
   %f0 = arith.constant 0.0 : f32
   %0 = vector.transfer_read %A[%c0, %c0, %c0, %c0], %f0
@@ -427,7 +427,7 @@ func @transfer_read_minor_identity(%A : memref<?x?x?x?xf32>) -> vector<3x3xf32>
 //  CHECK:        %[[ret:.*]]  = memref.load %[[m]][] : memref<vector<3x3xf32>>
 //  CHECK:        return %[[ret]] : vector<3x3xf32>
 
-func @transfer_write_minor_identity(%A : vector<3x3xf32>, %B : memref<?x?x?x?xf32>) {
+func.func @transfer_write_minor_identity(%A : vector<3x3xf32>, %B : memref<?x?x?x?xf32>) {
   %c0 = arith.constant 0 : index
   %f0 = arith.constant 0.0 : f32
   vector.transfer_write %A, %B[%c0, %c0, %c0, %c0]
@@ -459,7 +459,7 @@ func @transfer_write_minor_identity(%A : vector<3x3xf32>, %B : memref<?x?x?x?xf3
 
 // -----
 
-func @transfer_read_strided(%A : memref<8x4xf32, affine_map<(d0, d1) -> (d0 + d1 * 8)>>) -> vector<4xf32> {
+func.func @transfer_read_strided(%A : memref<8x4xf32, affine_map<(d0, d1) -> (d0 + d1 * 8)>>) -> vector<4xf32> {
   %c0 = arith.constant 0 : index
   %f0 = arith.constant 0.0 : f32
   %0 = vector.transfer_read %A[%c0, %c0], %f0
@@ -471,7 +471,7 @@ func @transfer_read_strided(%A : memref<8x4xf32, affine_map<(d0, d1) -> (d0 + d1
 // CHECK: scf.for
 // CHECK: memref.load
 
-func @transfer_write_strided(%A : vector<4xf32>, %B : memref<8x4xf32, affine_map<(d0, d1) -> (d0 + d1 * 8)>>) {
+func.func @transfer_write_strided(%A : vector<4xf32>, %B : memref<8x4xf32, affine_map<(d0, d1) -> (d0 + d1 * 8)>>) {
   %c0 = arith.constant 0 : index
   vector.transfer_write %A, %B[%c0, %c0] :
     vector<4xf32>, memref<8x4xf32, affine_map<(d0, d1) -> (d0 + d1 * 8)>>
@@ -484,10 +484,10 @@ func @transfer_write_strided(%A : vector<4xf32>, %B : memref<8x4xf32, affine_map
 
 // -----
 
-func private @fake_side_effecting_fun(%0: vector<2x2xf32>) -> ()
+func.func private @fake_side_effecting_fun(%0: vector<2x2xf32>) -> ()
 
 // CHECK-LABEL: transfer_read_within_async_execute
-func @transfer_read_within_async_execute(%A : memref<2x2xf32>) -> !async.token {
+func.func @transfer_read_within_async_execute(%A : memref<2x2xf32>) -> !async.token {
   %c0 = arith.constant 0 : index
   %f0 = arith.constant 0.0 : f32
   // CHECK-NOT: alloca

diff --git a/mlir/test/Conversion/VectorToSPIRV/simple.mlir b/mlir/test/Conversion/VectorToSPIRV/simple.mlir
index 949e59951a4f4..421c803b5a91a 100644
--- a/mlir/test/Conversion/VectorToSPIRV/simple.mlir
+++ b/mlir/test/Conversion/VectorToSPIRV/simple.mlir
@@ -6,7 +6,7 @@ module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16], [
 //  CHECK-SAME: %[[ARG0:.+]]: vector<2xf32>, %[[ARG1:.+]]: vector<2xf16>
 //       CHECK:   spv.Bitcast %[[ARG0]] : vector<2xf32> to vector<4xf16>
 //       CHECK:   spv.Bitcast %[[ARG1]] : vector<2xf16> to f32
-func @bitcast(%arg0 : vector<2xf32>, %arg1: vector<2xf16>) -> (vector<4xf16>, vector<1xf32>) {
+func.func @bitcast(%arg0 : vector<2xf32>, %arg1: vector<2xf16>) -> (vector<4xf16>, vector<1xf32>) {
   %0 = vector.bitcast %arg0 : vector<2xf32> to vector<4xf16>
   %1 = vector.bitcast %arg1 : vector<2xf16> to vector<1xf32>
   return %0, %1: vector<4xf16>, vector<1xf32>
@@ -20,7 +20,7 @@ func @bitcast(%arg0 : vector<2xf32>, %arg1: vector<2xf16>) -> (vector<4xf16>, ve
 //  CHECK-SAME: %[[A:.*]]: f32
 //       CHECK:   spv.CompositeConstruct %[[A]], %[[A]], %[[A]], %[[A]] : vector<4xf32>
 //       CHECK:   spv.CompositeConstruct %[[A]], %[[A]] : vector<2xf32>
-func @broadcast(%arg0 : f32) -> (vector<4xf32>, vector<2xf32>) {
+func.func @broadcast(%arg0 : f32) -> (vector<4xf32>, vector<2xf32>) {
   %0 = vector.broadcast %arg0 : f32 to vector<4xf32>
   %1 = vector.broadcast %arg0 : f32 to vector<2xf32>
   return %0, %1: vector<4xf32>, vector<2xf32>
@@ -32,7 +32,7 @@ func @broadcast(%arg0 : f32) -> (vector<4xf32>, vector<2xf32>) {
 //  CHECK-SAME: %[[ARG:.+]]: vector<2xf32>
 //       CHECK:   spv.CompositeExtract %[[ARG]][0 : i32] : vector<2xf32>
 //       CHECK:   spv.CompositeExtract %[[ARG]][1 : i32] : vector<2xf32>
-func @extract(%arg0 : vector<2xf32>) -> (vector<1xf32>, f32) {
+func.func @extract(%arg0 : vector<2xf32>) -> (vector<1xf32>, f32) {
   %0 = "vector.extract"(%arg0) {position = [0]} : (vector<2xf32>) -> vector<1xf32>
   %1 = "vector.extract"(%arg0) {position = [1]} : (vector<2xf32>) -> f32
   return %0, %1: vector<1xf32>, f32
@@ -44,7 +44,7 @@ func @extract(%arg0 : vector<2xf32>) -> (vector<1xf32>, f32) {
 //  CHECK-SAME: %[[ARG0:.+]]: vector<1xf32>
 //       CHECK:   %[[R:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
 //       CHECK:   return %[[R]]
-func @extract_size1_vector(%arg0 : vector<1xf32>) -> f32 {
+func.func @extract_size1_vector(%arg0 : vector<1xf32>) -> f32 {
   %0 = vector.extract %arg0[0] : vector<1xf32>
 	return %0: f32
 }
@@ -54,7 +54,7 @@ func @extract_size1_vector(%arg0 : vector<1xf32>) -> f32 {
 // CHECK-LABEL: @insert
 //  CHECK-SAME: %[[V:.*]]: vector<4xf32>, %[[S:.*]]: f32
 //       CHECK:   spv.CompositeInsert %[[S]], %[[V]][2 : i32] : f32 into vector<4xf32>
-func @insert(%arg0 : vector<4xf32>, %arg1: f32) -> vector<4xf32> {
+func.func @insert(%arg0 : vector<4xf32>, %arg1: f32) -> vector<4xf32> {
   %1 = vector.insert %arg1, %arg0[2] : f32 into vector<4xf32>
 	return %1: vector<4xf32>
 }
@@ -65,7 +65,7 @@ func @insert(%arg0 : vector<4xf32>, %arg1: f32) -> vector<4xf32> {
 //  CHECK-SAME: %[[V:.*]]: vector<1xf32>, %[[S:.*]]: f32
 //       CHECK:   %[[R:.+]] = builtin.unrealized_conversion_cast %[[S]]
 //       CHECK:   return %[[R]]
-func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: f32) -> vector<1xf32> {
+func.func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: f32) -> vector<1xf32> {
   %1 = vector.insert %arg1, %arg0[0] : f32 into vector<1xf32>
 	return %1 : vector<1xf32>
 }
@@ -75,7 +75,7 @@ func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: f32) -> vector<1xf32> {
 // CHECK-LABEL: @extract_element
 //  CHECK-SAME: %[[V:.*]]: vector<4xf32>, %[[ID:.*]]: i32
 //       CHECK:   spv.VectorExtractDynamic %[[V]][%[[ID]]] : vector<4xf32>, i32
-func @extract_element(%arg0 : vector<4xf32>, %id : i32) -> f32 {
+func.func @extract_element(%arg0 : vector<4xf32>, %id : i32) -> f32 {
   %0 = vector.extractelement %arg0[%id : i32] : vector<4xf32>
   return %0: f32
 }
@@ -83,7 +83,7 @@ func @extract_element(%arg0 : vector<4xf32>, %id : i32) -> f32 {
 // -----
 
 // CHECK-LABEL: @extract_element_index
-func @extract_element_index(%arg0 : vector<4xf32>, %id : index) -> f32 {
+func.func @extract_element_index(%arg0 : vector<4xf32>, %id : index) -> f32 {
 	// CHECK: vector.extractelement
   %0 = vector.extractelement %arg0[%id : index] : vector<4xf32>
   return %0: f32
@@ -92,7 +92,7 @@ func @extract_element_index(%arg0 : vector<4xf32>, %id : index) -> f32 {
 // -----
 
 // CHECK-LABEL: @extract_element_size5_vector
-func @extract_element_size5_vector(%arg0 : vector<5xf32>, %id : i32) -> f32 {
+func.func @extract_element_size5_vector(%arg0 : vector<5xf32>, %id : i32) -> f32 {
 	// CHECK: vector.extractelement
   %0 = vector.extractelement %arg0[%id : i32] : vector<5xf32>
   return %0: f32
@@ -104,7 +104,7 @@ func @extract_element_size5_vector(%arg0 : vector<5xf32>, %id : i32) -> f32 {
 //  CHECK-SAME: %[[ARG:.+]]: vector<4xf32>
 //       CHECK:   spv.VectorShuffle [1 : i32, 2 : i32] %[[ARG]] : vector<4xf32>, %[[ARG]] : vector<4xf32> -> vector<2xf32>
 //       CHECK:   spv.CompositeExtract %[[ARG]][1 : i32] : vector<4xf32>
-func @extract_strided_slice(%arg0: vector<4xf32>) -> (vector<2xf32>, vector<1xf32>) {
+func.func @extract_strided_slice(%arg0: vector<4xf32>) -> (vector<2xf32>, vector<1xf32>) {
   %0 = vector.extract_strided_slice %arg0 {offsets = [1], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32>
   %1 = vector.extract_strided_slice %arg0 {offsets = [1], sizes = [1], strides = [1]} : vector<4xf32> to vector<1xf32>
   return %0, %1 : vector<2xf32>, vector<1xf32>
@@ -115,7 +115,7 @@ func @extract_strided_slice(%arg0: vector<4xf32>) -> (vector<2xf32>, vector<1xf3
 // CHECK-LABEL: @insert_element
 //  CHECK-SAME: %[[VAL:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[ID:.*]]: i32
 //       CHECK:   spv.VectorInsertDynamic %[[VAL]], %[[V]][%[[ID]]] : vector<4xf32>, i32
-func @insert_element(%val: f32, %arg0 : vector<4xf32>, %id : i32) -> vector<4xf32> {
+func.func @insert_element(%val: f32, %arg0 : vector<4xf32>, %id : i32) -> vector<4xf32> {
   %0 = vector.insertelement %val, %arg0[%id : i32] : vector<4xf32>
   return %0: vector<4xf32>
 }
@@ -123,7 +123,7 @@ func @insert_element(%val: f32, %arg0 : vector<4xf32>, %id : i32) -> vector<4xf3
 // -----
 
 // CHECK-LABEL: @insert_element_index
-func @insert_element_index(%val: f32, %arg0 : vector<4xf32>, %id : index) -> vector<4xf32> {
+func.func @insert_element_index(%val: f32, %arg0 : vector<4xf32>, %id : index) -> vector<4xf32> {
 	// CHECK: vector.insertelement
   %0 = vector.insertelement %val, %arg0[%id : index] : vector<4xf32>
   return %0: vector<4xf32>
@@ -132,7 +132,7 @@ func @insert_element_index(%val: f32, %arg0 : vector<4xf32>, %id : index) -> vec
 // -----
 
 // CHECK-LABEL: @insert_element_size5_vector
-func @insert_element_size5_vector(%val: f32, %arg0 : vector<5xf32>, %id : i32) -> vector<5xf32> {
+func.func @insert_element_size5_vector(%val: f32, %arg0 : vector<5xf32>, %id : i32) -> vector<5xf32> {
 	// CHECK: vector.insertelement
   %0 = vector.insertelement %val, %arg0[%id : i32] : vector<5xf32>
 	return %0 : vector<5xf32>
@@ -143,7 +143,7 @@ func @insert_element_size5_vector(%val: f32, %arg0 : vector<5xf32>, %id : i32) -
 // CHECK-LABEL: @insert_strided_slice
 //  CHECK-SAME: %[[PART:.+]]: vector<2xf32>, %[[ALL:.+]]: vector<4xf32>
 //       CHECK: 	spv.VectorShuffle [0 : i32, 4 : i32, 5 : i32, 3 : i32] %[[ALL]] : vector<4xf32>, %[[PART]] : vector<2xf32> -> vector<4xf32>
-func @insert_strided_slice(%arg0: vector<2xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
+func.func @insert_strided_slice(%arg0: vector<2xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.insert_strided_slice %arg0, %arg1 {offsets = [1], strides = [1]} : vector<2xf32> into vector<4xf32>
 	return %0 : vector<4xf32>
 }
@@ -154,7 +154,7 @@ func @insert_strided_slice(%arg0: vector<2xf32>, %arg1: vector<4xf32>) -> vector
 //  CHECK-SAME: %[[SUB:.*]]: vector<1xf32>, %[[FULL:.*]]: vector<3xf32>
 //       CHECK:   %[[S:.+]] = builtin.unrealized_conversion_cast %[[SUB]]
 //       CHECK:   spv.CompositeInsert %[[S]], %[[FULL]][2 : i32] : f32 into vector<3xf32>
-func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: vector<3xf32>) -> vector<3xf32> {
+func.func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: vector<3xf32>) -> vector<3xf32> {
   %1 = vector.insert_strided_slice %arg0, %arg1 {offsets = [2], strides = [1]} : vector<1xf32> into vector<3xf32>
 	return %1 : vector<3xf32>
 }
@@ -164,7 +164,7 @@ func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: vector<3xf32>) -> vector
 // CHECK-LABEL: @fma
 //  CHECK-SAME: %[[A:.*]]: vector<4xf32>, %[[B:.*]]: vector<4xf32>, %[[C:.*]]: vector<4xf32>
 //       CHECK:   spv.GLSL.Fma %[[A]], %[[B]], %[[C]] : vector<4xf32>
-func @fma(%a: vector<4xf32>, %b: vector<4xf32>, %c: vector<4xf32>) -> vector<4xf32> {
+func.func @fma(%a: vector<4xf32>, %b: vector<4xf32>, %c: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.fma %a, %b, %c: vector<4xf32>
 	return %0 : vector<4xf32>
 }
@@ -175,7 +175,7 @@ func @fma(%a: vector<4xf32>, %b: vector<4xf32>, %c: vector<4xf32>) -> vector<4xf
 //  CHECK-SAME: (%[[A:.+]]: f32)
 //       CHECK:   %[[VAL:.+]] = spv.CompositeConstruct %[[A]], %[[A]], %[[A]], %[[A]] : vector<4xf32>
 //       CHECK:   return %[[VAL]]
-func @splat(%f : f32) -> vector<4xf32> {
+func.func @splat(%f : f32) -> vector<4xf32> {
   %splat = vector.splat %f : vector<4xf32>
   return %splat : vector<4xf32>
 }
@@ -187,7 +187,7 @@ func @splat(%f : f32) -> vector<4xf32> {
 //       CHECK:    %[[V0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
 //       CHECK:    %[[V1:.+]] = builtin.unrealized_conversion_cast %[[ARG1]]
 //       CHECK:    spv.CompositeConstruct %[[V0]], %[[V1]], %[[V1]], %[[V0]] : vector<4xf32>
-func @shuffle(%v0 : vector<1xf32>, %v1: vector<1xf32>) -> vector<4xf32> {
+func.func @shuffle(%v0 : vector<1xf32>, %v1: vector<1xf32>) -> vector<4xf32> {
   %shuffle = vector.shuffle %v0, %v1 [0, 1, 1, 0] : vector<1xf32>, vector<1xf32>
   return %shuffle : vector<4xf32>
 }
@@ -197,7 +197,7 @@ func @shuffle(%v0 : vector<1xf32>, %v1: vector<1xf32>) -> vector<4xf32> {
 // CHECK-LABEL:  func @shuffle
 //  CHECK-SAME:  %[[V0:.+]]: vector<3xf32>, %[[V1:.+]]: vector<3xf32>
 //       CHECK:    spv.VectorShuffle [3 : i32, 2 : i32, 5 : i32, 1 : i32] %[[V0]] : vector<3xf32>, %[[V1]] : vector<3xf32> -> vector<4xf32>
-func @shuffle(%v0 : vector<3xf32>, %v1: vector<3xf32>) -> vector<4xf32> {
+func.func @shuffle(%v0 : vector<3xf32>, %v1: vector<3xf32>) -> vector<4xf32> {
   %shuffle = vector.shuffle %v0, %v1 [3, 2, 5, 1] : vector<3xf32>, vector<3xf32>
   return %shuffle : vector<4xf32>
 }
@@ -205,7 +205,7 @@ func @shuffle(%v0 : vector<3xf32>, %v1: vector<3xf32>) -> vector<4xf32> {
 // -----
 
 // CHECK-LABEL:  func @shuffle
-func @shuffle(%v0 : vector<2x16xf32>, %v1: vector<1x16xf32>) -> vector<3x16xf32> {
+func.func @shuffle(%v0 : vector<2x16xf32>, %v1: vector<1x16xf32>) -> vector<3x16xf32> {
   // CHECK: vector.shuffle
   %shuffle = vector.shuffle %v0, %v1 [0, 1, 2] : vector<2x16xf32>, vector<1x16xf32>
   return %shuffle : vector<3x16xf32>
