[flang-commits] [flang] 58ceae9 - [mlir:NFC] Remove the forward declaration of FuncOp in the mlir namespace

River Riddle via flang-commits <flang-commits@lists.llvm.org>
Mon Apr 18 12:02:49 PDT 2022


Author: River Riddle
Date: 2022-04-18T12:01:55-07:00
New Revision: 58ceae9561fbae75fa175244db4e0e4bfe7416fd

URL: https://github.com/llvm/llvm-project/commit/58ceae9561fbae75fa175244db4e0e4bfe7416fd
DIFF: https://github.com/llvm/llvm-project/commit/58ceae9561fbae75fa175244db4e0e4bfe7416fd.diff

LOG: [mlir:NFC] Remove the forward declaration of FuncOp in the mlir namespace

FuncOp was moved to the `func` namespace a little over a month ago, so the
using directive can now be dropped.
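
In practice this means downstream code can no longer refer to the op as
`mlir::FuncOp`; it must spell out `mlir::func::FuncOp` (or introduce its own
local using declaration), which is the mechanical change applied throughout the
flang and mlir files below. A minimal sketch of the new spelling, not part of
this commit; the helper name `lookupFunc` is hypothetical, and the lookup via
`SymbolTable` is just one way to obtain a `func::FuncOp`:

  #include "llvm/ADT/StringRef.h"
  #include "mlir/Dialect/Func/IR/FuncOps.h" // declares mlir::func::FuncOp
  #include "mlir/IR/BuiltinOps.h"           // mlir::ModuleOp
  #include "mlir/IR/SymbolTable.h"

  // Hypothetical helper: find a func.func symbol in a module by name.
  // Before this change the result type could be written `mlir::FuncOp`;
  // with the forward declaration gone, the `func` namespace is required.
  static mlir::func::FuncOp lookupFunc(mlir::ModuleOp module,
                                       llvm::StringRef name) {
    return llvm::dyn_cast_or_null<mlir::func::FuncOp>(
        mlir::SymbolTable::lookupSymbolIn(module, name));
  }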

Added: 
    

Modified: 
    flang/include/flang/Optimizer/Builder/FIRBuilder.h
    flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h
    flang/include/flang/Optimizer/Dialect/FIROps.td
    flang/include/flang/Optimizer/Dialect/FIROpsSupport.h
    flang/include/flang/Optimizer/Transforms/Passes.td
    flang/include/flang/Tools/CLOptions.inc
    flang/lib/Lower/Allocatable.cpp
    flang/lib/Lower/Bridge.cpp
    flang/lib/Lower/CallInterface.cpp
    flang/lib/Lower/ConvertExpr.cpp
    flang/lib/Lower/HostAssociations.cpp
    flang/lib/Lower/IO.cpp
    flang/lib/Lower/IntrinsicCall.cpp
    flang/lib/Lower/Runtime.cpp
    flang/lib/Optimizer/Builder/FIRBuilder.cpp
    flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp
    flang/lib/Optimizer/Builder/Runtime/Character.cpp
    flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp
    flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
    flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
    flang/lib/Optimizer/Builder/Runtime/Stop.cpp
    flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
    flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
    flang/lib/Optimizer/Dialect/FIROps.cpp
    flang/lib/Optimizer/Transforms/AffinePromotion.cpp
    flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp
    flang/lib/Optimizer/Transforms/ExternalNameConversion.cpp
    flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp
    flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
    flang/unittests/Optimizer/Builder/CharacterTest.cpp
    flang/unittests/Optimizer/Builder/ComplexTest.cpp
    flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp
    flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h
    mlir/docs/Bufferization.md
    mlir/docs/DialectConversion.md
    mlir/docs/PassManagement.md
    mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
    mlir/examples/toy/Ch5/toyc.cpp
    mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
    mlir/examples/toy/Ch6/toyc.cpp
    mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
    mlir/examples/toy/Ch7/toyc.cpp
    mlir/include/mlir/Dialect/Affine/Passes.td
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
    mlir/include/mlir/Dialect/Func/IR/FuncOps.h
    mlir/include/mlir/Dialect/GPU/Passes.td
    mlir/include/mlir/Dialect/Linalg/Passes.h
    mlir/include/mlir/Dialect/Linalg/Passes.td
    mlir/include/mlir/Dialect/Quant/Passes.td
    mlir/include/mlir/Dialect/SCF/Passes.td
    mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
    mlir/include/mlir/Dialect/Shape/Transforms/Passes.td
    mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
    mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td
    mlir/include/mlir/Dialect/Vector/Transforms/Passes.td
    mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
    mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
    mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
    mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
    mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
    mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRVPass.cpp
    mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
    mlir/lib/Conversion/MathToLibm/MathToLibm.cpp
    mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
    mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
    mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp
    mlir/lib/Conversion/TosaToSCF/TosaToSCFPass.cpp
    mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
    mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
    mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
    mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
    mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
    mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
    mlir/lib/Dialect/Affine/Utils/Utils.cpp
    mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
    mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
    mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
    mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp
    mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
    mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
    mlir/lib/Dialect/Func/Transforms/DecomposeCallGraphTypes.cpp
    mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
    mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
    mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
    mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp
    mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
    mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
    mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
    mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
    mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
    mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp
    mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
    mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
    mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
    mlir/lib/Dialect/Quant/Transforms/ConvertConst.cpp
    mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
    mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
    mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
    mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
    mlir/lib/Dialect/SCF/Utils/Utils.cpp
    mlir/lib/Dialect/SPIRV/Transforms/DecorateCompositeTypeLayoutPass.cpp
    mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
    mlir/lib/Dialect/Shape/IR/Shape.cpp
    mlir/lib/Dialect/Shape/Transforms/Bufferize.cpp
    mlir/lib/Dialect/Shape/Transforms/RemoveShapeConstraints.cpp
    mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
    mlir/test/IR/attribute.mlir
    mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
    mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
    mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
    mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
    mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
    mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgHoisting.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
    mlir/test/lib/Dialect/SCF/TestSCFUtils.cpp
    mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp
    mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
    mlir/test/lib/Dialect/Test/TestOps.td
    mlir/test/lib/Dialect/Test/TestPatterns.cpp
    mlir/test/lib/Dialect/Test/TestTraits.cpp
    mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
    mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
    mlir/test/lib/IR/TestPrintInvalid.cpp
    mlir/test/lib/IR/TestSlicing.cpp
    mlir/test/lib/IR/TestSymbolUses.cpp
    mlir/test/lib/IR/TestTypes.cpp
    mlir/test/lib/Pass/TestPassManager.cpp
    mlir/test/lib/Transforms/TestControlFlowSink.cpp
    mlir/test/lib/Transforms/TestInlining.cpp
    mlir/unittests/ExecutionEngine/Invoke.cpp
    mlir/unittests/Interfaces/InferTypeOpInterfaceTest.cpp
    mlir/unittests/Pass/AnalysisManagerTest.cpp
    mlir/unittests/Pass/PassManagerTest.cpp

Removed: 
    


################################################################################
diff  --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
index c350d44df33e1..f899a0e8cd662 100644
--- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
@@ -52,8 +52,8 @@ class FirOpBuilder : public mlir::OpBuilder {
   }
 
   /// Get the current Function
-  mlir::FuncOp getFunction() {
-    return getRegion().getParentOfType<mlir::FuncOp>();
+  mlir::func::FuncOp getFunction() {
+    return getRegion().getParentOfType<mlir::func::FuncOp>();
   }
 
   /// Get a reference to the kind map.
@@ -222,19 +222,19 @@ class FirOpBuilder : public mlir::OpBuilder {
 
   /// Get a function by name. If the function exists in the current module, it
   /// is returned. Otherwise, a null FuncOp is returned.
-  mlir::FuncOp getNamedFunction(llvm::StringRef name) {
+  mlir::func::FuncOp getNamedFunction(llvm::StringRef name) {
     return getNamedFunction(getModule(), name);
   }
-  static mlir::FuncOp getNamedFunction(mlir::ModuleOp module,
-                                       llvm::StringRef name);
+  static mlir::func::FuncOp getNamedFunction(mlir::ModuleOp module,
+                                             llvm::StringRef name);
 
   /// Get a function by symbol name. The result will be null if there is no
   /// function with the given symbol in the module.
-  mlir::FuncOp getNamedFunction(mlir::SymbolRefAttr symbol) {
+  mlir::func::FuncOp getNamedFunction(mlir::SymbolRefAttr symbol) {
     return getNamedFunction(getModule(), symbol);
   }
-  static mlir::FuncOp getNamedFunction(mlir::ModuleOp module,
-                                       mlir::SymbolRefAttr symbol);
+  static mlir::func::FuncOp getNamedFunction(mlir::ModuleOp module,
+                                             mlir::SymbolRefAttr symbol);
 
   fir::GlobalOp getNamedGlobal(llvm::StringRef name) {
     return getNamedGlobal(getModule(), name);
@@ -254,28 +254,29 @@ class FirOpBuilder : public mlir::OpBuilder {
 
   /// Create a new FuncOp. If the function may have already been created, use
   /// `addNamedFunction` instead.
-  mlir::FuncOp createFunction(mlir::Location loc, llvm::StringRef name,
-                              mlir::FunctionType ty) {
+  mlir::func::FuncOp createFunction(mlir::Location loc, llvm::StringRef name,
+                                    mlir::FunctionType ty) {
     return createFunction(loc, getModule(), name, ty);
   }
 
-  static mlir::FuncOp createFunction(mlir::Location loc, mlir::ModuleOp module,
-                                     llvm::StringRef name,
-                                     mlir::FunctionType ty);
+  static mlir::func::FuncOp createFunction(mlir::Location loc,
+                                           mlir::ModuleOp module,
+                                           llvm::StringRef name,
+                                           mlir::FunctionType ty);
 
   /// Determine if the named function is already in the module. Return the
   /// instance if found, otherwise add a new named function to the module.
-  mlir::FuncOp addNamedFunction(mlir::Location loc, llvm::StringRef name,
-                                mlir::FunctionType ty) {
+  mlir::func::FuncOp addNamedFunction(mlir::Location loc, llvm::StringRef name,
+                                      mlir::FunctionType ty) {
     if (auto func = getNamedFunction(name))
       return func;
     return createFunction(loc, name, ty);
   }
 
-  static mlir::FuncOp addNamedFunction(mlir::Location loc,
-                                       mlir::ModuleOp module,
-                                       llvm::StringRef name,
-                                       mlir::FunctionType ty) {
+  static mlir::func::FuncOp addNamedFunction(mlir::Location loc,
+                                             mlir::ModuleOp module,
+                                             llvm::StringRef name,
+                                             mlir::FunctionType ty) {
     if (auto func = getNamedFunction(module, name))
       return func;
     return createFunction(loc, module, name, ty);

diff  --git a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h
index 8fea99e008735..2d674593511c7 100644
--- a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h
@@ -391,8 +391,8 @@ struct RuntimeTableEntry<RuntimeTableKey<KT>, RuntimeIdentifier<Cs...>> {
 /// Get (or generate) the MLIR FuncOp for a given runtime function. Its template
 /// argument is intended to be of the form: <mkRTKey(runtime function name)>.
 template <typename RuntimeEntry>
-static mlir::FuncOp getRuntimeFunc(mlir::Location loc,
-                                   fir::FirOpBuilder &builder) {
+static mlir::func::FuncOp getRuntimeFunc(mlir::Location loc,
+                                         fir::FirOpBuilder &builder) {
   using namespace Fortran::runtime;
   auto name = RuntimeEntry::name;
   auto func = builder.getNamedFunction(name);

diff  --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td
index e0c09396dc7c9..eda93d7651759 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROps.td
+++ b/flang/include/flang/Optimizer/Dialect/FIROps.td
@@ -2282,7 +2282,7 @@ def fir_CallOp : fir_Op<"call", [CallOpInterface]> {
   let hasCustomAssemblyFormat = 1;
 
   let builders = [
-    OpBuilder<(ins "mlir::FuncOp":$callee,
+    OpBuilder<(ins "mlir::func::FuncOp":$callee,
         CArg<"mlir::ValueRange", "{}">:$operands)>,
     OpBuilder<(ins "mlir::SymbolRefAttr":$callee,
         "llvm::ArrayRef<mlir::Type>":$results,

diff  --git a/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h b/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h
index 2324b28de684c..2d1a4e6b550fa 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h
+++ b/flang/include/flang/Optimizer/Dialect/FIROpsSupport.h
@@ -53,9 +53,10 @@ inline bool pureCall(mlir::Operation *op) {
 ///
 /// If `module` already contains FuncOp `name`, it is returned. Otherwise, a new
 /// FuncOp is created, and that new FuncOp is returned.
-mlir::FuncOp createFuncOp(mlir::Location loc, mlir::ModuleOp module,
-                          llvm::StringRef name, mlir::FunctionType type,
-                          llvm::ArrayRef<mlir::NamedAttribute> attrs = {});
+mlir::func::FuncOp
+createFuncOp(mlir::Location loc, mlir::ModuleOp module, llvm::StringRef name,
+             mlir::FunctionType type,
+             llvm::ArrayRef<mlir::NamedAttribute> attrs = {});
 
 /// Get or create a GlobalOp in a module.
 fir::GlobalOp createGlobalOp(mlir::Location loc, mlir::ModuleOp module,
@@ -87,7 +88,7 @@ static constexpr llvm::StringRef getHostAssocAttrName() {
 
 /// Does the function, \p func, have a host-associations tuple argument?
 /// Some internal procedures may have access to host procedure variables.
-bool hasHostAssociationArgument(mlir::FuncOp func);
+bool hasHostAssociationArgument(mlir::func::FuncOp func);
 
 /// Tell if \p value is:
 ///   - a function argument that has attribute \p attributeName
@@ -102,7 +103,7 @@ bool valueHasFirAttribute(mlir::Value value, llvm::StringRef attributeName);
 /// Scan the arguments of a FuncOp to determine if any arguments have the
 /// attribute `attr` placed on them. This can be used to determine if the
 /// function has any host associations, for example.
-bool anyFuncArgsHaveAttr(mlir::FuncOp func, llvm::StringRef attr);
+bool anyFuncArgsHaveAttr(mlir::func::FuncOp func, llvm::StringRef attr);
 
 } // namespace fir
 

diff  --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td
index aec7ef212c60c..8bfc55d01cf83 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.td
+++ b/flang/include/flang/Optimizer/Transforms/Passes.td
@@ -16,7 +16,7 @@
 
 include "mlir/Pass/PassBase.td"
 
-def AbstractResultOpt : Pass<"abstract-result-opt", "mlir::FuncOp"> {
+def AbstractResultOpt : Pass<"abstract-result-opt", "mlir::func::FuncOp"> {
   let summary = "Convert fir.array, fir.box and fir.rec function result to "
                 "function argument";
   let description = [{
@@ -35,7 +35,7 @@ def AbstractResultOpt : Pass<"abstract-result-opt", "mlir::FuncOp"> {
   ];
 }
 
-def AffineDialectPromotion : Pass<"promote-to-affine", "::mlir::FuncOp"> {
+def AffineDialectPromotion : Pass<"promote-to-affine", "::mlir::func::FuncOp"> {
   let summary = "Promotes `fir.{do_loop,if}` to `affine.{for,if}`.";
   let description = [{
     Convert fir operations which satisfy affine constraints to the affine
@@ -60,7 +60,7 @@ def AffineDialectPromotion : Pass<"promote-to-affine", "::mlir::FuncOp"> {
   ];
 }
 
-def AffineDialectDemotion : Pass<"demote-affine", "::mlir::FuncOp"> {
+def AffineDialectDemotion : Pass<"demote-affine", "::mlir::func::FuncOp"> {
   let summary = "Converts `affine.{load,store}` back to fir operations";
   let description = [{
     Affine dialect's default lowering for loads and stores is different from
@@ -91,7 +91,7 @@ def AnnotateConstantOperands : Pass<"annotate-constant"> {
   let dependentDialects = [ "fir::FIROpsDialect" ];
 }
 
-def ArrayValueCopy : Pass<"array-value-copy", "::mlir::FuncOp"> {
+def ArrayValueCopy : Pass<"array-value-copy", "::mlir::func::FuncOp"> {
   let summary = "Convert array value operations to memory operations.";
   let description = [{
     Transform the set of array value primitives to a memory-based array
@@ -129,7 +129,7 @@ def CharacterConversion : Pass<"character-conversion"> {
   ];
 }
 
-def CFGConversion : Pass<"cfg-conversion", "::mlir::FuncOp"> {
+def CFGConversion : Pass<"cfg-conversion", "::mlir::func::FuncOp"> {
   let summary = "Convert FIR structured control flow ops to CFG ops.";
   let description = [{
     Transform the `fir.do_loop`, `fir.if`, and `fir.iterate_while` ops into
@@ -157,7 +157,7 @@ def ExternalNameConversion : Pass<"external-name-interop", "mlir::ModuleOp"> {
   let constructor = "::fir::createExternalNameConversionPass()";
 }
 
-def MemRefDataFlowOpt : Pass<"fir-memref-dataflow-opt", "::mlir::FuncOp"> {
+def MemRefDataFlowOpt : Pass<"fir-memref-dataflow-opt", "::mlir::func::FuncOp"> {
   let summary =
     "Perform store/load forwarding and potentially removing dead stores.";
   let description = [{
@@ -170,7 +170,7 @@ def MemRefDataFlowOpt : Pass<"fir-memref-dataflow-opt", "::mlir::FuncOp"> {
   ];
 }
 
-def MemoryAllocationOpt : Pass<"memory-allocation-opt", "mlir::FuncOp"> {
+def MemoryAllocationOpt : Pass<"memory-allocation-opt", "mlir::func::FuncOp"> {
   let summary = "Convert stack to heap allocations and vice versa.";
   let description = [{
     Convert stack allocations to heap allocations and vice versa based on

diff  --git a/flang/include/flang/Tools/CLOptions.inc b/flang/include/flang/Tools/CLOptions.inc
index c81c1caa45e0f..2a5a670d41629 100644
--- a/flang/include/flang/Tools/CLOptions.inc
+++ b/flang/include/flang/Tools/CLOptions.inc
@@ -92,17 +92,17 @@ static void defaultFlangInlinerOptPipeline(mlir::OpPassManager &pm) {
 }
 
 inline void addCfgConversionPass(mlir::PassManager &pm) {
-  addNestedPassConditionally<mlir::FuncOp>(
+  addNestedPassConditionally<mlir::func::FuncOp>(
       pm, disableCfgConversion, fir::createFirToCfgPass);
 }
 
 inline void addAVC(mlir::PassManager &pm) {
-  addNestedPassConditionally<mlir::FuncOp>(
+  addNestedPassConditionally<mlir::func::FuncOp>(
       pm, disableFirAvc, fir::createArrayValueCopyPass);
 }
 
 inline void addMemoryAllocationOpt(mlir::PassManager &pm) {
-  addNestedPassConditionally<mlir::FuncOp>(pm, disableFirMao, [&]() {
+  addNestedPassConditionally<mlir::func::FuncOp>(pm, disableFirMao, [&]() {
     return fir::createMemoryAllocationPass(
         dynamicArrayStackToHeapAllocation, arrayStackAllocationThreshold);
   });
@@ -148,7 +148,7 @@ inline void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm) {
   mlir::GreedyRewriteConfig config;
   config.enableRegionSimplification = false;
   fir::addAVC(pm);
-  pm.addNestedPass<mlir::FuncOp>(fir::createCharacterConversionPass());
+  pm.addNestedPass<mlir::func::FuncOp>(fir::createCharacterConversionPass());
   pm.addPass(mlir::createCanonicalizerPass(config));
   pm.addPass(fir::createSimplifyRegionLitePass());
   fir::addMemoryAllocationOpt(pm);
@@ -171,7 +171,7 @@ inline void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm) {
 #if !defined(FLANG_EXCLUDE_CODEGEN)
 inline void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm) {
   fir::addBoxedProcedurePass(pm);
-  pm.addNestedPass<mlir::FuncOp>(fir::createAbstractResultOptPass());
+  pm.addNestedPass<mlir::func::FuncOp>(fir::createAbstractResultOptPass());
   fir::addCodeGenRewritePass(pm);
   fir::addTargetRewritePass(pm);
   fir::addFIRToLLVMPass(pm);

diff  --git a/flang/lib/Lower/Allocatable.cpp b/flang/lib/Lower/Allocatable.cpp
index ac0ebacb781d9..4a622caafebae 100644
--- a/flang/lib/Lower/Allocatable.cpp
+++ b/flang/lib/Lower/Allocatable.cpp
@@ -118,7 +118,7 @@ static void genRuntimeSetBounds(fir::FirOpBuilder &builder, mlir::Location loc,
                                 const fir::MutableBoxValue &box,
                                 mlir::Value dimIndex, mlir::Value lowerBound,
                                 mlir::Value upperBound) {
-  mlir::FuncOp callee =
+  mlir::func::FuncOp callee =
       box.isPointer()
           ? fir::runtime::getRuntimeFunc<mkRTKey(PointerSetBounds)>(loc,
                                                                     builder)
@@ -138,7 +138,7 @@ static void genRuntimeInitCharacter(fir::FirOpBuilder &builder,
                                     mlir::Location loc,
                                     const fir::MutableBoxValue &box,
                                     mlir::Value len) {
-  mlir::FuncOp callee =
+  mlir::func::FuncOp callee =
       box.isPointer()
           ? fir::runtime::getRuntimeFunc<mkRTKey(PointerNullifyCharacter)>(
                 loc, builder)
@@ -166,7 +166,7 @@ static mlir::Value genRuntimeAllocate(fir::FirOpBuilder &builder,
                                       mlir::Location loc,
                                       const fir::MutableBoxValue &box,
                                       ErrorManager &errorManager) {
-  mlir::FuncOp callee =
+  mlir::func::FuncOp callee =
       box.isPointer()
           ? fir::runtime::getRuntimeFunc<mkRTKey(PointerAllocate)>(loc, builder)
           : fir::runtime::getRuntimeFunc<mkRTKey(AllocatableAllocate)>(loc,
@@ -187,7 +187,7 @@ static mlir::Value genRuntimeDeallocate(fir::FirOpBuilder &builder,
                                         ErrorManager &errorManager) {
   // Ensure fir.box is up-to-date before passing it to deallocate runtime.
   mlir::Value boxAddress = fir::factory::getMutableIRBox(builder, loc, box);
-  mlir::FuncOp callee =
+  mlir::func::FuncOp callee =
       box.isPointer()
           ? fir::runtime::getRuntimeFunc<mkRTKey(PointerDeallocate)>(loc,
                                                                      builder)

diff  --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index bc14e6b74da51..675f9c93d56dd 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -195,8 +195,8 @@ class FirConverter : public Fortran::lower::AbstractConverter {
     for (int entryIndex = 0, last = funit.entryPointList.size();
          entryIndex < last; ++entryIndex) {
       funit.setActiveEntry(entryIndex);
-      // Calling CalleeInterface ctor will build a declaration mlir::FuncOp with
-      // no other side effects.
+      // Calling CalleeInterface ctor will build a declaration
+      // mlir::func::FuncOp with no other side effects.
       // TODO: when doing some compiler profiling on real apps, it may be worth
       // to check it's better to save the CalleeInterface instead of recomputing
       // it later when lowering the body. CalleeInterface ctor should be linear
@@ -742,8 +742,8 @@ class FirConverter : public Fortran::lower::AbstractConverter {
     return cond;
   }
 
-  mlir::FuncOp getFunc(llvm::StringRef name, mlir::FunctionType ty) {
-    if (mlir::FuncOp func = builder->getNamedFunction(name)) {
+  mlir::func::FuncOp getFunc(llvm::StringRef name, mlir::FunctionType ty) {
+    if (mlir::func::FuncOp func = builder->getNamedFunction(name)) {
       assert(func.getFunctionType() == ty);
       return func;
     }
@@ -2191,7 +2191,7 @@ class FirConverter : public Fortran::lower::AbstractConverter {
   void startNewFunction(Fortran::lower::pft::FunctionLikeUnit &funit) {
     assert(!builder && "expected nullptr");
     Fortran::lower::CalleeInterface callee(funit, *this);
-    mlir::FuncOp func = callee.addEntryBlockAndMapArguments();
+    mlir::func::FuncOp func = callee.addEntryBlockAndMapArguments();
     builder = new fir::FirOpBuilder(func, bridge.getKindMap());
     assert(builder && "FirOpBuilder did not instantiate");
     builder->setInsertionPointToStart(&func.front());
@@ -2387,7 +2387,7 @@ class FirConverter : public Fortran::lower::AbstractConverter {
     // FIXME: get rid of the bogus function context and instantiate the
     // globals directly into the module.
     mlir::MLIRContext *context = &getMLIRContext();
-    mlir::FuncOp func = fir::FirOpBuilder::createFunction(
+    mlir::func::FuncOp func = fir::FirOpBuilder::createFunction(
         mlir::UnknownLoc::get(context), getModuleOp(),
         fir::NameUniquer::doGenerated("Sham"),
         mlir::FunctionType::get(context, llvm::None, llvm::None));

diff  --git a/flang/lib/Lower/CallInterface.cpp b/flang/lib/Lower/CallInterface.cpp
index a62e53dafebdf..071cda6880c50 100644
--- a/flang/lib/Lower/CallInterface.cpp
+++ b/flang/lib/Lower/CallInterface.cpp
@@ -360,7 +360,8 @@ bool Fortran::lower::CalleeInterface::isMainProgram() const {
   return funit.isMainProgram();
 }
 
-mlir::FuncOp Fortran::lower::CalleeInterface::addEntryBlockAndMapArguments() {
+mlir::func::FuncOp
+Fortran::lower::CalleeInterface::addEntryBlockAndMapArguments() {
   // On the callee side, directly map the mlir::value argument of
   // the function block to the Fortran symbols.
   func.addEntryBlock();
@@ -387,7 +388,7 @@ mlir::Value Fortran::lower::CalleeInterface::getHostAssociatedTuple() const {
 // sides.
 //===----------------------------------------------------------------------===//
 
-static void addSymbolAttribute(mlir::FuncOp func,
+static void addSymbolAttribute(mlir::func::FuncOp func,
                                const Fortran::semantics::Symbol &sym,
                                mlir::MLIRContext &mlirContext) {
   // Only add this on bind(C) functions for which the symbol is not reflected in
@@ -401,7 +402,7 @@ static void addSymbolAttribute(mlir::FuncOp func,
 }
 
 /// Declare drives the different actions to be performed while analyzing the
-/// signature and building/finding the mlir::FuncOp.
+/// signature and building/finding the mlir::func::FuncOp.
 template <typename T>
 void Fortran::lower::CallInterface<T>::declare() {
   if (!side().isMainProgram()) {
@@ -431,8 +432,9 @@ void Fortran::lower::CallInterface<T>::declare() {
   }
 }
 
-/// Once the signature has been analyzed and the mlir::FuncOp was built/found,
-/// map the fir inputs to Fortran entities (the symbols or expressions).
+/// Once the signature has been analyzed and the mlir::func::FuncOp was
+/// built/found, map the fir inputs to Fortran entities (the symbols or
+/// expressions).
 template <typename T>
 void Fortran::lower::CallInterface<T>::mapPassedEntities() {
   // map back fir inputs to passed entities
@@ -1165,11 +1167,11 @@ mlir::FunctionType Fortran::lower::translateSignature(
       .getFunctionType();
 }
 
-mlir::FuncOp Fortran::lower::getOrDeclareFunction(
+mlir::func::FuncOp Fortran::lower::getOrDeclareFunction(
     llvm::StringRef name, const Fortran::evaluate::ProcedureDesignator &proc,
     Fortran::lower::AbstractConverter &converter) {
   mlir::ModuleOp module = converter.getModuleOp();
-  mlir::FuncOp func = fir::FirOpBuilder::getNamedFunction(module, name);
+  mlir::func::FuncOp func = fir::FirOpBuilder::getNamedFunction(module, name);
   if (func)
     return func;
 
@@ -1185,7 +1187,7 @@ mlir::FuncOp Fortran::lower::getOrDeclareFunction(
   mlir::FunctionType ty = SignatureBuilder{characteristics.value(), converter,
                                            /*forceImplicit=*/false}
                               .getFunctionType();
-  mlir::FuncOp newFunc =
+  mlir::func::FuncOp newFunc =
       fir::FirOpBuilder::createFunction(loc, module, name, ty);
   addSymbolAttribute(newFunc, *symbol, converter.getMLIRContext());
   return newFunc;

diff  --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp
index 68cd69da958f3..52f207beed754 100644
--- a/flang/lib/Lower/ConvertExpr.cpp
+++ b/flang/lib/Lower/ConvertExpr.cpp
@@ -674,8 +674,9 @@ class ScalarExprLowering {
 
   mlir::Type getSomeKindInteger() { return builder.getIndexType(); }
 
-  mlir::FuncOp getFunction(llvm::StringRef name, mlir::FunctionType funTy) {
-    if (mlir::FuncOp func = builder.getNamedFunction(name))
+  mlir::func::FuncOp getFunction(llvm::StringRef name,
+                                 mlir::FunctionType funTy) {
+    if (mlir::func::FuncOp func = builder.getNamedFunction(name))
       return func;
     return builder.createFunction(getLoc(), name, funTy);
   }
@@ -800,7 +801,7 @@ class ScalarExprLowering {
             fir::factory::extractCharacterProcedureTuple(builder, loc, funcPtr);
     } else {
       std::string name = converter.mangleName(*symbol);
-      mlir::FuncOp func =
+      mlir::func::FuncOp func =
           Fortran::lower::getOrDeclareFunction(name, proc, converter);
       funcPtr = builder.create<fir::AddrOfOp>(loc, func.getFunctionType(),
                                               builder.getSymbolRefAttr(name));
@@ -2258,7 +2259,8 @@ class ScalarExprLowering {
 
   // Find the argument that corresponds to the host associations.
   // Verify some assumptions about how the signature was built here.
-  [[maybe_unused]] static unsigned findHostAssocTuplePos(mlir::FuncOp fn) {
+  [[maybe_unused]] static unsigned
+  findHostAssocTuplePos(mlir::func::FuncOp fn) {
     // Scan the argument list from last to first as the host associations are
     // appended for now.
     for (unsigned i = fn.getNumArguments(); i > 0; --i)
@@ -2447,7 +2449,7 @@ class ScalarExprLowering {
     // different view of what the function signature is in different locations.
     // Casts are inserted as needed below to accommodate this.
 
-    // The mlir::FuncOp type prevails, unless it has a different number of
+    // The mlir::func::FuncOp type prevails, unless it has a different number of
     // arguments which can happen in legal program if it was passed as a dummy
     // procedure argument earlier with no further type information.
     mlir::SymbolRefAttr funcSymbolAttr;
@@ -3160,7 +3162,7 @@ static mlir::Type adjustedArrayElementType(mlir::Type t) {
 /// Helper to generate calls to scalar user defined assignment procedures.
 static void genScalarUserDefinedAssignmentCall(fir::FirOpBuilder &builder,
                                                mlir::Location loc,
-                                               mlir::FuncOp func,
+                                               mlir::func::FuncOp func,
                                                const fir::ExtendedValue &lhs,
                                                const fir::ExtendedValue &rhs) {
   auto prepareUserDefinedArg =
@@ -3640,12 +3642,12 @@ class ArrayExprLowering {
     const auto *rhs = procRef.arguments()[1].value().UnwrapExpr();
     assert(lhs && rhs &&
            "user defined assignment arguments must be expressions");
-    mlir::FuncOp func =
+    mlir::func::FuncOp func =
         Fortran::lower::CallerInterface(procRef, converter).getFuncOp();
     ael.lowerElementalUserAssignment(func, *lhs, *rhs);
   }
 
-  void lowerElementalUserAssignment(mlir::FuncOp userAssignment,
+  void lowerElementalUserAssignment(mlir::func::FuncOp userAssignment,
                                     const Fortran::lower::SomeExpr &lhs,
                                     const Fortran::lower::SomeExpr &rhs) {
     mlir::Location loc = getLoc();
@@ -3737,7 +3739,8 @@ class ArrayExprLowering {
       Fortran::lower::AbstractConverter &converter,
       Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx,
       Fortran::lower::ExplicitIterSpace &explicitIterSpace,
-      mlir::FuncOp userAssignmentFunction, const Fortran::lower::SomeExpr &lhs,
+      mlir::func::FuncOp userAssignmentFunction,
+      const Fortran::lower::SomeExpr &lhs,
       const Fortran::lower::SomeExpr &rhs) {
     Fortran::lower::ImplicitIterSpace implicit;
     ArrayExprLowering ael(converter, stmtCtx, symMap,
@@ -3746,7 +3749,7 @@ class ArrayExprLowering {
     return ael.lowerScalarUserAssignment(userAssignmentFunction, lhs, rhs);
   }
 
-  ExtValue lowerScalarUserAssignment(mlir::FuncOp userAssignment,
+  ExtValue lowerScalarUserAssignment(mlir::func::FuncOp userAssignment,
                                      const Fortran::lower::SomeExpr &lhs,
                                      const Fortran::lower::SomeExpr &rhs) {
     mlir::Location loc = getLoc();
@@ -5904,7 +5907,7 @@ class ArrayExprLowering {
   /// Create a call to the LLVM memcpy intrinsic.
   void createCallMemcpy(llvm::ArrayRef<mlir::Value> args) {
     mlir::Location loc = getLoc();
-    mlir::FuncOp memcpyFunc = fir::factory::getLlvmMemcpy(builder);
+    mlir::func::FuncOp memcpyFunc = fir::factory::getLlvmMemcpy(builder);
     mlir::SymbolRefAttr funcSymAttr =
         builder.getSymbolRefAttr(memcpyFunc.getName());
     mlir::FunctionType funcTy = memcpyFunc.getFunctionType();
@@ -5917,7 +5920,7 @@ class ArrayExprLowering {
                          mlir::Value bufferSize, mlir::Value buffSize,
                          mlir::Value eleSz) {
     mlir::Location loc = getLoc();
-    mlir::FuncOp reallocFunc = fir::factory::getRealloc(builder);
+    mlir::func::FuncOp reallocFunc = fir::factory::getRealloc(builder);
     auto cond = builder.create<mlir::arith::CmpIOp>(
         loc, mlir::arith::CmpIPredicate::sle, bufferSize, needed);
     auto ifOp = builder.create<fir::IfOp>(loc, mem.getType(), cond,
@@ -7051,7 +7054,7 @@ mlir::Value Fortran::lower::createSubroutineCall(
           call);
     } else if (explicitIterSpace.isActive() && lhs->Rank() == 0) {
       // Scalar defined assignment (elemental or not) in a FORALL context.
-      mlir::FuncOp func =
+      mlir::func::FuncOp func =
           Fortran::lower::CallerInterface(call, converter).getFuncOp();
       ArrayExprLowering::lowerScalarUserAssignment(
           converter, symMap, stmtCtx, explicitIterSpace, func, *lhs, *rhs);

diff  --git a/flang/lib/Lower/HostAssociations.cpp b/flang/lib/Lower/HostAssociations.cpp
index 503b4c9b8fcde..69d4336150c8f 100644
--- a/flang/lib/Lower/HostAssociations.cpp
+++ b/flang/lib/Lower/HostAssociations.cpp
@@ -513,7 +513,7 @@ void Fortran::lower::HostAssociations::internalProcedureBindings(
   mlir::Type argTy = getArgumentType(converter);
   mlir::TupleType tupTy = unwrapTupleTy(argTy);
   mlir::Location loc = converter.getCurrentLocation();
-  mlir::FuncOp func = builder.getFunction();
+  mlir::func::FuncOp func = builder.getFunction();
   mlir::Value tupleArg;
   for (auto [ty, arg] : llvm::reverse(llvm::zip(
            func.getFunctionType().getInputs(), func.front().getArguments())))

diff  --git a/flang/lib/Lower/IO.cpp b/flang/lib/Lower/IO.cpp
index acaea5d75fdf7..679452854e071 100644
--- a/flang/lib/Lower/IO.cpp
+++ b/flang/lib/Lower/IO.cpp
@@ -159,10 +159,10 @@ inline int64_t getLength(mlir::Type argTy) {
 
 /// Get (or generate) the MLIR FuncOp for a given IO runtime function.
 template <typename E>
-static mlir::FuncOp getIORuntimeFunc(mlir::Location loc,
-                                     fir::FirOpBuilder &builder) {
+static mlir::func::FuncOp getIORuntimeFunc(mlir::Location loc,
+                                           fir::FirOpBuilder &builder) {
   llvm::StringRef name = getName<E>();
-  mlir::FuncOp func = builder.getNamedFunction(name);
+  mlir::func::FuncOp func = builder.getNamedFunction(name);
   if (func)
     return func;
   auto funTy = getTypeModel<E>()(builder.getContext());
@@ -180,7 +180,8 @@ static mlir::Value genEndIO(Fortran::lower::AbstractConverter &converter,
                             Fortran::lower::StatementContext &stmtCtx) {
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
   if (csi.ioMsgExpr) {
-    mlir::FuncOp getIoMsg = getIORuntimeFunc<mkIOKey(GetIoMsg)>(loc, builder);
+    mlir::func::FuncOp getIoMsg =
+        getIORuntimeFunc<mkIOKey(GetIoMsg)>(loc, builder);
     fir::ExtendedValue ioMsgVar =
         converter.genExprAddr(csi.ioMsgExpr, stmtCtx, loc);
     builder.create<fir::CallOp>(
@@ -192,7 +193,7 @@ static mlir::Value genEndIO(Fortran::lower::AbstractConverter &converter,
             builder.createConvert(loc, getIoMsg.getFunctionType().getInput(2),
                                   fir::getLen(ioMsgVar))});
   }
-  mlir::FuncOp endIoStatement =
+  mlir::func::FuncOp endIoStatement =
       getIORuntimeFunc<mkIOKey(EndIoStatement)>(loc, builder);
   auto call = builder.create<fir::CallOp>(loc, endIoStatement,
                                           mlir::ValueRange{cookie});
@@ -387,7 +388,7 @@ getNamelistGroup(Fortran::lower::AbstractConverter &converter,
 
 /// Generate a namelist IO call.
 static void genNamelistIO(Fortran::lower::AbstractConverter &converter,
-                          mlir::Value cookie, mlir::FuncOp funcOp,
+                          mlir::Value cookie, mlir::func::FuncOp funcOp,
                           Fortran::semantics::Symbol &symbol, bool checkResult,
                           mlir::Value &ok,
                           Fortran::lower::StatementContext &stmtCtx) {
@@ -402,9 +403,9 @@ static void genNamelistIO(Fortran::lower::AbstractConverter &converter,
 }
 
 /// Get the output function to call for a value of the given type.
-static mlir::FuncOp getOutputFunc(mlir::Location loc,
-                                  fir::FirOpBuilder &builder, mlir::Type type,
-                                  bool isFormatted) {
+static mlir::func::FuncOp getOutputFunc(mlir::Location loc,
+                                        fir::FirOpBuilder &builder,
+                                        mlir::Type type, bool isFormatted) {
   if (!isFormatted)
     return getIORuntimeFunc<mkIOKey(OutputDescriptor)>(loc, builder);
   if (auto ty = type.dyn_cast<mlir::IntegerType>()) {
@@ -478,7 +479,8 @@ genOutputItemList(Fortran::lower::AbstractConverter &converter,
     if (!expr)
       fir::emitFatalError(loc, "internal error: could not get evaluate::Expr");
     mlir::Type itemTy = converter.genType(*expr);
-    mlir::FuncOp outputFunc = getOutputFunc(loc, builder, itemTy, isFormatted);
+    mlir::func::FuncOp outputFunc =
+        getOutputFunc(loc, builder, itemTy, isFormatted);
     mlir::Type argType = outputFunc.getFunctionType().getInput(1);
     assert((isFormatted || argType.isa<fir::BoxType>()) &&
            "expect descriptor for unformatted IO runtime");
@@ -517,8 +519,9 @@ genOutputItemList(Fortran::lower::AbstractConverter &converter,
 }
 
 /// Get the input function to call for a value of the given type.
-static mlir::FuncOp getInputFunc(mlir::Location loc, fir::FirOpBuilder &builder,
-                                 mlir::Type type, bool isFormatted) {
+static mlir::func::FuncOp getInputFunc(mlir::Location loc,
+                                       fir::FirOpBuilder &builder,
+                                       mlir::Type type, bool isFormatted) {
   if (!isFormatted)
     return getIORuntimeFunc<mkIOKey(InputDescriptor)>(loc, builder);
   if (auto ty = type.dyn_cast<mlir::IntegerType>())
@@ -569,7 +572,7 @@ static void boolRefToLogical(mlir::Location loc, fir::FirOpBuilder &builder,
 
 static mlir::Value createIoRuntimeCallForItem(mlir::Location loc,
                                               fir::FirOpBuilder &builder,
-                                              mlir::FuncOp inputFunc,
+                                              mlir::func::FuncOp inputFunc,
                                               mlir::Value cookie,
                                               const fir::ExtendedValue &item) {
   mlir::Type argType = inputFunc.getFunctionType().getInput(1);
@@ -624,7 +627,7 @@ static void genInputItemList(Fortran::lower::AbstractConverter &converter,
     if (Fortran::evaluate::HasVectorSubscript(*expr)) {
       auto vectorSubscriptBox =
           Fortran::lower::genVectorSubscriptBox(loc, converter, stmtCtx, *expr);
-      mlir::FuncOp inputFunc = getInputFunc(
+      mlir::func::FuncOp inputFunc = getInputFunc(
           loc, builder, vectorSubscriptBox.getElementType(), isFormatted);
       const bool mustBox =
           inputFunc.getFunctionType().getInput(1).isa<fir::BoxType>();
@@ -650,7 +653,8 @@ static void genInputItemList(Fortran::lower::AbstractConverter &converter,
       continue;
     }
     mlir::Type itemTy = converter.genType(*expr);
-    mlir::FuncOp inputFunc = getInputFunc(loc, builder, itemTy, isFormatted);
+    mlir::func::FuncOp inputFunc =
+        getInputFunc(loc, builder, itemTy, isFormatted);
     auto itemExv = inputFunc.getFunctionType().getInput(1).isa<fir::BoxType>()
                        ? converter.genExprBox(*expr, stmtCtx, loc)
                        : converter.genExprAddr(expr, stmtCtx, loc);
@@ -870,7 +874,7 @@ mlir::Value genIntIOOption(Fortran::lower::AbstractConverter &converter,
                            const B &spec) {
   Fortran::lower::StatementContext localStatementCtx;
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-  mlir::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
+  mlir::func::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
   mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
   mlir::Value expr = fir::getBase(converter.genExprValue(
       Fortran::semantics::GetExpr(spec.v), localStatementCtx, loc));
@@ -887,7 +891,7 @@ mlir::Value genCharIOOption(Fortran::lower::AbstractConverter &converter,
                             const B &spec) {
   Fortran::lower::StatementContext localStatementCtx;
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-  mlir::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
+  mlir::func::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
   mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
   std::tuple<mlir::Value, mlir::Value, mlir::Value> tup =
       lowerStringLit(converter, loc, localStatementCtx, spec,
@@ -919,7 +923,7 @@ mlir::Value genIOOption<Fortran::parser::FileNameExpr>(
   Fortran::lower::StatementContext localStatementCtx;
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
   // has an extra KIND argument
-  mlir::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(SetFile)>(loc, builder);
+  mlir::func::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(SetFile)>(loc, builder);
   mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
   std::tuple<mlir::Value, mlir::Value, mlir::Value> tup =
       lowerStringLit(converter, loc, localStatementCtx, spec,
@@ -934,7 +938,7 @@ mlir::Value genIOOption<Fortran::parser::ConnectSpec::CharExpr>(
     Fortran::lower::AbstractConverter &converter, mlir::Location loc,
     mlir::Value cookie, const Fortran::parser::ConnectSpec::CharExpr &spec) {
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-  mlir::FuncOp ioFunc;
+  mlir::func::FuncOp ioFunc;
   switch (std::get<Fortran::parser::ConnectSpec::CharExpr::Kind>(spec.t)) {
   case Fortran::parser::ConnectSpec::CharExpr::Kind::Access:
     ioFunc = getIORuntimeFunc<mkIOKey(SetAccess)>(loc, builder);
@@ -1010,7 +1014,7 @@ mlir::Value genIOOption<Fortran::parser::IoControlSpec::CharExpr>(
     Fortran::lower::AbstractConverter &converter, mlir::Location loc,
     mlir::Value cookie, const Fortran::parser::IoControlSpec::CharExpr &spec) {
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-  mlir::FuncOp ioFunc;
+  mlir::func::FuncOp ioFunc;
   switch (std::get<Fortran::parser::IoControlSpec::CharExpr::Kind>(spec.t)) {
   case Fortran::parser::IoControlSpec::CharExpr::Kind::Advance:
     ioFunc = getIORuntimeFunc<mkIOKey(SetAdvance)>(loc, builder);
@@ -1090,7 +1094,8 @@ static void genIOReadSize(Fortran::lower::AbstractConverter &converter,
             std::get_if<Fortran::parser::IoControlSpec::Size>(&spec.u)) {
 
       fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-      mlir::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(GetSize)>(loc, builder);
+      mlir::func::FuncOp ioFunc =
+          getIORuntimeFunc<mkIOKey(GetSize)>(loc, builder);
       auto sizeValue =
           builder.create<fir::CallOp>(loc, ioFunc, mlir::ValueRange{cookie})
               .getResult(0);
@@ -1201,7 +1206,7 @@ genConditionHandlerCall(Fortran::lower::AbstractConverter &converter,
   if (!csi.hasAnyConditionSpec())
     return;
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-  mlir::FuncOp enableHandlers =
+  mlir::func::FuncOp enableHandlers =
       getIORuntimeFunc<mkIOKey(EnableHandlers)>(loc, builder);
   mlir::Type boolType = enableHandlers.getFunctionType().getInput(1);
   auto boolValue = [&](bool specifierIsPresent) {
@@ -1557,7 +1562,7 @@ static mlir::Value genBasicIOStmt(Fortran::lower::AbstractConverter &converter,
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
   Fortran::lower::StatementContext stmtCtx;
   mlir::Location loc = converter.getCurrentLocation();
-  mlir::FuncOp beginFunc = getIORuntimeFunc<K>(loc, builder);
+  mlir::func::FuncOp beginFunc = getIORuntimeFunc<K>(loc, builder);
   mlir::FunctionType beginFuncTy = beginFunc.getFunctionType();
   mlir::Value unit = fir::getBase(converter.genExprValue(
       getExpr<Fortran::parser::FileUnitNumber>(stmt), stmtCtx, loc));
@@ -1610,7 +1615,8 @@ genNewunitSpec(Fortran::lower::AbstractConverter &converter, mlir::Location loc,
             std::get_if<Fortran::parser::ConnectSpec::Newunit>(&spec.u)) {
       Fortran::lower::StatementContext stmtCtx;
       fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-      mlir::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(GetNewUnit)>(loc, builder);
+      mlir::func::FuncOp ioFunc =
+          getIORuntimeFunc<mkIOKey(GetNewUnit)>(loc, builder);
       mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
       const auto *var = Fortran::semantics::GetExpr(newunit->v);
       mlir::Value addr = builder.createConvert(
@@ -1629,7 +1635,7 @@ Fortran::lower::genOpenStatement(Fortran::lower::AbstractConverter &converter,
                                  const Fortran::parser::OpenStmt &stmt) {
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
   Fortran::lower::StatementContext stmtCtx;
-  mlir::FuncOp beginFunc;
+  mlir::func::FuncOp beginFunc;
   llvm::SmallVector<mlir::Value> beginArgs;
   mlir::Location loc = converter.getCurrentLocation();
   bool hasNewunitSpec = false;
@@ -1676,7 +1682,7 @@ Fortran::lower::genWaitStatement(Fortran::lower::AbstractConverter &converter,
   Fortran::lower::StatementContext stmtCtx;
   mlir::Location loc = converter.getCurrentLocation();
   bool hasId = hasSpec<Fortran::parser::IdExpr>(stmt);
-  mlir::FuncOp beginFunc =
+  mlir::func::FuncOp beginFunc =
       hasId ? getIORuntimeFunc<mkIOKey(BeginWait)>(loc, builder)
             : getIORuntimeFunc<mkIOKey(BeginWaitAll)>(loc, builder);
   mlir::FunctionType beginFuncTy = beginFunc.getFunctionType();
@@ -1710,7 +1716,7 @@ Fortran::lower::genWaitStatement(Fortran::lower::AbstractConverter &converter,
 
 // Get the begin data transfer IO function to call for the given values.
 template <bool isInput>
-mlir::FuncOp
+mlir::func::FuncOp
 getBeginDataTransferFunc(mlir::Location loc, fir::FirOpBuilder &builder,
                          bool isFormatted, bool isListOrNml, bool isInternal,
                          bool isInternalWithDesc, bool isAsync) {
@@ -1844,7 +1850,7 @@ genDataTransferStmt(Fortran::lower::AbstractConverter &converter,
   const bool isNml = isDataTransferNamelist(stmt);
 
   // Generate the begin data transfer function call.
-  mlir::FuncOp ioFunc = getBeginDataTransferFunc<isInput>(
+  mlir::func::FuncOp ioFunc = getBeginDataTransferFunc<isInput>(
       loc, builder, isFormatted, isList || isNml, isInternal,
       isInternalWithDesc, isAsync);
   llvm::SmallVector<mlir::Value> ioArgs;
@@ -1960,7 +1966,7 @@ mlir::Value genInquireSpec<Fortran::parser::InquireSpec::CharVar>(
       Fortran::parser::InquireSpec::CharVar::Kind::Iomsg)
     return {};
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-  mlir::FuncOp specFunc =
+  mlir::func::FuncOp specFunc =
       getIORuntimeFunc<mkIOKey(InquireCharacter)>(loc, builder);
   mlir::FunctionType specFuncTy = specFunc.getFunctionType();
   const auto *varExpr = Fortran::semantics::GetExpr(
@@ -1990,7 +1996,7 @@ mlir::Value genInquireSpec<Fortran::parser::InquireSpec::IntVar>(
       Fortran::parser::InquireSpec::IntVar::Kind::Iostat)
     return {};
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
-  mlir::FuncOp specFunc =
+  mlir::func::FuncOp specFunc =
       getIORuntimeFunc<mkIOKey(InquireInteger64)>(loc, builder);
   mlir::FunctionType specFuncTy = specFunc.getFunctionType();
   const auto *varExpr = Fortran::semantics::GetExpr(
@@ -2027,7 +2033,7 @@ mlir::Value genInquireSpec<Fortran::parser::InquireSpec::LogVar>(
   bool pendId =
       idExpr &&
       logVarKind == Fortran::parser::InquireSpec::LogVar::Kind::Pending;
-  mlir::FuncOp specFunc =
+  mlir::func::FuncOp specFunc =
       pendId ? getIORuntimeFunc<mkIOKey(InquirePendingId)>(loc, builder)
              : getIORuntimeFunc<mkIOKey(InquireLogical)>(loc, builder);
   mlir::FunctionType specFuncTy = specFunc.getFunctionType();
@@ -2095,7 +2101,7 @@ mlir::Value Fortran::lower::genInquireStatement(
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
   Fortran::lower::StatementContext stmtCtx;
   mlir::Location loc = converter.getCurrentLocation();
-  mlir::FuncOp beginFunc;
+  mlir::func::FuncOp beginFunc;
   ConditionSpecInfo csi;
   llvm::SmallVector<mlir::Value> beginArgs;
   const auto *list =

diff  --git a/flang/lib/Lower/IntrinsicCall.cpp b/flang/lib/Lower/IntrinsicCall.cpp
index 1b02bdcf9e4d4..cf68b141bd24e 100644
--- a/flang/lib/Lower/IntrinsicCall.cpp
+++ b/flang/lib/Lower/IntrinsicCall.cpp
@@ -573,8 +573,9 @@ struct IntrinsicLibrary {
                            llvm::ArrayRef<fir::ExtendedValue> args);
 
   template <typename GeneratorType>
-  mlir::FuncOp getWrapper(GeneratorType, llvm::StringRef name,
-                          mlir::FunctionType, bool loadRefArguments = false);
+  mlir::func::FuncOp getWrapper(GeneratorType, llvm::StringRef name,
+                                mlir::FunctionType,
+                                bool loadRefArguments = false);
 
   /// Generate calls to ElementalGenerator, handling the elemental aspects
   template <typename GeneratorType>
@@ -1226,11 +1227,12 @@ class FunctionDistance {
   bool infinite = false; // When forbidden conversion or wrong argument number
 };
 
-/// Build mlir::FuncOp from runtime symbol description and add
+/// Build mlir::func::FuncOp from runtime symbol description and add
 /// fir.runtime attribute.
-static mlir::FuncOp getFuncOp(mlir::Location loc, fir::FirOpBuilder &builder,
-                              const RuntimeFunction &runtime) {
-  mlir::FuncOp function = builder.addNamedFunction(
+static mlir::func::FuncOp getFuncOp(mlir::Location loc,
+                                    fir::FirOpBuilder &builder,
+                                    const RuntimeFunction &runtime) {
+  mlir::func::FuncOp function = builder.addNamedFunction(
       loc, runtime.symbol, runtime.typeGenerator(builder.getContext()));
   function->setAttr("fir.runtime", builder.getUnitAttr());
   return function;
@@ -1239,8 +1241,8 @@ static mlir::FuncOp getFuncOp(mlir::Location loc, fir::FirOpBuilder &builder,
 /// Select runtime function that has the smallest distance to the intrinsic
 /// function type and that will not imply narrowing arguments or extending the
 /// result.
-/// If nothing is found, the mlir::FuncOp will contain a nullptr.
-mlir::FuncOp searchFunctionInLibrary(
+/// If nothing is found, the mlir::func::FuncOp will contain a nullptr.
+mlir::func::FuncOp searchFunctionInLibrary(
     mlir::Location loc, fir::FirOpBuilder &builder,
     const Fortran::common::StaticMultimapView<RuntimeFunction> &lib,
     llvm::StringRef name, mlir::FunctionType funcType,
@@ -1266,14 +1268,14 @@ mlir::FuncOp searchFunctionInLibrary(
 /// Search runtime for the best runtime function given an intrinsic name
 /// and interface. The interface may not be a perfect match in which case
 /// the caller is responsible to insert argument and return value conversions.
-/// If nothing is found, the mlir::FuncOp will contain a nullptr.
-static mlir::FuncOp getRuntimeFunction(mlir::Location loc,
-                                       fir::FirOpBuilder &builder,
-                                       llvm::StringRef name,
-                                       mlir::FunctionType funcType) {
+/// If nothing is found, the mlir::func::FuncOp will contain a nullptr.
+static mlir::func::FuncOp getRuntimeFunction(mlir::Location loc,
+                                             fir::FirOpBuilder &builder,
+                                             llvm::StringRef name,
+                                             mlir::FunctionType funcType) {
   const RuntimeFunction *bestNearMatch = nullptr;
   FunctionDistance bestMatchDistance{};
-  mlir::FuncOp match;
+  mlir::func::FuncOp match;
   using RtMap = Fortran::common::StaticMultimapView<RuntimeFunction>;
   static constexpr RtMap pgmathF(pgmathFast);
   static_assert(pgmathF.Verify() && "map must be sorted");
@@ -1300,7 +1302,7 @@ static mlir::FuncOp getRuntimeFunction(mlir::Location loc,
   // mathRuntimeVersion == llvmOnly
   static constexpr RtMap llvmIntr(llvmIntrinsics);
   static_assert(llvmIntr.Verify() && "map must be sorted");
-  if (mlir::FuncOp exactMatch =
+  if (mlir::func::FuncOp exactMatch =
           searchFunctionInLibrary(loc, builder, llvmIntr, name, funcType,
                                   &bestNearMatch, bestMatchDistance))
     return exactMatch;
@@ -1577,12 +1579,12 @@ IntrinsicLibrary::invokeGenerator(SubroutineGenerator generator,
 }
 
 template <typename GeneratorType>
-mlir::FuncOp IntrinsicLibrary::getWrapper(GeneratorType generator,
-                                          llvm::StringRef name,
-                                          mlir::FunctionType funcType,
-                                          bool loadRefArguments) {
+mlir::func::FuncOp IntrinsicLibrary::getWrapper(GeneratorType generator,
+                                                llvm::StringRef name,
+                                                mlir::FunctionType funcType,
+                                                bool loadRefArguments) {
   std::string wrapperName = fir::mangleIntrinsicProcedure(name, funcType);
-  mlir::FuncOp function = builder.getNamedFunction(wrapperName);
+  mlir::func::FuncOp function = builder.getNamedFunction(wrapperName);
   if (!function) {
     // First time this wrapper is needed, build it.
     function = builder.createFunction(loc, wrapperName, funcType);
@@ -1663,7 +1665,7 @@ IntrinsicLibrary::outlineInWrapper(GeneratorType generator,
   }
 
   mlir::FunctionType funcType = getFunctionType(resultType, args, builder);
-  mlir::FuncOp wrapper = getWrapper(generator, name, funcType);
+  mlir::func::FuncOp wrapper = getWrapper(generator, name, funcType);
   return builder.create<fir::CallOp>(loc, wrapper, args).getResult(0);
 }
 
@@ -1679,7 +1681,7 @@ fir::ExtendedValue IntrinsicLibrary::outlineInExtendedWrapper(
   for (const auto &extendedVal : args)
     mlirArgs.emplace_back(toValue(extendedVal, builder, loc));
   mlir::FunctionType funcType = getFunctionType(resultType, mlirArgs, builder);
-  mlir::FuncOp wrapper = getWrapper(generator, name, funcType);
+  mlir::func::FuncOp wrapper = getWrapper(generator, name, funcType);
   auto call = builder.create<fir::CallOp>(loc, wrapper, mlirArgs);
   if (resultType)
     return toExtendedValue(call.getResult(0), builder, loc);
@@ -1690,7 +1692,8 @@ fir::ExtendedValue IntrinsicLibrary::outlineInExtendedWrapper(
 IntrinsicLibrary::RuntimeCallGenerator
 IntrinsicLibrary::getRuntimeCallGenerator(llvm::StringRef name,
                                           mlir::FunctionType soughtFuncType) {
-  mlir::FuncOp funcOp = getRuntimeFunction(loc, builder, name, soughtFuncType);
+  mlir::func::FuncOp funcOp =
+      getRuntimeFunction(loc, builder, name, soughtFuncType);
   if (!funcOp) {
     std::string buffer("not yet implemented: missing intrinsic lowering: ");
     llvm::raw_string_ostream sstream(buffer);
@@ -1722,7 +1725,7 @@ mlir::SymbolRefAttr IntrinsicLibrary::getUnrestrictedIntrinsicSymbolRefAttr(
   // So instead of duplicating the runtime, just have the wrappers loading
   // this before calling the code generators.
   bool loadRefArguments = true;
-  mlir::FuncOp funcOp;
+  mlir::func::FuncOp funcOp;
   if (const IntrinsicHandler *handler = findIntrinsicHandler(name))
     funcOp = std::visit(
         [&](auto generator) {

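[Editor's note] The wrapper changes above reduce to a lookup-or-create idiom on fir::FirOpBuilder: look up the mangled wrapper symbol, and only build it the first time it is needed. A minimal sketch of that idiom, using only the builder helpers named in this diff (getNamedFunction, createFunction); the function name makeWrapperStub and the surrounding driver are hypothetical, not part of the commit.

```c++
// Sketch only: lazily obtain a wrapper func::FuncOp by name, declaring it the
// first time it is requested. Mirrors the shape of IntrinsicLibrary::getWrapper
// after the rename; "makeWrapperStub" is a hypothetical name.
#include "flang/Optimizer/Builder/FIRBuilder.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

static mlir::func::FuncOp makeWrapperStub(fir::FirOpBuilder &builder,
                                          mlir::Location loc,
                                          llvm::StringRef wrapperName,
                                          mlir::FunctionType funcType) {
  // Reuse an existing symbol if the module already declares it.
  if (mlir::func::FuncOp existing = builder.getNamedFunction(wrapperName))
    return existing;
  // Otherwise declare it now; the body would be filled in by a generator.
  return builder.createFunction(loc, wrapperName, funcType);
}
```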
diff  --git a/flang/lib/Lower/Runtime.cpp b/flang/lib/Lower/Runtime.cpp
index 65fb2eeb6ae60..a6ccaa61aaa93 100644
--- a/flang/lib/Lower/Runtime.cpp
+++ b/flang/lib/Lower/Runtime.cpp
@@ -45,7 +45,7 @@ void Fortran::lower::genStopStatement(
   mlir::Location loc = converter.getCurrentLocation();
   Fortran::lower::StatementContext stmtCtx;
   llvm::SmallVector<mlir::Value> operands;
-  mlir::FuncOp callee;
+  mlir::func::FuncOp callee;
   mlir::FunctionType calleeType;
   // First operand is stop code (zero if absent)
   if (const auto &code =
@@ -111,7 +111,7 @@ void Fortran::lower::genFailImageStatement(
     Fortran::lower::AbstractConverter &converter) {
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
   mlir::Location loc = converter.getCurrentLocation();
-  mlir::FuncOp callee =
+  mlir::func::FuncOp callee =
       fir::runtime::getRuntimeFunc<mkRTKey(FailImageStatement)>(loc, builder);
   builder.create<fir::CallOp>(loc, callee, llvm::None);
   genUnreachable(builder, loc);
@@ -170,7 +170,7 @@ void Fortran::lower::genPauseStatement(
     const Fortran::parser::PauseStmt &) {
   fir::FirOpBuilder &builder = converter.getFirOpBuilder();
   mlir::Location loc = converter.getCurrentLocation();
-  mlir::FuncOp callee =
+  mlir::func::FuncOp callee =
       fir::runtime::getRuntimeFunc<mkRTKey(PauseStatement)>(loc, builder);
   builder.create<fir::CallOp>(loc, callee, llvm::None);
 }
@@ -179,7 +179,7 @@ mlir::Value Fortran::lower::genAssociated(fir::FirOpBuilder &builder,
                                           mlir::Location loc,
                                           mlir::Value pointer,
                                           mlir::Value target) {
-  mlir::FuncOp func =
+  mlir::func::FuncOp func =
       fir::runtime::getRuntimeFunc<mkRTKey(PointerIsAssociatedWith)>(loc,
                                                                      builder);
   llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
@@ -189,7 +189,7 @@ mlir::Value Fortran::lower::genAssociated(fir::FirOpBuilder &builder,
 
 mlir::Value Fortran::lower::genCpuTime(fir::FirOpBuilder &builder,
                                        mlir::Location loc) {
-  mlir::FuncOp func =
+  mlir::func::FuncOp func =
       fir::runtime::getRuntimeFunc<mkRTKey(CpuTime)>(loc, builder);
   return builder.create<fir::CallOp>(loc, func, llvm::None).getResult(0);
 }
@@ -200,7 +200,7 @@ void Fortran::lower::genDateAndTime(fir::FirOpBuilder &builder,
                                     llvm::Optional<fir::CharBoxValue> time,
                                     llvm::Optional<fir::CharBoxValue> zone,
                                     mlir::Value values) {
-  mlir::FuncOp callee =
+  mlir::func::FuncOp callee =
       fir::runtime::getRuntimeFunc<mkRTKey(DateAndTime)>(loc, builder);
   mlir::FunctionType funcTy = callee.getFunctionType();
   mlir::Type idxTy = builder.getIndexType();
@@ -240,7 +240,7 @@ void Fortran::lower::genDateAndTime(fir::FirOpBuilder &builder,
 void Fortran::lower::genRandomInit(fir::FirOpBuilder &builder,
                                    mlir::Location loc, mlir::Value repeatable,
                                    mlir::Value imageDistinct) {
-  mlir::FuncOp func =
+  mlir::func::FuncOp func =
       fir::runtime::getRuntimeFunc<mkRTKey(RandomInit)>(loc, builder);
   llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
       builder, loc, func.getFunctionType(), repeatable, imageDistinct);
@@ -249,7 +249,7 @@ void Fortran::lower::genRandomInit(fir::FirOpBuilder &builder,
 
 void Fortran::lower::genRandomNumber(fir::FirOpBuilder &builder,
                                      mlir::Location loc, mlir::Value harvest) {
-  mlir::FuncOp func =
+  mlir::func::FuncOp func =
       fir::runtime::getRuntimeFunc<mkRTKey(RandomNumber)>(loc, builder);
   mlir::FunctionType funcTy = func.getFunctionType();
   mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
@@ -263,7 +263,7 @@ void Fortran::lower::genRandomNumber(fir::FirOpBuilder &builder,
 void Fortran::lower::genRandomSeed(fir::FirOpBuilder &builder,
                                    mlir::Location loc, int argIndex,
                                    mlir::Value argBox) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   // argIndex is the nth (0-origin) argument in declaration order,
   // or -1 if no argument is present.
   switch (argIndex) {
@@ -298,7 +298,7 @@ void Fortran::lower::genTransfer(fir::FirOpBuilder &builder, mlir::Location loc,
                                  mlir::Value resultBox, mlir::Value sourceBox,
                                  mlir::Value moldBox) {
 
-  mlir::FuncOp func =
+  mlir::func::FuncOp func =
       fir::runtime::getRuntimeFunc<mkRTKey(Transfer)>(loc, builder);
   mlir::FunctionType fTy = func.getFunctionType();
   mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
@@ -314,7 +314,7 @@ void Fortran::lower::genTransferSize(fir::FirOpBuilder &builder,
                                      mlir::Location loc, mlir::Value resultBox,
                                      mlir::Value sourceBox, mlir::Value moldBox,
                                      mlir::Value size) {
-  mlir::FuncOp func =
+  mlir::func::FuncOp func =
       fir::runtime::getRuntimeFunc<mkRTKey(TransferSize)>(loc, builder);
   mlir::FunctionType fTy = func.getFunctionType();
   mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
@@ -331,7 +331,7 @@ void Fortran::lower::genTransferSize(fir::FirOpBuilder &builder,
 void Fortran::lower::genSystemClock(fir::FirOpBuilder &builder,
                                     mlir::Location loc, mlir::Value count,
                                     mlir::Value rate, mlir::Value max) {
-  auto makeCall = [&](mlir::FuncOp func, mlir::Value arg) {
+  auto makeCall = [&](mlir::func::FuncOp func, mlir::Value arg) {
     mlir::Type kindTy = func.getFunctionType().getInput(0);
     int integerKind = 8;
     if (auto intType =

diff  --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
index 52760be28194d..6c12c515c2c50 100644
--- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp
+++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
@@ -31,21 +31,22 @@ static llvm::cl::opt<std::size_t>
                                       "name"),
                        llvm::cl::init(32));
 
-mlir::FuncOp fir::FirOpBuilder::createFunction(mlir::Location loc,
-                                               mlir::ModuleOp module,
-                                               llvm::StringRef name,
-                                               mlir::FunctionType ty) {
+mlir::func::FuncOp fir::FirOpBuilder::createFunction(mlir::Location loc,
+                                                     mlir::ModuleOp module,
+                                                     llvm::StringRef name,
+                                                     mlir::FunctionType ty) {
   return fir::createFuncOp(loc, module, name, ty);
 }
 
-mlir::FuncOp fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
-                                                 llvm::StringRef name) {
-  return modOp.lookupSymbol<mlir::FuncOp>(name);
+mlir::func::FuncOp fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
+                                                       llvm::StringRef name) {
+  return modOp.lookupSymbol<mlir::func::FuncOp>(name);
 }
 
-mlir::FuncOp fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
-                                                 mlir::SymbolRefAttr symbol) {
-  return modOp.lookupSymbol<mlir::FuncOp>(symbol);
+mlir::func::FuncOp
+fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
+                                    mlir::SymbolRefAttr symbol) {
+  return modOp.lookupSymbol<mlir::func::FuncOp>(symbol);
 }
 
 fir::GlobalOp fir::FirOpBuilder::getNamedGlobal(mlir::ModuleOp modOp,

diff  --git a/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp b/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp
index e07b7eff5e32d..d6d5efb43b381 100644
--- a/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp
+++ b/flang/lib/Optimizer/Builder/LowLevelIntrinsics.cpp
@@ -21,7 +21,7 @@
 #include "flang/Optimizer/Builder/LowLevelIntrinsics.h"
 #include "flang/Optimizer/Builder/FIRBuilder.h"
 
-mlir::FuncOp fir::factory::getLlvmMemcpy(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmMemcpy(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
                                         builder.getI1Type()};
@@ -31,7 +31,7 @@ mlir::FuncOp fir::factory::getLlvmMemcpy(fir::FirOpBuilder &builder) {
                                   "llvm.memcpy.p0i8.p0i8.i64", memcpyTy);
 }
 
-mlir::FuncOp fir::factory::getLlvmMemmove(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmMemmove(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
                                         builder.getI1Type()};
@@ -41,7 +41,7 @@ mlir::FuncOp fir::factory::getLlvmMemmove(fir::FirOpBuilder &builder) {
                                   "llvm.memmove.p0i8.p0i8.i64", memmoveTy);
 }
 
-mlir::FuncOp fir::factory::getLlvmMemset(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmMemset(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
                                         builder.getI1Type()};
@@ -51,7 +51,7 @@ mlir::FuncOp fir::factory::getLlvmMemset(fir::FirOpBuilder &builder) {
                                   "llvm.memset.p0i8.p0i8.i64", memsetTy);
 }
 
-mlir::FuncOp fir::factory::getRealloc(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getRealloc(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   llvm::SmallVector<mlir::Type> args = {ptrTy, builder.getI64Type()};
   auto reallocTy = mlir::FunctionType::get(builder.getContext(), args, {ptrTy});
@@ -59,7 +59,7 @@ mlir::FuncOp fir::factory::getRealloc(fir::FirOpBuilder &builder) {
                                   reallocTy);
 }
 
-mlir::FuncOp fir::factory::getLlvmStackSave(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmStackSave(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   auto funcTy =
       mlir::FunctionType::get(builder.getContext(), llvm::None, {ptrTy});
@@ -67,7 +67,8 @@ mlir::FuncOp fir::factory::getLlvmStackSave(fir::FirOpBuilder &builder) {
                                   funcTy);
 }
 
-mlir::FuncOp fir::factory::getLlvmStackRestore(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp
+fir::factory::getLlvmStackRestore(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   auto funcTy =
       mlir::FunctionType::get(builder.getContext(), {ptrTy}, llvm::None);
@@ -75,7 +76,8 @@ mlir::FuncOp fir::factory::getLlvmStackRestore(fir::FirOpBuilder &builder) {
                                   funcTy);
 }
 
-mlir::FuncOp fir::factory::getLlvmInitTrampoline(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp
+fir::factory::getLlvmInitTrampoline(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   auto funcTy = mlir::FunctionType::get(builder.getContext(),
                                         {ptrTy, ptrTy, ptrTy}, llvm::None);
@@ -83,7 +85,8 @@ mlir::FuncOp fir::factory::getLlvmInitTrampoline(fir::FirOpBuilder &builder) {
                                   "llvm.init.trampoline", funcTy);
 }
 
-mlir::FuncOp fir::factory::getLlvmAdjustTrampoline(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp
+fir::factory::getLlvmAdjustTrampoline(fir::FirOpBuilder &builder) {
   auto ptrTy = builder.getRefType(builder.getIntegerType(8));
   auto funcTy = mlir::FunctionType::get(builder.getContext(), {ptrTy}, {ptrTy});
   return builder.addNamedFunction(builder.getUnknownLoc(),

diff  --git a/flang/lib/Optimizer/Builder/Runtime/Character.cpp b/flang/lib/Optimizer/Builder/Runtime/Character.cpp
index 2e6825fdb695b..947201c615f4a 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Character.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Character.cpp
@@ -60,11 +60,11 @@ static int discoverKind(mlir::Type ty) {
 ///
 /// \p resultBox must be an unallocated allocatable used for the temporary
 /// result.  \p stringBox must be a fir.box describing the adjustr string
-/// argument.  The \p adjustFunc should be a mlir::FuncOp for the appropriate
-/// runtime entry function.
+/// argument.  The \p adjustFunc should be a mlir::func::FuncOp for the
+/// appropriate runtime entry function.
 static void genAdjust(fir::FirOpBuilder &builder, mlir::Location loc,
                       mlir::Value resultBox, mlir::Value stringBox,
-                      mlir::FuncOp &adjustFunc) {
+                      mlir::func::FuncOp &adjustFunc) {
 
   auto fTy = adjustFunc.getFunctionType();
   auto sourceLine =
@@ -94,7 +94,7 @@ fir::runtime::genCharCompare(fir::FirOpBuilder &builder, mlir::Location loc,
                              mlir::arith::CmpIPredicate cmp,
                              mlir::Value lhsBuff, mlir::Value lhsLen,
                              mlir::Value rhsBuff, mlir::Value rhsLen) {
-  mlir::FuncOp beginFunc;
+  mlir::func::FuncOp beginFunc;
   switch (discoverKind(lhsBuff.getType())) {
   case 1:
     beginFunc = fir::runtime::getRuntimeFunc<mkRTKey(CharacterCompareScalar1)>(
@@ -146,7 +146,7 @@ mlir::Value fir::runtime::genIndex(fir::FirOpBuilder &builder,
                                    mlir::Value stringLen,
                                    mlir::Value substringBase,
                                    mlir::Value substringLen, mlir::Value back) {
-  mlir::FuncOp indexFunc;
+  mlir::func::FuncOp indexFunc;
   switch (kind) {
   case 1:
     indexFunc = fir::runtime::getRuntimeFunc<mkRTKey(Index1)>(loc, builder);
@@ -219,7 +219,7 @@ mlir::Value fir::runtime::genScan(fir::FirOpBuilder &builder,
                                   mlir::Value stringBase, mlir::Value stringLen,
                                   mlir::Value setBase, mlir::Value setLen,
                                   mlir::Value back) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   switch (kind) {
   case 1:
     func = fir::runtime::getRuntimeFunc<mkRTKey(Scan1)>(loc, builder);
@@ -256,7 +256,7 @@ mlir::Value fir::runtime::genVerify(fir::FirOpBuilder &builder,
                                     mlir::Value stringBase,
                                     mlir::Value stringLen, mlir::Value setBase,
                                     mlir::Value setLen, mlir::Value back) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   switch (kind) {
   case 1:
     func = fir::runtime::getRuntimeFunc<mkRTKey(Verify1)>(loc, builder);

diff  --git a/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp b/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp
index bae7ea64234ca..7b9e6ab7e479d 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Inquiry.cpp
@@ -17,7 +17,7 @@ using namespace Fortran::runtime;
 mlir::Value fir::runtime::genLboundDim(fir::FirOpBuilder &builder,
                                        mlir::Location loc, mlir::Value array,
                                        mlir::Value dim) {
-  mlir::FuncOp lboundFunc =
+  mlir::func::FuncOp lboundFunc =
       fir::runtime::getRuntimeFunc<mkRTKey(LboundDim)>(loc, builder);
   auto fTy = lboundFunc.getFunctionType();
   auto sourceFile = fir::factory::locationToFilename(builder, loc);
@@ -34,7 +34,7 @@ mlir::Value fir::runtime::genLboundDim(fir::FirOpBuilder &builder,
 void fir::runtime::genUbound(fir::FirOpBuilder &builder, mlir::Location loc,
                              mlir::Value resultBox, mlir::Value array,
                              mlir::Value kind) {
-  mlir::FuncOp uboundFunc =
+  mlir::func::FuncOp uboundFunc =
       fir::runtime::getRuntimeFunc<mkRTKey(Ubound)>(loc, builder);
   auto fTy = uboundFunc.getFunctionType();
   auto sourceFile = fir::factory::locationToFilename(builder, loc);
@@ -50,7 +50,7 @@ void fir::runtime::genUbound(fir::FirOpBuilder &builder, mlir::Location loc,
 mlir::Value fir::runtime::genSizeDim(fir::FirOpBuilder &builder,
                                      mlir::Location loc, mlir::Value array,
                                      mlir::Value dim) {
-  mlir::FuncOp sizeFunc =
+  mlir::func::FuncOp sizeFunc =
       fir::runtime::getRuntimeFunc<mkRTKey(SizeDim)>(loc, builder);
   auto fTy = sizeFunc.getFunctionType();
   auto sourceFile = fir::factory::locationToFilename(builder, loc);
@@ -65,7 +65,7 @@ mlir::Value fir::runtime::genSizeDim(fir::FirOpBuilder &builder,
 /// the DIM argument is absent.
 mlir::Value fir::runtime::genSize(fir::FirOpBuilder &builder,
                                   mlir::Location loc, mlir::Value array) {
-  mlir::FuncOp sizeFunc =
+  mlir::func::FuncOp sizeFunc =
       fir::runtime::getRuntimeFunc<mkRTKey(Size)>(loc, builder);
   auto fTy = sizeFunc.getFunctionType();
   auto sourceFile = fir::factory::locationToFilename(builder, loc);

diff  --git a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
index e9e3d9713a0f5..dc7ca80278f15 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
@@ -209,7 +209,7 @@ struct ForcedSpacing16 {
 mlir::Value fir::runtime::genExponent(fir::FirOpBuilder &builder,
                                       mlir::Location loc, mlir::Type resultType,
                                       mlir::Value x) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   mlir::Type fltTy = x.getType();
 
   if (fltTy.isF32()) {
@@ -245,7 +245,7 @@ mlir::Value fir::runtime::genExponent(fir::FirOpBuilder &builder,
 /// Generate call to Fraction intrinsic runtime routine.
 mlir::Value fir::runtime::genFraction(fir::FirOpBuilder &builder,
                                       mlir::Location loc, mlir::Value x) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   mlir::Type fltTy = x.getType();
 
   if (fltTy.isF32())
@@ -270,7 +270,7 @@ mlir::Value fir::runtime::genFraction(fir::FirOpBuilder &builder,
 mlir::Value fir::runtime::genNearest(fir::FirOpBuilder &builder,
                                      mlir::Location loc, mlir::Value x,
                                      mlir::Value s) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   mlir::Type fltTy = x.getType();
 
   if (fltTy.isF32())
@@ -305,7 +305,7 @@ mlir::Value fir::runtime::genNearest(fir::FirOpBuilder &builder,
 /// Generate call to RRSpacing intrinsic runtime routine.
 mlir::Value fir::runtime::genRRSpacing(fir::FirOpBuilder &builder,
                                        mlir::Location loc, mlir::Value x) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   mlir::Type fltTy = x.getType();
 
   if (fltTy.isF32())
@@ -330,7 +330,7 @@ mlir::Value fir::runtime::genRRSpacing(fir::FirOpBuilder &builder,
 mlir::Value fir::runtime::genScale(fir::FirOpBuilder &builder,
                                    mlir::Location loc, mlir::Value x,
                                    mlir::Value i) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   mlir::Type fltTy = x.getType();
 
   if (fltTy.isF32())
@@ -354,7 +354,7 @@ mlir::Value fir::runtime::genScale(fir::FirOpBuilder &builder,
 mlir::Value fir::runtime::genSetExponent(fir::FirOpBuilder &builder,
                                          mlir::Location loc, mlir::Value x,
                                          mlir::Value i) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   mlir::Type fltTy = x.getType();
 
   if (fltTy.isF32())
@@ -377,7 +377,7 @@ mlir::Value fir::runtime::genSetExponent(fir::FirOpBuilder &builder,
 /// Generate call to Spacing intrinsic runtime routine.
 mlir::Value fir::runtime::genSpacing(fir::FirOpBuilder &builder,
                                      mlir::Location loc, mlir::Value x) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   mlir::Type fltTy = x.getType();
 
   if (fltTy.isF32())

diff  --git a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
index 8f8ca1c3d334f..8b98228ac7039 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
@@ -539,7 +539,7 @@ void fir::runtime::genMaxlocDim(fir::FirOpBuilder &builder, mlir::Location loc,
 mlir::Value fir::runtime::genMaxval(fir::FirOpBuilder &builder,
                                     mlir::Location loc, mlir::Value arrayBox,
                                     mlir::Value maskBox) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   auto ty = arrayBox.getType();
   auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
   auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
@@ -658,7 +658,7 @@ void fir::runtime::genMinvalChar(fir::FirOpBuilder &builder, mlir::Location loc,
 mlir::Value fir::runtime::genMinval(fir::FirOpBuilder &builder,
                                     mlir::Location loc, mlir::Value arrayBox,
                                     mlir::Value maskBox) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   auto ty = arrayBox.getType();
   auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
   auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
@@ -715,7 +715,7 @@ mlir::Value fir::runtime::genProduct(fir::FirOpBuilder &builder,
                                      mlir::Location loc, mlir::Value arrayBox,
                                      mlir::Value maskBox,
                                      mlir::Value resultBox) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   auto ty = arrayBox.getType();
   auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
   auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
@@ -783,7 +783,7 @@ mlir::Value fir::runtime::genDotProduct(fir::FirOpBuilder &builder,
                                         mlir::Value vectorABox,
                                         mlir::Value vectorBBox,
                                         mlir::Value resultBox) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   auto ty = vectorABox.getType();
   auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
   auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
@@ -867,7 +867,7 @@ void fir::runtime::genSumDim(fir::FirOpBuilder &builder, mlir::Location loc,
 mlir::Value fir::runtime::genSum(fir::FirOpBuilder &builder, mlir::Location loc,
                                  mlir::Value arrayBox, mlir::Value maskBox,
                                  mlir::Value resultBox) {
-  mlir::FuncOp func;
+  mlir::func::FuncOp func;
   auto ty = arrayBox.getType();
   auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
   auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();

diff  --git a/flang/lib/Optimizer/Builder/Runtime/Stop.cpp b/flang/lib/Optimizer/Builder/Runtime/Stop.cpp
index 1248eaf49d759..7596b06d02f75 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Stop.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Stop.cpp
@@ -25,7 +25,7 @@ void fir::runtime::genExit(fir::FirOpBuilder &builder, mlir::Location loc,
 void fir::runtime::genReportFatalUserError(fir::FirOpBuilder &builder,
                                            mlir::Location loc,
                                            llvm::StringRef message) {
-  mlir::FuncOp crashFunc =
+  mlir::func::FuncOp crashFunc =
       fir::runtime::getRuntimeFunc<mkRTKey(ReportFatalUserError)>(loc, builder);
   mlir::FunctionType funcTy = crashFunc.getFunctionType();
   mlir::Value msgVal = fir::getBase(

diff  --git a/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp b/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
index 74c79c03b399d..b10079c9e13a4 100644
--- a/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
+++ b/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
@@ -183,7 +183,7 @@ class BoxedProcedurePass : public BoxedProcedurePassBase<BoxedProcedurePass> {
             rewriter.replaceOpWithNewOp<ConvertOp>(
                 addr, typeConverter.convertType(addr.getType()), addr.getVal());
           }
-        } else if (auto func = mlir::dyn_cast<mlir::FuncOp>(op)) {
+        } else if (auto func = mlir::dyn_cast<mlir::func::FuncOp>(op)) {
           mlir::FunctionType ty = func.getFunctionType();
           if (typeConverter.needsConversion(ty)) {
             rewriter.startRootUpdate(func);

diff  --git a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
index 0d64aee25eec9..ba12e2751952e 100644
--- a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
+++ b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
@@ -53,16 +53,16 @@ struct FixupTy {
   FixupTy(Codes code, std::size_t index, std::size_t second = 0)
       : code{code}, index{index}, second{second} {}
   FixupTy(Codes code, std::size_t index,
-          std::function<void(mlir::FuncOp)> &&finalizer)
+          std::function<void(mlir::func::FuncOp)> &&finalizer)
       : code{code}, index{index}, finalizer{finalizer} {}
   FixupTy(Codes code, std::size_t index, std::size_t second,
-          std::function<void(mlir::FuncOp)> &&finalizer)
+          std::function<void(mlir::func::FuncOp)> &&finalizer)
       : code{code}, index{index}, second{second}, finalizer{finalizer} {}
 
   Codes code;
   std::size_t index;
   std::size_t second{};
-  llvm::Optional<std::function<void(mlir::FuncOp)>> finalizer{};
+  llvm::Optional<std::function<void(mlir::func::FuncOp)>> finalizer{};
 }; // namespace
 
 /// Target-specific rewriting of the FIR. This is a prerequisite pass to code
@@ -238,9 +238,9 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
             bool sret;
             if constexpr (std::is_same_v<std::decay_t<A>, fir::CallOp>) {
               sret = callOp.getCallee() &&
-                     functionArgIsSRet(index,
-                                       getModule().lookupSymbol<mlir::FuncOp>(
-                                           *callOp.getCallee()));
+                     functionArgIsSRet(
+                         index, getModule().lookupSymbol<mlir::func::FuncOp>(
+                                    *callOp.getCallee()));
             } else {
               // TODO: dispatch case; how do we put arguments on a call?
               // We cannot put both an sret and the dispatch object first.
@@ -282,8 +282,8 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
                   // confirm that this is a dummy procedure and should be split.
                   // It cannot be used to match because attributes are not
                   // available in case of indirect calls.
-                  auto funcOp =
-                      module.lookupSymbol<mlir::FuncOp>(*callOp.getCallee());
+                  auto funcOp = module.lookupSymbol<mlir::func::FuncOp>(
+                      *callOp.getCallee());
                   if (funcOp &&
                       !funcOp.template getArgAttrOfType<mlir::UnitAttr>(
                           index, charProcAttr))
@@ -423,7 +423,7 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
   /// As the type signature is being changed, this must also update the
   /// function itself to use any new arguments, etc.
   mlir::LogicalResult convertTypes(mlir::ModuleOp mod) {
-    for (auto fn : mod.getOps<mlir::FuncOp>())
+    for (auto fn : mod.getOps<mlir::func::FuncOp>())
       convertSignature(fn);
     return mlir::success();
   }
@@ -453,7 +453,7 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
 
   /// Determine if the signature has host associations. The host association
   /// argument may need special target specific rewriting.
-  static bool hasHostAssociations(mlir::FuncOp func) {
+  static bool hasHostAssociations(mlir::func::FuncOp func) {
     std::size_t end = func.getFunctionType().getInputs().size();
     for (std::size_t i = 0; i < end; ++i)
       if (func.getArgAttrOfType<mlir::UnitAttr>(i, getHostAssocAttrName()))
@@ -463,7 +463,7 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
 
   /// Rewrite the signatures and body of the `FuncOp`s in the module for
   /// the immediately subsequent target code gen.
-  void convertSignature(mlir::FuncOp func) {
+  void convertSignature(mlir::func::FuncOp func) {
     auto funcTy = func.getFunctionType().cast<mlir::FunctionType>();
     if (hasPortableSignature(funcTy) && !hasHostAssociations(func))
       return;
@@ -711,7 +711,7 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
         (*fixup.finalizer)(func);
   }
 
-  inline bool functionArgIsSRet(unsigned index, mlir::FuncOp func) {
+  inline bool functionArgIsSRet(unsigned index, mlir::func::FuncOp func) {
     if (auto attr = func.getArgAttrOfType<mlir::UnitAttr>(index, "llvm.sret"))
       return true;
     return false;
@@ -721,8 +721,8 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
   /// value to a "hidden" first argument or packing the complex into a wide
   /// GPR.
   template <typename A, typename B, typename C>
-  void doComplexReturn(mlir::FuncOp func, A cmplx, B &newResTys, B &newInTys,
-                       C &fixups) {
+  void doComplexReturn(mlir::func::FuncOp func, A cmplx, B &newResTys,
+                       B &newInTys, C &fixups) {
     if (noComplexConversion) {
       newResTys.push_back(cmplx);
       return;
@@ -735,7 +735,7 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
     if (attr.isSRet()) {
       unsigned argNo = newInTys.size();
       fixups.emplace_back(
-          FixupTy::Codes::ReturnAsStore, argNo, [=](mlir::FuncOp func) {
+          FixupTy::Codes::ReturnAsStore, argNo, [=](mlir::func::FuncOp func) {
             func.setArgAttr(argNo, "llvm.sret", rewriter->getUnitAttr());
           });
       newInTys.push_back(argTy);
@@ -749,7 +749,7 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
   /// a temporary memory location or factoring the value into two distinct
   /// arguments.
   template <typename A, typename B, typename C>
-  void doComplexArg(mlir::FuncOp func, A cmplx, B &newInTys, C &fixups) {
+  void doComplexArg(mlir::func::FuncOp func, A cmplx, B &newInTys, C &fixups) {
     if (noComplexConversion) {
       newInTys.push_back(cmplx);
       return;
@@ -766,7 +766,8 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
       if (attr.isByVal()) {
         if (auto align = attr.getAlignment())
           fixups.emplace_back(
-              FixupTy::Codes::ArgumentAsLoad, argNo, [=](mlir::FuncOp func) {
+              FixupTy::Codes::ArgumentAsLoad, argNo,
+              [=](mlir::func::FuncOp func) {
                 func.setArgAttr(argNo, "llvm.byval", rewriter->getUnitAttr());
                 func.setArgAttr(argNo, "llvm.align",
                                 rewriter->getIntegerAttr(
@@ -774,17 +775,18 @@ class TargetRewrite : public TargetRewriteBase<TargetRewrite> {
               });
         else
           fixups.emplace_back(FixupTy::Codes::ArgumentAsLoad, newInTys.size(),
-                              [=](mlir::FuncOp func) {
+                              [=](mlir::func::FuncOp func) {
                                 func.setArgAttr(argNo, "llvm.byval",
                                                 rewriter->getUnitAttr());
                               });
       } else {
         if (auto align = attr.getAlignment())
-          fixups.emplace_back(fixupCode, argNo, index, [=](mlir::FuncOp func) {
-            func.setArgAttr(
-                argNo, "llvm.align",
-                rewriter->getIntegerAttr(rewriter->getIntegerType(32), align));
-          });
+          fixups.emplace_back(
+              fixupCode, argNo, index, [=](mlir::func::FuncOp func) {
+                func.setArgAttr(argNo, "llvm.align",
+                                rewriter->getIntegerAttr(
+                                    rewriter->getIntegerType(32), align));
+              });
         else
           fixups.emplace_back(fixupCode, argNo, index);
       }

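[Editor's note] The FixupTy changes above thread a std::function<void(mlir::func::FuncOp)> finalizer that is stored when the fixup is recorded and run only after the rewritten signature exists. A minimal sketch of that callback shape, assuming an OpBuilder that outlives the fixup; makeSretFixup is a hypothetical helper, not part of the commit.

```c++
// Sketch only: a deferred fixup in the style of FixupTy::finalizer, tagging
// argument `argNo` as an sret argument once the new func::FuncOp signature is
// in place. "makeSretFixup" is hypothetical.
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include <functional>

static std::function<void(mlir::func::FuncOp)>
makeSretFixup(mlir::OpBuilder &builder, unsigned argNo) {
  // `builder` must outlive the returned callback; it is only used to build
  // the unit attribute when the fixup finally runs.
  return [&builder, argNo](mlir::func::FuncOp func) {
    func.setArgAttr(argNo, "llvm.sret", builder.getUnitAttr());
  };
}
```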
diff  --git a/flang/lib/Optimizer/Dialect/FIROps.cpp b/flang/lib/Optimizer/Dialect/FIROps.cpp
index 515cb863d3f4f..08f78c0d17852 100644
--- a/flang/lib/Optimizer/Dialect/FIROps.cpp
+++ b/flang/lib/Optimizer/Dialect/FIROps.cpp
@@ -669,7 +669,7 @@ mlir::ParseResult fir::CallOp::parse(mlir::OpAsmParser &parser,
 }
 
 void fir::CallOp::build(mlir::OpBuilder &builder, mlir::OperationState &result,
-                        mlir::FuncOp callee, mlir::ValueRange operands) {
+                        mlir::func::FuncOp callee, mlir::ValueRange operands) {
   result.addOperands(operands);
   result.addAttribute(getCalleeAttrNameStr(), SymbolRefAttr::get(callee));
   result.addTypes(callee.getFunctionType().getResults());
@@ -3312,14 +3312,15 @@ bool fir::isReferenceLike(mlir::Type type) {
          type.isa<fir::PointerType>();
 }
 
-mlir::FuncOp fir::createFuncOp(mlir::Location loc, mlir::ModuleOp module,
-                               StringRef name, mlir::FunctionType type,
-                               llvm::ArrayRef<mlir::NamedAttribute> attrs) {
-  if (auto f = module.lookupSymbol<mlir::FuncOp>(name))
+mlir::func::FuncOp
+fir::createFuncOp(mlir::Location loc, mlir::ModuleOp module, StringRef name,
+                  mlir::FunctionType type,
+                  llvm::ArrayRef<mlir::NamedAttribute> attrs) {
+  if (auto f = module.lookupSymbol<mlir::func::FuncOp>(name))
     return f;
   mlir::OpBuilder modBuilder(module.getBodyRegion());
   modBuilder.setInsertionPointToEnd(module.getBody());
-  auto result = modBuilder.create<mlir::FuncOp>(loc, name, type, attrs);
+  auto result = modBuilder.create<mlir::func::FuncOp>(loc, name, type, attrs);
   result.setVisibility(mlir::SymbolTable::Visibility::Private);
   return result;
 }
@@ -3335,7 +3336,7 @@ fir::GlobalOp fir::createGlobalOp(mlir::Location loc, mlir::ModuleOp module,
   return result;
 }
 
-bool fir::hasHostAssociationArgument(mlir::FuncOp func) {
+bool fir::hasHostAssociationArgument(mlir::func::FuncOp func) {
   if (auto allArgAttrs = func.getAllArgAttrs())
     for (auto attr : allArgAttrs)
       if (auto dict = attr.template dyn_cast_or_null<mlir::DictionaryAttr>())
@@ -3355,8 +3356,8 @@ bool fir::valueHasFirAttribute(mlir::Value value,
   // If this is a function argument, look in the argument attributes.
   if (auto blockArg = value.dyn_cast<mlir::BlockArgument>()) {
     if (blockArg.getOwner() && blockArg.getOwner()->isEntryBlock())
-      if (auto funcOp =
-              mlir::dyn_cast<mlir::FuncOp>(blockArg.getOwner()->getParentOp()))
+      if (auto funcOp = mlir::dyn_cast<mlir::func::FuncOp>(
+              blockArg.getOwner()->getParentOp()))
         if (funcOp.getArgAttr(blockArg.getArgNumber(), attributeName))
           return true;
     return false;
@@ -3385,7 +3386,7 @@ bool fir::valueHasFirAttribute(mlir::Value value,
   return false;
 }
 
-bool fir::anyFuncArgsHaveAttr(mlir::FuncOp func, llvm::StringRef attr) {
+bool fir::anyFuncArgsHaveAttr(mlir::func::FuncOp func, llvm::StringRef attr) {
   for (unsigned i = 0, end = func.getNumArguments(); i < end; ++i)
     if (func.getArgAttr(i, attr))
       return true;

diff  --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index b7894321825f9..fe0dc696066ad 100644
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -44,7 +44,7 @@ struct AffineIfAnalysis;
 /// these analyses are used twice, first for marking operations for rewrite and
 /// second when doing the rewrite.
 struct AffineFunctionAnalysis {
-  explicit AffineFunctionAnalysis(mlir::FuncOp funcOp) {
+  explicit AffineFunctionAnalysis(mlir::func::FuncOp funcOp) {
     for (fir::DoLoopOp op : funcOp.getOps<fir::DoLoopOp>())
       loopAnalysisMap.try_emplace(op, op, *this);
   }

diff  --git a/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp b/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp
index 0039f8377376c..5e44fdc1b6601 100644
--- a/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp
+++ b/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp
@@ -684,7 +684,7 @@ conservativeCallConflict(llvm::ArrayRef<mlir::Operation *> reaches) {
               call.getCallableForCallee().dyn_cast<mlir::SymbolRefAttr>()) {
         auto module = op->getParentOfType<mlir::ModuleOp>();
         return hasHostAssociationArgument(
-            module.lookupSymbol<mlir::FuncOp>(callee));
+            module.lookupSymbol<mlir::func::FuncOp>(callee));
       }
     return false;
   });

diff  --git a/flang/lib/Optimizer/Transforms/ExternalNameConversion.cpp b/flang/lib/Optimizer/Transforms/ExternalNameConversion.cpp
index ef11b442a1613..6170556c24ac9 100644
--- a/flang/lib/Optimizer/Transforms/ExternalNameConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/ExternalNameConversion.cpp
@@ -61,12 +61,12 @@ class MangleNameOnCallOp : public mlir::OpRewritePattern<fir::CallOp> {
   }
 };
 
-struct MangleNameOnFuncOp : public mlir::OpRewritePattern<mlir::FuncOp> {
+struct MangleNameOnFuncOp : public mlir::OpRewritePattern<mlir::func::FuncOp> {
 public:
   using OpRewritePattern::OpRewritePattern;
 
   mlir::LogicalResult
-  matchAndRewrite(mlir::FuncOp op,
+  matchAndRewrite(mlir::func::FuncOp op,
                   mlir::PatternRewriter &rewriter) const override {
     rewriter.startRootUpdate(op);
     auto result = fir::NameUniquer::deconstruct(op.getSymName());
@@ -143,7 +143,7 @@ void ExternalNameConversionPass::runOnOperation() {
     return true;
   });
 
-  target.addDynamicallyLegalOp<mlir::FuncOp>([](mlir::FuncOp op) {
+  target.addDynamicallyLegalOp<mlir::func::FuncOp>([](mlir::func::FuncOp op) {
     return !fir::NameUniquer::needExternalNameMangling(op.getSymName());
   });
 

diff  --git a/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp b/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp
index 4ce456643f2af..7375b6c6bbe32 100644
--- a/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp
+++ b/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp
@@ -97,7 +97,7 @@ class LoadStoreForwarding {
 class MemDataFlowOpt : public fir::MemRefDataFlowOptBase<MemDataFlowOpt> {
 public:
   void runOnOperation() override {
-    mlir::FuncOp f = getOperation();
+    mlir::func::FuncOp f = getOperation();
 
     auto *domInfo = &getAnalysis<mlir::DominanceInfo>();
     LoadStoreForwarding<fir::LoadOp, fir::StoreOp> lsf(domInfo);

diff  --git a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
index d2a0c70575cf9..373c8e45b322d 100644
--- a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
+++ b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
@@ -40,7 +40,7 @@ class ReturnAnalysis {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ReturnAnalysis)
 
   ReturnAnalysis(mlir::Operation *op) {
-    if (auto func = mlir::dyn_cast<mlir::FuncOp>(op))
+    if (auto func = mlir::dyn_cast<mlir::func::FuncOp>(op))
       for (mlir::Block &block : func)
         for (mlir::Operation &i : block)
           if (mlir::isa<mlir::func::ReturnOp>(i)) {

diff  --git a/flang/unittests/Optimizer/Builder/CharacterTest.cpp b/flang/unittests/Optimizer/Builder/CharacterTest.cpp
index 85ca91a686e71..347ec78e8fec3 100644
--- a/flang/unittests/Optimizer/Builder/CharacterTest.cpp
+++ b/flang/unittests/Optimizer/Builder/CharacterTest.cpp
@@ -27,7 +27,7 @@ struct CharacterTest : public testing::Test {
     // Set up a Module with a dummy function operation inside.
     // Set the insertion point in the function entry block.
     mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
-    mlir::FuncOp func = mlir::FuncOp::create(
+    mlir::func::FuncOp func = mlir::func::FuncOp::create(
         loc, "func1", builder.getFunctionType(llvm::None, llvm::None));
     auto *entryBlock = func.addEntryBlock();
     mod.push_back(mod);

diff  --git a/flang/unittests/Optimizer/Builder/ComplexTest.cpp b/flang/unittests/Optimizer/Builder/ComplexTest.cpp
index b44ee6a3320d9..17a6307e6008b 100644
--- a/flang/unittests/Optimizer/Builder/ComplexTest.cpp
+++ b/flang/unittests/Optimizer/Builder/ComplexTest.cpp
@@ -23,7 +23,7 @@ struct ComplexTest : public testing::Test {
     // Set up a Module with a dummy function operation inside.
     // Set the insertion point in the function entry block.
     mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
-    mlir::FuncOp func = mlir::FuncOp::create(
+    mlir::func::FuncOp func = mlir::func::FuncOp::create(
         loc, "func1", builder.getFunctionType(llvm::None, llvm::None));
     auto *entryBlock = func.addEntryBlock();
     mod.push_back(mod);

diff  --git a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp
index 925df97c1d0c6..77ec88c1c340a 100644
--- a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp
+++ b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp
@@ -27,7 +27,7 @@ struct FIRBuilderTest : public testing::Test {
     // Set up a Module with a dummy function operation inside.
     // Set the insertion point in the function entry block.
     mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
-    mlir::FuncOp func = mlir::FuncOp::create(
+    mlir::func::FuncOp func = mlir::func::FuncOp::create(
         loc, "func1", builder.getFunctionType(llvm::None, llvm::None));
     auto *entryBlock = func.addEntryBlock();
     mod.push_back(mod);

diff  --git a/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h
index 42f9683fffe36..8d549c54a0530 100644
--- a/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h
+++ b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h
@@ -25,8 +25,9 @@ struct RuntimeCallTest : public testing::Test {
     // Set up a Module with a dummy function operation inside.
     // Set the insertion point in the function entry block.
     mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
-    mlir::FuncOp func = mlir::FuncOp::create(loc, "runtime_unit_tests_func",
-        builder.getFunctionType(llvm::None, llvm::None));
+    mlir::func::FuncOp func =
+        mlir::func::FuncOp::create(loc, "runtime_unit_tests_func",
+            builder.getFunctionType(llvm::None, llvm::None));
     auto *entryBlock = func.addEntryBlock();
     mod.push_back(mod);
     builder.setInsertionPointToStart(entryBlock);

diff  --git a/mlir/docs/Bufferization.md b/mlir/docs/Bufferization.md
index f9eee58e43b56..453794904cd04 100644
--- a/mlir/docs/Bufferization.md
+++ b/mlir/docs/Bufferization.md
@@ -507,14 +507,14 @@ The code, slightly simplified and annotated, is reproduced here:
 ```c++
   // Partial bufferization passes.
   pm.addPass(createTensorConstantBufferizePass());
-  pm.addNestedPass<FuncOp>(createTCPBufferizePass()); // Bufferizes the downstream `tcp` dialect.
-  pm.addNestedPass<FuncOp>(createSCFBufferizePass());
-  pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
-  pm.addNestedPass<FuncOp>(createTensorBufferizePass());
+  pm.addNestedPass<func::FuncOp>(createTCPBufferizePass()); // Bufferizes the downstream `tcp` dialect.
+  pm.addNestedPass<func::FuncOp>(createSCFBufferizePass());
+  pm.addNestedPass<func::FuncOp>(createLinalgBufferizePass());
+  pm.addNestedPass<func::FuncOp>(createTensorBufferizePass());
   pm.addPass(createFuncBufferizePass());
 
   // Finalizing bufferization pass.
-  pm.addNestedPass<FuncOp>(createFinalizingBufferizePass());
+  pm.addNestedPass<func::FuncOp>(createFinalizingBufferizePass());
 ```
 
 Looking first at the partial bufferization passes, we see that there are a

diff  --git a/mlir/docs/DialectConversion.md b/mlir/docs/DialectConversion.md
index 1173a0d01dbab..8cba74ab47a54 100644
--- a/mlir/docs/DialectConversion.md
+++ b/mlir/docs/DialectConversion.md
@@ -379,7 +379,7 @@ move into that region. As noted above, the conversions performed by this method
 use the argument materialization hook on the `TypeConverter`. This hook also
 takes an optional `TypeConverter::SignatureConversion` parameter that applies a
 custom conversion to the entry block of the region. The types of the entry block
-arguments are often tied semantically to details on the operation, e.g. FuncOp,
+arguments are often tied semantically to details on the operation, e.g. func::FuncOp,
 AffineForOp, etc. To convert the signature of just the region entry block, and
 not any other blocks within the region, the `applySignatureConversion` hook may
 be used instead. A signature conversion, `TypeConverter::SignatureConversion`,

diff  --git a/mlir/docs/PassManagement.md b/mlir/docs/PassManagement.md
index 7c84d56507020..41a37f5aa7af4 100644
--- a/mlir/docs/PassManagement.md
+++ b/mlir/docs/PassManagement.md
@@ -71,10 +71,10 @@ namespace {
 /// in C++. Passes defined declaratively use a cleaner mechanism for providing
 /// these utilities.
 struct MyFunctionPass : public PassWrapper<MyFunctionPass,
-                                           OperationPass<FuncOp>> {
+                                           OperationPass<func::FuncOp>> {
   void runOnOperation() override {
-    // Get the current FuncOp operation being operated on.
-    FuncOp f = getOperation();
+    // Get the current func::FuncOp operation being operated on.
+    func::FuncOp f = getOperation();
 
     // Walk the operations within the function.
     f.walk([](Operation *inst) {
@@ -356,7 +356,7 @@ nestedModulePM.addPass(std::make_unique<MySPIRVModulePass>());
 
 // Nest a pass manager that operates on functions within the nested SPIRV
 // module.
-OpPassManager &nestedFunctionPM = nestedModulePM.nest<FuncOp>();
+OpPassManager &nestedFunctionPM = nestedModulePM.nest<func::FuncOp>();
 nestedFunctionPM.addPass(std::make_unique<MyFunctionPass>());
 
 // Run the pass manager on the top-level module.
@@ -372,12 +372,12 @@ OpPassManager<ModuleOp>
   MyModulePass
   OpPassManager<spirv::ModuleOp>
     MySPIRVModulePass
-    OpPassManager<FuncOp>
+    OpPassManager<func::FuncOp>
       MyFunctionPass
 ```
 
 These pipelines are then run over a single operation at a time. This means that,
-for example, given a series of consecutive passes on FuncOp, it will execute all
+for example, given a series of consecutive passes on func::FuncOp, it will execute all
 on the first function, then all on the second function, etc. until the entire
 program has been run through the passes. This provides several benefits:
 

diff  --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
index 000f99886b01c..b518a287b6e14 100644
--- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
@@ -221,8 +221,8 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
     }
 
     // Create a new non-toy function, with the same region.
-    auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
-                                              op.getFunctionType());
+    auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
+                                                    op.getFunctionType());
     rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
     rewriter.eraseOp(op);
     return success();

diff  --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
index 4215d7c5685a9..633131829c2ae 100644
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -141,7 +141,7 @@ int dumpMLIR() {
     pm.addPass(mlir::toy::createLowerToAffinePass());
 
     // Add a few cleanups post lowering.
-    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    mlir::OpPassManager &optPM = pm.nest<mlir::func::FuncOp>();
     optPM.addPass(mlir::createCanonicalizerPass());
     optPM.addPass(mlir::createCSEPass());
 

diff  --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
index 000f99886b01c..b518a287b6e14 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -221,8 +221,8 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
     }
 
     // Create a new non-toy function, with the same region.
-    auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
-                                              op.getFunctionType());
+    auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
+                                                    op.getFunctionType());
     rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
     rewriter.eraseOp(op);
     return success();

diff  --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index 7b93ce7fcae95..32261ec82ebf9 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -157,7 +157,7 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
     pm.addPass(mlir::toy::createLowerToAffinePass());
 
     // Add a few cleanups post lowering.
-    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    mlir::OpPassManager &optPM = pm.nest<mlir::func::FuncOp>();
     optPM.addPass(mlir::createCanonicalizerPass());
     optPM.addPass(mlir::createCSEPass());
 

diff  --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
index 000f99886b01c..b518a287b6e14 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
@@ -221,8 +221,8 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
     }
 
     // Create a new non-toy function, with the same region.
-    auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
-                                              op.getFunctionType());
+    auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
+                                                    op.getFunctionType());
     rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
     rewriter.eraseOp(op);
     return success();

diff  --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
index 8bcce329bc4e3..2b8dc76f5993a 100644
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -158,7 +158,7 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
     pm.addPass(mlir::toy::createLowerToAffinePass());
 
     // Add a few cleanups post lowering.
-    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    mlir::OpPassManager &optPM = pm.nest<mlir::func::FuncOp>();
     optPM.addPass(mlir::createCanonicalizerPass());
     optPM.addPass(mlir::createCSEPass());
 

diff  --git a/mlir/include/mlir/Dialect/Affine/Passes.td b/mlir/include/mlir/Dialect/Affine/Passes.td
index e91991213eddb..3e0f2f98f908a 100644
--- a/mlir/include/mlir/Dialect/Affine/Passes.td
+++ b/mlir/include/mlir/Dialect/Affine/Passes.td
@@ -15,7 +15,7 @@
 
 include "mlir/Pass/PassBase.td"
 
-def AffineDataCopyGeneration : Pass<"affine-data-copy-generate", "FuncOp"> {
+def AffineDataCopyGeneration : Pass<"affine-data-copy-generate", "func::FuncOp"> {
   let summary = "Generate explicit copying for affine memory operations";
   let constructor = "mlir::createAffineDataCopyGenerationPass()";
   let dependentDialects = ["memref::MemRefDialect"];
@@ -43,7 +43,7 @@ def AffineDataCopyGeneration : Pass<"affine-data-copy-generate", "FuncOp"> {
   ];
 }
 
-def AffineLoopFusion : Pass<"affine-loop-fusion", "FuncOp"> {
+def AffineLoopFusion : Pass<"affine-loop-fusion", "func::FuncOp"> {
   let summary = "Fuse affine loop nests";
   let description = [{
     This pass performs fusion of loop nests using a slicing-based approach. It
@@ -176,12 +176,12 @@ def AffineLoopFusion : Pass<"affine-loop-fusion", "FuncOp"> {
 }
 
 def AffineLoopInvariantCodeMotion
-    : Pass<"affine-loop-invariant-code-motion", "FuncOp"> {
+    : Pass<"affine-loop-invariant-code-motion", "func::FuncOp"> {
   let summary = "Hoist loop invariant instructions outside of affine loops";
   let constructor = "mlir::createAffineLoopInvariantCodeMotionPass()";
 }
 
-def AffineLoopTiling : Pass<"affine-loop-tile", "FuncOp"> {
+def AffineLoopTiling : Pass<"affine-loop-tile", "func::FuncOp"> {
   let summary = "Tile affine loop nests";
   let constructor = "mlir::createLoopTilingPass()";
   let options = [
@@ -198,7 +198,7 @@ def AffineLoopTiling : Pass<"affine-loop-tile", "FuncOp"> {
   ];
 }
 
-def AffineLoopUnroll : Pass<"affine-loop-unroll", "FuncOp"> {
+def AffineLoopUnroll : Pass<"affine-loop-unroll", "func::FuncOp"> {
   let summary = "Unroll affine loops";
   let constructor = "mlir::createLoopUnrollPass()";
   let options = [
@@ -216,7 +216,7 @@ def AffineLoopUnroll : Pass<"affine-loop-unroll", "FuncOp"> {
   ];
 }
 
-def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam", "FuncOp"> {
+def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam", "func::FuncOp"> {
   let summary = "Unroll and jam affine loops";
   let constructor = "mlir::createLoopUnrollAndJamPass()";
   let options = [
@@ -227,7 +227,7 @@ def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam", "FuncOp"> {
 }
 
 def AffinePipelineDataTransfer
-    : Pass<"affine-pipeline-data-transfer", "FuncOp"> {
+    : Pass<"affine-pipeline-data-transfer", "func::FuncOp"> {
   let summary = "Pipeline non-blocking data transfers between explicitly "
                 "managed levels of the memory hierarchy";
   let description = [{
@@ -295,7 +295,7 @@ def AffinePipelineDataTransfer
   let constructor = "mlir::createPipelineDataTransferPass()";
 }
 
-def AffineScalarReplacement : Pass<"affine-scalrep", "FuncOp"> {
+def AffineScalarReplacement : Pass<"affine-scalrep", "func::FuncOp"> {
   let summary = "Replace affine memref acceses by scalars by forwarding stores "
                 "to loads and eliminating redundant loads";
   let description = [{
@@ -341,7 +341,7 @@ def AffineScalarReplacement : Pass<"affine-scalrep", "FuncOp"> {
   let constructor = "mlir::createAffineScalarReplacementPass()";
 }
 
-def AffineVectorize : Pass<"affine-super-vectorize", "FuncOp"> {
+def AffineVectorize : Pass<"affine-super-vectorize", "func::FuncOp"> {
   let summary = "Vectorize to a target independent n-D vector abstraction";
   let constructor = "mlir::createSuperVectorizePass()";
   let dependentDialects = ["vector::VectorDialect"];
@@ -367,7 +367,7 @@ def AffineVectorize : Pass<"affine-super-vectorize", "FuncOp"> {
   ];
 }
 
-def AffineParallelize : Pass<"affine-parallelize", "FuncOp"> {
+def AffineParallelize : Pass<"affine-parallelize", "func::FuncOp"> {
   let summary = "Convert affine.for ops into 1-D affine.parallel";
   let constructor = "mlir::createAffineParallelizePass()";
   let options = [
@@ -380,19 +380,19 @@ def AffineParallelize : Pass<"affine-parallelize", "FuncOp"> {
   ];
 }
 
-def AffineLoopNormalize : Pass<"affine-loop-normalize", "FuncOp"> {
+def AffineLoopNormalize : Pass<"affine-loop-normalize", "func::FuncOp"> {
   let summary = "Apply normalization transformations to affine loop-like ops";
   let constructor = "mlir::createAffineLoopNormalizePass()";
 }
 
-def LoopCoalescing : Pass<"affine-loop-coalescing", "FuncOp"> {
+def LoopCoalescing : Pass<"affine-loop-coalescing", "func::FuncOp"> {
   let summary = "Coalesce nested loops with independent bounds into a single "
                 "loop";
   let constructor = "mlir::createLoopCoalescingPass()";
   let dependentDialects = ["arith::ArithmeticDialect"];
 }
 
-def SimplifyAffineStructures : Pass<"affine-simplify-structures", "FuncOp"> {
+def SimplifyAffineStructures : Pass<"affine-simplify-structures", "func::FuncOp"> {
   let summary = "Simplify affine expressions in maps/sets and normalize "
                 "memrefs";
   let constructor = "mlir::createSimplifyAffineStructuresPass()";

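With the pass anchors now spelled as func::FuncOp, downstream pipelines that nest these affine passes name the namespaced op type as well. A minimal sketch under that assumption (the helper name below is hypothetical, not part of this patch), using the create* entry points declared in mlir/Dialect/Affine/Passes.h:

    #include "mlir/Dialect/Affine/Passes.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"

    // Nest a couple of affine passes on func::FuncOp now that the bare
    // FuncOp alias in the mlir namespace is gone.
    void buildAffinePipeline(mlir::PassManager &pm) {
      pm.addNestedPass<mlir::func::FuncOp>(mlir::createAffineLoopNormalizePass());
      pm.addNestedPass<mlir::func::FuncOp>(mlir::createLoopTilingPass());
    }
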
diff  --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index 7ccc2480f4be2..c91d719fb09ac 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -266,7 +266,7 @@ enum class BufferRelation {
   Equivalent
 };
 
-/// Return `true` if the given value is a BlockArgument of a FuncOp.
+/// Return `true` if the given value is a BlockArgument of a func::FuncOp.
 bool isFunctionArgument(Value value);
 
 /// Dialect-specific analysis state. Analysis/bufferization information

diff  --git a/mlir/include/mlir/Dialect/Func/IR/FuncOps.h b/mlir/include/mlir/Dialect/Func/IR/FuncOps.h
index 987fe67234bc7..2c3e19796147a 100644
--- a/mlir/include/mlir/Dialect/Func/IR/FuncOps.h
+++ b/mlir/include/mlir/Dialect/Func/IR/FuncOps.h
@@ -30,12 +30,6 @@ class PatternRewriter;
 
 #include "mlir/Dialect/Func/IR/FuncOpsDialect.h.inc"
 
-namespace mlir {
-/// FIXME: This is a temporary using directive to ease the transition of FuncOp
-/// to the Func dialect. This will be removed after all uses are updated.
-using FuncOp = func::FuncOp;
-} // namespace mlir
-
 namespace llvm {
 
 /// Allow stealing the low bits of FuncOp.

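Dropping the temporary alias means any remaining out-of-tree reference to mlir::FuncOp stops compiling. A hedged sketch of the two obvious ways such code can migrate; the function names below are hypothetical and not from this patch:

    #include "mlir/Dialect/Func/IR/FuncOps.h"

    // Option 1: spell the namespaced type directly.
    void processFunction(mlir::func::FuncOp func);

    // Option 2: keep a file-local alias while callers are being migrated.
    using FuncOp = mlir::func::FuncOp;
    void processFunctionLegacy(FuncOp func);
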
diff  --git a/mlir/include/mlir/Dialect/GPU/Passes.td b/mlir/include/mlir/Dialect/GPU/Passes.td
index 0380e1db5e01e..0752839fe5154 100644
--- a/mlir/include/mlir/Dialect/GPU/Passes.td
+++ b/mlir/include/mlir/Dialect/GPU/Passes.td
@@ -23,7 +23,7 @@ def GpuKernelOutlining : Pass<"gpu-kernel-outlining", "ModuleOp"> {
   let dependentDialects = ["mlir::DLTIDialect"];
 }
 
-def GpuAsyncRegionPass : Pass<"gpu-async-region", "FuncOp"> {
+def GpuAsyncRegionPass : Pass<"gpu-async-region", "func::FuncOp"> {
   let summary = "Make GPU ops async";
   let constructor = "mlir::createGpuAsyncRegionPass()";
   let dependentDialects = ["async::AsyncDialect"];

diff  --git a/mlir/include/mlir/Dialect/Linalg/Passes.h b/mlir/include/mlir/Dialect/Linalg/Passes.h
index fadfdb438f0f5..73226b2587a54 100644
--- a/mlir/include/mlir/Dialect/Linalg/Passes.h
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.h
@@ -22,9 +22,6 @@ namespace func {
 class FuncOp;
 } // namespace func
 
-// TODO: Remove when all references have been updated.
-using FuncOp = func::FuncOp;
-
 namespace bufferization {
 struct OneShotBufferizationOptions;
 } // namespace bufferization

diff  --git a/mlir/include/mlir/Dialect/Linalg/Passes.td b/mlir/include/mlir/Dialect/Linalg/Passes.td
index 05184ce3635e9..9f717d07d276e 100644
--- a/mlir/include/mlir/Dialect/Linalg/Passes.td
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.td
@@ -121,7 +121,7 @@ def LinalgNamedOpConversion: Pass<"linalg-named-op-conversion"> {
   let dependentDialects = ["linalg::LinalgDialect", "tensor::TensorDialect"];
 }
 
-def LinalgInlineScalarOperands : Pass<"linalg-inline-scalar-operands", "FuncOp"> {
+def LinalgInlineScalarOperands : Pass<"linalg-inline-scalar-operands", "func::FuncOp"> {
   let summary = "Inline scalar operands into linalg generic ops";
   let constructor = "mlir::createLinalgInlineScalarOperandsPass()";
   let dependentDialects = [
@@ -129,7 +129,7 @@ def LinalgInlineScalarOperands : Pass<"linalg-inline-scalar-operands", "FuncOp">
   ];
 }
 
-def LinalgLowerToAffineLoops : Pass<"convert-linalg-to-affine-loops", "FuncOp"> {
+def LinalgLowerToAffineLoops : Pass<"convert-linalg-to-affine-loops", "func::FuncOp"> {
   let summary = "Lower the operations from the linalg dialect into affine "
                 "loops";
   let constructor = "mlir::createConvertLinalgToAffineLoopsPass()";
@@ -137,7 +137,7 @@ def LinalgLowerToAffineLoops : Pass<"convert-linalg-to-affine-loops", "FuncOp">
     "AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"];
 }
 
-def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "FuncOp"> {
+def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "func::FuncOp"> {
   let summary = "Lower the operations from the linalg dialect into loops";
   let constructor = "mlir::createConvertLinalgToLoopsPass()";
   let dependentDialects = [
@@ -148,7 +148,7 @@ def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "FuncOp"> {
 }
 
 def LinalgLowerToParallelLoops
-    : Pass<"convert-linalg-to-parallel-loops", "FuncOp"> {
+    : Pass<"convert-linalg-to-parallel-loops", "func::FuncOp"> {
   let summary = "Lower the operations from the linalg dialect into parallel "
                 "loops";
   let constructor = "mlir::createConvertLinalgToParallelLoopsPass()";
@@ -160,7 +160,7 @@ def LinalgLowerToParallelLoops
   ];
 }
 
-def LinalgBufferize : Pass<"linalg-bufferize", "FuncOp"> {
+def LinalgBufferize : Pass<"linalg-bufferize", "func::FuncOp"> {
   let summary = "Bufferize the linalg dialect";
   let constructor = "mlir::createLinalgBufferizePass()";
   let dependentDialects = [
@@ -171,7 +171,7 @@ def LinalgBufferize : Pass<"linalg-bufferize", "FuncOp"> {
   ];
 }
 
-def LinalgPromotion : Pass<"linalg-promote-subviews", "FuncOp"> {
+def LinalgPromotion : Pass<"linalg-promote-subviews", "func::FuncOp"> {
   let summary = "Promote subview ops to local buffers";
   let constructor = "mlir::createLinalgPromotionPass()";
   let options = [
@@ -183,7 +183,7 @@ def LinalgPromotion : Pass<"linalg-promote-subviews", "FuncOp"> {
   let dependentDialects = ["linalg::LinalgDialect"];
 }
 
-def LinalgTiling : Pass<"linalg-tile", "FuncOp"> {
+def LinalgTiling : Pass<"linalg-tile", "func::FuncOp"> {
   let summary = "Tile operations in the linalg dialect";
   let constructor = "mlir::createLinalgTilingPass()";
   let dependentDialects = [
@@ -200,7 +200,7 @@ def LinalgTiling : Pass<"linalg-tile", "FuncOp"> {
   ];
 }
 
-def LinalgGeneralization : Pass<"linalg-generalize-named-ops", "FuncOp"> {
+def LinalgGeneralization : Pass<"linalg-generalize-named-ops", "func::FuncOp"> {
   let summary = "Convert named ops into generic ops";
   let constructor = "mlir::createLinalgGeneralizationPass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -245,7 +245,7 @@ def LinalgDetensorize : Pass<"linalg-detensorize", ""> {
 }
 
 def LinalgStrategyTileAndFusePass
-    : Pass<"linalg-strategy-tile-and-fuse-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-tile-and-fuse-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply pattern-based tiling and fusion.";
   let constructor = "mlir::createLinalgStrategyTileAndFusePass()";
   let options = [
@@ -257,7 +257,7 @@ def LinalgStrategyTileAndFusePass
 }
 
 def LinalgStrategyTilePass
-    : Pass<"linalg-strategy-tile-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-tile-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply pattern-based linalg tiling.";
   let constructor = "mlir::createLinalgStrategyTilePass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -270,7 +270,7 @@ def LinalgStrategyTilePass
 }
 
 def LinalgStrategyPadPass
-    : Pass<"linalg-strategy-pad-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-pad-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply padding and hoisting.";
   let constructor = "mlir::createLinalgStrategyPadPass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -283,7 +283,7 @@ def LinalgStrategyPadPass
 }
 
 def LinalgStrategyPromotePass
-    : Pass<"linalg-strategy-promote-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-promote-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply pattern-based linalg promotion.";
   let constructor = "mlir::createLinalgStrategyPromotePass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -296,7 +296,7 @@ def LinalgStrategyPromotePass
 }
 
 def LinalgStrategyGeneralizePass
-    : Pass<"linalg-strategy-generalize-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-generalize-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply pattern-based generalization.";
   let constructor = "mlir::createLinalgStrategyGeneralizePass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -310,7 +310,7 @@ def LinalgStrategyGeneralizePass
 
 // TODO: if/when we need finer control add an anchorOp option.
 def LinalgStrategyDecomposePass
-    : Pass<"linalg-strategy-decompose-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-decompose-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply pattern-based generalization.";
   let constructor = "mlir::createLinalgStrategyDecomposePass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -321,7 +321,7 @@ def LinalgStrategyDecomposePass
 }
 
 def LinalgStrategyInterchangePass
-    : Pass<"linalg-strategy-interchange-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-interchange-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply pattern-based iterator interchange.";
   let constructor = "mlir::createLinalgStrategyInterchangePass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -332,7 +332,7 @@ def LinalgStrategyInterchangePass
 }
 
 def LinalgStrategyVectorizePass
-    : Pass<"linalg-strategy-vectorize-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-vectorize-pass", "func::FuncOp"> {
   let summary = "Configurable pass to apply pattern-based linalg vectorization.";
   let constructor = "mlir::createLinalgStrategyVectorizePass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -347,7 +347,7 @@ def LinalgStrategyVectorizePass
 }
 
 def LinalgStrategyEnablePass
-    : Pass<"linalg-strategy-enable-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-enable-pass", "func::FuncOp"> {
   let summary = "Configurable pass to enable the application of other "
     "pattern-based linalg passes.";
   let constructor = "mlir::createLinalgStrategyEnablePass()";
@@ -359,7 +359,7 @@ def LinalgStrategyEnablePass
 }
 
 def LinalgStrategyLowerVectorsPass
-    : Pass<"linalg-strategy-lower-vectors-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-lower-vectors-pass", "func::FuncOp"> {
   let summary = "Configurable pass to lower vector operations.";
   let constructor = "mlir::createLinalgStrategyLowerVectorsPass()";
   let dependentDialects = ["linalg::LinalgDialect"];
@@ -370,7 +370,7 @@ def LinalgStrategyLowerVectorsPass
 }
 
 def LinalgStrategyRemoveMarkersPass
-    : Pass<"linalg-strategy-remove-markers-pass", "FuncOp"> {
+    : Pass<"linalg-strategy-remove-markers-pass", "func::FuncOp"> {
   let summary = "Cleanup pass that drops markers.";
   let constructor = "mlir::createLinalgStrategyRemoveMarkersPass()";
   let dependentDialects = ["linalg::LinalgDialect"];

diff  --git a/mlir/include/mlir/Dialect/Quant/Passes.td b/mlir/include/mlir/Dialect/Quant/Passes.td
index 65dfb47d76b04..a1afda4a89293 100644
--- a/mlir/include/mlir/Dialect/Quant/Passes.td
+++ b/mlir/include/mlir/Dialect/Quant/Passes.td
@@ -11,14 +11,14 @@
 
 include "mlir/Pass/PassBase.td"
 
-def QuantConvertConst : Pass<"quant-convert-const", "FuncOp"> {
+def QuantConvertConst : Pass<"quant-convert-const", "func::FuncOp"> {
   let summary = "Converts constants followed by qbarrier to actual quantized "
                 "values";
   let constructor = "mlir::quant::createConvertConstPass()";
 }
 
 def QuantConvertSimulatedQuant
-    : Pass<"quant-convert-simulated-quantization", "FuncOp"> {
+    : Pass<"quant-convert-simulated-quantization", "func::FuncOp"> {
   let summary = "Converts training-time simulated quantization ops to "
                 "corresponding quantize/dequantize casts";
   let constructor = "mlir::quant::createConvertSimulatedQuantPass()";

diff  --git a/mlir/include/mlir/Dialect/SCF/Passes.td b/mlir/include/mlir/Dialect/SCF/Passes.td
index 72f7ff75cc7b1..d910f2e02d090 100644
--- a/mlir/include/mlir/Dialect/SCF/Passes.td
+++ b/mlir/include/mlir/Dialect/SCF/Passes.td
@@ -11,7 +11,7 @@
 
 include "mlir/Pass/PassBase.td"
 
-def SCFBufferize : Pass<"scf-bufferize", "FuncOp"> {
+def SCFBufferize : Pass<"scf-bufferize", "func::FuncOp"> {
   let summary = "Bufferize the scf dialect.";
   let constructor = "mlir::createSCFBufferizePass()";
   let dependentDialects = ["bufferization::BufferizationDialect",
@@ -21,14 +21,14 @@ def SCFBufferize : Pass<"scf-bufferize", "FuncOp"> {
 // Note: Making these canonicalization patterns would require a dependency
 // of the SCF dialect on the Affine/Tensor/MemRef dialects or vice versa.
 def SCFForLoopCanonicalization
-    : Pass<"scf-for-loop-canonicalization", "FuncOp"> {
+    : Pass<"scf-for-loop-canonicalization", "func::FuncOp"> {
   let summary = "Canonicalize operations within scf.for loop bodies";
   let constructor = "mlir::createSCFForLoopCanonicalizationPass()";
   let dependentDialects = ["AffineDialect", "tensor::TensorDialect",
                            "memref::MemRefDialect"];
 }
 
-def SCFForLoopPeeling : Pass<"scf-for-loop-peeling", "FuncOp"> {
+def SCFForLoopPeeling : Pass<"scf-for-loop-peeling", "func::FuncOp"> {
   let summary = "Peel `for` loops at their upper bounds.";
   let constructor = "mlir::createForLoopPeelingPass()";
   let options = [
@@ -40,7 +40,7 @@ def SCFForLoopPeeling : Pass<"scf-for-loop-peeling", "FuncOp"> {
   let dependentDialects = ["AffineDialect"];
 }
 
-def SCFForLoopSpecialization : Pass<"scf-for-loop-specialization", "FuncOp"> {
+def SCFForLoopSpecialization : Pass<"scf-for-loop-specialization", "func::FuncOp"> {
   let summary = "Specialize `for` loops for vectorization";
   let constructor = "mlir::createForLoopSpecializationPass()";
 }
@@ -64,12 +64,12 @@ def SCFParallelLoopCollapsing : Pass<"scf-parallel-loop-collapsing"> {
 }
 
 def SCFParallelLoopSpecialization
-    : Pass<"scf-parallel-loop-specialization", "FuncOp"> {
+    : Pass<"scf-parallel-loop-specialization", "func::FuncOp"> {
   let summary = "Specialize parallel loops for vectorization";
   let constructor = "mlir::createParallelLoopSpecializationPass()";
 }
 
-def SCFParallelLoopTiling : Pass<"scf-parallel-loop-tiling", "FuncOp"> {
+def SCFParallelLoopTiling : Pass<"scf-parallel-loop-tiling", "func::FuncOp"> {
   let summary = "Tile parallel loops";
   let constructor = "mlir::createParallelLoopTilingPass()";
   let options = [
@@ -88,7 +88,7 @@ def SCFForLoopRangeFolding : Pass<"scf-for-loop-range-folding"> {
   let constructor = "mlir::createForLoopRangeFoldingPass()";
 }
 
-def SCFForToWhileLoop : Pass<"scf-for-to-while", "FuncOp"> {
+def SCFForToWhileLoop : Pass<"scf-for-to-while", "func::FuncOp"> {
   let summary = "Convert SCF for loops to SCF while loops";
   let constructor = "mlir::createForToWhileLoopPass()";
   let description = [{

diff  --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
index 51534dd554157..5f89a253fec28 100644
--- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
+++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
@@ -1022,7 +1022,7 @@ def Shape_FunctionLibraryOp : Shape_Op<"function_library",
 
   let extraClassDeclaration = [{
     /// Returns an associated shape function for an operation if defined.
-    FuncOp getShapeFunction(Operation *op);
+    func::FuncOp getShapeFunction(Operation *op);
   }];
 
   let builders = [OpBuilder<(ins "StringRef":$name)>];

diff  --git a/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td b/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td
index 7749d9c9f3bb6..47d16ce64d670 100644
--- a/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td
@@ -11,18 +11,18 @@
 
 include "mlir/Pass/PassBase.td"
 
-def RemoveShapeConstraints : Pass<"remove-shape-constraints", "FuncOp"> {
+def RemoveShapeConstraints : Pass<"remove-shape-constraints", "func::FuncOp"> {
   let summary = "Replace all cstr_ ops with a true witness";
   let constructor = "mlir::createRemoveShapeConstraintsPass()";
 }
 
-def ShapeToShapeLowering : Pass<"shape-to-shape-lowering", "FuncOp"> {
+def ShapeToShapeLowering : Pass<"shape-to-shape-lowering", "func::FuncOp"> {
   let summary = "Legalize Shape dialect to be convertible to Arithmetic";
   let constructor = "mlir::createShapeToShapeLowering()";
 }
 
 // TODO: Generalize this to allow any type conversions desired.
-def ShapeBufferize : Pass<"shape-bufferize", "FuncOp"> {
+def ShapeBufferize : Pass<"shape-bufferize", "func::FuncOp"> {
   let summary = "Bufferize the shape dialect.";
   let constructor = "mlir::createShapeBufferizePass()";
   let dependentDialects = ["bufferization::BufferizationDialect",

diff  --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
index 77743134325f5..2bf774d404bf5 100644
--- a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
@@ -11,7 +11,7 @@
 
 include "mlir/Pass/PassBase.td"
 
-def TensorBufferize : Pass<"tensor-bufferize", "FuncOp"> {
+def TensorBufferize : Pass<"tensor-bufferize", "func::FuncOp"> {
   let summary = "Bufferize the `tensor` dialect";
   let constructor = "mlir::createTensorBufferizePass()";
 }

diff  --git a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td
index 8a929775b8353..c3180ec14a325 100644
--- a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.td
@@ -15,7 +15,7 @@
 
 include "mlir/Pass/PassBase.td"
 
-def TosaInferShapes : Pass<"tosa-infer-shapes", "FuncOp"> {
+def TosaInferShapes : Pass<"tosa-infer-shapes", "func::FuncOp"> {
   let summary = "Propagate shapes across TOSA operations";
   let description = [{
     Pass that uses operand types and propagates shapes to TOSA operations.
@@ -30,7 +30,7 @@ def TosaInferShapes : Pass<"tosa-infer-shapes", "FuncOp"> {
   ];
 }
 
-def TosaMakeBroadcastable : Pass<"tosa-make-broadcastable", "FuncOp"> {
+def TosaMakeBroadcastable : Pass<"tosa-make-broadcastable", "func::FuncOp"> {
   let summary = "TOSA rank Reshape to enable Broadcasting";
   let description = [{
     Pass that enables broadcast by making all input arrays have the same
@@ -44,7 +44,7 @@ def TosaMakeBroadcastable : Pass<"tosa-make-broadcastable", "FuncOp"> {
 }
 
 def TosaOptionalDecompositions
-  : Pass<"tosa-optional-decompositions", "FuncOp"> {
+  : Pass<"tosa-optional-decompositions", "func::FuncOp"> {
   let summary = "Applies Tosa operations optional decompositions";
   let description = [{
     Pass to apply the Tosa operations decompositions 

diff  --git a/mlir/include/mlir/Dialect/Vector/Transforms/Passes.td b/mlir/include/mlir/Dialect/Vector/Transforms/Passes.td
index 6bca0dad1bf85..a1971a7121b0d 100644
--- a/mlir/include/mlir/Dialect/Vector/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Vector/Transforms/Passes.td
@@ -11,7 +11,7 @@
 
 include "mlir/Pass/PassBase.td"
 
-def VectorBufferize : Pass<"vector-bufferize", "FuncOp"> {
+def VectorBufferize : Pass<"vector-bufferize", "func::FuncOp"> {
   let summary = "Bufferize Vector dialect ops";
   let constructor = "mlir::vector::createVectorBufferizePass()";
 }

diff  --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index 24f0f307875b6..159a726cd9196 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -204,7 +204,7 @@ static void addAsyncRuntimeApiDeclarations(ModuleOp module) {
   auto addFuncDecl = [&](StringRef name, FunctionType type) {
     if (module.lookupSymbol(name))
       return;
-    builder.create<FuncOp>(name, type).setPrivate();
+    builder.create<func::FuncOp>(name, type).setPrivate();
   };
 
   MLIRContext *ctx = module.getContext();
@@ -1006,7 +1006,8 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
   llvmConverter.addConversion(AsyncRuntimeTypeConverter::convertAsyncTypes);
 
   // Convert async types in function signatures and function calls.
-  populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns, converter);
+  populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+                                                                 converter);
   populateCallOpTypeConversionPattern(patterns, converter);
 
   // Convert return operations inside async.execute regions.
@@ -1042,7 +1043,7 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
   target.addIllegalDialect<AsyncDialect>();
 
   // Add dynamic legality constraints to apply conversions defined above.
-  target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+  target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
     return converter.isSignatureLegal(op.getFunctionType());
   });
   target.addDynamicallyLegalOp<func::ReturnOp>([&](func::ReturnOp op) {

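The same spelling applies when declaring runtime helper functions with an OpBuilder, as the AsyncToLLVM change above does. A minimal sketch under those assumptions; declareRuntimeFunc is a hypothetical helper, not an API introduced by this patch:

    #include "llvm/ADT/StringRef.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinOps.h"

    // Declare a private function symbol in `module` unless one already exists.
    static void declareRuntimeFunc(mlir::ModuleOp module, llvm::StringRef name,
                                   mlir::FunctionType type) {
      if (module.lookupSymbol(name))
        return;
      mlir::OpBuilder builder(module.getContext());
      builder.setInsertionPointToStart(module.getBody());
      builder.create<mlir::func::FuncOp>(module.getLoc(), name, type).setPrivate();
    }
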
diff  --git a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
index c75ee4c4ae300..3ffd52604fd78 100644
--- a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
+++ b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
@@ -125,7 +125,8 @@ prependResAttrsToArgAttrs(OpBuilder &builder,
 /// the extra arguments.
 static void wrapForExternalCallers(OpBuilder &rewriter, Location loc,
                                    LLVMTypeConverter &typeConverter,
-                                   FuncOp funcOp, LLVM::LLVMFuncOp newFuncOp) {
+                                   func::FuncOp funcOp,
+                                   LLVM::LLVMFuncOp newFuncOp) {
   auto type = funcOp.getFunctionType();
   SmallVector<NamedAttribute, 4> attributes;
   filterFuncAttributes(funcOp->getAttrs(), /*filterArgAndResAttrs=*/false,
@@ -183,7 +184,8 @@ static void wrapForExternalCallers(OpBuilder &rewriter, Location loc,
 /// corresponding to a memref descriptor.
 static void wrapExternalFunction(OpBuilder &builder, Location loc,
                                  LLVMTypeConverter &typeConverter,
-                                 FuncOp funcOp, LLVM::LLVMFuncOp newFuncOp) {
+                                 func::FuncOp funcOp,
+                                 LLVM::LLVMFuncOp newFuncOp) {
   OpBuilder::InsertionGuard guard(builder);
 
   Type wrapperType;
@@ -273,14 +275,14 @@ static void wrapExternalFunction(OpBuilder &builder, Location loc,
 
 namespace {
 
-struct FuncOpConversionBase : public ConvertOpToLLVMPattern<FuncOp> {
+struct FuncOpConversionBase : public ConvertOpToLLVMPattern<func::FuncOp> {
 protected:
-  using ConvertOpToLLVMPattern<FuncOp>::ConvertOpToLLVMPattern;
+  using ConvertOpToLLVMPattern<func::FuncOp>::ConvertOpToLLVMPattern;
 
   // Convert input FuncOp to LLVMFuncOp by using the LLVMTypeConverter provided
   // to this legalization pattern.
   LLVM::LLVMFuncOp
-  convertFuncOpToLLVMFuncOp(FuncOp funcOp,
+  convertFuncOpToLLVMFuncOp(func::FuncOp funcOp,
                             ConversionPatternRewriter &rewriter) const {
     // Convert the original function arguments. They are converted using the
     // LLVMTypeConverter provided to this legalization pattern.
@@ -363,7 +365,7 @@ struct FuncOpConversion : public FuncOpConversionBase {
       : FuncOpConversionBase(converter) {}
 
   LogicalResult
-  matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+  matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     auto newFuncOp = convertFuncOpToLLVMFuncOp(funcOp, rewriter);
     if (!newFuncOp)
@@ -390,7 +392,7 @@ struct BarePtrFuncOpConversion : public FuncOpConversionBase {
   using FuncOpConversionBase::FuncOpConversionBase;
 
   LogicalResult
-  matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+  matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
 
     // TODO: bare ptr conversion could be handled by argument materialization

diff  --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 6f0e585365e29..4f657303a8197 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -303,7 +303,7 @@ struct LowerGpuOpsToNVVMOpsPass
 } // namespace
 
 void mlir::configureGpuToNVVMConversionLegality(ConversionTarget &target) {
-  target.addIllegalOp<FuncOp>();
+  target.addIllegalOp<func::FuncOp>();
   target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
   target.addLegalDialect<::mlir::NVVM::NVVMDialect>();
   target.addIllegalDialect<gpu::GPUDialect>();

diff  --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index 6a50f0c78165e..fdc656ea02f6b 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -94,7 +94,7 @@ struct LowerGpuOpsToROCDLOpsPass
 } // namespace
 
 void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
-  target.addIllegalOp<FuncOp>();
+  target.addIllegalOp<func::FuncOp>();
   target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
   target.addLegalDialect<ROCDL::ROCDLDialect>();
   target.addIllegalDialect<gpu::GPUDialect>();

diff  --git a/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
index c87922602278c..a0c1eb027e224 100644
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
@@ -123,7 +123,7 @@ LogicalResult ConvertGpuLaunchFuncToVulkanLaunchFunc::declareVulkanLaunchFunc(
 
   // Declare vulkan launch function.
   auto funcType = builder.getFunctionType(vulkanLaunchTypes, {});
-  builder.create<FuncOp>(loc, kVulkanLaunch, funcType).setPrivate();
+  builder.create<func::FuncOp>(loc, kVulkanLaunch, funcType).setPrivate();
 
   return success();
 }

diff  --git a/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRVPass.cpp b/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRVPass.cpp
index 23d7fc9a92520..1238cfe4bc874 100644
--- a/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRVPass.cpp
@@ -37,7 +37,7 @@ void LinalgToSPIRVPass::runOnOperation() {
 
   // Allow builtin ops.
   target->addLegalOp<ModuleOp>();
-  target->addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+  target->addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
     return typeConverter.isSignatureLegal(op.getFunctionType()) &&
            typeConverter.isLegal(&op.getBody());
   });

diff  --git a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
index cfa93c7e0ea6a..0fa51c7938b47 100644
--- a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
+++ b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
@@ -66,8 +66,8 @@ static FlatSymbolRefAttr getLibraryCallSymbolRef(Operation *op,
   // Insert before module terminator.
   rewriter.setInsertionPoint(module.getBody(),
                              std::prev(module.getBody()->end()));
-  FuncOp funcOp =
-      rewriter.create<FuncOp>(op->getLoc(), fnNameAttr.getValue(), libFnType);
+  func::FuncOp funcOp = rewriter.create<func::FuncOp>(
+      op->getLoc(), fnNameAttr.getValue(), libFnType);
   // Insert a function attribute that will trigger the emission of the
   // corresponding `_mlir_ciface_xxx` interface so that external libraries see
   // a normalized ABI. This interface is added during std to llvm conversion.
@@ -131,7 +131,7 @@ void ConvertLinalgToStandardPass::runOnOperation() {
   target.addLegalDialect<AffineDialect, arith::ArithmeticDialect,
                          func::FuncDialect, memref::MemRefDialect,
                          scf::SCFDialect>();
-  target.addLegalOp<ModuleOp, FuncOp, func::ReturnOp>();
+  target.addLegalOp<ModuleOp, func::FuncOp, func::ReturnOp>();
   RewritePatternSet patterns(&getContext());
   populateLinalgToStandardConversionPatterns(patterns);
   if (failed(applyFullConversion(module, target, std::move(patterns))))

diff  --git a/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp b/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp
index 569625ff0eb3a..9bea594d87df6 100644
--- a/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp
+++ b/mlir/lib/Conversion/MathToLibm/MathToLibm.cpp
@@ -101,8 +101,8 @@ ScalarOpToLibmCall<Op>::matchAndRewrite(Op op,
     rewriter.setInsertionPointToStart(&module->getRegion(0).front());
     auto opFunctionTy = FunctionType::get(
         rewriter.getContext(), op->getOperandTypes(), op->getResultTypes());
-    opFunc =
-        rewriter.create<FuncOp>(rewriter.getUnknownLoc(), name, opFunctionTy);
+    opFunc = rewriter.create<func::FuncOp>(rewriter.getUnknownLoc(), name,
+                                           opFunctionTy);
     opFunc.setPrivate();
   }
   assert(isa<FunctionOpInterface>(SymbolTable::lookupSymbolIn(module, name)));

diff  --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 01cbd93e47557..9de7740747fa6 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -1861,7 +1861,7 @@ struct MemRefToLLVMPass : public ConvertMemRefToLLVMBase<MemRefToLLVMPass> {
     RewritePatternSet patterns(&getContext());
     populateMemRefToLLVMConversionPatterns(typeConverter, patterns);
     LLVMConversionTarget target(getContext());
-    target.addLegalOp<FuncOp>();
+    target.addLegalOp<func::FuncOp>();
     if (failed(applyPartialConversion(op, target, std::move(patterns))))
       signalPassFailure();
   }

diff  --git a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
index dfe95dc1b83ef..b8eabbdb1d80d 100644
--- a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
+++ b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
@@ -670,7 +670,7 @@ void ConvertShapeToStandardPass::runOnOperation() {
   ConversionTarget target(ctx);
   target.addLegalDialect<arith::ArithmeticDialect, SCFDialect,
                          tensor::TensorDialect>();
-  target.addLegalOp<CstrRequireOp, FuncOp, ModuleOp>();
+  target.addLegalOp<CstrRequireOp, func::FuncOp, ModuleOp>();
 
   // Setup conversion patterns.
   RewritePatternSet patterns(&ctx);

diff  --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp
index cc210908760d5..a8c610c05a7bc 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp
@@ -70,12 +70,12 @@ void mlir::tosa::addTosaToLinalgPasses(OpPassManager &pm,
                                        bool disableTosaDecompositions) {
   // Optional decompositions are designed to benefit linalg.
   if (!disableTosaDecompositions)
-    pm.addNestedPass<FuncOp>(mlir::tosa::createTosaOptionalDecompositions());
-  pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
+    pm.addNestedPass<func::FuncOp>(tosa::createTosaOptionalDecompositions());
+  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
 
-  pm.addNestedPass<FuncOp>(tosa::createTosaMakeBroadcastablePass());
-  pm.addNestedPass<FuncOp>(tosa::createTosaToLinalgNamed());
-  pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
-  pm.addNestedPass<FuncOp>(tosa::createTosaMakeBroadcastablePass());
-  pm.addNestedPass<FuncOp>(tosa::createTosaToLinalg());
+  pm.addNestedPass<func::FuncOp>(tosa::createTosaMakeBroadcastablePass());
+  pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalgNamed());
+  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
+  pm.addNestedPass<func::FuncOp>(tosa::createTosaMakeBroadcastablePass());
+  pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalg());
 }

diff  --git a/mlir/lib/Conversion/TosaToSCF/TosaToSCFPass.cpp b/mlir/lib/Conversion/TosaToSCF/TosaToSCFPass.cpp
index 6563a446aaa21..7b8ebe7e2920e 100644
--- a/mlir/lib/Conversion/TosaToSCF/TosaToSCFPass.cpp
+++ b/mlir/lib/Conversion/TosaToSCF/TosaToSCFPass.cpp
@@ -48,5 +48,5 @@ std::unique_ptr<Pass> mlir::tosa::createTosaToSCF() {
 }
 
 void mlir::tosa::addTosaToSCFPasses(OpPassManager &pm) {
-  pm.addNestedPass<FuncOp>(createTosaToSCF());
+  pm.addNestedPass<func::FuncOp>(createTosaToSCF());
 }

diff  --git a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
index 1905224b5d5b4..2df97bd1cc087 100644
--- a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
@@ -326,7 +326,7 @@ static Block *getCommonBlock(const MemRefAccess &srcAccess,
 
   if (numCommonLoops == 0) {
     Block *block = srcAccess.opInst->getBlock();
-    while (!llvm::isa<FuncOp>(block->getParentOp())) {
+    while (!llvm::isa<func::FuncOp>(block->getParentOp())) {
       block = block->getParentOp()->getBlock();
     }
     return block;

diff  --git a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
index a93fe6195676a..864f10991c748 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
@@ -77,14 +77,17 @@ struct AffineDataCopyGeneration
 /// buffers in 'fastMemorySpace', and replaces memory operations to the former
 /// by the latter. Only load op's handled for now.
 /// TODO: extend this to store op's.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineDataCopyGenerationPass(
-    unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
-    int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createAffineDataCopyGenerationPass(unsigned slowMemorySpace,
+                                         unsigned fastMemorySpace,
+                                         unsigned tagMemorySpace,
+                                         int minDmaTransferSize,
+                                         uint64_t fastMemCapacityBytes) {
   return std::make_unique<AffineDataCopyGeneration>(
       slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
       fastMemCapacityBytes);
 }
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createAffineDataCopyGenerationPass() {
   return std::make_unique<AffineDataCopyGeneration>();
 }
@@ -196,7 +199,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
 }
 
 void AffineDataCopyGeneration::runOnOperation() {
-  FuncOp f = getOperation();
+  func::FuncOp f = getOperation();
   OpBuilder topBuilder(f.getBody());
   zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);
 

diff  --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
index 9b3a9bdbe9131..fba9e735f73bc 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
@@ -238,7 +238,7 @@ void LoopInvariantCodeMotion::runOnOperation() {
   });
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createAffineLoopInvariantCodeMotionPass() {
   return std::make_unique<LoopInvariantCodeMotion>();
 }

diff  --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
index 536f342e805b3..28b6b9976bd6b 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
@@ -37,6 +37,7 @@ struct AffineLoopNormalizePass
 
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineLoopNormalizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createAffineLoopNormalizePass() {
   return std::make_unique<AffineLoopNormalizePass>();
 }

diff  --git a/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
index 61bae249bff98..dbd55411c0bf8 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
@@ -48,7 +48,7 @@ struct ParallelizationCandidate {
 } // namespace
 
 void AffineParallelize::runOnOperation() {
-  FuncOp f = getOperation();
+  func::FuncOp f = getOperation();
 
   // The walker proceeds in pre-order to process the outer loops first
   // and control the number of outer parallel loops.
@@ -81,6 +81,7 @@ void AffineParallelize::runOnOperation() {
   }
 }
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineParallelizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createAffineParallelizePass() {
   return std::make_unique<AffineParallelize>();
 }

diff  --git a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
index c5a347bced7a9..efc1676378df2 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
@@ -34,7 +34,7 @@ struct AffineScalarReplacement
 
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createAffineScalarReplacementPass() {
   return std::make_unique<AffineScalarReplacement>();
 }

diff  --git a/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
index d03594321430a..fbb72622bfb95 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
@@ -85,7 +85,7 @@ struct LoopCoalescingPass : public LoopCoalescingBase<LoopCoalescingPass> {
   }
 
   void runOnOperation() override {
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
     func.walk([&](Operation *op) {
       if (auto scfForOp = dyn_cast<scf::ForOp>(op))
         walkLoop(scfForOp);
@@ -97,6 +97,6 @@ struct LoopCoalescingPass : public LoopCoalescingBase<LoopCoalescingPass> {
 
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopCoalescingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopCoalescingPass() {
   return std::make_unique<LoopCoalescingPass>();
 }

diff  --git a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
index 4acc508f3e45b..bf028c31a8541 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
@@ -61,7 +61,7 @@ struct LoopFusion : public AffineLoopFusionBase<LoopFusion> {
 
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLoopFusionPass(unsigned fastMemorySpace,
                            uint64_t localBufSizeThreshold, bool maximalFusion,
                            enum FusionMode affineFusionMode) {
@@ -202,7 +202,7 @@ struct MemRefDependenceGraph {
 
   // Initializes the dependence graph based on operations in 'f'.
   // Returns true on success, false otherwise.
-  bool init(FuncOp f);
+  bool init(func::FuncOp f);
 
   // Returns the graph node for 'id'.
   Node *getNode(unsigned id) {
@@ -731,7 +731,7 @@ void gatherEscapingMemrefs(unsigned id, MemRefDependenceGraph *mdg,
 // Assigns each node in the graph a node id based on program order in 'f'.
 // TODO: Add support for taking a Block arg to construct the
 // dependence graph at a different depth.
-bool MemRefDependenceGraph::init(FuncOp f) {
+bool MemRefDependenceGraph::init(func::FuncOp f) {
   LLVM_DEBUG(llvm::dbgs() << "--- Initializing MDG ---\n");
   DenseMap<Value, SetVector<unsigned>> memrefAccesses;
 
@@ -895,7 +895,7 @@ static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
   // Create builder to insert alloc op just before 'forOp'.
   OpBuilder b(forInst);
   // Builder to create constants at the top level.
-  OpBuilder top(forInst->getParentOfType<FuncOp>().getBody());
+  OpBuilder top(forInst->getParentOfType<func::FuncOp>().getBody());
   // Create new memref type based on slice bounds.
   auto oldMemRef = cast<AffineWriteOpInterface>(srcStoreOpInst).getMemRef();
   auto oldMemRefType = oldMemRef.getType().cast<MemRefType>();
@@ -1853,7 +1853,7 @@ struct GreedyFusion {
     };
 
     // Search for siblings which load the same memref function argument.
-    auto fn = dstNode->op->getParentOfType<FuncOp>();
+    auto fn = dstNode->op->getParentOfType<func::FuncOp>();
     for (unsigned i = 0, e = fn.getNumArguments(); i != e; ++i) {
       for (auto *user : fn.getArgument(i).getUsers()) {
         if (auto loadOp = dyn_cast<AffineReadOpInterface>(user)) {

diff  --git a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
index a66fcdae2ecd4..7e82a47d01edd 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
@@ -53,11 +53,11 @@ struct LoopTiling : public AffineLoopTilingBase<LoopTiling> {
 
 /// Creates a pass to perform loop tiling on all suitable loop nests of a
 /// Function.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
   return std::make_unique<LoopTiling>(cacheSizeBytes);
 }
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopTilingPass() {
   return std::make_unique<LoopTiling>();
 }
 

diff  --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
index 00dc64a0d54c4..b608ef49571b6 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
@@ -71,7 +71,7 @@ static bool isInnermostAffineForOp(AffineForOp op) {
 }
 
 /// Gathers loops that have no affine.for's nested within.
-static void gatherInnermostLoops(FuncOp f,
+static void gatherInnermostLoops(func::FuncOp f,
                                  SmallVectorImpl<AffineForOp> &loops) {
   f.walk([&](AffineForOp forOp) {
     if (isInnermostAffineForOp(forOp))
@@ -80,7 +80,7 @@ static void gatherInnermostLoops(FuncOp f,
 }
 
 void LoopUnroll::runOnOperation() {
-  FuncOp func = getOperation();
+  func::FuncOp func = getOperation();
   if (func.isExternal())
     return;
 
@@ -132,7 +132,7 @@ LogicalResult LoopUnroll::runOnAffineForOp(AffineForOp forOp) {
   return loopUnrollByFactor(forOp, unrollFactor);
 }
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopUnrollPass(
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopUnrollPass(
     int unrollFactor, bool unrollUpToFactor, bool unrollFull,
     const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
   return std::make_unique<LoopUnroll>(

diff  --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
index 273ffdb579d4b..42f6487e8eeba 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
@@ -63,7 +63,7 @@ struct LoopUnrollAndJam : public AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
 };
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
   return std::make_unique<LoopUnrollAndJam>(
       unrollJamFactor == -1 ? None : Optional<unsigned>(unrollJamFactor));

diff  --git a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
index 2ce8788e18d31..a1f4f7b86defd 100644
--- a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
@@ -41,7 +41,8 @@ struct PipelineDataTransfer
 
 /// Creates a pass to pipeline explicit movement of data across levels of the
 /// memory hierarchy.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createPipelineDataTransferPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createPipelineDataTransferPass() {
   return std::make_unique<PipelineDataTransfer>();
 }
 

diff  --git a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
index d90b02c6f14c4..cd05d66ea823d 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
@@ -71,7 +71,7 @@ struct SimplifyAffineStructures
 
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createSimplifyAffineStructuresPass() {
   return std::make_unique<SimplifyAffineStructures>();
 }

diff  --git a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
index 7354fb9646dab..6b0cf8ffa0a92 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -1700,18 +1700,18 @@ static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
   LLVM_DEBUG(dbgs() << "\n");
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
   return std::make_unique<Vectorize>(virtualVectorSize);
 }
-std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
   return std::make_unique<Vectorize>();
 }
 
 /// Applies vectorization to the current function by searching over a bunch of
 /// predetermined patterns.
 void Vectorize::runOnOperation() {
-  FuncOp f = getOperation();
+  func::FuncOp f = getOperation();
   if (!fastestVaryingPattern.empty() &&
       fastestVaryingPattern.size() != vectorSizes.size()) {
     f.emitRemark("Fastest varying pattern specified with 
diff erent size than "
@@ -1855,11 +1855,11 @@ vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
   return vectorizeLoopNest(loops, strategy);
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
   return std::make_unique<Vectorize>(virtualVectorSize);
 }
-std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
   return std::make_unique<Vectorize>();
 }
 

diff  --git a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
index bfc4f63fbbcda..16d7c16da32b4 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
@@ -482,7 +482,7 @@ bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
   auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {
     auto *childForOp = forOp.getOperation();
     auto *parentForOp = forOp->getParentOp();
-    if (!llvm::isa<FuncOp>(parentForOp)) {
+    if (!llvm::isa<func::FuncOp>(parentForOp)) {
       if (!isa<AffineForOp>(parentForOp)) {
         LLVM_DEBUG(llvm::dbgs() << "Expected parent AffineForOp\n");
         return WalkResult::interrupt();

diff  --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
index 8d0b4d4957271..211e5fd45ab4a 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -141,7 +141,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
   auto *parentBlock = forOp->getBlock();
   if (!iv.use_empty()) {
     if (forOp.hasConstantLowerBound()) {
-      OpBuilder topBuilder(forOp->getParentOfType<FuncOp>().getBody());
+      OpBuilder topBuilder(forOp->getParentOfType<func::FuncOp>().getBody());
       auto constOp = topBuilder.create<arith::ConstantIndexOp>(
           forOp.getLoc(), forOp.getConstantLowerBound());
       iv.replaceAllUsesWith(constOp);
@@ -960,7 +960,7 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
 /// Identify valid and profitable bands of loops to tile. This is currently just
 /// a temporary placeholder to test the mechanics of tiled code generation.
 /// Returns all maximal outermost perfect loop nests to tile.
-void mlir::getTileableBands(FuncOp f,
+void mlir::getTileableBands(func::FuncOp f,
                             std::vector<SmallVector<AffineForOp, 6>> *bands) {
   // Get maximal perfect nest of 'affine.for' insts starting from root
   // (inclusive).
@@ -2049,7 +2049,7 @@ static LogicalResult generateCopy(
   *nBegin = begin;
   *nEnd = end;
 
-  FuncOp f = begin->getParentOfType<FuncOp>();
+  func::FuncOp f = begin->getParentOfType<func::FuncOp>();
   OpBuilder topBuilder(f.getBody());
   Value zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);
 
@@ -2067,7 +2067,7 @@ static LogicalResult generateCopy(
   OpBuilder &b = region.isWrite() ? epilogue : prologue;
 
   // Builder to create constants at the top level.
-  auto func = copyPlacementBlock->getParent()->getParentOfType<FuncOp>();
+  auto func = copyPlacementBlock->getParent()->getParentOfType<func::FuncOp>();
   OpBuilder top(func.getBody());
 
   auto loc = region.loc;
@@ -2614,7 +2614,7 @@ gatherLoopsInBlock(Block *block, unsigned currLoopDepth,
 }
 
 /// Gathers all AffineForOps in 'func.func' grouped by loop depth.
-void mlir::gatherLoops(FuncOp func,
+void mlir::gatherLoops(func::FuncOp func,
                        std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
   for (auto &block : func)
     gatherLoopsInBlock(&block, /*currLoopDepth=*/0, depthToLoops);

diff  --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 2466bf0e93714..6d015f51b622f 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -260,7 +260,7 @@ static Operation *getOutermostInvariantForOp(AffineIfOp ifOp) {
   // Walk up the parents past all for op that this conditional is invariant on.
   auto ifOperands = ifOp.getOperands();
   auto *res = ifOp.getOperation();
-  while (!isa<FuncOp>(res->getParentOp())) {
+  while (!isa<func::FuncOp>(res->getParentOp())) {
     auto *parentOp = res->getParentOp();
     if (auto forOp = dyn_cast<AffineForOp>(parentOp)) {
       if (llvm::is_contained(ifOperands, forOp.getInductionVar()))
@@ -1020,7 +1020,7 @@ static void loadCSE(AffineReadOpInterface loadA,
 // currently only eliminates the stores only if no other loads/uses (other
 // than dealloc) remain.
 //
-void mlir::affineScalarReplace(FuncOp f, DominanceInfo &domInfo,
+void mlir::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
                                PostDominanceInfo &postDomInfo) {
   // Load op's whose results were replaced by those forwarded from stores.
   SmallVector<Operation *, 8> opsToErase;
@@ -1277,12 +1277,12 @@ LogicalResult mlir::replaceAllMemRefUsesWith(
   std::unique_ptr<DominanceInfo> domInfo;
   std::unique_ptr<PostDominanceInfo> postDomInfo;
   if (domOpFilter)
-    domInfo =
-        std::make_unique<DominanceInfo>(domOpFilter->getParentOfType<FuncOp>());
+    domInfo = std::make_unique<DominanceInfo>(
+        domOpFilter->getParentOfType<func::FuncOp>());
 
   if (postDomOpFilter)
     postDomInfo = std::make_unique<PostDominanceInfo>(
-        postDomOpFilter->getParentOfType<FuncOp>());
+        postDomOpFilter->getParentOfType<func::FuncOp>());
 
   // Walk all uses of old memref; collect ops to perform replacement. We use a
   // DenseSet since an operation could potentially have multiple uses of a

diff  --git a/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
index 2510971a29e92..b64dbcf798216 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
@@ -153,7 +153,7 @@ struct ParallelComputeFunctionBounds {
 
 struct ParallelComputeFunction {
   unsigned numLoops;
-  FuncOp func;
+  func::FuncOp func;
   llvm::SmallVector<Value> captures;
 };
 
@@ -258,11 +258,11 @@ static ParallelComputeFunction createParallelComputeFunction(
       getParallelComputeFunctionType(op, rewriter);
 
   FunctionType type = computeFuncType.type;
-  FuncOp func = FuncOp::create(op.getLoc(),
-                               numBlockAlignedInnerLoops > 0
-                                   ? "parallel_compute_fn_with_aligned_loops"
-                                   : "parallel_compute_fn",
-                               type);
+  func::FuncOp func = func::FuncOp::create(
+      op.getLoc(),
+      numBlockAlignedInnerLoops > 0 ? "parallel_compute_fn_with_aligned_loops"
+                                    : "parallel_compute_fn",
+      type);
   func.setPrivate();
 
   // Insert function into the module symbol table and assign it unique name.
@@ -455,8 +455,9 @@ static ParallelComputeFunction createParallelComputeFunction(
 //     call @parallel_compute_fn(%block_start, %block_size, ...);
 //   }
 //
-static FuncOp createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
-                                          PatternRewriter &rewriter) {
+static func::FuncOp
+createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
+                            PatternRewriter &rewriter) {
   OpBuilder::InsertionGuard guard(rewriter);
   Location loc = computeFunc.func.getLoc();
   ImplicitLocOpBuilder b(loc, rewriter);
@@ -476,7 +477,7 @@ static FuncOp createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
   inputTypes.append(computeFuncInputTypes.begin(), computeFuncInputTypes.end());
 
   FunctionType type = rewriter.getFunctionType(inputTypes, TypeRange());
-  FuncOp func = FuncOp::create(loc, "async_dispatch_fn", type);
+  func::FuncOp func = func::FuncOp::create(loc, "async_dispatch_fn", type);
   func.setPrivate();
 
   // Insert function into the module symbol table and assign it unique name.
@@ -580,7 +581,7 @@ static void doAsyncDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
 
   // Add one more level of indirection to dispatch parallel compute functions
   // using async operations and recursive work splitting.
-  FuncOp asyncDispatchFunction =
+  func::FuncOp asyncDispatchFunction =
       createAsyncDispatchFunction(parallelComputeFunction, rewriter);
 
   Value c0 = b.create<arith::ConstantIndexOp>(0);
@@ -651,7 +652,7 @@ doSequentialDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
                      const SmallVector<Value> &tripCounts) {
   MLIRContext *ctx = op->getContext();
 
-  FuncOp compute = parallelComputeFunction.func;
+  func::FuncOp compute = parallelComputeFunction.func;
 
   Value c0 = b.create<arith::ConstantIndexOp>(0);
   Value c1 = b.create<arith::ConstantIndexOp>(1);

diff  --git a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
index 528702e203390..d8be6c7486cfb 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
@@ -56,7 +56,7 @@ class AsyncToAsyncRuntimePass
 /// operation to enable non-blocking waiting via coroutine suspension.
 namespace {
 struct CoroMachinery {
-  FuncOp func;
+  func::FuncOp func;
 
   // Async execute region returns a completion token, and an async value for
   // each yielded value.
@@ -124,7 +124,7 @@ struct CoroMachinery {
 ///       return %token, %value : !async.token, !async.value<T>
 ///   }
 ///
-static CoroMachinery setupCoroMachinery(FuncOp func) {
+static CoroMachinery setupCoroMachinery(func::FuncOp func) {
   assert(!func.getBlocks().empty() && "Function must have an entry block");
 
   MLIRContext *ctx = func.getContext();
@@ -237,7 +237,7 @@ static Block *setupSetErrorBlock(CoroMachinery &coro) {
 /// function.
 ///
 /// Note that this is not reversible transformation.
-static std::pair<FuncOp, CoroMachinery>
+static std::pair<func::FuncOp, CoroMachinery>
 outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
   ModuleOp module = execute->getParentOfType<ModuleOp>();
 
@@ -265,7 +265,8 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
 
   // TODO: Derive outlined function name from the parent FuncOp (support
   // multiple nested async.execute operations).
-  FuncOp func = FuncOp::create(loc, kAsyncFnPrefix, funcType, funcAttrs);
+  func::FuncOp func =
+      func::FuncOp::create(loc, kAsyncFnPrefix, funcType, funcAttrs);
   symbolTable.insert(func);
 
   SymbolTable::setSymbolVisibility(func, SymbolTable::Visibility::Private);
@@ -385,8 +386,9 @@ class AwaitOpLoweringBase : public OpConversionPattern<AwaitType> {
   using AwaitAdaptor = typename AwaitType::Adaptor;
 
 public:
-  AwaitOpLoweringBase(MLIRContext *ctx,
-                      llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
+  AwaitOpLoweringBase(
+      MLIRContext *ctx,
+      llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
       : OpConversionPattern<AwaitType>(ctx),
         outlinedFunctions(outlinedFunctions) {}
 
@@ -399,7 +401,7 @@ class AwaitOpLoweringBase : public OpConversionPattern<AwaitType> {
       return rewriter.notifyMatchFailure(op, "unsupported awaitable type");
 
     // Check if await operation is inside the outlined coroutine function.
-    auto func = op->template getParentOfType<FuncOp>();
+    auto func = op->template getParentOfType<func::FuncOp>();
     auto outlined = outlinedFunctions.find(func);
     const bool isInCoroutine = outlined != outlinedFunctions.end();
 
@@ -479,7 +481,7 @@ class AwaitOpLoweringBase : public OpConversionPattern<AwaitType> {
   }
 
 private:
-  llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
+  llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
 };
 
 /// Lowering for `async.await` with a token operand.
@@ -524,7 +526,7 @@ class YieldOpLowering : public OpConversionPattern<async::YieldOp> {
 public:
   YieldOpLowering(
       MLIRContext *ctx,
-      const llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
+      const llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
       : OpConversionPattern<async::YieldOp>(ctx),
         outlinedFunctions(outlinedFunctions) {}
 
@@ -532,7 +534,7 @@ class YieldOpLowering : public OpConversionPattern<async::YieldOp> {
   matchAndRewrite(async::YieldOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     // Check if yield operation is inside the async coroutine function.
-    auto func = op->template getParentOfType<FuncOp>();
+    auto func = op->template getParentOfType<func::FuncOp>();
     auto outlined = outlinedFunctions.find(func);
     if (outlined == outlinedFunctions.end())
       return rewriter.notifyMatchFailure(
@@ -557,7 +559,7 @@ class YieldOpLowering : public OpConversionPattern<async::YieldOp> {
   }
 
 private:
-  const llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
+  const llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
 };
 
 //===----------------------------------------------------------------------===//
@@ -566,8 +568,9 @@ class YieldOpLowering : public OpConversionPattern<async::YieldOp> {
 
 class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
 public:
-  AssertOpLowering(MLIRContext *ctx,
-                   llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
+  AssertOpLowering(
+      MLIRContext *ctx,
+      llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
       : OpConversionPattern<cf::AssertOp>(ctx),
         outlinedFunctions(outlinedFunctions) {}
 
@@ -575,7 +578,7 @@ class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
   matchAndRewrite(cf::AssertOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     // Check if assert operation is inside the async coroutine function.
-    auto func = op->template getParentOfType<FuncOp>();
+    auto func = op->template getParentOfType<func::FuncOp>();
     auto outlined = outlinedFunctions.find(func);
     if (outlined == outlinedFunctions.end())
       return rewriter.notifyMatchFailure(
@@ -597,7 +600,7 @@ class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
   }
 
 private:
-  llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
+  llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
 };
 
 //===----------------------------------------------------------------------===//
@@ -607,7 +610,7 @@ class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
 /// 2) Prepending the results with `async.token`.
 /// 3) Setting up coroutine blocks.
 /// 4) Rewriting return ops as yield op and branch op into the suspend block.
-static CoroMachinery rewriteFuncAsCoroutine(FuncOp func) {
+static CoroMachinery rewriteFuncAsCoroutine(func::FuncOp func) {
   auto *ctx = func->getContext();
   auto loc = func.getLoc();
   SmallVector<Type> resultTypes;
@@ -632,7 +635,8 @@ static CoroMachinery rewriteFuncAsCoroutine(FuncOp func) {
 ///
 /// The invocation of this function is safe only when call ops are traversed in
 /// reverse order of how they appear in a single block. See `funcsToCoroutines`.
-static void rewriteCallsiteForCoroutine(func::CallOp oldCall, FuncOp func) {
+static void rewriteCallsiteForCoroutine(func::CallOp oldCall,
+                                        func::FuncOp func) {
   auto loc = func.getLoc();
   ImplicitLocOpBuilder callBuilder(loc, oldCall);
   auto newCall = callBuilder.create<func::CallOp>(
@@ -651,25 +655,25 @@ static void rewriteCallsiteForCoroutine(func::CallOp oldCall, FuncOp func) {
   oldCall.erase();
 }
 
-static bool isAllowedToBlock(FuncOp func) {
+static bool isAllowedToBlock(func::FuncOp func) {
   return !!func->getAttrOfType<UnitAttr>(AsyncDialect::kAllowedToBlockAttrName);
 }
 
-static LogicalResult
-funcsToCoroutines(ModuleOp module,
-                  llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions) {
+static LogicalResult funcsToCoroutines(
+    ModuleOp module,
+    llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions) {
   // The following code supports the general case when 2 functions mutually
   // recurse into each other. Because of this and that we are relying on
   // SymbolUserMap to find pointers to calling FuncOps, we cannot simply erase
   // a FuncOp while inserting an equivalent coroutine, because that could lead
   // to dangling pointers.
 
-  SmallVector<FuncOp> funcWorklist;
+  SmallVector<func::FuncOp> funcWorklist;
 
   // Careful, it's okay to add a func to the worklist multiple times if and only
   // if the loop processing the worklist will skip the functions that have
   // already been converted to coroutines.
-  auto addToWorklist = [&](FuncOp func) {
+  auto addToWorklist = [&](func::FuncOp func) {
     if (isAllowedToBlock(func))
       return;
     // N.B. To refactor this code into a separate pass the lookup in
@@ -688,7 +692,7 @@ funcsToCoroutines(ModuleOp module,
   };
 
   // Traverse in post-order collecting for each func op the await ops it has.
-  for (FuncOp func : module.getOps<FuncOp>())
+  for (func::FuncOp func : module.getOps<func::FuncOp>())
     addToWorklist(func);
 
   SymbolTableCollection symbolTable;
@@ -718,7 +722,7 @@ funcsToCoroutines(ModuleOp module,
     // Rewrite the callsites to await on results of the newly created coroutine.
     for (Operation *op : users) {
       if (func::CallOp call = dyn_cast<func::CallOp>(*op)) {
-        FuncOp caller = call->getParentOfType<FuncOp>();
+        func::FuncOp caller = call->getParentOfType<func::FuncOp>();
         rewriteCallsiteForCoroutine(call, func); // Careful, erases the call op.
         addToWorklist(caller);
       } else {
@@ -736,7 +740,7 @@ void AsyncToAsyncRuntimePass::runOnOperation() {
   SymbolTable symbolTable(module);
 
   // Outline all `async.execute` body regions into async functions (coroutines).
-  llvm::DenseMap<FuncOp, CoroMachinery> outlinedFunctions;
+  llvm::DenseMap<func::FuncOp, CoroMachinery> outlinedFunctions;
 
   module.walk([&](ExecuteOp execute) {
     outlinedFunctions.insert(outlineExecuteOp(symbolTable, execute));
@@ -749,7 +753,7 @@ void AsyncToAsyncRuntimePass::runOnOperation() {
 
   // Returns true if operation is inside the coroutine.
   auto isInCoroutine = [&](Operation *op) -> bool {
-    auto parentFunc = op->getParentOfType<FuncOp>();
+    auto parentFunc = op->getParentOfType<func::FuncOp>();
     return outlinedFunctions.find(parentFunc) != outlinedFunctions.end();
   };
 
@@ -800,14 +804,14 @@ void AsyncToAsyncRuntimePass::runOnOperation() {
   // Assertions must be converted to runtime errors inside async functions.
   runtimeTarget.addDynamicallyLegalOp<cf::AssertOp>(
       [&](cf::AssertOp op) -> bool {
-        auto func = op->getParentOfType<FuncOp>();
+        auto func = op->getParentOfType<func::FuncOp>();
         return outlinedFunctions.find(func) == outlinedFunctions.end();
       });
 
   if (eliminateBlockingAwaitOps)
     runtimeTarget.addDynamicallyLegalOp<RuntimeAwaitOp>(
         [&](RuntimeAwaitOp op) -> bool {
-          return isAllowedToBlock(op->getParentOfType<FuncOp>());
+          return isAllowedToBlock(op->getParentOfType<func::FuncOp>());
         });
 
   if (failed(applyPartialConversion(module, runtimeTarget,
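
The lookups keyed on the enclosing function keep the same shape once the map key becomes func::FuncOp. A small sketch of that parent-lookup pattern, assuming a map populated elsewhere; the helper name and the placeholder value type `int` are illustrative only.

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "llvm/ADT/DenseMap.h"

// Illustrative only: walk up to the enclosing func::FuncOp and test
// membership in a map keyed on it. The value type `int` stands in for
// whatever per-function state a real pass would keep.
static bool isInOutlinedFunc(
    mlir::Operation *op,
    const llvm::DenseMap<mlir::func::FuncOp, int> &outlined) {
  mlir::func::FuncOp parent = op->getParentOfType<mlir::func::FuncOp>();
  return parent && outlined.count(parent);
}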

diff  --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index f2c67ed754a50..af1105fa7a720 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -626,7 +626,7 @@ bool bufferization::isFunctionArgument(Value value) {
   auto bbArg = value.dyn_cast<BlockArgument>();
   if (!bbArg)
     return false;
-  return isa<FuncOp>(bbArg.getOwner()->getParentOp());
+  return isa<func::FuncOp>(bbArg.getOwner()->getParentOp());
 }
 
 MemRefType bufferization::getContiguousMemRefType(ShapedType shapedType,

diff  --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp
index 5d9dd6d1b7b61..07b48363fbecc 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp
@@ -83,7 +83,7 @@ walkReturnOperations(Region *region,
 static bool validateSupportedControlFlow(Operation *op) {
   WalkResult result = op->walk([&](Operation *operation) {
     // Only check ops that are inside a function.
-    if (!operation->getParentOfType<FuncOp>())
+    if (!operation->getParentOfType<func::FuncOp>())
       return WalkResult::advance();
 
     auto regions = operation->getRegions();
@@ -641,7 +641,7 @@ struct BufferDeallocationPass : BufferDeallocationBase<BufferDeallocationPass> {
   }
 
   void runOnOperation() override {
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
     if (func.isExternal())
       return;
 
@@ -654,7 +654,7 @@ struct BufferDeallocationPass : BufferDeallocationBase<BufferDeallocationPass> {
 
 LogicalResult bufferization::deallocateBuffers(Operation *op) {
   if (isa<ModuleOp>(op)) {
-    WalkResult result = op->walk([&](FuncOp funcOp) {
+    WalkResult result = op->walk([&](func::FuncOp funcOp) {
       if (failed(deallocateBuffers(funcOp)))
         return WalkResult::interrupt();
       return WalkResult::advance();

diff  --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
index 514ec901c3668..ba803ea65bdda 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
@@ -18,7 +18,7 @@ using namespace mlir;
 // Updates the func op and entry block.
 //
 // Any args appended to the entry block are added to `appendedEntryArgs`.
-static void updateFuncOp(FuncOp func,
+static void updateFuncOp(func::FuncOp func,
                          SmallVectorImpl<BlockArgument> &appendedEntryArgs) {
   auto functionType = func.getFunctionType();
 
@@ -57,10 +57,10 @@ static void updateFuncOp(FuncOp func,
     appendedEntryArgs.push_back(func.front().addArgument(type, loc));
 }
 
-// Updates all ReturnOps in the scope of the given FuncOp by either keeping them
-// as return values or copying the associated buffer contents into the given
-// out-params.
-static void updateReturnOps(FuncOp func,
+// Updates all ReturnOps in the scope of the given func::FuncOp by either
+// keeping them as return values or copying the associated buffer contents into
+// the given out-params.
+static void updateReturnOps(func::FuncOp func,
                             ArrayRef<BlockArgument> appendedEntryArgs) {
   func.walk([&](func::ReturnOp op) {
     SmallVector<Value, 6> copyIntoOutParams;
@@ -128,7 +128,7 @@ struct BufferResultsToOutParamsPass
   void runOnOperation() override {
     ModuleOp module = getOperation();
 
-    for (auto func : module.getOps<FuncOp>()) {
+    for (auto func : module.getOps<func::FuncOp>()) {
       SmallVector<BlockArgument, 6> appendedEntryArgs;
       updateFuncOp(func, appendedEntryArgs);
       if (func.isExternal())
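
Iterating over the functions of a module is the other recurring idiom in these hunks. A short sketch under the same assumptions, with an illustrative helper name:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "llvm/ADT/SmallVector.h"

// Illustrative only: collect every func::FuncOp nested directly in a
// module, using the same getOps<func::FuncOp>() iteration as above.
static llvm::SmallVector<mlir::func::FuncOp>
collectModuleFuncs(mlir::ModuleOp module) {
  llvm::SmallVector<mlir::func::FuncOp> funcs;
  for (mlir::func::FuncOp func : module.getOps<mlir::func::FuncOp>())
    funcs.push_back(func);
  return funcs;
}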

diff  --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
index 48d67fe16b0ed..12451ca5f2ced 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -224,7 +224,7 @@ std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass(
   return std::make_unique<OneShotBufferizePass>(options);
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::bufferization::createFinalizingBufferizePass() {
   return std::make_unique<FinalizingBufferizePass>();
 }

diff  --git a/mlir/lib/Dialect/Func/Transforms/DecomposeCallGraphTypes.cpp b/mlir/lib/Dialect/Func/Transforms/DecomposeCallGraphTypes.cpp
index 4a95b5989a4bc..98ae826b6497f 100644
--- a/mlir/lib/Dialect/Func/Transforms/DecomposeCallGraphTypes.cpp
+++ b/mlir/lib/Dialect/Func/Transforms/DecomposeCallGraphTypes.cpp
@@ -57,12 +57,12 @@ namespace {
 /// Expand function arguments according to the provided TypeConverter and
 /// ValueDecomposer.
 struct DecomposeCallGraphTypesForFuncArgs
-    : public DecomposeCallGraphTypesOpConversionPattern<FuncOp> {
+    : public DecomposeCallGraphTypesOpConversionPattern<func::FuncOp> {
   using DecomposeCallGraphTypesOpConversionPattern::
       DecomposeCallGraphTypesOpConversionPattern;
 
   LogicalResult
-  matchAndRewrite(FuncOp op, OpAdaptor adaptor,
+  matchAndRewrite(func::FuncOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const final {
     auto functionType = op.getFunctionType();
 

diff  --git a/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp b/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
index 7b89936682477..5e60696346d0e 100644
--- a/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
@@ -338,6 +338,6 @@ void GpuAsyncRegionPass::runOnOperation() {
   getOperation().getRegion().walk(SingleTokenUseCallback());
 }
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createGpuAsyncRegionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createGpuAsyncRegionPass() {
   return std::make_unique<GpuAsyncRegionPass>();
 }

diff  --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 3dba4164c42ea..5b15c1d0f42f4 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -293,13 +293,14 @@ class GpuKernelOutliningPass
   void runOnOperation() override {
     SymbolTable symbolTable(getOperation());
     bool modified = false;
-    for (auto func : getOperation().getOps<FuncOp>()) {
+    for (auto func : getOperation().getOps<func::FuncOp>()) {
       // Insert just after the function.
       Block::iterator insertPt(func->getNextNode());
       auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
         SetVector<Value> operands;
         std::string kernelFnName =
-            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();
+            Twine(op->getParentOfType<func::FuncOp>().getName(), "_kernel")
+                .str();
 
         gpu::GPUFuncOp outlinedFunc =
             outlineKernelFuncImpl(op, kernelFnName, operands);

diff  --git a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
index 46d4f67c57b21..5a6d6788a70a9 100644
--- a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
+++ b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
@@ -94,7 +94,7 @@ StringRef LinalgDependenceGraph::getDependenceTypeStr(DependenceType depType) {
 }
 
 LinalgDependenceGraph
-LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, FuncOp f) {
+LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, func::FuncOp f) {
   SmallVector<LinalgOp, 8> linalgOps;
   f.walk([&](LinalgOp op) { linalgOps.push_back(op); });
   return LinalgDependenceGraph(aliases, linalgOps);

diff  --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp
index 5c636a84ffc99..7178e03a0c22d 100644
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp
@@ -87,7 +87,7 @@ using namespace comprehensive_bufferize;
 using namespace mlir::bufferization;
 
 /// A mapping of FuncOps to their callers.
-using FuncCallerMap = DenseMap<FuncOp, DenseSet<Operation *>>;
+using FuncCallerMap = DenseMap<func::FuncOp, DenseSet<Operation *>>;
 
 namespace {
 /// The state of analysis of a FuncOp.
@@ -113,27 +113,27 @@ struct FuncAnalysisState : public DialectAnalysisState {
 
   /// A mapping of ReturnOp OpOperand indices to equivalent FuncOp BBArg
   /// indices.
-  DenseMap<FuncOp, IndexMapping> equivalentFuncArgs;
+  DenseMap<func::FuncOp, IndexMapping> equivalentFuncArgs;
 
   /// A mapping of ReturnOp OpOperand indices to aliasing FuncOp BBArg indices.
-  DenseMap<FuncOp, IndexToIndexListMapping> aliasingFuncArgs;
+  DenseMap<func::FuncOp, IndexToIndexListMapping> aliasingFuncArgs;
 
   /// A mapping of FuncOp BBArg indices to aliasing ReturnOp OpOperand indices.
-  DenseMap<FuncOp, IndexToIndexListMapping> aliasingReturnVals;
+  DenseMap<func::FuncOp, IndexToIndexListMapping> aliasingReturnVals;
 
   /// A set of all read BlockArguments of FuncOps.
-  DenseMap<FuncOp, BbArgIndexSet> readBbArgs;
+  DenseMap<func::FuncOp, BbArgIndexSet> readBbArgs;
 
   /// A set of all written-to BlockArguments of FuncOps.
-  DenseMap<FuncOp, BbArgIndexSet> writtenBbArgs;
+  DenseMap<func::FuncOp, BbArgIndexSet> writtenBbArgs;
 
   /// Keep track of which FuncOps are fully analyzed or currently being
   /// analyzed.
-  DenseMap<FuncOp, FuncOpAnalysisState> analyzedFuncOps;
+  DenseMap<func::FuncOp, FuncOpAnalysisState> analyzedFuncOps;
 
   /// This function is called right before analyzing the given FuncOp. It
   /// initializes the data structures for the FuncOp in this state object.
-  void startFunctionAnalysis(FuncOp funcOp) {
+  void startFunctionAnalysis(func::FuncOp funcOp) {
     analyzedFuncOps[funcOp] = FuncOpAnalysisState::InProgress;
     auto createdEquiv = equivalentFuncArgs.try_emplace(funcOp, IndexMapping());
     auto createdAliasingOperands =
@@ -176,7 +176,7 @@ static FuncAnalysisState &getFuncAnalysisState(AnalysisState &state) {
 
 /// Return the state (phase) of analysis of the FuncOp.
 static FuncOpAnalysisState getFuncOpAnalysisState(const AnalysisState &state,
-                                                  FuncOp funcOp) {
+                                                  func::FuncOp funcOp) {
   const FuncAnalysisState &moduleState = getFuncAnalysisState(state);
   auto it = moduleState.analyzedFuncOps.find(funcOp);
   if (it == moduleState.analyzedFuncOps.end())
@@ -186,7 +186,7 @@ static FuncOpAnalysisState getFuncOpAnalysisState(const AnalysisState &state,
 
 /// Return the unique ReturnOp that terminates `funcOp`.
 /// Return nullptr if there is no such unique ReturnOp.
-static func::ReturnOp getAssumedUniqueReturnOp(FuncOp funcOp) {
+static func::ReturnOp getAssumedUniqueReturnOp(func::FuncOp funcOp) {
   func::ReturnOp returnOp;
   for (Block &b : funcOp.getBody()) {
     if (auto candidateOp = dyn_cast<func::ReturnOp>(b.getTerminator())) {
@@ -230,7 +230,7 @@ aliasingFuncOpBBArgsAnalysis(Operation *op, AnalysisState &state,
   FuncAnalysisState &funcState = getFuncAnalysisState(state);
 
   // Support only single return-terminated block in the function.
-  auto funcOp = cast<FuncOp>(op);
+  auto funcOp = cast<func::FuncOp>(op);
   func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
   assert(returnOp && "expected func with single return op");
 
@@ -261,13 +261,13 @@ static bool isValueWritten(Value value, const AnalysisState &state,
                            const BufferizationAliasInfo &aliasInfo) {
 #ifndef NDEBUG
   assert(value.getType().isa<TensorType>() && "expected TensorType");
-  FuncOp funcOp;
+  func::FuncOp funcOp;
   if (auto bbArg = value.dyn_cast<BlockArgument>()) {
     Operation *owner = bbArg.getOwner()->getParentOp();
-    funcOp = isa<FuncOp>(owner) ? cast<FuncOp>(owner)
-                                : owner->getParentOfType<FuncOp>();
+    funcOp = isa<func::FuncOp>(owner) ? cast<func::FuncOp>(owner)
+                                      : owner->getParentOfType<func::FuncOp>();
   } else {
-    funcOp = value.getDefiningOp()->getParentOfType<FuncOp>();
+    funcOp = value.getDefiningOp()->getParentOfType<func::FuncOp>();
   }
   assert(getFuncOpAnalysisState(state, funcOp) !=
              FuncOpAnalysisState::NotAnalyzed &&
@@ -283,7 +283,7 @@ static bool isValueWritten(Value value, const AnalysisState &state,
   return isWritten;
 }
 
-static void annotateFuncArgAccess(FuncOp funcOp, BlockArgument bbArg,
+static void annotateFuncArgAccess(func::FuncOp funcOp, BlockArgument bbArg,
                                   bool isRead, bool isWritten) {
   OpBuilder b(funcOp.getContext());
   Attribute accessType;
@@ -307,7 +307,7 @@ funcOpBbArgReadWriteAnalysis(Operation *op, AnalysisState &state,
                              BufferizationAliasInfo &aliasInfo,
                              SmallVector<Operation *> &newOps) {
   FuncAnalysisState &funcState = getFuncAnalysisState(state);
-  auto funcOp = cast<FuncOp>(op);
+  auto funcOp = cast<func::FuncOp>(op);
 
   // If the function has no body, conservatively assume that all args are
   // read + written.
@@ -347,22 +347,22 @@ static Value getNonCastedValue(Value value) {
   return value;
 }
 
-/// Remove the attribute that triggers inplace bufferization on a FuncOp
+/// Remove the attribute that triggers inplace bufferization on a func::FuncOp
 /// argument `bbArg`.
 static void removeBufferizationFuncArguments(BlockArgument bbArg) {
-  auto funcOp = cast<FuncOp>(bbArg.getOwner()->getParentOp());
+  auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
   funcOp.removeArgAttr(bbArg.getArgNumber(),
                        BufferizableOpInterface::kBufferLayoutAttrName);
   funcOp.removeArgAttr(bbArg.getArgNumber(),
                        BufferizableOpInterface::kInplaceableAttrName);
 }
 
-/// Return the FuncOp called by `callOp`.
-static FuncOp getCalledFunction(CallOpInterface callOp) {
+/// Return the func::FuncOp called by `callOp`.
+static func::FuncOp getCalledFunction(CallOpInterface callOp) {
   SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
   if (!sym)
     return nullptr;
-  return dyn_cast_or_null<FuncOp>(
+  return dyn_cast_or_null<func::FuncOp>(
       SymbolTable::lookupNearestSymbolFrom(callOp, sym));
 }
 
@@ -392,12 +392,12 @@ getBufferizedFunctionType(MLIRContext *ctx, TypeRange argumentTypes,
 /// Note: This only adds new equivalence info if the called function was already
 /// analyzed.
 // TODO: This does not handle cyclic function call graphs etc.
-static void equivalenceAnalysis(FuncOp funcOp,
+static void equivalenceAnalysis(func::FuncOp funcOp,
                                 BufferizationAliasInfo &aliasInfo,
                                 FuncAnalysisState &funcState) {
   funcOp->walk([&](func::CallOp callOp) {
-    FuncOp calledFunction = getCalledFunction(callOp);
-    assert(calledFunction && "could not retrieved called FuncOp");
+    func::FuncOp calledFunction = getCalledFunction(callOp);
+    assert(calledFunction && "could not retrieved called func::FuncOp");
 
     // No equivalence info available for the called function.
     if (!funcState.equivalentFuncArgs.count(calledFunction))
@@ -429,7 +429,7 @@ static void equivalenceAnalysis(FuncOp funcOp,
 /// Note: Returning a memref currently fails bufferization. If such memrefs
 /// originate from an op with an Alloc effect, they could be hoisted in the
 /// future.
-static LogicalResult bufferizeFuncOpBoundary(FuncOp funcOp,
+static LogicalResult bufferizeFuncOpBoundary(func::FuncOp funcOp,
                                              RewriterBase &rewriter,
                                              BufferizationState &state) {
   const FuncAnalysisState &funcState =
@@ -566,14 +566,14 @@ static LogicalResult bufferizeFuncOpBoundary(FuncOp funcOp,
 /// retrieve the called FuncOp from any CallOpInterface.
 static LogicalResult
 getFuncOpsOrderedByCalls(ModuleOp moduleOp,
-                         SmallVectorImpl<FuncOp> &orderedFuncOps,
+                         SmallVectorImpl<func::FuncOp> &orderedFuncOps,
                          FuncCallerMap &callerMap) {
   // For each FuncOp, the set of functions called by it (i.e. the union of
   // symbols of all nested CallOpInterfaceOp).
-  DenseMap<FuncOp, DenseSet<FuncOp>> calledBy;
+  DenseMap<func::FuncOp, DenseSet<func::FuncOp>> calledBy;
   // For each FuncOp, the number of CallOpInterface it contains.
-  DenseMap<FuncOp, unsigned> numberCallOpsContainedInFuncOp;
-  WalkResult res = moduleOp.walk([&](FuncOp funcOp) -> WalkResult {
+  DenseMap<func::FuncOp, unsigned> numberCallOpsContainedInFuncOp;
+  WalkResult res = moduleOp.walk([&](func::FuncOp funcOp) -> WalkResult {
     if (!funcOp.getBody().empty()) {
       func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
       if (!returnOp)
@@ -587,8 +587,8 @@ getFuncOpsOrderedByCalls(ModuleOp moduleOp,
       // Only support CallOp for now.
       if (!isa<func::CallOp>(callOp.getOperation()))
         return callOp->emitError() << "expected a CallOp";
-      FuncOp calledFunction = getCalledFunction(callOp);
-      assert(calledFunction && "could not retrieved called FuncOp");
+      func::FuncOp calledFunction = getCalledFunction(callOp);
+      assert(calledFunction && "could not retrieved called func::FuncOp");
       auto it = callerMap.try_emplace(calledFunction, DenseSet<Operation *>{});
       it.first->getSecond().insert(callOp);
       if (calledBy[calledFunction].count(funcOp) == 0) {
@@ -616,7 +616,7 @@ getFuncOpsOrderedByCalls(ModuleOp moduleOp,
   return success();
 }
 
-static void foreachCaller(const FuncCallerMap &callerMap, FuncOp callee,
+static void foreachCaller(const FuncCallerMap &callerMap, func::FuncOp callee,
                           llvm::function_ref<void(Operation *)> doit) {
   auto itCallers = callerMap.find(callee);
   if (itCallers == callerMap.end())
@@ -629,13 +629,13 @@ static void foreachCaller(const FuncCallerMap &callerMap, FuncOp callee,
 /// This is a purely mechanical process that may later become part of a
 /// separate pass with its own layout assignment heuristic.
 static void layoutPostProcessing(ModuleOp moduleOp) {
-  SmallVector<FuncOp> orderedFuncOps;
-  DenseMap<FuncOp, DenseSet<Operation *>> callerMap;
+  SmallVector<func::FuncOp> orderedFuncOps;
+  DenseMap<func::FuncOp, DenseSet<Operation *>> callerMap;
   auto res = getFuncOpsOrderedByCalls(moduleOp, orderedFuncOps, callerMap);
   (void)res;
   assert(succeeded(res) && "unexpected getFuncOpsOrderedByCalls failure");
 
-  for (FuncOp funcOp : orderedFuncOps) {
+  for (func::FuncOp funcOp : orderedFuncOps) {
     DenseMap<Operation *, SmallVector<Value>> operandsPerCaller;
     foreachCaller(callerMap, funcOp, [&](Operation *caller) {
       operandsPerCaller.try_emplace(caller, SmallVector<Value>());
@@ -718,9 +718,9 @@ namespace linalg {
 namespace comprehensive_bufferize {
 namespace std_ext {
 
-/// Return the index of the bbArg in the given FuncOp that is equivalent to the
-/// specified return value (if any).
-static Optional<int64_t> getEquivalentFuncArgIdx(FuncOp funcOp,
+/// Return the index of the bbArg in the given func::FuncOp that is equivalent
+/// to the specified return value (if any).
+static Optional<int64_t> getEquivalentFuncArgIdx(func::FuncOp funcOp,
                                                  const FuncAnalysisState &state,
                                                  int64_t returnValIdx) {
   auto funcOpIt = state.equivalentFuncArgs.find(funcOp);
@@ -742,8 +742,8 @@ struct CallOpInterface
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     func::CallOp callOp = cast<func::CallOp>(op);
-    FuncOp funcOp = getCalledFunction(callOp);
-    assert(funcOp && "expected CallOp to a FuncOp");
+    func::FuncOp funcOp = getCalledFunction(callOp);
+    assert(funcOp && "expected CallOp to a func::FuncOp");
 
     const FuncAnalysisState &funcState = getFuncAnalysisState(state);
     if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
@@ -757,8 +757,8 @@ struct CallOpInterface
   bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                                const AnalysisState &state) const {
     func::CallOp callOp = cast<func::CallOp>(op);
-    FuncOp funcOp = getCalledFunction(callOp);
-    assert(funcOp && "expected CallOp to a FuncOp");
+    func::FuncOp funcOp = getCalledFunction(callOp);
+    assert(funcOp && "expected CallOp to a func::FuncOp");
 
     const FuncAnalysisState &funcState = getFuncAnalysisState(state);
     if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
@@ -772,8 +772,8 @@ struct CallOpInterface
   SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
                                             const AnalysisState &state) const {
     func::CallOp callOp = cast<func::CallOp>(op);
-    FuncOp funcOp = getCalledFunction(callOp);
-    assert(funcOp && "expected CallOp to a FuncOp");
+    func::FuncOp funcOp = getCalledFunction(callOp);
+    assert(funcOp && "expected CallOp to a func::FuncOp");
     const FuncAnalysisState &funcState = getFuncAnalysisState(state);
     if (getFuncOpAnalysisState(state, funcOp) !=
         FuncOpAnalysisState::Analyzed) {
@@ -799,8 +799,8 @@ struct CallOpInterface
   getAliasingOpOperand(Operation *op, OpResult opResult,
                        const AnalysisState &state) const {
     func::CallOp callOp = cast<func::CallOp>(op);
-    FuncOp funcOp = getCalledFunction(callOp);
-    assert(funcOp && "expected CallOp to a FuncOp");
+    func::FuncOp funcOp = getCalledFunction(callOp);
+    assert(funcOp && "expected CallOp to a func::FuncOp");
     const FuncAnalysisState &funcState = getFuncAnalysisState(state);
     if (getFuncOpAnalysisState(state, funcOp) !=
         FuncOpAnalysisState::Analyzed) {
@@ -826,16 +826,16 @@ struct CallOpInterface
     return BufferRelation::Equivalent;
   }
 
-  /// In a first approximation, all the function arguments of a FuncOp are
+  /// In a first approximation, all the function arguments of a func::FuncOp are
   /// marked inplaceable. For now, it is the responsibility of the `callOp`
-  /// bufferization to allow FuncOp that are inplaceable to write inPlace.
+  /// bufferization to allow func::FuncOp that are inplaceable to write inPlace.
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           BufferizationState &state) const {
     func::CallOp callOp = cast<func::CallOp>(op);
     unsigned numResults = callOp.getNumResults();
     unsigned numOperands = callOp->getNumOperands();
-    FuncOp funcOp = getCalledFunction(callOp);
-    assert(funcOp && "expected CallOp to a FuncOp");
+    func::FuncOp funcOp = getCalledFunction(callOp);
+    assert(funcOp && "expected CallOp to a func::FuncOp");
     const FuncAnalysisState &funcState =
         getFuncAnalysisState(state.getAnalysisState());
     const OneShotBufferizationOptions &options =
@@ -863,11 +863,11 @@ struct CallOpInterface
     //
     // Note: If a function has no body, no equivalence information is
     // available. Consequently, a tensor return value cannot be proven to fold
-    // onto a FuncOp bbArg, so calls to such functions are not bufferizable at
-    // the moment.
+    // onto a func::FuncOp bbArg, so calls to such functions are not
+    // bufferizable at the moment.
 
     // 1. Compute the result types of the new CallOp. Tensor results that are
-    // equivalent to a FuncOp bbArg are no longer returned.
+    // equivalent to a func::FuncOp bbArg are no longer returned.
     for (const auto &it : llvm::enumerate(callOp.getResultTypes())) {
       unsigned returnValIdx = it.index();
       Type returnType = it.value();
@@ -922,8 +922,8 @@ struct CallOpInterface
       }
 
       // Retrieve buffers for tensor operands. Tensor operand buffers, who's
-      // corresponding FuncOp bbArgs are equivalent to a returned tensor, were
-      // already stored in `newOperands` during Step 1.
+      // corresponding func::FuncOp bbArgs are equivalent to a returned tensor,
+      // were already stored in `newOperands` during Step 1.
       Value buffer = newOperands[idx];
       if (!buffer) {
         FailureOr<Value> bufferOrFailure = state.getBuffer(rewriter, opOperand);
@@ -990,7 +990,7 @@ struct ReturnOpInterface
                           BufferizationState &state) const {
 #ifndef NDEBUG
     auto returnOp = cast<func::ReturnOp>(op);
-    assert(isa<FuncOp>(returnOp->getParentOp()) &&
+    assert(isa<func::FuncOp>(returnOp->getParentOp()) &&
            "only support FuncOp parent for ReturnOp");
 #endif // NDEBUG
     return failure();
@@ -998,7 +998,8 @@ struct ReturnOpInterface
 };
 
 struct FuncOpInterface
-    : public BufferizableOpInterface::ExternalModel<FuncOpInterface, FuncOp> {
+    : public BufferizableOpInterface::ExternalModel<FuncOpInterface,
+                                                    func::FuncOp> {
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           BufferizationState &state) const {
     return failure();
@@ -1007,7 +1008,7 @@ struct FuncOpInterface
   /// Return `true` if the given function argument is writable.
   bool isWritable(Operation *op, Value value,
                   const AnalysisState &state) const {
-    auto funcOp = cast<FuncOp>(op);
+    auto funcOp = cast<func::FuncOp>(op);
     BlockArgument bbArg = value.dyn_cast<BlockArgument>();
     assert(bbArg && "expected BlockArgument");
 
@@ -1039,17 +1040,17 @@ void mlir::linalg::comprehensive_bufferize::std_ext::
   });
 }
 
-/// Set the attribute that triggers inplace bufferization on a FuncOp argument
-/// `bbArg`.
+/// Set the attribute that triggers inplace bufferization on a func::FuncOp
+/// argument `bbArg`.
 static void setInPlaceFuncArgument(BlockArgument bbArg, bool inPlace) {
-  auto funcOp = cast<FuncOp>(bbArg.getOwner()->getParentOp());
+  auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
   funcOp.setArgAttr(bbArg.getArgNumber(),
                     BufferizableOpInterface::kInplaceableAttrName,
                     BoolAttr::get(bbArg.getContext(), inPlace));
 }
 
 /// Annotate the IR with the result of the analysis. For testing/debugging only.
-static void annotateOpsWithBufferizationMarkers(FuncOp funcOp,
+static void annotateOpsWithBufferizationMarkers(func::FuncOp funcOp,
                                                 const AnalysisState &state) {
   auto bufferizableOp = cast<BufferizableOpInterface>(funcOp.getOperation());
   for (BlockArgument bbArg : funcOp.getArguments())
@@ -1066,7 +1067,7 @@ LogicalResult mlir::linalg::comprehensive_bufferize::runModuleBufferize(
   BufferizationAliasInfo &aliasInfo = analysisState.getAliasInfo();
 
   // A list of functions in the order in which they are analyzed + bufferized.
-  SmallVector<FuncOp> orderedFuncOps;
+  SmallVector<func::FuncOp> orderedFuncOps;
 
   // A mapping of FuncOps to their callers.
   FuncCallerMap callerMap;
@@ -1079,7 +1080,7 @@ LogicalResult mlir::linalg::comprehensive_bufferize::runModuleBufferize(
   options.addPostAnalysisStep(funcOpBbArgReadWriteAnalysis);
 
   // Analyze ops.
-  for (FuncOp funcOp : orderedFuncOps) {
+  for (func::FuncOp funcOp : orderedFuncOps) {
     // No body => no analysis.
     if (funcOp.getBody().empty())
       continue;
@@ -1106,7 +1107,7 @@ LogicalResult mlir::linalg::comprehensive_bufferize::runModuleBufferize(
     return success();
 
   // Bufferize functions.
-  for (FuncOp funcOp : orderedFuncOps) {
+  for (func::FuncOp funcOp : orderedFuncOps) {
     // No body => no analysis.
     if (!funcOp.getBody().empty())
       if (failed(bufferizeOp(funcOp, bufferizationState)))
@@ -1119,7 +1120,7 @@ LogicalResult mlir::linalg::comprehensive_bufferize::runModuleBufferize(
   }
 
   // Check result.
-  for (FuncOp funcOp : orderedFuncOps) {
+  for (func::FuncOp funcOp : orderedFuncOps) {
     if (!options.allowReturnAllocs &&
         llvm::any_of(funcOp.getFunctionType().getResults(), [](Type t) {
           return t.isa<MemRefType, UnrankedMemRefType>();
@@ -1138,7 +1139,7 @@ LogicalResult mlir::linalg::comprehensive_bufferize::runModuleBufferize(
   layoutPostProcessing(moduleOp);
 
   // Post-pass cleanup of inplaceable and buffer_layout attributes.
-  moduleOp.walk([&](FuncOp op) {
+  moduleOp.walk([&](func::FuncOp op) {
     for (BlockArgument bbArg : op.getArguments())
       removeBufferizationFuncArguments(bbArg);
   });

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
index 84b16bcd7a3f7..143cef1ce5cf0 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
@@ -42,6 +42,6 @@ struct LinalgBufferizePass : public LinalgBufferizeBase<LinalgBufferizePass> {
 };
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgBufferizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLinalgBufferizePass() {
   return std::make_unique<LinalgBufferizePass>();
 }
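
Many of the hunks in these pass files only touch the factory signatures, whose return type becomes std::unique_ptr<OperationPass<func::FuncOp>>. A schematic function pass in that style, assuming PassWrapper; the pass and factory names are illustrative, not from this commit.

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"

#include <memory>

namespace {
// Schematic function pass (names illustrative): anchoring the pass on
// func::FuncOp makes getOperation() return a func::FuncOp directly.
struct ExampleFuncPass
    : public mlir::PassWrapper<ExampleFuncPass,
                               mlir::OperationPass<mlir::func::FuncOp>> {
  void runOnOperation() override {
    mlir::func::FuncOp func = getOperation();
    (void)func; // a real pass would rewrite the function body here
  }
};
} // namespace

std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
createExampleFuncPass() {
  return std::make_unique<ExampleFuncPass>();
}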

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
index 1eacc93f74825..eece77d75f760 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
@@ -74,7 +74,7 @@ struct LinalgGeneralizationPass
 } // namespace
 
 void LinalgGeneralizationPass::runOnOperation() {
-  FuncOp func = getOperation();
+  func::FuncOp func = getOperation();
   RewritePatternSet patterns(&getContext());
   populateLinalgNamedOpsGeneralizationPatterns(patterns);
   (void)applyPatternsAndFoldGreedily(func.getBody(), std::move(patterns));
@@ -85,6 +85,7 @@ void mlir::linalg::populateLinalgNamedOpsGeneralizationPatterns(
   patterns.add<LinalgGeneralizationPattern>(patterns.getContext(), marker);
 }
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgGeneralizationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgGeneralizationPass() {
   return std::make_unique<LinalgGeneralizationPass>();
 }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
index e4f46a4415e49..1ede3a0cf03bf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
@@ -128,7 +128,7 @@ static bool isOnlyUsedAsInputOfLinalgOp(tensor::PadOp padOp) {
 static void
 getAtMostNEnclosingLoops(tensor::PadOp padOp, int nLevels,
                          SmallVector<scf::ForOp> &reverseEnclosingLoops) {
-  AsmState state(padOp->getParentOfType<mlir::FuncOp>());
+  AsmState state(padOp->getParentOfType<func::FuncOp>());
   (void)state;
   scf::ForOp outermostEnclosingForOp = nullptr;
   Operation *nextEnclosingOp = padOp->getParentOp();

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
index 58286bd6aa604..8dc0ee601116d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -339,7 +339,7 @@ static void hoistReadWrite(HoistableRead read, HoistableWrite write,
 // 4. Hoist the tensor_read/tensor_write and update the tensor SSA links.
 // After this transformation the scf.forOp may have unused arguments that can be
 // remove by the canonicalization pass.
-void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
+void mlir::linalg::hoistRedundantVectorTransfersOnTensor(func::FuncOp func) {
   bool changed = true;
   while (changed) {
     changed = false;
@@ -391,7 +391,7 @@ void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
   }
 }
 
-void mlir::linalg::hoistRedundantVectorTransfers(FuncOp func) {
+void mlir::linalg::hoistRedundantVectorTransfers(func::FuncOp func) {
   bool changed = true;
   while (changed) {
     changed = false;

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
index 1b81113bfcf0b..d57ecdfadc6b7 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
@@ -98,7 +98,7 @@ namespace {
 struct LinalgInlineScalarOperandsPass
     : public LinalgInlineScalarOperandsBase<LinalgInlineScalarOperandsPass> {
   void runOnOperation() override {
-    FuncOp funcOp = getOperation();
+    func::FuncOp funcOp = getOperation();
     MLIRContext *context = funcOp.getContext();
     RewritePatternSet patterns(context);
 
@@ -108,7 +108,7 @@ struct LinalgInlineScalarOperandsPass
 };
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgInlineScalarOperandsPass() {
   return std::make_unique<LinalgInlineScalarOperandsPass>();
 }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp
index 714fa0dec1e0f..fd1e948ee633c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp
@@ -450,7 +450,7 @@ struct LinalgStrategyRemoveMarkersPass
 } // namespace
 
 /// Create a LinalgStrategyTileAndFusePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgStrategyTileAndFusePass(
     StringRef opName, const LinalgTilingAndFusionOptions &options,
     const LinalgTransformationFilter &filter) {
@@ -459,7 +459,7 @@ mlir::createLinalgStrategyTileAndFusePass(
 }
 
 /// Create a LinalgStrategyTilePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgStrategyTilePass(StringRef opName,
                                    const LinalgTilingOptions &opt,
                                    const LinalgTransformationFilter &filter) {
@@ -467,7 +467,7 @@ mlir::createLinalgStrategyTilePass(StringRef opName,
 }
 
 /// Create a LinalgStrategyPadPass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgStrategyPadPass(StringRef opName,
                                   const LinalgPaddingOptions &opt,
                                   const LinalgTransformationFilter &filter) {
@@ -475,27 +475,30 @@ mlir::createLinalgStrategyPadPass(StringRef opName,
 }
 
 /// Create a LinalgStrategyPromotePass.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyPromotePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyPromotePass(
     StringRef opName, const LinalgPromotionOptions &opt,
     const LinalgTransformationFilter &filter) {
   return std::make_unique<LinalgStrategyPromotePass>(opName, opt, filter);
 }
 
 /// Create a LinalgStrategyGeneralizePass.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyGeneralizePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyGeneralizePass(
     StringRef opName, const LinalgTransformationFilter &filter) {
   return std::make_unique<LinalgStrategyGeneralizePass>(opName, filter);
 }
 
 /// Create a LinalgStrategyDecomposePass.
 // TODO: if/when we need finer control add an `opName` parameter.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyDecomposePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyDecomposePass(
     const LinalgTransformationFilter &filter) {
   return std::make_unique<LinalgStrategyDecomposePass>(filter);
 }
 
 /// Create a LinalgStrategyInterchangePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgStrategyInterchangePass(
     ArrayRef<int64_t> iteratorInterchange,
     const LinalgTransformationFilter &filter) {
@@ -504,7 +507,8 @@ mlir::createLinalgStrategyInterchangePass(
 }
 
 /// Create a LinalgStrategyVectorizePass.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyVectorizePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyVectorizePass(
     StringRef opName, LinalgVectorizationOptions opt,
     const LinalgTransformationFilter &filter, bool padVectorize) {
   return std::make_unique<LinalgStrategyVectorizePass>(opName, opt, filter,
@@ -512,21 +516,21 @@ std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyVectorizePass(
 }
 
 /// Create a LinalgStrategyEnablePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgStrategyEnablePass(LinalgEnablingOptions opt,
                                      const LinalgTransformationFilter &filter) {
   return std::make_unique<LinalgStrategyEnablePass>(opt, filter);
 }
 
 /// Create a LinalgStrategyLowerVectorsPass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgStrategyLowerVectorsPass(
     LinalgVectorLoweringOptions opt, const LinalgTransformationFilter &filter) {
   return std::make_unique<LinalgStrategyLowerVectorsPass>(opt, filter);
 }
 
 /// Create a LinalgStrategyRemoveMarkersPass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgStrategyRemoveMarkersPass() {
   return std::make_unique<LinalgStrategyRemoveMarkersPass>();
 }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 5a5554992341f..9ca9521c0818e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -298,7 +298,7 @@ struct FoldAffineOp : public RewritePattern {
 };
 
 template <typename LoopType>
-static void lowerLinalgToLoopsImpl(FuncOp funcOp) {
+static void lowerLinalgToLoopsImpl(func::FuncOp funcOp) {
   MLIRContext *context = funcOp.getContext();
   RewritePatternSet patterns(context);
   patterns.add<LinalgRewritePattern<LoopType>>(context);
@@ -338,16 +338,17 @@ struct LowerToParallelLoops
 
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createConvertLinalgToLoopsPass() {
   return std::make_unique<LowerToLoops>();
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createConvertLinalgToParallelLoopsPass() {
   return std::make_unique<LowerToParallelLoops>();
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createConvertLinalgToAffineLoopsPass() {
   return std::make_unique<LowerToAffineLoops>();
 }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 12e517f620190..61241a3026720 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -414,10 +414,10 @@ struct LinalgPromotionPass : public LinalgPromotionBase<LinalgPromotionPass> {
 } // namespace
 
 // TODO: support more transformation options in the pass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgPromotionPass(bool dynamicBuffers, bool useAlloca) {
   return std::make_unique<LinalgPromotionPass>(dynamicBuffers, useAlloca);
 }
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgPromotionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLinalgPromotionPass() {
   return std::make_unique<LinalgPromotionPass>();
 }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index b9a069c1d1e50..dfc78977c560e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -441,7 +441,7 @@ void mlir::linalg::populatePadTensorTilingPatterns(
   patterns.add<PadOpTilingPattern>(ctx, options);
 }
 
-static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
+static void applyExtractSliceOfPadTensorSwapPattern(func::FuncOp funcOp) {
   MLIRContext *ctx = funcOp.getContext();
   RewritePatternSet patterns(ctx);
   patterns.add<ExtractSliceOfPadTensorSwapPattern>(patterns.getContext());
@@ -460,7 +460,7 @@ struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
   }
 
   void runOnOperation() override {
-    FuncOp funcOp = getOperation();
+    func::FuncOp funcOp = getOperation();
     LinalgTilingLoopType type =
         llvm::StringSwitch<LinalgTilingLoopType>(loopType)
             .Case("for", LinalgTilingLoopType::Loops)
@@ -491,7 +491,7 @@ struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
 
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes,
                              linalg::LinalgTilingLoopType loopType) {
   return std::make_unique<LinalgTilingPass>(tileSizes, loopType);

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 9caac73ea1cf0..4636483eb5759 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -125,7 +125,7 @@ mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
   tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
     OpBuilder::InsertionGuard guard(b);
     b.setInsertionPointToStart(
-        &op->getParentOfType<FuncOp>().getBody().front());
+        &op->getParentOfType<func::FuncOp>().getBody().front());
     return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
       Value v = b.create<arith::ConstantIndexOp>(op->getLoc(), s);
       return v;

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 845c6434994e7..2ddee42e7fd1e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -676,7 +676,6 @@ LogicalResult mlir::linalg::vectorizeCopy(RewriterBase &rewriter,
   Operation *writeValue = rewriter.create<vector::TransferWriteOp>(
       loc, readValue, copyOp.target(), indices,
       rewriter.getMultiDimIdentityMap(srcType.getRank()));
-  copyOp->getParentOfType<FuncOp>().dump();
   rewriter.replaceOp(copyOp, writeValue->getResults());
   return success();
 }

diff  --git a/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp b/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
index 980de54a0756e..b0b31c9189039 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
@@ -34,12 +34,13 @@ namespace {
 /// non-normalizable as well. We assume external functions to be normalizable.
 struct NormalizeMemRefs : public NormalizeMemRefsBase<NormalizeMemRefs> {
   void runOnOperation() override;
-  void normalizeFuncOpMemRefs(FuncOp funcOp, ModuleOp moduleOp);
-  bool areMemRefsNormalizable(FuncOp funcOp);
-  void updateFunctionSignature(FuncOp funcOp, ModuleOp moduleOp);
-  void setCalleesAndCallersNonNormalizable(FuncOp funcOp, ModuleOp moduleOp,
-                                           DenseSet<FuncOp> &normalizableFuncs);
-  Operation *createOpResultsNormalized(FuncOp funcOp, Operation *oldOp);
+  void normalizeFuncOpMemRefs(func::FuncOp funcOp, ModuleOp moduleOp);
+  bool areMemRefsNormalizable(func::FuncOp funcOp);
+  void updateFunctionSignature(func::FuncOp funcOp, ModuleOp moduleOp);
+  void setCalleesAndCallersNonNormalizable(
+      func::FuncOp funcOp, ModuleOp moduleOp,
+      DenseSet<func::FuncOp> &normalizableFuncs);
+  Operation *createOpResultsNormalized(func::FuncOp funcOp, Operation *oldOp);
 };
 
 } // namespace
@@ -57,9 +58,9 @@ void NormalizeMemRefs::runOnOperation() {
   // normalizable are removed from this set.
   // TODO: Change this to work on FuncLikeOp once there is an operation
   // interface for it.
-  DenseSet<FuncOp> normalizableFuncs;
+  DenseSet<func::FuncOp> normalizableFuncs;
   // Initialize `normalizableFuncs` with all the functions within a module.
-  moduleOp.walk([&](FuncOp funcOp) { normalizableFuncs.insert(funcOp); });
+  moduleOp.walk([&](func::FuncOp funcOp) { normalizableFuncs.insert(funcOp); });
 
   // Traverse through all the functions applying a filter which determines
   // whether that function is normalizable or not. All callers/callees of
@@ -67,7 +68,7 @@ void NormalizeMemRefs::runOnOperation() {
   // they aren't passing any or specific non-normalizable memrefs. So,
   // functions which calls or get called by a non-normalizable becomes non-
   // normalizable functions themselves.
-  moduleOp.walk([&](FuncOp funcOp) {
+  moduleOp.walk([&](func::FuncOp funcOp) {
     if (normalizableFuncs.contains(funcOp)) {
       if (!areMemRefsNormalizable(funcOp)) {
         LLVM_DEBUG(llvm::dbgs()
@@ -85,7 +86,7 @@ void NormalizeMemRefs::runOnOperation() {
   LLVM_DEBUG(llvm::dbgs() << "Normalizing " << normalizableFuncs.size()
                           << " functions\n");
   // Those functions which can be normalized are subjected to normalization.
-  for (FuncOp &funcOp : normalizableFuncs)
+  for (func::FuncOp &funcOp : normalizableFuncs)
     normalizeFuncOpMemRefs(funcOp, moduleOp);
 }
 
@@ -102,7 +103,8 @@ static bool isMemRefNormalizable(Value::user_range opUsers) {
 /// Set all the calling functions and the callees of the function as not
 /// normalizable.
 void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
-    FuncOp funcOp, ModuleOp moduleOp, DenseSet<FuncOp> &normalizableFuncs) {
+    func::FuncOp funcOp, ModuleOp moduleOp,
+    DenseSet<func::FuncOp> &normalizableFuncs) {
   if (!normalizableFuncs.contains(funcOp))
     return;
 
@@ -115,8 +117,9 @@ void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
   for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
     // TODO: Extend this for ops that are FunctionOpInterface. This would
     // require creating an OpInterface for FunctionOpInterface ops.
-    FuncOp parentFuncOp = symbolUse.getUser()->getParentOfType<FuncOp>();
-    for (FuncOp &funcOp : normalizableFuncs) {
+    func::FuncOp parentFuncOp =
+        symbolUse.getUser()->getParentOfType<func::FuncOp>();
+    for (func::FuncOp &funcOp : normalizableFuncs) {
       if (parentFuncOp == funcOp) {
         setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                             normalizableFuncs);
@@ -128,8 +131,8 @@ void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
   // Functions called by this function.
   funcOp.walk([&](func::CallOp callOp) {
     StringAttr callee = callOp.getCalleeAttr().getAttr();
-    for (FuncOp &funcOp : normalizableFuncs) {
-      // We compare FuncOp and callee's name.
+    for (func::FuncOp &funcOp : normalizableFuncs) {
+      // We compare func::FuncOp and callee's name.
       if (callee == funcOp.getNameAttr()) {
         setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                             normalizableFuncs);
@@ -146,7 +149,7 @@ void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
 /// wherein even if the non-normalizable memref is not a part of the function's
 /// argument or return type, we still label the entire function as
 /// non-normalizable. We assume external functions to be normalizable.
-bool NormalizeMemRefs::areMemRefsNormalizable(FuncOp funcOp) {
+bool NormalizeMemRefs::areMemRefsNormalizable(func::FuncOp funcOp) {
   // We assume external functions to be normalizable.
   if (funcOp.isExternal())
     return true;
@@ -191,7 +194,7 @@ bool NormalizeMemRefs::areMemRefsNormalizable(FuncOp funcOp) {
 /// the calling function's signature.
 /// TODO: An update to the calling function signature is required only if the
 /// returned value is in turn used in ReturnOp of the calling function.
-void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
+void NormalizeMemRefs::updateFunctionSignature(func::FuncOp funcOp,
                                                ModuleOp moduleOp) {
   FunctionType functionType = funcOp.getFunctionType();
   SmallVector<Type, 4> resultTypes;
@@ -239,7 +242,7 @@ void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
   // function in ReturnOps, the caller function's signature will also change.
   // Hence we record the caller function in 'funcOpsToUpdate' to update their
   // signature as well.
-  llvm::SmallDenseSet<FuncOp, 8> funcOpsToUpdate;
+  llvm::SmallDenseSet<func::FuncOp, 8> funcOpsToUpdate;
   // We iterate over all symbolic uses of the function and update the return
   // type at the caller site.
   Optional<SymbolTable::UseRange> symbolUses = funcOp.getSymbolUses(moduleOp);
@@ -301,7 +304,7 @@ void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
       // required.
       // TODO: Extend this for ops that are FunctionOpInterface. This would
       // require creating an OpInterface for FunctionOpInterface ops.
-      FuncOp parentFuncOp = newCallOp->getParentOfType<FuncOp>();
+      func::FuncOp parentFuncOp = newCallOp->getParentOfType<func::FuncOp>();
       funcOpsToUpdate.insert(parentFuncOp);
     }
   }
@@ -313,14 +316,14 @@ void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
   // Updating the signature type of those functions which call the current
   // function. Only if the return type of the current function has a normalized
   // memref will the caller function become a candidate for signature update.
-  for (FuncOp parentFuncOp : funcOpsToUpdate)
+  for (func::FuncOp parentFuncOp : funcOpsToUpdate)
     updateFunctionSignature(parentFuncOp, moduleOp);
 }
 
 /// Normalizes the memrefs within a function which includes those arising as a
 /// result of AllocOps, CallOps and function's argument. The ModuleOp argument
 /// is used to help update function's signature after normalization.
-void NormalizeMemRefs::normalizeFuncOpMemRefs(FuncOp funcOp,
+void NormalizeMemRefs::normalizeFuncOpMemRefs(func::FuncOp funcOp,
                                               ModuleOp moduleOp) {
   // Turn memrefs' non-identity layouts maps into ones with identity. Collect
   // alloc ops first and then process since normalizeMemRef replaces/erases ops
@@ -477,7 +480,7 @@ void NormalizeMemRefs::normalizeFuncOpMemRefs(FuncOp funcOp,
 /// normalized, and new operation containing them in the operation results is
 /// returned. If all of the results of `oldOp` have no memrefs or memrefs
 /// without affine map, `oldOp` is returned without modification.
-Operation *NormalizeMemRefs::createOpResultsNormalized(FuncOp funcOp,
+Operation *NormalizeMemRefs::createOpResultsNormalized(func::FuncOp funcOp,
                                                        Operation *oldOp) {
   // Prepare OperationState to create newOp containing normalized memref in
   // the operation results.

diff --git a/mlir/lib/Dialect/Quant/Transforms/ConvertConst.cpp b/mlir/lib/Dialect/Quant/Transforms/ConvertConst.cpp
index 3dc9f9f8a5365..255a2f22b176f 100644
--- a/mlir/lib/Dialect/Quant/Transforms/ConvertConst.cpp
+++ b/mlir/lib/Dialect/Quant/Transforms/ConvertConst.cpp
@@ -98,6 +98,7 @@ void ConvertConstPass::runOnOperation() {
   (void)applyPatternsAndFoldGreedily(func, std::move(patterns));
 }
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::quant::createConvertConstPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::quant::createConvertConstPass() {
   return std::make_unique<ConvertConstPass>();
 }

diff --git a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
index faad830fd05a3..297dddd4eda44 100644
--- a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
+++ b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
@@ -134,7 +134,7 @@ void ConvertSimulatedQuantPass::runOnOperation() {
     signalPassFailure();
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::quant::createConvertSimulatedQuantPass() {
   return std::make_unique<ConvertSimulatedQuantPass>();
 }

diff --git a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
index cf9f4f1dc47dd..557611e757407 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
@@ -100,7 +100,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern<ForOp> {
 
 struct ForToWhileLoop : public SCFForToWhileLoopBase<ForToWhileLoop> {
   void runOnOperation() override {
-    FuncOp funcOp = getOperation();
+    func::FuncOp funcOp = getOperation();
     MLIRContext *ctx = funcOp.getContext();
     RewritePatternSet patterns(ctx);
     patterns.add<ForLoopLoweringPattern>(ctx);

diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
index 7e422b0b4a41c..568338e50efed 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
@@ -182,7 +182,7 @@ struct AffineOpSCFCanonicalizationPattern : public OpRewritePattern<OpTy> {
 struct SCFForLoopCanonicalization
     : public SCFForLoopCanonicalizationBase<SCFForLoopCanonicalization> {
   void runOnOperation() override {
-    FuncOp funcOp = getOperation();
+    func::FuncOp funcOp = getOperation();
     MLIRContext *ctx = funcOp.getContext();
     RewritePatternSet patterns(ctx);
     scf::populateSCFForLoopCanonicalizationPatterns(patterns);

diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
index d87bbeba77079..c441a2c71dfef 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
@@ -251,7 +251,7 @@ struct ForLoopSpecialization
 
 struct ForLoopPeeling : public SCFForLoopPeelingBase<ForLoopPeeling> {
   void runOnOperation() override {
-    FuncOp funcOp = getOperation();
+    func::FuncOp funcOp = getOperation();
     MLIRContext *ctx = funcOp.getContext();
     RewritePatternSet patterns(ctx);
     patterns.add<ForLoopPeelingPattern>(ctx, skipPartial);

diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index f1f34eaf00bee..5d69a3fa7260b 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -98,9 +98,10 @@ scf::ForOp mlir::cloneWithNewYields(OpBuilder &b, scf::ForOp loop,
 /// `outlinedFuncBody` to alloc simple canonicalizations.
 // TODO: support more than single-block regions.
 // TODO: more flexible constant handling.
-FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
-                                                 Location loc, Region &region,
-                                                 StringRef funcName) {
+FailureOr<func::FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
+                                                       Location loc,
+                                                       Region &region,
+                                                       StringRef funcName) {
   assert(!funcName.empty() && "funcName cannot be empty");
   if (!region.hasOneBlock())
     return failure();
@@ -110,7 +111,7 @@ FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
 
   // Outline before current function.
   OpBuilder::InsertionGuard g(rewriter);
-  rewriter.setInsertionPoint(region.getParentOfType<FuncOp>());
+  rewriter.setInsertionPoint(region.getParentOfType<func::FuncOp>());
 
   SetVector<Value> captures;
   getUsedValuesDefinedAbove(region, captures);
@@ -132,7 +133,8 @@ FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
   FunctionType outlinedFuncType =
       FunctionType::get(rewriter.getContext(), outlinedFuncArgTypes,
                         originalTerminator->getOperandTypes());
-  auto outlinedFunc = rewriter.create<FuncOp>(loc, funcName, outlinedFuncType);
+  auto outlinedFunc =
+      rewriter.create<func::FuncOp>(loc, funcName, outlinedFuncType);
   Block *outlinedFuncBody = outlinedFunc.addEntryBlock();
 
   // Merge blocks while replacing the original block operands.
@@ -198,12 +200,12 @@ FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
   return outlinedFunc;
 }
 
-LogicalResult mlir::outlineIfOp(RewriterBase &b, scf::IfOp ifOp, FuncOp *thenFn,
-                                StringRef thenFnName, FuncOp *elseFn,
-                                StringRef elseFnName) {
+LogicalResult mlir::outlineIfOp(RewriterBase &b, scf::IfOp ifOp,
+                                func::FuncOp *thenFn, StringRef thenFnName,
+                                func::FuncOp *elseFn, StringRef elseFnName) {
   IRRewriter rewriter(b);
   Location loc = ifOp.getLoc();
-  FailureOr<FuncOp> outlinedFuncOpOrFailure;
+  FailureOr<func::FuncOp> outlinedFuncOpOrFailure;
   if (thenFn && !ifOp.getThenRegion().empty()) {
     outlinedFuncOpOrFailure = outlineSingleBlockRegion(
         rewriter, loc, ifOp.getThenRegion(), thenFnName);
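
For downstream callers of these utilities, the only source change is the func:: qualification on the out-parameters. A minimal, illustrative C++ fragment under that assumption (it mirrors the TestSCFUtils.cpp hunk later in this diff; `rewriter`, `ifOp`, and a surrounding pass providing signalPassFailure() are assumed to be in scope, and the outlined function names are made up for the example):

  // Outline both branches of an existing scf::IfOp into fresh func::FuncOp
  // symbols, using the updated signature shown above.
  // Assumed includes: "mlir/Dialect/Func/IR/FuncOps.h",
  //                   "mlir/Dialect/SCF/Utils/Utils.h".
  mlir::func::FuncOp thenFn, elseFn;   // was: mlir::FuncOp
  if (failed(mlir::outlineIfOp(rewriter, ifOp, &thenFn, "outlined_then",
                               &elseFn, "outlined_else")))
    return signalPassFailure();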

diff --git a/mlir/lib/Dialect/SPIRV/Transforms/DecorateCompositeTypeLayoutPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/DecorateCompositeTypeLayoutPass.cpp
index 29ec0b3c07e0e..dd958e522a262 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/DecorateCompositeTypeLayoutPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/DecorateCompositeTypeLayoutPass.cpp
@@ -111,7 +111,7 @@ void DecorateSPIRVCompositeTypeLayoutPass::runOnOperation() {
   populateSPIRVLayoutInfoPatterns(patterns);
   ConversionTarget target(*(module.getContext()));
   target.addLegalDialect<spirv::SPIRVDialect>();
-  target.addLegalOp<FuncOp>();
+  target.addLegalOp<func::FuncOp>();
   target.addDynamicallyLegalOp<spirv::GlobalVariableOp>(
       [](spirv::GlobalVariableOp op) {
         return VulkanLayoutUtils::isLegalType(op.type());

diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index 824df9272e37a..5174700bebcd4 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -534,24 +534,24 @@ SPIRVTypeConverter::SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr,
 }
 
 //===----------------------------------------------------------------------===//
-// FuncOp Conversion Patterns
+// func::FuncOp Conversion Patterns
 //===----------------------------------------------------------------------===//
 
 namespace {
 /// A pattern for rewriting function signature to convert arguments of functions
 /// to be of valid SPIR-V types.
-class FuncOpConversion final : public OpConversionPattern<FuncOp> {
+class FuncOpConversion final : public OpConversionPattern<func::FuncOp> {
 public:
-  using OpConversionPattern<FuncOp>::OpConversionPattern;
+  using OpConversionPattern<func::FuncOp>::OpConversionPattern;
 
   LogicalResult
-  matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+  matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override;
 };
 } // namespace
 
 LogicalResult
-FuncOpConversion::matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+FuncOpConversion::matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
                                   ConversionPatternRewriter &rewriter) const {
   auto fnType = funcOp.getFunctionType();
   if (fnType.getNumResults() > 1)

diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 32be1db32ffa0..9f8f290559efd 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -1190,13 +1190,13 @@ void FunctionLibraryOp::build(OpBuilder &builder, OperationState &result,
       ::mlir::SymbolTable::getSymbolAttrName(), builder.getStringAttr(name)));
 }
 
-FuncOp FunctionLibraryOp::getShapeFunction(Operation *op) {
+func::FuncOp FunctionLibraryOp::getShapeFunction(Operation *op) {
   auto attr = getMapping()
                   .get(op->getName().getIdentifier())
                   .dyn_cast_or_null<FlatSymbolRefAttr>();
   if (!attr)
     return nullptr;
-  return lookupSymbol<FuncOp>(attr);
+  return lookupSymbol<func::FuncOp>(attr);
 }
 
 ParseResult FunctionLibraryOp::parse(OpAsmParser &parser,

diff --git a/mlir/lib/Dialect/Shape/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Shape/Transforms/Bufferize.cpp
index 48c82024dc7c7..b84c3b7e8a470 100644
--- a/mlir/lib/Dialect/Shape/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Shape/Transforms/Bufferize.cpp
@@ -37,6 +37,6 @@ struct ShapeBufferizePass : public ShapeBufferizeBase<ShapeBufferizePass> {
 };
 } // namespace
 
-std::unique_ptr<OperationPass<FuncOp>> mlir::createShapeBufferizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createShapeBufferizePass() {
   return std::make_unique<ShapeBufferizePass>();
 }

diff --git a/mlir/lib/Dialect/Shape/Transforms/RemoveShapeConstraints.cpp b/mlir/lib/Dialect/Shape/Transforms/RemoveShapeConstraints.cpp
index 506b260927edf..9375be93f6d87 100644
--- a/mlir/lib/Dialect/Shape/Transforms/RemoveShapeConstraints.cpp
+++ b/mlir/lib/Dialect/Shape/Transforms/RemoveShapeConstraints.cpp
@@ -60,7 +60,7 @@ void mlir::populateRemoveShapeConstraintsPatterns(RewritePatternSet &patterns) {
       patterns.getContext());
 }
 
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
 mlir::createRemoveShapeConstraintsPass() {
   return std::make_unique<RemoveShapeConstraintsPass>();
 }

diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
index 54dac3d7ec441..bea3ccf70c11b 100644
--- a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -30,25 +30,25 @@ using namespace mlir::sparse_tensor;
 void mlir::sparse_tensor::buildSparseCompiler(
     OpPassManager &pm, const SparseCompilerOptions &options) {
   // TODO(wrengr): ensure the original `pm` is for ModuleOp
-  pm.addNestedPass<FuncOp>(createLinalgGeneralizationPass());
+  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
   pm.addPass(createLinalgElementwiseOpFusionPass());
   pm.addPass(createSparsificationPass(options.sparsificationOptions()));
   pm.addPass(createSparseTensorConversionPass(
       options.sparseTensorConversionOptions()));
-  pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
-  pm.addNestedPass<FuncOp>(vector::createVectorBufferizePass());
-  pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
-  pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
-  pm.addNestedPass<FuncOp>(createConvertSCFToCFPass());
+  pm.addNestedPass<func::FuncOp>(createLinalgBufferizePass());
+  pm.addNestedPass<func::FuncOp>(vector::createVectorBufferizePass());
+  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
+  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
+  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
   pm.addPass(func::createFuncBufferizePass());
   pm.addPass(arith::createConstantBufferizePass());
-  pm.addNestedPass<FuncOp>(createTensorBufferizePass());
-  pm.addNestedPass<FuncOp>(
+  pm.addNestedPass<func::FuncOp>(createTensorBufferizePass());
+  pm.addNestedPass<func::FuncOp>(
       mlir::bufferization::createFinalizingBufferizePass());
   pm.addPass(createLowerAffinePass());
   pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
   pm.addPass(createMemRefToLLVMPass());
-  pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
+  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
   pm.addPass(createConvertFuncToLLVMPass());
   pm.addPass(createReconcileUnrealizedCastsPass());
 }
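
C++-built pipelines follow the same spelling as buildSparseCompiler above: function-level passes are now nested on func::FuncOp rather than the old FuncOp alias. A small sketch, not part of this commit (the helper name is hypothetical; createCanonicalizerPass/createCSEPass stand in for any function-level passes):

  // Assumed includes: "mlir/Dialect/Func/IR/FuncOps.h",
  //                   "mlir/Pass/PassManager.h", "mlir/Transforms/Passes.h".
  void buildExamplePipeline(mlir::OpPassManager &pm) {
    // Nested passes anchor on the fully qualified func::FuncOp now.
    pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
    pm.addNestedPass<mlir::func::FuncOp>(mlir::createCSEPass());
  }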

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 164a7fe0a49a9..7feab2c7b1230 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -57,10 +57,10 @@ static FlatSymbolRefAttr getFunc(Operation *op, StringRef name,
   MLIRContext *context = op->getContext();
   auto module = op->getParentOfType<ModuleOp>();
   auto result = SymbolRefAttr::get(context, name);
-  auto func = module.lookupSymbol<FuncOp>(result.getAttr());
+  auto func = module.lookupSymbol<func::FuncOp>(result.getAttr());
   if (!func) {
     OpBuilder moduleBuilder(module.getBodyRegion());
-    func = moduleBuilder.create<FuncOp>(
+    func = moduleBuilder.create<func::FuncOp>(
         op->getLoc(), name,
         FunctionType::get(context, operands.getTypes(), resultType));
     func.setPrivate();

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
index a43932b1cecf8..c2b098b29fbfc 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
@@ -91,7 +91,7 @@ struct SparseTensorConversionPass
     // All dynamic rules below accept new function, call, return, and tensor
     // dim and cast operations as legal output of the rewriting provided that
     // all sparse tensor types have been fully rewritten.
-    target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
       return converter.isSignatureLegal(op.getFunctionType());
     });
     target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
@@ -118,8 +118,8 @@ struct SparseTensorConversionPass
     SparseTensorConversionOptions options(
         sparseToSparseConversionStrategy(sparseToSparse));
     // Populate with rules and apply rewriting rules.
-    populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
-                                                             converter);
+    populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+                                                                   converter);
     populateCallOpTypeConversionPattern(patterns, converter);
     populateSparseTensorConversionPatterns(converter, patterns, options);
     if (failed(applyPartialConversion(getOperation(), target,

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 398c4faa07d90..237cfa9724b5c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -281,7 +281,7 @@ static bool computeIterationGraph(Merger &merger, linalg::GenericOp op,
 /// Returns true if tensor has an in-place annotation.
 static bool isInPlace(Value val) {
   if (auto arg = val.dyn_cast<BlockArgument>())
-    if (auto funcOp = dyn_cast<FuncOp>(arg.getOwner()->getParentOp()))
+    if (auto funcOp = dyn_cast<func::FuncOp>(arg.getOwner()->getParentOp()))
       if (auto attr = funcOp.getArgAttrOfType<BoolAttr>(
               arg.getArgNumber(),
               bufferization::BufferizableOpInterface::kInplaceableAttrName))

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
index bfb1bbc23df3b..fc55e44a7d373 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -279,7 +279,7 @@ void propagateShapesInRegion(Region &region) {
 struct TosaInferShapes : public TosaInferShapesBase<TosaInferShapes> {
 public:
   void runOnOperation() override {
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
 
     IRRewriter rewriter(func.getContext());
 
@@ -288,7 +288,7 @@ struct TosaInferShapes : public TosaInferShapesBase<TosaInferShapes> {
     // Insert UnrealizedConversionCasts to guarantee ReturnOp agress with
     // the FuncOp type.
     func.walk([&](func::ReturnOp op) {
-      FuncOp parent = dyn_cast<FuncOp>(op->getParentOp());
+      func::FuncOp parent = dyn_cast<func::FuncOp>(op->getParentOp());
       if (!parent)
         return;
 

diff --git a/mlir/test/IR/attribute.mlir b/mlir/test/IR/attribute.mlir
index 318168dfaa87e..30fdea00fa36a 100644
--- a/mlir/test/IR/attribute.mlir
+++ b/mlir/test/IR/attribute.mlir
@@ -490,7 +490,7 @@ func @fn() { return }
 
 // -----
 
-// expected-error @+1 {{referencing to a 'FuncOp' symbol}}
+// expected-error @+1 {{referencing to a 'func::FuncOp' symbol}}
 "test.symbol_ref_attr"() {symbol = @foo} : () -> ()
 
 // -----

diff --git a/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp b/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
index f18b77fd6321e..117f83e01f9ca 100644
--- a/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
@@ -27,7 +27,7 @@ using namespace mlir;
 namespace {
 
 struct TestAffineDataCopy
-    : public PassWrapper<TestAffineDataCopy, OperationPass<FuncOp>> {
+    : public PassWrapper<TestAffineDataCopy, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAffineDataCopy)
 
   StringRef getArgument() const final { return PASS_NAME; }

diff --git a/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp b/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
index 0c938ca86bea3..ef72792b11c6a 100644
--- a/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
@@ -23,7 +23,7 @@ using namespace mlir;
 namespace {
 struct TestAffineLoopParametricTiling
     : public PassWrapper<TestAffineLoopParametricTiling,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAffineLoopParametricTiling)
 
   StringRef getArgument() const final { return "test-affine-parametric-tile"; }
@@ -41,7 +41,7 @@ static void checkIfTilingParametersExist(ArrayRef<AffineForOp> band) {
   assert(!band.empty() && "no loops in input band");
   AffineForOp topLoop = band[0];
 
-  if (FuncOp funcOp = dyn_cast<FuncOp>(topLoop->getParentOp()))
+  if (func::FuncOp funcOp = dyn_cast<func::FuncOp>(topLoop->getParentOp()))
     assert(funcOp.getNumArguments() >= band.size() && "Too few tile sizes");
 }
 

diff --git a/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp b/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
index 0d458cbf09807..7eb345408e78c 100644
--- a/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
@@ -41,7 +41,7 @@ static llvm::cl::opt<bool> clTestLoopFusionTransformation(
 namespace {
 
 struct TestLoopFusion
-    : public PassWrapper<TestLoopFusion, OperationPass<FuncOp>> {
+    : public PassWrapper<TestLoopFusion, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLoopFusion)
 
   StringRef getArgument() const final { return "test-loop-fusion"; }

diff --git a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
index 0dc27346fb362..1705c8d238355 100644
--- a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
@@ -70,7 +70,7 @@ static llvm::cl::opt<bool> clTestVecAffineLoopNest(
 
 namespace {
 struct VectorizerTestPass
-    : public PassWrapper<VectorizerTestPass, OperationPass<FuncOp>> {
+    : public PassWrapper<VectorizerTestPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(VectorizerTestPass)
 
   static constexpr auto kTestAffineMapOpName = "test_affine_map";
@@ -241,7 +241,7 @@ void VectorizerTestPass::testVecAffineLoopNest() {
 
 void VectorizerTestPass::runOnOperation() {
   // Only support single block functions at this point.
-  FuncOp f = getOperation();
+  func::FuncOp f = getOperation();
   if (!llvm::hasSingleElement(f))
     return;
 

diff --git a/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp b/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
index ad78588b4f454..a7fde171dd854 100644
--- a/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
+++ b/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
@@ -20,13 +20,13 @@ namespace {
 /// attributes containing the results of data layout queries for operation
 /// result types.
 struct TestDataLayoutQuery
-    : public PassWrapper<TestDataLayoutQuery, OperationPass<FuncOp>> {
+    : public PassWrapper<TestDataLayoutQuery, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDataLayoutQuery)
 
   StringRef getArgument() const final { return "test-data-layout-query"; }
   StringRef getDescription() const final { return "Test data layout queries"; }
   void runOnOperation() override {
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
     Builder builder(func.getContext());
     const DataLayoutAnalysis &layouts = getAnalysis<DataLayoutAnalysis>();
 

diff --git a/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp b/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
index 54470cf7d7291..f47d05bc470fb 100644
--- a/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
+++ b/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
@@ -49,7 +49,7 @@ struct TestDecomposeCallGraphTypes
     });
     target.addDynamicallyLegalOp<func::CallOp>(
         [&](func::CallOp op) { return typeConverter.isLegal(op); });
-    target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
       return typeConverter.isSignatureLegal(op.getFunctionType());
     });
 

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
index 9340101a8bf3c..74ca6b057291b 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
@@ -30,7 +30,8 @@ using namespace mlir::linalg;
 
 namespace {
 struct TestLinalgCodegenStrategy
-    : public PassWrapper<TestLinalgCodegenStrategy, OperationPass<FuncOp>> {
+    : public PassWrapper<TestLinalgCodegenStrategy,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgCodegenStrategy)
 
   StringRef getArgument() const final { return "test-linalg-codegen-strategy"; }
@@ -222,7 +223,7 @@ void TestLinalgCodegenStrategy::runStrategy(
               .enableContractionLowering()
               .enableTransferToSCFConversion());
   // Created a nested OpPassManager and run.
-  FuncOp funcOp = getOperation();
+  func::FuncOp funcOp = getOperation();
   OpPassManager dynamicPM("func.func");
   strategy.configurePassPipeline(dynamicPM, funcOp.getContext(), runEnablePass);
   if (failed(runPipeline(dynamicPM, funcOp)))

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
index 32e428194a502..211ddcfc3730a 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
@@ -47,7 +47,8 @@ static bool setFusedOpOperandLimit(const OpResult &producer,
 
 namespace {
 struct TestLinalgElementwiseFusion
-    : public PassWrapper<TestLinalgElementwiseFusion, OperationPass<FuncOp>> {
+    : public PassWrapper<TestLinalgElementwiseFusion,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgElementwiseFusion)
 
   TestLinalgElementwiseFusion() = default;
@@ -96,7 +97,7 @@ struct TestLinalgElementwiseFusion
 
   void runOnOperation() override {
     MLIRContext *context = &this->getContext();
-    FuncOp funcOp = this->getOperation();
+    func::FuncOp funcOp = this->getOperation();
 
     if (fuseGenericOps) {
       RewritePatternSet fusionPatterns(context);

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
index 573197a584e0e..1c15d0ee6de5e 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
@@ -113,7 +113,7 @@ namespace {
 template <LinalgTilingLoopType LoopType>
 struct TestLinalgFusionTransforms
     : public PassWrapper<TestLinalgFusionTransforms<LoopType>,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgFusionTransforms)
 
   void getDependentDialects(DialectRegistry &registry) const override {
@@ -125,7 +125,7 @@ struct TestLinalgFusionTransforms
 
   void runOnOperation() override {
     MLIRContext *context = &this->getContext();
-    FuncOp funcOp = this->getOperation();
+    func::FuncOp funcOp = this->getOperation();
     RewritePatternSet fusionPatterns(context);
     Aliases alias;
     LinalgDependenceGraph dependenceGraph =
@@ -177,7 +177,7 @@ struct TestLinalgFusionTransformsTiledLoops
 };
 } // namespace
 
-static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
+static LogicalResult fuseLinalgOpsGreedily(func::FuncOp f) {
   OpBuilder b(f);
   DenseSet<Operation *> eraseSet;
 
@@ -237,7 +237,7 @@ static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
 
 namespace {
 struct TestLinalgGreedyFusion
-    : public PassWrapper<TestLinalgGreedyFusion, OperationPass<FuncOp>> {
+    : public PassWrapper<TestLinalgGreedyFusion, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgGreedyFusion)
 
   void getDependentDialects(DialectRegistry &registry) const override {
@@ -255,7 +255,7 @@ struct TestLinalgGreedyFusion
     patterns.add<ExtractSliceOfPadTensorSwapPattern>(context);
     scf::populateSCFForLoopCanonicalizationPatterns(patterns);
     FrozenRewritePatternSet frozenPatterns(std::move(patterns));
-    OpPassManager pm(FuncOp::getOperationName());
+    OpPassManager pm(func::FuncOp::getOperationName());
     pm.addPass(createLoopInvariantCodeMotionPass());
     pm.addPass(createCanonicalizerPass());
     pm.addPass(createCSEPass());
@@ -271,7 +271,7 @@ struct TestLinalgGreedyFusion
 /// testing.
 struct TestLinalgTileAndFuseSequencePass
     : public PassWrapper<TestLinalgTileAndFuseSequencePass,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestLinalgTileAndFuseSequencePass)
 
@@ -294,7 +294,7 @@ struct TestLinalgTileAndFuseSequencePass
   }
 
   void runOnOperation() override {
-    FuncOp funcOp = getOperation();
+    func::FuncOp funcOp = getOperation();
     auto &blocks = funcOp.getBody().getBlocks();
     if (!llvm::hasSingleElement(blocks)) {
       return;

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgHoisting.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgHoisting.cpp
index c79983fc0668a..40e29ff1a3046 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgHoisting.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgHoisting.cpp
@@ -21,7 +21,7 @@ using namespace mlir::linalg;
 
 namespace {
 struct TestLinalgHoisting
-    : public PassWrapper<TestLinalgHoisting, OperationPass<FuncOp>> {
+    : public PassWrapper<TestLinalgHoisting, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgHoisting)
 
   TestLinalgHoisting() = default;

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
index a8c75bee62117..a332402ef033d 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -32,7 +32,7 @@ using namespace mlir::linalg;
 
 namespace {
 struct TestLinalgTransforms
-    : public PassWrapper<TestLinalgTransforms, OperationPass<FuncOp>> {
+    : public PassWrapper<TestLinalgTransforms, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgTransforms)
 
   TestLinalgTransforms() = default;
@@ -142,7 +142,7 @@ struct TestLinalgTransforms
 };
 } // namespace
 
-static void applyPatterns(FuncOp funcOp) {
+static void applyPatterns(func::FuncOp funcOp) {
   MLIRContext *ctx = funcOp.getContext();
   RewritePatternSet patterns(ctx);
 
@@ -288,7 +288,7 @@ static void applyPatterns(FuncOp funcOp) {
 }
 
 static void fillL1TilingAndMatmulToVectorPatterns(
-    FuncOp funcOp, StringRef startMarker,
+    func::FuncOp funcOp, StringRef startMarker,
     SmallVectorImpl<RewritePatternSet> &patternsVector) {
   MLIRContext *ctx = funcOp.getContext();
   patternsVector.emplace_back(
@@ -531,7 +531,7 @@ static void fillTileFuseAndDistributePatterns(MLIRContext *context,
 }
 
 static void
-applyMatmulToVectorPatterns(FuncOp funcOp,
+applyMatmulToVectorPatterns(func::FuncOp funcOp,
                             bool testMatmulToVectorPatterns1dTiling,
                             bool testMatmulToVectorPatterns2dTiling) {
   MLIRContext *ctx = funcOp.getContext();
@@ -564,14 +564,14 @@ applyMatmulToVectorPatterns(FuncOp funcOp,
   (void)applyStagedPatterns(funcOp, frozenStage1Patterns, stage2Patterns);
 }
 
-static void applyVectorTransferForwardingPatterns(FuncOp funcOp) {
+static void applyVectorTransferForwardingPatterns(func::FuncOp funcOp) {
   RewritePatternSet forwardPattern(funcOp.getContext());
   forwardPattern.add<LinalgCopyVTRForwardingPattern>(funcOp.getContext());
   forwardPattern.add<LinalgCopyVTWForwardingPattern>(funcOp.getContext());
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(forwardPattern));
 }
 
-static void applyLinalgToVectorPatterns(FuncOp funcOp) {
+static void applyLinalgToVectorPatterns(func::FuncOp funcOp) {
   RewritePatternSet patterns(funcOp.getContext());
   auto *ctx = funcOp.getContext();
   patterns.add<LinalgVectorizationPattern>(
@@ -583,25 +583,25 @@ static void applyLinalgToVectorPatterns(FuncOp funcOp) {
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
 }
 
-static void applyPadTensorToGenericPatterns(FuncOp funcOp) {
+static void applyPadTensorToGenericPatterns(func::FuncOp funcOp) {
   RewritePatternSet patterns(funcOp.getContext());
   patterns.add<PadOpTransformationPattern>(funcOp.getContext());
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
 }
 
-static void applyGeneralizePadTensorPatterns(FuncOp funcOp) {
+static void applyGeneralizePadTensorPatterns(func::FuncOp funcOp) {
   RewritePatternSet patterns(funcOp.getContext());
   patterns.add<GeneralizePadOpPattern>(funcOp.getContext());
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
 }
 
-static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
+static void applyExtractSliceOfPadTensorSwapPattern(func::FuncOp funcOp) {
   RewritePatternSet patterns(funcOp.getContext());
   patterns.add<ExtractSliceOfPadTensorSwapPattern>(funcOp.getContext());
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
 }
 
-static void applyTilePattern(FuncOp funcOp, const std::string &loopType,
+static void applyTilePattern(func::FuncOp funcOp, const std::string &loopType,
                              ArrayRef<int64_t> tileSizes,
                              ArrayRef<int64_t> peeledLoops,
                              bool scalarizeDynamicDims) {
@@ -628,7 +628,7 @@ static void applyTilePattern(FuncOp funcOp, const std::string &loopType,
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern));
 }
 
-static void applySplitReduction(FuncOp funcOp) {
+static void applySplitReduction(func::FuncOp funcOp) {
   RewritePatternSet patterns(funcOp.getContext());
   linalg::populateSplitReductionPattern(
       patterns,
@@ -642,7 +642,7 @@ static void applySplitReduction(FuncOp funcOp) {
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
 }
 
-static void applyBubbleUpExtractSliceOpPattern(FuncOp funcOp) {
+static void applyBubbleUpExtractSliceOpPattern(func::FuncOp funcOp) {
   RewritePatternSet patterns(funcOp.getContext());
   populateBubbleUpExtractSliceOpPatterns(patterns);
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));

diff --git a/mlir/test/lib/Dialect/SCF/TestSCFUtils.cpp b/mlir/test/lib/Dialect/SCF/TestSCFUtils.cpp
index 320af3a3eefe2..f354a309cd6e6 100644
--- a/mlir/test/lib/Dialect/SCF/TestSCFUtils.cpp
+++ b/mlir/test/lib/Dialect/SCF/TestSCFUtils.cpp
@@ -26,7 +26,7 @@ using namespace mlir;
 
 namespace {
 struct TestSCFForUtilsPass
-    : public PassWrapper<TestSCFForUtilsPass, OperationPass<FuncOp>> {
+    : public PassWrapper<TestSCFForUtilsPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSCFForUtilsPass)
 
   StringRef getArgument() const final { return "test-scf-for-utils"; }
@@ -34,7 +34,7 @@ struct TestSCFForUtilsPass
   explicit TestSCFForUtilsPass() = default;
 
   void runOnOperation() override {
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
     SmallVector<scf::ForOp, 4> toErase;
 
     func.walk([&](Operation *fakeRead) {
@@ -70,7 +70,7 @@ struct TestSCFIfUtilsPass
     int count = 0;
     getOperation().walk([&](scf::IfOp ifOp) {
       auto strCount = std::to_string(count++);
-      FuncOp thenFn, elseFn;
+      func::FuncOp thenFn, elseFn;
       OpBuilder b(ifOp);
       IRRewriter rewriter(b);
       if (failed(outlineIfOp(rewriter, ifOp, &thenFn,
@@ -98,7 +98,7 @@ static const StringLiteral kTestPipeliningAnnotationIteration =
     "__test_pipelining_iteration";
 
 struct TestSCFPipeliningPass
-    : public PassWrapper<TestSCFPipeliningPass, OperationPass<FuncOp>> {
+    : public PassWrapper<TestSCFPipeliningPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSCFPipeliningPass)
 
   TestSCFPipeliningPass() = default;

diff --git a/mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp b/mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp
index 31eda369baefb..7f796aafb964a 100644
--- a/mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp
+++ b/mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp
@@ -22,7 +22,7 @@ using namespace mlir;
 namespace {
 /// A pass for testing SPIR-V op availability.
 struct PrintOpAvailability
-    : public PassWrapper<PrintOpAvailability, OperationPass<FuncOp>> {
+    : public PassWrapper<PrintOpAvailability, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PrintOpAvailability)
 
   void runOnOperation() override;
@@ -106,7 +106,7 @@ void registerPrintSpirvAvailabilityPass() {
 namespace {
 /// A pass for testing SPIR-V op availability.
 struct ConvertToTargetEnv
-    : public PassWrapper<ConvertToTargetEnv, OperationPass<FuncOp>> {
+    : public PassWrapper<ConvertToTargetEnv, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ConvertToTargetEnv)
 
   StringRef getArgument() const override { return "test-spirv-target-env"; }
@@ -149,7 +149,7 @@ struct ConvertToSubgroupBallot : public RewritePattern {
 
 void ConvertToTargetEnv::runOnOperation() {
   MLIRContext *context = &getContext();
-  FuncOp fn = getOperation();
+  func::FuncOp fn = getOperation();
 
   auto targetEnv = fn.getOperation()
                        ->getAttr(spirv::getTargetEnvAttrName())

diff --git a/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp b/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
index ca1baa4d69eec..e4708f15edc77 100644
--- a/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
+++ b/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
@@ -46,7 +46,7 @@ void ReportShapeFnPass::runOnOperation() {
       return true;
     }
     if (auto symbol = op->getAttrOfType<SymbolRefAttr>(shapeFnId)) {
-      auto fn = cast<FuncOp>(SymbolTable::lookupSymbolIn(module, symbol));
+      auto fn = cast<func::FuncOp>(SymbolTable::lookupSymbolIn(module, symbol));
       op->emitRemark() << "associated shape function: " << fn.getName();
       return true;
     }
@@ -71,7 +71,7 @@ void ReportShapeFnPass::runOnOperation() {
     }
   }
 
-  module.getBodyRegion().walk([&](FuncOp func) {
+  module.getBodyRegion().walk([&](func::FuncOp func) {
     // Skip ops in the shape function library.
     if (isa<shape::FunctionLibraryOp>(func->getParentOp()))
       return;

diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 644c6fd9e58d2..f42fb8edcc188 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -317,7 +317,7 @@ def : Pat<(OpWithEnum ConstantAttr<TestEnumAttr,
 
 def SymbolRefOp : TEST_Op<"symbol_ref_attr"> {
   let arguments = (ins
-    Confined<FlatSymbolRefAttr, [ReferToOp<"FuncOp">]>:$symbol
+    Confined<FlatSymbolRefAttr, [ReferToOp<"func::FuncOp">]>:$symbol
   );
 }
 
@@ -903,7 +903,7 @@ def OpFuncRef : TEST_Op<"op_funcref"> {
   let description = [{
     The "test.op_funcref" is a test op with a reference to a function symbol.
   }];
-  let builders = [OpBuilder<(ins "::mlir::FuncOp":$function)>];
+  let builders = [OpBuilder<(ins "::mlir::func::FuncOp":$function)>];
 }
 
 // Pattern add the argument plus a increasing static number hidden in

diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index 83e887b1f47bf..ab722f69e55c7 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -148,7 +148,7 @@ struct FolderCommutativeOp2WithConstant
 };
 
 struct TestPatternDriver
-    : public PassWrapper<TestPatternDriver, OperationPass<FuncOp>> {
+    : public PassWrapper<TestPatternDriver, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPatternDriver)
 
   StringRef getArgument() const final { return "test-patterns"; }
@@ -176,7 +176,7 @@ namespace {
 template <typename OpTy>
 static void invokeCreateWithInferredReturnType(Operation *op) {
   auto *context = op->getContext();
-  auto fop = op->getParentOfType<FuncOp>();
+  auto fop = op->getParentOfType<func::FuncOp>();
   auto location = UnknownLoc::get(context);
   OpBuilder b(op);
   b.setInsertionPointAfter(op);
@@ -215,7 +215,7 @@ static void reifyReturnShape(Operation *op) {
 }
 
 struct TestReturnTypeDriver
-    : public PassWrapper<TestReturnTypeDriver, OperationPass<FuncOp>> {
+    : public PassWrapper<TestReturnTypeDriver, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestReturnTypeDriver)
 
   void getDependentDialects(DialectRegistry &registry) const override {
@@ -257,7 +257,8 @@ struct TestReturnTypeDriver
 
 namespace {
 struct TestDerivedAttributeDriver
-    : public PassWrapper<TestDerivedAttributeDriver, OperationPass<FuncOp>> {
+    : public PassWrapper<TestDerivedAttributeDriver,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDerivedAttributeDriver)
 
   StringRef getArgument() const final { return "test-derived-attr"; }
@@ -690,8 +691,8 @@ struct TestLegalizePatternDriver
              TestNestedOpCreationUndoRewrite, TestReplaceEraseOp,
              TestCreateUnregisteredOp>(&getContext());
     patterns.add<TestDropOpSignatureConversion>(&getContext(), converter);
-    mlir::populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
-                                                                   converter);
+    mlir::populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(
+        patterns, converter);
     mlir::populateCallOpTypeConversionPattern(patterns, converter);
 
     // Define the conversion target used for the test.
@@ -706,7 +707,7 @@ struct TestLegalizePatternDriver
       return llvm::none_of(op.getOperandTypes(),
                            [](Type type) { return type.isF32(); });
     });
-    target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
       return converter.isSignatureLegal(op.getFunctionType()) &&
              converter.isLegal(&op.getBody());
     });
@@ -726,7 +727,7 @@ struct TestLegalizePatternDriver
     });
 
     // Check support for marking certain operations as recursively legal.
-    target.markOpRecursivelyLegal<FuncOp, ModuleOp>([](Operation *op) {
+    target.markOpRecursivelyLegal<func::FuncOp, ModuleOp>([](Operation *op) {
       return static_cast<bool>(
           op->getAttrOfType<UnitAttr>("test.recursively_legal"));
     });
@@ -871,7 +872,7 @@ struct TestRemapValueInRegion
 };
 
 struct TestRemappedValue
-    : public mlir::PassWrapper<TestRemappedValue, OperationPass<FuncOp>> {
+    : public mlir::PassWrapper<TestRemappedValue, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestRemappedValue)
 
   StringRef getArgument() const final { return "test-remapped-value"; }
@@ -888,7 +889,7 @@ struct TestRemappedValue
     patterns.add<TestRemapValueInRegion>(typeConverter, &getContext());
 
     mlir::ConversionTarget target(getContext());
-    target.addLegalOp<ModuleOp, FuncOp, TestReturnOp>();
+    target.addLegalOp<ModuleOp, func::FuncOp, TestReturnOp>();
 
     // Expect the type_producer/type_consumer operations to only operate on f64.
     target.addDynamicallyLegalOp<TestTypeProducerOp>(
@@ -931,7 +932,8 @@ struct RemoveTestDialectOps : public RewritePattern {
 };
 
 struct TestUnknownRootOpDriver
-    : public mlir::PassWrapper<TestUnknownRootOpDriver, OperationPass<FuncOp>> {
+    : public mlir::PassWrapper<TestUnknownRootOpDriver,
+                               OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestUnknownRootOpDriver)
 
   StringRef getArgument() const final {
@@ -1142,7 +1144,7 @@ struct TestTypeConversionDriver
              (recursiveType &&
               recursiveType.getName() == "outer_converted_type");
     });
-    target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
       return converter.isSignatureLegal(op.getFunctionType()) &&
              converter.isLegal(&op.getBody());
     });
@@ -1162,8 +1164,8 @@ struct TestTypeConversionDriver
                  TestTestSignatureConversionNoConverter>(converter,
                                                          &getContext());
     patterns.add<TestTypeConversionAnotherProducer>(&getContext());
-    mlir::populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
-                                                                   converter);
+    mlir::populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(
+        patterns, converter);
 
     if (failed(applyPartialConversion(getOperation(), target,
                                       std::move(patterns))))
@@ -1312,7 +1314,7 @@ struct TestMergeBlocksPatternDriver
     patterns.add<TestMergeBlock, TestUndoBlocksMerge, TestMergeSingleBlockOps>(
         context);
     ConversionTarget target(*context);
-    target.addLegalOp<FuncOp, ModuleOp, TerminatorOp, TestBranchOp,
+    target.addLegalOp<func::FuncOp, ModuleOp, TerminatorOp, TestBranchOp,
                       TestTypeConsumerOp, TestTypeProducerOp, TestReturnOp>();
     target.addIllegalOp<ILLegalOpF>();
 

diff --git a/mlir/test/lib/Dialect/Test/TestTraits.cpp b/mlir/test/lib/Dialect/Test/TestTraits.cpp
index 312c10cc82306..0ccffc7a47a02 100644
--- a/mlir/test/lib/Dialect/Test/TestTraits.cpp
+++ b/mlir/test/lib/Dialect/Test/TestTraits.cpp
@@ -32,7 +32,7 @@ OpFoldResult TestInvolutionTraitSuccesfulOperationFolderOp::fold(
 
 namespace {
 struct TestTraitFolder
-    : public PassWrapper<TestTraitFolder, OperationPass<FuncOp>> {
+    : public PassWrapper<TestTraitFolder, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestTraitFolder)
 
   StringRef getArgument() const final { return "test-trait-folder"; }

diff --git a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
index 1295159b11948..e7f3a40f307bb 100644
--- a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
+++ b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
@@ -178,7 +178,7 @@ ConvertTosaConv2DOp::matchAndRewrite(Operation *op,
 namespace {
 
 struct TosaTestQuantUtilAPI
-    : public PassWrapper<TosaTestQuantUtilAPI, OperationPass<FuncOp>> {
+    : public PassWrapper<TosaTestQuantUtilAPI, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TosaTestQuantUtilAPI)
 
   StringRef getArgument() const final { return PASS_NAME; }

diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index 12f74e91c29e4..7129ddda4b16f 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -29,7 +29,8 @@ using namespace mlir::vector;
 namespace {
 
 struct TestVectorToVectorLowering
-    : public PassWrapper<TestVectorToVectorLowering, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorToVectorLowering,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorToVectorLowering)
 
   TestVectorToVectorLowering() = default;
@@ -104,7 +105,8 @@ struct TestVectorToVectorLowering
 };
 
 struct TestVectorContractionLowering
-    : public PassWrapper<TestVectorContractionLowering, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorContractionLowering,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorContractionLowering)
 
   StringRef getArgument() const final {
@@ -179,7 +181,8 @@ struct TestVectorContractionLowering
 };
 
 struct TestVectorTransposeLowering
-    : public PassWrapper<TestVectorTransposeLowering, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorTransposeLowering,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorTransposeLowering)
 
   StringRef getArgument() const final {
@@ -253,7 +256,8 @@ struct TestVectorTransposeLowering
 };
 
 struct TestVectorUnrollingPatterns
-    : public PassWrapper<TestVectorUnrollingPatterns, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorUnrollingPatterns,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorUnrollingPatterns)
 
   StringRef getArgument() const final {
@@ -328,7 +332,8 @@ struct TestVectorUnrollingPatterns
 };
 
 struct TestVectorDistributePatterns
-    : public PassWrapper<TestVectorDistributePatterns, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorDistributePatterns,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorDistributePatterns)
 
   StringRef getArgument() const final {
@@ -352,7 +357,7 @@ struct TestVectorDistributePatterns
   void runOnOperation() override {
     MLIRContext *ctx = &getContext();
     RewritePatternSet patterns(ctx);
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
     func.walk([&](arith::AddFOp op) {
       OpBuilder builder(op);
       if (auto vecType = op.getType().dyn_cast<VectorType>()) {
@@ -387,7 +392,8 @@ struct TestVectorDistributePatterns
 };
 
 struct TestVectorToLoopPatterns
-    : public PassWrapper<TestVectorToLoopPatterns, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorToLoopPatterns,
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorToLoopPatterns)
 
   StringRef getArgument() const final { return "test-vector-to-forloop"; }
@@ -408,7 +414,7 @@ struct TestVectorToLoopPatterns
   void runOnOperation() override {
     MLIRContext *ctx = &getContext();
     RewritePatternSet patterns(ctx);
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
     func.walk([&](arith::AddFOp op) {
       // Check that the operation type can be broken down into a loop.
       VectorType type = op.getType().dyn_cast<VectorType>();
@@ -447,7 +453,7 @@ struct TestVectorToLoopPatterns
 
 struct TestVectorTransferUnrollingPatterns
     : public PassWrapper<TestVectorTransferUnrollingPatterns,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestVectorTransferUnrollingPatterns)
 
@@ -479,7 +485,7 @@ struct TestVectorTransferUnrollingPatterns
 
 struct TestVectorTransferFullPartialSplitPatterns
     : public PassWrapper<TestVectorTransferFullPartialSplitPatterns,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestVectorTransferFullPartialSplitPatterns)
 
@@ -519,7 +525,7 @@ struct TestVectorTransferFullPartialSplitPatterns
 };
 
 struct TestVectorTransferOpt
-    : public PassWrapper<TestVectorTransferOpt, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorTransferOpt, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorTransferOpt)
 
   StringRef getArgument() const final { return "test-vector-transferop-opt"; }
@@ -531,7 +537,7 @@ struct TestVectorTransferOpt
 
 struct TestVectorTransferLoweringPatterns
     : public PassWrapper<TestVectorTransferLoweringPatterns,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestVectorTransferLoweringPatterns)
 
@@ -554,7 +560,7 @@ struct TestVectorTransferLoweringPatterns
 
 struct TestVectorMultiReductionLoweringPatterns
     : public PassWrapper<TestVectorMultiReductionLoweringPatterns,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestVectorMultiReductionLoweringPatterns)
 
@@ -588,7 +594,7 @@ struct TestVectorMultiReductionLoweringPatterns
 
 struct TestVectorTransferCollapseInnerMostContiguousDims
     : public PassWrapper<TestVectorTransferCollapseInnerMostContiguousDims,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestVectorTransferCollapseInnerMostContiguousDims)
 
@@ -618,7 +624,7 @@ struct TestVectorTransferCollapseInnerMostContiguousDims
 
 struct TestVectorReduceToContractPatternsPatterns
     : public PassWrapper<TestVectorReduceToContractPatternsPatterns,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestVectorReduceToContractPatternsPatterns)
 
@@ -638,7 +644,7 @@ struct TestVectorReduceToContractPatternsPatterns
 
 struct TestVectorTransferDropUnitDimsPatterns
     : public PassWrapper<TestVectorTransferDropUnitDimsPatterns,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestVectorTransferDropUnitDimsPatterns)
 
@@ -657,7 +663,7 @@ struct TestVectorTransferDropUnitDimsPatterns
 
 struct TestFlattenVectorTransferPatterns
     : public PassWrapper<TestFlattenVectorTransferPatterns,
-                         OperationPass<FuncOp>> {
+                         OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
       TestFlattenVectorTransferPatterns)
 
@@ -679,7 +685,7 @@ struct TestFlattenVectorTransferPatterns
 };
 
 struct TestVectorScanLowering
-    : public PassWrapper<TestVectorScanLowering, OperationPass<FuncOp>> {
+    : public PassWrapper<TestVectorScanLowering, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorScanLowering)
 
   StringRef getArgument() const final { return "test-vector-scan-lowering"; }

diff  --git a/mlir/test/lib/IR/TestPrintInvalid.cpp b/mlir/test/lib/IR/TestPrintInvalid.cpp
index a348c6fd3003b..8697918510a58 100644
--- a/mlir/test/lib/IR/TestPrintInvalid.cpp
+++ b/mlir/test/lib/IR/TestPrintInvalid.cpp
@@ -34,7 +34,7 @@ struct TestPrintInvalidPass
   void runOnOperation() override {
     Location loc = getOperation().getLoc();
     OpBuilder builder(getOperation().getBodyRegion());
-    auto funcOp = builder.create<FuncOp>(
+    auto funcOp = builder.create<func::FuncOp>(
         loc, "test", FunctionType::get(getOperation().getContext(), {}, {}));
     funcOp.addEntryBlock();
     // The created function is invalid because there is no return op.

diff  --git a/mlir/test/lib/IR/TestSlicing.cpp b/mlir/test/lib/IR/TestSlicing.cpp
index f64156d6f216e..01335022fe011 100644
--- a/mlir/test/lib/IR/TestSlicing.cpp
+++ b/mlir/test/lib/IR/TestSlicing.cpp
@@ -25,12 +25,12 @@ using namespace mlir;
 /// with name being the function name and a `suffix`.
 static LogicalResult createBackwardSliceFunction(Operation *op,
                                                  StringRef suffix) {
-  FuncOp parentFuncOp = op->getParentOfType<FuncOp>();
+  func::FuncOp parentFuncOp = op->getParentOfType<func::FuncOp>();
   OpBuilder builder(parentFuncOp);
   Location loc = op->getLoc();
   std::string clonedFuncOpName = parentFuncOp.getName().str() + suffix.str();
-  FuncOp clonedFuncOp = builder.create<FuncOp>(loc, clonedFuncOpName,
-                                               parentFuncOp.getFunctionType());
+  func::FuncOp clonedFuncOp = builder.create<func::FuncOp>(
+      loc, clonedFuncOpName, parentFuncOp.getFunctionType());
   BlockAndValueMapping mapper;
   builder.setInsertionPointToEnd(clonedFuncOp.addEntryBlock());
   for (const auto &arg : enumerate(parentFuncOp.getArguments()))
@@ -61,7 +61,7 @@ struct SliceAnalysisTestPass
 
 void SliceAnalysisTestPass::runOnOperation() {
   ModuleOp module = getOperation();
-  auto funcOps = module.getOps<FuncOp>();
+  auto funcOps = module.getOps<func::FuncOp>();
   unsigned opNum = 0;
   for (auto funcOp : funcOps) {
     // TODO: For now this is just looking for Linalg ops. It can be generalized

diff  --git a/mlir/test/lib/IR/TestSymbolUses.cpp b/mlir/test/lib/IR/TestSymbolUses.cpp
index 34d84caeb1253..a7f4d90386269 100644
--- a/mlir/test/lib/IR/TestSymbolUses.cpp
+++ b/mlir/test/lib/IR/TestSymbolUses.cpp
@@ -24,7 +24,7 @@ struct SymbolUsesPass
     return "Test detection of symbol uses";
   }
   WalkResult operateOnSymbol(Operation *symbol, ModuleOp module,
-                             SmallVectorImpl<FuncOp> &deadFunctions) {
+                             SmallVectorImpl<func::FuncOp> &deadFunctions) {
     // Test computing uses on a non symboltable op.
     Optional<SymbolTable::UseRange> symbolUses =
         SymbolTable::getSymbolUses(symbol);
@@ -42,7 +42,7 @@ struct SymbolUsesPass
 
     // Test the functionality of symbolKnownUseEmpty.
     if (SymbolTable::symbolKnownUseEmpty(symbol, &module.getBodyRegion())) {
-      FuncOp funcSymbol = dyn_cast<FuncOp>(symbol);
+      func::FuncOp funcSymbol = dyn_cast<func::FuncOp>(symbol);
       if (funcSymbol && funcSymbol.isExternal())
         deadFunctions.push_back(funcSymbol);
 
@@ -70,7 +70,7 @@ struct SymbolUsesPass
     auto module = getOperation();
 
     // Walk nested symbols.
-    SmallVector<FuncOp, 4> deadFunctions;
+    SmallVector<func::FuncOp, 4> deadFunctions;
     module.getBodyRegion().walk([&](Operation *nestedOp) {
       if (isa<SymbolOpInterface>(nestedOp))
         return operateOnSymbol(nestedOp, module, deadFunctions);

diff  --git a/mlir/test/lib/IR/TestTypes.cpp b/mlir/test/lib/IR/TestTypes.cpp
index 944075666fd7c..2bd63a48f77d1 100644
--- a/mlir/test/lib/IR/TestTypes.cpp
+++ b/mlir/test/lib/IR/TestTypes.cpp
@@ -15,7 +15,7 @@ using namespace test;
 
 namespace {
 struct TestRecursiveTypesPass
-    : public PassWrapper<TestRecursiveTypesPass, OperationPass<FuncOp>> {
+    : public PassWrapper<TestRecursiveTypesPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestRecursiveTypesPass)
 
   LogicalResult createIRWithTypes();
@@ -25,7 +25,7 @@ struct TestRecursiveTypesPass
     return "Test support for recursive types";
   }
   void runOnOperation() override {
-    FuncOp func = getOperation();
+    func::FuncOp func = getOperation();
 
     // Just make sure recursive types are printed and parsed.
     if (func.getName() == "roundtrip")
@@ -47,7 +47,7 @@ struct TestRecursiveTypesPass
 
 LogicalResult TestRecursiveTypesPass::createIRWithTypes() {
   MLIRContext *ctx = &getContext();
-  FuncOp func = getOperation();
+  func::FuncOp func = getOperation();
   auto type = TestRecursiveType::get(ctx, "some_long_and_unique_name");
   if (failed(type.setBody(type)))
     return func.emitError("expected to be able to set the type body");

diff  --git a/mlir/test/lib/Pass/TestPassManager.cpp b/mlir/test/lib/Pass/TestPassManager.cpp
index 4f3935b2bccdc..68c67703e5942 100644
--- a/mlir/test/lib/Pass/TestPassManager.cpp
+++ b/mlir/test/lib/Pass/TestPassManager.cpp
@@ -26,7 +26,7 @@ struct TestModulePass
   }
 };
 struct TestFunctionPass
-    : public PassWrapper<TestFunctionPass, OperationPass<FuncOp>> {
+    : public PassWrapper<TestFunctionPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestFunctionPass)
 
   void runOnOperation() final {}
@@ -50,7 +50,7 @@ struct TestInterfacePass
   }
 };
 struct TestOptionsPass
-    : public PassWrapper<TestOptionsPass, OperationPass<FuncOp>> {
+    : public PassWrapper<TestOptionsPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestOptionsPass)
 
   struct Options : public PassPipelineOptions<Options> {
@@ -155,11 +155,11 @@ static void testNestedPipeline(OpPassManager &pm) {
   auto &modulePM = pm.nest<ModuleOp>();
   modulePM.addPass(std::make_unique<TestModulePass>());
   /// A nested function pass.
-  auto &nestedFunctionPM = modulePM.nest<FuncOp>();
+  auto &nestedFunctionPM = modulePM.nest<func::FuncOp>();
   nestedFunctionPM.addPass(std::make_unique<TestFunctionPass>());
 
   // Nest a function pipeline that contains a single pass.
-  auto &functionPM = pm.nest<FuncOp>();
+  auto &functionPM = pm.nest<func::FuncOp>();
   functionPM.addPass(std::make_unique<TestFunctionPass>());
 }
 

diff  --git a/mlir/test/lib/Transforms/TestControlFlowSink.cpp b/mlir/test/lib/Transforms/TestControlFlowSink.cpp
index ab2eaea535e4c..ad34b6c2ffdf8 100644
--- a/mlir/test/lib/Transforms/TestControlFlowSink.cpp
+++ b/mlir/test/lib/Transforms/TestControlFlowSink.cpp
@@ -23,7 +23,7 @@ namespace {
 /// This pass will sink ops named `test.sink_me` and tag them with an attribute
 /// `was_sunk` into the first region of `test.sink_target` ops.
 struct TestControlFlowSinkPass
-    : public PassWrapper<TestControlFlowSinkPass, OperationPass<FuncOp>> {
+    : public PassWrapper<TestControlFlowSinkPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestControlFlowSinkPass)
 
   /// Get the command-line argument of the test pass.

diff  --git a/mlir/test/lib/Transforms/TestInlining.cpp b/mlir/test/lib/Transforms/TestInlining.cpp
index 54ae63514717f..4bf85c075b5ff 100644
--- a/mlir/test/lib/Transforms/TestInlining.cpp
+++ b/mlir/test/lib/Transforms/TestInlining.cpp
@@ -24,7 +24,7 @@ using namespace mlir;
 using namespace test;
 
 namespace {
-struct Inliner : public PassWrapper<Inliner, OperationPass<FuncOp>> {
+struct Inliner : public PassWrapper<Inliner, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(Inliner)
 
   StringRef getArgument() const final { return "test-inline"; }

diff  --git a/mlir/unittests/ExecutionEngine/Invoke.cpp b/mlir/unittests/ExecutionEngine/Invoke.cpp
index 3b5f28e12cd91..91f2a8f3dc22e 100644
--- a/mlir/unittests/ExecutionEngine/Invoke.cpp
+++ b/mlir/unittests/ExecutionEngine/Invoke.cpp
@@ -47,7 +47,8 @@ static struct LLVMInitializer {
 static LogicalResult lowerToLLVMDialect(ModuleOp module) {
   PassManager pm(module.getContext());
   pm.addPass(mlir::createMemRefToLLVMPass());
-  pm.addNestedPass<FuncOp>(mlir::arith::createConvertArithmeticToLLVMPass());
+  pm.addNestedPass<func::FuncOp>(
+      mlir::arith::createConvertArithmeticToLLVMPass());
   pm.addPass(mlir::createConvertFuncToLLVMPass());
   pm.addPass(mlir::createReconcileUnrealizedCastsPass());
   return pm.run(module);

diff  --git a/mlir/unittests/Interfaces/InferTypeOpInterfaceTest.cpp b/mlir/unittests/Interfaces/InferTypeOpInterfaceTest.cpp
index 28e57008c8208..87286d6bfd42f 100644
--- a/mlir/unittests/Interfaces/InferTypeOpInterfaceTest.cpp
+++ b/mlir/unittests/Interfaces/InferTypeOpInterfaceTest.cpp
@@ -36,7 +36,7 @@ class ValueShapeRangeTest : public testing::Test {
     registry.insert<func::FuncDialect, arith::ArithmeticDialect>();
     ctx.appendDialectRegistry(registry);
     module = parseSourceString<ModuleOp>(ir, &ctx);
-    mapFn = cast<FuncOp>(module->front());
+    mapFn = cast<func::FuncOp>(module->front());
   }
 
   // Create ValueShapeRange on the arith.addi operation.
@@ -48,7 +48,7 @@ class ValueShapeRangeTest : public testing::Test {
   DialectRegistry registry;
   MLIRContext ctx;
   OwningOpRef<ModuleOp> module;
-  FuncOp mapFn;
+  func::FuncOp mapFn;
 };
 
 TEST_F(ValueShapeRangeTest, ShapesFromValues) {

diff  --git a/mlir/unittests/Pass/AnalysisManagerTest.cpp b/mlir/unittests/Pass/AnalysisManagerTest.cpp
index e6613b5ddf2be..cef8da7634c7a 100644
--- a/mlir/unittests/Pass/AnalysisManagerTest.cpp
+++ b/mlir/unittests/Pass/AnalysisManagerTest.cpp
@@ -63,9 +63,9 @@ TEST(AnalysisManagerTest, FineGrainFunctionAnalysisPreservation) {
 
   // Create a function and a module.
   OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
-  FuncOp func1 =
-      FuncOp::create(builder.getUnknownLoc(), "foo",
-                     builder.getFunctionType(llvm::None, llvm::None));
+  func::FuncOp func1 =
+      func::FuncOp::create(builder.getUnknownLoc(), "foo",
+                           builder.getFunctionType(llvm::None, llvm::None));
   func1.setPrivate();
   module->push_back(func1);
 
@@ -94,9 +94,9 @@ TEST(AnalysisManagerTest, FineGrainChildFunctionAnalysisPreservation) {
 
   // Create a function and a module.
   OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
-  FuncOp func1 =
-      FuncOp::create(builder.getUnknownLoc(), "foo",
-                     builder.getFunctionType(llvm::None, llvm::None));
+  func::FuncOp func1 =
+      func::FuncOp::create(builder.getUnknownLoc(), "foo",
+                           builder.getFunctionType(llvm::None, llvm::None));
   func1.setPrivate();
   module->push_back(func1);
 

diff  --git a/mlir/unittests/Pass/PassManagerTest.cpp b/mlir/unittests/Pass/PassManagerTest.cpp
index 0b08a848ed5d2..079be46dd1de8 100644
--- a/mlir/unittests/Pass/PassManagerTest.cpp
+++ b/mlir/unittests/Pass/PassManagerTest.cpp
@@ -23,7 +23,7 @@ namespace {
 struct GenericAnalysis {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(GenericAnalysis)
 
-  GenericAnalysis(Operation *op) : isFunc(isa<FuncOp>(op)) {}
+  GenericAnalysis(Operation *op) : isFunc(isa<func::FuncOp>(op)) {}
   const bool isFunc;
 };
 
@@ -31,17 +31,17 @@ struct GenericAnalysis {
 struct OpSpecificAnalysis {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(OpSpecificAnalysis)
 
-  OpSpecificAnalysis(FuncOp op) : isSecret(op.getName() == "secret") {}
+  OpSpecificAnalysis(func::FuncOp op) : isSecret(op.getName() == "secret") {}
   const bool isSecret;
 };
 
-/// Simple pass to annotate a FuncOp with the results of analysis.
+/// Simple pass to annotate a func::FuncOp with the results of analysis.
 struct AnnotateFunctionPass
-    : public PassWrapper<AnnotateFunctionPass, OperationPass<FuncOp>> {
+    : public PassWrapper<AnnotateFunctionPass, OperationPass<func::FuncOp>> {
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AnnotateFunctionPass)
 
   void runOnOperation() override {
-    FuncOp op = getOperation();
+    func::FuncOp op = getOperation();
     Builder builder(op->getParentOfType<ModuleOp>());
 
     auto &ga = getAnalysis<GenericAnalysis>();
@@ -60,21 +60,21 @@ TEST(PassManagerTest, OpSpecificAnalysis) {
   // Create a module with 2 functions.
   OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
   for (StringRef name : {"secret", "not_secret"}) {
-    FuncOp func =
-        FuncOp::create(builder.getUnknownLoc(), name,
-                       builder.getFunctionType(llvm::None, llvm::None));
+    auto func =
+        func::FuncOp::create(builder.getUnknownLoc(), name,
+                             builder.getFunctionType(llvm::None, llvm::None));
     func.setPrivate();
     module->push_back(func);
   }
 
   // Instantiate and run our pass.
   PassManager pm(&context);
-  pm.addNestedPass<FuncOp>(std::make_unique<AnnotateFunctionPass>());
+  pm.addNestedPass<func::FuncOp>(std::make_unique<AnnotateFunctionPass>());
   LogicalResult result = pm.run(module.get());
   EXPECT_TRUE(succeeded(result));
 
   // Verify that each function got annotated with expected attributes.
-  for (FuncOp func : module->getOps<FuncOp>()) {
+  for (func::FuncOp func : module->getOps<func::FuncOp>()) {
     ASSERT_TRUE(func->getAttr("isFunc").isa<BoolAttr>());
     EXPECT_TRUE(func->getAttr("isFunc").cast<BoolAttr>().getValue());
 

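For reference, a minimal sketch of the idiom that the updated test files above converge on. This is illustrative only and not part of the commit: the pass name "example-func-pass", the struct ExampleFunctionPass, and the helper buildExample are hypothetical placeholders, while the calls themselves (PassWrapper over OperationPass<func::FuncOp>, func::FuncOp::create, addNestedPass<func::FuncOp>) mirror the patterns in the hunks above.

#include <memory>

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"

using namespace mlir;

namespace {
// Function-scoped passes now anchor on the dialect-qualified func::FuncOp
// instead of the removed top-level FuncOp alias.
struct ExampleFunctionPass
    : public PassWrapper<ExampleFunctionPass, OperationPass<func::FuncOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ExampleFunctionPass)

  StringRef getArgument() const final { return "example-func-pass"; }
  void runOnOperation() override {
    // getOperation() yields the dialect-qualified op type.
    func::FuncOp func = getOperation();
    (void)func;
  }
};
} // namespace

// Hypothetical helper combining the other two idioms touched by the diff:
// creating a function through the func dialect and nesting a pass on
// func::FuncOp.
static void buildExample(Builder &builder, ModuleOp module, PassManager &pm) {
  func::FuncOp func =
      func::FuncOp::create(builder.getUnknownLoc(), "example",
                           builder.getFunctionType(llvm::None, llvm::None));
  func.setPrivate();
  module.push_back(func);
  pm.addNestedPass<func::FuncOp>(std::make_unique<ExampleFunctionPass>());
}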