[flang-commits] [flang] 2242611 - mlir/tblgen: use std::optional in generation

Ramkumar Ramachandra via flang-commits <flang-commits@lists.llvm.org>
Sat Dec 17 02:14:01 PST 2022


Author: Ramkumar Ramachandra
Date: 2022-12-17T11:13:26+01:00
New Revision: 22426110c5ef329d9ff40212cd8e589b5c132667

URL: https://github.com/llvm/llvm-project/commit/22426110c5ef329d9ff40212cd8e589b5c132667
DIFF: https://github.com/llvm/llvm-project/commit/22426110c5ef329d9ff40212cd8e589b5c132667.diff

LOG: mlir/tblgen: use std::optional in generation

This is part of an effort to migrate from llvm::Optional to
std::optional. This patch changes the way mlir-tblgen generates .inc
files, and modifies tests and documentation appropriately. It is a "no
compromises" patch, and doesn't leave the user with an unpleasant mix of
llvm::Optional and std::optional.
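
As a rough illustration (the accessor name below is hypothetical, not taken
from this patch), the migration is largely a mechanical swap of the optional
type in signatures, with llvm::None becoming std::nullopt at call sites:

    // Minimal sketch of the kind of change generated headers and their
    // users see after this patch; getAlignment() is an illustrative name.
    #include <cstdint>
    #include <optional>

    // Before: llvm::Optional<int64_t> getAlignment(...);
    // After:  the same accessor returns std::optional<int64_t>.
    std::optional<int64_t> getAlignment(bool hasAttr, int64_t value) {
      if (!hasAttr)
        return std::nullopt; // previously llvm::None
      return value;
    }

    int main() {
      // Callers keep the same test-then-dereference pattern.
      if (std::optional<int64_t> align = getAlignment(true, 16))
        return static_cast<int>(*align);
      return 0;
    }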

A non-trivial change has been made to ControlFlowInterfaces to split one
constructor into two, in order to work around a build failure on Windows.
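
The general shape of that split is sketched below with hypothetical names
(the real code lives in mlir/include/mlir/Interfaces/ControlFlowInterfaces.h):
one constructor taking an optional becomes two explicit overloads.

    // Illustrative only: not the actual ControlFlowInterfaces code.
    #include <optional>

    class SuccessorInfo {
    public:
      // Before: SuccessorInfo(std::optional<unsigned> index);
      // After: one overload for "no index", one for a concrete index.
      SuccessorInfo() : index(std::nullopt) {}
      explicit SuccessorInfo(unsigned idx) : index(idx) {}

      std::optional<unsigned> getIndex() const { return index; }

    private:
      std::optional<unsigned> index;
    };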

See also: https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716

Signed-off-by: Ramkumar Ramachandra <r@artagnon.com>

Differential Revision: https://reviews.llvm.org/D138934

Added: 
    

Modified: 
    flang/include/flang/Optimizer/Dialect/FIROps.td
    flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td
    flang/include/flang/Optimizer/HLFIR/HLFIROps.td
    flang/lib/Frontend/FrontendActions.cpp
    flang/lib/Lower/ConvertExprToHLFIR.cpp
    flang/lib/Optimizer/CodeGen/CodeGen.cpp
    flang/lib/Optimizer/Dialect/FIROps.cpp
    flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp
    flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
    flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
    flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
    mlir/docs/DefiningDialects/Operations.md
    mlir/docs/Dialects/Linalg/_index.md
    mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h
    mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h
    mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
    mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
    mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h
    mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
    mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h
    mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
    mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
    mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
    mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
    mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
    mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
    mlir/include/mlir/Dialect/Utils/StaticValueUtils.h
    mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
    mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
    mlir/include/mlir/IR/BuiltinOps.td
    mlir/include/mlir/IR/BuiltinTypeInterfaces.td
    mlir/include/mlir/IR/BuiltinTypes.h
    mlir/include/mlir/IR/BuiltinTypes.td
    mlir/include/mlir/IR/Diagnostics.h
    mlir/include/mlir/IR/Dialect.h
    mlir/include/mlir/IR/EnumAttr.td
    mlir/include/mlir/IR/OpBase.td
    mlir/include/mlir/Interfaces/ControlFlowInterfaces.h
    mlir/include/mlir/Interfaces/ControlFlowInterfaces.td
    mlir/include/mlir/Interfaces/InferTypeOpInterface.h
    mlir/include/mlir/Interfaces/InferTypeOpInterface.td
    mlir/include/mlir/Interfaces/LoopLikeInterface.td
    mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td
    mlir/include/mlir/Interfaces/VectorInterfaces.td
    mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp
    mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
    mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp
    mlir/lib/AsmParser/Parser.cpp
    mlir/lib/CAPI/Interfaces/Interfaces.cpp
    mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
    mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
    mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
    mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
    mlir/lib/Conversion/PDLToPDLInterp/Predicate.h
    mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
    mlir/lib/Dialect/Affine/IR/AffineOps.cpp
    mlir/lib/Dialect/Arith/IR/ArithOps.cpp
    mlir/lib/Dialect/Async/IR/Async.cpp
    mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
    mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
    mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
    mlir/lib/Dialect/EmitC/IR/EmitC.cpp
    mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
    mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
    mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
    mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
    mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
    mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp
    mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
    mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
    mlir/lib/Dialect/PDL/IR/PDL.cpp
    mlir/lib/Dialect/SCF/IR/SCF.cpp
    mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
    mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
    mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
    mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
    mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
    mlir/lib/Dialect/Shape/IR/Shape.cpp
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
    mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
    mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
    mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
    mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
    mlir/lib/Dialect/Transform/IR/TransformOps.cpp
    mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
    mlir/lib/Dialect/Utils/StaticValueUtils.cpp
    mlir/lib/Dialect/Vector/IR/VectorOps.cpp
    mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
    mlir/lib/IR/BuiltinDialect.cpp
    mlir/lib/IR/BuiltinTypes.cpp
    mlir/lib/IR/Dialect.cpp
    mlir/lib/Interfaces/ControlFlowInterfaces.cpp
    mlir/lib/Interfaces/InferTypeOpInterface.cpp
    mlir/lib/Rewrite/ByteCode.cpp
    mlir/lib/Target/LLVMIR/DebugImporter.cpp
    mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
    mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
    mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp
    mlir/test/lib/Dialect/Test/TestDialect.cpp
    mlir/test/lib/Dialect/Test/TestDialect.td
    mlir/test/lib/Dialect/Test/TestOps.td
    mlir/test/mlir-tblgen/enums-gen.td
    mlir/test/mlir-tblgen/op-attribute.td
    mlir/test/mlir-tblgen/op-decl-and-defs.td
    mlir/test/python/python_test_ops.td
    mlir/tools/mlir-tblgen/DialectGen.cpp
    mlir/tools/mlir-tblgen/EnumsGen.cpp
    mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
    mlir/tools/mlir-tblgen/OpFormatGen.cpp
    mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
    mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp
    mlir/unittests/TableGen/EnumsGenTest.cpp

Removed: 
    


################################################################################
diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td
index cbe5940f07e68..407a42599a100 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROps.td
+++ b/flang/include/flang/Optimizer/Dialect/FIROps.td
@@ -483,15 +483,15 @@ class fir_SwitchTerminatorOp<string mnemonic, list<Trait> traits = []> :
     // The number of blocks that may be branched to
     unsigned getNumDest() { return (*this)->getNumSuccessors(); }
 
-    llvm::Optional<mlir::OperandRange> getCompareOperands(unsigned cond);
-    llvm::Optional<llvm::ArrayRef<mlir::Value>> getCompareOperands(
+    std::optional<mlir::OperandRange> getCompareOperands(unsigned cond);
+    std::optional<llvm::ArrayRef<mlir::Value>> getCompareOperands(
         llvm::ArrayRef<mlir::Value> operands, unsigned cond);
-    llvm::Optional<mlir::ValueRange> getCompareOperands(
+    std::optional<mlir::ValueRange> getCompareOperands(
         mlir::ValueRange operands, unsigned cond);
 
-    llvm::Optional<llvm::ArrayRef<mlir::Value>> getSuccessorOperands(
+    std::optional<llvm::ArrayRef<mlir::Value>> getSuccessorOperands(
         llvm::ArrayRef<mlir::Value> operands, unsigned cond);
-    llvm::Optional<mlir::ValueRange> getSuccessorOperands(
+    std::optional<mlir::ValueRange> getSuccessorOperands(
         mlir::ValueRange operands, unsigned cond);
 
     // Helper function to deal with Optional operand forms
@@ -2426,16 +2426,16 @@ def fir_StringLitOp : fir_Op<"string_lit", [NoMemoryEffect]> {
   let builders = [
     OpBuilder<(ins "fir::CharacterType":$inType,
       "llvm::StringRef":$value,
-      CArg<"llvm::Optional<int64_t>", "{}">:$len)>,
+      CArg<"std::optional<int64_t>", "{}">:$len)>,
     OpBuilder<(ins "fir::CharacterType":$inType,
       "llvm::ArrayRef<char>":$xlist,
-      CArg<"llvm::Optional<int64_t>", "{}">:$len)>,
+      CArg<"std::optional<int64_t>", "{}">:$len)>,
     OpBuilder<(ins "fir::CharacterType":$inType,
       "llvm::ArrayRef<char16_t>":$xlist,
-      CArg<"llvm::Optional<int64_t>", "{}">:$len)>,
+      CArg<"std::optional<int64_t>", "{}">:$len)>,
     OpBuilder<(ins "fir::CharacterType":$inType,
       "llvm::ArrayRef<char32_t>":$xlist,
-      CArg<"llvm::Optional<int64_t>", "{}">:$len)>];
+      CArg<"std::optional<int64_t>", "{}">:$len)>];
 
   let extraClassDeclaration = [{
     static constexpr const char *size() { return "size"; }

diff --git a/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td b/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td
index ed6b3bb2c7c99..e4738d962e03f 100644
--- a/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td
+++ b/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td
@@ -36,7 +36,7 @@ def fir_FortranVariableOpInterface : OpInterface<"FortranVariableOpInterface"> {
     >,
     InterfaceMethod<
       /*desc=*/"Get Fortran attributes",
-      /*retTy=*/"llvm::Optional<fir::FortranVariableFlagsEnum>",
+      /*retTy=*/"std::optional<fir::FortranVariableFlagsEnum>",
       /*methodName=*/"getFortranAttrs",
       /*args=*/(ins),
       /*methodBody=*/[{}],
@@ -91,7 +91,7 @@ def fir_FortranVariableOpInterface : OpInterface<"FortranVariableOpInterface"> {
     }
 
     /// Return the rank of the entity if it is known at compile time.
-    llvm::Optional<unsigned> getRank() {
+    std::optional<unsigned> getRank() {
       if (auto sequenceType =
             getElementOrSequenceType().dyn_cast<fir::SequenceType>()) {
         if (sequenceType.hasUnknownShape())

diff --git a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
index 93e3383e27124..7df8d3db74961 100644
--- a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
+++ b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
@@ -202,7 +202,7 @@ def hlfir_DesignateOp : hlfir_Op<"designate", [AttrSizedOperandSegments,
       "llvm::StringRef":$component, "mlir::Value":$component_shape,
       "llvm::ArrayRef<std::variant<mlir::Value, std::tuple<mlir::Value, mlir::Value, mlir::Value>>>":$subscripts,
       CArg<"mlir::ValueRange", "{}">:$substring,
-      CArg<"llvm::Optional<bool>", "{}">:$complex_part,
+      CArg<"std::optional<bool>", "{}">:$complex_part,
       CArg<"mlir::Value", "{}">:$shape, CArg<"mlir::ValueRange", "{}">:$typeparams,
       CArg<"fir::FortranVariableFlagsAttr", "{}">:$fortran_attrs)>,
 

diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index fe4dd1b5e335c..6c457857ce999 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -552,7 +552,7 @@ void CodeGenAction::generateLLVMIR() {
   }
 
   // Translate to LLVM IR
-  llvm::Optional<llvm::StringRef> moduleName = mlirModule->getName();
+  std::optional<llvm::StringRef> moduleName = mlirModule->getName();
   llvmModule = mlir::translateModuleToLLVMIR(
       *mlirModule, *llvmCtx, moduleName ? *moduleName : "FIRModule");
 

diff --git a/flang/lib/Lower/ConvertExprToHLFIR.cpp b/flang/lib/Lower/ConvertExprToHLFIR.cpp
index b26c499cb5172..fcad425018cfb 100644
--- a/flang/lib/Lower/ConvertExprToHLFIR.cpp
+++ b/flang/lib/Lower/ConvertExprToHLFIR.cpp
@@ -99,7 +99,7 @@ class HlfirDesignatorBuilder {
     else
       resultType = fir::ReferenceType::get(resultValueType);
 
-    llvm::Optional<bool> complexPart;
+    std::optional<bool> complexPart;
     llvm::SmallVector<mlir::Value> substring;
     auto designate = getBuilder().create<hlfir::DesignateOp>(
         getLoc(), resultType, partInfo.base.getBase(), "",

diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index b195ad0992ac1..49d711f69f413 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -1618,7 +1618,7 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
                   mlir::Value base, mlir::Value outerOffset,
                   mlir::ValueRange cstInteriorIndices,
                   mlir::ValueRange componentIndices,
-                  llvm::Optional<mlir::Value> substringOffset) const {
+                  std::optional<mlir::Value> substringOffset) const {
     llvm::SmallVector<mlir::LLVM::GEPArg> gepArgs{outerOffset};
     mlir::Type resultTy =
         base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
@@ -1907,7 +1907,7 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
     if (hasSlice || hasSubcomp || hasSubstr) {
       // Shift the base address.
       llvm::SmallVector<mlir::Value> fieldIndices;
-      llvm::Optional<mlir::Value> substringOffset;
+      std::optional<mlir::Value> substringOffset;
       if (hasSubcomp)
         getSubcomponentIndices(xbox, xbox.getMemref(), operands, fieldIndices);
       if (hasSubstr)
@@ -2047,7 +2047,7 @@ struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
       base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base);
 
       llvm::SmallVector<mlir::Value> fieldIndices;
-      llvm::Optional<mlir::Value> substringOffset;
+      std::optional<mlir::Value> substringOffset;
       if (!rebox.getSubcomponent().empty())
         getSubcomponentIndices(rebox, rebox.getBox(), operands, fieldIndices);
       if (!rebox.getSubstr().empty())
@@ -2725,7 +2725,7 @@ struct CoordinateOpConversion
       if (hasKnownShape && hasSubdimension) {
         offs.push_back(0);
       }
-      llvm::Optional<int> dims;
+      std::optional<int> dims;
       llvm::SmallVector<mlir::Value> arrIdx;
       for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) {
         mlir::Value nxtOpnd = operands[i];
@@ -2930,7 +2930,7 @@ struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
   // TODO: String comparaison should be avoided. Replace linkName with an
   // enumeration.
   mlir::LLVM::Linkage
-  convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const {
+  convertLinkage(std::optional<llvm::StringRef> optLinkage) const {
     if (optLinkage) {
       auto name = *optLinkage;
       if (name == "internal")
@@ -3002,7 +3002,7 @@ struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
 };
 
 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
-                        llvm::Optional<mlir::ValueRange> destOps,
+                        std::optional<mlir::ValueRange> destOps,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::Block *newBlock) {
   if (destOps)
@@ -3013,7 +3013,7 @@ static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
 }
 
 template <typename A, typename B>
-static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
+static void genBrOp(A caseOp, mlir::Block *dest, std::optional<B> destOps,
                     mlir::ConversionPatternRewriter &rewriter) {
   if (destOps)
     rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, *destOps, dest);
@@ -3023,7 +3023,7 @@ static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
 
 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                               mlir::Block *dest,
-                              llvm::Optional<mlir::ValueRange> destOps,
+                              std::optional<mlir::ValueRange> destOps,
                               mlir::ConversionPatternRewriter &rewriter) {
   auto *thisBlock = rewriter.getInsertionBlock();
   auto *newBlock = createBlock(rewriter, dest);
@@ -3069,9 +3069,9 @@ struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
     auto loc = caseOp.getLoc();
     for (unsigned t = 0; t != conds; ++t) {
       mlir::Block *dest = caseOp.getSuccessor(t);
-      llvm::Optional<mlir::ValueRange> destOps =
+      std::optional<mlir::ValueRange> destOps =
           caseOp.getSuccessorOperands(adaptor.getOperands(), t);
-      llvm::Optional<mlir::ValueRange> cmpOps =
+      std::optional<mlir::ValueRange> cmpOps =
           *caseOp.getCompareOperands(adaptor.getOperands(), t);
       mlir::Value caseArg = *(cmpOps.value().begin());
       mlir::Attribute attr = cases[t];

diff --git a/flang/lib/Optimizer/Dialect/FIROps.cpp b/flang/lib/Optimizer/Dialect/FIROps.cpp
index b2b094a126a63..7bee0a229ea3a 100644
--- a/flang/lib/Optimizer/Dialect/FIROps.cpp
+++ b/flang/lib/Optimizer/Dialect/FIROps.cpp
@@ -2529,11 +2529,11 @@ getMutableSuccessorOperands(unsigned pos, mlir::MutableOperandRange operands,
       mlir::MutableOperandRange::OperandSegment(pos, targetOffsetAttr));
 }
 
-llvm::Optional<mlir::OperandRange> fir::SelectOp::getCompareOperands(unsigned) {
+std::optional<mlir::OperandRange> fir::SelectOp::getCompareOperands(unsigned) {
   return {};
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectOp::getCompareOperands(llvm::ArrayRef<mlir::Value>, unsigned) {
   return {};
 }
@@ -2543,7 +2543,7 @@ mlir::SuccessorOperands fir::SelectOp::getSuccessorOperands(unsigned oper) {
       oper, getTargetArgsMutable(), getTargetOffsetAttr()));
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
                                     unsigned oper) {
   auto a =
@@ -2553,7 +2553,7 @@ fir::SelectOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
   return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
 }
 
-llvm::Optional<mlir::ValueRange>
+std::optional<mlir::ValueRange>
 fir::SelectOp::getSuccessorOperands(mlir::ValueRange operands, unsigned oper) {
   auto a =
       (*this)->getAttrOfType<mlir::DenseI32ArrayAttr>(getTargetOffsetAttr());
@@ -2572,14 +2572,14 @@ unsigned fir::SelectOp::targetOffsetSize() {
 // SelectCaseOp
 //===----------------------------------------------------------------------===//
 
-llvm::Optional<mlir::OperandRange>
+std::optional<mlir::OperandRange>
 fir::SelectCaseOp::getCompareOperands(unsigned cond) {
   auto a =
       (*this)->getAttrOfType<mlir::DenseI32ArrayAttr>(getCompareOffsetAttr());
   return {getSubOperands(cond, getCompareArgs(), a)};
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectCaseOp::getCompareOperands(llvm::ArrayRef<mlir::Value> operands,
                                       unsigned cond) {
   auto a =
@@ -2589,7 +2589,7 @@ fir::SelectCaseOp::getCompareOperands(llvm::ArrayRef<mlir::Value> operands,
   return {getSubOperands(cond, getSubOperands(1, operands, segments), a)};
 }
 
-llvm::Optional<mlir::ValueRange>
+std::optional<mlir::ValueRange>
 fir::SelectCaseOp::getCompareOperands(mlir::ValueRange operands,
                                       unsigned cond) {
   auto a =
@@ -2604,7 +2604,7 @@ mlir::SuccessorOperands fir::SelectCaseOp::getSuccessorOperands(unsigned oper) {
       oper, getTargetArgsMutable(), getTargetOffsetAttr()));
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectCaseOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
                                         unsigned oper) {
   auto a =
@@ -2614,7 +2614,7 @@ fir::SelectCaseOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
   return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
 }
 
-llvm::Optional<mlir::ValueRange>
+std::optional<mlir::ValueRange>
 fir::SelectCaseOp::getSuccessorOperands(mlir::ValueRange operands,
                                         unsigned oper) {
   auto a =
@@ -2864,12 +2864,12 @@ void fir::SelectRankOp::print(mlir::OpAsmPrinter &p) {
   printIntegralSwitchTerminator(*this, p);
 }
 
-llvm::Optional<mlir::OperandRange>
+std::optional<mlir::OperandRange>
 fir::SelectRankOp::getCompareOperands(unsigned) {
   return {};
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectRankOp::getCompareOperands(llvm::ArrayRef<mlir::Value>, unsigned) {
   return {};
 }
@@ -2879,7 +2879,7 @@ mlir::SuccessorOperands fir::SelectRankOp::getSuccessorOperands(unsigned oper) {
       oper, getTargetArgsMutable(), getTargetOffsetAttr()));
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectRankOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
                                         unsigned oper) {
   auto a =
@@ -2889,7 +2889,7 @@ fir::SelectRankOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
   return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
 }
 
-llvm::Optional<mlir::ValueRange>
+std::optional<mlir::ValueRange>
 fir::SelectRankOp::getSuccessorOperands(mlir::ValueRange operands,
                                         unsigned oper) {
   auto a =
@@ -2909,12 +2909,12 @@ unsigned fir::SelectRankOp::targetOffsetSize() {
 // SelectTypeOp
 //===----------------------------------------------------------------------===//
 
-llvm::Optional<mlir::OperandRange>
+std::optional<mlir::OperandRange>
 fir::SelectTypeOp::getCompareOperands(unsigned) {
   return {};
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectTypeOp::getCompareOperands(llvm::ArrayRef<mlir::Value>, unsigned) {
   return {};
 }
@@ -2924,7 +2924,7 @@ mlir::SuccessorOperands fir::SelectTypeOp::getSuccessorOperands(unsigned oper) {
       oper, getTargetArgsMutable(), getTargetOffsetAttr()));
 }
 
-llvm::Optional<llvm::ArrayRef<mlir::Value>>
+std::optional<llvm::ArrayRef<mlir::Value>>
 fir::SelectTypeOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
                                         unsigned oper) {
   auto a =
@@ -2934,7 +2934,7 @@ fir::SelectTypeOp::getSuccessorOperands(llvm::ArrayRef<mlir::Value> operands,
   return {getSubOperands(oper, getSubOperands(2, operands, segments), a)};
 }
 
-llvm::Optional<mlir::ValueRange>
+std::optional<mlir::ValueRange>
 fir::SelectTypeOp::getSuccessorOperands(mlir::ValueRange operands,
                                         unsigned oper) {
   auto a =
@@ -3225,7 +3225,7 @@ mkNamedIntegerAttr(mlir::OpBuilder &builder, llvm::StringRef name, int64_t v) {
 void fir::StringLitOp::build(mlir::OpBuilder &builder,
                              mlir::OperationState &result,
                              fir::CharacterType inType, llvm::StringRef val,
-                             llvm::Optional<int64_t> len) {
+                             std::optional<int64_t> len) {
   auto valAttr = builder.getNamedAttr(value(), builder.getStringAttr(val));
   int64_t length = len ? *len : inType.getLen();
   auto lenAttr = mkNamedIntegerAttr(builder, size(), length);
@@ -3247,7 +3247,7 @@ void fir::StringLitOp::build(mlir::OpBuilder &builder,
                              mlir::OperationState &result,
                              fir::CharacterType inType,
                              llvm::ArrayRef<char> vlist,
-                             llvm::Optional<std::int64_t> len) {
+                             std::optional<std::int64_t> len) {
   auto valAttr =
       builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist));
   std::int64_t length = len ? *len : inType.getLen();
@@ -3260,7 +3260,7 @@ void fir::StringLitOp::build(mlir::OpBuilder &builder,
                              mlir::OperationState &result,
                              fir::CharacterType inType,
                              llvm::ArrayRef<char16_t> vlist,
-                             llvm::Optional<std::int64_t> len) {
+                             std::optional<std::int64_t> len) {
   auto valAttr =
       builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist));
   std::int64_t length = len ? *len : inType.getLen();
@@ -3273,7 +3273,7 @@ void fir::StringLitOp::build(mlir::OpBuilder &builder,
                              mlir::OperationState &result,
                              fir::CharacterType inType,
                              llvm::ArrayRef<char32_t> vlist,
-                             llvm::Optional<std::int64_t> len) {
+                             std::optional<std::int64_t> len) {
   auto valAttr =
       builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist));
   std::int64_t length = len ? *len : inType.getLen();

diff --git a/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp b/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp
index b12904bc4713a..94f1689dfb058 100644
--- a/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp
+++ b/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp
@@ -57,7 +57,7 @@ fir::FortranVariableOpInterface::verifyDeclareLikeOpImpl(mlir::Value memref) {
         shapeRank = shape.getType().cast<fir::ShiftType>().getRank();
       }
 
-      llvm::Optional<unsigned> rank = getRank();
+      std::optional<unsigned> rank = getRank();
       if (!rank || *rank != shapeRank)
         return emitOpError("has conflicting shape and base operand ranks");
     } else if (!sourceIsBox) {

diff --git a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
index e537e7cfe3a35..95c4a5306d47c 100644
--- a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
+++ b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
@@ -87,7 +87,7 @@ void hlfir::DesignateOp::build(
     mlir::OpBuilder &builder, mlir::OperationState &result,
     mlir::Type result_type, mlir::Value memref, llvm::StringRef component,
     mlir::Value component_shape, llvm::ArrayRef<Subscript> subscripts,
-    mlir::ValueRange substring, llvm::Optional<bool> complex_part,
+    mlir::ValueRange substring, std::optional<bool> complex_part,
     mlir::Value shape, mlir::ValueRange typeparams,
     fir::FortranVariableFlagsAttr fortran_attrs) {
   auto componentAttr =

diff --git a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
index 66bfd2ff4a203..d334866939e06 100644
--- a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
+++ b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
@@ -397,7 +397,7 @@ class CfgSelectTypeConv : public OpConversionPattern<fir::SelectTypeOp> {
 
     for (unsigned idx : orderedTypeGuards) {
       auto *dest = selectType.getSuccessor(idx);
-      llvm::Optional<mlir::ValueRange> destOps =
+      std::optional<mlir::ValueRange> destOps =
           selectType.getSuccessorOperands(operands, idx);
       if (typeGuards[idx].dyn_cast<mlir::UnitAttr>())
         rewriter.replaceOpWithNewOp<mlir::cf::BranchOp>(selectType, dest);
@@ -470,12 +470,13 @@ class CfgSelectTypeConv : public OpConversionPattern<fir::SelectTypeOp> {
     return 0;
   }
 
-  mlir::LogicalResult
-  genTypeLadderStep(mlir::Location loc, mlir::Value selector,
-                    mlir::Attribute attr, mlir::Block *dest,
-                    llvm::Optional<mlir::ValueRange> destOps,
-                    mlir::ModuleOp mod, mlir::PatternRewriter &rewriter,
-                    fir::KindMapping &kindMap) const {
+  mlir::LogicalResult genTypeLadderStep(mlir::Location loc,
+                                        mlir::Value selector,
+                                        mlir::Attribute attr, mlir::Block *dest,
+                                        std::optional<mlir::ValueRange> destOps,
+                                        mlir::ModuleOp mod,
+                                        mlir::PatternRewriter &rewriter,
+                                        fir::KindMapping &kindMap) const {
     mlir::Value cmp;
     // TYPE IS type guard comparison are all done inlined.
     if (auto a = attr.dyn_cast<fir::ExactTypeAttr>()) {

diff --git a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
index ed4e86dd4c352..f0e201402fa79 100644
--- a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
+++ b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp
@@ -116,7 +116,7 @@ class AllocaOpConversion : public mlir::OpRewritePattern<fir::AllocaOp> {
     auto loc = alloca.getLoc();
     mlir::Type varTy = alloca.getInType();
     auto unpackName =
-        [](llvm::Optional<llvm::StringRef> opt) -> llvm::StringRef {
+        [](std::optional<llvm::StringRef> opt) -> llvm::StringRef {
       if (opt)
         return *opt;
       return {};

diff --git a/mlir/docs/DefiningDialects/Operations.md b/mlir/docs/DefiningDialects/Operations.md
index 2fbc7362b9e06..4b22084822495 100644
--- a/mlir/docs/DefiningDialects/Operations.md
+++ b/mlir/docs/DefiningDialects/Operations.md
@@ -1341,9 +1341,9 @@ enum class MyIntEnum : uint32_t {
   Case20 = 20,
 };
 
-llvm::Optional<MyIntEnum> symbolizeMyIntEnum(uint32_t);
+std::optional<MyIntEnum> symbolizeMyIntEnum(uint32_t);
 llvm::StringRef ConvertToString(MyIntEnum);
-llvm::Optional<MyIntEnum> ConvertToEnum(llvm::StringRef);
+std::optional<MyIntEnum> ConvertToEnum(llvm::StringRef);
 inline constexpr unsigned getMaxEnumValForMyIntEnum() {
   return 20;
 }
@@ -1387,13 +1387,13 @@ llvm::StringRef ConvertToString(MyIntEnum val) {
   return "";
 }
 
-llvm::Optional<MyIntEnum> ConvertToEnum(llvm::StringRef str) {
-  return llvm::StringSwitch<llvm::Optional<MyIntEnum>>(str)
+std::optional<MyIntEnum> ConvertToEnum(llvm::StringRef str) {
+  return llvm::StringSwitch<std::optional<MyIntEnum>>(str)
       .Case("Case15", MyIntEnum::Case15)
       .Case("Case20", MyIntEnum::Case20)
       .Default(std::nullopt);
 }
-llvm::Optional<MyIntEnum> symbolizeMyIntEnum(uint32_t value) {
+std::optional<MyIntEnum> symbolizeMyIntEnum(uint32_t value) {
   switch (value) {
   case 15: return MyIntEnum::Case15;
   case 20: return MyIntEnum::Case20;
@@ -1430,9 +1430,9 @@ enum class MyBitEnum : uint32_t {
   Bit3 = 8,
 };
 
-llvm::Optional<MyBitEnum> symbolizeMyBitEnum(uint32_t);
+std::optional<MyBitEnum> symbolizeMyBitEnum(uint32_t);
 std::string stringifyMyBitEnum(MyBitEnum);
-llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef);
+std::optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef);
 
 inline constexpr MyBitEnum operator|(MyBitEnum a, MyBitEnum b) {
   return static_cast<MyBitEnum>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
@@ -1462,10 +1462,10 @@ inline std::string stringifyEnum(MyBitEnum enumValue) {
 }
 
 template <typename EnumType>
-::llvm::Optional<EnumType> symbolizeEnum(::llvm::StringRef);
+::std::optional<EnumType> symbolizeEnum(::llvm::StringRef);
 
 template <>
-inline ::llvm::Optional<MyBitEnum> symbolizeEnum<MyBitEnum>(::llvm::StringRef str) {
+inline ::std::optional<MyBitEnum> symbolizeEnum<MyBitEnum>(::llvm::StringRef str) {
   return symbolizeMyBitEnum(str);
 }
 
@@ -1506,7 +1506,7 @@ std::string stringifyMyBitEnum(MyBitEnum symbol) {
   return llvm::join(strs, "|");
 }
 
-llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
+std::optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
   // Special case for all bits unset.
   if (str == "None") return MyBitEnum::None;
 
@@ -1515,7 +1515,7 @@ llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
 
   uint32_t val = 0;
   for (auto symbol : symbols) {
-    auto bit = llvm::StringSwitch<llvm::Optional<uint32_t>>(symbol)
+    auto bit = llvm::StringSwitch<std::optional<uint32_t>>(symbol)
       .Case("tagged", 1)
       .Case("Bit1", 2)
       .Case("Bit2", 4)
@@ -1526,7 +1526,7 @@ llvm::Optional<MyBitEnum> symbolizeMyBitEnum(llvm::StringRef str) {
   return static_cast<MyBitEnum>(val);
 }
 
-llvm::Optional<MyBitEnum> symbolizeMyBitEnum(uint32_t value) {
+std::optional<MyBitEnum> symbolizeMyBitEnum(uint32_t value) {
   // Special case for all bits unset.
   if (value == 0) return MyBitEnum::None;
 

diff --git a/mlir/docs/Dialects/Linalg/_index.md b/mlir/docs/Dialects/Linalg/_index.md
index dbf008d0bb0c2..c8fa0374334ec 100644
--- a/mlir/docs/Dialects/Linalg/_index.md
+++ b/mlir/docs/Dialects/Linalg/_index.md
@@ -631,14 +631,14 @@ def batchmatmulOp : LinalgNamedStructured_Op<"batchmatmul", [
 When `mlir-linalg-ods-gen -gen-impl=1` is called, the following C++ is produced:
 
 ```
-llvm::Optional<SmallVector<StringRef, 8>> batchmatmul::referenceIterators() {
+std::optional<SmallVector<StringRef, 8>> batchmatmul::referenceIterators() {
   return SmallVector<StringRef, 8>{
     getParallelIteratorTypeName(),
     getParallelIteratorTypeName(),
     getParallelIteratorTypeName(),
     getReductionIteratorTypeName() };
 }
-llvm::Optional<SmallVector<AffineMap, 8>> batchmatmul::referenceIndexingMaps() {
+std::optional<SmallVector<AffineMap, 8>> batchmatmul::referenceIndexingMaps() {
   MLIRContext *context = getContext();
   AffineExpr d0, d1, d2, d3;
   bindDims(context, d0, d1, d2, d3);

diff --git a/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h b/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h
index a178c6024fdee..b546bc6b184bc 100644
--- a/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h
+++ b/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h
@@ -243,7 +243,7 @@ class AbstractSparseDataFlowAnalysis : public DataFlowAnalysis {
   /// regions or the parent operation itself, and set either the argument or
   /// parent result lattices.
   void visitRegionSuccessors(ProgramPoint point, RegionBranchOpInterface branch,
-                             Optional<unsigned> successorIndex,
+                             std::optional<unsigned> successorIndex,
                              ArrayRef<AbstractSparseLattice *> lattices);
 };
 

diff --git a/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h b/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h
index 25cf034ea1e16..8684d353aef86 100644
--- a/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h
+++ b/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h
@@ -61,7 +61,7 @@ struct AllocationOpLLVMLowering : public ConvertToLLVMPattern {
   int64_t alignedAllocationGetAlignment(ConversionPatternRewriter &rewriter,
                                         Location loc, OpType op,
                                         const DataLayout *defaultLayout) const {
-    if (Optional<uint64_t> alignment = op.getAlignment())
+    if (std::optional<uint64_t> alignment = op.getAlignment())
       return *alignment;
 
     // Whenever we don't have alignment set, we will use an alignment

diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
index 46b3c90a1c1dd..d19e4d233c952 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -708,7 +708,7 @@ def AffineParallelOp : Affine_Op<"parallel",
     unsigned getNumDims();
 
     /// Get ranges as constants, may fail in dynamic case.
-    Optional<SmallVector<int64_t, 8>> getConstantRanges();
+    std::optional<SmallVector<int64_t, 8>> getConstantRanges();
 
     Block *getBody();
     OpBuilder getBodyBuilder();

diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
index ce61890f7784d..b531b5ed9f288 100644
--- a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
+++ b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
@@ -268,7 +268,7 @@ def Arith_AddUIExtendedOp : Arith_Op<"addui_extended", [Pure, Commutative,
   let hasCanonicalizer = 1;
 
   let extraClassDeclaration = [{
-    Optional<SmallVector<int64_t, 4>> getShapeForUnroll();
+    std::optional<SmallVector<int64_t, 4>> getShapeForUnroll();
   }];
 }
 
@@ -330,7 +330,7 @@ def Arith_MulSIExtendedOp : Arith_Op<"mulsi_extended", [Pure, Commutative,
   let hasCanonicalizer = 1;
 
   let extraClassDeclaration = [{
-    Optional<SmallVector<int64_t, 4>> getShapeForUnroll();
+    std::optional<SmallVector<int64_t, 4>> getShapeForUnroll();
   }];
 }
 
@@ -373,7 +373,7 @@ def Arith_MulUIExtendedOp : Arith_Op<"mului_extended", [Pure, Commutative,
   let hasCanonicalizer = 1;
 
   let extraClassDeclaration = [{
-    Optional<SmallVector<int64_t, 4>> getShapeForUnroll();
+    std::optional<SmallVector<int64_t, 4>> getShapeForUnroll();
   }];
 }
 

diff --git a/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h b/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h
index aaa0129f621c8..97eb323ddb254 100644
--- a/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h
+++ b/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h
@@ -43,7 +43,7 @@ namespace gpu {
 DiagnosedSilenceableFailure mapNestedForeachToThreadsImpl(
     RewriterBase &rewriter, Operation *target,
     const SmallVectorImpl<int64_t> &blockDim, bool syncAfterDistribute,
-    llvm::Optional<TransformOpInterface> transformOp,
+    std::optional<TransformOpInterface> transformOp,
     const ArrayRef<DeviceMappingAttrInterface> &threadMappingAttributes);
 
 /// Maps the top level `scf.foreach_thread` op to GPU Thread Blocks. Mapping is

diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 2bba2eecc0f85..525c60c4be1b5 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -990,8 +990,8 @@ def NVVM_MmaOp : NVVM_Op<"mma.sync", [AttrSizedOperandSegments]> {
   let extraClassDeclaration = !strconcat([{
       static llvm::Intrinsic::ID getIntrinsicID(
             int64_t m, int64_t n, uint64_t k,
-            llvm::Optional<MMAB1Op> b1Op,
-            llvm::Optional<MMAIntOverflow> sat,
+            std::optional<MMAB1Op> b1Op,
+            std::optional<MMAIntOverflow> sat,
             mlir::NVVM::MMALayout layoutAEnum, mlir::NVVM::MMALayout layoutBEnum,
             mlir::NVVM::MMATypes eltypeAEnum, mlir::NVVM::MMATypes eltypeBEnum,
             mlir::NVVM::MMATypes eltypeCEnum, mlir::NVVM::MMATypes eltypeDEnum) {
@@ -1006,7 +1006,7 @@ def NVVM_MmaOp : NVVM_Op<"mma.sync", [AttrSizedOperandSegments]> {
         return 0;
       }
 
-      static Optional<mlir::NVVM::MMATypes> inferOperandMMAType(Type operandElType,
+      static std::optional<mlir::NVVM::MMATypes> inferOperandMMAType(Type operandElType,
         bool isAccumulator);
 
       MMATypes accumPtxType();
@@ -1016,10 +1016,10 @@ def NVVM_MmaOp : NVVM_Op<"mma.sync", [AttrSizedOperandSegments]> {
   let builders = [
       OpBuilder<(ins  "Type":$resultType, "ValueRange":$operandA,
         "ValueRange":$operandB, "ValueRange":$operandC,
-        "ArrayRef<int64_t>":$shape, "Optional<MMAB1Op>":$b1Op,
-        "Optional<MMAIntOverflow>":$intOverflow,
-        "Optional<std::array<MMATypes, 2>>":$multiplicandPtxTypes,
-        "Optional<std::array<MMALayout, 2>>":$multiplicandLayouts)>
+        "ArrayRef<int64_t>":$shape, "std::optional<MMAB1Op>":$b1Op,
+        "std::optional<MMAIntOverflow>":$intOverflow,
+        "std::optional<std::array<MMATypes, 2>>":$multiplicandPtxTypes,
+        "std::optional<std::array<MMALayout, 2>>":$multiplicandLayouts)>
     ];
 
   string llvmBuilder = [{

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 815d542ca83cf..5456ca1301c6e 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -47,7 +47,7 @@ class LinalgStructuredBase_Op<string mnemonic, list<Trait> props>
     }
 
     void getSuccessorRegions(
-        Optional<unsigned> index, ArrayRef<Attribute> operands,
+        std::optional<unsigned> index, ArrayRef<Attribute> operands,
         SmallVectorImpl<RegionSuccessor> &regions) {
       // Op has a region, but conceptually the control flow does not enter the
       // region.

diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h
index fd3a33cc0fb14..29fad4510d379 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h
@@ -48,7 +48,7 @@ DiagnosedSilenceableFailure tileToForeachThreadOpImpl(
     RewriterBase &rewriter, transform::TransformState &state,
     TransformOpInterface transformOp, ArrayRef<Operation *> targets,
     ArrayRef<OpFoldResult> mixedNumThreads,
-    ArrayRef<OpFoldResult> mixedTileSizes, Optional<ArrayAttr> mapping,
+    ArrayRef<OpFoldResult> mixedTileSizes, std::optional<ArrayAttr> mapping,
     SmallVector<Operation *> &tileOps, SmallVector<Operation *> &tiledOps);
 } // namespace transform
 

diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index e3e4f6e3ec089..2919c659aa13c 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -244,7 +244,7 @@ FailureOr<GenericOp> generalizeNamedOp(RewriterBase &rewriter,
 /// smallest constant value for the size of the buffer needed for each
 /// dimension. If that is not possible, contains the dynamic size of the
 /// subview. The call back should return the buffer to use.
-using AllocBufferCallbackFn = std::function<Optional<Value>(
+using AllocBufferCallbackFn = std::function<std::optional<Value>(
     OpBuilder &b, memref::SubViewOp subView,
     ArrayRef<Value> boundingSubViewSize, DataLayout &layout)>;
 
@@ -262,7 +262,7 @@ using CopyCallbackFn =
 
 struct LinalgPromotionOptions {
   /// Indices of subViews to promote. If `None`, try to promote all operands.
-  Optional<DenseSet<unsigned>> operandsToPromote = std::nullopt;
+  std::optional<DenseSet<unsigned>> operandsToPromote = std::nullopt;
   LinalgPromotionOptions &setOperandsToPromote(ArrayRef<int64_t> operands) {
     operandsToPromote = DenseSet<unsigned>();
     operandsToPromote->insert(operands.begin(), operands.end());
@@ -273,7 +273,7 @@ struct LinalgPromotionOptions {
   /// Otherwise the partial view will be used. The decision is defaulted to
   /// `useFullTileBuffersDefault` when `useFullTileBuffers` is None and for
   /// operands missing from `useFullTileBuffers`.
-  Optional<llvm::SmallBitVector> useFullTileBuffers = std::nullopt;
+  std::optional<llvm::SmallBitVector> useFullTileBuffers = std::nullopt;
   LinalgPromotionOptions &setUseFullTileBuffers(ArrayRef<bool> useFullTiles) {
     unsigned size = useFullTiles.size();
     llvm::SmallBitVector tmp(size, false);
@@ -290,7 +290,7 @@ struct LinalgPromotionOptions {
     return *this;
   }
   /// Alignment of promoted buffer. If `None` do not specify alignment.
-  Optional<unsigned> alignment = std::nullopt;
+  std::optional<unsigned> alignment = std::nullopt;
   LinalgPromotionOptions &setAlignment(unsigned align) {
     alignment = align;
     return *this;
@@ -304,8 +304,8 @@ struct LinalgPromotionOptions {
   /// Callback function to do the allocation of the promoted buffer. If None,
   /// then the default allocation scheme of allocating a memref<?xi8> buffer
   /// followed by a view operation is used.
-  Optional<AllocBufferCallbackFn> allocationFn = std::nullopt;
-  Optional<DeallocBufferCallbackFn> deallocationFn = std::nullopt;
+  std::optional<AllocBufferCallbackFn> allocationFn = std::nullopt;
+  std::optional<DeallocBufferCallbackFn> deallocationFn = std::nullopt;
   LinalgPromotionOptions &
   setAllocationDeallocationFns(AllocBufferCallbackFn const &allocFn,
                                DeallocBufferCallbackFn const &deallocFn) {
@@ -315,8 +315,8 @@ struct LinalgPromotionOptions {
   }
   /// Callback function to do the copy of data to and from the promoted
   /// subview. If None then a memref.copy is used.
-  Optional<CopyCallbackFn> copyInFn = std::nullopt;
-  Optional<CopyCallbackFn> copyOutFn = std::nullopt;
+  std::optional<CopyCallbackFn> copyInFn = std::nullopt;
+  std::optional<CopyCallbackFn> copyOutFn = std::nullopt;
   LinalgPromotionOptions &setCopyInOutFns(CopyCallbackFn const &copyIn,
                                           CopyCallbackFn const &copyOut) {
     copyInFn = copyIn;
@@ -469,14 +469,14 @@ struct ForeachThreadTilingResult {
 FailureOr<ForeachThreadTilingResult>
 tileToForeachThreadOp(RewriterBase &builder, TilingInterface op,
                       ArrayRef<OpFoldResult> numThreads,
-                      Optional<ArrayAttr> mapping);
+                      std::optional<ArrayAttr> mapping);
 
 /// Same as `tileToForeachThreadOp`, but calculate the number of threads
 /// required using the given tileSizes.
 FailureOr<ForeachThreadTilingResult>
 tileToForeachThreadOpUsingTileSizes(RewriterBase &builder, TilingInterface op,
                                     ArrayRef<OpFoldResult> tileSizes,
-                                    Optional<ArrayAttr> mapping);
+                                    std::optional<ArrayAttr> mapping);
 
 /// Transformation information returned after reduction tiling.
 struct ForeachThreadReductionTilingResult {
@@ -514,11 +514,10 @@ struct ForeachThreadReductionTilingResult {
 /// %6 = linalg.generic %1 ["parallel", "reduction"]
 ///   : tensor<7x4xf32> -> tensor<7xf32>
 /// ```
-FailureOr<ForeachThreadReductionTilingResult>
-tileReductionUsingForeachThread(RewriterBase &b, PartialReductionOpInterface op,
-                                ArrayRef<OpFoldResult> numThreads,
-                                ArrayRef<OpFoldResult> tileSizes = {},
-                                Optional<ArrayAttr> mapping = std::nullopt);
+FailureOr<ForeachThreadReductionTilingResult> tileReductionUsingForeachThread(
+    RewriterBase &b, PartialReductionOpInterface op,
+    ArrayRef<OpFoldResult> numThreads, ArrayRef<OpFoldResult> tileSizes = {},
+    std::optional<ArrayAttr> mapping = std::nullopt);
 
 /// All indices returned by IndexOp should be invariant with respect to
 /// tiling. Therefore, if an operation is tiled, we have to transform the
@@ -623,7 +622,7 @@ struct LinalgTilingAndFusionOptions {
   SmallVector<int64_t> tileInterchange;
   /// When specified, specifies distribution of generated tile loops to
   /// processors.
-  Optional<LinalgLoopDistributionOptions> tileDistribution = std::nullopt;
+  std::optional<LinalgLoopDistributionOptions> tileDistribution = std::nullopt;
   LinalgTilingAndFusionOptions &
   setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {
     tileDistribution = std::move(distributionOptions);
@@ -676,7 +675,7 @@ struct LinalgTilingOptions {
 
   /// When specified, specifies distribution of generated tile loops to
   /// processors.
-  Optional<LinalgLoopDistributionOptions> distribution = std::nullopt;
+  std::optional<LinalgLoopDistributionOptions> distribution = std::nullopt;
 
   LinalgTilingOptions &
   setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {
@@ -806,7 +805,7 @@ struct CopyVectorizationPattern : public OpRewritePattern<memref::CopyOp> {
 };
 
 /// Return vector::CombiningKind for the given op.
-llvm::Optional<vector::CombiningKind> getCombinerOpKind(Operation *combinerOp);
+std::optional<vector::CombiningKind> getCombinerOpKind(Operation *combinerOp);
 
 //===----------------------------------------------------------------------===//
 // Transformations exposed as rewrite patterns.
@@ -966,7 +965,7 @@ struct ExtractSliceOfPadTensorSwapPattern
   ///
   /// See the documentation for tensor::bubbleUpPadSlice regarding zero slice
   /// guard.
-  using ControlFn = std::function<llvm::Optional<bool>(tensor::ExtractSliceOp)>;
+  using ControlFn = std::function<std::optional<bool>(tensor::ExtractSliceOp)>;
 
   ExtractSliceOfPadTensorSwapPattern(MLIRContext *context,
                                      ControlFn controlFn = nullptr,

diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
index 5233badf0bcc1..c88450e42f9ab 100644
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -586,7 +586,7 @@ def MemRef_DimOp : MemRef_Op<"dim", [
 
   let extraClassDeclaration = [{
     /// Helper function to get the index as a simple integer if it is constant.
-    Optional<int64_t> getConstantIndex();
+    std::optional<int64_t> getConstantIndex();
 
     /// Interface method of ShapedDimOpInterface: Return the source memref.
     Value getShapedValue() { return getSource(); }

diff --git a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
index 6ecbed2ac1b67..5e2892f12db8b 100644
--- a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
+++ b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
@@ -816,7 +816,7 @@ def PDLInterp_GetOperandsOp : PDLInterp_Op<"get_operands", [Pure]> {
   let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict";
   let builders = [
     OpBuilder<(ins "Type":$resultType, "Value":$inputOp,
-                   "Optional<unsigned>":$index), [{
+                   "std::optional<unsigned>":$index), [{
       build($_builder, $_state, resultType, inputOp,
             index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr());
     }]>,
@@ -883,7 +883,7 @@ def PDLInterp_GetResultsOp : PDLInterp_Op<"get_results", [Pure]> {
   let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict";
   let builders = [
     OpBuilder<(ins "Type":$resultType, "Value":$inputOp,
-                   "Optional<unsigned>":$index), [{
+                   "std::optional<unsigned>":$index), [{
       build($_builder, $_state, resultType, inputOp,
             index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr());
     }]>,

diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
index fbee86b0a0ce1..d5a150557163c 100644
--- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
@@ -274,7 +274,7 @@ def ForOp : SCF_Op<"for",
     }
     /// Get the iter arg number for an operand. If it isnt an iter arg
     /// operand return std::nullopt.
-    Optional<unsigned> getIterArgNumberForOpOperand(OpOperand &opOperand) {
+    std::optional<unsigned> getIterArgNumberForOpOperand(OpOperand &opOperand) {
       if (opOperand.getOwner() != getOperation())
         return std::nullopt;
       unsigned operandNumber = opOperand.getOperandNumber();
@@ -331,10 +331,10 @@ def ForOp : SCF_Op<"for",
     /// correspond to the loop iterator operands, i.e., those exclusing the
     /// induction variable. LoopOp only has one region, so 0 is the only valid
     /// value for `index`.
-    OperandRange getSuccessorEntryOperands(Optional<unsigned> index);
+    OperandRange getSuccessorEntryOperands(std::optional<unsigned> index);
 
     /// Returns the step as an `APInt` if it is constant.
-    Optional<APInt> getConstantStep();
+    std::optional<APInt> getConstantStep();
 
     /// Interface method for ConditionallySpeculatable.
     Speculation::Speculatability getSpeculatability();
@@ -496,7 +496,7 @@ def ForeachThreadOp : SCF_Op<"foreach_thread", [
   let builders = [
     // Bodyless builder, outputs must be specified.
     OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads,
-                   "Optional<ArrayAttr>":$mapping)>,
+                   "std::optional<ArrayAttr>":$mapping)>,
     // Builder that takes a bodyBuilder lambda.
     OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads,
                    "ArrayRef<Attribute>":$mapping,
@@ -1003,7 +1003,7 @@ def WhileOp : SCF_Op<"while",
     using BodyBuilderFn =
         function_ref<void(OpBuilder &, Location, ValueRange)>;
 
-    OperandRange getSuccessorEntryOperands(Optional<unsigned> index);
+    OperandRange getSuccessorEntryOperands(std::optional<unsigned> index);
     ConditionOp getConditionOp();
     YieldOp getYieldOp();
     Block::BlockArgListType getBeforeArguments();

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
index 9c307bc5cdcf4..073bdcef2293f 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h
@@ -31,7 +31,8 @@ parseEnumKeywordAttr(EnumClass &value, ParserType &parser,
   auto loc = parser.getCurrentLocation();
   if (parser.parseKeyword(&keyword))
     return failure();
-  if (Optional<EnumClass> attr = spirv::symbolizeEnum<EnumClass>(keyword)) {
+  if (std::optional<EnumClass> attr =
+          spirv::symbolizeEnum<EnumClass>(keyword)) {
     value = *attr;
     return success();
   }

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
index 8339afc4f7a33..8c432b21a1b7d 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
@@ -492,11 +492,11 @@ def SPIRV_ModuleOp : SPIRV_Op<"module",
   let regions = (region AnyRegion);
 
   let builders = [
-    OpBuilder<(ins CArg<"Optional<StringRef>", "std::nullopt">:$name)>,
+    OpBuilder<(ins CArg<"std::optional<StringRef>", "std::nullopt">:$name)>,
     OpBuilder<(ins "spirv::AddressingModel":$addressing_model,
                    "spirv::MemoryModel":$memory_model,
-                   CArg<"Optional<spirv::VerCapExtAttr>", "std::nullopt">:$vce_triple,
-                   CArg<"Optional<StringRef>", "std::nullopt">:$name)>
+                   CArg<"std::optional<spirv::VerCapExtAttr>", "std::nullopt">:$vce_triple,
+                   CArg<"std::optional<StringRef>", "std::nullopt">:$name)>
   ];
 
   // We need to ensure the block inside the region is properly terminated;
@@ -511,7 +511,7 @@ def SPIRV_ModuleOp : SPIRV_Op<"module",
 
     bool isOptionalSymbol() { return true; }
 
-    Optional<StringRef> getName() { return getSymName(); }
+    std::optional<StringRef> getName() { return getSymName(); }
 
     static StringRef getVCETripleAttrName() { return "vce_triple"; }
   }];

diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
index f2108719d9de1..fa0e2ebb1f16d 100644
--- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
+++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
@@ -352,7 +352,7 @@ def Shape_DimOp : Shape_Op<"dim",
 
   let extraClassDeclaration = [{
     /// Get the `index` value as integer if it is constant.
-    Optional<int64_t> getConstantIndex();
+    std::optional<int64_t> getConstantIndex();
 
     /// Returns when two result types are compatible for this op; method used by
     /// InferTypeOpInterface
@@ -383,7 +383,7 @@ def Shape_GetExtentOp : Shape_Op<"get_extent",
 
   let extraClassDeclaration = [{
     /// Get the `dim` value as integer if it is constant.
-    Optional<int64_t> getConstantDim();
+    std::optional<int64_t> getConstantDim();
     /// Returns when two result types are compatible for this op; method used by
     /// InferTypeOpInterface
     static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);

diff  --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
index fc167372add37..e8396b6bb7716 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
@@ -67,8 +67,8 @@ def SparseTensor_StorageSpecifier : SparseTensor_Type<"StorageSpecifier"> {
   let extraClassDeclaration = [{
     // Get the integer type used to store memory and dimension sizes.
     IntegerType getSizesType() const;
-    Type getFieldType(StorageSpecifierKind kind, Optional<unsigned> dim) const;
-    Type getFieldType(StorageSpecifierKind kind, Optional<APInt> dim) const;
+    Type getFieldType(StorageSpecifierKind kind, std::optional<unsigned> dim) const;
+    Type getFieldType(StorageSpecifierKind kind, std::optional<APInt> dim) const;
   }];
 
   let assemblyFormat="`<` qualified($encoding) `>`";

diff  --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
index 7088b0c012f06..37f8b775b9354 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -129,7 +129,7 @@ def Tensor_DimOp : Tensor_Op<"dim", [
 
   let extraClassDeclaration = [{
     /// Helper function to get the index as a simple integer if it is constant.
-    Optional<int64_t> getConstantIndex();
+    std::optional<int64_t> getConstantIndex();
 
     /// Interface method of ShapedDimOpInterface: Return the source tensor.
     Value getShapedValue() { return getSource(); }
@@ -380,7 +380,7 @@ def Tensor_ExtractSliceOp : Tensor_OpWithOffsetSizesAndStrides<"extract_slice",
 
     /// Compute the rank-reduction mask that can be applied to map the source
     /// tensor type to the result tensor type by dropping unit dims.
-    llvm::Optional<llvm::SmallDenseSet<unsigned>>
+    std::optional<llvm::SmallDenseSet<unsigned>>
     computeRankReductionMask() {
       return ::mlir::computeRankReductionMask(getSourceType().getShape(),
                                               getType().getShape());

diff  --git a/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h b/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h
index 0454137d40054..0b0e03dc2788c 100644
--- a/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h
+++ b/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h
@@ -67,7 +67,7 @@ SmallVector<OpFoldResult> getAsOpFoldResult(ValueRange values);
 SmallVector<OpFoldResult> getAsOpFoldResult(ArrayAttr arrayAttr);
 
 /// If ofr is a constant integer or an IntegerAttr, return the integer.
-Optional<int64_t> getConstantIntValue(OpFoldResult ofr);
+std::optional<int64_t> getConstantIntValue(OpFoldResult ofr);
 
 /// Return true if `ofr` is constant integer equal to `value`.
 bool isConstantIntValue(OpFoldResult ofr, int64_t value);
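
The getConstantIntValue() declaration above is a good example of the "maybe a constant" idiom this migration touches everywhere: return the integer when it can be recovered, std::nullopt otherwise, and let callers test the result. A self-contained sketch of the same idiom, using a std::variant stand-in instead of OpFoldResult so it compiles on its own:

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <variant>

    // Hypothetical stand-in for OpFoldResult: either "dynamic" or a constant.
    using FoldResult = std::variant<std::monostate, int64_t>;

    std::optional<int64_t> getConstantIntValue(FoldResult ofr) {
      if (const auto *c = std::get_if<int64_t>(&ofr))
        return *c;           // constant: hand the value back
      return std::nullopt;   // dynamic: nothing to return
    }

    int main() {
      FoldResult dynamic;                  // monostate, i.e. not a constant
      FoldResult constant = int64_t{8};
      std::cout << getConstantIntValue(constant).value_or(-1) << "\n"; // 8
      std::cout << getConstantIntValue(dynamic).value_or(-1) << "\n";  // -1
      return 0;
    }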

diff  --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
index d67d64731fe7e..a4735aefd61c2 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
@@ -118,7 +118,7 @@ void populateBubbleVectorBitCastOpPatterns(RewritePatternSet &patterns,
 /// VectorToSCF, which reduces the rank of vector transfer ops.
 void populateVectorTransferLoweringPatterns(
     RewritePatternSet &patterns,
-    llvm::Optional<unsigned> maxTransferRank = std::nullopt,
+    std::optional<unsigned> maxTransferRank = std::nullopt,
     PatternBenefit benefit = 1);
 
 /// These patterns materialize masks for various vector ops such as transfers.

diff  --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 176d70942a08f..8a5d1025167be 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1370,19 +1370,19 @@ def Vector_TransferReadOp :
                    "Value":$source,
                    "ValueRange":$indices,
                    "AffineMap":$permutationMap,
-                   CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
+                   CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
     /// 3. Builder that sets permutation map to 'getMinorIdentityMap'.
     OpBuilder<(ins "VectorType":$vectorType,
                    "Value":$source,
                    "ValueRange":$indices,
                    "Value":$padding,
-                   CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
+                   CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
     /// 4. Builder that sets padding to zero and permutation map to
     /// 'getMinorIdentityMap'.
     OpBuilder<(ins "VectorType":$vectorType,
                    "Value":$source,
                    "ValueRange":$indices,
-                   CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
+                   CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
   ];
 
   let extraClassDeclaration = [{
@@ -1521,13 +1521,13 @@ def Vector_TransferWriteOp :
                    "Value":$dest,
                    "ValueRange":$indices,
                    "AffineMap":$permutationMap,
-                   CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
+                   CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
     /// 4. Builder with type inference that sets an empty mask and sets permutation
     /// map to 'getMinorIdentityMap'.
     OpBuilder<(ins "Value":$vector,
                    "Value":$dest,
                    "ValueRange":$indices,
-                   CArg<"Optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
+                   CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
   ];
 
   let extraClassDeclaration = [{

diff  --git a/mlir/include/mlir/IR/BuiltinOps.td b/mlir/include/mlir/IR/BuiltinOps.td
index 68a512c06430c..da4ba43c08a5e 100644
--- a/mlir/include/mlir/IR/BuiltinOps.td
+++ b/mlir/include/mlir/IR/BuiltinOps.td
@@ -58,13 +58,13 @@ def ModuleOp : Builtin_Op<"module", [
   let regions = (region SizedRegion<1>:$bodyRegion);
 
   let assemblyFormat = "($sym_name^)? attr-dict-with-keyword $bodyRegion";
-  let builders = [OpBuilder<(ins CArg<"Optional<StringRef>", "{}">:$name)>];
+  let builders = [OpBuilder<(ins CArg<"std::optional<StringRef>", "{}">:$name)>];
   let extraClassDeclaration = [{
     /// Construct a module from the given location with an optional name.
-    static ModuleOp create(Location loc, Optional<StringRef> name = std::nullopt);
+    static ModuleOp create(Location loc, std::optional<StringRef> name = std::nullopt);
 
     /// Return the name of this module if present.
-    Optional<StringRef> getName() { return getSymName(); }
+    std::optional<StringRef> getName() { return getSymName(); }
 
     //===------------------------------------------------------------------===//
     // SymbolOpInterface Methods

diff  --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
index cfbf937bf06cc..f2b1fa34bc391 100644
--- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
@@ -63,7 +63,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
       type. If a shape is not provided, the current shape of the type is used.
     }],
     "::mlir::ShapedType", "cloneWith", (ins
-      "::llvm::Optional<::llvm::ArrayRef<int64_t>>":$shape,
+      "::std::optional<::llvm::ArrayRef<int64_t>>":$shape,
       "::mlir::Type":$elementType
     )>,
 

diff  --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h
index 8bdc672e77470..f06581a4b0d73 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.h
+++ b/mlir/include/mlir/IR/BuiltinTypes.h
@@ -90,7 +90,7 @@ class TensorType : public Type, public ShapedType::Trait<TensorType> {
 
   /// Clone this type with the given shape and element type. If the
   /// provided shape is `None`, the current shape of the type is used.
-  TensorType cloneWith(Optional<ArrayRef<int64_t>> shape,
+  TensorType cloneWith(std::optional<ArrayRef<int64_t>> shape,
                        Type elementType) const;
 
   /// Return true if the specified element type is ok in a tensor.
@@ -126,7 +126,7 @@ class BaseMemRefType : public Type, public ShapedType::Trait<BaseMemRefType> {
 
   /// Clone this type with the given shape and element type. If the
   /// provided shape is `None`, the current shape of the type is used.
-  BaseMemRefType cloneWith(Optional<ArrayRef<int64_t>> shape,
+  BaseMemRefType cloneWith(std::optional<ArrayRef<int64_t>> shape,
                            Type elementType) const;
 
   /// Return true if the specified element type is ok in a memref.
@@ -337,7 +337,7 @@ class VectorType::Builder {
 /// which dimensions must be kept when e.g. compute MemRef strides under
 /// rank-reducing operations. Return std::nullopt if reducedShape cannot be
 /// obtained by dropping only `1` entries in `originalShape`.
-llvm::Optional<llvm::SmallDenseSet<unsigned>>
+std::optional<llvm::SmallDenseSet<unsigned>>
 computeRankReductionMask(ArrayRef<int64_t> originalShape,
                          ArrayRef<int64_t> reducedShape);
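
The computeRankReductionMask() declaration above returns the set of dropped dimensions, or std::nullopt when the reduced shape cannot be reached by dropping unit dims only. A simplified, self-contained sketch of that idea (assumptions: greedy matching, no dynamic dimensions, std::set instead of SmallDenseSet):

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <set>
    #include <vector>

    // Which positions of `original` were dropped to obtain `reduced`?
    // Only unit dimensions may be dropped.
    std::optional<std::set<unsigned>>
    computeRankReductionMask(const std::vector<int64_t> &original,
                             const std::vector<int64_t> &reduced) {
      std::set<unsigned> dropped;
      size_t r = 0;
      for (size_t o = 0; o < original.size(); ++o) {
        if (r < reduced.size() && original[o] == reduced[r]) {
          ++r;                   // dimension kept
        } else if (original[o] == 1) {
          dropped.insert(o);     // unit dimension dropped
        } else {
          return std::nullopt;   // would have to drop a non-unit dimension
        }
      }
      if (r != reduced.size())
        return std::nullopt;     // reduced shape not fully matched
      return dropped;
    }

    int main() {
      auto mask = computeRankReductionMask({4, 1, 8, 1}, {4, 8});
      std::cout << (mask && mask->count(1) && mask->count(3)) << "\n";        // 1
      std::cout << computeRankReductionMask({4, 2}, {4}).has_value() << "\n"; // 0
      return 0;
    }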
 

diff  --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
index be9d14d8c3040..8b7bfafab5680 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.td
+++ b/mlir/include/mlir/IR/BuiltinTypes.td
@@ -1031,7 +1031,7 @@ def Builtin_Vector : Builtin_Type<"Vector", [
 
     /// Clone this vector type with the given shape and element type. If the
     /// provided shape is `None`, the current shape of the type is used.
-    VectorType cloneWith(Optional<ArrayRef<int64_t>> shape,
+    VectorType cloneWith(std::optional<ArrayRef<int64_t>> shape,
                          Type elementType) const;
   }];
   let skipDefaultBuilders = 1;

diff  --git a/mlir/include/mlir/IR/Diagnostics.h b/mlir/include/mlir/IR/Diagnostics.h
index af3bafa0028e3..7c1c81548c367 100644
--- a/mlir/include/mlir/IR/Diagnostics.h
+++ b/mlir/include/mlir/IR/Diagnostics.h
@@ -487,19 +487,19 @@ InFlightDiagnostic emitRemark(Location loc, const Twine &message);
 /// the diagnostic arguments directly instead of relying on the returned
 /// InFlightDiagnostic.
 template <typename... Args>
-LogicalResult emitOptionalError(Optional<Location> loc, Args &&...args) {
+LogicalResult emitOptionalError(std::optional<Location> loc, Args &&...args) {
   if (loc)
     return emitError(*loc).append(std::forward<Args>(args)...);
   return failure();
 }
 template <typename... Args>
-LogicalResult emitOptionalWarning(Optional<Location> loc, Args &&...args) {
+LogicalResult emitOptionalWarning(std::optional<Location> loc, Args &&...args) {
   if (loc)
     return emitWarning(*loc).append(std::forward<Args>(args)...);
   return failure();
 }
 template <typename... Args>
-LogicalResult emitOptionalRemark(Optional<Location> loc, Args &&...args) {
+LogicalResult emitOptionalRemark(std::optional<Location> loc, Args &&...args) {
   if (loc)
     return emitRemark(*loc).append(std::forward<Args>(args)...);
   return failure();
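
The emitOptional* helpers above follow one pattern: report against the location when one is available, otherwise just fail. A toy sketch of that control flow, with a string location and a bool standing in for InFlightDiagnostic/LogicalResult (names are illustrative only):

    #include <iostream>
    #include <optional>
    #include <string>

    // Toy stand-in: false plays the role of mlir::failure().
    bool emitOptionalError(std::optional<std::string> loc,
                           const std::string &message) {
      if (loc)
        std::cerr << *loc << ": error: " << message << "\n";
      return false; // the helper fails either way; it only reports when it can
    }

    int main() {
      emitOptionalError(std::nullopt, "dropped silently");
      emitOptionalError(std::string("op.mlir:3:1"), "reported with a location");
      return 0;
    }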

diff  --git a/mlir/include/mlir/IR/Dialect.h b/mlir/include/mlir/IR/Dialect.h
index c3ae2c81b2b12..1f9cd28ae4118 100644
--- a/mlir/include/mlir/IR/Dialect.h
+++ b/mlir/include/mlir/IR/Dialect.h
@@ -115,7 +115,8 @@ class Dialect {
   /// By default this will lookup for registered operations and return the
   /// `parse()` method registered on the RegisteredOperationName. Dialects can
   /// override this behavior and handle unregistered operations as well.
-  virtual Optional<ParseOpHook> getParseOperationHook(StringRef opName) const;
+  virtual std::optional<ParseOpHook>
+  getParseOperationHook(StringRef opName) const;
 
   /// Print an operation registered to this dialect.
   /// This hook is invoked for registered operation which don't override the

diff  --git a/mlir/include/mlir/IR/EnumAttr.td b/mlir/include/mlir/IR/EnumAttr.td
index 3b41e9fc219ca..14fbfb9f0997f 100644
--- a/mlir/include/mlir/IR/EnumAttr.td
+++ b/mlir/include/mlir/IR/EnumAttr.td
@@ -130,7 +130,7 @@ class EnumAttrInfo<
   // type to the corresponding symbol. It will have the following signature:
   //
   // ```c++
-  // llvm::Optional<<qualified-enum-class-name>> <fn-name>(<underlying-type>);
+  // std::optional<<qualified-enum-class-name>> <fn-name>(<underlying-type>);
   // ```
   string underlyingToSymbolFnName = "symbolize" # name;
 
@@ -138,7 +138,7 @@ class EnumAttrInfo<
   // corresponding symbol. It will have the following signature:
   //
   // ```c++
-  // llvm::Optional<<qualified-enum-class-name>> <fn-name>(llvm::StringRef);
+  // std::optional<<qualified-enum-class-name>> <fn-name>(llvm::StringRef);
   // ```
   string stringToSymbolFnName = "symbolize" # name;
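
The comments above document the shape of the generated symbolize* helpers: an underlying value or a string in, std::optional of the enum out. A hand-written sketch of the string flavour, with a made-up CmpKind enum standing in for a generated one:

    #include <iostream>
    #include <optional>
    #include <string_view>

    enum class CmpKind { eq, ne };   // hypothetical enum, not generated code

    std::optional<CmpKind> symbolizeCmpKind(std::string_view str) {
      if (str == "eq")
        return CmpKind::eq;
      if (str == "ne")
        return CmpKind::ne;
      return std::nullopt;           // unknown keyword
    }

    int main() {
      std::cout << (symbolizeCmpKind("ne") == CmpKind::ne) << "\n"; // 1
      std::cout << symbolizeCmpKind("le").has_value() << "\n";      // 0
      return 0;
    }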
 

diff  --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td
index 400f67162aaf9..a14c8911d152a 100644
--- a/mlir/include/mlir/IR/OpBase.td
+++ b/mlir/include/mlir/IR/OpBase.td
@@ -1033,7 +1033,7 @@ class OptionalAttr<Attr attr> : Attr<attr.predicate, attr.summary> {
   // Rewrite the attribute to be optional.
   // Note: this has to be kept up to date with Attr above.
   let storageType = attr.storageType;
-  let returnType = "::llvm::Optional<" # attr.returnType #">";
+  let returnType = "::std::optional<" # attr.returnType #">";
   let convertFromStorage = "$_self ? " # returnType # "(" #
                            attr.convertFromStorage # ") : (::std::nullopt)";
   let valueType = attr.valueType;
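
The OptionalAttr change above only swaps the spelling of the generated return type; the accessor still wraps the stored attribute when present and hands back std::nullopt otherwise. A rough illustration of that shape (this is not generated output; FakeAttr and getAlignment are made up):

    #include <iostream>
    #include <optional>
    #include <string>

    struct FakeAttr { std::string value; };   // hypothetical storage

    std::optional<std::string> getAlignment(const FakeAttr *stored) {
      if (!stored)
        return std::nullopt;                  // attribute not set on the op
      return stored->value;                   // convertFromStorage, roughly
    }

    int main() {
      FakeAttr a{"16"};
      std::cout << getAlignment(&a).value_or("<unset>") << "\n";      // 16
      std::cout << getAlignment(nullptr).value_or("<unset>") << "\n"; // <unset>
      return 0;
    }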

diff  --git a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h
index f206c51d222c2..e3c262da17039 100644
--- a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h
+++ b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h
@@ -128,7 +128,7 @@ namespace detail {
 /// Return the `BlockArgument` corresponding to operand `operandIndex` in some
 /// successor if `operandIndex` is within the range of `operands`, or
 /// std::nullopt if `operandIndex` isn't a successor operand index.
-Optional<BlockArgument>
+std::optional<BlockArgument>
 getBranchSuccessorArgument(const SuccessorOperands &operands,
                            unsigned operandIndex, Block *successor);
 
@@ -164,8 +164,10 @@ class RegionSuccessor {
   RegionSuccessor(Region *region, Block::BlockArgListType regionInputs = {})
       : region(region), inputs(regionInputs) {}
   /// Initialize a successor that branches back to/out of the parent operation.
-  RegionSuccessor(Optional<Operation::result_range> results = {})
-      : inputs(results ? ValueRange(*results) : ValueRange()) {}
+  RegionSuccessor(Operation::result_range results)
+      : inputs(ValueRange(results)) {}
+  /// Constructor with no arguments.
+  RegionSuccessor() : inputs(ValueRange()) {}
 
   /// Return the given region successor. Returns nullptr if the successor is the
   /// parent operation.
@@ -190,7 +192,8 @@ class InvocationBounds {
 public:
   /// Create invocation bounds. The lower bound must be at least 0 and only the
   /// upper bound can be unknown.
-  InvocationBounds(unsigned lb, Optional<unsigned> ub) : lower(lb), upper(ub) {
+  InvocationBounds(unsigned lb, std::optional<unsigned> ub)
+      : lower(lb), upper(ub) {
     assert((!ub || ub >= lb) && "upper bound cannot be less than lower bound");
   }
 
@@ -198,7 +201,7 @@ class InvocationBounds {
   unsigned getLowerBound() const { return lower; }
 
   /// Return the upper bound.
-  Optional<unsigned> getUpperBound() const { return upper; }
+  std::optional<unsigned> getUpperBound() const { return upper; }
 
   /// Returns the unknown invocation bounds, i.e., there is no information on
   /// how many times a region may be invoked.
@@ -209,7 +212,7 @@ class InvocationBounds {
   unsigned lower;
   /// The maximum number of times the successor region will be invoked or
   /// `std::nullopt` if an upper bound is not known.
-  Optional<unsigned> upper;
+  std::optional<unsigned> upper;
 };
 
 /// Return `true` if `a` and `b` are in mutually exclusive regions as per
@@ -241,16 +244,16 @@ bool isRegionReturnLike(Operation *operation);
 /// `OperandRange` represents all operands that are passed to the specified
 /// successor region. If `regionIndex` is `std::nullopt`, all operands that are
 /// passed to the parent operation will be returned.
-Optional<MutableOperandRange>
+std::optional<MutableOperandRange>
 getMutableRegionBranchSuccessorOperands(Operation *operation,
-                                        Optional<unsigned> regionIndex);
+                                        std::optional<unsigned> regionIndex);
 
 /// Returns the read only operands that are passed to the region with the given
 /// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more
 /// information.
-Optional<OperandRange>
+std::optional<OperandRange>
 getRegionBranchSuccessorOperands(Operation *operation,
-                                 Optional<unsigned> regionIndex);
+                                 std::optional<unsigned> regionIndex);
 
 //===----------------------------------------------------------------------===//
 // ControlFlow Traits
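
The RegionSuccessor hunk above is the one non-mechanical edit in this file: instead of a single constructor taking std::optional<Operation::result_range> with a std::nullopt default, the class now exposes one constructor per case. A minimal sketch of the shape of that split, with a hypothetical Range type:

    #include <utility>
    #include <vector>

    using Range = std::vector<int>;  // stand-in for Operation::result_range

    class Successor {
    public:
      // Successor that forwards results back to/out of the parent operation.
      explicit Successor(Range results) : inputs(std::move(results)) {}
      // Successor with no forwarded values; previously the std::nullopt path.
      Successor() = default;

    private:
      Range inputs;
    };

    int main() {
      Successor withResults(Range{1, 2, 3});
      Successor fromParent;
      (void)withResults;
      (void)fromParent;
      return 0;
    }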

diff  --git a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td
index 9f5a0a41e65f3..1a331f8a706c6 100644
--- a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td
+++ b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td
@@ -70,11 +70,11 @@ def BranchOpInterface : OpInterface<"BranchOpInterface"> {
         some successor, or None if `operandIndex` isn't a successor operand
         index.
       }],
-      "::llvm::Optional<::mlir::BlockArgument>", "getSuccessorBlockArgument",
+      "::std::optional<::mlir::BlockArgument>", "getSuccessorBlockArgument",
       (ins "unsigned":$operandIndex), [{
         ::mlir::Operation *opaqueOp = $_op;
         for (unsigned i = 0, e = opaqueOp->getNumSuccessors(); i != e; ++i) {
-          if (::llvm::Optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument(
+          if (::std::optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument(
                 $_op.getSuccessorOperands(i), operandIndex,
                 opaqueOp->getSuccessor(i)))
             return arg;
@@ -140,7 +140,7 @@ def RegionBranchOpInterface : OpInterface<"RegionBranchOpInterface"> {
         `getSuccessorRegions`.
       }],
       "::mlir::OperandRange", "getSuccessorEntryOperands",
-      (ins "::llvm::Optional<unsigned>":$index), [{}],
+      (ins "::std::optional<unsigned>":$index), [{}],
       /*defaultImplementation=*/[{
         auto operandEnd = this->getOperation()->operand_end();
         return ::mlir::OperandRange(operandEnd, operandEnd);
@@ -161,7 +161,7 @@ def RegionBranchOpInterface : OpInterface<"RegionBranchOpInterface"> {
         successor region must be non-empty.
       }],
       "void", "getSuccessorRegions",
-      (ins "::llvm::Optional<unsigned>":$index,
+      (ins "::std::optional<unsigned>":$index,
            "::llvm::ArrayRef<::mlir::Attribute>":$operands,
            "::llvm::SmallVectorImpl<::mlir::RegionSuccessor> &":$regions)
     >,
@@ -208,7 +208,7 @@ def RegionBranchOpInterface : OpInterface<"RegionBranchOpInterface"> {
 
   let extraClassDeclaration = [{
     /// Convenience helper in case none of the operands is known.
-    void getSuccessorRegions(Optional<unsigned> index,
+    void getSuccessorRegions(std::optional<unsigned> index,
                              SmallVectorImpl<RegionSuccessor> &regions);
 
     /// Return `true` if control flow originating from the given region may
@@ -239,7 +239,7 @@ def RegionBranchTerminatorOpInterface :
         the parent operation.
       }],
       "::mlir::MutableOperandRange", "getMutableSuccessorOperands",
-      (ins "::llvm::Optional<unsigned>":$index)
+      (ins "::std::optional<unsigned>":$index)
     >,
     InterfaceMethod<[{
         Returns a range of operands that are semantically "returned" by passing
@@ -248,7 +248,7 @@ def RegionBranchTerminatorOpInterface :
         operation.
       }],
       "::mlir::OperandRange", "getSuccessorOperands",
-      (ins "::llvm::Optional<unsigned>":$index), [{}],
+      (ins "::std::optional<unsigned>":$index), [{}],
       /*defaultImplementation=*/[{
         return $_op.getMutableSuccessorOperands(index);
       }]

diff  --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h
index c89c7280784ba..4c0ffd02ac80a 100644
--- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h
+++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h
@@ -235,12 +235,13 @@ namespace detail {
 // TODO: Consider generating typedefs for trait member functions if this usage
 // becomes more common.
 LogicalResult inferReturnTensorTypes(
-    function_ref<LogicalResult(
-        MLIRContext *, Optional<Location> location, ValueShapeRange operands,
-        DictionaryAttr attributes, RegionRange regions,
-        SmallVectorImpl<ShapedTypeComponents> &retComponents)>
+    function_ref<
+        LogicalResult(MLIRContext *, std::optional<Location> location,
+                      ValueShapeRange operands, DictionaryAttr attributes,
+                      RegionRange regions,
+                      SmallVectorImpl<ShapedTypeComponents> &retComponents)>
         componentTypeFn,
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes);
 
@@ -272,7 +273,7 @@ template <typename ConcreteType>
 class InferTensorType : public TraitBase<ConcreteType, InferTensorType> {
 public:
   static LogicalResult
-  inferReturnTypes(MLIRContext *context, Optional<Location> location,
+  inferReturnTypes(MLIRContext *context, std::optional<Location> location,
                    ValueRange operands, DictionaryAttr attributes,
                    RegionRange regions,
                    SmallVectorImpl<Type> &inferredReturnTypes) {

diff  --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td
index 3c6391732d0d5..052e5907df4f8 100644
--- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td
+++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td
@@ -41,7 +41,7 @@ def InferTypeOpInterface : OpInterface<"InferTypeOpInterface"> {
       /*retTy=*/"::mlir::LogicalResult",
       /*methodName=*/"inferReturnTypes",
       /*args=*/(ins "::mlir::MLIRContext *":$context,
-                    "::llvm::Optional<::mlir::Location>":$location,
+                    "::std::optional<::mlir::Location>":$location,
                     "::mlir::ValueRange":$operands,
                     "::mlir::DictionaryAttr":$attributes,
                     "::mlir::RegionRange":$regions,
@@ -72,7 +72,7 @@ def InferTypeOpInterface : OpInterface<"InferTypeOpInterface"> {
       /*retTy=*/"::mlir::LogicalResult",
       /*methodName=*/"refineReturnTypes",
       /*args=*/(ins "::mlir::MLIRContext *":$context,
-                    "::llvm::Optional<::mlir::Location>":$location,
+                    "::std::optional<::mlir::Location>":$location,
                     "::mlir::ValueRange":$operands,
                     "::mlir::DictionaryAttr":$attributes,
                     "::mlir::RegionRange":$regions,
@@ -144,7 +144,7 @@ def InferShapedTypeOpInterface : OpInterface<"InferShapedTypeOpInterface"> {
       /*retTy=*/"::mlir::LogicalResult",
       /*methodName=*/"inferReturnTypeComponents",
       /*args=*/(ins "::mlir::MLIRContext*":$context,
-                    "::llvm::Optional<::mlir::Location>":$location,
+                    "::std::optional<::mlir::Location>":$location,
                     "::mlir::ValueShapeRange":$operands,
                     "::mlir::DictionaryAttr":$attributes,
                     "::mlir::RegionRange":$regions,

diff  --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.td b/mlir/include/mlir/Interfaces/LoopLikeInterface.td
index 748b668594be5..95ba4511c2cd2 100644
--- a/mlir/include/mlir/Interfaces/LoopLikeInterface.td
+++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.td
@@ -52,7 +52,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
         If there is a single induction variable return it, otherwise return
         std::nullopt.
       }],
-      /*retTy=*/"::llvm::Optional<::mlir::Value>",
+      /*retTy=*/"::std::optional<::mlir::Value>",
       /*methodName=*/"getSingleInductionVar",
       /*args=*/(ins),
       /*methodBody=*/"",
@@ -64,7 +64,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
         Return the single lower bound value or attribute if it exists, otherwise
         return std::nullopt.
       }],
-      /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>",
+      /*retTy=*/"::std::optional<::mlir::OpFoldResult>",
       /*methodName=*/"getSingleLowerBound",
       /*args=*/(ins),
       /*methodBody=*/"",
@@ -76,7 +76,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
         Return the single step value or attribute if it exists, otherwise
         return std::nullopt.
       }],
-      /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>",
+      /*retTy=*/"::std::optional<::mlir::OpFoldResult>",
       /*methodName=*/"getSingleStep",
       /*args=*/(ins),
       /*methodBody=*/"",
@@ -88,7 +88,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
         Return the single upper bound value or attribute if it exists, otherwise
         return std::nullopt.
       }],
-      /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>",
+      /*retTy=*/"::std::optional<::mlir::OpFoldResult>",
       /*methodName=*/"getSingleUpperBound",
       /*args=*/(ins),
       /*methodBody=*/"",

diff  --git a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td
index e821c9e03abd8..bbf2392114626 100644
--- a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td
+++ b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td
@@ -106,7 +106,7 @@ class EffectOpInterfaceBase<string name, string baseEffect>
     /// Return the effect of the given type `Effect` that is applied to the
     /// given value, or std::nullopt if no effect exists.
     template <typename Effect>
-    ::llvm::Optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>>
+    ::std::optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>>
     getEffectOnValue(::mlir::Value value) {
       llvm::SmallVector<::mlir::SideEffects::EffectInstance<
               }] # baseEffect # [{>, 4> effects;

diff  --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td
index 645f57fb95883..27352bd971c65 100644
--- a/mlir/include/mlir/Interfaces/VectorInterfaces.td
+++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td
@@ -28,7 +28,7 @@ def VectorUnrollOpInterface : OpInterface<"VectorUnrollOpInterface"> {
         `targetShape`. Return `None` if the op cannot be unrolled to the target
         vector shape.
       }],
-      /*retTy=*/"::llvm::Optional<::llvm::SmallVector<int64_t, 4>>",
+      /*retTy=*/"::std::optional<::llvm::SmallVector<int64_t, 4>>",
       /*methodName=*/"getShapeForUnroll",
       /*args=*/(ins),
       /*methodBody=*/"",
@@ -143,7 +143,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
     >,
     InterfaceMethod<
       /*desc=*/"Return the `in_bounds` boolean ArrayAttr.",
-      /*retTy=*/"::llvm::Optional<::mlir::ArrayAttr>",
+      /*retTy=*/"::std::optional<::mlir::ArrayAttr>",
       /*methodName=*/"in_bounds",
       /*args=*/(ins),
       /*methodBody=*/"return $_op.getInBounds();"

diff  --git a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp
index a69935601ee0f..1b7dee919753c 100644
--- a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp
+++ b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp
@@ -45,7 +45,7 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
   // this region predecessor that correspond to the input values of `region`. If
   // an index could not be found, std::nullopt is returned instead.
   auto getOperandIndexIfPred =
-      [&](Optional<unsigned> predIndex) -> Optional<unsigned> {
+      [&](std::optional<unsigned> predIndex) -> std::optional<unsigned> {
     SmallVector<RegionSuccessor, 2> successors;
     branch.getSuccessorRegions(predIndex, successors);
     for (RegionSuccessor &successor : successors) {
@@ -75,12 +75,12 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
   };
 
   // Check branches from the parent operation.
-  Optional<unsigned> regionIndex;
+  std::optional<unsigned> regionIndex;
   if (region) {
     // Determine the actual region number from the passed region.
     regionIndex = region->getRegionNumber();
   }
-  if (Optional<unsigned> operandIndex =
+  if (std::optional<unsigned> operandIndex =
           getOperandIndexIfPred(/*predIndex=*/std::nullopt)) {
     collectUnderlyingAddressValues(
         branch.getSuccessorEntryOperands(regionIndex)[*operandIndex], maxDepth,
@@ -89,7 +89,7 @@ static void collectUnderlyingAddressValues(RegionBranchOpInterface branch,
   // Check branches from each child region.
   Operation *op = branch.getOperation();
   for (int i = 0, e = op->getNumRegions(); i != e; ++i) {
-    if (Optional<unsigned> operandIndex = getOperandIndexIfPred(i)) {
+    if (std::optional<unsigned> operandIndex = getOperandIndexIfPred(i)) {
       for (Block &block : op->getRegion(i)) {
         Operation *term = block.getTerminator();
         // Try to determine possible region-branch successor operands for the
@@ -211,7 +211,8 @@ static void collectUnderlyingAddressValues(Value value,
 /// non-null it specifies the parent operation that the allocation does not
 /// escape. If no scope is found, `allocScopeOp` is set to nullptr.
 static LogicalResult
-getAllocEffectFor(Value value, Optional<MemoryEffects::EffectInstance> &effect,
+getAllocEffectFor(Value value,
+                  std::optional<MemoryEffects::EffectInstance> &effect,
                   Operation *&allocScopeOp) {
   // Try to get a memory effect interface for the parent operation.
   Operation *op;
@@ -249,7 +250,7 @@ static AliasResult aliasImpl(Value lhs, Value rhs) {
   if (lhs == rhs)
     return AliasResult::MustAlias;
   Operation *lhsAllocScope = nullptr, *rhsAllocScope = nullptr;
-  Optional<MemoryEffects::EffectInstance> lhsAlloc, rhsAlloc;
+  std::optional<MemoryEffects::EffectInstance> lhsAlloc, rhsAlloc;
 
   // Handle the case where lhs is a constant.
   Attribute lhsAttr, rhsAttr;

diff  --git a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
index 533d88d494d1e..84311e47ef8c7 100644
--- a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
@@ -165,7 +165,7 @@ void IntegerRangeAnalysis::visitNonControlFlowArguments(
   /// Given the results of getConstant{Lower,Upper}Bound() or getConstantStep()
   /// on a LoopLikeInterface return the lower/upper bound for that result if
   /// possible.
-  auto getLoopBoundFromFold = [&](Optional<OpFoldResult> loopBound,
+  auto getLoopBoundFromFold = [&](std::optional<OpFoldResult> loopBound,
                                   Type boundType, bool getUpper) {
     unsigned int width = ConstantIntRanges::getStorageBitwidth(boundType);
     if (loopBound.has_value()) {
@@ -190,14 +190,14 @@ void IntegerRangeAnalysis::visitNonControlFlowArguments(
 
   // Infer bounds for loop arguments that have static bounds
   if (auto loop = dyn_cast<LoopLikeOpInterface>(op)) {
-    Optional<Value> iv = loop.getSingleInductionVar();
+    std::optional<Value> iv = loop.getSingleInductionVar();
     if (!iv) {
       return SparseDataFlowAnalysis ::visitNonControlFlowArguments(
           op, successor, argLattices, firstIndex);
     }
-    Optional<OpFoldResult> lowerBound = loop.getSingleLowerBound();
-    Optional<OpFoldResult> upperBound = loop.getSingleUpperBound();
-    Optional<OpFoldResult> step = loop.getSingleStep();
+    std::optional<OpFoldResult> lowerBound = loop.getSingleLowerBound();
+    std::optional<OpFoldResult> upperBound = loop.getSingleUpperBound();
+    std::optional<OpFoldResult> step = loop.getSingleStep();
     APInt min = getLoopBoundFromFold(lowerBound, iv->getType(),
                                      /*getUpper=*/false);
     APInt max = getLoopBoundFromFold(upperBound, iv->getType(),
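
The hunk above shows the consume-or-bail pattern for the optional loop queries: fetch the induction variable and bounds, and fall back when any of them is missing. A self-contained sketch of that pattern, shown here computing a trip count from possibly-missing bounds (a simplification for illustration, not what the analysis above computes):

    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct LoopBounds {                 // hypothetical, for illustration only
      std::optional<int64_t> lower, upper, step;
    };

    std::optional<int64_t> tripCount(const LoopBounds &l) {
      if (!l.lower || !l.upper || !l.step || *l.step <= 0)
        return std::nullopt;            // missing data: bail out, much like
                                        // the fallback path in the analysis
      return (*l.upper - *l.lower + *l.step - 1) / *l.step;
    }

    int main() {
      std::cout << tripCount({0, 10, 2}).value_or(-1) << "\n";           // 5
      std::cout << tripCount({0, std::nullopt, 2}).value_or(-1) << "\n"; // -1
      return 0;
    }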

diff  --git a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp
index c5d2ac4ca01ff..94b662987dc49 100644
--- a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp
@@ -208,7 +208,7 @@ void AbstractSparseDataFlowAnalysis::visitBlock(Block *block) {
 
 void AbstractSparseDataFlowAnalysis::visitRegionSuccessors(
     ProgramPoint point, RegionBranchOpInterface branch,
-    Optional<unsigned> successorIndex,
+    std::optional<unsigned> successorIndex,
     ArrayRef<AbstractSparseLattice *> lattices) {
   const auto *predecessors = getOrCreateFor<PredecessorState>(point, point);
   assert(predecessors->allPredecessorsKnown() &&
@@ -216,7 +216,7 @@ void AbstractSparseDataFlowAnalysis::visitRegionSuccessors(
 
   for (Operation *op : predecessors->getKnownPredecessors()) {
     // Get the incoming successor operands.
-    Optional<OperandRange> operands;
+    std::optional<OperandRange> operands;
 
     // Check if the predecessor is the parent op.
     if (op == branch) {
@@ -390,7 +390,7 @@ void AbstractSparseBackwardDataFlowAnalysis::visitOperation(Operation *op) {
             forwarded.getBeginOperandIndex(), forwarded.size());
         for (OpOperand &operand : operands) {
           unaccounted.reset(operand.getOperandNumber());
-          if (Optional<BlockArgument> blockArg =
+          if (std::optional<BlockArgument> blockArg =
                   detail::getBranchSuccessorArgument(
                       successorOperands, operand.getOperandNumber(), block)) {
             meet(getLatticeElement(operand.get()),

diff  --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp
index 7eb839d875b1d..6fb32de37b373 100644
--- a/mlir/lib/AsmParser/Parser.cpp
+++ b/mlir/lib/AsmParser/Parser.cpp
@@ -1871,7 +1871,7 @@ OperationParser::parseCustomOperation(ArrayRef<ResultRecord> resultIDs) {
     if (iface && !iface->getDefaultDialect().empty())
       defaultDialect = iface->getDefaultDialect();
   } else {
-    Optional<Dialect::ParseOpHook> dialectHook;
+    std::optional<Dialect::ParseOpHook> dialectHook;
     Dialect *dialect = opNameInfo->getDialect();
     if (!dialect) {
       InFlightDiagnostic diag =

diff  --git a/mlir/lib/CAPI/Interfaces/Interfaces.cpp b/mlir/lib/CAPI/Interfaces/Interfaces.cpp
index 66d3bb187f2e7..ec2cf017581a8 100644
--- a/mlir/lib/CAPI/Interfaces/Interfaces.cpp
+++ b/mlir/lib/CAPI/Interfaces/Interfaces.cpp
@@ -46,7 +46,7 @@ MlirLogicalResult mlirInferTypeOpInterfaceInferReturnTypes(
   if (!info)
     return mlirLogicalResultFailure();
 
-  llvm::Optional<Location> maybeLocation;
+  std::optional<Location> maybeLocation;
   if (!mlirLocationIsNull(location))
     maybeLocation = unwrap(location);
   SmallVector<Value> unwrappedOperands;

diff  --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
index a3f033d7d80ad..a8e2d28e98c66 100644
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -223,7 +223,7 @@ class AffineParallelLowering : public OpRewritePattern<AffineParallelOp> {
       // initialization of the result values.
       Attribute reduction = std::get<0>(pair);
       Type resultType = std::get<1>(pair);
-      Optional<arith::AtomicRMWKind> reductionOp =
+      std::optional<arith::AtomicRMWKind> reductionOp =
           arith::symbolizeAtomicRMWKind(
               static_cast<uint64_t>(reduction.cast<IntegerAttr>().getInt()));
       assert(reductionOp && "Reduction operation cannot be of None Type");
@@ -243,7 +243,7 @@ class AffineParallelLowering : public OpRewritePattern<AffineParallelOp> {
            "Unequal number of reductions and operands.");
     for (unsigned i = 0, end = reductions.size(); i < end; i++) {
       // For each of the reduction operations get the respective mlir::Value.
-      Optional<arith::AtomicRMWKind> reductionOp =
+      std::optional<arith::AtomicRMWKind> reductionOp =
           arith::symbolizeAtomicRMWKind(
               reductions[i].cast<IntegerAttr>().getInt());
       assert(reductionOp && "Reduction Operation cannot be of None Type");

diff  --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index d308d2be86b26..62a1fa5f10a9f 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -442,8 +442,8 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
     return rewriter.create<LLVM::LoadOp>(loc, sizePtr);
   }
 
-  Optional<int64_t> getConstantDimIndex(memref::DimOp dimOp) const {
-    if (Optional<int64_t> idx = dimOp.getConstantIndex())
+  std::optional<int64_t> getConstantDimIndex(memref::DimOp dimOp) const {
+    if (auto idx = dimOp.getConstantIndex())
       return idx;
 
     if (auto constantOp = dimOp.getIndex().getDefiningOp<LLVM::ConstantOp>())
@@ -462,7 +462,7 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
 
     // Take advantage if index is constant.
     MemRefType memRefType = operandType.cast<MemRefType>();
-    if (Optional<int64_t> index = getConstantDimIndex(dimOp)) {
+    if (std::optional<int64_t> index = getConstantDimIndex(dimOp)) {
       int64_t i = *index;
       if (memRefType.isDynamicDim(i)) {
         // extract dynamic size from the memref descriptor.

diff  --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 1347b0cf3b8a8..296683b1a50dd 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -298,14 +298,15 @@ struct MmaSyncOptoNVVM : public ConvertOpToLLVMPattern<nvgpu::MmaSyncOp> {
     FailureOr<NVVM::MMATypes> ptxTypeB = getNvvmMmaType(bType);
     if (failed(ptxTypeB))
       return op->emitOpError("failed to deduce operand PTX types");
-    Optional<NVVM::MMATypes> ptxTypeC = NVVM::MmaOp::inferOperandMMAType(
-        cType.getElementType(), /*isAccumulator=*/true);
+    std::optional<NVVM::MMATypes> ptxTypeC =
+        NVVM::MmaOp::inferOperandMMAType(cType.getElementType(),
+                                         /*isAccumulator=*/true);
     if (!ptxTypeC)
       return op->emitError(
           "could not infer the PTX type for the accumulator/result");
 
     // TODO: add an attribute to the op to customize this behavior.
-    Optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
+    std::optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
     if (aType.getElementType().isa<IntegerType>())
       overflow = NVVM::MMAIntOverflow::satfinite;
 
@@ -413,7 +414,7 @@ buildMmaSparseAsmString(const std::array<int64_t, 3> &shape, unsigned matASize,
                         unsigned matBSize, unsigned matCSize,
                         NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB,
                         NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD,
-                        Optional<NVVM::MMAIntOverflow> overflow) {
+                        std::optional<NVVM::MMAIntOverflow> overflow) {
   auto ptxTypeStr = [](NVVM::MMATypes ptxType) {
     return NVVM::stringifyMMATypes(ptxType);
   };
@@ -449,7 +450,7 @@ buildMmaSparseAsmString(const std::array<int64_t, 3> &shape, unsigned matASize,
 static FailureOr<LLVM::InlineAsmOp> emitMmaSparseSyncOpAsm(
     Location loc, NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB,
     NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD,
-    Optional<NVVM::MMAIntOverflow> overflow, ArrayRef<Value> unpackedAData,
+    std::optional<NVVM::MMAIntOverflow> overflow, ArrayRef<Value> unpackedAData,
     ArrayRef<Value> unpackedB, ArrayRef<Value> unpackedC, Value indexData,
     int64_t metadataSelector, const std::array<int64_t, 3> &shape,
     Type intrinsicResultType, ConversionPatternRewriter &rewriter) {
@@ -505,8 +506,9 @@ struct NVGPUMmaSparseSyncLowering
     FailureOr<NVVM::MMATypes> ptxTypeB = getNvvmMmaType(bType);
     if (failed(ptxTypeB))
       return op->emitOpError("failed to deduce operand PTX types");
-    Optional<NVVM::MMATypes> ptxTypeC = NVVM::MmaOp::inferOperandMMAType(
-        cType.getElementType(), /*isAccumulator=*/true);
+    std::optional<NVVM::MMATypes> ptxTypeC =
+        NVVM::MmaOp::inferOperandMMAType(cType.getElementType(),
+                                         /*isAccumulator=*/true);
     if (!ptxTypeC)
       return op->emitError(
           "could not infer the PTX type for the accumulator/result");
@@ -517,7 +519,7 @@ struct NVGPUMmaSparseSyncLowering
       return failure();
 
     // TODO: add an attribute to the op to customize this behavior.
-    Optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
+    std::optional<NVVM::MMAIntOverflow> overflow(std::nullopt);
     if (aType.getElementType().isa<IntegerType>())
       overflow = NVVM::MMAIntOverflow::satfinite;
 

diff  --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
index 75a01409bddc2..fc0c845b69987 100644
--- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp
@@ -595,7 +595,7 @@ void PatternLowering::generate(SuccessNode *successNode, Block *&currentBlock) {
   // Grab the root kind if present.
   StringAttr rootKindAttr;
   if (pdl::OperationOp rootOp = root.getDefiningOp<pdl::OperationOp>())
-    if (Optional<StringRef> rootKind = rootOp.getOpName())
+    if (std::optional<StringRef> rootKind = rootOp.getOpName())
       rootKindAttr = builder.getStringAttr(*rootKind);
 
   builder.setInsertionPointToEnd(currentBlock);

diff  --git a/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h b/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h
index 0307239865f2b..1027ed00757ce 100644
--- a/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h
+++ b/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h
@@ -222,7 +222,7 @@ struct OperandPosition
 struct OperandGroupPosition
     : public PredicateBase<
           OperandGroupPosition, Position,
-          std::tuple<OperationPosition *, Optional<unsigned>, bool>,
+          std::tuple<OperationPosition *, std::optional<unsigned>, bool>,
           Predicates::OperandGroupPos> {
   explicit OperandGroupPosition(const KeyTy &key);
 
@@ -233,7 +233,9 @@ struct OperandGroupPosition
 
   /// Returns the group number of this position. If std::nullopt, this group
   /// refers to all operands.
-  Optional<unsigned> getOperandGroupNumber() const { return std::get<1>(key); }
+  std::optional<unsigned> getOperandGroupNumber() const {
+    return std::get<1>(key);
+  }
 
   /// Returns if the operand group has unknown size. If false, the operand group
   /// has at max one element.
@@ -298,7 +300,7 @@ struct ResultPosition
 struct ResultGroupPosition
     : public PredicateBase<
           ResultGroupPosition, Position,
-          std::tuple<OperationPosition *, Optional<unsigned>, bool>,
+          std::tuple<OperationPosition *, std::optional<unsigned>, bool>,
           Predicates::ResultGroupPos> {
   explicit ResultGroupPosition(const KeyTy &key) : Base(key) {
     parent = std::get<0>(key);
@@ -311,7 +313,9 @@ struct ResultGroupPosition
 
   /// Returns the group number of this position. If std::nullopt, this group
   /// refers to all results.
-  Optional<unsigned> getResultGroupNumber() const { return std::get<1>(key); }
+  std::optional<unsigned> getResultGroupNumber() const {
+    return std::get<1>(key);
+  }
 
   /// Returns if the result group has unknown size. If false, the result group
   /// has at max one element.
@@ -595,7 +599,7 @@ class PredicateBuilder {
   }
 
   /// Returns a position for a group of operands of the given operation.
-  Position *getOperandGroup(OperationPosition *p, Optional<unsigned> group,
+  Position *getOperandGroup(OperationPosition *p, std::optional<unsigned> group,
                             bool isVariadic) {
     return OperandGroupPosition::get(uniquer, p, group, isVariadic);
   }
@@ -609,7 +613,7 @@ class PredicateBuilder {
   }
 
   /// Returns a position for a group of results of the given operation.
-  Position *getResultGroup(OperationPosition *p, Optional<unsigned> group,
+  Position *getResultGroup(OperationPosition *p, std::optional<unsigned> group,
                            bool isVariadic) {
     return ResultGroupPosition::get(uniquer, p, group, isVariadic);
   }

diff  --git a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
index 683b18e1d1920..076e2a247d1a7 100644
--- a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp
@@ -81,7 +81,7 @@ static void getOperandTreePredicates(std::vector<PositionalPredicate> &predList,
                             builder.getType(pos));
       })
       .Case<pdl::ResultOp, pdl::ResultsOp>([&](auto op) {
-        Optional<unsigned> index = op.getIndex();
+        std::optional<unsigned> index = op.getIndex();
 
         // Prevent traversal into a null value if the result has a proper index.
         if (index)
@@ -106,11 +106,11 @@ static void getOperandTreePredicates(std::vector<PositionalPredicate> &predList,
       });
 }
 
-static void getTreePredicates(std::vector<PositionalPredicate> &predList,
-                              Value val, PredicateBuilder &builder,
-                              DenseMap<Value, Position *> &inputs,
-                              OperationPosition *pos,
-                              Optional<unsigned> ignoreOperand = std::nullopt) {
+static void
+getTreePredicates(std::vector<PositionalPredicate> &predList, Value val,
+                  PredicateBuilder &builder,
+                  DenseMap<Value, Position *> &inputs, OperationPosition *pos,
+                  std::optional<unsigned> ignoreOperand = std::nullopt) {
   assert(val.getType().isa<pdl::OperationType>() && "expected operation");
   pdl::OperationOp op = cast<pdl::OperationOp>(val.getDefiningOp());
   OperationPosition *opPos = cast<OperationPosition>(pos);
@@ -120,7 +120,7 @@ static void getTreePredicates(std::vector<PositionalPredicate> &predList,
     predList.emplace_back(pos, builder.getIsNotNull());
 
   // Check that this is the correct root operation.
-  if (Optional<StringRef> opName = op.getOpName())
+  if (std::optional<StringRef> opName = op.getOpName())
     predList.emplace_back(pos, builder.getOperationName(*opName));
 
   // Check that the operation has the proper number of operands. If there are
@@ -302,7 +302,7 @@ static void getResultPredicates(pdl::ResultsOp op,
   // Ensure that the result isn't null if the result has an index.
   auto *parentPos = cast<OperationPosition>(inputs.lookup(op.getParent()));
   bool isVariadic = op.getType().isa<pdl::RangeType>();
-  Optional<unsigned> index = op.getIndex();
+  std::optional<unsigned> index = op.getIndex();
   resultPos = builder.getResultGroup(parentPos, index, isVariadic);
   if (index)
     predList.emplace_back(resultPos, builder.getIsNotNull());
@@ -356,7 +356,7 @@ namespace {
 /// An op accepting a value at an optional index.
 struct OpIndex {
   Value parent;
-  Optional<unsigned> index;
+  std::optional<unsigned> index;
 };
 
 /// The parent and operand index of each operation for each root, stored
@@ -408,12 +408,13 @@ static void buildCostGraph(ArrayRef<Value> roots, RootOrderingGraph &graph,
   // * the operand index of the value in its parent;
   // * the depth of the visited value.
   struct Entry {
-    Entry(Value value, Value parent, Optional<unsigned> index, unsigned depth)
+    Entry(Value value, Value parent, std::optional<unsigned> index,
+          unsigned depth)
         : value(value), parent(parent), index(index), depth(depth) {}
 
     Value value;
     Value parent;
-    Optional<unsigned> index;
+    std::optional<unsigned> index;
     unsigned depth;
   };
 

diff  --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 7cafc2749af9c..8c11ad2ecf8b3 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -335,7 +335,7 @@ static bool isDimOpValidSymbol(ShapedDimOpInterface dimOp, Region *region) {
 
   // The dim op is also okay if its operand memref is a view/subview whose
   // corresponding size is a valid symbol.
-  Optional<int64_t> index = getConstantIntValue(dimOp.getDimension());
+  std::optional<int64_t> index = getConstantIntValue(dimOp.getDimension());
   assert(index.has_value() &&
          "expect only `dim` operations with a constant index");
   int64_t i = index.value();
@@ -923,7 +923,7 @@ template <typename OpTy, typename... Args>
 static std::enable_if_t<OpTy::template hasTrait<OpTrait::OneResult>(),
                         OpFoldResult>
 createOrFold(OpBuilder &b, Location loc, ValueRange operands,
-             Args &&... leadingArguments) {
+             Args &&...leadingArguments) {
   // Identify the constant operands and extract their values as attributes.
   // Note that we cannot use the original values directly because the list of
   // operands may have changed due to canonicalization and composition.
@@ -2009,7 +2009,7 @@ static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
 
 namespace {
 /// Returns constant trip count in trivial cases.
-static Optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
+static std::optional<uint64_t> getTrivialConstantTripCount(AffineForOp forOp) {
   int64_t step = forOp.getStep();
   if (!forOp.hasConstantBounds() || step <= 0)
     return std::nullopt;
@@ -2030,7 +2030,7 @@ struct AffineForEmptyLoopFolder : public OpRewritePattern<AffineForOp> {
       return failure();
     if (forOp.getNumResults() == 0)
       return success();
-    Optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
+    std::optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
     if (tripCount && *tripCount == 0) {
       // The initial values of the iteration arguments would be the op's
       // results.
@@ -2082,7 +2082,8 @@ void AffineForOp::getCanonicalizationPatterns(RewritePatternSet &results,
 /// correspond to the loop iterator operands, i.e., those excluding the
 /// induction variable. AffineForOp only has one region, so zero is the only
 /// valid value for `index`.
-OperandRange AffineForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+OperandRange
+AffineForOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
   assert((!index || *index == 0) && "invalid region index");
 
   // The initial operands map to the loop arguments after the induction
@@ -2096,14 +2097,14 @@ OperandRange AffineForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
 /// correspond to a constant value for each operand, or null if that operand is
 /// not a constant.
 void AffineForOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   assert((!index.has_value() || index.value() == 0) && "expected loop region");
   // The loop may typically branch back to its body or to the parent operation.
   // If the predecessor is the parent op and the trip count is known to be at
   // least one, branch into the body using the iterator arguments. And in cases
   // we know the trip count is zero, it can only branch back to its parent.
-  Optional<uint64_t> tripCount = getTrivialConstantTripCount(*this);
+  std::optional<uint64_t> tripCount = getTrivialConstantTripCount(*this);
   if (!index.has_value() && tripCount.has_value()) {
     if (tripCount.value() > 0) {
       regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs()));
@@ -2130,7 +2131,7 @@ void AffineForOp::getSuccessorRegions(
 
 /// Returns true if the affine.for has zero iterations in trivial cases.
 static bool hasTrivialZeroTripCount(AffineForOp op) {
-  Optional<uint64_t> tripCount = getTrivialConstantTripCount(op);
+  std::optional<uint64_t> tripCount = getTrivialConstantTripCount(op);
   return tripCount && *tripCount == 0;
 }
 
@@ -2262,23 +2263,23 @@ bool AffineForOp::matchingBoundOperandList() {
 
 Region &AffineForOp::getLoopBody() { return getRegion(); }
 
-Optional<Value> AffineForOp::getSingleInductionVar() {
+std::optional<Value> AffineForOp::getSingleInductionVar() {
   return getInductionVar();
 }
 
-Optional<OpFoldResult> AffineForOp::getSingleLowerBound() {
+std::optional<OpFoldResult> AffineForOp::getSingleLowerBound() {
   if (!hasConstantLowerBound())
     return std::nullopt;
   OpBuilder b(getContext());
   return OpFoldResult(b.getI64IntegerAttr(getConstantLowerBound()));
 }
 
-Optional<OpFoldResult> AffineForOp::getSingleStep() {
+std::optional<OpFoldResult> AffineForOp::getSingleStep() {
   OpBuilder b(getContext());
   return OpFoldResult(b.getI64IntegerAttr(getStep()));
 }
 
-Optional<OpFoldResult> AffineForOp::getSingleUpperBound() {
+std::optional<OpFoldResult> AffineForOp::getSingleUpperBound() {
   if (!hasConstantUpperBound())
     return std::nullopt;
   OpBuilder b(getContext());
@@ -2541,7 +2542,7 @@ struct AlwaysTrueOrFalseIf : public OpRewritePattern<AffineIfOp> {
 /// AffineIfOp has two regions -- `then` and `else`. The flow of data should be
 /// as follows: AffineIfOp -> `then`/`else` -> AffineIfOp
 void AffineIfOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   // If the predecessor is an AffineIfOp, then branching into both `then` and
   // `else` region is valid.
@@ -3567,7 +3568,7 @@ AffineValueMap AffineParallelOp::getUpperBoundsValueMap() {
   return AffineValueMap(getUpperBoundsMap(), getUpperBoundsOperands());
 }
 
-Optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
+std::optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
   if (hasMinMaxBounds())
     return std::nullopt;
 
@@ -3985,7 +3986,7 @@ ParseResult AffineParallelOp::parse(OpAsmParser &parser,
       if (parser.parseAttribute(attrVal, builder.getNoneType(), "reduce",
                                 attrStorage))
         return failure();
-      llvm::Optional<arith::AtomicRMWKind> reduction =
+      std::optional<arith::AtomicRMWKind> reduction =
           arith::symbolizeAtomicRMWKind(attrVal.getValue());
       if (!reduction)
         return parser.emitError(loc, "invalid reduction value: ") << attrVal;
@@ -4231,7 +4232,7 @@ void AffineDelinearizeIndexOp::build(OpBuilder &builder, OperationState &result,
   result.addOperands(linearIndex);
   SmallVector<Value> basisValues =
       llvm::to_vector(llvm::map_range(basis, [&](OpFoldResult ofr) -> Value {
-        Optional<int64_t> staticDim = getConstantIntValue(ofr);
+        std::optional<int64_t> staticDim = getConstantIntValue(ofr);
         if (staticDim.has_value())
           return builder.create<arith::ConstantIndexOp>(result.location,
                                                         *staticDim);
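
Stepping back to the AffineForOp::getSuccessorRegions hunk above: the optional trip count drives which region successors are reported when control enters from the parent op. A toy sketch of that decision, with strings in place of RegionSuccessor (illustrative only):

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    std::vector<std::string>
    successorsFromParent(std::optional<uint64_t> tripCount) {
      if (tripCount) {
        if (*tripCount > 0)
          return {"body"};        // known to execute at least once
        return {"parent"};        // known to never enter the body
      }
      return {"body", "parent"};  // unknown trip count: both are possible
    }

    int main() {
      std::cout << successorsFromParent(0).size() << "\n";            // 1
      std::cout << successorsFromParent(4).size() << "\n";            // 1
      std::cout << successorsFromParent(std::nullopt).size() << "\n"; // 2
      return 0;
    }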

diff  --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
index 590ea4d0716e8..74e226d7a3452 100644
--- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
+++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
@@ -246,7 +246,8 @@ void arith::AddIOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
 // AddUIExtendedOp
 //===----------------------------------------------------------------------===//
 
-Optional<SmallVector<int64_t, 4>> arith::AddUIExtendedOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>>
+arith::AddUIExtendedOp::getShapeForUnroll() {
   if (auto vt = getType(0).dyn_cast<VectorType>())
     return llvm::to_vector<4>(vt.getShape());
   return std::nullopt;
@@ -378,7 +379,8 @@ OpFoldResult arith::MulIOp::fold(ArrayRef<Attribute> operands) {
 // MulSIExtendedOp
 //===----------------------------------------------------------------------===//
 
-Optional<SmallVector<int64_t, 4>> arith::MulSIExtendedOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>>
+arith::MulSIExtendedOp::getShapeForUnroll() {
   if (auto vt = getType(0).dyn_cast<VectorType>())
     return llvm::to_vector<4>(vt.getShape());
   return std::nullopt;
@@ -424,7 +426,8 @@ void arith::MulSIExtendedOp::getCanonicalizationPatterns(
 // MulUIExtendedOp
 //===----------------------------------------------------------------------===//
 
-Optional<SmallVector<int64_t, 4>> arith::MulUIExtendedOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>>
+arith::MulUIExtendedOp::getShapeForUnroll() {
   if (auto vt = getType(0).dyn_cast<VectorType>())
     return llvm::to_vector<4>(vt.getShape());
   return std::nullopt;
@@ -1639,7 +1642,7 @@ static Attribute getBoolAttribute(Type type, MLIRContext *ctx, bool value) {
   return DenseElementsAttr::get(shapedType, boolAttr);
 }
 
-static Optional<int64_t> getIntegerWidth(Type t) {
+static std::optional<int64_t> getIntegerWidth(Type t) {
   if (auto intType = t.dyn_cast<IntegerType>()) {
     return intType.getWidth();
   }
@@ -1661,7 +1664,7 @@ OpFoldResult arith::CmpIOp::fold(ArrayRef<Attribute> operands) {
   if (matchPattern(getRhs(), m_Zero())) {
     if (auto extOp = getLhs().getDefiningOp<ExtSIOp>()) {
       // extsi(%x : i1 -> iN) != 0  ->  %x
-      Optional<int64_t> integerWidth =
+      std::optional<int64_t> integerWidth =
           getIntegerWidth(extOp.getOperand().getType());
       if (integerWidth && integerWidth.value() == 1 &&
           getPredicate() == arith::CmpIPredicate::ne)
@@ -1669,7 +1672,7 @@ OpFoldResult arith::CmpIOp::fold(ArrayRef<Attribute> operands) {
     }
     if (auto extOp = getLhs().getDefiningOp<ExtUIOp>()) {
       // extui(%x : i1 -> iN) != 0  ->  %x
-      Optional<int64_t> integerWidth =
+      std::optional<int64_t> integerWidth =
           getIntegerWidth(extOp.getOperand().getType());
       if (integerWidth && integerWidth.value() == 1 &&
           getPredicate() == arith::CmpIPredicate::ne)

diff --git a/mlir/lib/Dialect/Async/IR/Async.cpp b/mlir/lib/Dialect/Async/IR/Async.cpp
index 54acc373018c0..db481f19fcd85 100644
--- a/mlir/lib/Dialect/Async/IR/Async.cpp
+++ b/mlir/lib/Dialect/Async/IR/Async.cpp
@@ -53,7 +53,7 @@ LogicalResult YieldOp::verify() {
 }
 
 MutableOperandRange
-YieldOp::getMutableSuccessorOperands(Optional<unsigned> index) {
+YieldOp::getMutableSuccessorOperands(std::optional<unsigned> index) {
   return getOperandsMutable();
 }
 
@@ -63,7 +63,8 @@ YieldOp::getMutableSuccessorOperands(Optional<unsigned> index) {
 
 constexpr char kOperandSegmentSizesAttr[] = "operand_segment_sizes";
 
-OperandRange ExecuteOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+OperandRange
+ExecuteOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
   assert(index && *index == 0 && "invalid region index");
   return getBodyOperands();
 }
@@ -77,7 +78,7 @@ bool ExecuteOp::areTypesCompatible(Type lhs, Type rhs) {
   return getValueOrTokenType(lhs) == getValueOrTokenType(rhs);
 }
 
-void ExecuteOp::getSuccessorRegions(Optional<unsigned> index,
+void ExecuteOp::getSuccessorRegions(std::optional<unsigned> index,
                                     ArrayRef<Attribute>,
                                     SmallVectorImpl<RegionSuccessor> &regions) {
   // The `body` region branch back to the parent operation.

diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 432705dce8893..374a1f324be92 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -348,7 +348,7 @@ struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {
 
   LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                 PatternRewriter &rewriter) const override {
-    Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
+    std::optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
     auto allocTensorOp = dimOp.getSource().getDefiningOp<AllocTensorOp>();
     if (!allocTensorOp || !maybeConstantIndex)
       return failure();

diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
index 3b520f01e9547..1d61a8be57815 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
@@ -104,7 +104,7 @@ void BufferViewFlowAnalysis::build(Operation *op) {
                                           successorRegions);
       for (RegionSuccessor &successorRegion : successorRegions) {
         // Determine the current region index (if any).
-        Optional<unsigned> regionIndex;
+        std::optional<unsigned> regionIndex;
         Region *regionSuccessor = successorRegion.getSuccessor();
         if (regionSuccessor)
           regionIndex = regionSuccessor->getRegionNumber();

diff --git a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
index 5ca2281b4766e..6230d67806556 100644
--- a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
+++ b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
@@ -595,7 +595,7 @@ SuccessorOperands SwitchOp::getSuccessorOperands(unsigned index) {
 }
 
 Block *SwitchOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
-  Optional<DenseIntElementsAttr> caseValues = getCaseValues();
+  std::optional<DenseIntElementsAttr> caseValues = getCaseValues();
 
   if (!caseValues)
     return getDefaultDestination();
@@ -805,7 +805,8 @@ simplifySwitchFromSwitchOnSameCondition(SwitchOp op,
   SuccessorRange predDests = predSwitch.getCaseDestinations();
   auto it = llvm::find(predDests, currentBlock);
   if (it != predDests.end()) {
-    Optional<DenseIntElementsAttr> predCaseValues = predSwitch.getCaseValues();
+    std::optional<DenseIntElementsAttr> predCaseValues =
+        predSwitch.getCaseValues();
     foldSwitch(op, rewriter,
                predCaseValues->getValues<APInt>()[it - predDests.begin()]);
   } else {

diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
index 5b3a0f6a31fc5..91046c7f70f21 100644
--- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
+++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
@@ -84,7 +84,7 @@ LogicalResult emitc::CallOp::verify() {
   if (getCallee().empty())
     return emitOpError("callee must not be empty");
 
-  if (Optional<ArrayAttr> argsAttr = getArgs()) {
+  if (std::optional<ArrayAttr> argsAttr = getArgs()) {
     for (Attribute arg : *argsAttr) {
       auto intAttr = arg.dyn_cast<IntegerAttr>();
       if (intAttr && intAttr.getType().isa<IndexType>()) {
@@ -102,7 +102,7 @@ LogicalResult emitc::CallOp::verify() {
     }
   }
 
-  if (Optional<ArrayAttr> templateArgsAttr = getTemplateArgs()) {
+  if (std::optional<ArrayAttr> templateArgsAttr = getTemplateArgs()) {
     for (Attribute tArg : *templateArgsAttr) {
       if (!tArg.isa<TypeAttr, IntegerAttr, FloatAttr, emitc::OpaqueAttr>())
         return emitOpError("template argument has invalid type");

diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 9ea1b11593694..e1d92b9eac315 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -367,7 +367,8 @@ static ParseResult parseAllReduceOperation(AsmParser &parser,
                                            AllReduceOperationAttr &attr) {
   StringRef enumStr;
   if (!parser.parseOptionalKeyword(&enumStr)) {
-    Optional<AllReduceOperation> op = gpu::symbolizeAllReduceOperation(enumStr);
+    std::optional<AllReduceOperation> op =
+        gpu::symbolizeAllReduceOperation(enumStr);
     if (!op)
       return parser.emitError(parser.getCurrentLocation(), "invalid op kind");
     attr = AllReduceOperationAttr::get(parser.getContext(), *op);

diff --git a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
index 57cce1942803f..ab7e7014c165a 100644
--- a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
+++ b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
@@ -26,8 +26,8 @@ using namespace mlir::transform;
 /// Check if given mapping attributes are one of the desired attributes
 static DiagnosedSilenceableFailure
 checkAttributeType(ArrayRef<DeviceMappingAttrInterface> threadMappingAttributes,
-                   const Optional<ArrayAttr> &foreachMapping,
-                   Optional<TransformOpInterface> transformOp) {
+                   const std::optional<ArrayAttr> &foreachMapping,
+                   std::optional<TransformOpInterface> transformOp) {
   if (!foreachMapping.has_value())
     return transformOp->emitSilenceableError() << "mapping must be present";
 
@@ -52,11 +52,11 @@ checkAttributeType(ArrayRef<DeviceMappingAttrInterface> threadMappingAttributes,
 /// Determines if the size of the kernel configuration is supported by the GPU
 /// architecture being used. It presently makes use of CUDA limitations, however
 /// that aspect may be enhanced for other GPUs.
-static DiagnosedSilenceableFailure
-checkGpuLimits(TransformOpInterface transformOp, Optional<int64_t> gridDimX,
-               Optional<int64_t> gridDimY, Optional<int64_t> gridDimZ,
-               Optional<int64_t> blockDimX, Optional<int64_t> blockDimY,
-               Optional<int64_t> blockDimZ) {
+static DiagnosedSilenceableFailure checkGpuLimits(
+    TransformOpInterface transformOp, std::optional<int64_t> gridDimX,
+    std::optional<int64_t> gridDimY, std::optional<int64_t> gridDimZ,
+    std::optional<int64_t> blockDimX, std::optional<int64_t> blockDimY,
+    std::optional<int64_t> blockDimZ) {
 
   static constexpr int maxTotalBlockdim = 1024;
   static constexpr int maxBlockdimx = 1024;
@@ -92,12 +92,12 @@ checkGpuLimits(TransformOpInterface transformOp, Optional<int64_t> gridDimX,
 static DiagnosedSilenceableFailure
 createGpuLaunch(RewriterBase &rewriter, Location loc,
                 TransformOpInterface transformOp, LaunchOp &launchOp,
-                Optional<int64_t> gridDimX = std::nullopt,
-                Optional<int64_t> gridDimY = std::nullopt,
-                Optional<int64_t> gridDimZ = std::nullopt,
-                Optional<int64_t> blockDimX = std::nullopt,
-                Optional<int64_t> blockDimY = std::nullopt,
-                Optional<int64_t> blockDimZ = std::nullopt) {
+                std::optional<int64_t> gridDimX = std::nullopt,
+                std::optional<int64_t> gridDimY = std::nullopt,
+                std::optional<int64_t> gridDimZ = std::nullopt,
+                std::optional<int64_t> blockDimX = std::nullopt,
+                std::optional<int64_t> blockDimY = std::nullopt,
+                std::optional<int64_t> blockDimZ = std::nullopt) {
   DiagnosedSilenceableFailure diag =
       checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX,
                      blockDimY, blockDimZ);
@@ -126,12 +126,12 @@ createGpuLaunch(RewriterBase &rewriter, Location loc,
 static DiagnosedSilenceableFailure
 alterGpuLaunch(TrivialPatternRewriter &rewriter, LaunchOp gpuLaunch,
                TransformOpInterface transformOp,
-               Optional<int64_t> gridDimX = std::nullopt,
-               Optional<int64_t> gridDimY = std::nullopt,
-               Optional<int64_t> gridDimZ = std::nullopt,
-               Optional<int64_t> blockDimX = std::nullopt,
-               Optional<int64_t> blockDimY = std::nullopt,
-               Optional<int64_t> blockDimZ = std::nullopt) {
+               std::optional<int64_t> gridDimX = std::nullopt,
+               std::optional<int64_t> gridDimY = std::nullopt,
+               std::optional<int64_t> gridDimZ = std::nullopt,
+               std::optional<int64_t> blockDimX = std::nullopt,
+               std::optional<int64_t> blockDimY = std::nullopt,
+               std::optional<int64_t> blockDimZ = std::nullopt) {
   DiagnosedSilenceableFailure diag =
       checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX,
                      blockDimY, blockDimZ);
@@ -370,7 +370,7 @@ transform::MapForeachToBlocks::applyToOne(Operation *target,
 static DiagnosedSilenceableFailure rewriteOneForeachThreadToGpuThreads(
     RewriterBase &rewriter, scf::ForeachThreadOp foreachThreadOp,
     const SmallVectorImpl<int64_t> &globalBlockDims, bool syncAfterDistribute,
-    llvm::Optional<TransformOpInterface> transformOp,
+    std::optional<TransformOpInterface> transformOp,
     const ArrayRef<DeviceMappingAttrInterface> &threadMappingAttributes) {
   // Step 0. Target-specific verifications. There is no good place to anchor
   // those right now: the ForeachThreadOp is target-independent and the
@@ -502,7 +502,7 @@ static DiagnosedSilenceableFailure rewriteOneForeachThreadToGpuThreads(
 DiagnosedSilenceableFailure mlir::transform::gpu::mapNestedForeachToThreadsImpl(
     RewriterBase &rewriter, Operation *target,
     const SmallVectorImpl<int64_t> &blockDim, bool syncAfterDistribute,
-    llvm::Optional<TransformOpInterface> transformOp,
+    std::optional<TransformOpInterface> transformOp,
     const ArrayRef<DeviceMappingAttrInterface> &threadMappingAttributes) {
   DiagnosedSilenceableFailure diag = DiagnosedSilenceableFailure::success();
   target->walk([&](scf::ForeachThreadOp foreachThreadOp) {

diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 2bb483c78dc15..33e7a7ce8aa3b 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -147,7 +147,7 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
   // Replace the string attribute `predicate` with an integer attribute.
   int64_t predicateValue = 0;
   if (std::is_same<CmpPredicateType, ICmpPredicate>()) {
-    Optional<ICmpPredicate> predicate =
+    std::optional<ICmpPredicate> predicate =
         symbolizeICmpPredicate(predicateAttr.getValue());
     if (!predicate)
       return parser.emitError(predicateLoc)
@@ -155,7 +155,7 @@ static ParseResult parseCmpOp(OpAsmParser &parser, OperationState &result) {
              << "' is an incorrect value of the 'predicate' attribute";
     predicateValue = static_cast<int64_t>(*predicate);
   } else {
-    Optional<FCmpPredicate> predicate =
+    std::optional<FCmpPredicate> predicate =
         symbolizeFCmpPredicate(predicateAttr.getValue());
     if (!predicate)
       return parser.emitError(predicateLoc)
@@ -253,7 +253,7 @@ ParseResult AllocaOp::parse(OpAsmParser &parser, OperationState &result) {
 /// Checks that the elemental type is present in either the pointer type or
 /// the attribute, but not both.
 static LogicalResult verifyOpaquePtr(Operation *op, LLVMPointerType ptrType,
-                                     Optional<Type> ptrElementType) {
+                                     std::optional<Type> ptrElementType) {
   if (ptrType.isOpaque() && !ptrElementType.has_value()) {
     return op->emitOpError() << "expected '" << kElemTypeAttrName
                              << "' attribute if opaque pointer type is used";
@@ -665,7 +665,7 @@ LogicalResult LLVM::GEPOp::verify() {
 }
 
 Type LLVM::GEPOp::getSourceElementType() {
-  if (Optional<Type> elemType = getElemType())
+  if (std::optional<Type> elemType = getElemType())
     return *elemType;
 
   return extractVectorElementType(getBase().getType())
@@ -1853,7 +1853,7 @@ LogicalResult GlobalOp::verify() {
     }
   }
 
-  Optional<uint64_t> alignAttr = getAlignment();
+  std::optional<uint64_t> alignAttr = getAlignment();
   if (alignAttr.has_value()) {
     uint64_t value = alignAttr.value();
     if (!llvm::isPowerOf2_64(value))

diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 62d5be5536917..f5f4b4507af4e 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -77,8 +77,8 @@ LogicalResult CpAsyncOp::verify() {
 // Given the element type of an operand and whether or not it is an accumulator,
 // this function returns the PTX type (`NVVM::MMATypes`) that corresponds to the
 // operand's element type.
-Optional<mlir::NVVM::MMATypes> MmaOp::inferOperandMMAType(Type operandElType,
-                                                          bool isAccumulator) {
+std::optional<mlir::NVVM::MMATypes>
+MmaOp::inferOperandMMAType(Type operandElType, bool isAccumulator) {
   auto half2Type =
       LLVM::getFixedVectorType(Float16Type::get(operandElType.getContext()), 2);
   if (operandElType.isF64())
@@ -118,14 +118,14 @@ static bool isIntegerPtxType(MMATypes type) {
 }
 
 MMATypes MmaOp::accumPtxType() {
-  Optional<mlir::NVVM::MMATypes> val = inferOperandMMAType(
+  std::optional<mlir::NVVM::MMATypes> val = inferOperandMMAType(
       getODSOperands(2).getTypes().front(), /*isAccum=*/true);
   assert(val.has_value() && "accumulator PTX type should always be inferrable");
   return val.value();
 }
 
 MMATypes MmaOp::resultPtxType() {
-  Optional<mlir::NVVM::MMATypes> val =
+  std::optional<mlir::NVVM::MMATypes> val =
       inferOperandMMAType(getResult().getType(), /*isAccum=*/true);
   assert(val.has_value() && "result PTX type should always be inferrable");
   return val.value();
@@ -159,7 +159,7 @@ void MmaOp::print(OpAsmPrinter &p) {
         regTypes.push_back(this->getOperand(operandIdx).getType());
       }
     }
-    Optional<MMATypes> inferredType =
+    std::optional<MMATypes> inferredType =
         inferOperandMMAType(regTypes.back(), /*isAccum=*/fragIdx >= 2);
     if (inferredType)
       ignoreAttrNames.push_back(frag.ptxTypeAttr);
@@ -191,10 +191,10 @@ void MmaOp::print(OpAsmPrinter &p) {
 
 void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
                   ValueRange operandA, ValueRange operandB, ValueRange operandC,
-                  ArrayRef<int64_t> shape, Optional<MMAB1Op> b1Op,
-                  Optional<MMAIntOverflow> intOverflow,
-                  Optional<std::array<MMATypes, 2>> multiplicandPtxTypes,
-                  Optional<std::array<MMALayout, 2>> multiplicandLayouts) {
+                  ArrayRef<int64_t> shape, std::optional<MMAB1Op> b1Op,
+                  std::optional<MMAIntOverflow> intOverflow,
+                  std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes,
+                  std::optional<std::array<MMALayout, 2>> multiplicandLayouts) {
 
   assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
   MLIRContext *ctx = builder.getContext();
@@ -247,7 +247,7 @@ void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
 //     `->` type($res)
 ParseResult MmaOp::parse(OpAsmParser &parser, OperationState &result) {
   struct OperandFragment {
-    Optional<MMATypes> elemtype;
+    std::optional<MMATypes> elemtype;
     SmallVector<OpAsmParser::UnresolvedOperand, 4> regs;
     SmallVector<Type> regTypes;
   };

diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 3138268e57b15..837beb7400c39 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -1569,7 +1569,7 @@ DiagnosedSilenceableFailure transform::tileToForeachThreadOpImpl(
     RewriterBase &rewriter, transform::TransformState &state,
     TransformOpInterface transformOp, ArrayRef<Operation *> targets,
     ArrayRef<OpFoldResult> mixedNumThreads,
-    ArrayRef<OpFoldResult> mixedTileSizes, Optional<ArrayAttr> mapping,
+    ArrayRef<OpFoldResult> mixedTileSizes, std::optional<ArrayAttr> mapping,
     SmallVector<Operation *> &tileOps, SmallVector<Operation *> &tiledOps) {
   if (targets.empty())
     return DiagnosedSilenceableFailure::success();

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 5c8b72d80d210..049f5cc0c62ac 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -44,7 +44,7 @@ using llvm::MapVector;
 static Value allocBuffer(ImplicitLocOpBuilder &b,
                          const LinalgPromotionOptions &options,
                          Type elementType, Value allocSize, DataLayout &layout,
-                         Optional<unsigned> alignment = std::nullopt) {
+                         std::optional<unsigned> alignment = std::nullopt) {
   auto width = layout.getTypeSize(elementType);
 
   IntegerAttr alignmentAttr;
@@ -77,11 +77,10 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
 /// no call back to do so is provided. The default is to allocate a
 /// memref<..xi8> and return a view to get a memref type of shape
 /// boundingSubViewSize.
-static Optional<Value>
-defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
-                           OpBuilder &builder, memref::SubViewOp subView,
-                           ArrayRef<Value> boundingSubViewSize,
-                           Optional<unsigned> alignment, DataLayout &layout) {
+static std::optional<Value> defaultAllocBufferCallBack(
+    const LinalgPromotionOptions &options, OpBuilder &builder,
+    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
+    std::optional<unsigned> alignment, DataLayout &layout) {
   ShapedType viewType = subView.getType();
   ImplicitLocOpBuilder b(subView.getLoc(), builder);
   auto zero = b.createOrFold<arith::ConstantIndexOp>(0);
@@ -136,7 +135,7 @@ struct LinalgOpInstancePromotionOptions {
   CopyCallbackFn copyOutFn;
 
   /// Alignment of promoted buffer.
-  Optional<unsigned> alignment;
+  std::optional<unsigned> alignment;
 };
 } // namespace
 
@@ -166,7 +165,7 @@ LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
   } else {
     allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                        ArrayRef<Value> boundingSubViewSize,
-                       DataLayout &layout) -> Optional<Value> {
+                       DataLayout &layout) -> std::optional<Value> {
       return defaultAllocBufferCallBack(options, b, subViewOp,
                                         boundingSubViewSize, alignment, layout);
     };
@@ -246,7 +245,8 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
   SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
   // If a callback is not specified, then use the default implementation for
   // allocating the promoted buffer.
-  Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
+  std::optional<Value> fullLocalView =
+      allocationFn(b, subView, fullSizes, layout);
   if (!fullLocalView)
     return failure();
   SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index f5cbd81762a8b..8123b66fb5930 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -191,9 +191,9 @@ mlir::linalg::computeMultiTileSizes(OpBuilder &builder, LinalgOp op,
 static bool canOmitTileOffsetInBoundsCheck(OpFoldResult tileSize,
                                            OpFoldResult numThreads,
                                            OpFoldResult iterationSize) {
-  Optional<int64_t> tileSizeConst = getConstantIntValue(tileSize);
-  Optional<int64_t> numThreadsConst = getConstantIntValue(numThreads);
-  Optional<int64_t> iterSizeConst = getConstantIntValue(iterationSize);
+  std::optional<int64_t> tileSizeConst = getConstantIntValue(tileSize);
+  std::optional<int64_t> numThreadsConst = getConstantIntValue(numThreads);
+  std::optional<int64_t> iterSizeConst = getConstantIntValue(iterationSize);
   if (!tileSizeConst || !numThreadsConst || !iterSizeConst)
     return false;
   return *tileSizeConst * (*numThreadsConst - 1) < *iterSizeConst;
@@ -221,7 +221,7 @@ static void calculateTileOffsetsAndSizes(
     RewriterBase &b, Location loc, scf::ForeachThreadOp foreachThreadOp,
     ArrayRef<OpFoldResult> numThreads, SmallVector<Range> loopRanges,
     bool omitTileOffsetBoundsCheck,
-    Optional<ArrayRef<OpFoldResult>> nominalTileSizes,
+    std::optional<ArrayRef<OpFoldResult>> nominalTileSizes,
     SmallVector<OpFoldResult> &tiledOffsets,
     SmallVector<OpFoldResult> &tiledSizes) {
   OpBuilder::InsertionGuard g(b);
@@ -302,8 +302,8 @@ static void calculateTileOffsetsAndSizes(
 /// assume that `tileSize[i] * (numThread[i] -1) <= dimSize[i]` holds.
 static FailureOr<ForeachThreadTilingResult> tileToForeachThreadOpImpl(
     RewriterBase &b, TilingInterface op, ArrayRef<OpFoldResult> numThreads,
-    Optional<ArrayRef<OpFoldResult>> nominalTileSizes,
-    Optional<ArrayAttr> mapping, bool omitTileOffsetBoundsCheck) {
+    std::optional<ArrayRef<OpFoldResult>> nominalTileSizes,
+    std::optional<ArrayAttr> mapping, bool omitTileOffsetBoundsCheck) {
   Location loc = op->getLoc();
   OpBuilder::InsertionGuard g(b);
 
@@ -399,7 +399,7 @@ static FailureOr<ForeachThreadTilingResult> tileToForeachThreadOpImpl(
 FailureOr<ForeachThreadTilingResult>
 linalg::tileToForeachThreadOp(RewriterBase &b, TilingInterface op,
                               ArrayRef<OpFoldResult> numThreads,
-                              Optional<ArrayAttr> mapping) {
+                              std::optional<ArrayAttr> mapping) {
   return tileToForeachThreadOpImpl(b, op, numThreads,
                                    /*nominalTileSizes=*/std::nullopt, mapping,
                                    /*omitTileOffsetBoundsCheck=*/false);
@@ -408,7 +408,7 @@ linalg::tileToForeachThreadOp(RewriterBase &b, TilingInterface op,
 FailureOr<ForeachThreadTilingResult>
 linalg::tileToForeachThreadOpUsingTileSizes(RewriterBase &b, TilingInterface op,
                                             ArrayRef<OpFoldResult> tileSizes,
-                                            Optional<ArrayAttr> mapping) {
+                                            std::optional<ArrayAttr> mapping) {
   SmallVector<Range> loopRanges = op.getIterationDomain(b);
   unsigned nLoops = loopRanges.size();
   SmallVector<OpFoldResult> numThreads;
@@ -586,7 +586,7 @@ linalg::tileReductionUsingForeachThread(RewriterBase &b,
                                         PartialReductionOpInterface op,
                                         ArrayRef<OpFoldResult> numThreads,
                                         ArrayRef<OpFoldResult> tileSizes,
-                                        Optional<ArrayAttr> mapping) {
+                                        std::optional<ArrayAttr> mapping) {
   Location loc = op.getLoc();
   OpBuilder::InsertionGuard g(b);
 

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index f55b701997d76..b8c6115d04474 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -460,7 +460,7 @@ LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
 
   bool zeroSliceGuard = true;
   if (controlFn) {
-    if (Optional<bool> control = controlFn(sliceOp))
+    if (std::optional<bool> control = controlFn(sliceOp))
       zeroSliceGuard = *control;
     else
       return failure();
@@ -501,7 +501,7 @@ static Value getPackOpSourceOrPaddedSource(OpBuilder &builder,
     }
 
     // The size is less than or equal to tileSize because outer dims are all 1s.
-    Optional<int64_t> tileSize =
+    std::optional<int64_t> tileSize =
         getConstantIntValue(tileAndPosMapping.lookup(dim));
     assert(tileSize.has_value() && "dynamic inner tile size is not supported");
     paddedShape.push_back(tileSize.value());

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 89140d42dd2f4..5a407b9054fb4 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -373,14 +373,13 @@ struct VectorizationResult {
   Operation *newOp;
 };
 
-llvm::Optional<vector::CombiningKind>
+std::optional<vector::CombiningKind>
 mlir::linalg::getCombinerOpKind(Operation *combinerOp) {
   using ::mlir::vector::CombiningKind;
 
   if (!combinerOp)
     return std::nullopt;
-  return llvm::TypeSwitch<Operation *, llvm::Optional<CombiningKind>>(
-             combinerOp)
+  return llvm::TypeSwitch<Operation *, std::optional<CombiningKind>>(combinerOp)
       .Case<arith::AddIOp, arith::AddFOp>(
           [&](auto op) { return CombiningKind::ADD; })
       .Case<arith::AndIOp>([&](auto op) { return CombiningKind::AND; })
@@ -1847,7 +1846,7 @@ struct Conv1DGenerator
     Operation *reduceOp = matchLinalgReduction(linalgOp.getDpsInitOperand(0));
     if (!reduceOp)
       return;
-    llvm::Optional<vector::CombiningKind> maybeKind;
+    std::optional<vector::CombiningKind> maybeKind;
     maybeKind = getCombinerOpKind(reduceOp);
     if (!maybeKind || *maybeKind != vector::CombiningKind::ADD)
       return;

diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index 420b02b8502de..eb1c63836b593 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -815,7 +815,7 @@ computeSliceParameters(OpBuilder &builder, Location loc, Value valueToTile,
     // b. The subshape size is 1. According to the way the loops are set up,
     //    tensors with "0" dimensions would never be constructed.
     int64_t shapeSize = shape[r];
-    Optional<int64_t> sizeCst = getConstantIntValue(size);
+    std::optional<int64_t> sizeCst = getConstantIntValue(size);
     auto hasTileSizeOne = sizeCst && *sizeCst == 1;
     auto dividesEvenly = sizeCst && !ShapedType::isDynamic(shapeSize) &&
                          ((shapeSize % *sizeCst) == 0);

diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 1d9011091c3a8..1631996b54740 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -184,7 +184,8 @@ static void constifyIndexValues(
           ofr.get<Attribute>().cast<IntegerAttr>().getInt());
       continue;
     }
-    Optional<int64_t> maybeConstant = getConstantIntValue(ofr.get<Value>());
+    std::optional<int64_t> maybeConstant =
+        getConstantIntValue(ofr.get<Value>());
     if (maybeConstant)
       ofr = builder.getIndexAttr(*maybeConstant);
   }
@@ -458,7 +459,7 @@ ParseResult AllocaScopeOp::parse(OpAsmParser &parser, OperationState &result) {
 }
 
 void AllocaScopeOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   if (index) {
     regions.push_back(RegionSuccessor(getResults()));
@@ -922,7 +923,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
   build(builder, result, source, indexValue);
 }
 
-Optional<int64_t> DimOp::getConstantIndex() {
+std::optional<int64_t> DimOp::getConstantIndex() {
   return getConstantIntValue(getIndex());
 }
 
@@ -942,7 +943,7 @@ Speculation::Speculatability DimOp::getSpeculatability() {
 
 LogicalResult DimOp::verify() {
   // Assume unknown index to be in range.
-  Optional<int64_t> index = getConstantIndex();
+  std::optional<int64_t> index = getConstantIndex();
   if (!index)
     return success();
 
@@ -977,7 +978,7 @@ static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
 /// This accounts for cases where there are multiple unit-dims, but only a
 /// subset of those are dropped. For MemRefTypes these can be disambiguated
 /// using the strides. If a dimension is dropped the stride must be dropped too.
-static llvm::Optional<llvm::SmallBitVector>
+static std::optional<llvm::SmallBitVector>
 computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
                                ArrayRef<OpFoldResult> sizes) {
   llvm::SmallBitVector unusedDims(originalType.getRank());
@@ -1049,7 +1050,7 @@ computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
 llvm::SmallBitVector SubViewOp::getDroppedDims() {
   MemRefType sourceType = getSourceType();
   MemRefType resultType = getType();
-  llvm::Optional<llvm::SmallBitVector> unusedDims =
+  std::optional<llvm::SmallBitVector> unusedDims =
       computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
   assert(unusedDims && "unable to find unused dims of subview");
   return *unusedDims;
@@ -1364,7 +1365,7 @@ void ExtractAlignedPointerAsIndexOp::getAsmResultNames(
 /// The number and type of the results are inferred from the
 /// shape of the source.
 LogicalResult ExtractStridedMetadataOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   ExtractStridedMetadataOpAdaptor extractAdaptor(operands, attributes, regions);
@@ -1625,7 +1626,7 @@ LogicalResult GlobalOp::verify() {
     }
   }
 
-  if (Optional<uint64_t> alignAttr = getAlignment()) {
+  if (std::optional<uint64_t> alignAttr = getAlignment()) {
     uint64_t alignment = *alignAttr;
 
     if (!llvm::isPowerOf2_64(alignment))
@@ -2610,7 +2611,7 @@ Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
     return inferredType;
 
   // Compute which dimensions are dropped.
-  Optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
+  std::optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
       computeRankReductionMask(inferredType.getShape(), resultShape);
   assert(dimsToProject.has_value() && "invalid rank reduction");
 
@@ -2887,7 +2888,7 @@ static MemRefType getCanonicalSubViewResultType(
   auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
                                                        mixedSizes, mixedStrides)
                                 .cast<MemRefType>();
-  llvm::Optional<llvm::SmallBitVector> unusedDims =
+  std::optional<llvm::SmallBitVector> unusedDims =
       computeMemRefRankReductionMask(currentSourceType, currentResultType,
                                      mixedSizes);
   // Return nullptr as failure mode.
@@ -2970,14 +2971,14 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {
 
   // Check offsets are zero.
   if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
-        Optional<int64_t> intValue = getConstantIntValue(ofr);
+        std::optional<int64_t> intValue = getConstantIntValue(ofr);
         return !intValue || intValue.value() != 0;
       }))
     return false;
 
   // Check strides are one.
   if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
-        Optional<int64_t> intValue = getConstantIntValue(ofr);
+        std::optional<int64_t> intValue = getConstantIntValue(ofr);
         return !intValue || intValue.value() != 1;
       }))
     return false;
@@ -2985,7 +2986,7 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {
   // Check all size values are static and matches the (static) source shape.
   ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
   for (const auto &size : llvm::enumerate(mixedSizes)) {
-    Optional<int64_t> intValue = getConstantIntValue(size.value());
+    std::optional<int64_t> intValue = getConstantIntValue(size.value());
     if (!intValue || *intValue != sourceShape[size.index()])
       return false;
   }

diff --git a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
index fae68a0a349e8..c70b210b59489 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
@@ -98,9 +98,9 @@ FailureOr<memref::AllocOp> mlir::memref::multiBuffer(memref::AllocOp allocOp,
   }
   if (!candidateLoop)
     return failure();
-  llvm::Optional<Value> inductionVar = candidateLoop.getSingleInductionVar();
-  llvm::Optional<OpFoldResult> lowerBound = candidateLoop.getSingleLowerBound();
-  llvm::Optional<OpFoldResult> singleStep = candidateLoop.getSingleStep();
+  std::optional<Value> inductionVar = candidateLoop.getSingleInductionVar();
+  std::optional<OpFoldResult> lowerBound = candidateLoop.getSingleLowerBound();
+  std::optional<OpFoldResult> singleStep = candidateLoop.getSingleStep();
   if (!inductionVar || !lowerBound || !singleStep)
     return failure();
 
@@ -125,13 +125,12 @@ FailureOr<memref::AllocOp> mlir::memref::multiBuffer(memref::AllocOp allocOp,
   AffineExpr induc = getAffineDimExpr(0, allocOp.getContext());
   unsigned dimCount = 1;
   auto getAffineExpr = [&](OpFoldResult e) -> AffineExpr {
-    if (Optional<int64_t> constValue = getConstantIntValue(e)) {
+    if (std::optional<int64_t> constValue = getConstantIntValue(e)) {
       return getAffineConstantExpr(*constValue, allocOp.getContext());
     }
     auto value = getOrCreateValue(e, builder, candidateLoop->getLoc());
     operands.push_back(value);
     return getAffineDimExpr(dimCount++, allocOp.getContext());
-   
   };
   auto init = getAffineExpr(*lowerBound);
   auto step = getAffineExpr(*singleStep);

diff --git a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp
index 3d52b1319957a..7d3f1fbd5293d 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp
@@ -46,7 +46,7 @@ struct DimOfShapedTypeOpInterface : public OpRewritePattern<OpTy> {
     if (!shapedTypeOp)
       return failure();
 
-    Optional<int64_t> dimIndex = dimOp.getConstantIndex();
+    std::optional<int64_t> dimIndex = dimOp.getConstantIndex();
     if (!dimIndex)
       return failure();
 
@@ -88,7 +88,7 @@ struct DimOfReifyRankedShapedTypeOpInterface : public OpRewritePattern<OpTy> {
     if (!rankedShapeTypeOp)
       return failure();
 
-    Optional<int64_t> dimIndex = dimOp.getConstantIndex();
+    std::optional<int64_t> dimIndex = dimOp.getConstantIndex();
     if (!dimIndex)
       return failure();
 

diff --git a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
index 468684c5afafe..5d511e83cf165 100644
--- a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
+++ b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
@@ -149,7 +149,7 @@ getShmReadAndWriteOps(Operation *parentOp, Value shmMemRef,
     MemoryEffectOpInterface iface = dyn_cast<MemoryEffectOpInterface>(op);
     if (!iface)
       return;
-    Optional<MemoryEffects::EffectInstance> effect =
+    std::optional<MemoryEffects::EffectInstance> effect =
         iface.getEffectOnValue<MemoryEffects::Read>(shmMemRef);
     if (effect) {
       readOps.push_back(op);

diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index d10dc1bcd8d46..0ae13e412924b 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -117,7 +117,7 @@ static ParseResult parseClauseAttr(AsmParser &parser, ClauseAttr &attr) {
   SMLoc loc = parser.getCurrentLocation();
   if (parser.parseKeyword(&enumStr))
     return failure();
-  if (Optional<ClauseT> enumValue = symbolizeEnum<ClauseT>(enumStr)) {
+  if (std::optional<ClauseT> enumValue = symbolizeEnum<ClauseT>(enumStr)) {
     attr = ClauseAttr::get(parser.getContext(), *enumValue);
     return success();
   }
@@ -173,9 +173,9 @@ static void printLinearClause(OpAsmPrinter &p, Operation *op,
 //===----------------------------------------------------------------------===//
 // Parser, verifier and printer for Aligned Clause
 //===----------------------------------------------------------------------===//
-static LogicalResult verifyAlignedClause(Operation *op,
-                                         Optional<ArrayAttr> alignmentValues,
-                                         OperandRange alignedVariables) {
+static LogicalResult
+verifyAlignedClause(Operation *op, std::optional<ArrayAttr> alignmentValues,
+                    OperandRange alignedVariables) {
   // Check if number of alignment values equals to number of aligned variables
   if (!alignedVariables.empty()) {
     if (!alignmentValues || alignmentValues->size() != alignedVariables.size())
@@ -236,7 +236,7 @@ static ParseResult parseAlignedClause(
 static void printAlignedClause(OpAsmPrinter &p, Operation *op,
                                ValueRange alignedVars,
                                TypeRange alignedVarTypes,
-                               Optional<ArrayAttr> alignmentValues) {
+                               std::optional<ArrayAttr> alignmentValues) {
   for (unsigned i = 0; i < alignedVars.size(); ++i) {
     if (i != 0)
       p << ", ";
@@ -293,11 +293,11 @@ verifyScheduleModifiers(OpAsmParser &parser,
 static ParseResult parseScheduleClause(
     OpAsmParser &parser, ClauseScheduleKindAttr &scheduleAttr,
     ScheduleModifierAttr &scheduleModifier, UnitAttr &simdModifier,
-    Optional<OpAsmParser::UnresolvedOperand> &chunkSize, Type &chunkType) {
+    std::optional<OpAsmParser::UnresolvedOperand> &chunkSize, Type &chunkType) {
   StringRef keyword;
   if (parser.parseKeyword(&keyword))
     return failure();
-  llvm::Optional<mlir::omp::ClauseScheduleKind> schedule =
+  std::optional<mlir::omp::ClauseScheduleKind> schedule =
       symbolizeClauseScheduleKind(keyword);
   if (!schedule)
     return parser.emitError(parser.getNameLoc()) << " expected schedule kind";
@@ -334,7 +334,7 @@ static ParseResult parseScheduleClause(
 
   if (!modifiers.empty()) {
     SMLoc loc = parser.getCurrentLocation();
-    if (Optional<ScheduleModifier> mod =
+    if (std::optional<ScheduleModifier> mod =
             symbolizeScheduleModifier(modifiers[0])) {
       scheduleModifier = ScheduleModifierAttr::get(parser.getContext(), *mod);
     } else {
@@ -396,7 +396,7 @@ parseReductionVarList(OpAsmParser &parser,
 static void printReductionVarList(OpAsmPrinter &p, Operation *op,
                                   OperandRange reductionVars,
                                   TypeRange reductionTypes,
-                                  Optional<ArrayAttr> reductions) {
+                                  std::optional<ArrayAttr> reductions) {
   for (unsigned i = 0, e = reductions->size(); i < e; ++i) {
     if (i != 0)
       p << ", ";
@@ -407,7 +407,7 @@ static void printReductionVarList(OpAsmPrinter &p, Operation *op,
 
 /// Verifies Reduction Clause
 static LogicalResult verifyReductionVarList(Operation *op,
-                                            Optional<ArrayAttr> reductions,
+                                            std::optional<ArrayAttr> reductions,
                                             OperandRange reductionVars) {
   if (!reductionVars.empty()) {
     if (!reductions || reductions->size() != reductionVars.size())

diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp
index e33ba7153968e..49692f62ac9bb 100644
--- a/mlir/lib/Dialect/PDL/IR/PDL.cpp
+++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp
@@ -112,7 +112,7 @@ LogicalResult ApplyNativeRewriteOp::verify() {
 
 LogicalResult AttributeOp::verify() {
   Value attrType = getValueType();
-  Optional<Attribute> attrValue = getValue();
+  std::optional<Attribute> attrValue = getValue();
 
   if (!attrValue) {
     if (isa<RewriteOp>((*this)->getParentOp()))
@@ -203,7 +203,7 @@ static LogicalResult verifyResultTypesAreInferrable(OperationOp op,
   if (resultTypes.empty()) {
     // If we don't know the concrete operation, don't attempt any verification.
     // We can't make assumptions if we don't know the concrete operation.
-    Optional<StringRef> rawOpName = op.getOpName();
+    std::optional<StringRef> rawOpName = op.getOpName();
     if (!rawOpName)
       return success();
     Optional<RegisteredOperationName> opName =
@@ -290,7 +290,7 @@ LogicalResult OperationOp::verify() {
 }
 
 bool OperationOp::hasTypeInference() {
-  if (Optional<StringRef> rawOpName = getOpName()) {
+  if (std::optional<StringRef> rawOpName = getOpName()) {
     OperationName opName(*rawOpName, getContext());
     return opName.hasInterface<InferTypeOpInterface>();
   }
@@ -298,7 +298,7 @@ bool OperationOp::hasTypeInference() {
 }
 
 bool OperationOp::mightHaveTypeInference() {
-  if (Optional<StringRef> rawOpName = getOpName()) {
+  if (std::optional<StringRef> rawOpName = getOpName()) {
     OperationName opName(*rawOpName, getContext());
     return opName.mightHaveInterface<InferTypeOpInterface>();
   }

diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index 0ec89c06f77ea..25f953c22ac36 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -248,7 +248,7 @@ void ExecuteRegionOp::getCanonicalizationPatterns(RewritePatternSet &results,
 /// correspond to a constant value for each operand, or null if that operand is
 /// not a constant.
 void ExecuteRegionOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   // If the predecessor is the ExecuteRegionOp, branch into the body.
   if (!index) {
@@ -265,7 +265,7 @@ void ExecuteRegionOp::getSuccessorRegions(
 //===----------------------------------------------------------------------===//
 
 MutableOperandRange
-ConditionOp::getMutableSuccessorOperands(Optional<unsigned> index) {
+ConditionOp::getMutableSuccessorOperands(std::optional<unsigned> index) {
   // Pass all operands except the condition to the successor region.
   return getArgsMutable();
 }
@@ -352,17 +352,19 @@ LogicalResult ForOp::verifyRegions() {
   return success();
 }
 
-Optional<Value> ForOp::getSingleInductionVar() { return getInductionVar(); }
+std::optional<Value> ForOp::getSingleInductionVar() {
+  return getInductionVar();
+}
 
-Optional<OpFoldResult> ForOp::getSingleLowerBound() {
+std::optional<OpFoldResult> ForOp::getSingleLowerBound() {
   return OpFoldResult(getLowerBound());
 }
 
-Optional<OpFoldResult> ForOp::getSingleStep() {
+std::optional<OpFoldResult> ForOp::getSingleStep() {
   return OpFoldResult(getStep());
 }
 
-Optional<OpFoldResult> ForOp::getSingleUpperBound() {
+std::optional<OpFoldResult> ForOp::getSingleUpperBound() {
   return OpFoldResult(getUpperBound());
 }
 
@@ -476,7 +478,7 @@ ForOp mlir::scf::getForInductionVarOwner(Value val) {
 /// correspond to the loop iterator operands, i.e., those excluding the
 /// induction variable. LoopOp only has one region, so 0 is the only valid value
 /// for `index`.
-OperandRange ForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+OperandRange ForOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
   assert(index && *index == 0 && "invalid region index");
 
   // The initial operands map to the loop arguments after the induction
@@ -489,7 +491,7 @@ OperandRange ForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
 /// during the flow of control. `operands` is a set of optional attributes that
 /// correspond to a constant value for each operand, or null if that operand is
 /// not a constant.
-void ForOp::getSuccessorRegions(Optional<unsigned> index,
+void ForOp::getSuccessorRegions(std::optional<unsigned> index,
                                 ArrayRef<Attribute> operands,
                                 SmallVectorImpl<RegionSuccessor> &regions) {
   // If the predecessor is the ForOp, branch into the body using the iterator
@@ -721,7 +723,7 @@ struct ForOpIterArgsFolder : public OpRewritePattern<scf::ForOp> {
 /// Util function that tries to compute a constant diff between u and l.
 /// Returns std::nullopt when the difference between two AffineValueMap is
 /// dynamic.
-static Optional<int64_t> computeConstDiff(Value l, Value u) {
+static std::optional<int64_t> computeConstDiff(Value l, Value u) {
   IntegerAttr clb, cub;
   if (matchPattern(l, m_Constant(&clb)) && matchPattern(u, m_Constant(&cub))) {
     llvm::APInt lbValue = clb.getValue();
@@ -754,7 +756,7 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
       return success();
     }
 
-    Optional<int64_t> diff =
+    std::optional<int64_t> diff =
         computeConstDiff(op.getLowerBound(), op.getUpperBound());
     if (!diff)
       return failure();
@@ -765,7 +767,7 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
       return success();
     }
 
-    llvm::Optional<llvm::APInt> maybeStepValue = op.getConstantStep();
+    std::optional<llvm::APInt> maybeStepValue = op.getConstantStep();
     if (!maybeStepValue)
       return failure();
 
@@ -1068,7 +1070,7 @@ void ForOp::getCanonicalizationPatterns(RewritePatternSet &results,
               LastTensorLoadCanonicalization, ForOpTensorCastFolder>(context);
 }
 
-Optional<APInt> ForOp::getConstantStep() {
+std::optional<APInt> ForOp::getConstantStep() {
   IntegerAttr step;
   if (matchPattern(getStep(), m_Constant(&step)))
     return step.getValue();
@@ -1212,7 +1214,7 @@ ParseResult ForeachThreadOp::parse(OpAsmParser &parser,
 void ForeachThreadOp::build(mlir::OpBuilder &builder,
                             mlir::OperationState &result, ValueRange outputs,
                             ValueRange numThreads,
-                            Optional<ArrayAttr> mapping) {
+                            std::optional<ArrayAttr> mapping) {
   result.addOperands(numThreads);
   result.addOperands(outputs);
   if (mapping.has_value()) {
@@ -1565,7 +1567,7 @@ void IfOp::print(OpAsmPrinter &p) {
 /// during the flow of control. `operands` is a set of optional attributes that
 /// correspond to a constant value for each operand, or null if that operand is
 /// not a constant.
-void IfOp::getSuccessorRegions(Optional<unsigned> index,
+void IfOp::getSuccessorRegions(std::optional<unsigned> index,
                                ArrayRef<Attribute> operands,
                                SmallVectorImpl<RegionSuccessor> &regions) {
   // The `then` and the `else` region branch back to the parent operation.
@@ -2723,7 +2725,7 @@ void WhileOp::build(::mlir::OpBuilder &odsBuilder,
   afterBuilder(odsBuilder, odsState.location, afterBlock->getArguments());
 }
 
-OperandRange WhileOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+OperandRange WhileOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
   assert(index && *index == 0 &&
          "WhileOp is expected to branch only to the first region");
 
@@ -2746,7 +2748,7 @@ Block::BlockArgListType WhileOp::getAfterArguments() {
   return getAfter().front().getArguments();
 }
 
-void WhileOp::getSuccessorRegions(Optional<unsigned> index,
+void WhileOp::getSuccessorRegions(std::optional<unsigned> index,
                                   ArrayRef<Attribute> operands,
                                   SmallVectorImpl<RegionSuccessor> &regions) {
   // The parent op always branches to the condition region.
@@ -3524,7 +3526,7 @@ Block &scf::IndexSwitchOp::getCaseBlock(unsigned idx) {
 }
 
 void IndexSwitchOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &successors) {
   // All regions branch back to the parent op.
   if (index) {

diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
index ef9350e68e531..630edd300a927 100644
--- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -453,8 +453,8 @@ static FailureOr<BaseMemRefType> computeLoopRegionIterArgBufferType(
 
 /// Return `true` if the given loop may have 0 iterations.
 bool mayHaveZeroIterations(scf::ForOp forOp) {
-  Optional<int64_t> lb = getConstantIntValue(forOp.getLowerBound());
-  Optional<int64_t> ub = getConstantIntValue(forOp.getUpperBound());
+  std::optional<int64_t> lb = getConstantIntValue(forOp.getLowerBound());
+  std::optional<int64_t> ub = getConstantIntValue(forOp.getUpperBound());
   if (!lb.has_value() || !ub.has_value())
     return true;
   return *ub <= *lb;
@@ -1055,7 +1055,7 @@ struct YieldOpInterface
 bool mayHaveZeroIterations(scf::ForeachThreadOp foreachThreadOp) {
   int64_t p = 1;
   for (Value v : foreachThreadOp.getNumThreads()) {
-    if (Optional<int64_t> c = getConstantIntValue(v)) {
+    if (std::optional<int64_t> c = getConstantIntValue(v)) {
       p *= *c;
     } else {
       return true;

diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 092f853b11d10..7fe9b4baf7659 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -66,13 +66,13 @@ fillInterchangeVector(ArrayRef<int64_t> interchangeVector,
 
 // Check if `stride` evenly divides the trip count `size - offset`.
 static bool tileDividesIterationDomain(Range loopRange) {
-  Optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
+  std::optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
   if (!offsetAsInt)
     return false;
-  Optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
+  std::optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
   if (!sizeAsInt)
     return false;
-  Optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
+  std::optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
   if (!strideAsInt)
     return false;
   return ((sizeAsInt.value() - offsetAsInt.value()) % strideAsInt.value() == 0);
@@ -83,7 +83,7 @@ static bool tileDividesIterationDomain(Range loopRange) {
 static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
                                        Range loopRange, Value iv,
                                        Value tileSize) {
-  Optional<int64_t> ts = getConstantIntValue(tileSize);
+  std::optional<int64_t> ts = getConstantIntValue(tileSize);
   if (ts && ts.value() == 1)
     return getAsOpFoldResult(tileSize);
 
@@ -484,10 +484,10 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b,
 /// `iter_args` of the outer most that is encountered. Traversing the iter_args
 /// indicates that this is a destination operand of the consumer. If there was
 /// no loop traversal needed, the second value of the returned tuple is empty.
-static std::tuple<OpResult, Optional<OpOperand *>>
+static std::tuple<OpResult, std::optional<OpOperand *>>
 getUntiledProducerFromSliceSource(OpOperand *source,
                                   ArrayRef<scf::ForOp> loops) {
-  Optional<OpOperand *> destinationIterArg;
+  std::optional<OpOperand *> destinationIterArg;
   auto loopIt = loops.rbegin();
   while (auto iterArg = source->get().dyn_cast<BlockArgument>()) {
     scf::ForOp loop = *loopIt;
@@ -633,7 +633,7 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
     // TODO: This can be modeled better if the `DestinationStyleOpInterface`.
     // Update to use that when it does become available.
     scf::ForOp outerMostLoop = tileAndFuseResult.loops.front();
-    Optional<unsigned> iterArgNumber;
+    std::optional<unsigned> iterArgNumber;
     if (destinationIterArg) {
       iterArgNumber = outerMostLoop.getIterArgNumberForOpOperand(
           *destinationIterArg.value());

diff --git a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
index ee92ecf8d7c8e..7b13e3c426773 100644
--- a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
@@ -218,8 +218,8 @@ addLoopRangeConstraints(FlatAffineValueConstraints &constraints, Value iv,
                        : constraints.appendSymbolVar(/*num=*/1);
 
   // If loop lower/upper bounds are constant: Add EQ constraint.
-  Optional<int64_t> lbInt = getConstantIntValue(lb);
-  Optional<int64_t> ubInt = getConstantIntValue(ub);
+  std::optional<int64_t> lbInt = getConstantIntValue(lb);
+  std::optional<int64_t> ubInt = getConstantIntValue(ub);
   if (lbInt)
     constraints.addBound(IntegerPolyhedron::EQ, symLb, *lbInt);
   if (ubInt)

diff  --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
index c5fffdf26c920..fe722e90c9b6b 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -142,15 +142,15 @@ std::string SPIRVDialect::getAttributeName(Decoration decoration) {
 
 // Forward declarations.
 template <typename ValTy>
-static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
-                                      DialectAsmParser &parser);
+static std::optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
+                                           DialectAsmParser &parser);
 template <>
-Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
-                                    DialectAsmParser &parser);
+std::optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
+                                         DialectAsmParser &parser);
 
 template <>
-Optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
-                                            DialectAsmParser &parser);
+std::optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
+                                                 DialectAsmParser &parser);
 
 static Type parseAndVerifyType(SPIRVDialect const &dialect,
                                DialectAsmParser &parser) {
@@ -264,7 +264,7 @@ static LogicalResult parseOptionalArrayStride(const SPIRVDialect &dialect,
     return failure();
 
   SMLoc strideLoc = parser.getCurrentLocation();
-  Optional<unsigned> optStride = parseAndVerify<unsigned>(dialect, parser);
+  std::optional<unsigned> optStride = parseAndVerify<unsigned>(dialect, parser);
   if (!optStride)
     return failure();
 
@@ -474,8 +474,8 @@ static Type parseMatrixType(SPIRVDialect const &dialect,
 // Specialize this function to parse each of the parameters that define an
 // ImageType. By default it assumes this is an enum type.
 template <typename ValTy>
-static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
-                                      DialectAsmParser &parser) {
+static std::optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
+                                           DialectAsmParser &parser) {
   StringRef enumSpec;
   SMLoc enumLoc = parser.getCurrentLocation();
   if (parser.parseKeyword(&enumSpec)) {
@@ -489,8 +489,8 @@ static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
 }
 
 template <>
-Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
-                                    DialectAsmParser &parser) {
+std::optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
+                                         DialectAsmParser &parser) {
   // TODO: Further verify that the element type can be sampled
   auto ty = parseAndVerifyType(dialect, parser);
   if (!ty)
@@ -499,8 +499,8 @@ Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
 }
 
 template <typename IntTy>
-static Optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
-                                             DialectAsmParser &parser) {
+static std::optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
+                                                  DialectAsmParser &parser) {
   IntTy offsetVal = std::numeric_limits<IntTy>::max();
   if (parser.parseInteger(offsetVal))
     return std::nullopt;
@@ -508,8 +508,8 @@ static Optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
 }
 
 template <>
-Optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
-                                            DialectAsmParser &parser) {
+std::optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
+                                                 DialectAsmParser &parser) {
   return parseAndVerifyInteger<unsigned>(dialect, parser);
 }
 
@@ -520,7 +520,7 @@ namespace {
 // (termination condition) needs partial specialization.
 template <typename ParseType, typename... Args>
 struct ParseCommaSeparatedList {
-  Optional<std::tuple<ParseType, Args...>>
+  std::optional<std::tuple<ParseType, Args...>>
   operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const {
     auto parseVal = parseAndVerify<ParseType>(dialect, parser);
     if (!parseVal)
@@ -541,8 +541,8 @@ struct ParseCommaSeparatedList {
 // specs to parse the last element of the list.
 template <typename ParseType>
 struct ParseCommaSeparatedList<ParseType> {
-  Optional<std::tuple<ParseType>> operator()(SPIRVDialect const &dialect,
-                                             DialectAsmParser &parser) const {
+  std::optional<std::tuple<ParseType>>
+  operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const {
     if (auto value = parseAndVerify<ParseType>(dialect, parser))
       return std::tuple<ParseType>(*value);
     return std::nullopt;
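
ParseCommaSeparatedList above composes one parse result per element into a single std::optional<std::tuple<...>>, terminating the variadic recursion with a one-element partial specialization. A self-contained sketch of the same technique, with a hypothetical parseOne standing in for parseAndVerify and an istringstream standing in for the dialect parser:

    #include <optional>
    #include <sstream>
    #include <tuple>

    // Hypothetical element parser: reads one T from the stream, or fails.
    template <typename T>
    std::optional<T> parseOne(std::istringstream &in) {
      T v;
      if (in >> v)
        return v;
      return std::nullopt;
    }

    // Recursive case: parse the head, then the tail, and splice the tuples.
    template <typename Head, typename... Tail>
    struct ParseList {
      std::optional<std::tuple<Head, Tail...>>
      operator()(std::istringstream &in) const {
        auto head = parseOne<Head>(in);
        if (!head)
          return std::nullopt;
        auto tail = ParseList<Tail...>()(in);
        if (!tail)
          return std::nullopt;
        return std::tuple_cat(std::tuple<Head>(*head), *tail);
      }
    };

    // Base case: a single element ends the recursion.
    template <typename Head>
    struct ParseList<Head> {
      std::optional<std::tuple<Head>>
      operator()(std::istringstream &in) const {
        if (auto v = parseOne<Head>(in))
          return std::tuple<Head>(*v);
        return std::nullopt;
      }
    };

For example, ParseList<unsigned, unsigned>{}(in) yields std::optional<std::tuple<unsigned, unsigned>> and collapses to std::nullopt as soon as any element fails to parse.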

diff  --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index 2dfd1b7394930..929010651b764 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -313,8 +313,8 @@ template <typename MemoryOpTy>
 static void printMemoryAccessAttribute(
     MemoryOpTy memoryOp, OpAsmPrinter &printer,
     SmallVectorImpl<StringRef> &elidedAttrs,
-    Optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
-    Optional<uint32_t> alignmentAttrValue = std::nullopt) {
+    std::optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
+    std::optional<uint32_t> alignmentAttrValue = std::nullopt) {
   // Print optional memory access attribute.
   if (auto memAccess = (memoryAccessAtrrValue ? memoryAccessAtrrValue
                                               : memoryOp.getMemoryAccess())) {
@@ -343,8 +343,8 @@ template <typename MemoryOpTy>
 static void printSourceMemoryAccessAttribute(
     MemoryOpTy memoryOp, OpAsmPrinter &printer,
     SmallVectorImpl<StringRef> &elidedAttrs,
-    Optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
-    Optional<uint32_t> alignmentAttrValue = std::nullopt) {
+    std::optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
+    std::optional<uint32_t> alignmentAttrValue = std::nullopt) {
 
   printer << ", ";
 
@@ -912,7 +912,7 @@ static ParseResult parseGroupNonUniformArithmeticOp(OpAsmParser &parser,
       parser.parseOperand(valueInfo))
     return failure();
 
-  Optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
+  std::optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
   if (succeeded(parser.parseOptionalKeyword(kClusterSize))) {
     clusterSizeInfo = OpAsmParser::UnresolvedOperand();
     if (parser.parseLParen() || parser.parseOperand(*clusterSizeInfo) ||
@@ -3348,7 +3348,7 @@ LogicalResult spirv::MergeOp::verify() {
 //===----------------------------------------------------------------------===//
 
 void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
-                            Optional<StringRef> name) {
+                            std::optional<StringRef> name) {
   OpBuilder::InsertionGuard guard(builder);
   builder.createBlock(state.addRegion());
   if (name) {
@@ -3360,8 +3360,8 @@ void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
 void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
                             spirv::AddressingModel addressingModel,
                             spirv::MemoryModel memoryModel,
-                            Optional<VerCapExtAttr> vceTriple,
-                            Optional<StringRef> name) {
+                            std::optional<VerCapExtAttr> vceTriple,
+                            std::optional<StringRef> name) {
   state.addAttribute(
       "addressing_model",
       builder.getAttr<spirv::AddressingModelAttr>(addressingModel));
@@ -3414,7 +3414,7 @@ ParseResult spirv::ModuleOp::parse(OpAsmParser &parser,
 }
 
 void spirv::ModuleOp::print(OpAsmPrinter &printer) {
-  if (Optional<StringRef> name = getName()) {
+  if (std::optional<StringRef> name = getName()) {
     printer << ' ';
     printer.printSymbolName(*name);
   }
@@ -3428,7 +3428,7 @@ void spirv::ModuleOp::print(OpAsmPrinter &printer) {
   elidedAttrs.assign({addressingModelAttrName, memoryModelAttrName,
                       mlir::SymbolTable::getSymbolAttrName()});
 
-  if (Optional<spirv::VerCapExtAttr> triple = getVceTriple()) {
+  if (std::optional<spirv::VerCapExtAttr> triple = getVceTriple()) {
     printer << " requires " << *triple;
     elidedAttrs.push_back(spirv::ModuleOp::getVCETripleAttrName());
   }
@@ -3806,7 +3806,7 @@ LogicalResult spirv::UnreachableOp::verify() {
 ParseResult spirv::VariableOp::parse(OpAsmParser &parser,
                                      OperationState &result) {
   // Parse optional initializer
-  Optional<OpAsmParser::UnresolvedOperand> initInfo;
+  std::optional<OpAsmParser::UnresolvedOperand> initInfo;
   if (succeeded(parser.parseOptionalKeyword("init"))) {
     initInfo = OpAsmParser::UnresolvedOperand();
     if (parser.parseLParen() || parser.parseOperand(*initInfo) ||
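
Several of the SPIR-V hunks replace defaulted Optional parameters, for example the std::optional<spirv::MemoryAccess> and std::optional<StringRef> arguments that default to std::nullopt. A small sketch of that calling convention in plain C++ (describeModule is a hypothetical free function, not the spirv::ModuleOp builder):

    #include <optional>
    #include <string>

    // Callers may omit the argument, pass std::nullopt explicitly, or supply a
    // value; only a present value changes the output.
    static std::string describeModule(
        std::optional<std::string> name = std::nullopt) {
      if (name)
        return "module @" + *name;
      return "anonymous module";
    }

    // describeModule()           -> "anonymous module"
    // describeModule("kernels")  -> "module @kernels"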

diff  --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
index efc340c87d135..d77f7b53a2804 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -148,7 +148,7 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
 
   // Specifies the spirv.ExecutionModeOp.
   if (DenseI32ArrayAttr workgroupSizeAttr = entryPointAttr.getWorkgroupSize()) {
-    Optional<ArrayRef<spirv::Capability>> caps =
+    std::optional<ArrayRef<spirv::Capability>> caps =
         spirv::getCapabilities(spirv::ExecutionMode::LocalSize);
     if (!caps || targetEnv.allows(*caps)) {
       builder.create<spirv::ExecutionModeOp>(funcOp.getLoc(), funcOp,
@@ -161,7 +161,7 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
     }
   }
   if (Optional<int> subgroupSize = entryPointAttr.getSubgroupSize()) {
-    Optional<ArrayRef<spirv::Capability>> caps =
+    std::optional<ArrayRef<spirv::Capability>> caps =
         spirv::getCapabilities(spirv::ExecutionMode::SubgroupSize);
     if (!caps || targetEnv.allows(*caps)) {
       builder.create<spirv::ExecutionModeOp>(funcOp.getLoc(), funcOp,

diff  --git a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
index 520ee5f2fe64a..edb72bec52530 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
@@ -52,8 +52,8 @@ static AliasedResourceMap collectAliasedResources(spirv::ModuleOp moduleOp) {
   AliasedResourceMap aliasedResources;
   moduleOp->walk([&aliasedResources](spirv::GlobalVariableOp varOp) {
     if (varOp->getAttrOfType<UnitAttr>("aliased")) {
-      Optional<uint32_t> set = varOp.getDescriptorSet();
-      Optional<uint32_t> binding = varOp.getBinding();
+      std::optional<uint32_t> set = varOp.getDescriptorSet();
+      std::optional<uint32_t> binding = varOp.getBinding();
       if (set && binding)
         aliasedResources[{*set, *binding}].push_back(varOp);
     }

diff  --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 28ac98e5ab8a5..c3fd22998321a 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -335,7 +335,7 @@ void AssumingOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
 
 // See RegionBranchOpInterface in Interfaces/ControlFlowInterfaces.td
 void AssumingOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   // AssumingOp has unconditional control flow into the region and back to the
   // parent, so return the correct RegionSuccessor purely based on the index
@@ -394,7 +394,7 @@ void AssumingOp::build(
 //===----------------------------------------------------------------------===//
 
 LogicalResult mlir::shape::AddOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType().isa<SizeType>() ||
@@ -911,7 +911,7 @@ void ConstShapeOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
 }
 
 LogicalResult mlir::shape::ConstShapeOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   Builder b(context);
@@ -1068,7 +1068,7 @@ OpFoldResult CstrRequireOp::fold(ArrayRef<Attribute> operands) {
 // DimOp
 //===----------------------------------------------------------------------===//
 
-Optional<int64_t> DimOp::getConstantIndex() {
+std::optional<int64_t> DimOp::getConstantIndex() {
   if (auto constSizeOp = getIndex().getDefiningOp<ConstSizeOp>())
     return constSizeOp.getValue().getLimitedValue();
   if (auto constantOp = getIndex().getDefiningOp<arith::ConstantOp>())
@@ -1081,7 +1081,7 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
   auto valShapedType = valType.dyn_cast<ShapedType>();
   if (!valShapedType || !valShapedType.hasRank())
     return nullptr;
-  Optional<int64_t> index = getConstantIndex();
+  std::optional<int64_t> index = getConstantIndex();
   if (!index.has_value())
     return nullptr;
   if (index.value() >= valShapedType.getRank())
@@ -1093,7 +1093,7 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
 }
 
 LogicalResult mlir::shape::DimOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   DimOpAdaptor dimOp(operands);
@@ -1141,7 +1141,7 @@ OpFoldResult DivOp::fold(ArrayRef<Attribute> operands) {
 }
 
 LogicalResult mlir::shape::DivOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType().isa<SizeType>() ||
@@ -1327,7 +1327,7 @@ void FuncOp::print(OpAsmPrinter &p) {
 // GetExtentOp
 //===----------------------------------------------------------------------===//
 
-Optional<int64_t> GetExtentOp::getConstantDim() {
+std::optional<int64_t> GetExtentOp::getConstantDim() {
   if (auto constSizeOp = getDim().getDefiningOp<ConstSizeOp>())
     return constSizeOp.getValue().getLimitedValue();
   if (auto constantOp = getDim().getDefiningOp<arith::ConstantOp>())
@@ -1339,7 +1339,7 @@ OpFoldResult GetExtentOp::fold(ArrayRef<Attribute> operands) {
   auto elements = operands[0].dyn_cast_or_null<DenseIntElementsAttr>();
   if (!elements)
     return nullptr;
-  Optional<int64_t> dim = getConstantDim();
+  std::optional<int64_t> dim = getConstantDim();
   if (!dim.has_value())
     return nullptr;
   if (dim.value() >= elements.getNumElements())
@@ -1362,7 +1362,7 @@ void GetExtentOp::build(OpBuilder &builder, OperationState &result, Value shape,
 }
 
 LogicalResult mlir::shape::GetExtentOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   inferredReturnTypes.assign({IndexType::get(context)});
@@ -1400,7 +1400,7 @@ OpFoldResult IsBroadcastableOp::fold(ArrayRef<Attribute> operands) {
 //===----------------------------------------------------------------------===//
 
 LogicalResult mlir::shape::MeetOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands.empty())
@@ -1536,7 +1536,7 @@ void shape::RankOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
 }
 
 LogicalResult mlir::shape::RankOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType().isa<ShapeType>())
@@ -1572,7 +1572,7 @@ OpFoldResult NumElementsOp::fold(ArrayRef<Attribute> operands) {
 }
 
 LogicalResult mlir::shape::NumElementsOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType().isa<ShapeType>())
@@ -1604,7 +1604,7 @@ OpFoldResult MaxOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
 }
 
 LogicalResult mlir::shape::MaxOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType() == operands[1].getType())
@@ -1636,7 +1636,7 @@ OpFoldResult MinOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
 }
 
 LogicalResult mlir::shape::MinOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType() == operands[1].getType())
@@ -1673,7 +1673,7 @@ OpFoldResult MulOp::fold(ArrayRef<Attribute> operands) {
 }
 
 LogicalResult mlir::shape::MulOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType().isa<SizeType>() ||
@@ -1760,7 +1760,7 @@ void ShapeOfOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
 }
 
 LogicalResult mlir::shape::ShapeOfOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType().isa<ValueShapeType>())
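
In the shape dialect hunks, getConstantIndex and the fold hooks follow the "fold only when the index is a known constant and in range" pattern. A minimal sketch of that logic, with a std::vector standing in for the ranked shape and hypothetical names throughout:

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Mirrors DimOp::fold: produce a constant only when the index is known and
    // within the rank; otherwise return std::nullopt and leave the op alone.
    static std::optional<int64_t> foldDim(const std::vector<int64_t> &shape,
                                          std::optional<int64_t> index) {
      if (!index.has_value())
        return std::nullopt;
      if (*index < 0 || *index >= static_cast<int64_t>(shape.size()))
        return std::nullopt;
      return shape[static_cast<size_t>(*index)];
    }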

diff  --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 3d7dca9bb1196..e4e54b7143319 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -333,7 +333,7 @@ IntegerType StorageSpecifierType::getSizesType() const {
 }
 
 Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
-                                        Optional<unsigned> dim) const {
+                                        std::optional<unsigned> dim) const {
   if (kind != StorageSpecifierKind::ValMemSize)
     assert(dim);
 
@@ -344,8 +344,8 @@ Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
 }
 
 Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
-                                        Optional<APInt> dim) const {
-  Optional<unsigned> intDim = std::nullopt;
+                                        std::optional<APInt> dim) const {
+  std::optional<unsigned> intDim = std::nullopt;
   if (dim)
     intDim = dim.value().getZExtValue();
   return getFieldType(kind, intDim);
@@ -369,10 +369,9 @@ static LogicalResult isMatchingWidth(Value result, unsigned width) {
   return failure();
 }
 
-static LogicalResult
-verifySparsifierGetterSetter(StorageSpecifierKind mdKind, Optional<APInt> dim,
-                             TypedValue<StorageSpecifierType> md,
-                             Operation *op) {
+static LogicalResult verifySparsifierGetterSetter(
+    StorageSpecifierKind mdKind, std::optional<APInt> dim,
+    TypedValue<StorageSpecifierType> md, Operation *op) {
   if (mdKind == StorageSpecifierKind::ValMemSize && dim) {
     return op->emitError(
         "redundant dimension argument for querying value memory size");
@@ -482,7 +481,7 @@ static SetStorageSpecifierOp getSpecifierSetDef(SpecifierOp op) {
 
 OpFoldResult GetStorageSpecifierOp::fold(ArrayRef<Attribute> operands) {
   StorageSpecifierKind kind = getSpecifierKind();
-  Optional<APInt> dim = getDim();
+  std::optional<APInt> dim = getDim();
   for (auto op = getSpecifierSetDef(*this); op; op = getSpecifierSetDef(op))
     if (kind == op.getSpecifierKind() && dim == op.getDim())
       return op.getValue();
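
The getFieldType overloads above narrow a std::optional<APInt> dimension into a std::optional<unsigned> before dispatching. A standalone sketch of that conversion (WideInt is a hypothetical stand-in for llvm::APInt):

    #include <cstdint>
    #include <optional>

    struct WideInt {
      uint64_t v;
      uint64_t getZExtValue() const { return v; }
    };

    // Absent stays absent; a present value is zero-extended and narrowed.
    static std::optional<unsigned> narrowDim(std::optional<WideInt> dim) {
      std::optional<unsigned> intDim = std::nullopt;
      if (dim)
        intDim = static_cast<unsigned>(dim->getZExtValue());
      return intDim;
    }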

diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 4c190bc1e92ed..f3e4a9af8f483 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -133,9 +133,10 @@ static scf::ForOp createFor(OpBuilder &builder, Location loc, Value upper,
 /// Gets the dimension size for the given sparse tensor at the given
 /// original dimension 'dim'. Returns std::nullopt if no sparse encoding is
 /// attached to the given tensor type.
-static Optional<Value> sizeFromTensorAtDim(OpBuilder &builder, Location loc,
-                                           SparseTensorDescriptor desc,
-                                           unsigned dim) {
+static std::optional<Value> sizeFromTensorAtDim(OpBuilder &builder,
+                                                Location loc,
+                                                SparseTensorDescriptor desc,
+                                                unsigned dim) {
   RankedTensorType rtp = desc.getTensorType();
   // Access into a static dimension can query the original type directly.
   // Note that this is typically already done by DimOp's folding.
@@ -681,7 +682,7 @@ class SparseDimOpConverter : public OpConversionPattern<tensor::DimOp> {
   LogicalResult
   matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    Optional<int64_t> index = op.getConstantIndex();
+    std::optional<int64_t> index = op.getConstantIndex();
     if (!index || !getSparseTensorEncoding(adaptor.getSource().getType()))
       return failure();
 

diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 01ae0150e6505..a358f384bfb0c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -706,7 +706,7 @@ class SparseTensorToDimSizeConverter
     if (!enc)
       return failure();
     // Only rewrite DimOp with constant index.
-    Optional<int64_t> dim = op.getConstantIndex();
+    std::optional<int64_t> dim = op.getConstantIndex();
     if (!dim)
       return failure();
     // Generate the call.

diff  --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 57c0495f25d4b..a469574fc5265 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -380,7 +380,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
   build(builder, result, source, indexValue);
 }
 
-Optional<int64_t> DimOp::getConstantIndex() {
+std::optional<int64_t> DimOp::getConstantIndex() {
   return getConstantIntValue(getIndex());
 }
 
@@ -400,7 +400,7 @@ Speculation::Speculatability DimOp::getSpeculatability() {
 
 LogicalResult DimOp::verify() {
   // Assume unknown index to be in range.
-  Optional<int64_t> index = getConstantIndex();
+  std::optional<int64_t> index = getConstantIndex();
   if (!index)
     return success();
 
@@ -598,7 +598,7 @@ struct ReplaceEmptyTensorStaticShapeDims : OpRewritePattern<EmptyOp> {
     for (int64_t i = 0; i < op.getType().getRank(); ++i) {
       if (op.getType().isDynamicDim(i)) {
         Value dynamicSize = op.getDynamicSizes()[ctr++];
-        Optional<int64_t> cst = getConstantIntValue(dynamicSize);
+        std::optional<int64_t> cst = getConstantIntValue(dynamicSize);
         if (cst.has_value()) {
           staticShape[i] = *cst;
           changedType = true;
@@ -626,7 +626,7 @@ struct FoldEmptyTensorWithDimOp : public OpRewritePattern<DimOp> {
 
   LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                 PatternRewriter &rewriter) const override {
-    Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
+    std::optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
     auto emptyTensorOp = dimOp.getSource().getDefiningOp<EmptyOp>();
     if (!emptyTensorOp || !maybeConstantIndex)
       return failure();
@@ -1445,7 +1445,7 @@ struct FoldDimOfExpandShape : public OpRewritePattern<DimOp> {
       return failure();
 
     // Only constant dimension values are supported.
-    Optional<int64_t> dim = dimOp.getConstantIndex();
+    std::optional<int64_t> dim = dimOp.getConstantIndex();
     if (!dim.has_value())
       return failure();
 
@@ -1489,7 +1489,7 @@ struct FoldDimOfCollapseShape : public OpRewritePattern<DimOp> {
       return failure();
 
     // Only constant dimension values are supported.
-    Optional<int64_t> dim = dimOp.getConstantIndex();
+    std::optional<int64_t> dim = dimOp.getConstantIndex();
     if (!dim.has_value())
       return failure();
 
@@ -1732,7 +1732,7 @@ llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
   llvm::SmallBitVector droppedDims(mixedSizes.size());
   unsigned shapePos = 0;
   for (const auto &size : enumerate(mixedSizes)) {
-    Optional<int64_t> sizeVal = getConstantIntValue(size.value());
+    std::optional<int64_t> sizeVal = getConstantIntValue(size.value());
     // If the size is not 1, or if the current matched dimension of the result
     // is the same static shape as the size value (which is 1), then the
     // dimension is preserved.
@@ -2278,15 +2278,16 @@ struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertOpTy> {
         }))
       return failure();
 
-    auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
+    auto getSourceOfCastOp = [](Value v) -> std::optional<Value> {
       auto castOp = v.getDefiningOp<tensor::CastOp>();
       if (!castOp || !canFoldIntoConsumerOp(castOp))
         return std::nullopt;
       return castOp.getSource();
     };
-    Optional<Value> sourceCastSource =
+    std::optional<Value> sourceCastSource =
         getSourceOfCastOp(insertSliceOp.getSource());
-    Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.getDest());
+    std::optional<Value> destCastSource =
+        getSourceOfCastOp(insertSliceOp.getDest());
     if (!sourceCastSource && !destCastSource)
       return failure();
 
@@ -2352,7 +2353,7 @@ struct InsertSliceOpSourceCastInserter final
     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
                                      srcType.getShape().end());
     for (int64_t i = 0; i < srcType.getRank(); ++i) {
-      if (Optional<int64_t> constInt =
+      if (std::optional<int64_t> constInt =
               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
         newSrcShape[i] = *constInt;
     }
@@ -2419,9 +2420,10 @@ void PadOp::getAsmResultNames(function_ref<void(Value, StringRef)> setNameFn) {
 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
                     Type typeToInfer, Type typeToInferFrom) {}
 
-ParseResult parseInferType(OpAsmParser &parser,
-                           Optional<OpAsmParser::UnresolvedOperand> optOperand,
-                           Type &typeToInfer, Type typeToInferFrom) {
+ParseResult
+parseInferType(OpAsmParser &parser,
+               std::optional<OpAsmParser::UnresolvedOperand> optOperand,
+               Type &typeToInfer, Type typeToInferFrom) {
   if (optOperand)
     typeToInfer = typeToInferFrom;
   return success();
@@ -3151,7 +3153,7 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
           llvm::zip(packedType.getShape().take_back(mixedTiles.size()),
                     mixedTiles),
           [](std::tuple<int64_t, OpFoldResult> it) {
-            Optional<int64_t> constTileSize =
+            std::optional<int64_t> constTileSize =
                 getConstantIntValue(std::get<1>(it));
             int64_t shape = std::get<0>(it);
             if (!constTileSize) {
@@ -3232,7 +3234,7 @@ areNotFullTiles(ArrayRef<int64_t> inputShape,
     auto it = dimAndTileMapping.find(dim);
     if (it == dimAndTileMapping.end())
       continue;
-    Optional<int64_t> constantTile = getConstantIntValue(it->second);
+    std::optional<int64_t> constantTile = getConstantIntValue(it->second);
     if (!constantTile)
       continue;
     if (inputShape[dim] % (*constantTile) != 0)
@@ -3333,7 +3335,7 @@ bool areTilesAndTiledDimsAllConstant(OpTy op) {
   SmallVector<OpFoldResult> mixedTiles = op.getMixedTiles();
   for (auto [dimDest, tile] : llvm::zip(
            packedType.getShape().take_back(mixedTiles.size()), mixedTiles)) {
-    Optional<int64_t> constTileSize = getConstantIntValue(tile);
+    std::optional<int64_t> constTileSize = getConstantIntValue(tile);
     if (!constTileSize || ShapedType::isDynamic(dimDest))
       return false;
   }
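
InsertSliceOpCastFolder above computes one std::optional per operand and only bails out when both are std::nullopt, falling back to the original value where a fold is not possible. The same control flow in a self-contained sketch (lookThroughCast and the string encoding are hypothetical):

    #include <optional>
    #include <string>

    // Pretend a value spelled "cast:X" is a foldable cast of X.
    static std::optional<std::string> lookThroughCast(const std::string &v) {
      if (v.rfind("cast:", 0) == 0)
        return v.substr(5);
      return std::nullopt;
    }

    // Fold either side if possible; fail only when neither side folds.
    static bool foldBothSides(std::string &src, std::string &dst) {
      std::optional<std::string> newSrc = lookThroughCast(src);
      std::optional<std::string> newDst = lookThroughCast(dst);
      if (!newSrc && !newDst)
        return false;
      src = newSrc.value_or(src);
      dst = newDst.value_or(dst);
      return true;
    }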

diff  --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
index ec45eb46969c9..c476cd1325e59 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -265,7 +265,7 @@ static UnpackTileDimInfo getUnpackTileDimInfo(OpBuilder &b, UnPackOp unpackOp,
   info.isAlignedToInnerTileSize = false;
   FailureOr<int64_t> cstSize = linalg::getConstantUpperBoundForIndex(
       getValueOrCreateConstantIndexOp(b, loc, tileSize));
-  Optional<int64_t> cstInnerSize = getConstantIntValue(innerTileSize);
+  std::optional<int64_t> cstInnerSize = getConstantIntValue(innerTileSize);
   if (!failed(cstSize) && cstInnerSize) {
     if (cstSize.value() % cstInnerSize.value() == 0)
       info.isAlignedToInnerTileSize = true;

diff  --git a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
index 5fbd06c2860f2..21473fa1fb909 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
@@ -26,7 +26,7 @@ using namespace mlir;
 
 /// Returns true if the given `attrOrValue` is a constant zero.
 static bool isZero(OpFoldResult attrOrValue) {
-  if (Optional<int64_t> val = getConstantIntValue(attrOrValue))
+  if (std::optional<int64_t> val = getConstantIntValue(attrOrValue))
     return *val == 0;
   return false;
 }

diff  --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index d5ae1196cd1fe..14ea18c775197 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -373,7 +373,7 @@ static LogicalResult resolveBroadcastShape(const ValueShapeRange &operands,
 }
 
 LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
@@ -398,7 +398,7 @@ LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   // Infer all dimension sizes by reducing based on inputs.
@@ -455,7 +455,7 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::EqualOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outShape;
@@ -476,7 +476,7 @@ bool tosa::EqualOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
 }
 
 LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
@@ -496,9 +496,8 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
   }
 
   if (biasShape.hasRank()) {
-    outShape[1] = outShape[1] == ShapedType::kDynamic
-                      ? biasShape.getDimSize(0)
-                      : outShape[1];
+    outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
+                                                      : outShape[1];
   }
 
   inferredReturnShapes.push_back(ShapedTypeComponents(outShape));
@@ -508,7 +507,7 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
 LogicalResult FullyConnectedOp::verify() { return verifyConvOp(*this); }
 
 LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor lhsShape = operands.getShape(0);
@@ -524,9 +523,8 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
   }
 
   if (rhsShape.hasRank()) {
-    outShape[0] = outShape[0] == ShapedType::kDynamic
-                      ? rhsShape.getDimSize(0)
-                      : outShape[0];
+    outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
+                                                      : outShape[0];
     outShape[2] = rhsShape.getDimSize(2);
   }
 
@@ -535,7 +533,7 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::PadOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
@@ -597,7 +595,7 @@ static SmallVector<int64_t> convertToMlirShape(ArrayRef<int64_t> shape) {
 }
 
 LogicalResult tosa::SliceOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ArrayAttr sizes = SliceOpAdaptor(operands, attributes).getSize();
@@ -607,13 +605,13 @@ LogicalResult tosa::SliceOp::inferReturnTypeComponents(
     outputShape.push_back(val.cast<IntegerAttr>().getValue().getSExtValue());
   }
 
-  inferredReturnShapes.push_back(ShapedTypeComponents(
-    convertToMlirShape(outputShape)));
+  inferredReturnShapes.push_back(
+      ShapedTypeComponents(convertToMlirShape(outputShape)));
   return success();
 }
 
 LogicalResult tosa::TableOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
@@ -629,7 +627,7 @@ LogicalResult tosa::TableOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::TileOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   TileOpAdaptor adaptor(operands, attributes);
@@ -663,7 +661,7 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ReshapeOpAdaptor adaptor(operands, attributes);
@@ -703,7 +701,7 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
@@ -770,7 +768,7 @@ LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::GatherOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape;
@@ -795,7 +793,7 @@ LogicalResult tosa::GatherOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ResizeOpAdaptor adaptor(operands, attributes);
@@ -838,7 +836,7 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
 }
 
 LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape;
@@ -887,7 +885,7 @@ static LogicalResult ReduceInferReturnTypes(
 
 #define REDUCE_SHAPE_INFER(OP)                                                 \
   LogicalResult OP::inferReturnTypeComponents(                                 \
-      MLIRContext *context, ::llvm::Optional<Location> location,               \
+      MLIRContext *context, ::std::optional<Location> location,                \
       ValueShapeRange operands, DictionaryAttr attributes,                     \
       RegionRange regions,                                                     \
       SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {           \
@@ -918,7 +916,7 @@ static LogicalResult NAryInferReturnTypes(
 
 #define NARY_SHAPE_INFER(OP)                                                   \
   LogicalResult OP::inferReturnTypeComponents(                                 \
-      MLIRContext *context, ::llvm::Optional<Location> location,               \
+      MLIRContext *context, ::std::optional<Location> location,                \
       ValueShapeRange operands, DictionaryAttr attributes,                     \
       RegionRange regions,                                                     \
       SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {           \
@@ -1007,7 +1005,7 @@ static LogicalResult poolingInferReturnTypes(
 }
 
 LogicalResult Conv2DOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
@@ -1074,7 +1072,7 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
 LogicalResult Conv2DOp::verify() { return verifyConvOp(*this); }
 
 LogicalResult Conv3DOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamic);
@@ -1151,21 +1149,21 @@ LogicalResult Conv3DOp::inferReturnTypeComponents(
 LogicalResult Conv3DOp::verify() { return verifyConvOp(*this); }
 
 LogicalResult AvgPool2dOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   return poolingInferReturnTypes(operands, attributes, inferredReturnShapes);
 }
 
 LogicalResult MaxPool2dOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   return poolingInferReturnTypes(operands, attributes, inferredReturnShapes);
 }
 
 LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
@@ -1245,7 +1243,7 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
 LogicalResult DepthwiseConv2DOp::verify() { return verifyConvOp(*this); }
 
 LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
@@ -1313,7 +1311,7 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
 }
 
 LogicalResult IfOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<tosa::YieldOp> yieldOps;
@@ -1357,7 +1355,7 @@ LogicalResult IfOp::inferReturnTypeComponents(
 }
 
 LogicalResult WhileOp::inferReturnTypeComponents(
-    MLIRContext *context, ::llvm::Optional<Location> location,
+    MLIRContext *context, ::std::optional<Location> location,
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<tosa::YieldOp> yieldOps;

diff  --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index 18479e0383902..9517cbf8703cb 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -46,7 +46,7 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
 private:
   void runOnOperation() override;
 
-  llvm::Optional<TosaProfileEnum> profileType;
+  std::optional<TosaProfileEnum> profileType;
 };
 
 void TosaValidation::runOnOperation() {

diff  --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
index 402e80b0af6b1..f7a8ee1a979a0 100644
--- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
+++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
@@ -117,8 +117,8 @@ LogicalResult PatternApplicatorExtension::findAllMatches(
 // AlternativesOp
 //===----------------------------------------------------------------------===//
 
-OperandRange
-transform::AlternativesOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+OperandRange transform::AlternativesOp::getSuccessorEntryOperands(
+    std::optional<unsigned> index) {
   if (index && getOperation()->getNumOperands() == 1)
     return getOperation()->getOperands();
   return OperandRange(getOperation()->operand_end(),
@@ -126,7 +126,7 @@ transform::AlternativesOp::getSuccessorEntryOperands(Optional<unsigned> index) {
 }
 
 void transform::AlternativesOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   for (Region &alternative : llvm::drop_begin(
            getAlternatives(), index.has_value() ? *index + 1 : 0)) {
@@ -338,7 +338,7 @@ void transform::ForeachOp::getEffects(
 }
 
 void transform::ForeachOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   Region *bodyRegion = &getBody();
   if (!index) {
@@ -353,7 +353,7 @@ void transform::ForeachOp::getSuccessorRegions(
 }
 
 OperandRange
-transform::ForeachOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+transform::ForeachOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
   // The iteration variable op handle is mapped to a subset (one op to be
   // precise) of the payload ops of the ForeachOp operand.
   assert(index && *index == 0 && "unexpected region index");
@@ -737,8 +737,8 @@ void transform::SequenceOp::getEffects(
   }
 }
 
-OperandRange
-transform::SequenceOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+OperandRange transform::SequenceOp::getSuccessorEntryOperands(
+    std::optional<unsigned> index) {
   assert(index && *index == 0 && "unexpected region index");
   if (getOperation()->getNumOperands() == 1)
     return getOperation()->getOperands();
@@ -747,7 +747,7 @@ transform::SequenceOp::getSuccessorEntryOperands(Optional<unsigned> index) {
 }
 
 void transform::SequenceOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   if (!index) {
     Region *bodyRegion = &getBody();
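
The RegionBranchOpInterface hooks above all take a std::optional<unsigned> region index, where std::nullopt means control flow is entering from the parent op and a value names the region just exited. A toy sketch of the AlternativesOp variant of that convention (regions are plain indices here, not MLIR regions):

    #include <optional>
    #include <vector>

    // From the parent, every region is a possible successor; from region i,
    // only the regions after it are.
    static std::vector<unsigned> successorRegions(std::optional<unsigned> index,
                                                  unsigned numRegions) {
      std::vector<unsigned> successors;
      unsigned first = index.has_value() ? *index + 1 : 0;
      for (unsigned r = first; r < numRegions; ++r)
        successors.push_back(r);
      return successors;
    }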

diff  --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
index 272692db87103..cb5b93bf0d600 100644
--- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
+++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
@@ -57,8 +57,7 @@ mlir::getReassociationIndicesForCollapse(ArrayRef<int64_t> sourceShape,
     // dimensions should also be dynamic and product of all previous unprocessed
     // dimensions of the expanded shape should be 1.
     if (sourceShape[sourceDim] == ShapedType::kDynamic &&
-        (currTargetShape != ShapedType::kDynamic ||
-         prodOfCollapsedDims != 1))
+        (currTargetShape != ShapedType::kDynamic || prodOfCollapsedDims != 1))
       return std::nullopt;
 
     // If the collapsed dim is dynamic, the current expanded dim should also
@@ -229,7 +228,7 @@ LogicalResult mlir::reshapeLikeShapesAreCompatible(
     ArrayRef<ReassociationIndices> reassociationMaps, bool isExpandingReshape) {
   unsigned expandedDimStart = 0;
   for (const auto &map : llvm::enumerate(reassociationMaps)) {
-    Optional<int64_t> dynamicShape;
+    std::optional<int64_t> dynamicShape;
     int64_t linearizedStaticShape = 1;
     for (const auto &dim : llvm::enumerate(
              expandedShape.slice(expandedDimStart, map.value().size()))) {
@@ -279,8 +278,8 @@ mlir::getSlicedDimensions(ArrayRef<OpFoldResult> sliceInputShape,
   llvm::SmallBitVector mask(sliceInputShape.size());
   unsigned idx = 0;
   for (const auto &[offset, size, stride] : sliceParams) {
-    Optional<int64_t> offsetConst = getConstantIntValue(offset);
-    Optional<int64_t> strideConst = getConstantIntValue(stride);
+    std::optional<int64_t> offsetConst = getConstantIntValue(offset);
+    std::optional<int64_t> strideConst = getConstantIntValue(stride);
     mask[idx] = !isEqualConstantIntOrValue(size, sliceInputShape[idx]) ||
                 (!strideConst || *strideConst != 1) ||
                 (!offsetConst || *offsetConst != 0);

diff  --git a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp
index 092dbae9beb00..436e6e901a4a6 100644
--- a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp
+++ b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp
@@ -91,7 +91,7 @@ SmallVector<OpFoldResult> getAsOpFoldResult(ArrayAttr arrayAttr) {
 }
 
 /// If ofr is a constant integer or an IntegerAttr, return the integer.
-Optional<int64_t> getConstantIntValue(OpFoldResult ofr) {
+std::optional<int64_t> getConstantIntValue(OpFoldResult ofr) {
   // Case 1: Check for Constant integer.
   if (auto val = ofr.dyn_cast<Value>()) {
     APSInt intVal;
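
getConstantIntValue itself is the helper most of the callers above lean on: it inspects an OpFoldResult and yields the integer only when it is a known constant. A rough standalone model of that contract, with std::variant standing in for OpFoldResult and no MLIR types involved:

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <variant>

    // Either a symbolic SSA name or a known constant.
    using FoldResult = std::variant<std::string, int64_t>;

    static std::optional<int64_t> constantIntValue(const FoldResult &ofr) {
      if (const int64_t *c = std::get_if<int64_t>(&ofr))
        return *c;
      return std::nullopt;
    }

    // Typical caller, as in isZero from SplitPaddingPatterns.cpp above:
    static bool isZeroConstant(const FoldResult &ofr) {
      if (std::optional<int64_t> val = constantIntValue(ofr))
        return *val == 0;
      return false;
    }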

diff  --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 4afbaee3b3e60..67d0f7566677c 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -315,7 +315,8 @@ OpFoldResult MultiDimReductionOp::fold(ArrayRef<Attribute> operands) {
   return {};
 }
 
-Optional<SmallVector<int64_t, 4>> MultiDimReductionOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>>
+MultiDimReductionOp::getShapeForUnroll() {
   return llvm::to_vector<4>(getSourceVectorType().getShape());
 }
 
@@ -500,7 +501,7 @@ Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op,
   return nullptr;
 }
 
-Optional<SmallVector<int64_t, 4>> ReductionOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>> ReductionOp::getShapeForUnroll() {
   return llvm::to_vector<4>(getVectorType().getShape());
 }
 
@@ -939,7 +940,7 @@ std::vector<std::pair<int64_t, int64_t>> ContractionOp::getBatchDimMap() {
                    getContext());
 }
 
-Optional<SmallVector<int64_t, 4>> ContractionOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>> ContractionOp::getShapeForUnroll() {
   SmallVector<int64_t, 4> shape;
   getIterationBounds(shape);
   return shape;
@@ -1077,7 +1078,7 @@ void vector::ExtractOp::build(OpBuilder &builder, OperationState &result,
 }
 
 LogicalResult
-ExtractOp::inferReturnTypes(MLIRContext *, Optional<Location>,
+ExtractOp::inferReturnTypes(MLIRContext *, std::optional<Location>,
                             ValueRange operands, DictionaryAttr attributes,
                             RegionRange,
                             SmallVectorImpl<Type> &inferredReturnTypes) {
@@ -1721,7 +1722,7 @@ static void populateFromInt64AttrArray(ArrayAttr arrayAttr,
 // FmaOp
 //===----------------------------------------------------------------------===//
 
-Optional<SmallVector<int64_t, 4>> FMAOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>> FMAOp::getShapeForUnroll() {
   return llvm::to_vector<4>(getVectorType().getShape());
 }
 
@@ -2001,7 +2002,7 @@ LogicalResult ShuffleOp::verify() {
 }
 
 LogicalResult
-ShuffleOp::inferReturnTypes(MLIRContext *, Optional<Location>,
+ShuffleOp::inferReturnTypes(MLIRContext *, std::optional<Location>,
                             ValueRange operands, DictionaryAttr attributes,
                             RegionRange,
                             SmallVectorImpl<Type> &inferredReturnTypes) {
@@ -3178,7 +3179,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
 void TransferReadOp::build(OpBuilder &builder, OperationState &result,
                            VectorType vectorType, Value source,
                            ValueRange indices, AffineMap permutationMap,
-                           Optional<ArrayRef<bool>> inBounds) {
+                           std::optional<ArrayRef<bool>> inBounds) {
   auto permutationMapAttr = AffineMapAttr::get(permutationMap);
   auto inBoundsAttr = (inBounds && !inBounds.value().empty())
                           ? builder.getBoolArrayAttr(inBounds.value())
@@ -3191,7 +3192,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
 void TransferReadOp::build(OpBuilder &builder, OperationState &result,
                            VectorType vectorType, Value source,
                            ValueRange indices, Value padding,
-                           Optional<ArrayRef<bool>> inBounds) {
+                           std::optional<ArrayRef<bool>> inBounds) {
   AffineMap permutationMap = getTransferMinorIdentityMap(
       source.getType().cast<ShapedType>(), vectorType);
   auto permutationMapAttr = AffineMapAttr::get(permutationMap);
@@ -3208,7 +3209,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
 void TransferReadOp::build(OpBuilder &builder, OperationState &result,
                            VectorType vectorType, Value source,
                            ValueRange indices,
-                           Optional<ArrayRef<bool>> inBounds) {
+                           std::optional<ArrayRef<bool>> inBounds) {
   Type elemType = source.getType().cast<ShapedType>().getElementType();
   Value padding = builder.create<arith::ConstantOp>(
       result.location, elemType, builder.getZeroAttr(elemType));
@@ -3573,7 +3574,7 @@ OpFoldResult TransferReadOp::fold(ArrayRef<Attribute>) {
   return OpFoldResult();
 }
 
-Optional<SmallVector<int64_t, 4>> TransferReadOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>> TransferReadOp::getShapeForUnroll() {
   return llvm::to_vector<4>(getVectorType().getShape());
 }
 
@@ -3800,7 +3801,7 @@ void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
 void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
                             Value vector, Value dest, ValueRange indices,
                             AffineMap permutationMap,
-                            Optional<ArrayRef<bool>> inBounds) {
+                            std::optional<ArrayRef<bool>> inBounds) {
   auto permutationMapAttr = AffineMapAttr::get(permutationMap);
   auto inBoundsAttr = (inBounds && !inBounds.value().empty())
                           ? builder.getBoolArrayAttr(inBounds.value())
@@ -3813,7 +3814,7 @@ void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
 ///    map to 'getMinorIdentityMap'.
 void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
                             Value vector, Value dest, ValueRange indices,
-                            Optional<ArrayRef<bool>> inBounds) {
+                            std::optional<ArrayRef<bool>> inBounds) {
   auto vectorType = vector.getType().cast<VectorType>();
   AffineMap permutationMap = getTransferMinorIdentityMap(
       dest.getType().cast<ShapedType>(), vectorType);
@@ -4046,7 +4047,7 @@ LogicalResult TransferWriteOp::fold(ArrayRef<Attribute> operands,
   return memref::foldMemRefCast(*this);
 }
 
-Optional<SmallVector<int64_t, 4>> TransferWriteOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>> TransferWriteOp::getShapeForUnroll() {
   return llvm::to_vector<4>(getVectorType().getShape());
 }
 
@@ -5037,7 +5038,7 @@ LogicalResult vector::TransposeOp::verify() {
   return success();
 }
 
-Optional<SmallVector<int64_t, 4>> TransposeOp::getShapeForUnroll() {
+std::optional<SmallVector<int64_t, 4>> TransposeOp::getShapeForUnroll() {
   return llvm::to_vector<4>(getResultType().getShape());
 }
 
@@ -5580,7 +5581,7 @@ ParseResult WarpExecuteOnLane0Op::parse(OpAsmParser &parser,
 }
 
 void WarpExecuteOnLane0Op::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   if (index) {
     regions.push_back(RegionSuccessor(getResults()));
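
A usage sketch (not part of this diff): call sites of getShapeForUnroll() compile unchanged, because std::optional mirrors the llvm::Optional surface relied on here (operator bool, operator*, has_value, value). Assuming a hypothetical vector::TransferReadOp named readOp is in scope:

    if (std::optional<SmallVector<int64_t, 4>> shape = readOp.getShapeForUnroll()) {
      // Unrolling proceeds only when the op reports a static shape.
      for (int64_t dim : *shape)
        llvm::errs() << dim << ' ';
    }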

diff  --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index fda709303b99e..048c3296dfef4 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -44,7 +44,7 @@ using namespace mlir;
 using namespace mlir::vector;
 
 // Helper to find an index in an affine map.
-static Optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
+static std::optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
   for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
     int64_t idx = map.getDimPosition(i);
     if (idx == index)
@@ -147,11 +147,11 @@ static SmallVector<IntType> extractVector(ArrayAttr arrayAttr) {
 }
 
 /// Helper to create arithmetic operation associated with a kind of contraction.
-static Optional<Value> createContractArithOp(Location loc, Value x, Value y,
-                                             Value acc,
-                                             vector::CombiningKind kind,
-                                             PatternRewriter &rewriter,
-                                             bool isInt) {
+static std::optional<Value> createContractArithOp(Location loc, Value x,
+                                                  Value y, Value acc,
+                                                  vector::CombiningKind kind,
+                                                  PatternRewriter &rewriter,
+                                                  bool isInt) {
   using vector::CombiningKind;
   Value mul;
   if (isInt) {
@@ -169,12 +169,13 @@ static Optional<Value> createContractArithOp(Location loc, Value x, Value y,
       return std::nullopt;
     // Special case for fused multiply-add.
     if (acc && acc.getType().isa<VectorType>() && kind == CombiningKind::ADD) {
-      return Optional<Value>(rewriter.create<vector::FMAOp>(loc, x, y, acc));
+      return std::optional<Value>(
+          rewriter.create<vector::FMAOp>(loc, x, y, acc));
     }
     mul = rewriter.create<arith::MulFOp>(loc, x, y);
   }
   if (!acc)
-    return Optional<Value>(mul);
+    return std::optional<Value>(mul);
   return makeArithReduction(rewriter, loc, kind, mul, acc);
 }
 
@@ -191,7 +192,7 @@ static SmallVector<int64_t> getReductionIndex(AffineMap map,
 
 /// Look for a given dimension in an affine map and return its position. Return
 /// std::nullopt if the dimension is not in the map results.
-static llvm::Optional<unsigned> getDimPosition(AffineMap map, unsigned dim) {
+static std::optional<unsigned> getDimPosition(AffineMap map, unsigned dim) {
   for (unsigned i = 0, e = map.getNumResults(); i < e; i++) {
     if (map.getDimPosition(i) == dim)
       return i;
@@ -552,8 +553,8 @@ class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
     if (!rhsType) {
       // Special case: AXPY operation.
       Value b = rewriter.create<vector::BroadcastOp>(loc, lhsType, op.getRhs());
-      Optional<Value> mult = createContractArithOp(loc, op.getLhs(), b, acc,
-                                                   kind, rewriter, isInt);
+      std::optional<Value> mult = createContractArithOp(
+          loc, op.getLhs(), b, acc, kind, rewriter, isInt);
       if (!mult.has_value())
         return failure();
       rewriter.replaceOp(op, mult.value());
@@ -570,7 +571,7 @@ class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
       Value r = nullptr;
       if (acc)
         r = rewriter.create<vector::ExtractOp>(loc, rhsType, acc, pos);
-      Optional<Value> m =
+      std::optional<Value> m =
           createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt);
       if (!m.has_value())
         return failure();
@@ -645,7 +646,7 @@ struct ContractOpToElementwise
     // Loop through the parallel dimensions to calculate the dimensions to
     // broadcast and to permute in order to extract only parallel dimensions.
     for (unsigned i = 0; i < numParallelDims; i++) {
-      llvm::Optional<unsigned> lhsDim =
+      std::optional<unsigned> lhsDim =
           getDimPosition(lhsMap, accMap.getDimPosition(i));
       if (lhsDim) {
         lhsTranspose.push_back(numLhsDimToBroadcast + *lhsDim);
@@ -655,7 +656,7 @@ struct ContractOpToElementwise
             contractOp.getResultType().cast<VectorType>().getDimSize(i));
         lhsTranspose.push_back(lhsDims.size() - 1);
       }
-      llvm::Optional<unsigned> rhsDim =
+      std::optional<unsigned> rhsDim =
           getDimPosition(rhsMap, accMap.getDimPosition(i));
       if (rhsDim) {
         rhsTranspose.push_back(numRhsDimToBroadcast + *rhsDim);
@@ -690,7 +691,7 @@ struct ContractOpToElementwise
         loc, newLhs, rewriter.getI64ArrayAttr(lhsOffsets));
     newRhs = rewriter.create<vector::ExtractOp>(
         loc, newRhs, rewriter.getI64ArrayAttr(rhsOffsets));
-    Optional<Value> result =
+    std::optional<Value> result =
         createContractArithOp(loc, newLhs, newRhs, contractOp.getAcc(),
                               contractOp.getKind(), rewriter, isInt);
     rewriter.replaceOp(contractOp, {*result});
@@ -2010,8 +2011,8 @@ ContractionOpLowering::lowerReduction(vector::ContractionOp op,
   // Use iterator index 0.
   int64_t iterIndex = 0;
   SmallVector<AffineMap> iMap = op.getIndexingMapsArray();
-  Optional<int64_t> lookupLhs = getResultIndex(iMap[0], iterIndex);
-  Optional<int64_t> lookupRhs = getResultIndex(iMap[1], iterIndex);
+  std::optional<int64_t> lookupLhs = getResultIndex(iMap[0], iterIndex);
+  std::optional<int64_t> lookupRhs = getResultIndex(iMap[1], iterIndex);
   if (!lookupLhs.has_value())
     return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
       diag << "expected iterIndex=" << iterIndex << "to map to a LHS dimension";
@@ -2075,7 +2076,7 @@ ContractionOpLowering::lowerReduction(vector::ContractionOp op,
 struct TransferReadToVectorLoadLowering
     : public OpRewritePattern<vector::TransferReadOp> {
   TransferReadToVectorLoadLowering(MLIRContext *context,
-                                   llvm::Optional<unsigned> maxRank,
+                                   std::optional<unsigned> maxRank,
                                    PatternBenefit benefit = 1)
       : OpRewritePattern<vector::TransferReadOp>(context, benefit),
         maxTransferRank(maxRank) {}
@@ -2151,7 +2152,7 @@ struct TransferReadToVectorLoadLowering
     return success();
   }
 
-  llvm::Optional<unsigned> maxTransferRank;
+  std::optional<unsigned> maxTransferRank;
 };
 
 /// Replace a 0-d vector.load with a memref.load + vector.broadcast.
@@ -2217,7 +2218,7 @@ struct VectorStoreToMemrefStoreLowering
 struct TransferWriteToVectorStoreLowering
     : public OpRewritePattern<vector::TransferWriteOp> {
   TransferWriteToVectorStoreLowering(MLIRContext *context,
-                                     llvm::Optional<unsigned> maxRank,
+                                     std::optional<unsigned> maxRank,
                                      PatternBenefit benefit = 1)
       : OpRewritePattern<vector::TransferWriteOp>(context, benefit),
         maxTransferRank(maxRank) {}
@@ -2280,7 +2281,7 @@ struct TransferWriteToVectorStoreLowering
     return success();
   }
 
-  llvm::Optional<unsigned> maxTransferRank;
+  std::optional<unsigned> maxTransferRank;
 };
 
 // Returns the values in `arrayAttr` as an integer vector.
@@ -3026,7 +3027,7 @@ void mlir::vector::
 }
 
 void mlir::vector::populateVectorTransferLoweringPatterns(
-    RewritePatternSet &patterns, llvm::Optional<unsigned> maxTransferRank,
+    RewritePatternSet &patterns, std::optional<unsigned> maxTransferRank,
     PatternBenefit benefit) {
   patterns.add<TransferReadToVectorLoadLowering,
                TransferWriteToVectorStoreLowering>(patterns.getContext(),
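
One pattern worth noting in this file, shown as a standalone sketch with a hypothetical helper name: when one arm of a conditional yields std::nullopt, the other arm still has to spell out the optional type, because Value and std::nullopt_t give the ternary no common type to deduce.

    static std::optional<Value> pickAccumulator(bool hasAcc, Value acc) {
      // `hasAcc ? acc : std::nullopt` would not compile; wrap the value arm explicitly.
      return hasAcc ? std::optional<Value>(acc) : std::nullopt;
    }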

diff  --git a/mlir/lib/IR/BuiltinDialect.cpp b/mlir/lib/IR/BuiltinDialect.cpp
index 6686e7f58c9c9..b66346ffa39c0 100644
--- a/mlir/lib/IR/BuiltinDialect.cpp
+++ b/mlir/lib/IR/BuiltinDialect.cpp
@@ -126,7 +126,7 @@ void BuiltinDialect::initialize() {
 //===----------------------------------------------------------------------===//
 
 void ModuleOp::build(OpBuilder &builder, OperationState &state,
-                     Optional<StringRef> name) {
+                     std::optional<StringRef> name) {
   state.addRegion()->emplaceBlock();
   if (name) {
     state.attributes.push_back(builder.getNamedAttr(
@@ -135,7 +135,7 @@ void ModuleOp::build(OpBuilder &builder, OperationState &state,
 }
 
 /// Construct a module from the given context.
-ModuleOp ModuleOp::create(Location loc, Optional<StringRef> name) {
+ModuleOp ModuleOp::create(Location loc, std::optional<StringRef> name) {
   OpBuilder builder(loc->getContext());
   return builder.create<ModuleOp>(loc, name);
 }

diff  --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index 4eef7dd496815..238b5bbb4eae1 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -246,7 +246,7 @@ VectorType VectorType::scaleElementBitwidth(unsigned scale) {
   return VectorType();
 }
 
-VectorType VectorType::cloneWith(Optional<ArrayRef<int64_t>> shape,
+VectorType VectorType::cloneWith(std::optional<ArrayRef<int64_t>> shape,
                                  Type elementType) const {
   return VectorType::get(shape.value_or(getShape()), elementType,
                          getNumScalableDims());
@@ -268,7 +268,7 @@ ArrayRef<int64_t> TensorType::getShape() const {
   return cast<RankedTensorType>().getShape();
 }
 
-TensorType TensorType::cloneWith(Optional<ArrayRef<int64_t>> shape,
+TensorType TensorType::cloneWith(std::optional<ArrayRef<int64_t>> shape,
                                  Type elementType) const {
   if (auto unrankedTy = dyn_cast<UnrankedTensorType>()) {
     if (shape)
@@ -346,7 +346,7 @@ ArrayRef<int64_t> BaseMemRefType::getShape() const {
   return cast<MemRefType>().getShape();
 }
 
-BaseMemRefType BaseMemRefType::cloneWith(Optional<ArrayRef<int64_t>> shape,
+BaseMemRefType BaseMemRefType::cloneWith(std::optional<ArrayRef<int64_t>> shape,
                                          Type elementType) const {
   if (auto unrankedTy = dyn_cast<UnrankedMemRefType>()) {
     if (!shape)
@@ -387,7 +387,7 @@ unsigned BaseMemRefType::getMemorySpaceAsInt() const {
 /// which dimensions must be kept when e.g. compute MemRef strides under
 /// rank-reducing operations. Return std::nullopt if reducedShape cannot be
 /// obtained by dropping only `1` entries in `originalShape`.
-llvm::Optional<llvm::SmallDenseSet<unsigned>>
+std::optional<llvm::SmallDenseSet<unsigned>>
 mlir::computeRankReductionMask(ArrayRef<int64_t> originalShape,
                                ArrayRef<int64_t> reducedShape) {
   size_t originalRank = originalShape.size(), reducedRank = reducedShape.size();
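
The cloneWith overloads above take std::optional<ArrayRef<int64_t>>, so callers either override the shape or pass std::nullopt to keep it, and the implementations fall back through value_or. A sketch, assuming a VectorType vecTy is already in scope:

    SmallVector<int64_t> newShape = {2, 4};
    // Keep the original shape, only swap the element type source.
    VectorType sameShape = vecTy.cloneWith(std::nullopt, vecTy.getElementType());
    // Override the shape explicitly.
    VectorType reshaped  = vecTy.cloneWith(ArrayRef<int64_t>(newShape),
                                           vecTy.getElementType());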

diff  --git a/mlir/lib/IR/Dialect.cpp b/mlir/lib/IR/Dialect.cpp
index 7fd484a20636c..429b0f10baa3d 100644
--- a/mlir/lib/IR/Dialect.cpp
+++ b/mlir/lib/IR/Dialect.cpp
@@ -75,7 +75,7 @@ Type Dialect::parseType(DialectAsmParser &parser) const {
   return Type();
 }
 
-Optional<Dialect::ParseOpHook>
+std::optional<Dialect::ParseOpHook>
 Dialect::getParseOperationHook(StringRef opName) const {
   return std::nullopt;
 }

diff  --git a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
index e87724eee796c..65006f3569fca 100644
--- a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
+++ b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
@@ -36,7 +36,7 @@ SuccessorOperands::SuccessorOperands(unsigned int producedOperandCount,
 /// Returns the `BlockArgument` corresponding to operand `operandIndex` in some
 /// successor if 'operandIndex' is within the range of 'operands', or
 /// std::nullopt if `operandIndex` isn't a successor operand index.
-Optional<BlockArgument>
+std::optional<BlockArgument>
 detail::getBranchSuccessorArgument(const SuccessorOperands &operands,
                                    unsigned operandIndex, Block *successor) {
   OperandRange forwardedOperands = operands.getForwardedOperands();
@@ -90,17 +90,17 @@ detail::verifyBranchSuccessorOperands(Operation *op, unsigned succNo,
 /// inputs that flow from `sourceIndex' to the given region, or std::nullopt if
 /// the exact type match verification is not necessary (e.g., if the Op verifies
 /// the match itself).
-static LogicalResult
-verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
-                         function_ref<Optional<TypeRange>(Optional<unsigned>)>
-                             getInputsTypesForRegion) {
+static LogicalResult verifyTypesAlongAllEdges(
+    Operation *op, std::optional<unsigned> sourceNo,
+    function_ref<std::optional<TypeRange>(std::optional<unsigned>)>
+        getInputsTypesForRegion) {
   auto regionInterface = cast<RegionBranchOpInterface>(op);
 
   SmallVector<RegionSuccessor, 2> successors;
   regionInterface.getSuccessorRegions(sourceNo, successors);
 
   for (RegionSuccessor &succ : successors) {
-    Optional<unsigned> succRegionNo;
+    std::optional<unsigned> succRegionNo;
     if (!succ.isParent())
       succRegionNo = succ.getSuccessor()->getRegionNumber();
 
@@ -119,7 +119,8 @@ verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
       return diag;
     };
 
-    Optional<TypeRange> sourceTypes = getInputsTypesForRegion(succRegionNo);
+    std::optional<TypeRange> sourceTypes =
+        getInputsTypesForRegion(succRegionNo);
     if (!sourceTypes.has_value())
       continue;
 
@@ -151,7 +152,8 @@ verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
 LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) {
   auto regionInterface = cast<RegionBranchOpInterface>(op);
 
-  auto inputTypesFromParent = [&](Optional<unsigned> regionNo) -> TypeRange {
+  auto inputTypesFromParent =
+      [&](std::optional<unsigned> regionNo) -> TypeRange {
     return regionInterface.getSuccessorEntryOperands(regionNo).getTypes();
   };
 
@@ -179,7 +181,7 @@ LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) {
     // implementing the `RegionBranchTerminatorOpInterface`, all should have the
     // same operand types when passing them to the same region.
 
-    Optional<OperandRange> regionReturnOperands;
+    std::optional<OperandRange> regionReturnOperands;
     for (Block &block : region) {
       Operation *terminator = block.getTerminator();
       auto terminatorOperands =
@@ -202,7 +204,7 @@ LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) {
     }
 
     auto inputTypesFromRegion =
-        [&](Optional<unsigned> regionNo) -> Optional<TypeRange> {
+        [&](std::optional<unsigned> regionNo) -> std::optional<TypeRange> {
       // If there is no return-like terminator, the op itself should verify
       // type consistency.
       if (!regionReturnOperands)
@@ -307,7 +309,7 @@ bool RegionBranchOpInterface::isRepetitiveRegion(unsigned index) {
 }
 
 void RegionBranchOpInterface::getSuccessorRegions(
-    Optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
+    std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
   unsigned numInputs = 0;
   if (index) {
     // If the predecessor is a region, get the number of operands from an
@@ -367,9 +369,9 @@ bool mlir::isRegionReturnLike(Operation *operation) {
 /// `OperandRange` represents all operands that are passed to the specified
 /// successor region. If `regionIndex` is `std::nullopt`, all operands that are
 /// passed to the parent operation will be returned.
-Optional<MutableOperandRange>
-mlir::getMutableRegionBranchSuccessorOperands(Operation *operation,
-                                              Optional<unsigned> regionIndex) {
+std::optional<MutableOperandRange>
+mlir::getMutableRegionBranchSuccessorOperands(
+    Operation *operation, std::optional<unsigned> regionIndex) {
   // Try to query a RegionBranchTerminatorOpInterface to determine
   // all successor operands that will be passed to the successor
   // input arguments.
@@ -388,9 +390,9 @@ mlir::getMutableRegionBranchSuccessorOperands(Operation *operation,
 /// Returns the read only operands that are passed to the region with the given
 /// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more
 /// information.
-Optional<OperandRange>
+std::optional<OperandRange>
 mlir::getRegionBranchSuccessorOperands(Operation *operation,
-                                       Optional<unsigned> regionIndex) {
+                                       std::optional<unsigned> regionIndex) {
   auto range = getMutableRegionBranchSuccessorOperands(operation, regionIndex);
-  return range ? Optional<OperandRange>(*range) : std::nullopt;
+  return range ? std::optional<OperandRange>(*range) : std::nullopt;
 }
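
Throughout this interface, std::optional<unsigned> keeps the existing convention that std::nullopt names the parent operation rather than one of its regions. A consumer sketch, with a hypothetical op branchOp implementing RegionBranchOpInterface:

    SmallVector<RegionSuccessor, 2> fromParent, fromRegion0;
    // Successors reachable when entering from the parent op itself.
    branchOp.getSuccessorRegions(/*index=*/std::nullopt, fromParent);
    // Successors reachable when leaving region 0.
    branchOp.getSuccessorRegions(/*index=*/0u, fromRegion0);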

diff  --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
index 34200eb0daebf..e7d7e774f2f6a 100644
--- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp
+++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
@@ -174,12 +174,13 @@ ShapeAdaptor ValueShapeRange::getShape(int index) const {
 }
 
 LogicalResult mlir::detail::inferReturnTensorTypes(
-    function_ref<LogicalResult(
-        MLIRContext *, Optional<Location> location, ValueShapeRange operands,
-        DictionaryAttr attributes, RegionRange regions,
-        SmallVectorImpl<ShapedTypeComponents> &retComponents)>
+    function_ref<
+        LogicalResult(MLIRContext *, std::optional<Location> location,
+                      ValueShapeRange operands, DictionaryAttr attributes,
+                      RegionRange regions,
+                      SmallVectorImpl<ShapedTypeComponents> &retComponents)>
         componentTypeFn,
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   SmallVector<ShapedTypeComponents, 2> retComponents;

diff  --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp
index b4a57f3537016..90530c95fb539 100644
--- a/mlir/lib/Rewrite/ByteCode.cpp
+++ b/mlir/lib/Rewrite/ByteCode.cpp
@@ -46,7 +46,7 @@ PDLByteCodePattern PDLByteCodePattern::create(pdl_interp::RecordMatchOp matchOp,
         llvm::to_vector<8>(generatedOpsAttr.getAsValueRange<StringAttr>());
 
   // Check to see if this is pattern matches a specific operation type.
-  if (Optional<StringRef> rootKind = matchOp.getRootKind())
+  if (std::optional<StringRef> rootKind = matchOp.getRootKind())
     return PDLByteCodePattern(rewriterAddr, configSet, *rootKind, benefit, ctx,
                               generatedOps);
   return PDLByteCodePattern(rewriterAddr, configSet, MatchAnyOpTypeTag(),
@@ -940,7 +940,7 @@ void Generator::generate(pdl_interp::GetOperandOp op, ByteCodeWriter &writer) {
 }
 void Generator::generate(pdl_interp::GetOperandsOp op, ByteCodeWriter &writer) {
   Value result = op.getValue();
-  Optional<uint32_t> index = op.getIndex();
+  std::optional<uint32_t> index = op.getIndex();
   writer.append(OpCode::GetOperands,
                 index.value_or(std::numeric_limits<uint32_t>::max()),
                 op.getInputOp());
@@ -960,7 +960,7 @@ void Generator::generate(pdl_interp::GetResultOp op, ByteCodeWriter &writer) {
 }
 void Generator::generate(pdl_interp::GetResultsOp op, ByteCodeWriter &writer) {
   Value result = op.getValue();
-  Optional<uint32_t> index = op.getIndex();
+  std::optional<uint32_t> index = op.getIndex();
   writer.append(OpCode::GetResults,
                 index.value_or(std::numeric_limits<uint32_t>::max()),
                 op.getInputOp());
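
The GetOperands/GetResults writers above use value_or to collapse an absent index into a sentinel in a single expression. The idiom in isolation, as a self-contained sketch:

    #include <cstdint>
    #include <limits>
    #include <optional>

    uint32_t encodeIndex(std::optional<uint32_t> index) {
      // std::nullopt encodes as the "all operands/results" sentinel.
      return index.value_or(std::numeric_limits<uint32_t>::max());
    }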

diff  --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
index cd1f58f632146..5ffcb128f6989 100644
--- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp
+++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
@@ -42,7 +42,7 @@ DIBasicTypeAttr DebugImporter::translateImpl(llvm::DIBasicType *node) {
 }
 
 DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) {
-  Optional<DIEmissionKind> emissionKind =
+  std::optional<DIEmissionKind> emissionKind =
       symbolizeDIEmissionKind(node->getEmissionKind());
   return DICompileUnitAttr::get(context, node->getSourceLanguage(),
                                 translate(node->getFile()),
@@ -51,7 +51,7 @@ DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) {
 }
 
 DICompositeTypeAttr DebugImporter::translateImpl(llvm::DICompositeType *node) {
-  Optional<DIFlags> flags = symbolizeDIFlags(node->getFlags());
+  std::optional<DIFlags> flags = symbolizeDIFlags(node->getFlags());
   SmallVector<DINodeAttr> elements;
   for (llvm::DINode *element : node->getElements()) {
     assert(element && "expected a non-null element type");
@@ -102,7 +102,7 @@ DIScopeAttr DebugImporter::translateImpl(llvm::DIScope *node) {
 }
 
 DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) {
-  Optional<DISubprogramFlags> subprogramFlags =
+  std::optional<DISubprogramFlags> subprogramFlags =
       symbolizeDISubprogramFlags(node->getSubprogram()->getSPFlags());
   return DISubprogramAttr::get(
       context, translate(node->getUnit()), translate(node->getScope()),

diff  --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 5fa1593cc333f..8be57d2cf6542 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -27,7 +27,7 @@ using namespace mlir;
 
 namespace {
 static llvm::omp::ScheduleKind
-convertToScheduleKind(Optional<omp::ClauseScheduleKind> schedKind) {
+convertToScheduleKind(std::optional<omp::ClauseScheduleKind> schedKind) {
   if (!schedKind.has_value())
     return llvm::omp::OMP_SCHEDULE_Default;
   switch (schedKind.value()) {
@@ -398,7 +398,7 @@ static omp::ReductionDeclareOp findReductionDecl(omp::WsLoopOp container,
 static void
 collectReductionDecls(omp::WsLoopOp loop,
                       SmallVectorImpl<omp::ReductionDeclareOp> &reductions) {
-  Optional<ArrayAttr> attr = loop.getReductions();
+  std::optional<ArrayAttr> attr = loop.getReductions();
   if (!attr)
     return;
 
@@ -855,7 +855,8 @@ convertOmpWsLoop(Operation &opInst, llvm::IRBuilderBase &builder,
 
   // TODO: Handle doacross loops when the ordered clause has a parameter.
   bool isOrdered = loop.getOrderedVal().has_value();
-  Optional<omp::ScheduleModifier> scheduleModifier = loop.getScheduleModifier();
+  std::optional<omp::ScheduleModifier> scheduleModifier =
+      loop.getScheduleModifier();
   bool isSimd = loop.getSimdModifier();
 
   ompBuilder->applyWorkshareLoop(
@@ -989,11 +990,11 @@ convertOmpSimdLoop(Operation &opInst, llvm::IRBuilderBase &builder,
       ompBuilder->collapseLoops(ompLoc.DL, loopInfos, {});
 
   llvm::ConstantInt *simdlen = nullptr;
-  if (llvm::Optional<uint64_t> simdlenVar = loop.getSimdlen())
+  if (std::optional<uint64_t> simdlenVar = loop.getSimdlen())
     simdlen = builder.getInt64(simdlenVar.value());
 
   llvm::ConstantInt *safelen = nullptr;
-  if (llvm::Optional<uint64_t> safelenVar = loop.getSafelen())
+  if (std::optional<uint64_t> safelenVar = loop.getSafelen())
     safelen = builder.getInt64(safelenVar.value());
 
   llvm::MapVector<llvm::Value *, llvm::Value *> alignedVars;
@@ -1009,7 +1010,7 @@ convertOmpSimdLoop(Operation &opInst, llvm::IRBuilderBase &builder,
 
 /// Convert an Atomic Ordering attribute to llvm::AtomicOrdering.
 llvm::AtomicOrdering
-convertAtomicOrdering(Optional<omp::ClauseMemoryOrderKind> ao) {
+convertAtomicOrdering(std::optional<omp::ClauseMemoryOrderKind> ao) {
   if (!ao)
     return llvm::AtomicOrdering::Monotonic; // Default Memory Ordering
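
convertToScheduleKind and convertAtomicOrdering share a shape that survives the migration untouched: take the optional clause value, return a default when it is absent, then switch on the contained value. A self-contained sketch with a made-up enum:

    #include <optional>

    enum class Flavor { A, B };

    int convertFlavor(std::optional<Flavor> flavor) {
      if (!flavor)
        return 0; // default when the clause is absent
      switch (*flavor) {
      case Flavor::A:
        return 1;
      case Flavor::B:
        return 2;
      }
      return 0;
    }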
 

diff  --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 4a532ef1d06ed..c5af256a0cacc 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -687,7 +687,7 @@ LogicalResult ModuleTranslation::convertGlobals() {
 
     addRuntimePreemptionSpecifier(op.getDsoLocal(), var);
 
-    Optional<uint64_t> alignment = op.getAlignment();
+    std::optional<uint64_t> alignment = op.getAlignment();
     if (alignment.has_value())
       var->setAlignment(llvm::MaybeAlign(alignment.value()));
 
@@ -783,7 +783,7 @@ static LogicalResult checkedAddLLVMFnAttribute(Location loc,
 /// attribute and the second string beings its value. Note that even integer
 /// attributes are expected to have their values expressed as strings.
 static LogicalResult
-forwardPassthroughAttributes(Location loc, Optional<ArrayAttr> attributes,
+forwardPassthroughAttributes(Location loc, std::optional<ArrayAttr> attributes,
                              llvm::Function *llvmFunc) {
   if (!attributes)
     return success();
@@ -1111,7 +1111,7 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
       llvm::LLVMContext &ctx = llvmModule->getContext();
       llvm::SmallVector<llvm::Metadata *, 2> operands;
       operands.push_back({}); // Placeholder for self-reference
-      if (Optional<StringRef> description = op.getDescription())
+      if (std::optional<StringRef> description = op.getDescription())
         operands.push_back(llvm::MDString::get(ctx, *description));
       llvm::MDNode *domain = llvm::MDNode::get(ctx, operands);
       domain->replaceOperandWith(0, domain); // Self-reference for uniqueness
@@ -1130,7 +1130,7 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
       llvm::SmallVector<llvm::Metadata *, 3> operands;
       operands.push_back({}); // Placeholder for self-reference
       operands.push_back(domain);
-      if (Optional<StringRef> description = op.getDescription())
+      if (std::optional<StringRef> description = op.getDescription())
         operands.push_back(llvm::MDString::get(ctx, *description));
       llvm::MDNode *scope = llvm::MDNode::get(ctx, operands);
       scope->replaceOperandWith(0, scope); // Self-reference for uniqueness

diff  --git a/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp b/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp
index cf6770a2816d1..28da3db3f4e68 100644
--- a/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp
+++ b/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp
@@ -79,7 +79,7 @@ void CodeGen::generate(const ast::Module &astModule, ModuleOp module) {
   int patternIndex = 0;
   for (pdl::PatternOp pattern : module.getOps<pdl::PatternOp>()) {
     // If the pattern has a name, use that. Otherwise, generate a unique name.
-    if (Optional<StringRef> patternName = pattern.getSymName()) {
+    if (std::optional<StringRef> patternName = pattern.getSymName()) {
       patternNames.insert(patternName->str());
     } else {
       std::string name;

diff  --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp
index 5fcfbf26aed26..04a01dedaa1fe 100644
--- a/mlir/test/lib/Dialect/Test/TestDialect.cpp
+++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp
@@ -84,8 +84,8 @@ struct TestOpAsmInterface : public OpAsmDialectInterface {
 
     // Check the contents of the string attribute to see what the test alias
     // should be named.
-    Optional<StringRef> aliasName =
-        StringSwitch<Optional<StringRef>>(strAttr.getValue())
+    std::optional<StringRef> aliasName =
+        StringSwitch<std::optional<StringRef>>(strAttr.getValue())
             .Case("alias_test:dot_in_name", StringRef("test.alias"))
             .Case("alias_test:trailing_digit", StringRef("test_alias0"))
             .Case("alias_test:prefixed_digit", StringRef("0_test_alias"))
@@ -383,7 +383,7 @@ Operation *TestDialect::materializeConstant(OpBuilder &builder, Attribute value,
 }
 
 ::mlir::LogicalResult FormatInferType2Op::inferReturnTypes(
-    ::mlir::MLIRContext *context, ::llvm::Optional<::mlir::Location> location,
+    ::mlir::MLIRContext *context, ::std::optional<::mlir::Location> location,
     ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes,
     ::mlir::RegionRange regions,
     ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) {
@@ -424,7 +424,7 @@ TestDialect::verifyRegionResultAttribute(Operation *op, unsigned regionIndex,
   return success();
 }
 
-Optional<Dialect::ParseOpHook>
+std::optional<Dialect::ParseOpHook>
 TestDialect::getParseOperationHook(StringRef opName) const {
   if (opName == "test.dialect_custom_printer") {
     return ParseOpHook{[](OpAsmParser &parser, OperationState &state) {
@@ -569,7 +569,8 @@ void FoldToCallOp::getCanonicalizationPatterns(RewritePatternSet &results,
 // Parsing
 
 static ParseResult parseCustomOptionalOperand(
-    OpAsmParser &parser, Optional<OpAsmParser::UnresolvedOperand> &optOperand) {
+    OpAsmParser &parser,
+    std::optional<OpAsmParser::UnresolvedOperand> &optOperand) {
   if (succeeded(parser.parseOptionalLParen())) {
     optOperand.emplace();
     if (parser.parseOperand(*optOperand) || parser.parseRParen())
@@ -580,7 +581,7 @@ static ParseResult parseCustomOptionalOperand(
 
 static ParseResult parseCustomDirectiveOperands(
     OpAsmParser &parser, OpAsmParser::UnresolvedOperand &operand,
-    Optional<OpAsmParser::UnresolvedOperand> &optOperand,
+    std::optional<OpAsmParser::UnresolvedOperand> &optOperand,
     SmallVectorImpl<OpAsmParser::UnresolvedOperand> &varOperands) {
   if (parser.parseOperand(operand))
     return failure();
@@ -633,7 +634,7 @@ parseCustomDirectiveWithTypeRefs(OpAsmParser &parser, Type operandType,
 }
 static ParseResult parseCustomDirectiveOperandsAndTypes(
     OpAsmParser &parser, OpAsmParser::UnresolvedOperand &operand,
-    Optional<OpAsmParser::UnresolvedOperand> &optOperand,
+    std::optional<OpAsmParser::UnresolvedOperand> &optOperand,
     SmallVectorImpl<OpAsmParser::UnresolvedOperand> &varOperands,
     Type &operandType, Type &optOperandType,
     SmallVectorImpl<Type> &varOperandTypes) {
@@ -689,7 +690,8 @@ static ParseResult parseCustomDirectiveAttrDict(OpAsmParser &parser,
   return parser.parseOptionalAttrDict(attrs);
 }
 static ParseResult parseCustomDirectiveOptionalOperandRef(
-    OpAsmParser &parser, Optional<OpAsmParser::UnresolvedOperand> &optOperand) {
+    OpAsmParser &parser,
+    std::optional<OpAsmParser::UnresolvedOperand> &optOperand) {
   int64_t operandCount = 0;
   if (parser.parseInteger(operandCount))
     return failure();
@@ -1125,7 +1127,7 @@ OpFoldResult TestPassthroughFold::fold(ArrayRef<Attribute> operands) {
 }
 
 LogicalResult OpWithInferTypeInterfaceOp::inferReturnTypes(
-    MLIRContext *, Optional<Location> location, ValueRange operands,
+    MLIRContext *, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &inferredReturnTypes) {
   if (operands[0].getType() != operands[1].getType()) {
@@ -1140,7 +1142,7 @@ LogicalResult OpWithInferTypeInterfaceOp::inferReturnTypes(
 // TODO: We should be able to only define either inferReturnType or
 // refineReturnType, currently only refineReturnType can be omitted.
 LogicalResult OpWithRefineTypeInterfaceOp::inferReturnTypes(
-    MLIRContext *context, Optional<Location> location, ValueRange operands,
+    MLIRContext *context, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &returnTypes) {
   returnTypes.clear();
@@ -1149,7 +1151,7 @@ LogicalResult OpWithRefineTypeInterfaceOp::inferReturnTypes(
 }
 
 LogicalResult OpWithRefineTypeInterfaceOp::refineReturnTypes(
-    MLIRContext *, Optional<Location> location, ValueRange operands,
+    MLIRContext *, std::optional<Location> location, ValueRange operands,
     DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<Type> &returnTypes) {
   if (operands[0].getType() != operands[1].getType()) {
@@ -1168,8 +1170,8 @@ LogicalResult OpWithRefineTypeInterfaceOp::refineReturnTypes(
 }
 
 LogicalResult OpWithShapedTypeInferTypeInterfaceOp::inferReturnTypeComponents(
-    MLIRContext *context, Optional<Location> location, ValueShapeRange operands,
-    DictionaryAttr attributes, RegionRange regions,
+    MLIRContext *context, std::optional<Location> location,
+    ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   // Create return type consisting of the last element of the first operand.
   auto operandType = operands.front().getType();
@@ -1177,8 +1179,7 @@ LogicalResult OpWithShapedTypeInferTypeInterfaceOp::inferReturnTypeComponents(
   if (!sval) {
     return emitOptionalError(location, "only shaped type operands allowed");
   }
-  int64_t dim =
-      sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic;
+  int64_t dim = sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic;
   auto type = IntegerType::get(context, 17);
 
   Attribute encoding;
@@ -1451,13 +1452,14 @@ ParseResult RegionIfOp::parse(OpAsmParser &parser, OperationState &result) {
                                 parser.getCurrentLocation(), result.operands);
 }
 
-OperandRange RegionIfOp::getSuccessorEntryOperands(Optional<unsigned> index) {
+OperandRange
+RegionIfOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
   assert(index && *index < 2 && "invalid region index");
   return getOperands();
 }
 
 void RegionIfOp::getSuccessorRegions(
-    Optional<unsigned> index, ArrayRef<Attribute> operands,
+    std::optional<unsigned> index, ArrayRef<Attribute> operands,
     SmallVectorImpl<RegionSuccessor> &regions) {
   // We always branch to the join region.
   if (index.has_value()) {
@@ -1484,7 +1486,7 @@ void RegionIfOp::getRegionInvocationBounds(
 // AnyCondOp
 //===----------------------------------------------------------------------===//
 
-void AnyCondOp::getSuccessorRegions(Optional<unsigned> index,
+void AnyCondOp::getSuccessorRegions(std::optional<unsigned> index,
                                     ArrayRef<Attribute> operands,
                                     SmallVectorImpl<RegionSuccessor> &regions) {
   // The parent op branches into the only region, and the region branches back

diff  --git a/mlir/test/lib/Dialect/Test/TestDialect.td b/mlir/test/lib/Dialect/Test/TestDialect.td
index 0bc789fa8b16e..9ec12749bfcc3 100644
--- a/mlir/test/lib/Dialect/Test/TestDialect.td
+++ b/mlir/test/lib/Dialect/Test/TestDialect.td
@@ -31,7 +31,7 @@ def Test_Dialect : Dialect {
     void registerTypes();
 
     // Provides a custom printing/parsing for some operations.
-    ::llvm::Optional<ParseOpHook>
+    ::std::optional<ParseOpHook>
       getParseOperationHook(::llvm::StringRef opName) const override;
     ::llvm::unique_function<void(::mlir::Operation *,
                                  ::mlir::OpAsmPrinter &printer)>

diff  --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index ae8a4a95d4025..08b3fd6079d20 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -421,7 +421,7 @@ def VariadicRegionInferredTypesOp : TEST_Op<"variadic_region_inferred",
 
   let extraClassDeclaration = [{
     static mlir::LogicalResult inferReturnTypes(mlir::MLIRContext *context,
-          llvm::Optional<::mlir::Location> location, mlir::ValueRange operands,
+          std::optional<::mlir::Location> location, mlir::ValueRange operands,
           mlir::DictionaryAttr attributes, mlir::RegionRange regions,
           llvm::SmallVectorImpl<mlir::Type> &inferredReturnTypes) {
       inferredReturnTypes.assign({mlir::IntegerType::get(context, 16)});
@@ -2404,7 +2404,7 @@ def FormatInferTypeOp : TEST_Op<"format_infer_type", [InferTypeOpInterface]> {
 
   let extraClassDeclaration = [{
     static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context,
-          ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands,
+          ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands,
           ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions,
           ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) {
       inferredReturnTypes.assign({::mlir::IntegerType::get(context, 16)});
@@ -2427,7 +2427,7 @@ class FormatInferAllTypesBaseOp<string mnemonic, list<Trait> traits = []>
   let results = (outs Variadic<AnyType>:$outs);
   let extraClassDeclaration = [{
     static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context,
-          ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands,
+          ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands,
           ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions,
           ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) {
       ::mlir::TypeRange operandTypes = operands.getTypes();
@@ -2474,7 +2474,7 @@ def FormatInferTypeRegionsOp
   let assemblyFormat = "$region attr-dict";
   let extraClassDeclaration = [{
     static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context,
-          ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands,
+          ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands,
           ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions,
           ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) {
       if (regions.empty())
@@ -2495,7 +2495,7 @@ def FormatInferTypeVariadicOperandsOp
   let assemblyFormat = "`(` $a `:` type($a) `)` `(` $b `:` type($b) `)` attr-dict";
   let extraClassDeclaration = [{
     static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context,
-          ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands,
+          ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands,
           ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions,
           ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) {
       FormatInferTypeVariadicOperandsOpAdaptor adaptor(operands, attributes);
@@ -2640,7 +2640,7 @@ def RegionIfOp : TEST_Op<"region_if",
       return getBody(2)->getArguments();
     }
     ::mlir::OperandRange getSuccessorEntryOperands(
-        ::llvm::Optional<unsigned> index);
+        ::std::optional<unsigned> index);
   }];
   let hasCustomAssemblyFormat = 1;
 }
@@ -2703,7 +2703,7 @@ class TableGenBuildInferReturnTypeBaseOp<string mnemonic,
 
   let extraClassDeclaration = [{
     static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *,
-          ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands,
+          ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands,
           ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions,
           ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) {
       inferredReturnTypes.assign({operands[0].getType()});

diff  --git a/mlir/test/mlir-tblgen/enums-gen.td b/mlir/test/mlir-tblgen/enums-gen.td
index 55181b96dd31f..c3a768e42236c 100644
--- a/mlir/test/mlir-tblgen/enums-gen.td
+++ b/mlir/test/mlir-tblgen/enums-gen.td
@@ -27,9 +27,9 @@ def MyBitEnum: I32BitEnumAttr<"MyBitEnum", "An example bit enum",
 // DECL: Bit3 = 8,
 // DECL: }
 
-// DECL: ::llvm::Optional<MyBitEnum> symbolizeMyBitEnum(uint32_t);
+// DECL: ::std::optional<MyBitEnum> symbolizeMyBitEnum(uint32_t);
 // DECL: std::string stringifyMyBitEnum(MyBitEnum);
-// DECL: ::llvm::Optional<MyBitEnum> symbolizeMyBitEnum(::llvm::StringRef);
+// DECL: ::std::optional<MyBitEnum> symbolizeMyBitEnum(::llvm::StringRef);
 
 // DECL: struct FieldParser<::MyBitEnum, ::MyBitEnum> {
 // DECL:   template <typename ParserT>
@@ -40,7 +40,7 @@ def MyBitEnum: I32BitEnumAttr<"MyBitEnum", "An example bit enum",
 // DECL:     if (failed(parser.parseOptionalKeywordOrString(&enumKeyword)))
 // DECL:       return parser.emitError(loc, "expected keyword for An example bit enum");
 // DECL:     // Symbolize the keyword.
-// DECL:     if (::llvm::Optional<::MyBitEnum> attr = ::symbolizeEnum<::MyBitEnum>(enumKeyword))
+// DECL:     if (::std::optional<::MyBitEnum> attr = ::symbolizeEnum<::MyBitEnum>(enumKeyword))
 // DECL:       return *attr;
 // DECL:     return parser.emitError(loc, "invalid An example bit enum specification: ") << enumKeyword;
 // DECL:   }
@@ -66,7 +66,7 @@ def MyBitEnum: I32BitEnumAttr<"MyBitEnum", "An example bit enum",
 // DEF: if (2u == (2u & val))
 // DEF-NEXT: push_back("Bit1")
 
-// DEF-LABEL: ::llvm::Optional<MyBitEnum> symbolizeMyBitEnum(::llvm::StringRef str)
+// DEF-LABEL: ::std::optional<MyBitEnum> symbolizeMyBitEnum(::llvm::StringRef str)
 // DEF: if (str == "none") return MyBitEnum::None;
 // DEF: .Case("tagged", 1)
 // DEF: .Case("Bit1", 2)

diff  --git a/mlir/test/mlir-tblgen/op-attribute.td b/mlir/test/mlir-tblgen/op-attribute.td
index 269d6e1e9b795..5351db36dea95 100644
--- a/mlir/test/mlir-tblgen/op-attribute.td
+++ b/mlir/test/mlir-tblgen/op-attribute.td
@@ -110,9 +110,9 @@ def AOp : NS_Op<"a_op", []> {
 
 // DEF:      some-attr-kind AOp::getCAttrAttr()
 // DEF-NEXT:   ::mlir::impl::getAttrFromSortedRange((*this)->getAttrs().begin() + 1, (*this)->getAttrs().end() - 0, getCAttrAttrName()).dyn_cast_or_null<some-attr-kind>()
-// DEF:      ::llvm::Optional<some-return-type> AOp::getCAttr() {
+// DEF:      ::std::optional<some-return-type> AOp::getCAttr() {
 // DEF-NEXT:   auto attr = getCAttrAttr()
-// DEF-NEXT:   return attr ? ::llvm::Optional<some-return-type>(attr.some-convert-from-storage()) : (::std::nullopt);
+// DEF-NEXT:   return attr ? ::std::optional<some-return-type>(attr.some-convert-from-storage()) : (::std::nullopt);
 
 // DEF:      some-attr-kind AOp::getDAttrAttr()
 // DEF-NEXT:   ::mlir::impl::getAttrFromSortedRange((*this)->getAttrs().begin() + 1, (*this)->getAttrs().end() - 0, getDAttrAttrName()).dyn_cast_or_null<some-attr-kind>()
@@ -135,7 +135,7 @@ def AOp : NS_Op<"a_op", []> {
 // DEF-NEXT:   (*this)->setAttr(getBAttrAttrName(), some-const-builder-call(::mlir::Builder((*this)->getContext()), attrValue));
 // DEF:      void AOp::setCAttrAttr(some-attr-kind attr) {
 // DEF-NEXT:   (*this)->setAttr(getCAttrAttrName(), attr);
-// DEF:      void AOp::setCAttr(::llvm::Optional<some-return-type> attrValue) {
+// DEF:      void AOp::setCAttr(::std::optional<some-return-type> attrValue) {
 // DEF-NEXT:   if (attrValue)
 // DEF-NEXT:     return (*this)->setAttr(getCAttrAttrName(), some-const-builder-call(::mlir::Builder((*this)->getContext()), *attrValue));
 // DEF-NEXT:   (*this)->removeAttr(getCAttrAttrName());
@@ -247,9 +247,9 @@ def AgetOp : Op<Test2_Dialect, "a_get_op", []> {
 
 // DEF:      some-attr-kind AgetOp::getCAttrAttr()
 // DEF-NEXT:   return ::mlir::impl::getAttrFromSortedRange({{.*}}).dyn_cast_or_null<some-attr-kind>()
-// DEF:      ::llvm::Optional<some-return-type> AgetOp::getCAttr() {
+// DEF:      ::std::optional<some-return-type> AgetOp::getCAttr() {
 // DEF-NEXT:   auto attr = getCAttrAttr()
-// DEF-NEXT:   return attr ? ::llvm::Optional<some-return-type>(attr.some-convert-from-storage()) : (::std::nullopt);
+// DEF-NEXT:   return attr ? ::std::optional<some-return-type>(attr.some-convert-from-storage()) : (::std::nullopt);
 
 // Test setter methods
 // ---
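
For context on what these CHECK lines exercise: an optional attribute now surfaces as ::std::optional from the generated getter, and the generated setter removes the attribute entirely when handed std::nullopt. A usage sketch, assuming a hypothetical op `op` whose optional c_attr is integer-valued (the real return type here is the placeholder some-return-type):

    if (std::optional<int64_t> c = op.getCAttr())
      llvm::errs() << "c_attr = " << *c << "\n";
    op.setCAttr(std::nullopt); // drops the attribute from the op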

diff  --git a/mlir/test/mlir-tblgen/op-decl-and-defs.td b/mlir/test/mlir-tblgen/op-decl-and-defs.td
index 80e29c644fe22..884c72ce9e6d5 100644
--- a/mlir/test/mlir-tblgen/op-decl-and-defs.td
+++ b/mlir/test/mlir-tblgen/op-decl-and-defs.td
@@ -61,7 +61,7 @@ def NS_AOp : NS_Op<"a_op", [IsolatedFromAbove, IsolatedFromAbove]> {
 // CHECK:   ::mlir::IntegerAttr getAttr1Attr();
 // CHECK:   uint32_t getAttr1();
 // CHECK:   ::mlir::FloatAttr getSomeAttr2Attr();
-// CHECK:   ::llvm::Optional< ::llvm::APFloat > getSomeAttr2();
+// CHECK:   ::std::optional< ::llvm::APFloat > getSomeAttr2();
 // CHECK:   ::mlir::Region &getSomeRegion();
 // CHECK:   ::mlir::RegionRange getSomeRegions();
 // CHECK: private:
@@ -88,7 +88,7 @@ def NS_AOp : NS_Op<"a_op", [IsolatedFromAbove, IsolatedFromAbove]> {
 // CHECK:   ::mlir::IntegerAttr getAttr1Attr()
 // CHECK:   uint32_t getAttr1();
 // CHECK:   ::mlir::FloatAttr getSomeAttr2Attr()
-// CHECK:   ::llvm::Optional< ::llvm::APFloat > getSomeAttr2();
+// CHECK:   ::std::optional< ::llvm::APFloat > getSomeAttr2();
 // CHECK:   ::mlir::Attribute removeSomeAttr2Attr();
 // CHECK:   static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value val);
 // CHECK:   static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, int integer = 0);

diff  --git a/mlir/test/python/python_test_ops.td b/mlir/test/python/python_test_ops.td
index 78ae1895558fa..0c63620251c03 100644
--- a/mlir/test/python/python_test_ops.td
+++ b/mlir/test/python/python_test_ops.td
@@ -71,7 +71,7 @@ def InferResultsOp : TestOp<"infer_results_op", [InferTypeOpInterface]> {
 
   let extraClassDeclaration = [{
     static ::mlir::LogicalResult inferReturnTypes(
-      ::mlir::MLIRContext *context, ::llvm::Optional<::mlir::Location> location,
+      ::mlir::MLIRContext *context, ::std::optional<::mlir::Location> location,
       ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes,
       ::mlir::RegionRange regions,
       ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) {

diff  --git a/mlir/tools/mlir-tblgen/DialectGen.cpp b/mlir/tools/mlir-tblgen/DialectGen.cpp
index 2331a960e6429..414408c8837df 100644
--- a/mlir/tools/mlir-tblgen/DialectGen.cpp
+++ b/mlir/tools/mlir-tblgen/DialectGen.cpp
@@ -254,7 +254,7 @@ static bool emitDialectDecls(const llvm::RecordKeeper &recordKeeper,
 ///      initialize().
 /// {2}: The dialect parent class.
 static const char *const dialectConstructorStr = R"(
-{0}::{0}(::mlir::MLIRContext *context) 
+{0}::{0}(::mlir::MLIRContext *context)
     : ::mlir::{2}(getDialectNamespace(), context, ::mlir::TypeID::get<{0}>()) {{
   {1}
   initialize();

diff  --git a/mlir/tools/mlir-tblgen/EnumsGen.cpp b/mlir/tools/mlir-tblgen/EnumsGen.cpp
index c9f5c81af4669..10725f7177569 100644
--- a/mlir/tools/mlir-tblgen/EnumsGen.cpp
+++ b/mlir/tools/mlir-tblgen/EnumsGen.cpp
@@ -97,7 +97,7 @@ struct FieldParser<{0}, {0}> {{
       return parser.emitError(loc, "expected keyword for {2}");
 
     // Symbolize the keyword.
-    if (::llvm::Optional<{0}> attr = {1}::symbolizeEnum<{0}>(enumKeyword))
+    if (::std::optional<{0}> attr = {1}::symbolizeEnum<{0}>(enumKeyword))
       return *attr;
     return parser.emitError(loc, "invalid {2} specification: ") << enumKeyword;
   }
@@ -227,7 +227,7 @@ static void emitMaxValueFn(const Record &enumDef, raw_ostream &os) {
 
 // Returns the EnumAttrCase whose value is zero if exists; returns std::nullopt
 // otherwise.
-static llvm::Optional<EnumAttrCase>
+static std::optional<EnumAttrCase>
 getAllBitsUnsetCase(llvm::ArrayRef<EnumAttrCase> cases) {
   for (auto attrCase : cases) {
     if (attrCase.getValue() == 0)
@@ -381,9 +381,9 @@ static void emitStrToSymFnForIntEnum(const Record &enumDef, raw_ostream &os) {
   StringRef strToSymFnName = enumAttr.getStringToSymbolFnName();
   auto enumerants = enumAttr.getAllCases();
 
-  os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef str) {{\n",
+  os << formatv("::std::optional<{0}> {1}(::llvm::StringRef str) {{\n",
                 enumName, strToSymFnName);
-  os << formatv("  return ::llvm::StringSwitch<::llvm::Optional<{0}>>(str)\n",
+  os << formatv("  return ::llvm::StringSwitch<::std::optional<{0}>>(str)\n",
                 enumName);
   for (const auto &enumerant : enumerants) {
     auto symbol = enumerant.getSymbol();
@@ -405,7 +405,7 @@ static void emitStrToSymFnForBitEnum(const Record &enumDef, raw_ostream &os) {
   auto enumerants = enumAttr.getAllCases();
   auto allBitsUnsetCase = getAllBitsUnsetCase(enumerants);
 
-  os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef str) {{\n",
+  os << formatv("::std::optional<{0}> {1}(::llvm::StringRef str) {{\n",
                 enumName, strToSymFnName);
 
   if (allBitsUnsetCase) {
@@ -425,7 +425,7 @@ static void emitStrToSymFnForBitEnum(const Record &enumDef, raw_ostream &os) {
 
   // Convert each symbol to the bit ordinal and set the corresponding bit.
   os << formatv("    auto bit = "
-                "llvm::StringSwitch<::llvm::Optional<{0}>>(symbol.trim())\n",
+                "llvm::StringSwitch<::std::optional<{0}>>(symbol.trim())\n",
                 underlyingType);
   for (const auto &enumerant : enumerants) {
     // Skip the special enumerant for None.
@@ -456,7 +456,7 @@ static void emitUnderlyingToSymFnForIntEnum(const Record &enumDef,
       }))
     return;
 
-  os << formatv("::llvm::Optional<{0}> {1}({2} value) {{\n", enumName,
+  os << formatv("::std::optional<{0}> {1}({2} value) {{\n", enumName,
                 underlyingToSymFnName,
                 underlyingType.empty() ? std::string("unsigned")
                                        : underlyingType)
@@ -539,7 +539,7 @@ static void emitUnderlyingToSymFnForBitEnum(const Record &enumDef,
   auto enumerants = enumAttr.getAllCases();
   auto allBitsUnsetCase = getAllBitsUnsetCase(enumerants);
 
-  os << formatv("::llvm::Optional<{0}> {1}({2} value) {{\n", enumName,
+  os << formatv("::std::optional<{0}> {1}({2} value) {{\n", enumName,
                 underlyingToSymFnName, underlyingType);
   if (allBitsUnsetCase) {
     os << "  // Special case for all bits unset.\n";
@@ -579,11 +579,11 @@ static void emitEnumDecl(const Record &enumDef, raw_ostream &os) {
         return enumerant.getValue() >= 0;
       })) {
     os << formatv(
-        "::llvm::Optional<{0}> {1}({2});\n", enumName, underlyingToSymFnName,
+        "::std::optional<{0}> {1}({2});\n", enumName, underlyingToSymFnName,
         underlyingType.empty() ? std::string("unsigned") : underlyingType);
   }
   os << formatv("{2} {1}({0});\n", enumName, symToStrFnName, symToStrFnRetType);
-  os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef);\n", enumName,
+  os << formatv("::std::optional<{0}> {1}(::llvm::StringRef);\n", enumName,
                 strToSymFnName);
 
   if (enumAttr.isBitEnum()) {
@@ -605,10 +605,10 @@ inline {0} stringifyEnum({1} enumValue) {{
   // specified by the user.
   const char *const symbolizeEnumStr = R"(
 template <typename EnumType>
-::llvm::Optional<EnumType> symbolizeEnum(::llvm::StringRef);
+::std::optional<EnumType> symbolizeEnum(::llvm::StringRef);
 
 template <>
-inline ::llvm::Optional<{0}> symbolizeEnum<{0}>(::llvm::StringRef str) {
+inline ::std::optional<{0}> symbolizeEnum<{0}>(::llvm::StringRef str) {
   return {1}(str);
 }
 )";

diff  --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
index 8abd42c46aada..2f88583d7faca 100644
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -1129,7 +1129,7 @@ void OpEmitter::genAttrSetters() {
       method = createMethod("bool");
     else if (isOptional)
       method =
-          createMethod("::llvm::Optional<" + baseAttr.getReturnType() + ">");
+          createMethod("::std::optional<" + baseAttr.getReturnType() + ">");
     else
       method = createMethod(attr.getReturnType());
     if (!method)
@@ -1148,7 +1148,7 @@ void OpEmitter::genAttrSetters() {
 
     // TODO: Handle unit attr parameters specially, given that it is treated as
     // optional but not in the same way as the others (i.e. it uses bool over
-    // llvm::Optional<>).
+    // std::optional<>).
     StringRef paramStr = isUnitAttr ? "attrValue" : "*attrValue";
     const char *optionalCodeBody = R"(
     if (attrValue)
@@ -2949,7 +2949,7 @@ OpOperandAdaptorEmitter::OpOperandAdaptorEmitter(
   adaptor.addField("::mlir::ValueRange", "odsOperands");
   adaptor.addField("::mlir::DictionaryAttr", "odsAttrs");
   adaptor.addField("::mlir::RegionRange", "odsRegions");
-  adaptor.addField("::llvm::Optional<::mlir::OperationName>", "odsOpName");
+  adaptor.addField("::std::optional<::mlir::OperationName>", "odsOpName");
 
   const auto *attrSizedOperands =
       op.getTrait("::m::OpTrait::AttrSizedOperandSegments");

diff  --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
index 4199e97c5169a..d90d1308527df 100644
--- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
@@ -944,7 +944,7 @@ static void genCustomDirectiveParser(CustomDirective *dir, MethodBody &body) {
            << "OperandsLoc = parser.getCurrentLocation();\n";
       if (var->isOptional()) {
         body << llvm::formatv(
-            "    ::llvm::Optional<::mlir::OpAsmParser::UnresolvedOperand> "
+            "    ::std::optional<::mlir::OpAsmParser::UnresolvedOperand> "
             "{0}Operand;\n",
             var->name);
       } else if (var->isVariadicOfVariadic()) {
@@ -973,7 +973,7 @@ static void genCustomDirectiveParser(CustomDirective *dir, MethodBody &body) {
         body << llvm::formatv(
             "    {0} {1}Operand = {1}Operands.empty() ? {0}() : "
             "{1}Operands[0];\n",
-            "::llvm::Optional<::mlir::OpAsmParser::UnresolvedOperand>",
+            "::std::optional<::mlir::OpAsmParser::UnresolvedOperand>",
             operand->getVar()->name);
 
       } else if (auto *type = dyn_cast<TypeDirective>(input)) {

diff  --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
index dad90566ffff8..6f2e07798361b 100644
--- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
+++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
@@ -346,7 +346,7 @@ static void emitAvailabilityQueryForIntEnum(const Record &enumDef,
   for (const auto &classCasePair : classCaseMap) {
     Availability avail = classCasePair.getValue().front().second;
 
-    os << formatv("llvm::Optional<{0}> {1}({2} value) {{\n",
+    os << formatv("std::optional<{0}> {1}({2} value) {{\n",
                   avail.getMergeInstanceType(), avail.getQueryFnName(),
                   enumName);
 
@@ -388,7 +388,7 @@ static void emitAvailabilityQueryForBitEnum(const Record &enumDef,
   for (const auto &classCasePair : classCaseMap) {
     Availability avail = classCasePair.getValue().front().second;
 
-    os << formatv("llvm::Optional<{0}> {1}({2} value) {{\n",
+    os << formatv("std::optional<{0}> {1}({2} value) {{\n",
                   avail.getMergeInstanceType(), avail.getQueryFnName(),
                   enumName);
 
@@ -433,7 +433,7 @@ static void emitEnumDecl(const Record &enumDef, raw_ostream &os) {
       StringRef className = avail.getClass();
       if (handledClasses.count(className))
         continue;
-      os << formatv("llvm::Optional<{0}> {1}({2} value);\n",
+      os << formatv("std::optional<{0}> {1}({2} value);\n",
                     avail.getMergeInstanceType(), avail.getQueryFnName(),
                     enumName);
       handledClasses.insert(className);

diff  --git a/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp b/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp
index af02a45e27aed..230298cef8569 100644
--- a/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp
+++ b/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp
@@ -37,7 +37,7 @@ struct MutuallyExclusiveRegionsOp
   }
 
   // Regions have no successors.
-  void getSuccessorRegions(Optional<unsigned> index,
+  void getSuccessorRegions(std::optional<unsigned> index,
                            ArrayRef<Attribute> operands,
                            SmallVectorImpl<RegionSuccessor> &regions) {}
 };
@@ -52,7 +52,7 @@ struct LoopRegionsOp
 
   static StringRef getOperationName() { return "cftest.loop_regions_op"; }
 
-  void getSuccessorRegions(Optional<unsigned> index,
+  void getSuccessorRegions(std::optional<unsigned> index,
                            ArrayRef<Attribute> operands,
                            SmallVectorImpl<RegionSuccessor> &regions) {
     if (index) {
@@ -76,7 +76,7 @@ struct DoubleLoopRegionsOp
     return "cftest.double_loop_regions_op";
   }
 
-  void getSuccessorRegions(Optional<unsigned> index,
+  void getSuccessorRegions(std::optional<unsigned> index,
                            ArrayRef<Attribute> operands,
                            SmallVectorImpl<RegionSuccessor> &regions) {
     if (index.has_value()) {
@@ -95,7 +95,7 @@ struct SequentialRegionsOp
   static StringRef getOperationName() { return "cftest.sequential_regions_op"; }
 
   // Region 0 has Region 1 as a successor.
-  void getSuccessorRegions(Optional<unsigned> index,
+  void getSuccessorRegions(std::optional<unsigned> index,
                            ArrayRef<Attribute> operands,
                            SmallVectorImpl<RegionSuccessor> &regions) {
     if (index == 0u) {

diff  --git a/mlir/unittests/TableGen/EnumsGenTest.cpp b/mlir/unittests/TableGen/EnumsGenTest.cpp
index bf6b6d3a9c0fe..128696d838b98 100644
--- a/mlir/unittests/TableGen/EnumsGenTest.cpp
+++ b/mlir/unittests/TableGen/EnumsGenTest.cpp
@@ -54,8 +54,8 @@ TEST(EnumsGenTest, GeneratedSymbolToStringFn) {
 }
 
 TEST(EnumsGenTest, GeneratedStringToSymbolFn) {
-  EXPECT_EQ(llvm::Optional<FooEnum>(FooEnum::CaseA), ConvertToEnum("CaseA"));
-  EXPECT_EQ(llvm::Optional<FooEnum>(FooEnum::CaseB), ConvertToEnum("CaseB"));
+  EXPECT_EQ(std::optional<FooEnum>(FooEnum::CaseA), ConvertToEnum("CaseA"));
+  EXPECT_EQ(std::optional<FooEnum>(FooEnum::CaseB), ConvertToEnum("CaseB"));
   EXPECT_EQ(std::nullopt, ConvertToEnum("X"));
 }