[llvm] 67819a7 - [CodeGen] llvm::Optional => std::optional

Fangrui Song via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 13 01:06:45 PST 2022


Author: Fangrui Song
Date: 2022-12-13T09:06:36Z
New Revision: 67819a72c6ba39267effe8edfc1befddc3f3f2f9

URL: https://github.com/llvm/llvm-project/commit/67819a72c6ba39267effe8edfc1befddc3f3f2f9
DIFF: https://github.com/llvm/llvm-project/commit/67819a72c6ba39267effe8edfc1befddc3f3f2f9.diff

LOG: [CodeGen] llvm::Optional => std::optional
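
The change itself is mechanical: every spelling of Optional<T> becomes
std::optional<T>, includes of "llvm/ADT/Optional.h" give way to
<optional>, and the std::nullopt sentinel (already present on the
removed lines below) is untouched. As a reading aid, here is a minimal
sketch of the post-migration style; the helper is hypothetical, invented
for illustration rather than taken from the patch:

    #include <optional>

    // std::optional spelled out, with std::nullopt as the empty state;
    // the same shape as accessors such as MachineInstr::getSpillSize
    // in this patch.
    std::optional<unsigned> getSpillSizeDemo(bool IsSpill, unsigned Bytes) {
      if (IsSpill)
        return Bytes;      // engaged: holds the spill size in bytes
      return std::nullopt; // disengaged: not a spill instruction
    }

Call sites need no rewrite: llvm::Optional already exposed the
std::optional surface (operator bool, operator*), so code such as the
"if ((Size = MI.getRestoreSize(TII)))" test in AsmPrinter.cpp below
compiles unchanged.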

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
    llvm/include/llvm/CodeGen/CommandFlags.h
    llvm/include/llvm/CodeGen/DebugHandlerBase.h
    llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
    llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
    llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
    llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
    llvm/include/llvm/CodeGen/GlobalISel/Utils.h
    llvm/include/llvm/CodeGen/ISDOpcodes.h
    llvm/include/llvm/CodeGen/MIRFormatter.h
    llvm/include/llvm/CodeGen/MachineBasicBlock.h
    llvm/include/llvm/CodeGen/MachineInstr.h
    llvm/include/llvm/CodeGen/MachineOperand.h
    llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
    llvm/include/llvm/CodeGen/SelectionDAGNodes.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
    llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
    llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
    llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h
    llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
    llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
    llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
    llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h
    llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
    llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
    llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
    llvm/lib/CodeGen/CFIInstrInserter.cpp
    llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
    llvm/lib/CodeGen/GlobalISel/Utils.cpp
    llvm/lib/CodeGen/ImplicitNullChecks.cpp
    llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
    llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h
    llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp
    llvm/lib/CodeGen/LiveDebugVariables.cpp
    llvm/lib/CodeGen/MIRParser/MIParser.cpp
    llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
    llvm/lib/CodeGen/MachineInstr.cpp
    llvm/lib/CodeGen/MachineOperand.cpp
    llvm/lib/CodeGen/ModuloSchedule.cpp
    llvm/lib/CodeGen/PeepholeOptimizer.cpp
    llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
    llvm/lib/CodeGen/RegAllocGreedy.cpp
    llvm/lib/CodeGen/SelectOptimize.cpp
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
    llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
    llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
    llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
    llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
    llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
    llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
    llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h
    llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
    llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
    llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
    llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
    llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
    llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
    llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
    llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
    llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
    llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
    llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
    llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
    llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
    llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
    llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
    llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
    llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
index b45d95302e1a3..9ac51ed9f6fad 100644
--- a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
@@ -189,7 +189,7 @@ template <typename DerivedT> class CodeGenPassBuilder {
     // This special-casing introduces less adaptor passes. If we have the need
     // of adding module passes after function passes, we could change the
     // implementation to accommodate that.
-    Optional<bool> AddingFunctionPasses;
+    std::optional<bool> AddingFunctionPasses;
   };
 
   // Function object to maintain state while adding codegen machine passes.

diff --git a/llvm/include/llvm/CodeGen/CommandFlags.h b/llvm/include/llvm/CodeGen/CommandFlags.h
index a4f1ab84cca88..7b1ef60912f1a 100644
--- a/llvm/include/llvm/CodeGen/CommandFlags.h
+++ b/llvm/include/llvm/CodeGen/CommandFlags.h
@@ -133,7 +133,7 @@ bool getEnableMachineFunctionSplitter();
 bool getEnableDebugEntryValues();
 
 bool getValueTrackingVariableLocations();
-Optional<bool> getExplicitValueTrackingVariableLocations();
+std::optional<bool> getExplicitValueTrackingVariableLocations();
 
 bool getForceDwarfFrameSection();
 

diff --git a/llvm/include/llvm/CodeGen/DebugHandlerBase.h b/llvm/include/llvm/CodeGen/DebugHandlerBase.h
index 2596258aee7ee..1b7355f5d9fbb 100644
--- a/llvm/include/llvm/CodeGen/DebugHandlerBase.h
+++ b/llvm/include/llvm/CodeGen/DebugHandlerBase.h
@@ -14,12 +14,12 @@
 #ifndef LLVM_CODEGEN_DEBUGHANDLERBASE_H
 #define LLVM_CODEGEN_DEBUGHANDLERBASE_H
 
-#include "llvm/ADT/Optional.h"
 #include "llvm/CodeGen/AsmPrinterHandler.h"
 #include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
 #include "llvm/CodeGen/LexicalScopes.h"
 #include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/IR/DebugLoc.h"
+#include <optional>
 
 namespace llvm {
 
@@ -37,14 +37,14 @@ struct DbgVariableLocation {
   SmallVector<int64_t, 1> LoadChain;
 
   /// Present if the location is part of a larger variable.
-  llvm::Optional<llvm::DIExpression::FragmentInfo> FragmentInfo;
+  std::optional<llvm::DIExpression::FragmentInfo> FragmentInfo;
 
   /// Extract a VariableLocation from a MachineInstr.
   /// This will only work if Instruction is a debug value instruction
   /// and the associated DIExpression is in one of the supported forms.
   /// If these requirements are not met, the returned Optional will not
   /// have a value.
-  static Optional<DbgVariableLocation>
+  static std::optional<DbgVariableLocation>
   extractFromMachineInstruction(const MachineInstr &Instruction);
 };
 

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
index 7fd0e9d1901c7..488df12f13f8d 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CSEMIRBuilder.h
@@ -70,7 +70,7 @@ class CSEMIRBuilder : public MachineIRBuilder {
   void profileMBBOpcode(GISelInstProfileBuilder &B, unsigned Opc) const;
 
   void profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
-                         ArrayRef<SrcOp> SrcOps, Optional<unsigned> Flags,
+                         ArrayRef<SrcOp> SrcOps, std::optional<unsigned> Flags,
                          GISelInstProfileBuilder &B) const;
 
   // Takes a MachineInstrBuilder and inserts it into the CSEMap using the
@@ -93,7 +93,7 @@ class CSEMIRBuilder : public MachineIRBuilder {
   // Unhide buildInstr
   MachineInstrBuilder
   buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps,
-             Optional<unsigned> Flag = std::nullopt) override;
+             std::optional<unsigned> Flag = std::nullopt) override;
   // Bring in the other overload from the base class.
   using MachineIRBuilder::buildConstant;
 

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 93ad4ceca1370..9dc1abbcfa40e 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -359,9 +359,9 @@ class CombinerHelper {
 
   /// Transform fp_instr(cst) to constant result of the fp operation.
   bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
-                                       Optional<APFloat> &Cst);
+                                       std::optional<APFloat> &Cst);
   void applyCombineConstantFoldFpUnary(MachineInstr &MI,
-                                       Optional<APFloat> &Cst);
+                                       std::optional<APFloat> &Cst);
 
   /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
   bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
@@ -810,7 +810,7 @@ class CombinerHelper {
   /// \param [in] Root - The search root.
   ///
   /// \returns The Registers found during the search.
-  Optional<SmallVector<Register, 8>>
+  std::optional<SmallVector<Register, 8>>
   findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
 
   /// Helper function for matchLoadOrCombine.
@@ -824,7 +824,7 @@ class CombinerHelper {
   ///
   /// \returns On success, a 3-tuple containing lowest-index load found, the
   /// lowest index, and the last load in the sequence.
-  Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
+  std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
   findLoadOffsetsForLoadOrCombine(
       SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
       const SmallVector<Register, 8> &RegsToVisit,

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index 44ba81223ec32..db1a5473e45ae 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -16,7 +16,6 @@
 #define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
 
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
 #include "llvm/CodeGen/MachineFunction.h"
@@ -27,6 +26,7 @@
 #include <cstdint>
 #include <functional>
 #include <initializer_list>
+#include <optional>
 #include <vector>
 
 namespace llvm {
@@ -468,7 +468,7 @@ class InstructionSelector {
 
 protected:
   using ComplexRendererFns =
-      Optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
+      std::optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
   using RecordedMIVector = SmallVector<MachineInstr *, 4>;
   using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
 

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 1921dcff4a60c..003af69582230 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -724,7 +724,7 @@ class LegalizationArtifactCombiner {
     /// and its callees rely upon.
     Register findValueFromDefImpl(Register DefReg, unsigned StartBit,
                                   unsigned Size) {
-      Optional<DefinitionAndSourceRegister> DefSrcReg =
+      std::optional<DefinitionAndSourceRegister> DefSrcReg =
           getDefSrcRegIgnoringCopies(DefReg, MRI);
       MachineInstr *Def = DefSrcReg->MI;
       DefReg = DefSrcReg->Reg;

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 3879e22552ecb..ea6ed322e9b19 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -63,17 +63,18 @@ inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
 }
 
 template <typename ConstT>
-inline Optional<ConstT> matchConstant(Register, const MachineRegisterInfo &);
+inline std::optional<ConstT> matchConstant(Register,
+                                           const MachineRegisterInfo &);
 
 template <>
-inline Optional<APInt> matchConstant(Register Reg,
-                                     const MachineRegisterInfo &MRI) {
+inline std::optional<APInt> matchConstant(Register Reg,
+                                          const MachineRegisterInfo &MRI) {
   return getIConstantVRegVal(Reg, MRI);
 }
 
 template <>
-inline Optional<int64_t> matchConstant(Register Reg,
-                                       const MachineRegisterInfo &MRI) {
+inline std::optional<int64_t> matchConstant(Register Reg,
+                                            const MachineRegisterInfo &MRI) {
   return getIConstantVRegSExtVal(Reg, MRI);
 }
 
@@ -97,18 +98,18 @@ inline ConstantMatch<int64_t> m_ICst(int64_t &Cst) {
 }
 
 template <typename ConstT>
-inline Optional<ConstT> matchConstantSplat(Register,
-                                           const MachineRegisterInfo &);
+inline std::optional<ConstT> matchConstantSplat(Register,
+                                                const MachineRegisterInfo &);
 
 template <>
-inline Optional<APInt> matchConstantSplat(Register Reg,
-                                          const MachineRegisterInfo &MRI) {
+inline std::optional<APInt> matchConstantSplat(Register Reg,
+                                               const MachineRegisterInfo &MRI) {
   return getIConstantSplatVal(Reg, MRI);
 }
 
 template <>
-inline Optional<int64_t> matchConstantSplat(Register Reg,
-                                            const MachineRegisterInfo &MRI) {
+inline std::optional<int64_t>
+matchConstantSplat(Register Reg, const MachineRegisterInfo &MRI) {
   return getIConstantSplatSExtVal(Reg, MRI);
 }
 
@@ -139,34 +140,35 @@ inline ICstOrSplatMatch<int64_t> m_ICstOrSplat(int64_t &Cst) {
 }
 
 struct GCstAndRegMatch {
-  Optional<ValueAndVReg> &ValReg;
-  GCstAndRegMatch(Optional<ValueAndVReg> &ValReg) : ValReg(ValReg) {}
+  std::optional<ValueAndVReg> &ValReg;
+  GCstAndRegMatch(std::optional<ValueAndVReg> &ValReg) : ValReg(ValReg) {}
   bool match(const MachineRegisterInfo &MRI, Register Reg) {
     ValReg = getIConstantVRegValWithLookThrough(Reg, MRI);
     return ValReg ? true : false;
   }
 };
 
-inline GCstAndRegMatch m_GCst(Optional<ValueAndVReg> &ValReg) {
+inline GCstAndRegMatch m_GCst(std::optional<ValueAndVReg> &ValReg) {
   return GCstAndRegMatch(ValReg);
 }
 
 struct GFCstAndRegMatch {
-  Optional<FPValueAndVReg> &FPValReg;
-  GFCstAndRegMatch(Optional<FPValueAndVReg> &FPValReg) : FPValReg(FPValReg) {}
+  std::optional<FPValueAndVReg> &FPValReg;
+  GFCstAndRegMatch(std::optional<FPValueAndVReg> &FPValReg)
+      : FPValReg(FPValReg) {}
   bool match(const MachineRegisterInfo &MRI, Register Reg) {
     FPValReg = getFConstantVRegValWithLookThrough(Reg, MRI);
     return FPValReg ? true : false;
   }
 };
 
-inline GFCstAndRegMatch m_GFCst(Optional<FPValueAndVReg> &FPValReg) {
+inline GFCstAndRegMatch m_GFCst(std::optional<FPValueAndVReg> &FPValReg) {
   return GFCstAndRegMatch(FPValReg);
 }
 
 struct GFCstOrSplatGFCstMatch {
-  Optional<FPValueAndVReg> &FPValReg;
-  GFCstOrSplatGFCstMatch(Optional<FPValueAndVReg> &FPValReg)
+  std::optional<FPValueAndVReg> &FPValReg;
+  GFCstOrSplatGFCstMatch(std::optional<FPValueAndVReg> &FPValReg)
       : FPValReg(FPValReg) {}
   bool match(const MachineRegisterInfo &MRI, Register Reg) {
     return (FPValReg = getFConstantSplat(Reg, MRI)) ||
@@ -175,7 +177,7 @@ struct GFCstOrSplatGFCstMatch {
 };
 
 inline GFCstOrSplatGFCstMatch
-m_GFCstOrSplat(Optional<FPValueAndVReg> &FPValReg) {
+m_GFCstOrSplat(std::optional<FPValueAndVReg> &FPValReg) {
   return GFCstOrSplatGFCstMatch(FPValReg);
 }
 

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index bb56571f971f6..f3d6ed514893d 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -487,9 +487,10 @@ class MachineIRBuilder {
   ///       type as \p Op0 or \p Op0 itself.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  Optional<MachineInstrBuilder> materializePtrAdd(Register &Res, Register Op0,
-                                                  const LLT ValueTy,
-                                                  uint64_t Value);
+  std::optional<MachineInstrBuilder> materializePtrAdd(Register &Res,
+                                                       Register Op0,
+                                                       const LLT ValueTy,
+                                                       uint64_t Value);
 
   /// Build and insert \p Res = G_PTRMASK \p Op0, \p Op1
   MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
@@ -658,7 +659,7 @@ class MachineIRBuilder {
 
   /// Build and insert \p Res = G_FPEXT \p Op
   MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op,
-                                 Optional<unsigned> Flags = std::nullopt) {
+                                 std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FPEXT, {Res}, {Op}, Flags);
   }
 
@@ -1092,8 +1093,9 @@ class MachineIRBuilder {
   /// \pre \p Res must be smaller than \p Op
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op,
-                                   Optional<unsigned> Flags = std::nullopt);
+  MachineInstrBuilder
+  buildFPTrunc(const DstOp &Res, const SrcOp &Op,
+               std::optional<unsigned> Flags = std::nullopt);
 
   /// Build and insert \p Res = G_TRUNC \p Op
   ///
@@ -1137,7 +1139,7 @@ class MachineIRBuilder {
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res,
                                 const SrcOp &Op0, const SrcOp &Op1,
-                                Optional<unsigned> Flags = std::nullopt);
+                                std::optional<unsigned> Flags = std::nullopt);
 
   /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
   ///
@@ -1151,7 +1153,7 @@ class MachineIRBuilder {
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst,
                                   const SrcOp &Op0, const SrcOp &Op1,
-                                  Optional<unsigned> Flags = std::nullopt);
+                                  std::optional<unsigned> Flags = std::nullopt);
 
   /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
   /// \p Elt, \p Idx
@@ -1481,7 +1483,7 @@ class MachineIRBuilder {
 
   MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
-                               Optional<unsigned> Flags = std::nullopt) {
+                               std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
   }
 
@@ -1498,7 +1500,7 @@ class MachineIRBuilder {
 
   MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
-                               Optional<unsigned> Flags = std::nullopt) {
+                               std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
   }
 
@@ -1514,74 +1516,74 @@ class MachineIRBuilder {
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
-                               Optional<unsigned> Flags = std::nullopt) {
+                               std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder buildUMulH(const DstOp &Dst, const SrcOp &Src0,
                                  const SrcOp &Src1,
-                                 Optional<unsigned> Flags = std::nullopt) {
+                                 std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder buildSMulH(const DstOp &Dst, const SrcOp &Src0,
                                  const SrcOp &Src1,
-                                 Optional<unsigned> Flags = std::nullopt) {
+                                 std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_UREM \p Op0, \p Op1
   MachineInstrBuilder buildURem(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_UREM, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FMUL, {Dst}, {Src0, Src1}, Flags);
   }
 
-  MachineInstrBuilder buildFMinNum(const DstOp &Dst, const SrcOp &Src0,
-                                   const SrcOp &Src1,
-                                   Optional<unsigned> Flags = std::nullopt) {
+  MachineInstrBuilder
+  buildFMinNum(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
+               std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FMINNUM, {Dst}, {Src0, Src1}, Flags);
   }
 
-  MachineInstrBuilder buildFMaxNum(const DstOp &Dst, const SrcOp &Src0,
-                                   const SrcOp &Src1,
-                                   Optional<unsigned> Flags = std::nullopt) {
+  MachineInstrBuilder
+  buildFMaxNum(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
+               std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FMAXNUM, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder
   buildFMinNumIEEE(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
-                   Optional<unsigned> Flags = std::nullopt) {
+                   std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FMINNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder
   buildFMaxNumIEEE(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
-                   Optional<unsigned> Flags = std::nullopt) {
+                   std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FMAXNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
-                               Optional<unsigned> Flags = std::nullopt) {
+                               std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_SHL, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_LSHR, {Dst}, {Src0, Src1}, Flags);
   }
 
   MachineInstrBuilder buildAShr(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_ASHR, {Dst}, {Src0, Src1}, Flags);
   }
 
@@ -1613,7 +1615,7 @@ class MachineIRBuilder {
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1,
-                              Optional<unsigned> Flags = std::nullopt) {
+                              std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_OR, {Dst}, {Src0, Src1}, Flags);
   }
 
@@ -1672,99 +1674,100 @@ class MachineIRBuilder {
   /// Build and insert \p Res = G_FADD \p Op0, \p Op1
   MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_STRICT_FADD \p Op0, \p Op1
-  MachineInstrBuilder buildStrictFAdd(const DstOp &Dst, const SrcOp &Src0,
-                                      const SrcOp &Src1,
-                                      Optional<unsigned> Flags = std::nullopt) {
+  MachineInstrBuilder
+  buildStrictFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
+                  std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_STRICT_FADD, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_FSUB \p Op0, \p Op1
   MachineInstrBuilder buildFSub(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
   MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
   MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1, const SrcOp &Src2,
-                               Optional<unsigned> Flags = std::nullopt) {
+                               std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FMA, {Dst}, {Src0, Src1, Src2}, Flags);
   }
 
   /// Build and insert \p Res = G_FMAD \p Op0, \p Op1, \p Op2
   MachineInstrBuilder buildFMAD(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1, const SrcOp &Src2,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FMAD, {Dst}, {Src0, Src1, Src2}, Flags);
   }
 
   /// Build and insert \p Res = G_FNEG \p Op0
   MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FNEG, {Dst}, {Src0}, Flags);
   }
 
   /// Build and insert \p Res = G_FABS \p Op0
   MachineInstrBuilder buildFAbs(const DstOp &Dst, const SrcOp &Src0,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FABS, {Dst}, {Src0}, Flags);
   }
 
   /// Build and insert \p Dst = G_FCANONICALIZE \p Src0
   MachineInstrBuilder
   buildFCanonicalize(const DstOp &Dst, const SrcOp &Src0,
-                     Optional<unsigned> Flags = std::nullopt) {
+                     std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FCANONICALIZE, {Dst}, {Src0}, Flags);
   }
 
   /// Build and insert \p Dst = G_INTRINSIC_TRUNC \p Src0
   MachineInstrBuilder
   buildIntrinsicTrunc(const DstOp &Dst, const SrcOp &Src0,
-                      Optional<unsigned> Flags = std::nullopt) {
+                      std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {Dst}, {Src0}, Flags);
   }
 
   /// Build and insert \p Res = GFFLOOR \p Op0, \p Op1
-  MachineInstrBuilder buildFFloor(const DstOp &Dst, const SrcOp &Src0,
-                                  Optional<unsigned> Flags = std::nullopt) {
+  MachineInstrBuilder
+  buildFFloor(const DstOp &Dst, const SrcOp &Src0,
+              std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FFLOOR, {Dst}, {Src0}, Flags);
   }
 
   /// Build and insert \p Dst = G_FLOG \p Src
   MachineInstrBuilder buildFLog(const DstOp &Dst, const SrcOp &Src,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FLOG, {Dst}, {Src}, Flags);
   }
 
   /// Build and insert \p Dst = G_FLOG2 \p Src
   MachineInstrBuilder buildFLog2(const DstOp &Dst, const SrcOp &Src,
-                                 Optional<unsigned> Flags = std::nullopt) {
+                                 std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FLOG2, {Dst}, {Src}, Flags);
   }
 
   /// Build and insert \p Dst = G_FEXP2 \p Src
   MachineInstrBuilder buildFExp2(const DstOp &Dst, const SrcOp &Src,
-                                 Optional<unsigned> Flags = std::nullopt) {
+                                 std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FEXP2, {Dst}, {Src}, Flags);
   }
 
   /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
   MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
-                                Optional<unsigned> Flags = std::nullopt) {
+                                std::optional<unsigned> Flags = std::nullopt) {
     return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
   }
 
@@ -1978,7 +1981,7 @@ class MachineIRBuilder {
 
   virtual MachineInstrBuilder
   buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps,
-             Optional<unsigned> Flags = std::nullopt);
+             std::optional<unsigned> Flags = std::nullopt);
 };
 
 } // End namespace llvm.

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index c236aa7f2457a..c9941afc8013f 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -166,12 +166,12 @@ void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                         MachineOptimizationRemarkMissed &R);
 
 /// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
-Optional<APInt> getIConstantVRegVal(Register VReg,
-                                    const MachineRegisterInfo &MRI);
+std::optional<APInt> getIConstantVRegVal(Register VReg,
+                                         const MachineRegisterInfo &MRI);
 
 /// If \p VReg is defined by a G_CONSTANT fits in int64_t returns it.
-Optional<int64_t> getIConstantVRegSExtVal(Register VReg,
-                                          const MachineRegisterInfo &MRI);
+std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
+                                               const MachineRegisterInfo &MRI);
 
 /// Simple struct used to hold a constant integer value and a virtual
 /// register.
@@ -182,14 +182,14 @@ struct ValueAndVReg {
 
 /// If \p VReg is defined by a statically evaluable chain of instructions rooted
 /// on a G_CONSTANT returns its APInt value and def register.
-Optional<ValueAndVReg>
+std::optional<ValueAndVReg>
 getIConstantVRegValWithLookThrough(Register VReg,
                                    const MachineRegisterInfo &MRI,
                                    bool LookThroughInstrs = true);
 
 /// If \p VReg is defined by a statically evaluable chain of instructions rooted
 /// on a G_CONSTANT or G_FCONSTANT returns its value as APInt and def register.
-Optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
     Register VReg, const MachineRegisterInfo &MRI,
     bool LookThroughInstrs = true, bool LookThroughAnyExt = false);
 
@@ -200,7 +200,7 @@ struct FPValueAndVReg {
 
 /// If \p VReg is defined by a statically evaluable chain of instructions rooted
 /// on a G_FCONSTANT returns its APFloat value and def register.
-Optional<FPValueAndVReg>
+std::optional<FPValueAndVReg>
 getFConstantVRegValWithLookThrough(Register VReg,
                                    const MachineRegisterInfo &MRI,
                                    bool LookThroughInstrs = true);
@@ -225,7 +225,7 @@ struct DefinitionAndSourceRegister {
 /// away any copies.
 ///
 /// Also walks through hints such as G_ASSERT_ZEXT.
-Optional<DefinitionAndSourceRegister>
+std::optional<DefinitionAndSourceRegister>
 getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
 
 /// Find the def instruction for \p Reg, folding away any trivial copies. May
@@ -260,12 +260,12 @@ APFloat getAPFloatFromSize(double Val, unsigned Size);
 /// fallback.
 void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
 
-Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
-                                  const Register Op2,
-                                  const MachineRegisterInfo &MRI);
-Optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
-                                      const Register Op2,
-                                      const MachineRegisterInfo &MRI);
+std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
+                                       const Register Op2,
+                                       const MachineRegisterInfo &MRI);
+std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
+                                           const Register Op2,
+                                           const MachineRegisterInfo &MRI);
 
 /// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
 /// Returns an empty vector on failure.
@@ -273,16 +273,17 @@ SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI);
 
-Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
-                                  uint64_t Imm, const MachineRegisterInfo &MRI);
+std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
+                                       uint64_t Imm,
+                                       const MachineRegisterInfo &MRI);
 
-Optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
-                                         Register Src,
-                                         const MachineRegisterInfo &MRI);
+std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
+                                              Register Src,
+                                              const MachineRegisterInfo &MRI);
 
 /// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
 /// then it tries to do an element-wise constant fold.
-Optional<SmallVector<unsigned>>
+std::optional<SmallVector<unsigned>>
 ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);
 
 /// Test if the given value is known to have exactly one bit set. This differs
@@ -370,31 +371,31 @@ class RegOrConstant {
 
 /// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
 /// If \p MI is not a splat, returns std::nullopt.
-Optional<int> getSplatIndex(MachineInstr &MI);
+std::optional<int> getSplatIndex(MachineInstr &MI);
 
 /// \returns the scalar integral splat value of \p Reg if possible.
-Optional<APInt> getIConstantSplatVal(const Register Reg,
-                                     const MachineRegisterInfo &MRI);
+std::optional<APInt> getIConstantSplatVal(const Register Reg,
+                                          const MachineRegisterInfo &MRI);
 
 /// \returns the scalar integral splat value defined by \p MI if possible.
-Optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
-                                     const MachineRegisterInfo &MRI);
+std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
+                                          const MachineRegisterInfo &MRI);
 
 /// \returns the scalar sign extended integral splat value of \p Reg if
 /// possible.
-Optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
-                                           const MachineRegisterInfo &MRI);
+std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
+                                                const MachineRegisterInfo &MRI);
 
 /// \returns the scalar sign extended integral splat value defined by \p MI if
 /// possible.
-Optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
-                                           const MachineRegisterInfo &MRI);
+std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
+                                                const MachineRegisterInfo &MRI);
 
 /// Returns a floating point scalar constant of a build vector splat if it
 /// exists. When \p AllowUndef == true some elements can be undef but not all.
-Optional<FPValueAndVReg> getFConstantSplat(Register VReg,
-                                           const MachineRegisterInfo &MRI,
-                                           bool AllowUndef = true);
+std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
+                                                const MachineRegisterInfo &MRI,
+                                                bool AllowUndef = true);
 
 /// Return true if the specified register is defined by G_BUILD_VECTOR or
 /// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
@@ -463,8 +464,8 @@ bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
 /// \endcode
 ///
 /// In the above case, this will return a RegOrConstant containing 4.
-Optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
-                                       const MachineRegisterInfo &MRI);
+std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
+                                            const MachineRegisterInfo &MRI);
 
 /// Determines if \p MI defines a constant integer or a build vector of
 /// constant integers. Treats undef values as constants.
@@ -474,8 +475,9 @@ bool isConstantOrConstantVector(MachineInstr &MI,
 /// Determines if \p MI defines a constant integer or a splat vector of
 /// constant integers.
 /// \returns the scalar constant or std::nullopt.
-Optional<APInt> isConstantOrConstantSplatVector(MachineInstr &MI,
-                                                const MachineRegisterInfo &MRI);
+std::optional<APInt>
+isConstantOrConstantSplatVector(MachineInstr &MI,
+                                const MachineRegisterInfo &MRI);
 
 /// Attempt to match a unary predicate against a scalar/splat constant or every
 /// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index b7f6de40266e2..f4dee09672398 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1337,10 +1337,10 @@ bool isVPBinaryOp(unsigned Opcode);
 bool isVPReduction(unsigned Opcode);
 
 /// The operand position of the vector mask.
-Optional<unsigned> getVPMaskIdx(unsigned Opcode);
+std::optional<unsigned> getVPMaskIdx(unsigned Opcode);
 
 /// The operand position of the explicit vector length parameter.
-Optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
+std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
 
 //===--------------------------------------------------------------------===//
 /// MemIndexedMode enum - This enum defines the load / store indexed

diff --git a/llvm/include/llvm/CodeGen/MIRFormatter.h b/llvm/include/llvm/CodeGen/MIRFormatter.h
index fb276ff117afa..a039da1bd5b34 100644
--- a/llvm/include/llvm/CodeGen/MIRFormatter.h
+++ b/llvm/include/llvm/CodeGen/MIRFormatter.h
@@ -13,10 +13,10 @@
 #ifndef LLVM_CODEGEN_MIRFORMATTER_H
 #define LLVM_CODEGEN_MIRFORMATTER_H
 
-#include "llvm/ADT/Optional.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cstdint>
+#include <optional>
 
 namespace llvm {
 
@@ -37,7 +37,7 @@ class MIRFormatter {
   /// that we can have more meaningful mnemonic than a 64-bit integer. Passing
   /// None to OpIdx means the index is unknown.
   virtual void printImm(raw_ostream &OS, const MachineInstr &MI,
-                        Optional<unsigned> OpIdx, int64_t Imm) const {
+                        std::optional<unsigned> OpIdx, int64_t Imm) const {
     OS << Imm;
   }
 

diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index ce2877f542122..f2fc266662b74 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -171,7 +171,7 @@ class MachineBasicBlock
 
   /// Fixed unique ID assigned to this basic block upon creation. Used with
   /// basic block sections and basic block labels.
-  Optional<unsigned> BBID;
+  std::optional<unsigned> BBID;
 
   /// With basic block sections, this stores the Section ID of the basic block.
   MBBSectionID SectionID{0};
@@ -624,7 +624,7 @@ class MachineBasicBlock
 
   void setIsEndSection(bool V = true) { IsEndSection = V; }
 
-  Optional<unsigned> getBBID() const { return BBID; }
+  std::optional<unsigned> getBBID() const { return BBID; }
 
   /// Returns the BBID of the block when BBAddrMapVersion >= 2, otherwise
   /// returns `MachineBasicBlock::Number`.

diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 1ab3616bc94c8..08142d91b785f 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1686,16 +1686,16 @@ class MachineInstr
   bool allDefsAreDead() const;
 
   /// Return a valid size if the instruction is a spill instruction.
-  Optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;
+  std::optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;
 
   /// Return a valid size if the instruction is a folded spill instruction.
-  Optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;
+  std::optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;
 
   /// Return a valid size if the instruction is a restore instruction.
-  Optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;
+  std::optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;
 
   /// Return a valid size if the instruction is a folded restore instruction.
-  Optional<unsigned>
+  std::optional<unsigned>
   getFoldedRestoreSize(const TargetInstrInfo *TII) const;
 
   /// Copy implicit register operands from specified

diff --git a/llvm/include/llvm/CodeGen/MachineOperand.h b/llvm/include/llvm/CodeGen/MachineOperand.h
index 67cc82098fa1e..90f939285f3a2 100644
--- a/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -299,7 +299,7 @@ class MachineOperand {
   /// information from it's parent.
   /// \param IntrinsicInfo - same as \p TRI.
   void print(raw_ostream &os, ModuleSlotTracker &MST, LLT TypeToPrint,
-             Optional<unsigned> OpIdx, bool PrintDef, bool IsStandalone,
+             std::optional<unsigned> OpIdx, bool PrintDef, bool IsStandalone,
              bool ShouldPrintRegisterTies, unsigned TiedOperandIdx,
              const TargetRegisterInfo *TRI,
              const TargetIntrinsicInfo *IntrinsicInfo) const;

diff --git a/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h b/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
index 7346d2340ed3a..3d0f836b0c757 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
@@ -33,7 +33,7 @@ class BaseIndexOffset {
 private:
   SDValue Base;
   SDValue Index;
-  Optional<int64_t> Offset;
+  std::optional<int64_t> Offset;
   bool IsIndexSignExt = false;
 
 public:
@@ -82,9 +82,9 @@ class BaseIndexOffset {
   // Returns true `Op0` and `Op1` can be proven to alias/not alias, in
   // which case `IsAlias` is set to true/false.
   static bool computeAliasing(const SDNode *Op0,
-                              const Optional<int64_t> NumBytes0,
+                              const std::optional<int64_t> NumBytes0,
                               const SDNode *Op1,
-                              const Optional<int64_t> NumBytes1,
+                              const std::optional<int64_t> NumBytes1,
                               const SelectionDAG &DAG, bool &IsAlias);
 
   /// Parses tree in N for base, index, offset addresses.

diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 983c046f54f18..b23a24ad73b7e 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -2103,7 +2103,7 @@ class BuildVectorSDNode : public SDNode {
   /// If this BuildVector is constant and represents the numerical series
   /// "<a, a+n, a+2n, a+3n, ...>" where a is integer and n is a non-zero integer,
   /// the value "<a,n>" is returned.
-  Optional<std::pair<APInt, APInt>> isConstantSequence() const;
+  std::optional<std::pair<APInt, APInt>> isConstantSequence() const;
 
   /// Recast bit data \p SrcBitElements to \p DstEltSizeInBits wide elements.
   /// Undef elements are treated as zero, and entirely undefined elements are

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 544f8c2555e82..373b8d39d98cc 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -437,7 +437,7 @@ def simplify_add_to_sub: GICombineRule <
 >;
 
 // Fold fp_op(cst) to the constant result of the floating point operation.
-def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
+def constant_fp_op_matchinfo: GIDefMatchData<"std::optional<APFloat>">;
 def constant_fp_op: GICombineRule <
   (defs root:$root, constant_fp_op_matchinfo:$info),
   (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,

diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index fbbe19f294702..14f0b78e4a185 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1072,7 +1072,7 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
 
   // We assume a single instruction only has a spill or reload, not
   // both.
-  Optional<unsigned> Size;
+  std::optional<unsigned> Size;
   if ((Size = MI.getRestoreSize(TII))) {
     CommentOS << *Size << "-byte Reload\n";
   } else if ((Size = MI.getFoldedRestoreSize(TII))) {

diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index dc09b529cc091..1b2e7ad140696 100644
--- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -1339,7 +1339,7 @@ void CodeViewDebug::calculateRanges(
     assert(DVInst->isDebugValue() && "Invalid History entry");
     // FIXME: Find a way to represent constant variables, since they are
     // relatively common.
-    Optional<DbgVariableLocation> Location =
+    std::optional<DbgVariableLocation> Location =
         DbgVariableLocation::extractFromMachineInstruction(*DVInst);
     if (!Location)
     {

diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
index 20389527a8bbc..4cf4f02ce7228 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
@@ -30,7 +30,7 @@ using namespace llvm;
 /// variable's lexical scope instruction ranges.
 static cl::opt<bool> TrimVarLocs("trim-var-locs", cl::Hidden, cl::init(true));
 
-Optional<DbgVariableLocation>
+std::optional<DbgVariableLocation>
 DbgVariableLocation::extractFromMachineInstruction(
     const MachineInstr &Instruction) {
   DbgVariableLocation Location;

diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h
index dda12f7fd7cb4..a458825f69b44 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h
@@ -159,7 +159,7 @@ class DebugLocStream::ListBuilder {
   DbgVariable &V;
   const MachineInstr &MI;
   size_t ListIndex;
-  Optional<uint8_t> TagOffset;
+  std::optional<uint8_t> TagOffset;
 
 public:
   ListBuilder(DebugLocStream &Locs, DwarfCompileUnit &CU, AsmPrinter &Asm,

diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 1727671ff5816..6dde50375a60c 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -1129,7 +1129,7 @@ void DwarfCompileUnit::constructAbstractSubprogramScopeDIE(
   AbsDef = &ContextCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
   ContextCU->applySubprogramAttributesToDefinition(SP, *AbsDef);
   ContextCU->addSInt(*AbsDef, dwarf::DW_AT_inline,
-                     DD->getDwarfVersion() <= 4 ? Optional<dwarf::Form>()
+                     DD->getDwarfVersion() <= 4 ? std::optional<dwarf::Form>()
                                                 : dwarf::DW_FORM_implicit_const,
                      dwarf::DW_INL_inlined);
   if (DIE *ObjectPointer = ContextCU->createAndAddScopeChildren(Scope, *AbsDef))

diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 9245ac9242ff2..5d2ef8ee79a7b 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -116,7 +116,7 @@ class DbgVariable : public DbgEntity {
   /// Index of the entry list in DebugLocs.
   unsigned DebugLocListIndex = ~0u;
   /// DW_OP_LLVM_tag_offset value from DebugLocs.
-  Optional<uint8_t> DebugLocListTagOffset;
+  std::optional<uint8_t> DebugLocListTagOffset;
 
   /// Single value location description.
   std::unique_ptr<DbgValueLoc> ValueLoc = nullptr;
@@ -175,7 +175,9 @@ class DbgVariable : public DbgEntity {
   void setDebugLocListIndex(unsigned O) { DebugLocListIndex = O; }
   unsigned getDebugLocListIndex() const { return DebugLocListIndex; }
   void setDebugLocListTagOffset(uint8_t O) { DebugLocListTagOffset = O; }
-  Optional<uint8_t> getDebugLocListTagOffset() const { return DebugLocListTagOffset; }
+  std::optional<uint8_t> getDebugLocListTagOffset() const {
+    return DebugLocListTagOffset;
+  }
   StringRef getName() const { return getVariable()->getName(); }
   const DbgValueLoc *getValueLoc() const { return ValueLoc.get(); }
   /// Get the FI entries, sorted by fragment offset.

diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index ebe351eeef58d..d89caac98eb8b 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -494,7 +494,7 @@ bool DwarfExpression::addExpression(
   // and not any other parts of the following DWARF expression.
   assert(!IsEmittingEntryValue && "Can't emit entry value around expression");
 
-  Optional<DIExpression::ExprOperand> PrevConvertOp;
+  std::optional<DIExpression::ExprOperand> PrevConvertOp;
 
   while (ExprCursor) {
     auto Op = ExprCursor.take();

diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h
index b869e28a74f50..a5a19cdf9c27a 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -53,7 +53,7 @@ class DIExpressionCursor {
   DIExpressionCursor(const DIExpressionCursor &) = default;
 
   /// Consume one operation.
-  Optional<DIExpression::ExprOperand> take() {
+  std::optional<DIExpression::ExprOperand> take() {
     if (Start == End)
       return std::nullopt;
     return *(Start++);
@@ -63,14 +63,14 @@ class DIExpressionCursor {
   void consume(unsigned N) { std::advance(Start, N); }
 
   /// Return the current operation.
-  Optional<DIExpression::ExprOperand> peek() const {
+  std::optional<DIExpression::ExprOperand> peek() const {
     if (Start == End)
       return std::nullopt;
     return *(Start);
   }
 
   /// Return the next operation.
-  Optional<DIExpression::ExprOperand> peekNext() const {
+  std::optional<DIExpression::ExprOperand> peekNext() const {
     if (Start == End)
       return std::nullopt;
 
@@ -170,7 +170,7 @@ class DwarfExpression {
 
   bool isParameterValue() { return LocationFlags & CallSiteParamValue; }
 
-  Optional<uint8_t> TagOffset;
+  std::optional<uint8_t> TagOffset;
 
 protected:
   /// Push a DW_OP_piece / DW_OP_bit_piece for emitting later, if one is needed

diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index aad00c896b864..9cd6532a2d5a6 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -218,7 +218,7 @@ void DwarfUnit::addFlag(DIE &Die, dwarf::Attribute Attribute) {
 }
 
 void DwarfUnit::addUInt(DIEValueList &Die, dwarf::Attribute Attribute,
-                        Optional<dwarf::Form> Form, uint64_t Integer) {
+                        std::optional<dwarf::Form> Form, uint64_t Integer) {
   if (!Form)
     Form = DIEInteger::BestForm(false, Integer);
   assert(Form != dwarf::DW_FORM_implicit_const &&
@@ -232,13 +232,13 @@ void DwarfUnit::addUInt(DIEValueList &Block, dwarf::Form Form,
 }
 
 void DwarfUnit::addSInt(DIEValueList &Die, dwarf::Attribute Attribute,
-                        Optional<dwarf::Form> Form, int64_t Integer) {
+                        std::optional<dwarf::Form> Form, int64_t Integer) {
   if (!Form)
     Form = DIEInteger::BestForm(true, Integer);
   addAttribute(Die, Attribute, *Form, DIEInteger(Integer));
 }
 
-void DwarfUnit::addSInt(DIELoc &Die, Optional<dwarf::Form> Form,
+void DwarfUnit::addSInt(DIELoc &Die, std::optional<dwarf::Form> Form,
                         int64_t Integer) {
   addSInt(Die, (dwarf::Attribute)0, Form, Integer);
 }

diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
index 48d63d126701a..395539fcc1c7b 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -15,10 +15,10 @@
 
 #include "DwarfDebug.h"
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
 #include "llvm/CodeGen/AsmPrinter.h"
 #include "llvm/CodeGen/DIE.h"
 #include "llvm/Target/TargetMachine.h"
+#include <optional>
 #include <string>
 
 namespace llvm {
@@ -143,15 +143,15 @@ class DwarfUnit : public DIEUnit {
 
   /// Add an unsigned integer attribute data and value.
   void addUInt(DIEValueList &Die, dwarf::Attribute Attribute,
-               Optional<dwarf::Form> Form, uint64_t Integer);
+               std::optional<dwarf::Form> Form, uint64_t Integer);
 
   void addUInt(DIEValueList &Block, dwarf::Form Form, uint64_t Integer);
 
   /// Add a signed integer attribute data and value.
   void addSInt(DIEValueList &Die, dwarf::Attribute Attribute,
-               Optional<dwarf::Form> Form, int64_t Integer);
+               std::optional<dwarf::Form> Form, int64_t Integer);
 
-  void addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer);
+  void addSInt(DIELoc &Die, std::optional<dwarf::Form> Form, int64_t Integer);
 
   /// Add a string attribute data and value.
   ///

diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index 9af263081d5e3..806868131d6ca 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -229,7 +229,8 @@ walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start,
 /// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression
 /// doesn't explicitly describe a memory location with DW_OP_deref or if the
 /// expression is too complex to interpret.
-static Optional<int64_t> getDerefOffsetInBytes(const DIExpression *DIExpr) {
+static std::optional<int64_t>
+getDerefOffsetInBytes(const DIExpression *DIExpr) {
   int64_t Offset = 0;
   const unsigned NumElements = DIExpr->getNumElements();
   const auto Elements = DIExpr->getElements();

diff --git a/llvm/lib/CodeGen/CFIInstrInserter.cpp b/llvm/lib/CodeGen/CFIInstrInserter.cpp
index 842339a1a6858..25741686a8297 100644
--- a/llvm/lib/CodeGen/CFIInstrInserter.cpp
+++ b/llvm/lib/CodeGen/CFIInstrInserter.cpp
@@ -18,7 +18,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/Optional.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/Passes.h"
@@ -89,10 +88,10 @@ class CFIInstrInserter : public MachineFunctionPass {
 #define INVALID_OFFSET INT_MAX
   /// Contains the location where the CSR register is saved.
   struct CSRSavedLocation {
-    CSRSavedLocation(Optional<unsigned> R, Optional<int> O)
+    CSRSavedLocation(std::optional<unsigned> R, std::optional<int> O)
         : Reg(R), Offset(O) {}
-    Optional<unsigned> Reg;
-    Optional<int> Offset;
+    std::optional<unsigned> Reg;
+    std::optional<int> Offset;
   };
 
   /// Contains cfa offset and register values valid at entry and exit of basic
@@ -187,8 +186,8 @@ void CFIInstrInserter::calculateOutgoingCFAInfo(MBBCFAInfo &MBBInfo) {
   // Determine cfa offset and register set by the block.
   for (MachineInstr &MI : *MBBInfo.MBB) {
     if (MI.isCFIInstruction()) {
-      Optional<unsigned> CSRReg;
-      Optional<int> CSROffset;
+      std::optional<unsigned> CSRReg;
+      std::optional<int> CSROffset;
       unsigned CFIIndex = MI.getOperand(0).getCFIIndex();
       const MCCFIInstruction &CFI = Instrs[CFIIndex];
       switch (CFI.getOperation()) {

diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
index a432e4ed7fb7a..64e2d517e3b9c 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -107,7 +107,7 @@ void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B,
 
 void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
                                       ArrayRef<SrcOp> SrcOps,
-                                      Optional<unsigned> Flags,
+                                      std::optional<unsigned> Flags,
                                       GISelInstProfileBuilder &B) const {
 
   profileMBBOpcode(B, Opc);
@@ -170,7 +170,7 @@ CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
 MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
                                               ArrayRef<DstOp> DstOps,
                                               ArrayRef<SrcOp> SrcOps,
-                                              Optional<unsigned> Flag) {
+                                              std::optional<unsigned> Flag) {
   switch (Opc) {
   default:
     break;
@@ -210,8 +210,8 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
       break;
     }
 
-    if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(),
-                                                SrcOps[1].getReg(), *getMRI()))
+    if (std::optional<APInt> Cst = ConstantFoldBinOp(
+            Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI()))
       return buildConstant(DstOps[0], *Cst);
     break;
   }
@@ -230,7 +230,7 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
     // Try to constant fold these.
     assert(SrcOps.size() == 2 && "Invalid sources");
     assert(DstOps.size() == 1 && "Invalid dsts");
-    if (Optional<APFloat> Cst = ConstantFoldFPBinOp(
+    if (std::optional<APFloat> Cst = ConstantFoldFPBinOp(
             Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI()))
       return buildFConstant(DstOps[0], *Cst);
     break;
@@ -251,7 +251,7 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
     // Try to constant fold these.
     assert(SrcOps.size() == 1 && "Invalid sources");
     assert(DstOps.size() == 1 && "Invalid dsts");
-    if (Optional<APFloat> Cst = ConstantFoldIntToFloat(
+    if (std::optional<APFloat> Cst = ConstantFoldIntToFloat(
             Opc, DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getReg(), *getMRI()))
       return buildFConstant(DstOps[0], *Cst);
     break;

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 27f4f4fd8fce4..c5d5d68d849cf 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -108,7 +108,7 @@ static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
 /// 1            1                2
 /// 2            2                1
 /// 3            3                0
-static Optional<bool>
+static std::optional<bool>
 isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
             int64_t LowestIdx) {
   // Need at least two byte positions to decide on endianness.
@@ -1285,9 +1285,9 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
          LegalizerHelper::LegalizeResult::Legalized;
 }
 
-static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
-                                             const Register Op,
-                                             const MachineRegisterInfo &MRI) {
+static std::optional<APFloat>
+constantFoldFpUnary(unsigned Opcode, LLT DstTy, const Register Op,
+                    const MachineRegisterInfo &MRI) {
   const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
   if (!MaybeCst)
     return std::nullopt;
@@ -1327,8 +1327,8 @@ static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
   return V;
 }
 
-bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
-                                                     Optional<APFloat> &Cst) {
+bool CombinerHelper::matchCombineConstantFoldFpUnary(
+    MachineInstr &MI, std::optional<APFloat> &Cst) {
   Register DstReg = MI.getOperand(0).getReg();
   Register SrcReg = MI.getOperand(1).getReg();
   LLT DstTy = MRI.getType(DstReg);
@@ -1336,8 +1336,8 @@ bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
   return Cst.has_value();
 }
 
-void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
-                                                     Optional<APFloat> &Cst) {
+void CombinerHelper::applyCombineConstantFoldFpUnary(
+    MachineInstr &MI, std::optional<APFloat> &Cst) {
   assert(Cst && "Optional is unexpectedly empty!");
   Builder.setInstrAndDebugLoc(MI);
   MachineFunction &MF = Builder.getMF();
@@ -3269,7 +3269,7 @@ bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
   return true;
 }
 
-Optional<SmallVector<Register, 8>>
+std::optional<SmallVector<Register, 8>>
 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
   assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
   // We want to detect if Root is part of a tree which represents a bunch
@@ -3367,7 +3367,7 @@ matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
   return std::make_pair(Load, Shift / MemSizeInBits);
 }
 
-Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
+std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
 CombinerHelper::findLoadOffsetsForLoadOrCombine(
     SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
     const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
@@ -3559,7 +3559,7 @@ bool CombinerHelper::matchLoadOrCombine(
   // pattern. If it does, then we can represent it using a load + possibly a
   // BSWAP.
   bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
-  Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
+  std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
   if (!IsBigEndian)
     return false;
   bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
@@ -4612,7 +4612,7 @@ bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
   // (G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD X, Y), C)
   // if and only if (G_PTR_ADD X, C) has one use.
   Register LHSBase;
-  Optional<ValueAndVReg> LHSCstOff;
+  std::optional<ValueAndVReg> LHSCstOff;
   if (!mi_match(MI.getBaseReg(), MRI,
                 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
     return false;
@@ -5983,7 +5983,7 @@ bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI,
     return MRI.getType(MatchInfo) == DstVecTy;
   }
 
-  Optional<ValueAndVReg> ShiftAmount;
+  std::optional<ValueAndVReg> ShiftAmount;
   const auto LoPattern = m_GBitcast(m_Reg(Lo));
   const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount));
   if (mi_match(
@@ -6014,7 +6014,7 @@ bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI,
                                                    Register &MatchInfo) {
   // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
   //    y if K == size of vector element type
-  Optional<ValueAndVReg> ShiftAmt;
+  std::optional<ValueAndVReg> ShiftAmt;
   if (!mi_match(MI.getOperand(1).getReg(), MRI,
                 m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))),
                         m_GCst(ShiftAmt))))

diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 6c44a1c1e384f..44fb5db43663b 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -197,7 +197,7 @@ MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
   return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
 }
 
-Optional<MachineInstrBuilder>
+std::optional<MachineInstrBuilder>
 MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                     const LLT ValueTy, uint64_t Value) {
   assert(Res == 0 && "Res is a result argument");
@@ -762,9 +762,9 @@ MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
   return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
 }
 
-MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
-                                                   const SrcOp &Op,
-                                                   Optional<unsigned> Flags) {
+MachineInstrBuilder
+MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
+                               std::optional<unsigned> Flags) {
   return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
 }
 
@@ -779,16 +779,15 @@ MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                 const DstOp &Res,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1,
-                                                Optional<unsigned> Flags) {
+                                                std::optional<unsigned> Flags) {
 
   return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
 }
 
-MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
-                                                  const SrcOp &Tst,
-                                                  const SrcOp &Op0,
-                                                  const SrcOp &Op1,
-                                                  Optional<unsigned> Flags) {
+MachineInstrBuilder
+MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
+                              const SrcOp &Op0, const SrcOp &Op1,
+                              std::optional<unsigned> Flags) {
 
   return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
 }
@@ -1029,10 +1028,10 @@ void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
 #endif
 }
 
-MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
-                                                 ArrayRef<DstOp> DstOps,
-                                                 ArrayRef<SrcOp> SrcOps,
-                                                 Optional<unsigned> Flags) {
+MachineInstrBuilder
+MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
+                             ArrayRef<SrcOp> SrcOps,
+                             std::optional<unsigned> Flags) {
   switch (Opc) {
   default:
     break;

diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 6750edd1d20c2..a164601b23931 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -286,9 +286,9 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
   reportGISelFailure(MF, TPC, MORE, R);
 }
 
-Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
-                                          const MachineRegisterInfo &MRI) {
-  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
+std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
+                                               const MachineRegisterInfo &MRI) {
+  std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
       VReg, MRI, /*LookThroughInstrs*/ false);
   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
          "Value found while looking through instrs");
@@ -297,9 +297,9 @@ Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
   return ValAndVReg->Value;
 }
 
-Optional<int64_t>
+std::optional<int64_t>
 llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
-  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
+  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
   if (Val && Val->getBitWidth() <= 64)
     return Val->getSExtValue();
   return std::nullopt;
@@ -308,9 +308,9 @@ llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
 namespace {
 
 typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
-typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
+typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
 
-Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
     Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
     GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
     bool LookThroughAnyExt = false) {
@@ -347,7 +347,7 @@ Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
   if (!MI || !IsConstantOpcode(MI))
     return std::nullopt;
 
-  Optional<APInt> MaybeVal = getAPCstValue(MI);
+  std::optional<APInt> MaybeVal = getAPCstValue(MI);
   if (!MaybeVal)
     return std::nullopt;
   APInt &Val = *MaybeVal;
@@ -389,14 +389,14 @@ bool isAnyConstant(const MachineInstr *MI) {
   return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
 }
 
-Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
+std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
   const MachineOperand &CstVal = MI->getOperand(1);
   if (CstVal.isCImm())
     return CstVal.getCImm()->getValue();
   return std::nullopt;
 }
 
-Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
+std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
   const MachineOperand &CstVal = MI->getOperand(1);
   if (CstVal.isCImm())
     return CstVal.getCImm()->getValue();
@@ -407,13 +407,13 @@ Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
 
 } // end anonymous namespace
 
-Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
   return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                            getCImmAsAPInt, LookThroughInstrs);
 }
 
-Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
+std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
     bool LookThroughAnyExt) {
   return getConstantVRegValWithLookThrough(
@@ -421,7 +421,7 @@ Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
       LookThroughAnyExt);
 }
 
-Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
+std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
   auto Reg = getConstantVRegValWithLookThrough(
       VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
@@ -439,7 +439,7 @@ llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
   return MI->getOperand(1).getFPImm();
 }
 
-Optional<DefinitionAndSourceRegister>
+std::optional<DefinitionAndSourceRegister>
 llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
   Register DefSrcReg = Reg;
   auto *DefMI = MRI.getVRegDef(Reg);
@@ -461,14 +461,14 @@ llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
 
 MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                          const MachineRegisterInfo &MRI) {
-  Optional<DefinitionAndSourceRegister> DefSrcReg =
+  std::optional<DefinitionAndSourceRegister> DefSrcReg =
       getDefSrcRegIgnoringCopies(Reg, MRI);
   return DefSrcReg ? DefSrcReg->MI : nullptr;
 }
 
 Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                        const MachineRegisterInfo &MRI) {
-  Optional<DefinitionAndSourceRegister> DefSrcReg =
+  std::optional<DefinitionAndSourceRegister> DefSrcReg =
       getDefSrcRegIgnoringCopies(Reg, MRI);
   return DefSrcReg ? DefSrcReg->Reg : Register();
 }
@@ -492,9 +492,10 @@ APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
   return APF;
 }
 
-Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
-                                        const Register Op2,
-                                        const MachineRegisterInfo &MRI) {
+std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
+                                             const Register Op1,
+                                             const Register Op2,
+                                             const MachineRegisterInfo &MRI) {
   auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
   if (!MaybeOp2Cst)
     return std::nullopt;
@@ -556,9 +557,9 @@ Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
   return std::nullopt;
 }
 
-Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
-                                            const Register Op2,
-                                            const MachineRegisterInfo &MRI) {
+std::optional<APFloat>
+llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
+                          const Register Op2, const MachineRegisterInfo &MRI) {
   const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
   if (!Op2Cst)
     return std::nullopt;
@@ -759,9 +760,9 @@ Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
   return LiveIn;
 }
 
-Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
-                                        uint64_t Imm,
-                                        const MachineRegisterInfo &MRI) {
+std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
+                                             const Register Op1, uint64_t Imm,
+                                             const MachineRegisterInfo &MRI) {
   auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
   if (MaybeOp1Cst) {
     switch (Opcode) {
@@ -776,9 +777,9 @@ Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
   return std::nullopt;
 }
 
-Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
-                                               Register Src,
-                                               const MachineRegisterInfo &MRI) {
+std::optional<APFloat>
+llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
+                             const MachineRegisterInfo &MRI) {
   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
   if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
     APFloat DstVal(getFltSemanticForLLT(DstTy));
@@ -789,7 +790,7 @@ Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
   return std::nullopt;
 }
 
-Optional<SmallVector<unsigned>>
+std::optional<SmallVector<unsigned>>
 llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
   LLT Ty = MRI.getType(Src);
   SmallVector<unsigned> FoldedCTLZs;
@@ -822,7 +823,7 @@ llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
 
 bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                   GISelKnownBits *KB) {
-  Optional<DefinitionAndSourceRegister> DefSrcReg =
+  std::optional<DefinitionAndSourceRegister> DefSrcReg =
       getDefSrcRegIgnoringCopies(Reg, MRI);
   if (!DefSrcReg)
     return false;
@@ -1000,7 +1001,7 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
   return LLT::scalar(GCD);
 }
 
-Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
+std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
          "Only G_SHUFFLE_VECTOR can have a splat index!");
   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
@@ -1028,9 +1029,9 @@ static bool isBuildVectorOp(unsigned Opcode) {
 
 namespace {
 
-Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
-                                           const MachineRegisterInfo &MRI,
-                                           bool AllowUndef) {
+std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
+                                                const MachineRegisterInfo &MRI,
+                                                bool AllowUndef) {
   MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
   if (!MI)
     return std::nullopt;
@@ -1038,7 +1039,7 @@ Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
   if (!isBuildVectorOp(MI->getOpcode()))
     return std::nullopt;
 
-  Optional<ValueAndVReg> SplatValAndReg;
+  std::optional<ValueAndVReg> SplatValAndReg;
   for (MachineOperand &Op : MI->uses()) {
     Register Element = Op.getReg();
     auto ElementValAndReg =
@@ -1080,11 +1081,11 @@ bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                     AllowUndef);
 }
 
-Optional<APInt> llvm::getIConstantSplatVal(const Register Reg,
-                                           const MachineRegisterInfo &MRI) {
+std::optional<APInt>
+llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
   if (auto SplatValAndReg =
           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
-    Optional<ValueAndVReg> ValAndVReg =
+    std::optional<ValueAndVReg> ValAndVReg =
         getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
     return ValAndVReg->Value;
   }
@@ -1092,12 +1093,13 @@ Optional<APInt> llvm::getIConstantSplatVal(const Register Reg,
   return std::nullopt;
 }
 
-Optional<APInt> llvm::getIConstantSplatVal(const MachineInstr &MI,
-                                           const MachineRegisterInfo &MRI) {
+std::optional<APInt>
+llvm::getIConstantSplatVal(const MachineInstr &MI,
+                           const MachineRegisterInfo &MRI) {
   return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
 }
 
-Optional<int64_t>
+std::optional<int64_t>
 llvm::getIConstantSplatSExtVal(const Register Reg,
                                const MachineRegisterInfo &MRI) {
   if (auto SplatValAndReg =
@@ -1106,15 +1108,15 @@ llvm::getIConstantSplatSExtVal(const Register Reg,
   return std::nullopt;
 }
 
-Optional<int64_t>
+std::optional<int64_t>
 llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
 }
 
-Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
-                                                 const MachineRegisterInfo &MRI,
-                                                 bool AllowUndef) {
+std::optional<FPValueAndVReg>
+llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
+                        bool AllowUndef) {
   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
   return std::nullopt;
@@ -1132,8 +1134,8 @@ bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
 }
 
-Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
-                                             const MachineRegisterInfo &MRI) {
+std::optional<RegOrConstant>
+llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
   unsigned Opc = MI.getOpcode();
   if (!isBuildVectorOp(Opc))
     return std::nullopt;
@@ -1202,7 +1204,7 @@ bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
   return true;
 }
 
-Optional<APInt>
+std::optional<APInt>
 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                       const MachineRegisterInfo &MRI) {
   Register Def = MI.getOperand(0).getReg();

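The folding helpers above (ConstantFoldBinOp, ConstantFoldFPBinOp, ConstantFoldExtOp, and friends) share one control-flow pattern: fetch each operand as a std::optional, return std::nullopt on the first miss, and combine only when everything folded. A self-contained sketch under invented types, with a plain map standing in for MachineRegisterInfo:

  #include <cstdint>
  #include <map>
  #include <optional>

  using RegFile = std::map<int, int64_t>; // stand-in for MachineRegisterInfo

  // Stand-in for getIConstantVRegVal: the register's constant, if known.
  std::optional<int64_t> getConstant(const RegFile &MRI, int Reg) {
    auto It = MRI.find(Reg);
    if (It == MRI.end())
      return std::nullopt;
    return It->second;
  }

  std::optional<int64_t> foldAdd(const RegFile &MRI, int Op1, int Op2) {
    std::optional<int64_t> C1 = getConstant(MRI, Op1);
    if (!C1)
      return std::nullopt; // first operand is not a constant
    std::optional<int64_t> C2 = getConstant(MRI, Op2);
    if (!C2)
      return std::nullopt; // second operand is not a constant
    return *C1 + *C2;      // both folded: combine
  }
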
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 2bdae0e640079..0bdb32ce4f947 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -97,11 +97,11 @@ class ImplicitNullChecks : public MachineFunctionPass {
 
     /// If set, then an instruction in \p Insts that also must be
     /// hoisted.
-    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;
+    std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;
 
     /*implicit*/ DependenceResult(
         bool CanReorder,
-        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
+        std::optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
         : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
       assert((!PotentialDependence || CanReorder) &&
              "!CanReorder && PotentialDependence.hasValue() not allowed!");
@@ -254,7 +254,7 @@ ImplicitNullChecks::computeDependence(const MachineInstr *MI,
   assert(llvm::all_of(Block, canHandle) && "Check this first!");
   assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");
 
-  Optional<ArrayRef<MachineInstr *>::iterator> Dep;
+  std::optional<ArrayRef<MachineInstr *>::iterator> Dep;
 
   for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
     if (canReorder(*I, MI))

diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
index b65eceb37f4dc..19e523a1099a4 100644
--- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -1038,7 +1038,7 @@ void MLocTracker::writeRegMask(const MachineOperand *MO, unsigned CurBB,
   Masks.push_back(std::make_pair(MO, InstID));
 }
 
-Optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) {
+std::optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) {
   SpillLocationNo SpillID(SpillLocs.idFor(L));
 
   if (SpillID.id() == 0) {
@@ -1278,7 +1278,7 @@ bool InstrRefBasedLDV::isCalleeSavedReg(Register R) const {
 // void InstrRefBasedLDV::printVarLocInMBB(..)
 #endif
 
-Optional<SpillLocationNo>
+std::optional<SpillLocationNo>
 InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) {
   assert(MI.hasOneMemOperand() &&
          "Spill instruction does not have exactly one memory operand?");
@@ -1293,9 +1293,9 @@ InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) {
   return MTracker->getOrTrackSpillLoc({Reg, Offset});
 }
 
-Optional<LocIdx>
+std::optional<LocIdx>
 InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
-  Optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI);
+  std::optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI);
   if (!SpillLoc)
     return std::nullopt;
 
@@ -1426,7 +1426,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI,
 
   // Default machine value number is <None> -- if no instruction defines
   // the corresponding value, it must have been optimized out.
-  Optional<ValueIDNum> NewID;
+  std::optional<ValueIDNum> NewID;
 
   // Try to lookup the instruction number, and find the machine value number
   // that it defines. It could be an instruction, or a PHI.
@@ -1440,7 +1440,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI,
     // a register def was folded into a stack store.
     if (OpNo == MachineFunction::DebugOperandMemNumber &&
         TargetInstr.hasOneMemOperand()) {
-      Optional<LocIdx> L = findLocationForMemOperand(TargetInstr);
+      std::optional<LocIdx> L = findLocationForMemOperand(TargetInstr);
       if (L)
         NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L);
     } else if (OpNo != MachineFunction::DebugOperandMemNumber) {
@@ -1658,7 +1658,7 @@ bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) {
     Register Base;
     StackOffset Offs = TFI->getFrameIndexReference(*MI.getMF(), FI, Base);
     SpillLoc SL = {Base, Offs};
-    Optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL);
+    std::optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL);
 
     // We might be able to find a value, but have chosen not to, to avoid
     // tracking too much stack information.
@@ -1753,7 +1753,8 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) {
 
   // If this instruction writes to a spill slot, def that slot.
   if (hasFoldedStackStore(MI)) {
-    if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) {
+    if (std::optional<SpillLocationNo> SpillNo =
+            extractSpillBaseRegAndOffset(MI)) {
       for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
         unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I);
         LocIdx L = MTracker->getSpillMLoc(SpillID);
@@ -1795,7 +1796,8 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) {
 
   // Tell TTracker about any folded stack store.
   if (hasFoldedStackStore(MI)) {
-    if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) {
+    if (std::optional<SpillLocationNo> SpillNo =
+            extractSpillBaseRegAndOffset(MI)) {
       for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
         unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I);
         LocIdx L = MTracker->getSpillMLoc(SpillID);
@@ -1836,7 +1838,7 @@ void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) {
   }
 }
 
-Optional<SpillLocationNo>
+std::optional<SpillLocationNo>
 InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI,
                                      MachineFunction *MF) {
   // TODO: Handle multiple stores folded into one.
@@ -1866,7 +1868,7 @@ bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI,
   return Reg != 0;
 }
 
-Optional<SpillLocationNo>
+std::optional<SpillLocationNo>
 InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI,
                                        MachineFunction *MF, unsigned &Reg) {
   if (!MI.hasOneMemOperand())
@@ -1910,7 +1912,7 @@ bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) {
   // First, if there are any DBG_VALUEs pointing at a spill slot that is
   // written to, terminate that variable location. The value in memory
   // will have changed. DbgEntityHistoryCalculator doesn't try to detect this.
-  if (Optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) {
+  if (std::optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) {
     // Un-set this location and clobber, so that earlier locations don't
     // continue past this store.
     for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) {
@@ -1961,7 +1963,7 @@ bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) {
     unsigned SpillID = MTracker->getLocID(Loc, {Size, 0});
     DoTransfer(Reg, SpillID);
   } else {
-    Optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg);
+    std::optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg);
     if (!Loc)
       return false;
 
@@ -2707,7 +2709,7 @@ bool InstrRefBasedLDV::pickVPHILoc(
       continue;
     }
 
-    Optional<ValueIDNum> JoinedOpLoc =
+    std::optional<ValueIDNum> JoinedOpLoc =
         pickOperandPHILoc(Idx, MBB, LiveOuts, MOutLocs, BlockOrders);
 
     if (!JoinedOpLoc)
@@ -2720,7 +2722,7 @@ bool InstrRefBasedLDV::pickVPHILoc(
   return true;
 }
 
-Optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc(
+std::optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc(
     unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts,
     FuncValueTable &MOutLocs,
     const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) {
@@ -3954,7 +3956,7 @@ template <> class SSAUpdaterTraits<LDVSSAUpdater> {
 
 } // end namespace llvm
 
-Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(
+std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(
     MachineFunction &MF, const ValueTable *MLiveOuts,
     const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
   assert(MLiveOuts && MLiveIns &&
@@ -3967,13 +3969,13 @@ Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(
   if (SeenDbgPHIIt != SeenDbgPHIs.end())
     return SeenDbgPHIIt->second;
 
-  Optional<ValueIDNum> Result =
+  std::optional<ValueIDNum> Result =
       resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum);
   SeenDbgPHIs.insert({&Here, Result});
   return Result;
 }
 
-Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl(
+std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl(
     MachineFunction &MF, const ValueTable *MLiveOuts,
     const ValueTable *MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
   // Pick out records of DBG_PHI instructions that have been observed. If there

diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h
index 9d0f4d021ae39..5b8b3e0d3ab01 100644
--- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h
+++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h
@@ -892,7 +892,7 @@ class MLocTracker {
   /// Find LocIdx for SpillLoc \p L, creating a new one if it's not tracked.
   /// Returns std::nullopt in scenarios where a spill slot could be
   /// tracked, but we would likely run into resource limitations.
-  Optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L);
+  std::optional<SpillLocationNo> getOrTrackSpillLoc(SpillLoc L);
 
   // Get LocIdx of a spill ID.
   LocIdx getSpillMLoc(unsigned SpillID) {
@@ -1129,10 +1129,10 @@ class InstrRefBasedLDV : public LDVImpl {
     MachineBasicBlock *MBB;
     /// The value number read by the DBG_PHI -- or std::nullopt if it didn't
     /// refer to a value.
-    Optional<ValueIDNum> ValueRead;
+    std::optional<ValueIDNum> ValueRead;
     /// Register/Stack location the DBG_PHI reads -- or std::nullopt if it
     /// referred to something unexpected.
-    Optional<LocIdx> ReadLoc;
+    std::optional<LocIdx> ReadLoc;
 
     operator unsigned() const { return InstrNum; }
   };
@@ -1151,7 +1151,7 @@ class InstrRefBasedLDV : public LDVImpl {
   /// DBG_INSTR_REFs that call resolveDbgPHIs. These variable references solve
   /// a mini SSA problem caused by DBG_PHIs being cloned; this collection caches
   /// the result.
-  DenseMap<MachineInstr *, Optional<ValueIDNum>> SeenDbgPHIs;
+  DenseMap<MachineInstr *, std::optional<ValueIDNum>> SeenDbgPHIs;
 
   DbgOpIDMap DbgOpStore;
 
@@ -1166,8 +1166,8 @@ class InstrRefBasedLDV : public LDVImpl {
   StringRef StackProbeSymbolName;
 
   /// Tests whether this instruction is a spill to a stack slot.
-  Optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI,
-                                               MachineFunction *MF);
+  std::optional<SpillLocationNo> isSpillInstruction(const MachineInstr &MI,
+                                                    MachineFunction *MF);
 
   /// Decide if @MI is a spill instruction and return true if it is. We use 2
   /// criteria to make this decision:
@@ -1180,12 +1180,13 @@ class InstrRefBasedLDV : public LDVImpl {
 
   /// If a given instruction is identified as a restore, return the spill slot
   /// and set \p Reg to the restored register.
-  Optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI,
-                                          MachineFunction *MF, unsigned &Reg);
+  std::optional<SpillLocationNo> isRestoreInstruction(const MachineInstr &MI,
+                                                      MachineFunction *MF,
+                                                      unsigned &Reg);
 
   /// Given a spill instruction, extract the spill slot information, ensure it's
   /// tracked, and return the spill number.
-  Optional<SpillLocationNo>
+  std::optional<SpillLocationNo>
   extractSpillBaseRegAndOffset(const MachineInstr &MI);
 
   /// Observe a single instruction while stepping through a block.
@@ -1230,16 +1231,17 @@ class InstrRefBasedLDV : public LDVImpl {
   /// \p Here the position of a DBG_INSTR_REF seeking a machine value number
   /// \p InstrNum Debug instruction number defined by DBG_PHI instructions.
   /// \returns The machine value number at position Here, or std::nullopt.
-  Optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF,
-                                      const ValueTable *MLiveOuts,
-                                      const ValueTable *MLiveIns,
-                                      MachineInstr &Here, uint64_t InstrNum);
-
-  Optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF,
-                                          const ValueTable *MLiveOuts,
-                                          const ValueTable *MLiveIns,
-                                          MachineInstr &Here,
-                                          uint64_t InstrNum);
+  std::optional<ValueIDNum> resolveDbgPHIs(MachineFunction &MF,
+                                           const ValueTable *MLiveOuts,
+                                           const ValueTable *MLiveIns,
+                                           MachineInstr &Here,
+                                           uint64_t InstrNum);
+
+  std::optional<ValueIDNum> resolveDbgPHIsImpl(MachineFunction &MF,
+                                               const ValueTable *MLiveOuts,
+                                               const ValueTable *MLiveIns,
+                                               MachineInstr &Here,
+                                               uint64_t InstrNum);
 
   /// Step through the function, recording register definitions and movements
   /// in an MLocTracker. Convert the observations into a per-block transfer
@@ -1353,7 +1355,7 @@ class InstrRefBasedLDV : public LDVImpl {
               const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs,
               const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders);
 
-  Optional<ValueIDNum> pickOperandPHILoc(
+  std::optional<ValueIDNum> pickOperandPHILoc(
       unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts,
       FuncValueTable &MOutLocs,
       const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders);
@@ -1417,7 +1419,7 @@ class InstrRefBasedLDV : public LDVImpl {
            && !MemOperand->getPseudoValue()->isAliased(MFI);
   }
 
-  Optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI);
+  std::optional<LocIdx> findLocationForMemOperand(const MachineInstr &MI);
 };
 
 } // namespace LiveDebugValues

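One detail worth noting in the header above: SeenDbgPHIs caches std::optional<ValueIDNum> values, so a failed resolution is memoized just like a successful one and resolveDbgPHIsImpl never re-runs for the same DBG_INSTR_REF. A small sketch of that shape, with invented names (solve, Cache) in place of the LLVM ones:

  #include <map>
  #include <optional>

  // Stand-in for resolveDbgPHIsImpl: expensive, and may find nothing.
  static std::optional<int> solveExpensively(int Key) {
    if (Key % 2 == 0)
      return Key / 2;
    return std::nullopt;
  }

  static std::map<int, std::optional<int>> Cache; // like SeenDbgPHIs

  std::optional<int> solve(int Key) {
    auto It = Cache.find(Key);
    if (It != Cache.end())
      return It->second;         // hit: even a cached "no result" short-circuits
    std::optional<int> Result = solveExpensively(Key);
    Cache.insert({Key, Result}); // cache success and failure alike
    return Result;
  }
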
diff --git a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp
index 222bbb0784cd9..5ad67607e4bf0 100644
--- a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp
+++ b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp
@@ -859,7 +859,7 @@ class VarLocBasedLDV : public LDVImpl {
     /// Insert a set of ranges.
     void insertFromLocSet(const VarLocSet &ToLoad, const VarLocMap &Map);
 
-    llvm::Optional<LocIndices> getEntryValueBackup(DebugVariable Var);
+    std::optional<LocIndices> getEntryValueBackup(DebugVariable Var);
 
     /// Empty the set.
     void clear() {
@@ -946,9 +946,9 @@ class VarLocBasedLDV : public LDVImpl {
 
   /// If a given instruction is identified as a restore, return the spill location
   /// and set \p Reg to the restored register.
-  Optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI,
-                                                  MachineFunction *MF,
-                                                  Register &Reg);
+  std::optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI,
+                                                       MachineFunction *MF,
+                                                       Register &Reg);
   /// Given a spill instruction, extract the register and offset used to
   /// address the spill location in a target independent way.
   VarLoc::SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI);
@@ -1110,7 +1110,7 @@ void VarLocBasedLDV::OpenRangesSet::insert(LocIndices VarLocIDs,
 
 /// Return the Loc ID of an entry value backup location, if it exists for the
 /// variable.
-llvm::Optional<LocIndices>
+std::optional<LocIndices>
 VarLocBasedLDV::OpenRangesSet::getEntryValueBackup(DebugVariable Var) {
   auto It = EntryValuesBackupVars.find(Var);
   if (It != EntryValuesBackupVars.end())
@@ -1398,7 +1398,7 @@ void VarLocBasedLDV::emitEntryValues(MachineInstr &MI,
       continue;
 
     auto DebugVar = VL.Var;
-    Optional<LocIndices> EntryValBackupIDs =
+    std::optional<LocIndices> EntryValBackupIDs =
         OpenRanges.getEntryValueBackup(DebugVar);
 
     // If the parameter has the entry value backup, it means we should
@@ -1618,9 +1618,9 @@ bool VarLocBasedLDV::isLocationSpill(const MachineInstr &MI,
   return false;
 }
 
-Optional<VarLocBasedLDV::VarLoc::SpillLoc>
+std::optional<VarLocBasedLDV::VarLoc::SpillLoc>
 VarLocBasedLDV::isRestoreInstruction(const MachineInstr &MI,
-                                      MachineFunction *MF, Register &Reg) {
+                                     MachineFunction *MF, Register &Reg) {
   if (!MI.hasOneMemOperand())
     return std::nullopt;
 
@@ -1647,7 +1647,7 @@ void VarLocBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI,
   MachineFunction *MF = MI.getMF();
   TransferKind TKind;
   Register Reg;
-  Optional<VarLoc::SpillLoc> Loc;
+  std::optional<VarLoc::SpillLoc> Loc;
 
   LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump(););
 

diff --git a/llvm/lib/CodeGen/LiveDebugVariables.cpp b/llvm/lib/CodeGen/LiveDebugVariables.cpp
index 4db941ea2d9e6..d211536708e8c 100644
--- a/llvm/lib/CodeGen/LiveDebugVariables.cpp
+++ b/llvm/lib/CodeGen/LiveDebugVariables.cpp
@@ -441,11 +441,12 @@ class UserValue {
   /// VNInfo.
   /// \param [out] Kills Append end points of VNI's live range to Kills.
   /// \param LIS Live intervals analysis.
-  void extendDef(SlotIndex Idx, DbgVariableValue DbgValue,
-                 SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>>
-                     &LiveIntervalInfo,
-                 Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
-                 LiveIntervals &LIS);
+  void
+  extendDef(SlotIndex Idx, DbgVariableValue DbgValue,
+            SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>>
+                &LiveIntervalInfo,
+            std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
+            LiveIntervals &LIS);
 
   /// The value in LI may be copied to other registers. Determine if
   /// any of the copies are available at the kill points, and add defs if
@@ -957,7 +958,7 @@ void UserValue::extendDef(
     SlotIndex Idx, DbgVariableValue DbgValue,
     SmallDenseMap<unsigned, std::pair<LiveRange *, const VNInfo *>>
         &LiveIntervalInfo,
-    Optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
+    std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> &Kills,
     LiveIntervals &LIS) {
   SlotIndex Start = Idx;
   MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start);
@@ -1131,7 +1132,7 @@ void UserValue::computeIntervals(MachineRegisterInfo &MRI,
         LIs[LocNo] = {LI, VNI};
     }
     if (ShouldExtendDef) {
-      Optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills;
+      std::optional<std::pair<SlotIndex, SmallVector<unsigned>>> Kills;
       extendDef(Idx, DbgValue, LIs, Kills, LIS);
 
       if (Kills) {

diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 8bcf5e25373dc..21c95e1d13c21 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -376,10 +376,11 @@ struct ParsedMachineOperand {
   MachineOperand Operand;
   StringRef::iterator Begin;
   StringRef::iterator End;
-  Optional<unsigned> TiedDefIdx;
+  std::optional<unsigned> TiedDefIdx;
 
   ParsedMachineOperand(const MachineOperand &Operand, StringRef::iterator Begin,
-                       StringRef::iterator End, Optional<unsigned> &TiedDefIdx)
+                       StringRef::iterator End,
+                       std::optional<unsigned> &TiedDefIdx)
       : Operand(Operand), Begin(Begin), End(End), TiedDefIdx(TiedDefIdx) {
     if (TiedDefIdx)
       assert(Operand.isReg() && Operand.isUse() &&
@@ -448,7 +449,8 @@ class MIParser {
   bool parseSubRegisterIndex(unsigned &SubReg);
   bool parseRegisterTiedDefIndex(unsigned &TiedDefIdx);
   bool parseRegisterOperand(MachineOperand &Dest,
-                            Optional<unsigned> &TiedDefIdx, bool IsDef = false);
+                            std::optional<unsigned> &TiedDefIdx,
+                            bool IsDef = false);
   bool parseImmediateOperand(MachineOperand &Dest);
   bool parseIRConstant(StringRef::iterator Loc, StringRef StringValue,
                        const Constant *&C);
@@ -488,17 +490,17 @@ class MIParser {
   bool parseLiveoutRegisterMaskOperand(MachineOperand &Dest);
   bool parseMachineOperand(const unsigned OpCode, const unsigned OpIdx,
                            MachineOperand &Dest,
-                           Optional<unsigned> &TiedDefIdx);
+                           std::optional<unsigned> &TiedDefIdx);
   bool parseMachineOperandAndTargetFlags(const unsigned OpCode,
                                          const unsigned OpIdx,
                                          MachineOperand &Dest,
-                                         Optional<unsigned> &TiedDefIdx);
+                                         std::optional<unsigned> &TiedDefIdx);
   bool parseOffset(int64_t &Offset);
   bool parseIRBlockAddressTaken(BasicBlock *&BB);
   bool parseAlignment(uint64_t &Alignment);
   bool parseAddrspace(unsigned &Addrspace);
-  bool parseSectionID(Optional<MBBSectionID> &SID);
-  bool parseBBID(Optional<unsigned> &BBID);
+  bool parseSectionID(std::optional<MBBSectionID> &SID);
+  bool parseBBID(std::optional<unsigned> &BBID);
   bool parseOperandsOffset(MachineOperand &Op);
   bool parseIRValue(const Value *&V);
   bool parseMemoryOperandFlag(MachineMemOperand::Flags &Flags);
@@ -641,7 +643,7 @@ bool MIParser::consumeIfPresent(MIToken::TokenKind TokenKind) {
 }
 
 // Parse Machine Basic Block Section ID.
-bool MIParser::parseSectionID(Optional<MBBSectionID> &SID) {
+bool MIParser::parseSectionID(std::optional<MBBSectionID> &SID) {
   assert(Token.is(MIToken::kw_bbsections));
   lex();
   if (Token.is(MIToken::IntegerLiteral)) {
@@ -663,7 +665,7 @@ bool MIParser::parseSectionID(Optional<MBBSectionID> &SID) {
 }
 
 // Parse Machine Basic Block ID.
-bool MIParser::parseBBID(Optional<unsigned> &BBID) {
+bool MIParser::parseBBID(std::optional<unsigned> &BBID) {
   assert(Token.is(MIToken::kw_bb_id));
   lex();
   unsigned Value = 0;
@@ -688,9 +690,9 @@ bool MIParser::parseBasicBlockDefinition(
   bool IsLandingPad = false;
   bool IsInlineAsmBrIndirectTarget = false;
   bool IsEHFuncletEntry = false;
-  Optional<MBBSectionID> SectionID;
+  std::optional<MBBSectionID> SectionID;
   uint64_t Alignment = 0;
-  Optional<unsigned> BBID;
+  std::optional<unsigned> BBID;
   BasicBlock *BB = nullptr;
   if (consumeIfPresent(MIToken::lparen)) {
     do {
@@ -1021,7 +1023,7 @@ bool MIParser::parse(MachineInstr *&MI) {
   SmallVector<ParsedMachineOperand, 8> Operands;
   while (Token.isRegister() || Token.isRegisterFlag()) {
     auto Loc = Token.location();
-    Optional<unsigned> TiedDefIdx;
+    std::optional<unsigned> TiedDefIdx;
     if (parseRegisterOperand(MO, TiedDefIdx, /*IsDef=*/true))
       return true;
     Operands.push_back(
@@ -1047,7 +1049,7 @@ bool MIParser::parse(MachineInstr *&MI) {
          Token.isNot(MIToken::kw_debug_instr_number) &&
          Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) {
     auto Loc = Token.location();
-    Optional<unsigned> TiedDefIdx;
+    std::optional<unsigned> TiedDefIdx;
     if (parseMachineOperandAndTargetFlags(OpCode, Operands.size(), MO, TiedDefIdx))
       return true;
     Operands.push_back(
@@ -1706,7 +1708,7 @@ bool MIParser::assignRegisterTies(MachineInstr &MI,
 }
 
 bool MIParser::parseRegisterOperand(MachineOperand &Dest,
-                                    Optional<unsigned> &TiedDefIdx,
+                                    std::optional<unsigned> &TiedDefIdx,
                                     bool IsDef) {
   unsigned Flags = IsDef ? RegState::Define : 0;
   while (Token.isRegisterFlag()) {
@@ -2812,7 +2814,7 @@ bool MIParser::parseLiveoutRegisterMaskOperand(MachineOperand &Dest) {
 
 bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx,
                                    MachineOperand &Dest,
-                                   Optional<unsigned> &TiedDefIdx) {
+                                   std::optional<unsigned> &TiedDefIdx) {
   switch (Token.kind()) {
   case MIToken::kw_implicit:
   case MIToken::kw_implicit_define:
@@ -2917,7 +2919,7 @@ bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx,
 
 bool MIParser::parseMachineOperandAndTargetFlags(
     const unsigned OpCode, const unsigned OpIdx, MachineOperand &Dest,
-    Optional<unsigned> &TiedDefIdx) {
+    std::optional<unsigned> &TiedDefIdx) {
   unsigned TF = 0;
   bool HasTargetFlags = false;
   if (Token.is(MIToken::kw_target_flags)) {

diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
index d3840355130d9..8896f056f4b02 100644
--- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
@@ -1113,7 +1113,7 @@ int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition(
 }
 
 bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) {
-  Optional<float> CachedReward;
+  std::optional<float> CachedReward;
   auto GetReward = [&]() {
     if (!CachedReward)
       CachedReward = static_cast<float>(

diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 6c886716646fd..271cd01e4ecc7 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -2329,7 +2329,7 @@ static unsigned getSpillSlotSize(const MMOList &Accesses,
   return Size;
 }
 
-Optional<unsigned>
+std::optional<unsigned>
 MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
   int FI;
   if (TII->isStoreToStackSlotPostFE(*this, FI)) {
@@ -2340,7 +2340,7 @@ MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
   return std::nullopt;
 }
 
-Optional<unsigned>
+std::optional<unsigned>
 MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
   MMOList Accesses;
   if (TII->hasStoreToStackSlot(*this, Accesses))
@@ -2348,7 +2348,7 @@ MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
   return std::nullopt;
 }
 
-Optional<unsigned>
+std::optional<unsigned>
 MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
   int FI;
   if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
@@ -2359,7 +2359,7 @@ MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
   return std::nullopt;
 }
 
-Optional<unsigned>
+std::optional<unsigned>
 MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
   MMOList Accesses;
   if (TII->hasLoadFromStackSlot(*this, Accesses))

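All four spill-size accessors above now return std::optional<unsigned>, which callers typically consume with an if-condition declaration so the size is only readable when it exists. A self-contained sketch of that calling pattern (Instr is a placeholder, not the real MachineInstr):

  #include <iostream>
  #include <optional>

  struct Instr { // placeholder for MachineInstr
    bool IsSpill = false;
    unsigned Size = 0;

    std::optional<unsigned> getSpillSize() const {
      if (IsSpill)
        return Size;
      return std::nullopt; // not a spill: nothing to report
    }
  };

  int main() {
    Instr MI{true, 8};
    if (std::optional<unsigned> Sz = MI.getSpillSize())
      std::cout << "spill of " << *Sz << " bytes\n";
  }
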
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index bb2d1dac5c668..a3ffce905b67b 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -756,8 +756,9 @@ void MachineOperand::print(raw_ostream &OS, LLT TypeToPrint,
 }
 
 void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
-                           LLT TypeToPrint, Optional<unsigned> OpIdx, bool PrintDef,
-                           bool IsStandalone, bool ShouldPrintRegisterTies,
+                           LLT TypeToPrint, std::optional<unsigned> OpIdx,
+                           bool PrintDef, bool IsStandalone,
+                           bool ShouldPrintRegisterTies,
                            unsigned TiedOperandIdx,
                            const TargetRegisterInfo *TRI,
                            const TargetIntrinsicInfo *IntrinsicInfo) const {

diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index 195fd45c223c7..20d251715bd9a 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -1281,7 +1281,7 @@ class KernelRewriter {
   // Insert a phi that carries LoopReg from the loop body and InitReg otherwise.
   // If InitReg is not given, it is chosen arbitrarily. It will either be undef
   // or will be chosen so as to share another phi.
-  Register phi(Register LoopReg, Optional<Register> InitReg = {},
+  Register phi(Register LoopReg, std::optional<Register> InitReg = {},
                const TargetRegisterClass *RC = nullptr);
   // Create an undef register of the given register class.
   Register undef(const TargetRegisterClass *RC);
@@ -1389,7 +1389,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) {
 
   // First, dive through the phi chain to find the defaults for the generated
   // phis.
-  SmallVector<Optional<Register>, 4> Defaults;
+  SmallVector<std::optional<Register>, 4> Defaults;
   Register LoopReg = Reg;
   auto LoopProducer = Producer;
   while (LoopProducer->isPHI() && LoopProducer->getParent() == BB) {
@@ -1400,7 +1400,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) {
   }
   int LoopProducerStage = S.getStage(LoopProducer);
 
-  Optional<Register> IllegalPhiDefault;
+  std::optional<Register> IllegalPhiDefault;
 
   if (LoopProducerStage == -1) {
     // Do nothing.
@@ -1432,9 +1432,9 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) {
       // If we need more phis than we have defaults for, pad out with undefs for
       // the earliest phis, which are at the end of the defaults chain (the
       // chain is in reverse order).
-      Defaults.resize(Defaults.size() + StageDiff, Defaults.empty()
-                                                       ? Optional<Register>()
-                                                       : Defaults.back());
+      Defaults.resize(Defaults.size() + StageDiff,
+                      Defaults.empty() ? std::optional<Register>()
+                                       : Defaults.back());
     }
   }
 
@@ -1466,7 +1466,7 @@ Register KernelRewriter::remapUse(Register Reg, MachineInstr &MI) {
   return LoopReg;
 }
 
-Register KernelRewriter::phi(Register LoopReg, Optional<Register> InitReg,
+Register KernelRewriter::phi(Register LoopReg, std::optional<Register> InitReg,
                              const TargetRegisterClass *RC) {
   // If the init register is not undef, try and find an existing phi.
   if (InitReg) {

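Two details in the ModuloSchedule hunks carry over unchanged from llvm::Optional: a default-constructed std::optional is disengaged, so the resize() padding and the `InitReg = {}` default argument both keep their meaning. A small self-contained sketch of those two patterns (plain C++17, std::vector standing in for SmallVector):

#include <cassert>
#include <optional>
#include <vector>

int main() {
  std::vector<std::optional<int>> Defaults;

  // Pad with disengaged optionals when the chain is empty, otherwise repeat
  // the last known default; the same shape as the resize() call above.
  std::size_t StageDiff = 3;
  Defaults.resize(Defaults.size() + StageDiff,
                  Defaults.empty() ? std::optional<int>() : Defaults.back());
  assert(Defaults.size() == 3 && !Defaults.front().has_value());

  // A default argument of {} (as in KernelRewriter::phi) also produces a
  // disengaged std::optional, exactly as it did for llvm::Optional.
  std::optional<int> InitReg = {};
  assert(!InitReg);
  return 0;
}
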
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 31e37c4cd7e3e..be1b7703d4d77 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -66,7 +66,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -273,11 +272,11 @@ namespace {
       : MI(MI), CommutePair(std::make_pair(Idx1, Idx2)) {}
 
     MachineInstr *getMI() const { return MI; }
-    Optional<IndexPair> getCommutePair() const { return CommutePair; }
+    std::optional<IndexPair> getCommutePair() const { return CommutePair; }
 
   private:
     MachineInstr *MI;
-    Optional<IndexPair> CommutePair;
+    std::optional<IndexPair> CommutePair;
   };
 
   /// Helper class to hold a reply for ValueTracker queries.

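Note the header side of the change: "llvm/ADT/Optional.h" is dropped and <optional> (already pulled in transitively here) takes its place. For the data-member pattern itself, a minimal sketch in the same shape as RecurrenceInstr above (illustrative class, not the LLVM one, which also stores a MachineInstr pointer):

#include <optional>
#include <utility>

using IndexPair = std::pair<unsigned, unsigned>;

// The commute pair may or may not be set at construction time, and the
// getter simply copies the optional out.
class RecurrenceInstrLike {
public:
  RecurrenceInstrLike() = default; // CommutePair stays disengaged
  RecurrenceInstrLike(unsigned Idx1, unsigned Idx2)
      : CommutePair(std::make_pair(Idx1, Idx2)) {}
  std::optional<IndexPair> getCommutePair() const { return CommutePair; }

private:
  std::optional<IndexPair> CommutePair;
};

int main() {
  RecurrenceInstrLike R(0, 1);
  return R.getCommutePair() ? 0 : 1;
}
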
diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
index a3936eacf560c..46838570a2fcb 100644
--- a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
+++ b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
@@ -10,7 +10,6 @@
 #define LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H
 
 #include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/CodeGen/Register.h"
@@ -126,9 +125,9 @@ class RegAllocEvictionAdvisor {
 
   // Get the upper limit of elements in the given Order we need to analyze.
   // TODO: this is a heuristic; we could consider learning it.
-  Optional<unsigned> getOrderLimit(const LiveInterval &VirtReg,
-                                   const AllocationOrder &Order,
-                                   unsigned CostPerUseLimit) const;
+  std::optional<unsigned> getOrderLimit(const LiveInterval &VirtReg,
+                                        const AllocationOrder &Order,
+                                        unsigned CostPerUseLimit) const;
 
   // Determine if it's worth trying to allocate this reg, given the
   // CostPerUseLimit

diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp
index ead91f692b819..7c0f1d5d3288f 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -523,7 +523,7 @@ bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
   return !Matrix->isPhysRegUsed(PhysReg);
 }
 
-Optional<unsigned>
+std::optional<unsigned>
 RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg,
                                        const AllocationOrder &Order,
                                        unsigned CostPerUseLimit) const {

diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp
index ad73e7617ffb1..5d4d9823d99e3 100644
--- a/llvm/lib/CodeGen/SelectOptimize.cpp
+++ b/llvm/lib/CodeGen/SelectOptimize.cpp
@@ -200,7 +200,7 @@ class SelectOptimize : public FunctionPass {
   SmallPtrSet<const Instruction *, 2> getSIset(const SelectGroups &SIGroups);
 
   // Returns the latency cost of a given instruction.
-  Optional<uint64_t> computeInstCost(const Instruction *I);
+  std::optional<uint64_t> computeInstCost(const Instruction *I);
 
   // Returns the misprediction cost of a given select when converted to branch.
   Scaled64 getMispredictionCost(const SelectInst *SI, const Scaled64 CondCost);
@@ -977,11 +977,11 @@ SelectOptimize::getSIset(const SelectGroups &SIGroups) {
   return SIset;
 }
 
-Optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) {
+std::optional<uint64_t> SelectOptimize::computeInstCost(const Instruction *I) {
   InstructionCost ICost =
       TTI->getInstructionCost(I, TargetTransformInfo::TCK_Latency);
   if (auto OC = ICost.getValue())
-    return Optional<uint64_t>(*OC);
+    return std::optional<uint64_t>(*OC);
   return std::nullopt;
 }
 

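The computeInstCost hunk keeps the explicit `std::optional<uint64_t>(*OC)` spelling from the old code, although a bare `return *OC;` would also convert. A standalone sketch of the same shape (plain C++17; getValue is a stand-in for InstructionCost::getValue, which itself returns an optional):

#include <cstdint>
#include <optional>

static std::optional<int64_t> getValue(bool KnownCost) {
  return KnownCost ? std::optional<int64_t>(5) : std::nullopt;
}

static std::optional<uint64_t> computeInstCost(bool KnownCost) {
  if (auto OC = getValue(KnownCost))
    return std::optional<uint64_t>(*OC); // explicit, as in the diff;
                                         // `return *OC;` would also convert
  return std::nullopt;
}

int main() { return computeInstCost(true).value_or(0) == 5 ? 0 : 1; }
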
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index f9a73351e2b3e..633198fd9e635 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7929,7 +7929,7 @@ struct ByteProvider {
 ///                 LOAD
 ///
 /// *ExtractVectorElement
-static const Optional<ByteProvider>
+static const std::optional<ByteProvider>
 calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
                       std::optional<uint64_t> VectorIndex,
                       unsigned StartingIndex = 0) {
@@ -8003,7 +8003,7 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
 
     if (Index >= NarrowByteWidth)
       return Op.getOpcode() == ISD::ZERO_EXTEND
-                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
+                 ? std::optional<ByteProvider>(ByteProvider::getConstantZero())
                  : std::nullopt;
     return calculateByteProvider(NarrowOp, Index, Depth + 1, VectorIndex,
                                  StartingIndex);
@@ -8053,7 +8053,7 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
     // question
     if (Index >= NarrowByteWidth)
       return L->getExtensionType() == ISD::ZEXTLOAD
-                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
+                 ? std::optional<ByteProvider>(ByteProvider::getConstantZero())
                  : std::nullopt;
 
     unsigned BPVectorIndex = VectorIndex.value_or(0U);
@@ -8075,8 +8075,8 @@ static unsigned bigEndianByteAt(unsigned BW, unsigned i) {
 // Check if the bytes offsets we are looking at match with either big or
 // little endian value loaded. Return true for big endian, false for little
 // endian, and std::nullopt if match failed.
-static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
-                                  int64_t FirstOffset) {
+static std::optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
+                                       int64_t FirstOffset) {
   // The endianness can be decided only when there are at least 2 bytes.
   unsigned Width = ByteOffsets.size();
   if (Width < 2)
@@ -8367,7 +8367,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
   SDValue Chain;
 
   SmallPtrSet<LoadSDNode *, 8> Loads;
-  Optional<ByteProvider> FirstByteProvider;
+  std::optional<ByteProvider> FirstByteProvider;
   int64_t FirstOffset = INT64_MAX;
 
   // Check if all the bytes of the OR we are looking at are loaded from the same
@@ -8460,7 +8460,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
 
   // Check if the bytes of the OR we are looking at match with either big or
   // little endian value load
-  Optional<bool> IsBigEndian = isBigEndian(
+  std::optional<bool> IsBigEndian = isBigEndian(
       makeArrayRef(ByteOffsets).drop_back(ZeroExtendedBytes), FirstOffset);
   if (!IsBigEndian)
     return SDValue();
@@ -25157,7 +25157,7 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
     bool IsAtomic;
     SDValue BasePtr;
     int64_t Offset;
-    Optional<int64_t> NumBytes;
+    std::optional<int64_t> NumBytes;
     MachineMemOperand *MMO;
   };
 
@@ -25172,21 +25172,26 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
                            : 0;
       uint64_t Size =
           MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize());
-      return {LSN->isVolatile(), LSN->isAtomic(), LSN->getBasePtr(),
+      return {LSN->isVolatile(),
+              LSN->isAtomic(),
+              LSN->getBasePtr(),
               Offset /*base offset*/,
-              Optional<int64_t>(Size),
+              std::optional<int64_t>(Size),
               LSN->getMemOperand()};
     }
     if (const auto *LN = cast<LifetimeSDNode>(N))
-      return {false /*isVolatile*/, /*isAtomic*/ false, LN->getOperand(1),
+      return {false /*isVolatile*/,
+              /*isAtomic*/ false,
+              LN->getOperand(1),
               (LN->hasOffset()) ? LN->getOffset() : 0,
-              (LN->hasOffset()) ? Optional<int64_t>(LN->getSize())
-                                : Optional<int64_t>(),
+              (LN->hasOffset()) ? std::optional<int64_t>(LN->getSize())
+                                : std::optional<int64_t>(),
               (MachineMemOperand *)nullptr};
     // Default.
-    return {false /*isvolatile*/, /*isAtomic*/ false, SDValue(),
-            (int64_t)0 /*offset*/,
-            Optional<int64_t>() /*size*/, (MachineMemOperand *)nullptr};
+    return {false /*isvolatile*/,
+            /*isAtomic*/ false,          SDValue(),
+            (int64_t)0 /*offset*/,       std::optional<int64_t>() /*size*/,
+            (MachineMemOperand *)nullptr};
   };
 
   MemUseCharacteristics MUC0 = getCharacteristics(Op0),

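The LifetimeSDNode and default cases above show why both arms of the conditional expressions spell the full optional type: a plain value and std::nullopt_t have no common type. A minimal sketch of that constraint (plain C++17, variable names chosen to echo the code above):

#include <cstdint>
#include <optional>

int main() {
  bool HasOffset = true;
  int64_t Size = 16;

  // Both arms must name the optional type, because
  // `HasOffset ? Size : std::nullopt` would not compile.
  std::optional<int64_t> NumBytes =
      HasOffset ? std::optional<int64_t>(Size) : std::optional<int64_t>();

  return NumBytes.value_or(0) == 16 ? 0 : 1;
}
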
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index a4eb634e4ae2b..790cba95cc8ab 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -6593,9 +6593,10 @@ SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) {
 //  Align:     If 0, don't allow use of a wider type
 //  WidenEx:   If Align is not 0, the amount additional we can load/store from.
 
-static Optional<EVT> findMemType(SelectionDAG &DAG, const TargetLowering &TLI,
-                                 unsigned Width, EVT WidenVT,
-                                 unsigned Align = 0, unsigned WidenEx = 0) {
+static std::optional<EVT> findMemType(SelectionDAG &DAG,
+                                      const TargetLowering &TLI, unsigned Width,
+                                      EVT WidenVT, unsigned Align = 0,
+                                      unsigned WidenEx = 0) {
   EVT WidenEltVT = WidenVT.getVectorElementType();
   const bool Scalable = WidenVT.isScalableVector();
   unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinSize();
@@ -6718,7 +6719,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
       (!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlign().value();
 
   // Find the vector type that can load from.
-  Optional<EVT> FirstVT =
+  std::optional<EVT> FirstVT =
       findMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
                   WidthDiff.getKnownMinSize());
 
@@ -6731,7 +6732,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
   // Unless we're able to load in one instruction we must work out how to load
   // the remainder.
   if (!TypeSize::isKnownLE(LdWidth, FirstVTWidth)) {
-    Optional<EVT> NewVT = FirstVT;
+    std::optional<EVT> NewVT = FirstVT;
     TypeSize RemainingWidth = LdWidth;
     TypeSize NewVTWidth = FirstVTWidth;
     do {
@@ -6954,7 +6955,7 @@ bool DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
 
   while (StWidth.isNonZero()) {
     // Find the largest vector type we can store with.
-    Optional<EVT> NewVT =
+    std::optional<EVT> NewVT =
         findMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT);
     if (!NewVT)
       return false;

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 2cb93dedb7680..e9f61e7828b61 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -493,7 +493,7 @@ bool ISD::isVPReduction(unsigned Opcode) {
 }
 
 /// The operand position of the vector mask.
-Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
+std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
   switch (Opcode) {
   default:
     return std::nullopt;
@@ -505,7 +505,7 @@ Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
 }
 
 /// The operand position of the explicit vector length parameter.
-Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
+std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
   switch (Opcode) {
   default:
     return std::nullopt;
@@ -5617,8 +5617,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
   return V;
 }
 
-static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
-                                       const APInt &C2) {
+static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
+                                      const APInt &C2) {
   switch (Opcode) {
   case ISD::ADD:  return C1 + C2;
   case ISD::SUB:  return C1 - C2;
@@ -5699,10 +5699,9 @@ static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
 
 // Handle constant folding with UNDEF.
 // TODO: Handle more cases.
-static llvm::Optional<APInt> FoldValueWithUndef(unsigned Opcode,
-                                                const APInt &C1, bool IsUndef1,
-                                                const APInt &C2,
-                                                bool IsUndef2) {
+static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1,
+                                               bool IsUndef1, const APInt &C2,
+                                               bool IsUndef2) {
   if (!(IsUndef1 || IsUndef2))
     return FoldValue(Opcode, C1, C2);
 
@@ -5787,7 +5786,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
         if (C1->isOpaque() || C2->isOpaque())
           return SDValue();
 
-        Optional<APInt> FoldAttempt =
+        std::optional<APInt> FoldAttempt =
             FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
         if (!FoldAttempt)
           return SDValue();
@@ -5832,7 +5831,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
           BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
         SmallVector<APInt> RawBits;
         for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) {
-          Optional<APInt> Fold = FoldValueWithUndef(
+          std::optional<APInt> Fold = FoldValueWithUndef(
               Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]);
           if (!Fold)
             break;
@@ -11967,7 +11966,7 @@ bool BuildVectorSDNode::isConstant() const {
   return true;
 }
 
-Optional<std::pair<APInt, APInt>>
+std::optional<std::pair<APInt, APInt>>
 BuildVectorSDNode::isConstantSequence() const {
   unsigned NumOps = getNumOperands();
   if (NumOps < 2)

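Beyond dropping the now-redundant `llvm::` qualifier, FoldValue keeps its style of returning plain values from each switch case and letting them convert to the optional return type. A self-contained sketch of that shape (plain C++17, toy opcodes only):

#include <optional>

// Each case returns a plain value that converts implicitly to the optional
// return type; rejected or unhandled folds yield std::nullopt.
static std::optional<int> foldValue(char Opcode, int C1, int C2) {
  switch (Opcode) {
  case '+':
    return C1 + C2;
  case '-':
    return C1 - C2;
  case '/':
    if (C2 == 0)
      return std::nullopt; // the fold would be undefined
    return C1 / C2;
  default:
    return std::nullopt;
  }
}

int main() {
  return foldValue('/', 8, 0) ? 1 : 0; // disengaged: fold was rejected
}
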
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
index d236433f6fb49..a432d8e92bca5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
@@ -85,9 +85,9 @@ bool BaseIndexOffset::equalBaseIndex(const BaseIndexOffset &Other,
 }
 
 bool BaseIndexOffset::computeAliasing(const SDNode *Op0,
-                                      const Optional<int64_t> NumBytes0,
+                                      const std::optional<int64_t> NumBytes0,
                                       const SDNode *Op1,
-                                      const Optional<int64_t> NumBytes1,
+                                      const std::optional<int64_t> NumBytes1,
                                       const SelectionDAG &DAG, bool &IsAlias) {
 
   BaseIndexOffset BasePtr0 = match(Op0, DAG);

diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 39e2c39eb29cb..708596a343658 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -159,9 +159,9 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType,
 /// Utility function for reservePreviousStackSlotForValue. Tries to find
 /// stack slot index to which we have spilled value for previous statepoints.
 /// LookUpDepth specifies maximum DFS depth this function is allowed to look.
-static Optional<int> findPreviousSpillSlot(const Value *Val,
-                                           SelectionDAGBuilder &Builder,
-                                           int LookUpDepth) {
+static std::optional<int> findPreviousSpillSlot(const Value *Val,
+                                                SelectionDAGBuilder &Builder,
+                                                int LookUpDepth) {
   // Can not look any further - give up now
   if (LookUpDepth <= 0)
     return std::nullopt;
@@ -196,10 +196,10 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
   // All incoming values should have same known stack slot, otherwise result
   // is unknown.
   if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
-    Optional<int> MergedResult;
+    std::optional<int> MergedResult;
 
     for (const auto &IncomingValue : Phi->incoming_values()) {
-      Optional<int> SpillSlot =
+      std::optional<int> SpillSlot =
           findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
       if (!SpillSlot)
         return std::nullopt;
@@ -283,7 +283,7 @@ static void reservePreviousStackSlotForValue(const Value *IncomingValue,
     return;
 
   const int LookUpDepth = 6;
-  Optional<int> Index =
+  std::optional<int> Index =
       findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth);
   if (!Index)
     return;

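The PHI handling in findPreviousSpillSlot is a nice example of merging optionals: every incoming value must agree on one known slot, and any unknown input makes the whole result unknown. A standalone sketch of that merge shape (plain C++17; a simplification of the hunk above, not the full LLVM logic):

#include <optional>
#include <vector>

static std::optional<int>
mergeSlots(const std::vector<std::optional<int>> &Incoming) {
  std::optional<int> MergedResult;
  for (const std::optional<int> &SpillSlot : Incoming) {
    if (!SpillSlot)
      return std::nullopt; // one unknown input makes the merge unknown
    if (!MergedResult)
      MergedResult = SpillSlot; // first known slot
    else if (*MergedResult != *SpillSlot)
      return std::nullopt; // conflicting slots
  }
  return MergedResult;
}

int main() { return mergeSlots({3, 3, 3}).value_or(-1) == 3 ? 0 : 1; }
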
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
index 79869039fea0b..92db89cc0915b 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -17,7 +17,7 @@
 
 using namespace llvm;
 
-Optional<RegOrConstant>
+std::optional<RegOrConstant>
 AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI) {
   if (auto Splat = getVectorSplat(MI, MRI))
@@ -31,7 +31,7 @@ AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
   return RegOrConstant(Src);
 }
 
-Optional<int64_t>
+std::optional<int64_t>
 AArch64GISelUtils::getAArch64VectorSplatScalar(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI) {
   auto Splat = getAArch64VectorSplat(MI, MRI);

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
index 334147a60a765..791db7efaf0be 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
@@ -31,13 +31,14 @@ constexpr bool isLegalArithImmed(const uint64_t C) {
 
 /// \returns A value when \p MI is a vector splat of a Register or constant.
 /// Checks for generic opcodes and AArch64-specific generic opcodes.
-Optional<RegOrConstant> getAArch64VectorSplat(const MachineInstr &MI,
-                                              const MachineRegisterInfo &MRI);
+std::optional<RegOrConstant>
+getAArch64VectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI);
 
 /// \returns A value when \p MI is a constant vector splat.
 /// Checks for generic opcodes and AArch64-specific generic opcodes.
-Optional<int64_t> getAArch64VectorSplatScalar(const MachineInstr &MI,
-                                              const MachineRegisterInfo &MRI);
+std::optional<int64_t>
+getAArch64VectorSplatScalar(const MachineInstr &MI,
+                            const MachineRegisterInfo &MRI);
 
 /// \returns true if \p MaybeSub and \p Pred are part of a CMN tree for an
 /// integer compare.

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index a53efa6edc9b8..98acd4c02b224 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -150,7 +150,7 @@ class AArch64InstructionSelector : public InstructionSelector {
   /// The lane inserted into is defined by \p LaneIdx. The vector source
   /// register is given by \p SrcReg. The register containing the element is
   /// given by \p EltReg.
-  MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg,
+  MachineInstr *emitLaneInsert(std::optional<Register> DstReg, Register SrcReg,
                                Register EltReg, unsigned LaneIdx,
                                const RegisterBank &RB,
                                MachineIRBuilder &MIRBuilder) const;
@@ -205,7 +205,7 @@ class AArch64InstructionSelector : public InstructionSelector {
                                          MachineIRBuilder &MIRBuilder) const;
 
   // Emit a vector concat operation.
-  MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1,
+  MachineInstr *emitVectorConcat(std::optional<Register> Dst, Register Op1,
                                  Register Op2,
                                  MachineIRBuilder &MIRBuilder) const;
 
@@ -218,7 +218,7 @@ class AArch64InstructionSelector : public InstructionSelector {
   /// \p Pred if given is the intended predicate to use.
   MachineInstr *
   emitFPCompare(Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
-                Optional<CmpInst::Predicate> = std::nullopt) const;
+                std::optional<CmpInst::Predicate> = std::nullopt) const;
 
   MachineInstr *
   emitInstr(unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps,
@@ -276,7 +276,7 @@ class AArch64InstructionSelector : public InstructionSelector {
   MachineInstr *emitSelect(Register Dst, Register LHS, Register RHS,
                            AArch64CC::CondCode CC,
                            MachineIRBuilder &MIRBuilder) const;
-  MachineInstr *emitExtractVectorElt(Optional<Register> DstReg,
+  MachineInstr *emitExtractVectorElt(std::optional<Register> DstReg,
                                      const RegisterBank &DstRB, LLT ScalarTy,
                                      Register VecReg, unsigned LaneIdx,
                                      MachineIRBuilder &MIRBuilder) const;
@@ -674,7 +674,7 @@ static Register createQTuple(ArrayRef<Register> Regs, MachineIRBuilder &MIB) {
   return createTuple(Regs, RegClassIDs, SubRegs, MIB);
 }
 
-static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
+static std::optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
   auto &MI = *Root.getParent();
   auto &MBB = *MI.getParent();
   auto &MF = *MBB.getParent();
@@ -1782,8 +1782,8 @@ bool AArch64InstructionSelector::selectCompareBranch(
 
 /// Returns the element immediate value of a vector shift operand if found.
 /// This needs to detect a splat-like operation, e.g. a G_BUILD_VECTOR.
-static Optional<int64_t> getVectorShiftImm(Register Reg,
-                                           MachineRegisterInfo &MRI) {
+static std::optional<int64_t> getVectorShiftImm(Register Reg,
+                                                MachineRegisterInfo &MRI) {
   assert(MRI.getType(Reg).isVector() && "Expected a *vector* shift operand");
   MachineInstr *OpMI = MRI.getVRegDef(Reg);
   return getAArch64VectorSplatScalar(*OpMI, MRI);
@@ -1791,8 +1791,9 @@ static Optional<int64_t> getVectorShiftImm(Register Reg,
 
 /// Matches and returns the shift immediate value for a SHL instruction given
 /// a shift operand.
-static Optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg, MachineRegisterInfo &MRI) {
-  Optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
+static std::optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg,
+                                              MachineRegisterInfo &MRI) {
+  std::optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
   if (!ShiftImm)
     return std::nullopt;
   // Check the immediate is in range for a SHL.
@@ -1836,7 +1837,7 @@ bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I,
 
   // Check if we have a vector of constants on RHS that we can select as the
   // immediate form.
-  Optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
+  std::optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
 
   unsigned Opc = 0;
   if (Ty == LLT::fixed_vector(2, 64)) {
@@ -3102,7 +3103,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
 
   case TargetOpcode::G_PTRMASK: {
     Register MaskReg = I.getOperand(2).getReg();
-    Optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
+    std::optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
     // TODO: Implement arbitrary cases
     if (!MaskVal || !isShiftedMask_64(*MaskVal))
       return false;
@@ -4112,7 +4113,7 @@ static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
 }
 
 MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
-    Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
+    std::optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
     Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
   unsigned CopyOpc = 0;
@@ -4645,10 +4646,9 @@ MachineInstr *AArch64InstructionSelector::emitCSetForFCmp(
   return &*OrMI;
 }
 
-MachineInstr *
-AArch64InstructionSelector::emitFPCompare(Register LHS, Register RHS,
-                                          MachineIRBuilder &MIRBuilder,
-                                          Optional<CmpInst::Predicate> Pred) const {
+MachineInstr *AArch64InstructionSelector::emitFPCompare(
+    Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
+    std::optional<CmpInst::Predicate> Pred) const {
   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
   LLT Ty = MRI.getType(LHS);
   if (Ty.isVector())
@@ -4689,7 +4689,7 @@ AArch64InstructionSelector::emitFPCompare(Register LHS, Register RHS,
 }
 
 MachineInstr *AArch64InstructionSelector::emitVectorConcat(
-    Optional<Register> Dst, Register Op1, Register Op2,
+    std::optional<Register> Dst, Register Op1, Register Op2,
     MachineIRBuilder &MIRBuilder) const {
   // We implement a vector concat by:
   // 1. Use scalar_to_vector to insert the lower vector into the larger dest
@@ -4865,7 +4865,7 @@ MachineInstr *AArch64InstructionSelector::emitConditionalComparison(
   LLT OpTy = MRI.getType(LHS);
   assert(OpTy.getSizeInBits() == 32 || OpTy.getSizeInBits() == 64);
   unsigned CCmpOpc;
-  Optional<ValueAndVReg> C;
+  std::optional<ValueAndVReg> C;
   if (CmpInst::isIntPredicate(CC)) {
     C = getIConstantVRegValWithLookThrough(RHS, MRI);
     if (C && C->Value.ult(32))
@@ -5259,7 +5259,7 @@ bool AArch64InstructionSelector::selectShuffleVector(
 }
 
 MachineInstr *AArch64InstructionSelector::emitLaneInsert(
-    Optional<Register> DstReg, Register SrcReg, Register EltReg,
+    std::optional<Register> DstReg, Register SrcReg, Register EltReg,
     unsigned LaneIdx, const RegisterBank &RB,
     MachineIRBuilder &MIRBuilder) const {
   MachineInstr *InsElt = nullptr;
@@ -6669,7 +6669,7 @@ AArch64_AM::ShiftExtendType AArch64InstructionSelector::getExtendTypeForInst(
   if (Opc != TargetOpcode::G_AND)
     return AArch64_AM::InvalidShiftExtend;
 
-  Optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2));
+  std::optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2));
   if (!MaybeAndMask)
     return AArch64_AM::InvalidShiftExtend;
   uint64_t AndMask = *MaybeAndMask;
@@ -6724,7 +6724,7 @@ AArch64InstructionSelector::selectArithExtendedRegister(
   if (RootDef->getOpcode() == TargetOpcode::G_SHL) {
     // Look for a constant on the RHS of the shift.
     MachineOperand &RHS = RootDef->getOperand(2);
-    Optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS);
+    std::optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS);
     if (!MaybeShiftVal)
       return std::nullopt;
     ShiftVal = *MaybeShiftVal;
@@ -6774,7 +6774,7 @@ void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
          "Expected G_CONSTANT");
-  Optional<int64_t> CstVal =
+  std::optional<int64_t> CstVal =
       getIConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI);
   assert(CstVal && "Expected constant value");
   MIB.addImm(*CstVal);

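The emitLaneInsert/emitVectorConcat/emitFPCompare signatures above all use an optional parameter: callers may pass a concrete destination or std::nullopt to request a fresh one, and the std::nullopt default works exactly like the old None default. A minimal sketch of that calling convention (plain C++17; Register is a stand-in alias, not llvm::Register):

#include <cstdio>
#include <optional>

using Register = unsigned; // illustration only

// Callers may pass a destination register or std::nullopt for a fresh one.
static Register emitLaneInsertLike(std::optional<Register> DstReg,
                                   Register SrcReg) {
  Register Dst = DstReg ? *DstReg : SrcReg + 100; // pretend +100 is a new vreg
  return Dst;
}

int main() {
  std::printf("%u %u\n", emitLaneInsertLike(std::nullopt, 1),
              emitLaneInsertLike(7u, 1)); // plain value converts implicitly
  return 0;
}
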
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 73bd446304bed..63b7645e32619 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -360,13 +360,9 @@ bool AMDGPUAsmPrinter::doFinalization(Module &M) {
 
 // Print comments that apply to both callable functions and entry points.
 void AMDGPUAsmPrinter::emitCommonFunctionComments(
-  uint32_t NumVGPR,
-  Optional<uint32_t> NumAGPR,
-  uint32_t TotalNumVGPR,
-  uint32_t NumSGPR,
-  uint64_t ScratchSize,
-  uint64_t CodeSize,
-  const AMDGPUMachineFunction *MFI) {
+    uint32_t NumVGPR, std::optional<uint32_t> NumAGPR, uint32_t TotalNumVGPR,
+    uint32_t NumSGPR, uint64_t ScratchSize, uint64_t CodeSize,
+    const AMDGPUMachineFunction *MFI) {
   OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false);
   OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false);
   OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false);
@@ -523,24 +519,21 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
       const AMDGPUResourceUsageAnalysis::SIFunctionResourceInfo &Info =
           ResourceUsage->getResourceInfo(&MF.getFunction());
       emitCommonFunctionComments(
-        Info.NumVGPR,
-        STM.hasMAIInsts() ? Info.NumAGPR : Optional<uint32_t>(),
-        Info.getTotalNumVGPRs(STM),
-        Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
-        Info.PrivateSegmentSize,
-        getFunctionCodeSize(MF), MFI);
+          Info.NumVGPR,
+          STM.hasMAIInsts() ? Info.NumAGPR : std::optional<uint32_t>(),
+          Info.getTotalNumVGPRs(STM),
+          Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
+          Info.PrivateSegmentSize, getFunctionCodeSize(MF), MFI);
       return false;
     }
 
     OutStreamer->emitRawComment(" Kernel info:", false);
-    emitCommonFunctionComments(CurrentProgramInfo.NumArchVGPR,
-                               STM.hasMAIInsts()
-                                 ? CurrentProgramInfo.NumAccVGPR
-                                 : Optional<uint32_t>(),
-                               CurrentProgramInfo.NumVGPR,
-                               CurrentProgramInfo.NumSGPR,
-                               CurrentProgramInfo.ScratchSize,
-                               getFunctionCodeSize(MF), MFI);
+    emitCommonFunctionComments(
+        CurrentProgramInfo.NumArchVGPR,
+        STM.hasMAIInsts() ? CurrentProgramInfo.NumAccVGPR
+                          : std::optional<uint32_t>(),
+        CurrentProgramInfo.NumVGPR, CurrentProgramInfo.NumSGPR,
+        CurrentProgramInfo.ScratchSize, getFunctionCodeSize(MF), MFI);
 
     OutStreamer->emitRawComment(
       " FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false);

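Unlike the DAGCombiner ternaries earlier, the calls above only spell the optional on one arm: once one arm has type std::optional<uint32_t>, the plain uint32_t arm converts to it. A short sketch of that asymmetric form (plain C++17, names echoing the hunk above):

#include <cstdint>
#include <optional>

int main() {
  bool HasMAIInsts = false;
  uint32_t NumAGPR = 12;

  // One arm already has type std::optional<uint32_t>, so the plain value
  // arm converts to it; only one side needs to spell the optional.
  std::optional<uint32_t> Arg =
      HasMAIInsts ? NumAGPR : std::optional<uint32_t>();

  return Arg.has_value() ? 1 : 0;
}
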
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
index 2881b8d7bccaa..ea12086751a46 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -63,12 +63,10 @@ class AMDGPUAsmPrinter final : public AsmPrinter {
                        const SIProgramInfo &KernelInfo);
   void emitPALFunctionMetadata(const MachineFunction &MF);
   void emitCommonFunctionComments(uint32_t NumVGPR,
-                                  Optional<uint32_t> NumAGPR,
-                                  uint32_t TotalNumVGPR,
-                                  uint32_t NumSGPR,
-                                  uint64_t ScratchSize,
-                                  uint64_t CodeSize,
-                                  const AMDGPUMachineFunction* MFI);
+                                  std::optional<uint32_t> NumAGPR,
+                                  uint32_t TotalNumVGPR, uint32_t NumSGPR,
+                                  uint64_t ScratchSize, uint64_t CodeSize,
+                                  const AMDGPUMachineFunction *MFI);
   void emitResourceUsageRemarks(const MachineFunction &MF,
                                 const SIProgramInfo &CurrentProgramInfo,
                                 bool isModuleEntryFunction, bool hasMAIInsts);

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index a32fd52f55f6e..7a626fac849a3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -813,7 +813,7 @@ bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
     } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
       LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
     } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
-      Optional<uint32_t> Id =
+      std::optional<uint32_t> Id =
           AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
       if (Id.has_value()) {
         MIRBuilder.buildConstant(InputReg, Id.value());

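The has_value()/value() spelling in the hunk above survives the migration unchanged, since std::optional provides both accessors with the same semantics as llvm::Optional. A tiny sketch contrasting the two equivalent spellings (plain C++17, hypothetical stand-in for the metadata query):

#include <cstdint>
#include <optional>

static std::optional<uint32_t> getLDSKernelIdLike(bool Present) {
  if (Present)
    return 5u;
  return std::nullopt;
}

int main() {
  std::optional<uint32_t> Id = getLDSKernelIdLike(true);
  // has_value()/value() carry over from llvm::Optional unchanged ...
  uint32_t A = Id.has_value() ? Id.value() : 0;
  // ... and are equivalent to the terser operator bool / operator* form.
  uint32_t B = Id ? *Id : 0;
  return A == B ? 0 : 1;
}
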
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
index c16d8ee51a7af..069baf748bfa2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -150,7 +150,7 @@ static bool isInv2Pi(const APFloat &APF) {
 // additional cost to negate them.
 static bool isConstantCostlierToNegate(MachineInstr &MI, Register Reg,
                                        MachineRegisterInfo &MRI) {
-  Optional<FPValueAndVReg> FPValReg;
+  std::optional<FPValueAndVReg> FPValReg;
   if (mi_match(Reg, MRI, m_GFCstOrSplat(FPValReg))) {
     if (FPValReg->Value.isZero() && !FPValReg->Value.isNegative())
       return true;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index 159624892c4e0..7a3446a71b470 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -500,16 +500,16 @@ void MetadataStreamerMsgPackV3::verify(StringRef HSAMetadataString) const {
   }
 }
 
-Optional<StringRef>
+std::optional<StringRef>
 MetadataStreamerMsgPackV3::getAccessQualifier(StringRef AccQual) const {
-  return StringSwitch<Optional<StringRef>>(AccQual)
+  return StringSwitch<std::optional<StringRef>>(AccQual)
       .Case("read_only", StringRef("read_only"))
       .Case("write_only", StringRef("write_only"))
       .Case("read_write", StringRef("read_write"))
       .Default(std::nullopt);
 }
 
-Optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier(
+std::optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier(
     unsigned AddressSpace) const {
   switch (AddressSpace) {
   case AMDGPUAS::PRIVATE_ADDRESS:

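The StringSwitch above changes only its template argument, and .Default(std::nullopt) replaces the old .Default(None). A plain-C++ stand-in for what that lookup computes (std::string in place of StringRef, no LLVM dependency):

#include <optional>
#include <string>

// Known qualifiers map to themselves, everything else to std::nullopt,
// mirroring the StringSwitch<std::optional<StringRef>> above.
static std::optional<std::string>
getAccessQualifier(const std::string &AccQual) {
  if (AccQual == "read_only" || AccQual == "write_only" ||
      AccQual == "read_write")
    return AccQual;
  return std::nullopt; // .Default(std::nullopt) in the real code
}

int main() {
  return getAccessQualifier("read_write") && !getAccessQualifier("none") ? 0
                                                                         : 1;
}
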
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h
index f9344a5fbecac..597bcadc78d08 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h
@@ -69,9 +69,10 @@ class MetadataStreamerMsgPackV3 : public MetadataStreamer {
 
   void verify(StringRef HSAMetadataString) const;
 
-  Optional<StringRef> getAccessQualifier(StringRef AccQual) const;
+  std::optional<StringRef> getAccessQualifier(StringRef AccQual) const;
 
-  Optional<StringRef> getAddressSpaceQualifier(unsigned AddressSpace) const;
+  std::optional<StringRef>
+  getAddressSpaceQualifier(unsigned AddressSpace) const;
 
   StringRef getValueKind(Type *Ty, StringRef TypeQual,
                          StringRef BaseTypeName) const;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index 50b94afe3c09f..d29936c1d4dd1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -93,7 +93,7 @@ class SchedGroup {
   SchedGroupMask SGMask;
 
   // Maximum number of SUnits that can be added to this group.
-  Optional<unsigned> MaxSize;
+  std::optional<unsigned> MaxSize;
 
   // SchedGroups will only synchronize with other SchedGroups that have the same
   // SyncID.
@@ -175,13 +175,13 @@ class SchedGroup {
 
   SchedGroupMask getMask() { return SGMask; }
 
-  SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize,
+  SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize,
              ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
       : SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) {
     SGID = NumSchedGroups++;
   }
 
-  SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, int SyncID,
+  SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize, int SyncID,
              ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
       : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) {
     SGID = NumSchedGroups++;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 7662d3a1b74fd..d8a6bd80f9283 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1904,7 +1904,7 @@ bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
   // GFX9 and GFX10 have signed byte immediate offsets. The immediate
   // offset for S_BUFFER instructions is unsigned.
   int64_t ByteOffset = IsBuffer ? C->getZExtValue() : C->getSExtValue();
-  Optional<int64_t> EncodedOffset =
+  std::optional<int64_t> EncodedOffset =
       AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, IsBuffer);
   if (EncodedOffset && Offset && !Imm32Only) {
     *Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 9d90cd35a91a9..32f4b79c1933b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -144,7 +144,7 @@ bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
       const TargetRegisterClass *SrcRC
         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
 
-      Optional<ValueAndVReg> ConstVal =
+      std::optional<ValueAndVReg> ConstVal =
           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
       if (ConstVal) {
         unsigned MovOpc =
@@ -975,7 +975,7 @@ bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
 
   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
 
-  Optional<ValueAndVReg> ConstSelect =
+  std::optional<ValueAndVReg> ConstSelect =
       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
   if (ConstSelect) {
     // The selector has to be an inline immediate, so we can use whatever for
@@ -984,7 +984,7 @@ bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
     MIB.addImm(ConstSelect->Value.getSExtValue() &
                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
   } else {
-    Optional<ValueAndVReg> ConstVal =
+    std::optional<ValueAndVReg> ConstVal =
         getIConstantVRegValWithLookThrough(Val, *MRI);
 
     // If the value written is an inline immediate, we can get away without a
@@ -1383,7 +1383,7 @@ bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
   if (Size != STI.getWavefrontSize())
     return false;
 
-  Optional<ValueAndVReg> Arg =
+  std::optional<ValueAndVReg> Arg =
       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
 
   if (Arg) {
@@ -3082,7 +3082,7 @@ bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
   }
 
   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
-  Optional<ValueAndVReg> MaybeVOffset =
+  std::optional<ValueAndVReg> MaybeVOffset =
       getIConstantVRegValWithLookThrough(VOffset, *MRI);
   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
 
@@ -3784,7 +3784,7 @@ bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
     return false;
 
   const GEPInfo &GEPI = AddrInfo[0];
-  Optional<int64_t> EncodedImm =
+  std::optional<int64_t> EncodedImm =
       AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);
 
   if (SOffset && Offset) {
@@ -3856,7 +3856,7 @@ AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
 
   const GEPInfo &GEPInfo = AddrInfo[0];
   Register PtrReg = GEPInfo.SgprParts[0];
-  Optional<int64_t> EncodedImm =
+  std::optional<int64_t> EncodedImm =
       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
   if (!EncodedImm)
     return std::nullopt;
@@ -4293,7 +4293,8 @@ bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
                                                     unsigned ShAmtBits) const {
   assert(MI.getOpcode() == TargetOpcode::G_AND);
 
-  Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
+  std::optional<APInt> RHS =
+      getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
   if (!RHS)
     return false;
 
@@ -4477,7 +4478,7 @@ AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
     return {Root, 0};
 
   MachineOperand &RHS = RootI->getOperand(2);
-  Optional<ValueAndVReg> MaybeOffset =
+  std::optional<ValueAndVReg> MaybeOffset =
       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
   if (!MaybeOffset)
     return {Root, 0};
@@ -4804,10 +4805,10 @@ AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
 }
 
 /// Get an immediate that must be 32-bits, and treated as zero extended.
-static Optional<uint64_t> getConstantZext32Val(Register Reg,
-                                               const MachineRegisterInfo &MRI) {
+static std::optional<uint64_t>
+getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {
   // getIConstantVRegVal sexts any values, so see if that matters.
-  Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
+  std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
   if (!OffsetVal || !isInt<32>(*OffsetVal))
     return std::nullopt;
   return Lo_32(*OffsetVal);
@@ -4815,11 +4816,11 @@ static Optional<uint64_t> getConstantZext32Val(Register Reg,
 
 InstructionSelector::ComplexRendererFns
 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
-  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
+  std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
   if (!OffsetVal)
     return {};
 
-  Optional<int64_t> EncodedImm =
+  std::optional<int64_t> EncodedImm =
       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
   if (!EncodedImm)
     return {};
@@ -4831,12 +4832,12 @@ InstructionSelector::ComplexRendererFns
 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
 
-  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
+  std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
   if (!OffsetVal)
     return {};
 
-  Optional<int64_t> EncodedImm
-    = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
+  std::optional<int64_t> EncodedImm =
+      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
   if (!EncodedImm)
     return {};
 
@@ -4854,7 +4855,7 @@ AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
   if (!SOffset)
     return std::nullopt;
 
-  Optional<int64_t> EncodedOffset =
+  std::optional<int64_t> EncodedOffset =
       AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true);
   if (!EncodedOffset)
     return std::nullopt;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 52b601d95f071..bac333612a7d1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2340,7 +2340,7 @@ bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
   // FIXME: Artifact combiner probably should have replaced the truncated
   // constant before this, so we shouldn't need
   // getIConstantVRegValWithLookThrough.
-  Optional<ValueAndVReg> MaybeIdxVal =
+  std::optional<ValueAndVReg> MaybeIdxVal =
       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
   if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
     return true;
@@ -2374,7 +2374,7 @@ bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
   // FIXME: Artifact combiner probably should have replaced the truncated
   // constant before this, so we shouldn't need
   // getIConstantVRegValWithLookThrough.
-  Optional<ValueAndVReg> MaybeIdxVal =
+  std::optional<ValueAndVReg> MaybeIdxVal =
       getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
   if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
     return true;
@@ -4188,7 +4188,7 @@ bool AMDGPULegalizerInfo::getLDSKernelId(Register DstReg,
                                          MachineRegisterInfo &MRI,
                                          MachineIRBuilder &B) const {
   Function &F = B.getMF().getFunction();
-  Optional<uint32_t> KnownSize =
+  std::optional<uint32_t> KnownSize =
       AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
   if (KnownSize.has_value())
     B.buildConstant(DstReg, KnownSize.value());
@@ -4282,11 +4282,11 @@ void AMDGPULegalizerInfo::updateBufferMMO(MachineMemOperand *MMO,
                                           Register VOffset, Register SOffset,
                                           unsigned ImmOffset, Register VIndex,
                                           MachineRegisterInfo &MRI) const {
-  Optional<ValueAndVReg> MaybeVOffsetVal =
+  std::optional<ValueAndVReg> MaybeVOffsetVal =
       getIConstantVRegValWithLookThrough(VOffset, MRI);
-  Optional<ValueAndVReg> MaybeSOffsetVal =
+  std::optional<ValueAndVReg> MaybeSOffsetVal =
       getIConstantVRegValWithLookThrough(SOffset, MRI);
-  Optional<ValueAndVReg> MaybeVIndexVal =
+  std::optional<ValueAndVReg> MaybeVIndexVal =
       getIConstantVRegValWithLookThrough(VIndex, MRI);
   // If the combined VOffset + SOffset + ImmOffset + strided VIndex is constant,
   // update the MMO with that offset. The stride is unknown so we can only do
@@ -5288,7 +5288,7 @@ bool AMDGPULegalizerInfo::legalizeTrapIntrinsic(MachineInstr &MI,
       ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
     return legalizeTrapEndpgm(MI, MRI, B);
 
-  if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) {
+  if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) {
     switch (*HsaAbiVer) {
     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
     case ELF::ELFABIVERSION_AMDGPU_HSA_V3:

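The legalizeTrapIntrinsic hunk shows the if-with-initializer idiom, which works identically for std::optional: the optional is declared in the condition, so the dereference is only reachable when a value is present. A standalone sketch (plain C++17; the stand-in function and the ABI value 3 are invented for illustration):

#include <cstdint>
#include <optional>

// Stand-in for AMDGPU::getHsaAbiVersion, which returns std::nullopt when
// the ABI version is unknown.
static std::optional<uint8_t> getHsaAbiVersionLike(bool Known) {
  if (Known)
    return uint8_t{3};
  return std::nullopt;
}

int main() {
  // Declaring the optional inside the if keeps *HsaAbiVer reachable only
  // when a value is actually present.
  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersionLike(true))
    return *HsaAbiVer == 3 ? 0 : 1;
  return 1;
}
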
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
index d8133a97e7319..3414225211115 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -196,7 +196,7 @@ void AMDGPUMachineFunction::allocateKnownAddressLDSGlobal(const Function &F) {
   }
 }
 
-Optional<uint32_t>
+std::optional<uint32_t>
 AMDGPUMachineFunction::getLDSKernelIdMetadata(const Function &F) {
   auto MD = F.getMetadata("llvm.amdgcn.lds.kernel.id");
   if (MD && MD->getNumOperands() == 1) {

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
index 4d97e5aab7545..8c5d7b0d10c27 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
@@ -116,7 +116,7 @@ class AMDGPUMachineFunction : public MachineFunctionInfo {
   static bool isKnownAddressLDSGlobal(const GlobalVariable &GV);
   static unsigned calculateKnownAddressOfLDSGlobal(const GlobalVariable &GV);
 
-  static Optional<uint32_t> getLDSKernelIdMetadata(const Function &F);
+  static std::optional<uint32_t> getLDSKernelIdMetadata(const Function &F);
 
   Align getDynLDSAlign() const { return DynLDSAlign; }
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
index 91ca7cd2f7879..9fa053f7e0a87 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
@@ -48,7 +48,8 @@ class AMDGPUMachineModuleInfo final : public MachineModuleInfoELF {
   ///
   /// \returns \p SSID's inclusion ordering, or "std::nullopt" if \p SSID is not
   /// supported by the AMDGPU target.
-  Optional<uint8_t> getSyncScopeInclusionOrdering(SyncScope::ID SSID) const {
+  std::optional<uint8_t>
+  getSyncScopeInclusionOrdering(SyncScope::ID SSID) const {
     if (SSID == SyncScope::SingleThread ||
         SSID == getSingleThreadOneAddressSpaceSSID())
       return 0;
@@ -122,7 +123,8 @@ class AMDGPUMachineModuleInfo final : public MachineModuleInfoELF {
   /// synchronization scope \p B, false if synchronization scope \p A is smaller
   /// than synchronization scope \p B, or "std::nullopt" if either
   /// synchronization scope \p A or \p B is not supported by the AMDGPU target.
-  Optional<bool> isSyncScopeInclusion(SyncScope::ID A, SyncScope::ID B) const {
+  std::optional<bool> isSyncScopeInclusion(SyncScope::ID A,
+                                           SyncScope::ID B) const {
     const auto &AIO = getSyncScopeInclusionOrdering(A);
     const auto &BIO = getSyncScopeInclusionOrdering(B);
     if (!AIO || !BIO)

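isSyncScopeInclusion composes two optional queries into an optional answer: if either scope ordering is unknown, the comparison itself is unknown. A self-contained sketch of that composition (plain C++17; the ordering function is a made-up stand-in, and the bool result converts implicitly):

#include <cstdint>
#include <optional>

// Stand-in ordering: scopes 0..3 are known, anything else is unsupported.
static std::optional<uint8_t> getInclusionOrdering(int SSID) {
  if (SSID < 0 || SSID > 3)
    return std::nullopt;
  return static_cast<uint8_t>(SSID);
}

// If either scope is unknown, the comparison itself is unknown.
static std::optional<bool> isSyncScopeInclusionLike(int A, int B) {
  const auto &AIO = getInclusionOrdering(A);
  const auto &BIO = getInclusionOrdering(B);
  if (!AIO || !BIO)
    return std::nullopt;
  return *AIO >= *BIO; // bool converts implicitly to std::optional<bool>
}

int main() { return isSyncScopeInclusionLike(2, 1).value_or(false) ? 0 : 1; }
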
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
index b0099ffd18a12..6e823fb8b1d8b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
@@ -91,7 +91,7 @@ class AMDGPUPropagateAttributes {
     }
 
     FeatureBitset Features;
-    Optional<Attribute> Attributes[NumAttr];
+    std::optional<Attribute> Attributes[NumAttr];
   };
 
   class Clone {
@@ -127,7 +127,8 @@ class AMDGPUPropagateAttributes {
   void setFeatures(Function &F, const FeatureBitset &NewFeatures);
 
   // Set new function's attributes in place.
-  void setAttributes(Function &F, const ArrayRef<Optional<Attribute>> NewAttrs);
+  void setAttributes(Function &F,
+                     const ArrayRef<std::optional<Attribute>> NewAttrs);
 
   std::string getFeatureString(const FeatureBitset &Features) const;
 
@@ -343,8 +344,8 @@ void AMDGPUPropagateAttributes::setFeatures(Function &F,
   F.addFnAttr("target-features", NewFeatureStr);
 }
 
-void AMDGPUPropagateAttributes::setAttributes(Function &F,
-    const ArrayRef<Optional<Attribute>> NewAttrs) {
+void AMDGPUPropagateAttributes::setAttributes(
+    Function &F, const ArrayRef<std::optional<Attribute>> NewAttrs) {
   LLVM_DEBUG(dbgs() << "Set attributes on " << F.getName() << ":\n");
   for (unsigned I = 0; I < NumAttr; ++I) {
     F.removeFnAttr(AttributeNames[I]);

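The Attributes member above is a fixed array of optionals: each slot either holds a tracked attribute or nothing, and the setAttributes loop inspects every slot. A minimal sketch of that storage shape (plain C++17; std::array and std::string stand in for the raw array and llvm::Attribute):

#include <array>
#include <optional>
#include <string>

constexpr unsigned NumAttr = 2;

// Each slot may hold an attribute value or nothing.
using AttrArray = std::array<std::optional<std::string>, NumAttr>;

static unsigned countSet(const AttrArray &Attrs) {
  unsigned N = 0;
  for (const std::optional<std::string> &A : Attrs)
    if (A)
      ++N;
  return N;
}

int main() {
  AttrArray Attrs;              // every slot starts disengaged
  Attrs[1] = "target-features"; // attribute name reused from the hunk above
  return countSet(Attrs) == 1 ? 0 : 1;
}
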
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
index b3671eee3553b..b4315950b225f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
@@ -161,7 +161,7 @@ bool AMDGPURegBankCombinerHelper::matchIntMinMaxToMed3(
 
   MinMaxMedOpc OpcodeTriple = getMinMaxPair(MI.getOpcode());
   Register Val;
-  Optional<ValueAndVReg> K0, K1;
+  std::optional<ValueAndVReg> K0, K1;
   // Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1.
   if (!matchMed<GCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
     return false;
@@ -206,7 +206,7 @@ bool AMDGPURegBankCombinerHelper::matchFPMinMaxToMed3(
   auto OpcodeTriple = getMinMaxPair(MI.getOpcode());
 
   Register Val;
-  Optional<FPValueAndVReg> K0, K1;
+  std::optional<FPValueAndVReg> K0, K1;
   // Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1.
   if (!matchMed<GFCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
     return false;
@@ -238,7 +238,7 @@ bool AMDGPURegBankCombinerHelper::matchFPMinMaxToClamp(MachineInstr &MI,
   // Clamp is available on all types after regbankselect (f16, f32, f64, v2f16).
   auto OpcodeTriple = getMinMaxPair(MI.getOpcode());
   Register Val;
-  Optional<FPValueAndVReg> K0, K1;
+  std::optional<FPValueAndVReg> K0, K1;
   // Match min(max(Val, K0), K1) or max(min(Val, K1), K0).
   if (!matchMed<GFCstOrSplatGFCstMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
     return false;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index fdeff4ee892e3..4d62244e54389 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1252,7 +1252,8 @@ static unsigned setBufferOffsets(MachineIRBuilder &B,
   const LLT S32 = LLT::scalar(32);
   MachineRegisterInfo *MRI = B.getMRI();
 
-  if (Optional<int64_t> Imm = getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
+  if (std::optional<int64_t> Imm =
+          getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
     uint32_t SOffset, ImmOffset;
     if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget,
                                  Alignment)) {

diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 9aa709fc86757..e6d9d5ddfbf47 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1283,10 +1283,10 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
   /// \param SGPRBlocks [out] Result SGPR block count.
   bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
                           bool FlatScrUsed, bool XNACKUsed,
-                          Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
-                          SMRange VGPRRange, unsigned NextFreeSGPR,
-                          SMRange SGPRRange, unsigned &VGPRBlocks,
-                          unsigned &SGPRBlocks);
+                          std::optional<bool> EnableWavefrontSize32,
+                          unsigned NextFreeVGPR, SMRange VGPRRange,
+                          unsigned NextFreeSGPR, SMRange SGPRRange,
+                          unsigned &VGPRBlocks, unsigned &SGPRBlocks);
   bool ParseDirectiveAMDGCNTarget();
   bool ParseDirectiveAMDHSAKernel();
   bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
@@ -1334,7 +1334,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
 
   bool isRegister();
   bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
-  Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
+  std::optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
   void initializeGprCountSymbol(RegisterKind RegKind);
   bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
                              unsigned RegWidth);
@@ -1667,7 +1667,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
                              const SMLoc &IDLoc);
   bool validateExeczVcczOperands(const OperandVector &Operands);
   bool validateTFE(const MCInst &Inst, const OperandVector &Operands);
-  Optional<StringRef> validateLdsDirect(const MCInst &Inst);
+  std::optional<StringRef> validateLdsDirect(const MCInst &Inst);
   unsigned getConstantBusLimit(unsigned Opcode) const;
   bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
   bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
@@ -2853,7 +2853,7 @@ bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
   return false;
 }
 
-Optional<StringRef>
+std::optional<StringRef>
 AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
   switch (RegKind) {
   case IS_VGPR:
@@ -4056,7 +4056,8 @@ static bool IsRevOpcode(const unsigned Opcode)
   }
 }
 
-Optional<StringRef> AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
+std::optional<StringRef>
+AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
 
   using namespace SIInstrFlags;
   const unsigned Opcode = Inst.getOpcode();
@@ -4914,9 +4915,9 @@ bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
 
 bool AMDGPUAsmParser::calculateGPRBlocks(
     const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
-    bool XNACKUsed, Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
-    SMRange VGPRRange, unsigned NextFreeSGPR, SMRange SGPRRange,
-    unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
+    bool XNACKUsed, std::optional<bool> EnableWavefrontSize32,
+    unsigned NextFreeVGPR, SMRange VGPRRange, unsigned NextFreeSGPR,
+    SMRange SGPRRange, unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
   // TODO(scott.linder): These calculations are duplicated from
   // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
   IsaVersion Version = getIsaVersion(getSTI().getCPU());
@@ -4984,7 +4985,7 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
   std::optional<unsigned> ExplicitUserSGPRCount;
   bool ReserveVCC = true;
   bool ReserveFlatScr = true;
-  Optional<bool> EnableWavefrontSize32;
+  std::optional<bool> EnableWavefrontSize32;
 
   while (true) {
     while (trySkipToken(AsmToken::EndOfStatement));

diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
index 64c3ea6693c1c..7a4af1af33d63 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -407,7 +407,7 @@ void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
   if (IVersion.Major >= 7 && !ReserveFlatScr && !hasArchitectedFlatScratch(STI))
     OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';
 
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
     switch (*HsaAbiVer) {
     default:
       break;
@@ -594,7 +594,7 @@ unsigned AMDGPUTargetELFStreamer::getEFlagsUnknownOS() {
 unsigned AMDGPUTargetELFStreamer::getEFlagsAMDHSA() {
   assert(STI.getTargetTriple().getOS() == Triple::AMDHSA);
 
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
     switch (*HsaAbiVer) {
     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
     case ELF::ELFABIVERSION_AMDGPU_HSA_V3:

diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
index 030af8a7161b3..50511794a013f 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
@@ -36,7 +36,7 @@ class AMDGPUTargetStreamer : public MCTargetStreamer {
 
 protected:
   // TODO: Move HSAMetadataStream to AMDGPUTargetStreamer.
-  Optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID;
+  std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID;
 
   MCContext &getContext() const { return Streamer.getContext(); }
 
@@ -98,10 +98,10 @@ class AMDGPUTargetStreamer : public MCTargetStreamer {
   static StringRef getArchNameFromElfMach(unsigned ElfMach);
   static unsigned getElfMach(StringRef GPU);
 
-  const Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const {
+  const std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const {
     return TargetID;
   }
-  Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() {
+  std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() {
     return TargetID;
   }
   void initializeTargetID(const MCSubtargetInfo &STI) {

diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index c785cfdaaef82..4bac27065c326 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -59,7 +59,7 @@ static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
 static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
                                            LivePhysRegs &LiveRegs,
                                            Register &TempSGPR,
-                                           Optional<int> &FrameIndex,
+                                           std::optional<int> &FrameIndex,
                                            bool IsFP) {
   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF.getFrameInfo();
@@ -773,8 +773,8 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
   // turn on all lanes before doing the spill to memory.
   Register ScratchExecCopy;
 
-  Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
-  Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
+  std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
+  std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
 
   // VGPRs used for SGPR->VGPR spills
   for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg :
@@ -990,8 +990,8 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
   const Register BasePtrReg =
       TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
 
-  Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
-  Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
+  std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
+  std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
 
   if (RoundedSize != 0 && hasFP(MF)) {
     auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b3fb27d3f809d..ffbac7fb3c161 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1702,7 +1702,7 @@ SDValue SITargetLowering::getLDSKernelId(SelectionDAG &DAG,
                                          const SDLoc &SL) const {
 
   Function &F = DAG.getMachineFunction().getFunction();
-  Optional<uint32_t> KnownSize =
+  std::optional<uint32_t> KnownSize =
       AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
   if (KnownSize.has_value())
     return DAG.getConstant(KnownSize.value(), SL, MVT::i32);
@@ -2856,7 +2856,8 @@ void SITargetLowering::passSpecialInputs(
       // input for kernels, and is computed from the kernarg segment pointer.
       InputReg = getImplicitArgPtr(DAG, DL);
     } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
-      Optional<uint32_t> Id = AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
+      std::optional<uint32_t> Id =
+          AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
       if (Id.has_value()) {
         InputReg = DAG.getConstant(Id.value(), DL, ArgVT);
       } else {
@@ -5421,7 +5422,7 @@ SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
       Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
     return lowerTrapEndpgm(Op, DAG);
 
-  if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
+  if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
     switch (*HsaAbiVer) {
     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
     case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
@@ -10240,7 +10241,7 @@ bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF,
   if (Opcode == AMDGPU::G_FCANONICALIZE)
     return true;
 
-  Optional<FPValueAndVReg> FCR;
+  std::optional<FPValueAndVReg> FCR;
   // Constant splat (can be padded with undef) or scalar constant.
   if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) {
     if (FCR->Value.isSignaling())

diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 214a16f9ce4a5..18c1875201549 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -263,7 +263,7 @@ class SILoadStoreOptimizer : public MachineFunctionPass {
                            int32_t NewOffset) const;
   Register computeBase(MachineInstr &MI, const MemAddress &Addr) const;
   MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
-  Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
+  std::optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
   void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const;
   /// Promotes constant offset to the immediate by adjusting the base. It
   /// tries to use a base from the nearby instructions that allows it to have
@@ -1983,7 +1983,7 @@ void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
   TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
 }
 
-Optional<int32_t>
+std::optional<int32_t>
 SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
   if (Op.isImm())
     return Op.getImm();

diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 26a62317a5e5e..a7f253b352c11 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -340,7 +340,7 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
         return false;
       }
 
-      Optional<int> SpillFI;
+      std::optional<int> SpillFI;
       // We need to preserve inactive lanes, so always save, even caller-save
       // registers.
       if (!isEntryFunction()) {

diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 70a6009e4f5d8..cbe78f437ccfe 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -424,7 +424,7 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
   // Current recorded maximum possible occupancy.
   unsigned Occupancy;
 
-  mutable Optional<bool> UsesAGPRs;
+  mutable std::optional<bool> UsesAGPRs;
 
   MCPhysReg getNextUserSGPR() const;
 
@@ -437,9 +437,9 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
 
     // If the VGPR is used for SGPR spills in a non-entrypoint function, the
     // stack slot used to save/restore it in the prolog/epilog.
-    Optional<int> FI;
+    std::optional<int> FI;
 
-    SGPRSpillVGPR(Register V, Optional<int> F) : VGPR(V), FI(F) {}
+    SGPRSpillVGPR(Register V, std::optional<int> F) : VGPR(V), FI(F) {}
   };
 
   struct VGPRSpillToAGPR {
@@ -483,7 +483,7 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
 
   // Emergency stack slot. Sometimes, we create this before finalizing the stack
   // frame, so save it here and add it to the RegScavenger later.
-  Optional<int> ScavengeFI;
+  std::optional<int> ScavengeFI;
 
 private:
   Register VGPRForAGPRCopy;
@@ -501,12 +501,12 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
   /// If this is set, an SGPR used for save/restore of the register used for the
   /// frame pointer.
   Register SGPRForFPSaveRestoreCopy;
-  Optional<int> FramePointerSaveIndex;
+  std::optional<int> FramePointerSaveIndex;
 
   /// If this is set, an SGPR used for save/restore of the register used for the
   /// base pointer.
   Register SGPRForBPSaveRestoreCopy;
-  Optional<int> BasePointerSaveIndex;
+  std::optional<int> BasePointerSaveIndex;
 
   bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg);
 
@@ -573,7 +573,7 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
                               bool ResetSGPRSpillStackIDs);
 
   int getScavengeFI(MachineFrameInfo &MFI, const SIRegisterInfo &TRI);
-  Optional<int> getOptionalScavengeFI() const { return ScavengeFI; }
+  std::optional<int> getOptionalScavengeFI() const { return ScavengeFI; }
 
   unsigned getBytesInStackArgArea() const {
     return BytesInStackArgArea;

diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 2cbfcca550ef8..eb6a9f9ef37ce 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -221,7 +221,7 @@ class SIMemOpAccess final {
   /// the SI atomic scope it corresponds to, the address spaces it
   /// covers, and whether the memory ordering applies between address
   /// spaces.
-  Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
+  std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
   toSIAtomicScope(SyncScope::ID SSID, SIAtomicAddrSpace InstrAddrSpace) const;
 
   /// \return Return a bit set of the address spaces accessed by \p AS.
@@ -229,8 +229,8 @@ class SIMemOpAccess final {
 
   /// \returns Info constructed from \p MI, which has at least machine memory
   /// operand.
-  Optional<SIMemOpInfo> constructFromMIWithMMO(
-      const MachineBasicBlock::iterator &MI) const;
+  std::optional<SIMemOpInfo>
+  constructFromMIWithMMO(const MachineBasicBlock::iterator &MI) const;
 
 public:
   /// Construct class to support accessing the machine memory operands
@@ -238,23 +238,23 @@ class SIMemOpAccess final {
   SIMemOpAccess(MachineFunction &MF);
 
   /// \returns Load info if \p MI is a load operation, "std::nullopt" otherwise.
-  Optional<SIMemOpInfo> getLoadInfo(
-      const MachineBasicBlock::iterator &MI) const;
+  std::optional<SIMemOpInfo>
+  getLoadInfo(const MachineBasicBlock::iterator &MI) const;
 
   /// \returns Store info if \p MI is a store operation, "std::nullopt"
   /// otherwise.
-  Optional<SIMemOpInfo> getStoreInfo(
-      const MachineBasicBlock::iterator &MI) const;
+  std::optional<SIMemOpInfo>
+  getStoreInfo(const MachineBasicBlock::iterator &MI) const;
 
   /// \returns Atomic fence info if \p MI is an atomic fence operation,
   /// "std::nullopt" otherwise.
-  Optional<SIMemOpInfo> getAtomicFenceInfo(
-      const MachineBasicBlock::iterator &MI) const;
+  std::optional<SIMemOpInfo>
+  getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const;
 
   /// \returns Atomic cmpxchg/rmw info if \p MI is an atomic cmpxchg or
   /// rmw operation, "std::nullopt" otherwise.
-  Optional<SIMemOpInfo> getAtomicCmpxchgOrRmwInfo(
-      const MachineBasicBlock::iterator &MI) const;
+  std::optional<SIMemOpInfo>
+  getAtomicCmpxchgOrRmwInfo(const MachineBasicBlock::iterator &MI) const;
 };
 
 class SICacheControl {
@@ -622,7 +622,7 @@ void SIMemOpAccess::reportUnsupported(const MachineBasicBlock::iterator &MI,
   Func.getContext().diagnose(Diag);
 }
 
-Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
+std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
 SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
                                SIAtomicAddrSpace InstrAddrSpace) const {
   if (SSID == SyncScope::System)
@@ -687,7 +687,7 @@ SIMemOpAccess::SIMemOpAccess(MachineFunction &MF) {
   MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
 }
 
-Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
+std::optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
     const MachineBasicBlock::iterator &MI) const {
   assert(MI->getNumMemOperands() > 0);
 
@@ -747,8 +747,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
                      IsNonTemporal);
 }
 
-Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
-    const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getLoadInfo(const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
 
   if (!(MI->mayLoad() && !MI->mayStore()))
@@ -761,8 +761,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
   return constructFromMIWithMMO(MI);
 }
 
-Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
-    const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getStoreInfo(const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
 
   if (!(!MI->mayLoad() && MI->mayStore()))
@@ -775,8 +775,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
   return constructFromMIWithMMO(MI);
 }
 
-Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
-    const MachineBasicBlock::iterator &MI) const {
+std::optional<SIMemOpInfo>
+SIMemOpAccess::getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
 
   if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
@@ -808,7 +808,7 @@ Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
                      IsCrossAddressSpaceOrdering, AtomicOrdering::NotAtomic);
 }
 
-Optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
+std::optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
     const MachineBasicBlock::iterator &MI) const {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
 

diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 037e0ecaf37f8..6151eb0f5e6a6 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -53,7 +53,7 @@ class SIPeepholeSDWA : public MachineFunctionPass {
   MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
   SmallVector<MachineInstr *, 8> ConvertedInstructions;
 
-  Optional<int64_t> foldToImm(const MachineOperand &Op) const;
+  std::optional<int64_t> foldToImm(const MachineOperand &Op) const;
 
 public:
   static char ID;
@@ -490,7 +490,8 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
   return SDWADstOperand::convertToSDWA(MI, TII);
 }
 
-Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
+std::optional<int64_t>
+SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
   if (Op.isImm()) {
     return Op.getImm();
   }

diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 0d218c88a7861..c77b2c1cf8045 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -98,7 +98,7 @@ namespace llvm {
 
 namespace AMDGPU {
 
-Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
+std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
   if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
     return std::nullopt;
 
@@ -118,25 +118,25 @@ Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
 }
 
 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
   return false;
 }
 
 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
   return false;
 }
 
 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
   return false;
 }
 
 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
-  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
+  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
   return false;
 }
@@ -536,7 +536,7 @@ unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
   return 0;
 }
 
-Optional<unsigned> InstInfo::getInvalidCompOperandIndex(
+std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
     std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {
 
   auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx);
@@ -711,7 +711,7 @@ std::string AMDGPUTargetID::toString() const {
                     .str();
 
   std::string Features;
-  if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
+  if (std::optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
     switch (*HsaAbiVersion) {
     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
       // Code object V2 only supported specific processors and had fixed
@@ -972,7 +972,7 @@ unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
 }
 
 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
-                             Optional<bool> EnableWavefrontSize32) {
+                             std::optional<bool> EnableWavefrontSize32) {
   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
     return 8;
 
@@ -990,7 +990,7 @@ unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
 }
 
 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
-                                Optional<bool> EnableWavefrontSize32) {
+                                std::optional<bool> EnableWavefrontSize32) {
   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
     return 8;
 
@@ -1062,7 +1062,7 @@ unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
 }
 
 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
-                          Optional<bool> EnableWavefrontSize32) {
+                          std::optional<bool> EnableWavefrontSize32) {
   NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                      getVGPREncodingGranule(STI, EnableWavefrontSize32));
   // VGPRBlocks is actual number of VGPR blocks minus 1.
@@ -2489,12 +2489,13 @@ uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
   return ByteOffset >> 2;
 }
 
-Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
-                                       int64_t ByteOffset, bool IsBuffer) {
+std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
+                                            int64_t ByteOffset, bool IsBuffer) {
   // The signed version is always a byte offset.
   if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
     assert(hasSMEMByteOffset(ST));
-    return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : std::nullopt;
+    return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
+                                 : std::nullopt;
   }
 
   if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
@@ -2502,17 +2503,17 @@ Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
 
   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
   return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
-             ? Optional<int64_t>(EncodedOffset)
+             ? std::optional<int64_t>(EncodedOffset)
              : std::nullopt;
 }
 
-Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
-                                                int64_t ByteOffset) {
+std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
+                                                     int64_t ByteOffset) {
   if (!isCI(ST) || !isDwordAligned(ByteOffset))
     return std::nullopt;
 
   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
-  return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset)
+  return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
                                    : std::nullopt;
 }
 

diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index f9136f198f110..e34617bddd603 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -42,7 +42,7 @@ namespace AMDGPU {
 struct IsaVersion;
 
 /// \returns HSA OS ABI Version identification.
-Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
+std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
 /// \returns True if HSA OS ABI Version identification is 2,
 /// false otherwise.
 bool isHsaAbiVersion2(const MCSubtargetInfo *STI);
@@ -264,15 +264,15 @@ unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs);
 /// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
 unsigned
 getVGPRAllocGranule(const MCSubtargetInfo *STI,
-                    Optional<bool> EnableWavefrontSize32 = std::nullopt);
+                    std::optional<bool> EnableWavefrontSize32 = std::nullopt);
 
 /// \returns VGPR encoding granularity for given subtarget \p STI.
 ///
 /// For subtargets which support it, \p EnableWavefrontSize32 should match
 /// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
-unsigned
-getVGPREncodingGranule(const MCSubtargetInfo *STI,
-                       Optional<bool> EnableWavefrontSize32 = std::nullopt);
+unsigned getVGPREncodingGranule(
+    const MCSubtargetInfo *STI,
+    std::optional<bool> EnableWavefrontSize32 = std::nullopt);
 
 /// \returns Total number of VGPRs for given subtarget \p STI.
 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI);
@@ -298,8 +298,9 @@ unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
 ///
 /// For subtargets which support it, \p EnableWavefrontSize32 should match the
 /// ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
-unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs,
-                          Optional<bool> EnableWavefrontSize32 = std::nullopt);
+unsigned
+getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs,
+                 std::optional<bool> EnableWavefrontSize32 = std::nullopt);
 
 } // end namespace IsaInfo
 
@@ -546,7 +547,7 @@ constexpr unsigned COMPONENTS_NUM = 2;
 class ComponentProps {
 private:
   unsigned SrcOperandsNum = 0;
-  Optional<unsigned> MandatoryLiteralIdx;
+  std::optional<unsigned> MandatoryLiteralIdx;
   bool HasSrc2Acc = false;
 
 public:
@@ -738,7 +739,7 @@ class InstInfo {
 
   // Check VOPD operands constraints.
   // Return the index of an invalid component operand, if any.
-  Optional<unsigned> getInvalidCompOperandIndex(
+  std::optional<unsigned> getInvalidCompOperandIndex(
       std::function<unsigned(unsigned, unsigned)> GetRegIdx) const;
 
 private:
@@ -1252,13 +1253,13 @@ uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset);
 /// SMRD offset field, or std::nullopt if it won't fit. On GFX9 and GFX10
 /// S_LOAD instructions have a signed offset, on other subtargets it is
 /// unsigned. S_BUFFER has an unsigned offset for all subtargets.
-Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
-                                       int64_t ByteOffset, bool IsBuffer);
+std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
+                                            int64_t ByteOffset, bool IsBuffer);
 
 /// \return The encoding that can be used for a 32-bit literal offset in an SMRD
 /// instruction. This is only useful on CI.
-Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
-                                                int64_t ByteOffset);
+std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
+                                                     int64_t ByteOffset);
 
 /// For FLAT segment the offset must be positive;
 /// MSB is ignored and forced to zero.

diff --git a/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp b/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
index b11835c1bd4ae..029d61bbef000 100644
--- a/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
@@ -80,158 +80,132 @@ TEST_F(AArch64GISelMITest, FoldBinOp) {
   auto MIBFCst2 = B.buildFConstant(s32, 2.0);
 
   // Test G_ADD folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGAddInt =
-      ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGAddInt = ConstantFoldBinOp(
+      TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAddInt.has_value());
   EXPECT_EQ(25ULL, FoldGAddInt.value().getLimitedValue());
-  Optional<APInt> FoldGAddMix =
-      ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGAddMix = ConstantFoldBinOp(
+      TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAddMix.has_value());
   EXPECT_EQ(1073741840ULL, FoldGAddMix.value().getLimitedValue());
 
   // Test G_AND folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGAndInt =
-      ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGAndInt = ConstantFoldBinOp(
+      TargetOpcode::G_AND, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAndInt.has_value());
   EXPECT_EQ(0ULL, FoldGAndInt.value().getLimitedValue());
-  Optional<APInt> FoldGAndMix =
-      ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst2.getReg(0),
-                        MIBFCst1.getReg(0), *MRI);
+  std::optional<APInt> FoldGAndMix = ConstantFoldBinOp(
+      TargetOpcode::G_AND, MIBCst2.getReg(0), MIBFCst1.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAndMix.has_value());
   EXPECT_EQ(1ULL, FoldGAndMix.value().getLimitedValue());
 
   // Test G_ASHR folding Integer + Mixed cases
-  Optional<APInt> FoldGAShrInt =
-      ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGAShrInt = ConstantFoldBinOp(
+      TargetOpcode::G_ASHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAShrInt.has_value());
   EXPECT_EQ(0ULL, FoldGAShrInt.value().getLimitedValue());
-  Optional<APInt> FoldGAShrMix =
-      ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBFCst2.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGAShrMix = ConstantFoldBinOp(
+      TargetOpcode::G_ASHR, MIBFCst2.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGAShrMix.has_value());
   EXPECT_EQ(2097152ULL, FoldGAShrMix.value().getLimitedValue());
 
   // Test G_LSHR folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGLShrInt =
-      ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGLShrInt = ConstantFoldBinOp(
+      TargetOpcode::G_LSHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGLShrInt.has_value());
   EXPECT_EQ(0ULL, FoldGLShrInt.value().getLimitedValue());
-  Optional<APInt> FoldGLShrMix =
-      ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBFCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGLShrMix = ConstantFoldBinOp(
+      TargetOpcode::G_LSHR, MIBFCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGLShrMix.has_value());
   EXPECT_EQ(2080768ULL, FoldGLShrMix.value().getLimitedValue());
 
   // Test G_MUL folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGMulInt =
-      ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGMulInt = ConstantFoldBinOp(
+      TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGMulInt.has_value());
   EXPECT_EQ(144ULL, FoldGMulInt.value().getLimitedValue());
-  Optional<APInt> FoldGMulMix =
-      ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGMulMix = ConstantFoldBinOp(
+      TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGMulMix.has_value());
   EXPECT_EQ(0ULL, FoldGMulMix.value().getLimitedValue());
 
   // Test G_OR folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGOrInt =
-      ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGOrInt = ConstantFoldBinOp(
+      TargetOpcode::G_OR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGOrInt.has_value());
   EXPECT_EQ(25ULL, FoldGOrInt.value().getLimitedValue());
-  Optional<APInt> FoldGOrMix =
-      ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGOrMix = ConstantFoldBinOp(
+      TargetOpcode::G_OR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGOrMix.has_value());
   EXPECT_EQ(1073741840ULL, FoldGOrMix.value().getLimitedValue());
 
   // Test G_SHL folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGShlInt =
-      ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGShlInt = ConstantFoldBinOp(
+      TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGShlInt.has_value());
   EXPECT_EQ(8192ULL, FoldGShlInt.value().getLimitedValue());
-  Optional<APInt> FoldGShlMix =
-      ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGShlMix = ConstantFoldBinOp(
+      TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGShlMix.has_value());
   EXPECT_EQ(0ULL, FoldGShlMix.value().getLimitedValue());
 
   // Test G_SUB folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGSubInt =
-      ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGSubInt = ConstantFoldBinOp(
+      TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSubInt.has_value());
   EXPECT_EQ(7ULL, FoldGSubInt.value().getLimitedValue());
-  Optional<APInt> FoldGSubMix =
-      ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGSubMix = ConstantFoldBinOp(
+      TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSubMix.has_value());
   EXPECT_EQ(3221225488ULL, FoldGSubMix.value().getLimitedValue());
 
   // Test G_XOR folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGXorInt =
-      ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGXorInt = ConstantFoldBinOp(
+      TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGXorInt.has_value());
   EXPECT_EQ(25ULL, FoldGXorInt.value().getLimitedValue());
-  Optional<APInt> FoldGXorMix =
-      ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGXorMix = ConstantFoldBinOp(
+      TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGXorMix.has_value());
   EXPECT_EQ(1073741840ULL, FoldGXorMix.value().getLimitedValue());
 
   // Test G_UDIV folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGUdivInt =
-      ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGUdivInt = ConstantFoldBinOp(
+      TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUdivInt.has_value());
   EXPECT_EQ(1ULL, FoldGUdivInt.value().getLimitedValue());
-  Optional<APInt> FoldGUdivMix =
-      ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGUdivMix = ConstantFoldBinOp(
+      TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUdivMix.has_value());
   EXPECT_EQ(0ULL, FoldGUdivMix.value().getLimitedValue());
 
   // Test G_SDIV folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGSdivInt =
-      ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGSdivInt = ConstantFoldBinOp(
+      TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSdivInt.has_value());
   EXPECT_EQ(1ULL, FoldGSdivInt.value().getLimitedValue());
-  Optional<APInt> FoldGSdivMix =
-      ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGSdivMix = ConstantFoldBinOp(
+      TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSdivMix.has_value());
   EXPECT_EQ(0ULL, FoldGSdivMix.value().getLimitedValue());
 
   // Test G_UREM folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGUremInt =
-      ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGUremInt = ConstantFoldBinOp(
+      TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUremInt.has_value());
   EXPECT_EQ(1ULL, FoldGUremInt.value().getLimitedValue());
-  Optional<APInt> FoldGUremMix =
-      ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGUremMix = ConstantFoldBinOp(
+      TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGUremMix.has_value());
   EXPECT_EQ(0ULL, FoldGUremMix.value().getLimitedValue());
 
   // Test G_SREM folding Integer + Mixed Int-Float cases
-  Optional<APInt> FoldGSremInt =
-      ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
-                        MIBCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGSremInt = ConstantFoldBinOp(
+      TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSremInt.has_value());
   EXPECT_EQ(7ULL, FoldGSremInt.value().getLimitedValue());
-  Optional<APInt> FoldGSremMix =
-      ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
-                        MIBFCst2.getReg(0), *MRI);
+  std::optional<APInt> FoldGSremMix = ConstantFoldBinOp(
+      TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
   EXPECT_TRUE(FoldGSremMix.has_value());
   EXPECT_EQ(16ULL, FoldGSremMix.value().getLimitedValue());
 }

diff --git a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
index 9ed72c9b6f5fb..387999d6043c0 100644
--- a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
@@ -45,7 +45,7 @@ TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
   if (!TM)
     return;
   auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
-  Optional<ValueAndVReg> Src0;
+  std::optional<ValueAndVReg> Src0;
   bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
   EXPECT_TRUE(match);
   EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
@@ -700,8 +700,8 @@ TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
 
   Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
   Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
-  Optional<ValueAndVReg> ValReg;
-  Optional<FPValueAndVReg> FValReg;
+  std::optional<ValueAndVReg> ValReg;
+  std::optional<FPValueAndVReg> FValReg;
 
   EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
   EXPECT_EQ(IntOne, ValReg->VReg);
@@ -723,7 +723,7 @@ TEST_F(AArch64GISelMITest, MatchConstantSplat) {
   Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
   Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
   Register Undef = B.buildUndef(s64).getReg(0);
-  Optional<FPValueAndVReg> FValReg;
+  std::optional<FPValueAndVReg> FValReg;
 
   // GFCstOrSplatGFCstMatch allows undef as part of splat. Undef often comes
   // from padding to legalize into available operation and then ignore added

diff --git a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
index 0f354f4a1610a..a8e868d8d4041 100644
--- a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
+++ b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
@@ -110,7 +110,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObject) {
   SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
   SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
                                 PtrInfo.getWithOffset(Offset));
-  Optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
 
   bool IsAlias;
@@ -137,7 +137,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObjectUnknownSize) {
   // Maybe unlikely that BaseIndexOffset::computeAliasing is used with the
   // optional NumBytes being unset like in this test, but it would be confusing
   // if that function determined IsAlias=false here.
-  Optional<int64_t> NumBytes;
+  std::optional<int64_t> NumBytes;
 
   bool IsAlias;
   bool IsValid = BaseIndexOffset::computeAliasing(
@@ -165,9 +165,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, noAliasingFrameObjects) {
                                  PtrInfo.getWithOffset(Offset0));
   SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
                                  PtrInfo.getWithOffset(Offset1));
-  Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
-  Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
 
   bool IsAlias;
@@ -195,9 +195,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, unknownSizeFrameObjects) {
       DAG->getStore(DAG->getEntryNode(), Loc, Value, FIPtr, PtrInfo);
   SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
                                  MachinePointerInfo(PtrInfo.getAddrSpace()));
-  Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
-  Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
 
   bool IsAlias;
@@ -220,7 +220,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithFrameObject) {
   SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
   SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
                                 PtrInfo.getWithOffset(Offset));
-  Optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
   EVT GTy = DAG->getTargetLoweringInfo().getValueType(DAG->getDataLayout(),
                                                       G->getType());
@@ -228,7 +228,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithFrameObject) {
   SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
   SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
                                  MachinePointerInfo(G, 0));
-  Optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());
 
   bool IsAlias;
@@ -248,7 +248,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithAliasedGlobal) {
   SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
   SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
                                  MachinePointerInfo(G, 0));
-  Optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());
 
   SDValue AliasedGValue = DAG->getConstant(1, Loc, GTy);
@@ -290,9 +290,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsWithinDiff) {
                                  PtrInfo.getWithOffset(Offset0));
   SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
                                  PtrInfo.getWithOffset(Offset1));
-  Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
-  Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
 
   bool IsAlias;
@@ -331,9 +331,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsOutOfDiff) {
                                  PtrInfo.getWithOffset(Offset0));
   SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
                                  PtrInfo.getWithOffset(Offset1));
-  Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
-  Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
 
   bool IsAlias;
@@ -365,9 +365,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, twoFixedStackObjects) {
                                  PtrInfo0.getWithOffset(Offset0));
   SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
                                  PtrInfo1.getWithOffset(Offset0));
-  Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
-  Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
+  std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
       cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());
 
   bool IsAlias;
