[llvm] 3152156 - [Transforms/Scalar] llvm::Optional => std::optional

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 13 00:05:20 PST 2022


Author: Fangrui Song
Date: 2022-12-13T08:05:14Z
New Revision: 31521563345c527ac74febcab22e65e55477637b

URL: https://github.com/llvm/llvm-project/commit/31521563345c527ac74febcab22e65e55477637b
DIFF: https://github.com/llvm/llvm-project/commit/31521563345c527ac74febcab22e65e55477637b.diff

LOG: [Transforms/Scalar] llvm::Optional => std::optional
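
The change is mechanical: every llvm::Optional<T> in the files below becomes std::optional<T>
(the surrounding code already spells the empty state std::nullopt, as the context lines show).
As a reminder of the usage pattern, here is a small standalone C++17 sketch; the function
names are invented for illustration and do not appear in the patch.

  #include <optional>

  // Before: llvm::Optional<int> from "llvm/ADT/Optional.h", empty value llvm::None.
  // After:  std::optional<int> from <optional>, empty value std::nullopt.
  std::optional<int> maybeBitWidth(bool Known, int Width) {
    if (!Known)
      return std::nullopt; // previously: return llvm::None;
    return Width;
  }

  void use(int) {}

  void demo() {
    if (std::optional<int> W = maybeBitWidth(true, 32))
      use(*W); // operator*, value() and value_or() behave as before
  }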

Added: 
    

Modified: 
    llvm/include/llvm/Transforms/Scalar/Float2Int.h
    llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
    llvm/include/llvm/Transforms/Scalar/Scalarizer.h
    llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
    llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
    llvm/lib/Transforms/Scalar/Float2Int.cpp
    llvm/lib/Transforms/Scalar/GVNSink.cpp
    llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
    llvm/lib/Transforms/Scalar/LoopDistribute.cpp
    llvm/lib/Transforms/Scalar/LoopFuse.cpp
    llvm/lib/Transforms/Scalar/LoopPassManager.cpp
    llvm/lib/Transforms/Scalar/LoopPredication.cpp
    llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
    llvm/lib/Transforms/Scalar/MergeICmps.cpp
    llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
    llvm/lib/Transforms/Scalar/Scalarizer.cpp
    llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Transforms/Scalar/Float2Int.h b/llvm/include/llvm/Transforms/Scalar/Float2Int.h
index f4bec228ea96..4da07663e760 100644
--- a/llvm/include/llvm/Transforms/Scalar/Float2Int.h
+++ b/llvm/include/llvm/Transforms/Scalar/Float2Int.h
@@ -42,7 +42,7 @@ class Float2IntPass : public PassInfoMixin<Float2IntPass> {
   ConstantRange badRange();
   ConstantRange unknownRange();
   ConstantRange validateRange(ConstantRange R);
-  Optional<ConstantRange> calcRange(Instruction *I);
+  std::optional<ConstantRange> calcRange(Instruction *I);
   void walkBackwards();
   void walkForwards();
   bool validateAndTransform();

diff --git a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
index d8caa99ea0ba..b996e9e3613e 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -188,7 +188,7 @@ class PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
   /// PassInstrumentation's BeforePass returns false. Otherwise, returns the
   /// preserved analyses of the pass.
   template <typename IRUnitT, typename PassT>
-  Optional<PreservedAnalyses>
+  std::optional<PreservedAnalyses>
   runSinglePass(IRUnitT &IR, PassT &Pass, LoopAnalysisManager &AM,
                 LoopStandardAnalysisResults &AR, LPMUpdater &U,
                 PassInstrumentation &PI);
@@ -394,7 +394,7 @@ class LPMUpdater {
 };
 
 template <typename IRUnitT, typename PassT>
-Optional<PreservedAnalyses> LoopPassManager::runSinglePass(
+std::optional<PreservedAnalyses> LoopPassManager::runSinglePass(
     IRUnitT &IR, PassT &Pass, LoopAnalysisManager &AM,
     LoopStandardAnalysisResults &AR, LPMUpdater &U, PassInstrumentation &PI) {
   // Get the loop in case of Loop pass and outermost loop in case of LoopNest
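
Per the comment above, runSinglePass now returns std::optional<PreservedAnalyses>: an empty
result means PassInstrumentation's BeforePass callback skipped the pass, otherwise the
optional carries what the pass preserved. A reduced sketch of that protocol, with stand-in
types rather than the real LLVM classes:

  #include <optional>

  struct PreservedSketch { bool All = false; };

  template <typename IRT, typename PassT>
  std::optional<PreservedSketch>
  runSinglePassSketch(IRT &IR, PassT &Pass, bool BeforePassAllowsRun) {
    if (!BeforePassAllowsRun)
      return std::nullopt; // pass was skipped by instrumentation; nothing to report
    return Pass.run(IR);   // pass ran; forward its result to the caller
  }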

diff --git a/llvm/include/llvm/Transforms/Scalar/Scalarizer.h b/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
index 5cc67f78e5a2..80c3f187be8c 100644
--- a/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
+++ b/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
@@ -17,8 +17,8 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_SCALARIZER_H
 #define LLVM_TRANSFORMS_SCALAR_SCALARIZER_H
 
-#include "llvm/ADT/Optional.h"
 #include "llvm/IR/PassManager.h"
+#include <optional>
 
 namespace llvm {
 
@@ -30,8 +30,8 @@ struct ScalarizerPassOptions {
   // Scalarizer.cpp. When the cl::opt are specified, they take precedence.
   // When the cl::opt are not specified, the present optional booleans allow to
   // override the cl::opt's default values.
-  llvm::Optional<bool> ScalarizeVariableInsertExtract;
-  llvm::Optional<bool> ScalarizeLoadStore;
+  std::optional<bool> ScalarizeVariableInsertExtract;
+  std::optional<bool> ScalarizeLoadStore;
 };
 
 class ScalarizerPass : public PassInfoMixin<ScalarizerPass> {
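
The two std::optional<bool> fields act as per-pass overrides of the cl::opt defaults: an
explicit command-line flag still wins, otherwise a populated optional replaces the flag's
default value. The real resolution lives in getWithDefaultOverride in Scalarizer.cpp
(further down in this patch); a simplified standalone model of the same decision:

  #include <cassert>
  #include <optional>

  bool resolveOption(bool FlagSetOnCommandLine, bool FlagValue,
                     const std::optional<bool> &Override) {
    // Command line beats pass options; pass options beat the built-in default.
    return FlagSetOnCommandLine ? FlagValue : Override.value_or(FlagValue);
  }

  int main() {
    assert(resolveOption(false, /*default=*/false, /*override=*/true) == true);
    assert(resolveOption(true, /*explicit=*/false, /*override=*/true) == false);
    assert(resolveOption(false, /*default=*/true, std::nullopt) == true);
    return 0;
  }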

diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 09f11464b014..a8b37e48d272 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -35,7 +35,6 @@
 #include "llvm/Transforms/Scalar/ConstantHoisting.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
@@ -532,8 +531,9 @@ void ConstantHoistingPass::collectConstantCandidates(Function &Fn) {
 // bit widths (APInt Operator- does not like that). If the value cannot be
 // represented in uint64 we return an "empty" APInt. This is then interpreted
 // as the value is not in range.
-static Optional<APInt> calculateOffsetDiff(const APInt &V1, const APInt &V2) {
-  Optional<APInt> Res;
+static std::optional<APInt> calculateOffsetDiff(const APInt &V1,
+                                                const APInt &V2) {
+  std::optional<APInt> Res;
   unsigned BW = V1.getBitWidth() > V2.getBitWidth() ?
                 V1.getBitWidth() : V2.getBitWidth();
   uint64_t LimVal1 = V1.getLimitedValue();
@@ -605,9 +605,8 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
       LLVM_DEBUG(dbgs() << "Cost: " << Cost << "\n");
 
       for (auto C2 = S; C2 != E; ++C2) {
-        Optional<APInt> Diff = calculateOffsetDiff(
-                                   C2->ConstInt->getValue(),
-                                   ConstCand->ConstInt->getValue());
+        std::optional<APInt> Diff = calculateOffsetDiff(
+            C2->ConstInt->getValue(), ConstCand->ConstInt->getValue());
         if (Diff) {
           const InstructionCost ImmCosts =
               TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.value(), Ty);
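
calculateOffsetDiff keeps its existing contract: when a value cannot be represented in a
uint64, the returned optional stays empty and the caller treats the offset as out of range.
A toy standalone version of that pattern, with uint64_t standing in for APInt (the real
code also reconciles mixed bit widths):

  #include <cstdint>
  #include <optional>

  std::optional<uint64_t> offsetDiffSketch(uint64_t V1, uint64_t V2,
                                           bool BothRepresentable) {
    std::optional<uint64_t> Res;         // starts out empty, like Res above
    if (BothRepresentable)
      Res = V1 > V2 ? V1 - V2 : V2 - V1; // absolute difference of the two values
    return Res;                          // empty means "not in range"
  }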

diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 5df5af9ba364..777fd5dc6424 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1296,7 +1296,7 @@ struct DSEState {
   // there is no such MemoryDef, return std::nullopt. The returned value may not
   // (completely) overwrite \p KillingLoc. Currently we bail out when we
   // encounter an aliasing MemoryUse (read).
-  Optional<MemoryAccess *>
+  std::optional<MemoryAccess *>
   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
                   const MemoryLocation &KillingLoc, const Value *KillingUndObj,
                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
@@ -2082,7 +2082,7 @@ static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
       if (State.SkipStores.count(Current))
         continue;
 
-      Optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef(
+      std::optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef(
           KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit,
           WalkerStepLimit, IsMemTerm, PartialLimit);
 

diff --git a/llvm/lib/Transforms/Scalar/Float2Int.cpp b/llvm/lib/Transforms/Scalar/Float2Int.cpp
index 30967cc5efa9..f66d1b914b0b 100644
--- a/llvm/lib/Transforms/Scalar/Float2Int.cpp
+++ b/llvm/lib/Transforms/Scalar/Float2Int.cpp
@@ -236,7 +236,7 @@ void Float2IntPass::walkBackwards() {
 
 // Calculate result range from operand ranges.
 // Return std::nullopt if the range cannot be calculated yet.
-Optional<ConstantRange> Float2IntPass::calcRange(Instruction *I) {
+std::optional<ConstantRange> Float2IntPass::calcRange(Instruction *I) {
   SmallVector<ConstantRange, 4> OpRanges;
   for (Value *O : I->operands()) {
     if (Instruction *OI = dyn_cast<Instruction>(O)) {
@@ -335,7 +335,7 @@ void Float2IntPass::walkForwards() {
     Instruction *I = Worklist.back();
     Worklist.pop_back();
 
-    if (Optional<ConstantRange> Range = calcRange(I))
+    if (std::optional<ConstantRange> Range = calcRange(I))
       seen(I, *Range);
     else
       Worklist.push_front(I); // Reprocess later.

diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp
index 69cffdbff549..6e3328e67387 100644
--- a/llvm/lib/Transforms/Scalar/GVNSink.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp
@@ -599,7 +599,7 @@ class GVNSink {
   /// The main heuristic function. Analyze the set of instructions pointed to by
   /// LRI and return a candidate solution if these instructions can be sunk, or
   /// std::nullopt otherwise.
-  Optional<SinkingInstructionCandidate> analyzeInstructionForSinking(
+  std::optional<SinkingInstructionCandidate> analyzeInstructionForSinking(
       LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
       ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents);
 
@@ -639,9 +639,12 @@ class GVNSink {
   }
 };
 
-Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking(
-  LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
-  ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents) {
+std::optional<SinkingInstructionCandidate>
+GVNSink::analyzeInstructionForSinking(LockstepReverseIterator &LRI,
+                                      unsigned &InstNum,
+                                      unsigned &MemoryInstNum,
+                                      ModelledPHISet &NeededPHIs,
+                                      SmallPtrSetImpl<Value *> &PHIContents) {
   auto Insts = *LRI;
   LLVM_DEBUG(dbgs() << " -- Analyzing instruction set: [\n"; for (auto *I
                                                                   : Insts) {

diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 973e3b4d708c..055cfddcd4cc 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -211,9 +211,9 @@ class InductiveRangeCheck {
   /// Computes a range for the induction variable (IndVar) in which the range
   /// check is redundant and can be constant-folded away.  The induction
   /// variable is not required to be the canonical {0,+,1} induction variable.
-  Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
-                                            const SCEVAddRecExpr *IndVar,
-                                            bool IsLatchSigned) const;
+  std::optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
+                                                 const SCEVAddRecExpr *IndVar,
+                                                 bool IsLatchSigned) const;
 
   /// Parse out a set of inductive range checks from \p BI and append them to \p
   /// Checks.
@@ -503,8 +503,8 @@ struct LoopStructure {
     return Result;
   }
 
-  static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &, Loop &,
-                                                    const char *&);
+  static std::optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
+                                                         Loop &, const char *&);
 };
 
 /// This class is used to constrain loops to run within a given iteration space.
@@ -554,7 +554,7 @@ class LoopConstrainer {
   // Compute a safe set of limits for the main loop to run in -- effectively the
   // intersection of `Range' and the iteration space of the original loop.
   // Return std::nullopt if unable to compute the set of subranges.
-  Optional<SubRanges> calculateSubRanges(bool IsSignedPredicate) const;
+  std::optional<SubRanges> calculateSubRanges(bool IsSignedPredicate) const;
 
   // Clone `OriginalLoop' and return the result in CLResult.  The IR after
   // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
@@ -747,7 +747,7 @@ static bool isSafeIncreasingBound(const SCEV *Start,
           SE.isLoopEntryGuardedByCond(L, BoundPred, BoundSCEV, Limit));
 }
 
-Optional<LoopStructure>
+std::optional<LoopStructure>
 LoopStructure::parseLoopStructure(ScalarEvolution &SE, Loop &L,
                                   const char *&FailureReason) {
   if (!L.isLoopSimplifyForm()) {
@@ -1061,7 +1061,7 @@ static const SCEV *NoopOrExtend(const SCEV *S, Type *Ty, ScalarEvolution &SE,
   return Signed ? SE.getNoopOrSignExtend(S, Ty) : SE.getNoopOrZeroExtend(S, Ty);
 }
 
-Optional<LoopConstrainer::SubRanges>
+std::optional<LoopConstrainer::SubRanges>
 LoopConstrainer::calculateSubRanges(bool IsSignedPredicate) const {
   IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());
 
@@ -1413,7 +1413,7 @@ bool LoopConstrainer::run() {
   MainLoopPreheader = Preheader;
 
   bool IsSignedPredicate = MainLoopStructure.IsSignedPredicate;
-  Optional<SubRanges> MaybeSR = calculateSubRanges(IsSignedPredicate);
+  std::optional<SubRanges> MaybeSR = calculateSubRanges(IsSignedPredicate);
   if (!MaybeSR) {
     LLVM_DEBUG(dbgs() << "irce: could not compute subranges\n");
     return false;
@@ -1428,7 +1428,7 @@ bool LoopConstrainer::run() {
   Instruction *InsertPt = OriginalPreheader->getTerminator();
 
   // It would have been better to make `PreLoop' and `PostLoop'
-  // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
+  // `std::optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
   // constructor.
   ClonedLoop PreLoop, PostLoop;
   bool NeedsPreLoop =
@@ -1581,10 +1581,10 @@ bool LoopConstrainer::run() {
 /// Computes and returns a range of values for the induction variable (IndVar)
 /// in which the range check can be safely elided.  If it cannot compute such a
 /// range, returns std::nullopt.
-Optional<InductiveRangeCheck::Range>
-InductiveRangeCheck::computeSafeIterationSpace(
-    ScalarEvolution &SE, const SCEVAddRecExpr *IndVar,
-    bool IsLatchSigned) const {
+std::optional<InductiveRangeCheck::Range>
+InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE,
+                                               const SCEVAddRecExpr *IndVar,
+                                               bool IsLatchSigned) const {
   // We can deal when types of latch check and range checks don't match in case
   // if latch check is more narrow.
   auto *IVType = dyn_cast<IntegerType>(IndVar->getType());
@@ -1710,9 +1710,9 @@ InductiveRangeCheck::computeSafeIterationSpace(
   return InductiveRangeCheck::Range(Begin, End);
 }
 
-static Optional<InductiveRangeCheck::Range>
+static std::optional<InductiveRangeCheck::Range>
 IntersectSignedRange(ScalarEvolution &SE,
-                     const Optional<InductiveRangeCheck::Range> &R1,
+                     const std::optional<InductiveRangeCheck::Range> &R1,
                      const InductiveRangeCheck::Range &R2) {
   if (R2.isEmpty(SE, /* IsSigned */ true))
     return std::nullopt;
@@ -1739,9 +1739,9 @@ IntersectSignedRange(ScalarEvolution &SE,
   return Ret;
 }
 
-static Optional<InductiveRangeCheck::Range>
+static std::optional<InductiveRangeCheck::Range>
 IntersectUnsignedRange(ScalarEvolution &SE,
-                       const Optional<InductiveRangeCheck::Range> &R1,
+                       const std::optional<InductiveRangeCheck::Range> &R1,
                        const InductiveRangeCheck::Range &R2) {
   if (R2.isEmpty(SE, /* IsSigned */ false))
     return std::nullopt;
@@ -1928,7 +1928,7 @@ bool InductiveRangeCheckElimination::run(
     PrintRecognizedRangeChecks(errs());
 
   const char *FailureReason = nullptr;
-  Optional<LoopStructure> MaybeLoopStructure =
+  std::optional<LoopStructure> MaybeLoopStructure =
       LoopStructure::parseLoopStructure(SE, *L, FailureReason);
   if (!MaybeLoopStructure) {
     LLVM_DEBUG(dbgs() << "irce: could not parse loop structure: "
@@ -1941,7 +1941,7 @@ bool InductiveRangeCheckElimination::run(
   const SCEVAddRecExpr *IndVar =
       cast<SCEVAddRecExpr>(SE.getMinusSCEV(SE.getSCEV(LS.IndVarBase), SE.getSCEV(LS.IndVarStep)));
 
-  Optional<InductiveRangeCheck::Range> SafeIterRange;
+  std::optional<InductiveRangeCheck::Range> SafeIterRange;
   Instruction *ExprInsertPt = Preheader->getTerminator();
 
   SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;

diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index 7a2a34a8d595..13f26ed0f8c1 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -892,7 +892,7 @@ class LoopDistributeForLoop {
   /// If the optional has a value, it indicates whether distribution was forced
   /// to be enabled (true) or disabled (false).  If the optional has no value
   /// distribution was not forced either way.
-  const Optional<bool> &isForced() const { return IsForced; }
+  const std::optional<bool> &isForced() const { return IsForced; }
 
 private:
   /// Filter out checks between pointers from the same partition.
@@ -963,7 +963,7 @@ class LoopDistributeForLoop {
   /// If the optional has a value, it indicates whether distribution was forced
   /// to be enabled (true) or disabled (false).  If the optional has no value
   /// distribution was not forced either way.
-  Optional<bool> IsForced;
+  std::optional<bool> IsForced;
 };
 
 } // end anonymous namespace

diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index 5f64260bf0b8..6a4a6ee9aa3c 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -701,7 +701,7 @@ struct LoopFuser {
   /// have the same TripCount. The second is the difference in the two
   /// TripCounts. This information can be used later to determine whether or not
   /// peeling can be performed on either one of the candidates.
-  std::pair<bool, Optional<unsigned>>
+  std::pair<bool, std::optional<unsigned>>
   haveIdenticalTripCounts(const FusionCandidate &FC0,
                           const FusionCandidate &FC1) const {
     const SCEV *TripCount0 = SE.getBackedgeTakenCount(FC0.L);
@@ -743,7 +743,7 @@ struct LoopFuser {
       return {false, std::nullopt};
     }
 
-    Optional<unsigned> Difference;
+    std::optional<unsigned> Difference;
     int Diff = TC0 - TC1;
 
     if (Diff > 0)
@@ -860,10 +860,10 @@ struct LoopFuser {
           // the loops (second value of pair). The difference is not equal to
           // None iff the loops iterate a constant number of times, and have a
           // single exit.
-          std::pair<bool, Optional<unsigned>> IdenticalTripCountRes =
+          std::pair<bool, std::optional<unsigned>> IdenticalTripCountRes =
               haveIdenticalTripCounts(*FC0, *FC1);
           bool SameTripCount = IdenticalTripCountRes.first;
-          Optional<unsigned> TCDifference = IdenticalTripCountRes.second;
+          std::optional<unsigned> TCDifference = IdenticalTripCountRes.second;
 
           // Here we are checking that FC0 (the first loop) can be peeled, and
           // both loops have different tripcounts.

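haveIdenticalTripCounts reports its two results as std::pair<bool, std::optional<unsigned>>:
whether the trip counts match, and, only when both loops have known constant trip counts,
how far apart they are. A small illustrative sketch of producing and consuming that shape
(not the real LoopFuser method):

  #include <optional>
  #include <utility>

  std::pair<bool, std::optional<unsigned>>
  compareTripCountsSketch(std::optional<unsigned> TC0,
                          std::optional<unsigned> TC1) {
    if (!TC0 || !TC1)
      return {false, std::nullopt}; // non-constant trip count: difference unknown
    unsigned Diff = *TC0 > *TC1 ? *TC0 - *TC1 : *TC1 - *TC0;
    return {*TC0 == *TC1, Diff};
  }

  bool mayPeelToMatchSketch(std::optional<unsigned> TC0,
                            std::optional<unsigned> TC1) {
    auto [Same, Difference] = compareTripCountsSketch(TC0, TC1);
    return !Same && Difference.has_value(); // differ by a known constant amount
  }
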
diff --git a/llvm/lib/Transforms/Scalar/LoopPassManager.cpp b/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
index e701ce560a1a..fadd0707d66b 100644
--- a/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPassManager.cpp
@@ -87,7 +87,7 @@ LoopPassManager::runWithLoopNestPasses(Loop &L, LoopAnalysisManager &AM,
   Loop *OuterMostLoop = &L;
 
   for (size_t I = 0, E = IsLoopNestPass.size(); I != E; ++I) {
-    Optional<PreservedAnalyses> PassPA;
+    std::optional<PreservedAnalyses> PassPA;
     if (!IsLoopNestPass[I]) {
       // The `I`-th pass is a loop pass.
       auto &Pass = LoopPasses[LoopPassIndex++];
@@ -157,7 +157,8 @@ LoopPassManager::runWithoutLoopNestPasses(Loop &L, LoopAnalysisManager &AM,
   // instrumenting callbacks for the passes later.
   PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(L, AR);
   for (auto &Pass : LoopPasses) {
-    Optional<PreservedAnalyses> PassPA = runSinglePass(L, Pass, AM, AR, U, PI);
+    std::optional<PreservedAnalyses> PassPA =
+        runSinglePass(L, Pass, AM, AR, U, PI);
 
     // `PassPA` is `None` means that the before-pass callbacks in
     // `PassInstrumentation` return false. The pass does not run in this case,

diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index 054674d0ba0b..0492ed5cdb1c 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -271,8 +271,8 @@ class LoopPredication {
   LoopICmp LatchCheck;
 
   bool isSupportedStep(const SCEV* Step);
-  Optional<LoopICmp> parseLoopICmp(ICmpInst *ICI);
-  Optional<LoopICmp> parseLoopLatchICmp();
+  std::optional<LoopICmp> parseLoopICmp(ICmpInst *ICI);
+  std::optional<LoopICmp> parseLoopLatchICmp();
 
   /// Return an insertion point suitable for inserting a safe to speculate
   /// instruction whose only user will be 'User' which has operands 'Ops'.  A
@@ -295,16 +295,17 @@ class LoopPredication {
                      ICmpInst::Predicate Pred, const SCEV *LHS,
                      const SCEV *RHS);
 
-  Optional<Value *> widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander,
-                                        Instruction *Guard);
-  Optional<Value *> widenICmpRangeCheckIncrementingLoop(LoopICmp LatchCheck,
-                                                        LoopICmp RangeCheck,
-                                                        SCEVExpander &Expander,
-                                                        Instruction *Guard);
-  Optional<Value *> widenICmpRangeCheckDecrementingLoop(LoopICmp LatchCheck,
-                                                        LoopICmp RangeCheck,
-                                                        SCEVExpander &Expander,
-                                                        Instruction *Guard);
+  std::optional<Value *> widenICmpRangeCheck(ICmpInst *ICI,
+                                             SCEVExpander &Expander,
+                                             Instruction *Guard);
+  std::optional<Value *>
+  widenICmpRangeCheckIncrementingLoop(LoopICmp LatchCheck, LoopICmp RangeCheck,
+                                      SCEVExpander &Expander,
+                                      Instruction *Guard);
+  std::optional<Value *>
+  widenICmpRangeCheckDecrementingLoop(LoopICmp LatchCheck, LoopICmp RangeCheck,
+                                      SCEVExpander &Expander,
+                                      Instruction *Guard);
   unsigned collectChecks(SmallVectorImpl<Value *> &Checks, Value *Condition,
                          SCEVExpander &Expander, Instruction *Guard);
   bool widenGuardConditions(IntrinsicInst *II, SCEVExpander &Expander);
@@ -384,8 +385,7 @@ PreservedAnalyses LoopPredicationPass::run(Loop &L, LoopAnalysisManager &AM,
   return PA;
 }
 
-Optional<LoopICmp>
-LoopPredication::parseLoopICmp(ICmpInst *ICI) {
+std::optional<LoopICmp> LoopPredication::parseLoopICmp(ICmpInst *ICI) {
   auto Pred = ICI->getPredicate();
   auto *LHS = ICI->getOperand(0);
   auto *RHS = ICI->getOperand(1);
@@ -576,9 +576,9 @@ bool LoopPredication::isLoopInvariantValue(const SCEV* S) {
   return false;
 }
 
-Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop(
-    LoopICmp LatchCheck, LoopICmp RangeCheck,
-    SCEVExpander &Expander, Instruction *Guard) {
+std::optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop(
+    LoopICmp LatchCheck, LoopICmp RangeCheck, SCEVExpander &Expander,
+    Instruction *Guard) {
   auto *Ty = RangeCheck.IV->getType();
   // Generate the widened condition for the forward loop:
   //   guardStart u< guardLimit &&
@@ -625,9 +625,9 @@ Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop(
   return Builder.CreateAnd(FirstIterationCheck, LimitCheck);
 }
 
-Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop(
-    LoopICmp LatchCheck, LoopICmp RangeCheck,
-    SCEVExpander &Expander, Instruction *Guard) {
+std::optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop(
+    LoopICmp LatchCheck, LoopICmp RangeCheck, SCEVExpander &Expander,
+    Instruction *Guard) {
   auto *Ty = RangeCheck.IV->getType();
   const SCEV *GuardStart = RangeCheck.IV->getStart();
   const SCEV *GuardLimit = RangeCheck.Limit;
@@ -687,9 +687,9 @@ static void normalizePredicate(ScalarEvolution *SE, Loop *L,
 /// If ICI can be widened to a loop invariant condition emits the loop
 /// invariant condition in the loop preheader and return it, otherwise
 /// returns std::nullopt.
-Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
-                                                       SCEVExpander &Expander,
-                                                       Instruction *Guard) {
+std::optional<Value *>
+LoopPredication::widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander,
+                                     Instruction *Guard) {
   LLVM_DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
   LLVM_DEBUG(ICI->dump());
 
@@ -872,7 +872,7 @@ bool LoopPredication::widenWidenableBranchGuardConditions(
   return true;
 }
 
-Optional<LoopICmp> LoopPredication::parseLoopLatchICmp() {
+std::optional<LoopICmp> LoopPredication::parseLoopLatchICmp() {
   using namespace PatternMatch;
 
   BasicBlock *LoopLatch = L->getLoopLatch();
@@ -939,7 +939,6 @@ Optional<LoopICmp> LoopPredication::parseLoopLatchICmp() {
   return Result;
 }
 
-
 bool LoopPredication::isLoopProfitableToPredicate() {
   if (SkipProfitabilityChecks)
     return true;

diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index cc0ca7ab9717..a454c0573092 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -344,7 +344,7 @@ struct PragmaInfo {
 /// \returns Optional value, holding the RolledDynamicCost and UnrolledCost. If
 /// the analysis failed (no benefits expected from the unrolling, or the loop is
 /// too big to analyze), the returned value is std::nullopt.
-static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
+static std::optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
     const Loop *L, unsigned TripCount, DominatorTree &DT, ScalarEvolution &SE,
     const SmallPtrSetImpl<const Value *> &EphValues,
     const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize,
@@ -819,7 +819,7 @@ static std::optional<unsigned> shouldFullUnroll(
   // The loop isn't that small, but we still can fully unroll it if that
   // helps to remove a significant number of instructions.
   // To check that, run additional analysis on the loop.
-  if (Optional<EstimatedUnrollCost> Cost = analyzeLoopUnrollCost(
+  if (std::optional<EstimatedUnrollCost> Cost = analyzeLoopUnrollCost(
           L, FullUnrollTripCount, DT, SE, EphValues, TTI,
           UP.Threshold * UP.MaxPercentThresholdBoost / 100,
           UP.MaxIterationsCountToAnalyze)) {

diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index 0c09c0cdb9ee..bcedb05890af 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -300,9 +300,9 @@ bool BCECmpBlock::doesOtherWork() const {
 
 // Visit the given comparison. If this is a comparison between two valid
 // BCE atoms, returns the comparison.
-Optional<BCECmp> visitICmp(const ICmpInst *const CmpI,
-                           const ICmpInst::Predicate ExpectedPredicate,
-                           BaseIdentifier &BaseId) {
+std::optional<BCECmp> visitICmp(const ICmpInst *const CmpI,
+                                const ICmpInst::Predicate ExpectedPredicate,
+                                BaseIdentifier &BaseId) {
   // The comparison can only be used once:
   //  - For intermediate blocks, as a branch condition.
   //  - For the final block, as an incoming value for the Phi.
@@ -330,9 +330,10 @@ Optional<BCECmp> visitICmp(const ICmpInst *const CmpI,
 
 // Visit the given comparison block. If this is a comparison between two valid
 // BCE atoms, returns the comparison.
-Optional<BCECmpBlock> visitCmpBlock(Value *const Val, BasicBlock *const Block,
-                                    const BasicBlock *const PhiBlock,
-                                    BaseIdentifier &BaseId) {
+std::optional<BCECmpBlock> visitCmpBlock(Value *const Val,
+                                         BasicBlock *const Block,
+                                         const BasicBlock *const PhiBlock,
+                                         BaseIdentifier &BaseId) {
   if (Block->empty())
     return std::nullopt;
   auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
@@ -368,7 +369,7 @@ Optional<BCECmpBlock> visitCmpBlock(Value *const Val, BasicBlock *const Block,
     return std::nullopt;
   LLVM_DEBUG(dbgs() << "icmp\n");
 
-  Optional<BCECmp> Result = visitICmp(CmpI, ExpectedPredicate, BaseId);
+  std::optional<BCECmp> Result = visitICmp(CmpI, ExpectedPredicate, BaseId);
   if (!Result)
     return std::nullopt;
 
@@ -476,7 +477,7 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
   BaseIdentifier BaseId;
   for (BasicBlock *const Block : Blocks) {
     assert(Block && "invalid block");
-    Optional<BCECmpBlock> Comparison = visitCmpBlock(
+    std::optional<BCECmpBlock> Comparison = visitCmpBlock(
         Phi.getIncomingValueForBlock(Block), Block, Phi.getParent(), BaseId);
     if (!Comparison) {
       LLVM_DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n");

diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index b42546e05ced..907408d0a9e6 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -317,7 +317,7 @@ static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                               StatepointLiveSetTy &out);
 
 // TODO: Once we can get to the GCStrategy, this becomes
-// Optional<bool> isGCManagedPointer(const Type *Ty) const override {
+// std::optional<bool> isGCManagedPointer(const Type *Ty) const override {
 
 static bool isGCPointerType(Type *T) {
   if (auto *PT = dyn_cast<PointerType>(T))

diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 0517d2f32a20..4aab88b74f10 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -191,7 +191,7 @@ struct VectorLayout {
 
 template <typename T>
 T getWithDefaultOverride(const cl::opt<T> &ClOption,
-                         const llvm::Optional<T> &DefaultOverride) {
+                         const std::optional<T> &DefaultOverride) {
   return ClOption.getNumOccurrences() ? ClOption
                                       : DefaultOverride.value_or(ClOption);
 }
@@ -235,8 +235,8 @@ class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
   void replaceUses(Instruction *Op, Value *CV);
   bool canTransferMetadata(unsigned Kind);
   void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
-  Optional<VectorLayout> getVectorLayout(Type *Ty, Align Alignment,
-                                         const DataLayout &DL);
+  std::optional<VectorLayout> getVectorLayout(Type *Ty, Align Alignment,
+                                              const DataLayout &DL);
   bool finish();
 
   template<typename T> bool splitUnary(Instruction &, const T &);
@@ -486,7 +486,7 @@ void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
 // Try to fill in Layout from Ty, returning true on success.  Alignment is
 // the alignment of the vector, or std::nullopt if the ABI default should be
 // used.
-Optional<VectorLayout>
+std::optional<VectorLayout>
 ScalarizerVisitor::getVectorLayout(Type *Ty, Align Alignment,
                                    const DataLayout &DL) {
   VectorLayout Layout;
@@ -920,7 +920,7 @@ bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
   if (!LI.isSimple())
     return false;
 
-  Optional<VectorLayout> Layout = getVectorLayout(
+  std::optional<VectorLayout> Layout = getVectorLayout(
       LI.getType(), LI.getAlign(), LI.getModule()->getDataLayout());
   if (!Layout)
     return false;
@@ -946,7 +946,7 @@ bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
     return false;
 
   Value *FullValue = SI.getValueOperand();
-  Optional<VectorLayout> Layout = getVectorLayout(
+  std::optional<VectorLayout> Layout = getVectorLayout(
       FullValue->getType(), SI.getAlign(), SI.getModule()->getDataLayout());
   if (!Layout)
     return false;

diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index 25c95d2a44ab..c9dec06c4316 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -122,9 +122,10 @@ namespace {
 struct NonTrivialUnswitchCandidate {
   Instruction *TI = nullptr;
   TinyPtrVector<Value *> Invariants;
-  Optional<InstructionCost> Cost;
-  NonTrivialUnswitchCandidate(Instruction *TI, ArrayRef<Value *> Invariants,
-                              Optional<InstructionCost> Cost = std::nullopt)
+  std::optional<InstructionCost> Cost;
+  NonTrivialUnswitchCandidate(
+      Instruction *TI, ArrayRef<Value *> Invariants,
+      std::optional<InstructionCost> Cost = std::nullopt)
       : TI(TI), Invariants(Invariants), Cost(Cost){};
 };
 } // end anonymous namespace.


        

