[llvm] 51b6857 - [Transforms,CodeGen] std::optional::value => operator*/operator->

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 16 15:21:33 PST 2022


Author: Fangrui Song
Date: 2022-12-16T23:21:27Z
New Revision: 51b685734b0e185bca9d0eec66b3bcb636ed9c02

URL: https://github.com/llvm/llvm-project/commit/51b685734b0e185bca9d0eec66b3bcb636ed9c02
DIFF: https://github.com/llvm/llvm-project/commit/51b685734b0e185bca9d0eec66b3bcb636ed9c02.diff

LOG: [Transforms,CodeGen] std::optional::value => operator*/operator->

value() has undesired exception-checking semantics and calls
__throw_bad_optional_access in libc++. Moreover, the API is unavailable without
_LIBCPP_NO_EXCEPTIONS on older Mach-O platforms (see
_LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS).
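
For illustration, a minimal standalone sketch (hypothetical function, not
taken from this patch) of the pattern being rewritten: value() goes through
libc++'s exception-checking path, while operator* assumes the optional is
engaged, a precondition the callers here establish by checking first.

  #include <optional>

  int widthOrZero(const std::optional<int> &W) {
    if (!W)
      return 0;
    // W.value() may call std::__throw_bad_optional_access in libc++ and is
    // marked unavailable on older Mach-O deployment targets. Dereferencing
    // involves no exception machinery; the check above guarantees that W
    // holds a value.
    return *W;
  }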

Added: 
    

Modified: 
    llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
    llvm/lib/CodeGen/ExpandVectorPredication.cpp
    llvm/lib/CodeGen/MachineBasicBlock.cpp
    llvm/lib/CodeGen/MachineFunctionSplitter.cpp
    llvm/lib/CodeGen/ModuloSchedule.cpp
    llvm/lib/CodeGen/SelectOptimize.cpp
    llvm/lib/CodeGen/TargetInstrInfo.cpp
    llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
    llvm/lib/Transforms/Scalar/GVN.cpp
    llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
    llvm/lib/Transforms/Scalar/LoopDistribute.cpp
    llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
    llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
    llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp
    llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index 806868131d6ca..1561fb4d3e5ce 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -344,7 +344,7 @@ class MemLocFragmentFill {
         return false; // B has fewer elements than A.
       if (AIt.start() != BIt.start() || AIt.stop() != BIt.stop())
        return false; // Interval is different.
-      if (AIt.value() != BIt.value())
+      if (*AIt != *BIt)
        return false; // Value at interval is different.
     }
     // AIt == AEnd. Check BIt is also now at end.
@@ -422,8 +422,8 @@ class MemLocFragmentFill {
         // [ r ]
         LLVM_DEBUG(dbgs() << "- a is contained within "
                           << toString(FirstOverlap));
-        if (AIt.value() && AIt.value() == FirstOverlap.value())
-          Result.insert(AIt.start(), AIt.stop(), AIt.value());
+        if (*AIt && *AIt == *FirstOverlap)
+          Result.insert(AIt.start(), AIt.stop(), *AIt);
       } else {
         // There's an overlap but `a` is not fully contained within
         // `b`. Shorten any end-point intersections.
@@ -435,8 +435,8 @@ class MemLocFragmentFill {
         if (IntersectStart) {
           LLVM_DEBUG(dbgs() << "- insert intersection of a and "
                             << toString(FirstOverlap));
-          if (AIt.value() && AIt.value() == FirstOverlap.value())
-            Result.insert(AIt.start(), FirstOverlap.stop(), AIt.value());
+          if (*AIt && *AIt == *FirstOverlap)
+            Result.insert(AIt.start(), FirstOverlap.stop(), *AIt);
           ++Next;
         }
         // [ - a - ]
@@ -446,8 +446,8 @@ class MemLocFragmentFill {
         if (IntersectEnd) {
           LLVM_DEBUG(dbgs() << "- insert intersection of a and "
                             << toString(LastOverlap));
-          if (AIt.value() && AIt.value() == LastOverlap.value())
-            Result.insert(LastOverlap.start(), AIt.stop(), AIt.value());
+          if (*AIt && *AIt == *LastOverlap)
+            Result.insert(LastOverlap.start(), AIt.stop(), *AIt);
         }
 
         // Insert all intervals in map `B` that are contained within interval
@@ -460,8 +460,8 @@ class MemLocFragmentFill {
                Next.stop() <= AIt.stop()) {
           LLVM_DEBUG(dbgs()
                      << "- insert intersection of a and " << toString(Next));
-          if (AIt.value() && AIt.value() == Next.value())
-            Result.insert(Next.start(), Next.stop(), Next.value());
+          if (*AIt && *AIt == *Next)
+            Result.insert(Next.start(), Next.stop(), *Next);
           ++Next;
         }
       }
@@ -653,12 +653,12 @@ class MemLocFragmentFill {
       auto EndBitOfOverlap = FirstOverlap.stop();
       FirstOverlap.setStop(StartBit);
       insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
-                   FirstOverlap.value(), VarLoc.DL);
+                   *FirstOverlap, VarLoc.DL);
 
       // Insert a new interval to represent the end part.
-      FragMap.insert(EndBit, EndBitOfOverlap, FirstOverlap.value());
-      insertMemLoc(BB, Before, Var, EndBit, EndBitOfOverlap,
-                   FirstOverlap.value(), VarLoc.DL);
+      FragMap.insert(EndBit, EndBitOfOverlap, *FirstOverlap);
+      insertMemLoc(BB, Before, Var, EndBit, EndBitOfOverlap, *FirstOverlap,
+                   VarLoc.DL);
 
       // Insert the new (middle) fragment now there is space.
       FragMap.insert(StartBit, EndBit, Base);
@@ -676,7 +676,7 @@ class MemLocFragmentFill {
         // Split off at the intersection.
         FirstOverlap.setStop(StartBit);
         insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
-                     FirstOverlap.value(), VarLoc.DL);
+                     *FirstOverlap, VarLoc.DL);
       }
       // [ - f - ]
       //      [ - i - ]
@@ -686,8 +686,8 @@ class MemLocFragmentFill {
         LLVM_DEBUG(dbgs() << "- Intersect interval at end\n");
         // Split off at the intersection.
         LastOverlap.setStart(EndBit);
-        insertMemLoc(BB, Before, Var, EndBit, LastOverlap.stop(),
-                     LastOverlap.value(), VarLoc.DL);
+        insertMemLoc(BB, Before, Var, EndBit, LastOverlap.stop(), *LastOverlap,
+                     VarLoc.DL);
       }
 
       LLVM_DEBUG(dbgs() << "- Erase intervals contained within\n");
@@ -1266,7 +1266,7 @@ void AssignmentTrackingLowering::emitDbgValue(
       // Copy the fragment info over from the value-expression to the new
       // DIExpression.
       if (auto OptFragInfo = Source->getExpression()->getFragmentInfo()) {
-        auto FragInfo = OptFragInfo.value();
+        auto FragInfo = *OptFragInfo;
         Expr = *DIExpression::createFragmentExpression(
             Expr, FragInfo.OffsetInBits, FragInfo.SizeInBits);
       }
@@ -1346,7 +1346,7 @@ void AssignmentTrackingLowering::processUntaggedInstruction(
       auto R = DIExpression::createFragmentExpression(DIE, Frag->OffsetInBits,
                                                       Frag->SizeInBits);
       assert(R && "unexpected createFragmentExpression failure");
-      DIE = R.value();
+      DIE = *R;
     }
     SmallVector<uint64_t, 3> Ops;
     if (Info.OffsetInBits)

diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index 23b70ce4a99fe..5ee76ff567fb7 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -423,7 +423,7 @@ CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
       StoreInst *NewStore =
           Builder.CreateStore(DataParam, PtrParam, /*IsVolatile*/ false);
       if (AlignOpt.has_value())
-        NewStore->setAlignment(AlignOpt.value());
+        NewStore->setAlignment(*AlignOpt);
       NewMemoryInst = NewStore;
     } else
       NewMemoryInst = Builder.CreateMaskedStore(
@@ -435,7 +435,7 @@ CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
       LoadInst *NewLoad =
           Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
       if (AlignOpt.has_value())
-        NewLoad->setAlignment(AlignOpt.value());
+        NewLoad->setAlignment(*AlignOpt);
       NewMemoryInst = NewLoad;
     } else
       NewMemoryInst = Builder.CreateMaskedLoad(

diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 7a377b49f67b5..ea522c6019432 100644
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -456,8 +456,8 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
 
   if (IrrLoopHeaderWeight && IsStandalone) {
     if (Indexes) OS << '\t';
-    OS.indent(2) << "; Irreducible loop header weight: "
-                 << IrrLoopHeaderWeight.value() << '\n';
+    OS.indent(2) << "; Irreducible loop header weight: " << *IrrLoopHeaderWeight
+                 << '\n';
   }
 }
 

diff --git a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
index 5d8139b3ee641..613c52900331e 100644
--- a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
+++ b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
@@ -187,8 +187,8 @@ bool MachineFunctionSplitter::runOnMachineFunction(MachineFunction &MF) {
   // We don't want to proceed further for cold functions
   // or functions of unknown hotness. Lukewarm functions have no prefix.
   std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
-  if (SectionPrefix && (SectionPrefix.value().equals("unlikely") ||
-                        SectionPrefix.value().equals("unknown"))) {
+  if (SectionPrefix &&
+      (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
     return false;
   }
 

diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index 20d251715bd9a..a0058825e9a36 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -1470,7 +1470,7 @@ Register KernelRewriter::phi(Register LoopReg, std::optional<Register> InitReg,
                              const TargetRegisterClass *RC) {
   // If the init register is not undef, try and find an existing phi.
   if (InitReg) {
-    auto I = Phis.find({LoopReg, InitReg.value()});
+    auto I = Phis.find({LoopReg, *InitReg});
     if (I != Phis.end())
       return I->second;
   } else {
@@ -1491,10 +1491,10 @@ Register KernelRewriter::phi(Register LoopReg, std::optional<Register> InitReg,
       return R;
     // Found a phi taking undef as input, so rewrite it to take InitReg.
     MachineInstr *MI = MRI.getVRegDef(R);
-    MI->getOperand(1).setReg(InitReg.value());
-    Phis.insert({{LoopReg, InitReg.value()}, R});
+    MI->getOperand(1).setReg(*InitReg);
+    Phis.insert({{LoopReg, *InitReg}, R});
     const TargetRegisterClass *ConstrainRegClass =
-        MRI.constrainRegClass(R, MRI.getRegClass(InitReg.value()));
+        MRI.constrainRegClass(R, MRI.getRegClass(*InitReg));
     assert(ConstrainRegClass && "Expected a valid constrained register class!");
     (void)ConstrainRegClass;
     UndefPhis.erase(I);

diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp
index 8c20406b87915..5fd78eccf7323 100644
--- a/llvm/lib/CodeGen/SelectOptimize.cpp
+++ b/llvm/lib/CodeGen/SelectOptimize.cpp
@@ -921,8 +921,8 @@ bool SelectOptimize::computeLoopCosts(
           EmitAndPrintRemark(ORE, ORmissL);
           return false;
         }
-        IPredCost += Scaled64::get(ILatency.value());
-        INonPredCost += Scaled64::get(ILatency.value());
+        IPredCost += Scaled64::get(*ILatency);
+        INonPredCost += Scaled64::get(*ILatency);
 
         // For a select that can be converted to branch,
         // compute its cost as a branch (non-predicated cost).

diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 66b35c6a01ee9..5e21e72f77255 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -834,7 +834,7 @@ TargetInstrInfo::getReassociationOpcodes(MachineCombinerPattern Pattern,
   assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
          "Incorrectly matched pattern");
   unsigned AssocCommutOpcode = Root.getOpcode();
-  unsigned InverseOpcode = getInverseOpcode(Root.getOpcode()).value();
+  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
   if (!AssocCommutRoot)
     std::swap(AssocCommutOpcode, InverseOpcode);
 

diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index a8b37e48d272e..6b0cc1f1ee5c9 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -609,9 +609,9 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
             C2->ConstInt->getValue(), ConstCand->ConstInt->getValue());
         if (Diff) {
           const InstructionCost ImmCosts =
-              TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.value(), Ty);
+              TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, *Diff, Ty);
           Cost -= ImmCosts;
-          LLVM_DEBUG(dbgs() << "Offset " << Diff.value() << " "
+          LLVM_DEBUG(dbgs() << "Offset " << *Diff << " "
                             << "has penalty: " << ImmCosts << "\n"
                             << "Adjusted cost: " << Cost << "\n");
         }

diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 1d13157a61717..3996613aded9a 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -764,14 +764,14 @@ void GVNPass::printPipeline(
 
   OS << "<";
   if (Options.AllowPRE != std::nullopt)
-    OS << (Options.AllowPRE.value() ? "" : "no-") << "pre;";
+    OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
   if (Options.AllowLoadPRE != std::nullopt)
-    OS << (Options.AllowLoadPRE.value() ? "" : "no-") << "load-pre;";
+    OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
   if (Options.AllowLoadPRESplitBackedge != std::nullopt)
-    OS << (Options.AllowLoadPRESplitBackedge.value() ? "" : "no-")
+    OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
        << "split-backedge-load-pre;";
   if (Options.AllowMemDep != std::nullopt)
-    OS << (Options.AllowMemDep.value() ? "" : "no-") << "memdep";
+    OS << (*Options.AllowMemDep ? "" : "no-") << "memdep";
   OS << ">";
 }
 

diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 17d9314804c73..933d8d1e14804 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -1717,7 +1717,7 @@ IntersectSignedRange(ScalarEvolution &SE,
     return std::nullopt;
   if (!R1)
     return R2;
-  auto &R1Value = R1.value();
+  auto &R1Value = *R1;
   // We never return empty ranges from this function, and R1 is supposed to be
   // a result of intersection. Thus, R1 is never empty.
   assert(!R1Value.isEmpty(SE, /* IsSigned */ true) &&
@@ -1746,7 +1746,7 @@ IntersectUnsignedRange(ScalarEvolution &SE,
     return std::nullopt;
   if (!R1)
     return R2;
-  auto &R1Value = R1.value();
+  auto &R1Value = *R1;
   // We never return empty ranges from this function, and R1 is supposed to be
   // a result of intersection. Thus, R1 is never empty.
   assert(!R1Value.isEmpty(SE, /* IsSigned */ false) &&
@@ -1956,13 +1956,12 @@ bool InductiveRangeCheckElimination::run(
     auto Result = IRC.computeSafeIterationSpace(SE, IndVar,
                                                 LS.IsSignedPredicate);
     if (Result) {
-      auto MaybeSafeIterRange =
-          IntersectRange(SE, SafeIterRange, Result.value());
+      auto MaybeSafeIterRange = IntersectRange(SE, SafeIterRange, *Result);
       if (MaybeSafeIterRange) {
-        assert(!MaybeSafeIterRange.value().isEmpty(SE, LS.IsSignedPredicate) &&
+        assert(!MaybeSafeIterRange->isEmpty(SE, LS.IsSignedPredicate) &&
                "We should never return empty ranges!");
         RangeChecksToEliminate.push_back(IRC);
-        SafeIterRange = MaybeSafeIterRange.value();
+        SafeIterRange = *MaybeSafeIterRange;
       }
     }
   }
@@ -1970,7 +1969,7 @@ bool InductiveRangeCheckElimination::run(
   if (!SafeIterRange)
     return false;
 
-  LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT, SafeIterRange.value());
+  LoopConstrainer LC(*L, LI, LPMAddNewLoop, LS, SE, DT, *SafeIterRange);
   bool Changed = LC.run();
 
   if (Changed) {

diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index e6322f160cf5f..7b52b7dca85f9 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -599,7 +599,7 @@ class InstPartitionContainer {
                              : LLVMLoopDistributeFollowupCoincident});
     if (PartitionID) {
       Loop *NewLoop = Part->getDistributedLoop();
-      NewLoop->setLoopID(PartitionID.value());
+      NewLoop->setLoopID(*PartitionID);
     }
   }
 };
@@ -819,12 +819,10 @@ class LoopDistributeForLoop {
       // The unversioned loop will not be changed, so we inherit all attributes
       // from the original loop, but remove the loop distribution metadata to
       // avoid to distribute it again.
-      MDNode *UnversionedLoopID =
-          makeFollowupLoopID(OrigLoopID,
-                             {LLVMLoopDistributeFollowupAll,
-                              LLVMLoopDistributeFollowupFallback},
-                             "llvm.loop.distribute.", true)
-              .value();
+      MDNode *UnversionedLoopID = *makeFollowupLoopID(
+          OrigLoopID,
+          {LLVMLoopDistributeFollowupAll, LLVMLoopDistributeFollowupFallback},
+          "llvm.loop.distribute.", true);
       LVer.getNonVersionedLoop()->setLoopID(UnversionedLoopID);
     }
 

diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index f1762205ab1c4..85e2befce0d10 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1482,7 +1482,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
     // anything where the alignment isn't at least the element size.
     assert((StoreAlign && LoadAlign) &&
            "Expect unordered load/store to have align.");
-    if (StoreAlign.value() < StoreSize || LoadAlign.value() < StoreSize)
+    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
       return Changed;
 
     // If the element.atomic memcpy is not lowered into explicit
@@ -1496,9 +1496,8 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
     // Note that unordered atomic loads/stores are *required* by the spec to
     // have an alignment but non-atomic loads/stores may not.
     NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
-        StoreBasePtr, StoreAlign.value(), LoadBasePtr, LoadAlign.value(),
-        NumBytes, StoreSize, AATags.TBAA, AATags.TBAAStruct, AATags.Scope,
-        AATags.NoAlias);
+        StoreBasePtr, *StoreAlign, LoadBasePtr, *LoadAlign, NumBytes, StoreSize,
+        AATags.TBAA, AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
   }
   NewCall->setDebugLoc(TheStore->getDebugLoc());
 

diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 502c2a8d44fe3..c01c5964c24fa 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -6438,9 +6438,8 @@ static bool SalvageDVI(llvm::Loop *L, ScalarEvolution &SE,
     // less DWARF ops than an iteration count-based expression.
     if (std::optional<APInt> Offset =
             SE.computeConstantDifference(DVIRec.SCEVs[i], SCEVInductionVar)) {
-      if (Offset.value().getMinSignedBits() <= 64)
-        SalvageExpr->createOffsetExpr(Offset.value().getSExtValue(),
-                                      LSRInductionVar);
+      if (Offset->getMinSignedBits() <= 64)
+        SalvageExpr->createOffsetExpr(Offset->getSExtValue(), LSRInductionVar);
     } else if (!SalvageExpr->createIterCountExpr(DVIRec.SCEVs[i], IterCountExpr,
                                                  SE))
       return false;

diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp
index 118f148c86e68..0ae26b494c5a8 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollAndJamPass.cpp
@@ -371,7 +371,7 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
       OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
                         LLVMLoopUnrollAndJamFollowupRemainderInner});
   if (NewInnerEpilogueLoopID)
-    SubLoop->setLoopID(NewInnerEpilogueLoopID.value());
+    SubLoop->setLoopID(*NewInnerEpilogueLoopID);
 
   // Find trip count and trip multiple
   BasicBlock *Latch = L->getLoopLatch();
@@ -401,14 +401,14 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
         OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
                           LLVMLoopUnrollAndJamFollowupRemainderOuter});
     if (NewOuterEpilogueLoopID)
-      EpilogueOuterLoop->setLoopID(NewOuterEpilogueLoopID.value());
+      EpilogueOuterLoop->setLoopID(*NewOuterEpilogueLoopID);
   }
 
   std::optional<MDNode *> NewInnerLoopID =
       makeFollowupLoopID(OrigOuterLoopID, {LLVMLoopUnrollAndJamFollowupAll,
                                            LLVMLoopUnrollAndJamFollowupInner});
   if (NewInnerLoopID)
-    SubLoop->setLoopID(NewInnerLoopID.value());
+    SubLoop->setLoopID(*NewInnerLoopID);
   else
     SubLoop->setLoopID(OrigSubLoopID);
 
@@ -417,7 +417,7 @@ tryToUnrollAndJamLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
         OrigOuterLoopID,
         {LLVMLoopUnrollAndJamFollowupAll, LLVMLoopUnrollAndJamFollowupOuter});
     if (NewOuterLoopID) {
-      L->setLoopID(NewOuterLoopID.value());
+      L->setLoopID(*NewOuterLoopID);
 
       // Do not setLoopAlreadyUnrolled if a followup was given.
       return UnrollResult;

diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 8d10a5f6eb97f..34ac98fee4758 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -1328,7 +1328,7 @@ tryToUnrollLoop(Loop *L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution &SE,
         makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll,
                                         LLVMLoopUnrollFollowupRemainder});
     if (RemainderLoopID)
-      RemainderLoop->setLoopID(RemainderLoopID.value());
+      RemainderLoop->setLoopID(*RemainderLoopID);
   }
 
   if (UnrollResult != LoopUnrollResult::FullyUnrolled) {
@@ -1336,7 +1336,7 @@ tryToUnrollLoop(Loop *L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution &SE,
         makeFollowupLoopID(OrigLoopID, {LLVMLoopUnrollFollowupAll,
                                         LLVMLoopUnrollFollowupUnrolled});
     if (NewLoopID) {
-      L->setLoopID(NewLoopID.value());
+      L->setLoopID(*NewLoopID);
 
       // Do not setLoopAlreadyUnrolled if loop attributes have been specified
       // explicitly.
@@ -1652,15 +1652,15 @@ void LoopUnrollPass::printPipeline(
       OS, MapClassName2PassName);
   OS << "<";
   if (UnrollOpts.AllowPartial != std::nullopt)
-    OS << (UnrollOpts.AllowPartial.value() ? "" : "no-") << "partial;";
+    OS << (*UnrollOpts.AllowPartial ? "" : "no-") << "partial;";
   if (UnrollOpts.AllowPeeling != std::nullopt)
-    OS << (UnrollOpts.AllowPeeling.value() ? "" : "no-") << "peeling;";
+    OS << (*UnrollOpts.AllowPeeling ? "" : "no-") << "peeling;";
   if (UnrollOpts.AllowRuntime != std::nullopt)
-    OS << (UnrollOpts.AllowRuntime.value() ? "" : "no-") << "runtime;";
+    OS << (*UnrollOpts.AllowRuntime ? "" : "no-") << "runtime;";
   if (UnrollOpts.AllowUpperBound != std::nullopt)
-    OS << (UnrollOpts.AllowUpperBound.value() ? "" : "no-") << "upperbound;";
+    OS << (*UnrollOpts.AllowUpperBound ? "" : "no-") << "upperbound;";
   if (UnrollOpts.AllowProfileBasedPeeling != std::nullopt)
-    OS << (UnrollOpts.AllowProfileBasedPeeling.value() ? "" : "no-")
+    OS << (*UnrollOpts.AllowProfileBasedPeeling ? "" : "no-")
        << "profile-peeling;";
   if (UnrollOpts.FullUnrollMaxCount != std::nullopt)
     OS << "full-unroll-max=" << UnrollOpts.FullUnrollMaxCount << ";";


        

