[llvm] 19aff0f - [Analysis] Use std::nullopt instead of None (NFC)
Kazu Hirata via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 2 19:43:11 PST 2022
Author: Kazu Hirata
Date: 2022-12-02T19:43:04-08:00
New Revision: 19aff0f37dd68ee51e78b764c0ce629ae73d1eef
URL: https://github.com/llvm/llvm-project/commit/19aff0f37dd68ee51e78b764c0ce629ae73d1eef
DIFF: https://github.com/llvm/llvm-project/commit/19aff0f37dd68ee51e78b764c0ce629ae73d1eef.diff
LOG: [Analysis] Use std::nullopt instead of None (NFC)
This patch mechanically replaces None with std::nullopt where the
compiler would warn if None were deprecated. The intent is to reduce
the amount of manual work required in migrating from Optional to
std::optional.
This is part of an effort to migrate from llvm::Optional to
std::optional:
https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716
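As a purely illustrative aside (not part of the patch), the rewrite looks
like this in practice. A minimal sketch, assuming a hypothetical lookup
helper: the Optional return type stays as-is, and only the spelling of the
empty state changes, which is what makes the replacement mechanical:

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/ADT/Optional.h"
  #include <optional>

  // Return the value mapped to Key, or an empty Optional if absent.
  llvm::Optional<unsigned> lookup(const llvm::DenseMap<int, unsigned> &Map,
                                  int Key) {
    auto It = Map.find(Key);
    if (It == Map.end())
      return std::nullopt; // was: return llvm::None;
    return It->second;
  }

This compiles because, earlier in this migration series, llvm::Optional was
taught to accept std::nullopt wherever None was accepted, so each call site
can be switched independently.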
Added:
Modified:
llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
llvm/include/llvm/Analysis/InlineAdvisor.h
llvm/include/llvm/Analysis/InlineCost.h
llvm/include/llvm/Analysis/LoopAccessAnalysis.h
llvm/include/llvm/Analysis/LoopCacheAnalysis.h
llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
llvm/include/llvm/Analysis/ObjCARCUtil.h
llvm/include/llvm/Analysis/ScalarEvolution.h
llvm/include/llvm/Analysis/TargetLibraryInfo.h
llvm/include/llvm/Analysis/TargetTransformInfo.h
llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
llvm/include/llvm/Analysis/ValueLattice.h
llvm/lib/Analysis/AliasAnalysis.cpp
llvm/lib/Analysis/AliasAnalysisSummary.cpp
llvm/lib/Analysis/BasicAliasAnalysis.cpp
llvm/lib/Analysis/BlockFrequencyInfo.cpp
llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
llvm/lib/Analysis/BranchProbabilityInfo.cpp
llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
llvm/lib/Analysis/InlineAdvisor.cpp
llvm/lib/Analysis/InlineCost.cpp
llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
llvm/lib/Analysis/LazyValueInfo.cpp
llvm/lib/Analysis/Lint.cpp
llvm/lib/Analysis/LoopAccessAnalysis.cpp
llvm/lib/Analysis/LoopCacheAnalysis.cpp
llvm/lib/Analysis/LoopInfo.cpp
llvm/lib/Analysis/LoopNestAnalysis.cpp
llvm/lib/Analysis/MemoryBuiltins.cpp
llvm/lib/Analysis/MemorySSA.cpp
llvm/lib/Analysis/OptimizationRemarkEmitter.cpp
llvm/lib/Analysis/ProfileSummaryInfo.cpp
llvm/lib/Analysis/ReplayInlineAdvisor.cpp
llvm/lib/Analysis/ScalarEvolution.cpp
llvm/lib/Analysis/StratifiedSets.h
llvm/lib/Analysis/TensorSpec.cpp
llvm/lib/Analysis/VFABIDemangling.cpp
llvm/lib/Analysis/ValueTracking.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
index fd14104c2395e..4bfdda247c52d 100644
--- a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
+++ b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
@@ -893,7 +893,7 @@ class IRSimilarityCandidate {
assert(V != nullptr && "Value is a nullptr?");
DenseMap<Value *, unsigned>::iterator VNIt = ValueToNumber.find(V);
if (VNIt == ValueToNumber.end())
- return None;
+ return std::nullopt;
return VNIt->second;
}
@@ -904,7 +904,7 @@ class IRSimilarityCandidate {
Optional<Value *> fromGVN(unsigned Num) {
DenseMap<unsigned, Value *>::iterator VNIt = NumberToValue.find(Num);
if (VNIt == NumberToValue.end())
- return None;
+ return std::nullopt;
assert(VNIt->second != nullptr && "Found value is a nullptr!");
return VNIt->second;
}
@@ -918,7 +918,7 @@ class IRSimilarityCandidate {
Optional<unsigned> getCanonicalNum(unsigned N) {
DenseMap<unsigned, unsigned>::iterator NCIt = NumberToCanonNum.find(N);
if (NCIt == NumberToCanonNum.end())
- return None;
+ return std::nullopt;
return NCIt->second;
}
@@ -931,7 +931,7 @@ class IRSimilarityCandidate {
Optional<unsigned> fromCanonicalNum(unsigned N) {
DenseMap<unsigned, unsigned>::iterator CNIt = CanonNumToNumber.find(N);
if (CNIt == CanonNumToNumber.end())
- return None;
+ return std::nullopt;
return CNIt->second;
}
diff --git a/llvm/include/llvm/Analysis/InlineAdvisor.h b/llvm/include/llvm/Analysis/InlineAdvisor.h
index 7d45798758cd6..42063ea3887c4 100644
--- a/llvm/include/llvm/Analysis/InlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/InlineAdvisor.h
@@ -200,7 +200,7 @@ class InlineAdvisor {
protected:
InlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
- Optional<InlineContext> IC = llvm::None);
+ Optional<InlineContext> IC = std::nullopt);
virtual std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) = 0;
virtual std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
bool Advice);
diff --git a/llvm/include/llvm/Analysis/InlineCost.h b/llvm/include/llvm/Analysis/InlineCost.h
index 731e79f8de3ff..3f246cc3ea5be 100644
--- a/llvm/include/llvm/Analysis/InlineCost.h
+++ b/llvm/include/llvm/Analysis/InlineCost.h
@@ -103,12 +103,12 @@ class InlineCost {
const char *Reason = nullptr;
/// The cost-benefit pair computed by cost-benefit analysis.
- Optional<CostBenefitPair> CostBenefit = None;
+ Optional<CostBenefitPair> CostBenefit = std::nullopt;
// Trivial constructor, interesting logic in the factory functions below.
InlineCost(int Cost, int Threshold, int StaticBonusApplied,
const char *Reason = nullptr,
- Optional<CostBenefitPair> CostBenefit = None)
+ Optional<CostBenefitPair> CostBenefit = std::nullopt)
: Cost(Cost), Threshold(Threshold),
StaticBonusApplied(StaticBonusApplied), Reason(Reason),
CostBenefit(CostBenefit) {
@@ -122,12 +122,14 @@ class InlineCost {
assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
return InlineCost(Cost, Threshold, StaticBonus);
}
- static InlineCost getAlways(const char *Reason,
- Optional<CostBenefitPair> CostBenefit = None) {
+ static InlineCost
+ getAlways(const char *Reason,
+ Optional<CostBenefitPair> CostBenefit = std::nullopt) {
return InlineCost(AlwaysInlineCost, 0, 0, Reason, CostBenefit);
}
- static InlineCost getNever(const char *Reason,
- Optional<CostBenefitPair> CostBenefit = None) {
+ static InlineCost
+ getNever(const char *Reason,
+ Optional<CostBenefitPair> CostBenefit = std::nullopt) {
return InlineCost(NeverInlineCost, 0, 0, Reason, CostBenefit);
}
diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index 9a3c06bb8da21..c57904ebc8171 100644
--- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -455,7 +455,7 @@ class RuntimePointerChecking {
// None if pointer-difference checks cannot be used.
std::optional<ArrayRef<PointerDiffInfo>> getDiffChecks() const {
if (!CanUseDiffCheck)
- return None;
+ return std::nullopt;
return {DiffChecks};
}
diff --git a/llvm/include/llvm/Analysis/LoopCacheAnalysis.h b/llvm/include/llvm/Analysis/LoopCacheAnalysis.h
index 2b5e68f7c9cf7..c6d4382533f48 100644
--- a/llvm/include/llvm/Analysis/LoopCacheAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopCacheAnalysis.h
@@ -200,7 +200,7 @@ class CacheCost {
/// classified to have temporal reuse.
CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI, ScalarEvolution &SE,
TargetTransformInfo &TTI, AAResults &AA, DependenceInfo &DI,
- Optional<unsigned> TRT = None);
+ Optional<unsigned> TRT = std::nullopt);
/// Create a CacheCost for the loop nest rooted by \p Root.
/// The optional parameter \p TRT can be used to specify the max. distance
@@ -208,7 +208,7 @@ class CacheCost {
/// classified to have temporal reuse.
static std::unique_ptr<CacheCost>
getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR, DependenceInfo &DI,
- Optional<unsigned> TRT = None);
+ Optional<unsigned> TRT = std::nullopt);
/// Return the estimated cost of loop \p L if the given loop is part of the
/// loop nest associated with this object. Return -1 otherwise.
diff --git a/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h b/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 64a635ebb4cc7..8130a7c4c0a17 100644
--- a/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -475,7 +475,7 @@ class MemoryDependenceResults {
const auto Off = ClobberOffsets.find(DepInst);
if (Off != ClobberOffsets.end())
return Off->getSecond();
- return None;
+ return std::nullopt;
}
private:
diff --git a/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
index 3e1a892a4a542..1cefc33d8b1fb 100644
--- a/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
+++ b/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -241,9 +241,9 @@ class ARCMDKindCache {
public:
void init(Module *Mod) {
M = Mod;
- ImpreciseReleaseMDKind = llvm::None;
- CopyOnEscapeMDKind = llvm::None;
- NoObjCARCExceptionsMDKind = llvm::None;
+ ImpreciseReleaseMDKind = std::nullopt;
+ CopyOnEscapeMDKind = std::nullopt;
+ NoObjCARCExceptionsMDKind = std::nullopt;
}
unsigned get(ARCMDKindID ID) {
diff --git a/llvm/include/llvm/Analysis/ObjCARCUtil.h b/llvm/include/llvm/Analysis/ObjCARCUtil.h
index 56faa20c4c6e9..913fb3e428f57 100644
--- a/llvm/include/llvm/Analysis/ObjCARCUtil.h
+++ b/llvm/include/llvm/Analysis/ObjCARCUtil.h
@@ -43,7 +43,7 @@ inline bool hasAttachedCallOpBundle(const CallBase *CB) {
inline Optional<Function *> getAttachedARCFunction(const CallBase *CB) {
auto B = CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall);
if (!B)
- return None;
+ return std::nullopt;
return cast<Function>(B->Inputs[0]);
}
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 5ea4bf83c9a35..a180cf5783dbf 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1334,10 +1334,9 @@ class ScalarEvolution {
/// as arguments and asserts enforce that internally.
/*implicit*/ ExitLimit(const SCEV *E);
- ExitLimit(
- const SCEV *E, const SCEV *ConstantMaxNotTaken, bool MaxOrZero,
- ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList =
- None);
+ ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken, bool MaxOrZero,
+ ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *>
+ PredSetList = std::nullopt);
ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken, bool MaxOrZero,
const SmallPtrSetImpl<const SCEVPredicate *> &PredSet);
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
index 5b200e05218dc..83e06a52e1e39 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
@@ -236,7 +236,7 @@ class TargetLibraryInfo {
public:
explicit TargetLibraryInfo(const TargetLibraryInfoImpl &Impl,
- std::optional<const Function *> F = None)
+ std::optional<const Function *> F = std::nullopt)
: Impl(&Impl), OverrideAsUnavailable(NumLibFuncs) {
if (!F)
return;
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 6aa78c9d562ea..522230903834a 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1110,10 +1110,11 @@ class TargetTransformInfo {
/// cases, like in broadcast loads.
/// NOTE: For subvector extractions Tp represents the source type.
InstructionCost
- getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask = None,
+ getShuffleCost(ShuffleKind Kind, VectorType *Tp,
+ ArrayRef<int> Mask = std::nullopt,
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
int Index = 0, VectorType *SubTp = nullptr,
- ArrayRef<const Value *> Args = None) const;
+ ArrayRef<const Value *> Args = std::nullopt) const;
/// Represents a hint about the context in which a cast is used.
///
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index e814d86c1f7b8..ec98bc40e1691 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -516,11 +516,10 @@ class TargetTransformInfoImplBase {
return 1;
}
- InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Ty,
- ArrayRef<int> Mask,
- TTI::TargetCostKind CostKind, int Index,
- VectorType *SubTp,
- ArrayRef<const Value *> Args = None) const {
+ InstructionCost
+ getShuffleCost(TTI::ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask,
+ TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
+ ArrayRef<const Value *> Args = std::nullopt) const {
return 1;
}
diff --git a/llvm/include/llvm/Analysis/ValueLattice.h b/llvm/include/llvm/Analysis/ValueLattice.h
index 8bf6b2a095f6c..cd2d731321c7a 100644
--- a/llvm/include/llvm/Analysis/ValueLattice.h
+++ b/llvm/include/llvm/Analysis/ValueLattice.h
@@ -281,7 +281,7 @@ class ValueLatticeElement {
} else if (isConstantRange() && getConstantRange().isSingleElement()) {
return *getConstantRange().getSingleElement();
}
- return None;
+ return std::nullopt;
}
bool markOverdefined() {
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 4f27acade05a4..f3faae9eb486a 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -603,7 +603,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
ModRefInfo AAResults::getModRefInfo(const Instruction *I,
const Optional<MemoryLocation> &OptLoc,
AAQueryInfo &AAQIP) {
- if (OptLoc == None) {
+ if (OptLoc == std::nullopt) {
if (const auto *Call = dyn_cast<CallBase>(I))
return getMemoryEffects(Call, AAQIP).getModRef();
}
diff --git a/llvm/lib/Analysis/AliasAnalysisSummary.cpp b/llvm/lib/Analysis/AliasAnalysisSummary.cpp
index d9c5732da1f33..1627c403cc8fa 100644
--- a/llvm/lib/Analysis/AliasAnalysisSummary.cpp
+++ b/llvm/lib/Analysis/AliasAnalysisSummary.cpp
@@ -79,17 +79,17 @@ Optional<InstantiatedValue> instantiateInterfaceValue(InterfaceValue IValue,
auto *V = (Index == 0) ? &Call : Call.getArgOperand(Index - 1);
if (V->getType()->isPointerTy())
return InstantiatedValue{V, IValue.DerefLevel};
- return None;
+ return std::nullopt;
}
Optional<InstantiatedRelation>
instantiateExternalRelation(ExternalRelation ERelation, CallBase &Call) {
auto From = instantiateInterfaceValue(ERelation.From, Call);
if (!From)
- return None;
+ return std::nullopt;
auto To = instantiateInterfaceValue(ERelation.To, Call);
if (!To)
- return None;
+ return std::nullopt;
return InstantiatedRelation{*From, *To, ERelation.Offset};
}
@@ -97,7 +97,7 @@ Optional<InstantiatedAttr> instantiateExternalAttribute(ExternalAttribute EAttr,
CallBase &Call) {
auto Value = instantiateInterfaceValue(EAttr.IValue, Call);
if (!Value)
- return None;
+ return std::nullopt;
return InstantiatedAttr{*Value, EAttr.Attr};
}
}
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 3881bebb3eaa3..e70be0f20d4c0 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -574,7 +574,7 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
// Track whether we've seen at least one in bounds gep, and if so, whether
// all geps parsed were in bounds.
- if (Decomposed.InBounds == None)
+ if (Decomposed.InBounds == std::nullopt)
Decomposed.InBounds = GEPOp->isInBounds();
else if (!GEPOp->isInBounds())
Decomposed.InBounds = false;
diff --git a/llvm/lib/Analysis/BlockFrequencyInfo.cpp b/llvm/lib/Analysis/BlockFrequencyInfo.cpp
index 436b017640339..c21c46b2f40c7 100644
--- a/llvm/lib/Analysis/BlockFrequencyInfo.cpp
+++ b/llvm/lib/Analysis/BlockFrequencyInfo.cpp
@@ -208,7 +208,7 @@ Optional<uint64_t>
BlockFrequencyInfo::getBlockProfileCount(const BasicBlock *BB,
bool AllowSynthetic) const {
if (!BFI)
- return None;
+ return std::nullopt;
return BFI->getBlockProfileCount(*getFunction(), BB, AllowSynthetic);
}
@@ -216,7 +216,7 @@ BlockFrequencyInfo::getBlockProfileCount(const BasicBlock *BB,
Optional<uint64_t>
BlockFrequencyInfo::getProfileCountFromFreq(uint64_t Freq) const {
if (!BFI)
- return None;
+ return std::nullopt;
return BFI->getProfileCountFromFreq(*getFunction(), Freq);
}
diff --git a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
index ec8d318b675bc..be4a602076b25 100644
--- a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -599,7 +599,7 @@ BlockFrequencyInfoImplBase::getProfileCountFromFreq(const Function &F,
bool AllowSynthetic) const {
auto EntryCount = F.getEntryCount(AllowSynthetic);
if (!EntryCount)
- return None;
+ return std::nullopt;
// Use 128 bit APInt to do the arithmetic to avoid overflow.
APInt BlockCount(128, EntryCount->getCount());
APInt BlockFreq(128, Freq);
diff --git a/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 50fdf57e01dbc..1d4159a37b2cc 100644
--- a/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -651,7 +651,7 @@ Optional<uint32_t>
BranchProbabilityInfo::getEstimatedBlockWeight(const BasicBlock *BB) const {
auto WeightIt = EstimatedBlockWeight.find(BB);
if (WeightIt == EstimatedBlockWeight.end())
- return None;
+ return std::nullopt;
return WeightIt->second;
}
@@ -659,7 +659,7 @@ Optional<uint32_t>
BranchProbabilityInfo::getEstimatedLoopWeight(const LoopData &L) const {
auto WeightIt = EstimatedLoopWeight.find(L);
if (WeightIt == EstimatedLoopWeight.end())
- return None;
+ return std::nullopt;
return WeightIt->second;
}
@@ -682,7 +682,7 @@ Optional<uint32_t> BranchProbabilityInfo::getMaxEstimatedEdgeWeight(
auto Weight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});
if (!Weight)
- return None;
+ return std::nullopt;
if (!MaxWeight || *MaxWeight < *Weight)
MaxWeight = Weight;
@@ -805,7 +805,7 @@ Optional<uint32_t> BranchProbabilityInfo::getInitialEstimatedBlockWeight(
if (CI->hasFnAttr(Attribute::Cold))
return static_cast<uint32_t>(BlockExecWeight::COLD);
- return None;
+ return std::nullopt;
}
// Does RPO traversal over all blocks in \p F and assigns weights to
diff --git a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
index d9afdd019a41f..9c875678848e1 100644
--- a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -365,7 +365,7 @@ getInterfaceValue(InstantiatedValue IValue,
if (Index)
return InterfaceValue{*Index, IValue.DerefLevel};
- return None;
+ return std::nullopt;
}
static void populateAttrMap(DenseMap<const Value *, AliasAttrs> &AttrMap,
@@ -515,7 +515,7 @@ CFLAndersAAResult::FunctionInfo::getAttrs(const Value *V) const {
auto Itr = AttrMap.find(V);
if (Itr != AttrMap.end())
return Itr->second;
- return None;
+ return std::nullopt;
}
bool CFLAndersAAResult::FunctionInfo::mayAlias(
@@ -631,7 +631,7 @@ static std::optional<InstantiatedValue> getNodeBelow(const CFLGraph &Graph,
auto NodeBelow = InstantiatedValue{V.Val, V.DerefLevel + 1};
if (Graph.getNode(NodeBelow))
return NodeBelow;
- return None;
+ return std::nullopt;
}
static void processWorkListItem(const WorkListItem &Item, const CFLGraph &Graph,
diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp
index e4335eff83f9a..ae57a9754c66b 100644
--- a/llvm/lib/Analysis/InlineAdvisor.cpp
+++ b/llvm/lib/Analysis/InlineAdvisor.cpp
@@ -402,7 +402,7 @@ llvm::shouldInline(CallBase &CB,
});
}
setInlineRemark(CB, inlineCostStr(IC));
- return None;
+ return std::nullopt;
}
int TotalSecondaryCost = 0;
@@ -419,7 +419,7 @@ llvm::shouldInline(CallBase &CB,
<< "' in other contexts";
});
setInlineRemark(CB, "deferred");
- return None;
+ return std::nullopt;
}
LLVM_DEBUG(dbgs() << " Inlining " << inlineCostStr(IC) << ", Call: " << CB
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 96b733d82d7fd..9a5f985acf071 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -168,7 +168,7 @@ Optional<int> getStringFnAttrAsInt(const Attribute &Attr) {
if (!Attr.getValueAsString().getAsInteger(10, AttrValue))
return AttrValue;
}
- return None;
+ return std::nullopt;
}
Optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind) {
@@ -493,7 +493,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
std::optional<Constant *> getSimplifiedValue(Instruction *I) {
if (SimplifiedValues.find(I) != SimplifiedValues.end())
return SimplifiedValues[I];
- return None;
+ return std::nullopt;
}
// Keep a bunch of stats about the cost savings found so we can print them
@@ -584,7 +584,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
bool DecidedByCostBenefit = false;
// The cost-benefit pair computed by cost-benefit analysis.
- Optional<CostBenefitPair> CostBenefit = None;
+ Optional<CostBenefitPair> CostBenefit = std::nullopt;
bool SingleBB = true;
@@ -817,14 +817,14 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
// suficient profiling information to determine.
std::optional<bool> costBenefitAnalysis() {
if (!CostBenefitAnalysisEnabled)
- return None;
+ return std::nullopt;
// buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to 0
// for the prelink phase of the AutoFDO + ThinLTO build. Honor the logic by
// falling back to the cost-based metric.
// TODO: Improve this hacky condition.
if (Threshold == 0)
- return None;
+ return std::nullopt;
assert(GetBFI);
BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
@@ -1056,7 +1056,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
return InstructionCostDetailMap[I];
- return None;
+ return std::nullopt;
}
virtual ~InlineCostCallAnalyzer() = default;
@@ -1793,7 +1793,7 @@ InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
// Otherwise we need BFI to be available and to have a locally hot callsite
// threshold.
if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
- return None;
+ return std::nullopt;
// Determine if the callsite is hot relative to caller's entry. We could
// potentially cache the computation of scaled entry frequency, but the added
@@ -1806,7 +1806,7 @@ InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
return Params.LocallyHotCallSiteThreshold;
// Otherwise treat it normally.
- return None;
+ return std::nullopt;
}
void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
@@ -2850,7 +2850,7 @@ Optional<int> llvm::getInliningCostEstimate(
/*IgnoreThreshold*/ true);
auto R = CA.analyze();
if (!R.isSuccess())
- return None;
+ return std::nullopt;
return CA.getCost();
}
@@ -2863,7 +2863,7 @@ Optional<InlineCostFeatures> llvm::getInliningCostFeatures(
ORE, *Call.getCalledFunction(), Call);
auto R = CFA.analyze();
if (!R.isSuccess())
- return None;
+ return std::nullopt;
return CFA.features();
}
@@ -2935,7 +2935,7 @@ Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
if (Call.isNoInline())
return InlineResult::failure("noinline call site attribute");
- return None;
+ return std::nullopt;
}
InlineCost llvm::getInlineCost(
diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
index 2371ecbba615d..0a8f18489f622 100644
--- a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
+++ b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
@@ -267,7 +267,7 @@ InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() = default;
InlineSizeEstimatorAnalysis::Result
InlineSizeEstimatorAnalysis::run(const Function &F,
FunctionAnalysisManager &FAM) {
- return None;
+ return std::nullopt;
}
bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() { return false; }
#endif
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index f65ef7c2adf6d..1e0b9c27fd8b2 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -215,14 +215,14 @@ namespace {
BasicBlock *BB) const {
const BlockCacheEntry *Entry = getBlockEntry(BB);
if (!Entry)
- return None;
+ return std::nullopt;
if (Entry->OverDefined.count(V))
return ValueLatticeElement::getOverdefined();
auto LatticeIt = Entry->LatticeElements.find_as(V);
if (LatticeIt == Entry->LatticeElements.end())
- return None;
+ return std::nullopt;
return LatticeIt->second;
}
@@ -551,7 +551,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::getBlockValue(
return ValueLatticeElement::getOverdefined();
// Yet to be resolved.
- return None;
+ return std::nullopt;
}
static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
@@ -694,7 +694,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueNonLocal(
Optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
if (!EdgeResult)
// Explore that input, then return here
- return None;
+ return std::nullopt;
Result.mergeIn(*EdgeResult);
@@ -730,7 +730,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValuePHINode(
getEdgeValue(PhiVal, PhiBB, BB, PN);
if (!EdgeResult)
// Explore that input, then return here
- return None;
+ return std::nullopt;
Result.mergeIn(*EdgeResult);
@@ -809,13 +809,13 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueSelect(
Optional<ValueLatticeElement> OptTrueVal =
getBlockValue(SI->getTrueValue(), BB, SI);
if (!OptTrueVal)
- return None;
+ return std::nullopt;
ValueLatticeElement &TrueVal = *OptTrueVal;
Optional<ValueLatticeElement> OptFalseVal =
getBlockValue(SI->getFalseValue(), BB, SI);
if (!OptFalseVal)
- return None;
+ return std::nullopt;
ValueLatticeElement &FalseVal = *OptFalseVal;
if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
@@ -889,7 +889,7 @@ Optional<ConstantRange> LazyValueInfoImpl::getRangeFor(Value *V,
BasicBlock *BB) {
Optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
if (!OptVal)
- return None;
+ return std::nullopt;
return getConstantRangeOrFull(*OptVal, V->getType(), DL);
}
@@ -922,7 +922,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast(
Optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
if (!LHSRes)
// More work to do before applying this transfer rule.
- return None;
+ return std::nullopt;
const ConstantRange &LHSRange = LHSRes.value();
const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
@@ -946,7 +946,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
Optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
if (!LHSRes || !RHSRes)
// More work to do before applying this transfer rule.
- return None;
+ return std::nullopt;
const ConstantRange &LHSRange = LHSRes.value();
const ConstantRange &RHSRange = RHSRes.value();
@@ -998,7 +998,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueIntrinsic(
for (Value *Op : II->args()) {
Optional<ConstantRange> Range = getRangeFor(Op, II, BB);
if (!Range)
- return None;
+ return std::nullopt;
OpRanges.push_back(*Range);
}
@@ -1210,7 +1210,7 @@ getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
Worklist.push_back(L);
if (RV == Visited.end())
Worklist.push_back(R);
- return None;
+ return std::nullopt;
}
return intersect(LV->second, RV->second);
@@ -1372,7 +1372,7 @@ static std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
Value *Condition = SI->getCondition();
if (!isa<IntegerType>(Val->getType()))
- return None;
+ return std::nullopt;
bool ValUsesConditionAndMayBeFoldable = false;
if (Condition != Val) {
// Check if Val has Condition as an operand.
@@ -1380,7 +1380,7 @@ static std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
usesOperand(Usr, Condition);
if (!ValUsesConditionAndMayBeFoldable)
- return None;
+ return std::nullopt;
}
assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
"Condition != Val nor Val doesn't use Condition");
@@ -1398,7 +1398,7 @@ static std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
ValueLatticeElement EdgeLatticeVal =
constantFoldUser(Usr, Condition, CaseValue, DL);
if (EdgeLatticeVal.isOverdefined())
- return None;
+ return std::nullopt;
EdgeVal = EdgeLatticeVal.getConstantRange();
}
if (DefaultCase) {
@@ -1415,7 +1415,7 @@ static std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
}
return ValueLatticeElement::getRange(std::move(EdgesVals));
}
- return None;
+ return std::nullopt;
}
/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
@@ -1436,7 +1436,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::getEdgeValue(
Optional<ValueLatticeElement> OptInBlock =
getBlockValue(Val, BBFrom, BBFrom->getTerminator());
if (!OptInBlock)
- return None;
+ return std::nullopt;
ValueLatticeElement &InBlock = *OptInBlock;
// We can use the context instruction (generically the ultimate instruction
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 8b0f2a8ed99b5..d3120a41ac270 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -187,8 +187,8 @@ void Lint::visitFunction(Function &F) {
void Lint::visitCallBase(CallBase &I) {
Value *Callee = I.getCalledOperand();
- visitMemoryReference(I, MemoryLocation::getAfter(Callee), None, nullptr,
- MemRef::Callee);
+ visitMemoryReference(I, MemoryLocation::getAfter(Callee), std::nullopt,
+ nullptr, MemRef::Callee);
if (Function *F = dyn_cast<Function>(findValue(Callee,
/*OffsetOk=*/false))) {
@@ -347,26 +347,26 @@ void Lint::visitCallBase(CallBase &I) {
"Undefined behavior: va_start called in a non-varargs function",
&I);
- visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
- nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
+ std::nullopt, nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::vacopy:
- visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
- nullptr, MemRef::Write);
- visitMemoryReference(I, MemoryLocation::getForArgument(&I, 1, TLI), None,
- nullptr, MemRef::Read);
+ visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
+ std::nullopt, nullptr, MemRef::Write);
+ visitMemoryReference(I, MemoryLocation::getForArgument(&I, 1, TLI),
+ std::nullopt, nullptr, MemRef::Read);
break;
case Intrinsic::vaend:
- visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
- nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
+ std::nullopt, nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::stackrestore:
// Stackrestore doesn't read or write memory, but it sets the
// stack pointer, which the compiler may read from or write to
// at any time, so check it for both readability and writeability.
- visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
- nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
+ std::nullopt, nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::get_active_lane_mask:
if (auto *TripCount = dyn_cast<ConstantInt>(I.getArgOperand(1)))
@@ -588,13 +588,13 @@ void Lint::visitAllocaInst(AllocaInst &I) {
}
void Lint::visitVAArgInst(VAArgInst &I) {
- visitMemoryReference(I, MemoryLocation::get(&I), None, nullptr,
+ visitMemoryReference(I, MemoryLocation::get(&I), std::nullopt, nullptr,
MemRef::Read | MemRef::Write);
}
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
- visitMemoryReference(I, MemoryLocation::getAfter(I.getAddress()), None,
- nullptr, MemRef::Branchee);
+ visitMemoryReference(I, MemoryLocation::getAfter(I.getAddress()),
+ std::nullopt, nullptr, MemRef::Branchee);
Check(I.getNumDestinations() != 0,
"Undefined behavior: indirectbr with no destinations", &I);
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index eb06fb67bd3c2..b65db6e3aff75 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1376,7 +1376,7 @@ llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
if (isa<ScalableVectorType>(AccessTy)) {
LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
<< "\n");
- return None;
+ return std::nullopt;
}
const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
@@ -1388,14 +1388,14 @@ llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
if (!AR) {
LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
<< " SCEV: " << *PtrScev << "\n");
- return None;
+ return std::nullopt;
}
// The access function must stride over the innermost loop.
if (Lp != AR->getLoop()) {
LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
<< *Ptr << " SCEV: " << *AR << "\n");
- return None;
+ return std::nullopt;
}
// The address calculation must not wrap. Otherwise, a dependence could be
@@ -1423,7 +1423,7 @@ llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
LLVM_DEBUG(
dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
<< *Ptr << " SCEV: " << *AR << "\n");
- return None;
+ return std::nullopt;
}
}
@@ -1435,7 +1435,7 @@ llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
if (!C) {
LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
<< " SCEV: " << *AR << "\n");
- return None;
+ return std::nullopt;
}
auto &DL = Lp->getHeader()->getModule()->getDataLayout();
@@ -1445,7 +1445,7 @@ llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
// Huge step value - give up.
if (APStepVal.getBitWidth() > 64)
- return None;
+ return std::nullopt;
int64_t StepVal = APStepVal.getSExtValue();
@@ -1453,7 +1453,7 @@ llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
int64_t Stride = StepVal / Size;
int64_t Rem = StepVal % Size;
if (Rem)
- return None;
+ return std::nullopt;
// If the SCEV could wrap but we have an inbounds gep with a unit stride we
// know we can't "wrap around the address space". In case of address space
@@ -1470,7 +1470,7 @@ llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
<< "LAA: Added an overflow assumption\n");
PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
} else
- return None;
+ return std::nullopt;
}
return Stride;
@@ -1492,14 +1492,14 @@ Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
// Make sure that the element types are the same if required.
if (CheckType && ElemTyA != ElemTyB)
- return None;
+ return std::nullopt;
unsigned ASA = PtrA->getType()->getPointerAddressSpace();
unsigned ASB = PtrB->getType()->getPointerAddressSpace();
// Check that the address spaces match.
if (ASA != ASB)
- return None;
+ return std::nullopt;
unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
@@ -1514,7 +1514,7 @@ Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
// Check that the address spaces match and that the pointers are valid.
if (ASA != ASB)
- return None;
+ return std::nullopt;
IdxWidth = DL.getIndexSizeInBits(ASA);
OffsetA = OffsetA.sextOrTrunc(IdxWidth);
@@ -1529,7 +1529,7 @@ Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
const auto *Diff =
dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
if (!Diff)
- return None;
+ return std::nullopt;
Val = Diff->getAPInt().getSExtValue();
}
int Size = DL.getTypeStoreSize(ElemTyA);
@@ -1539,7 +1539,7 @@ Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
// the bitcasts removal in the provided pointers.
if (!StrictCheck || Dist * Size == Val)
return Dist;
- return None;
+ return std::nullopt;
}
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
diff --git a/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
index de8156a46cbc5..884a018840779 100644
--- a/llvm/lib/Analysis/LoopCacheAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
@@ -196,7 +196,7 @@ Optional<bool> IndexedReference::hasSpacialReuse(const IndexedReference &Other,
<< "No spacial reuse,
diff erence between subscript:\n\t"
<< *LastSubscript << "\n\t" << OtherLastSubscript
<< "\nis not constant.\n");
- return None;
+ return std::nullopt;
}
bool InSameCacheLine = (Diff->getValue()->getSExtValue() < CLS);
@@ -248,7 +248,7 @@ Optional<bool> IndexedReference::hasTemporalReuse(const IndexedReference &Other,
if (SCEVConst == nullptr) {
LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: distance unknown\n");
- return None;
+ return std::nullopt;
}
const ConstantInt &CI = *SCEVConst->getValue();
diff --git a/llvm/lib/Analysis/LoopInfo.cpp b/llvm/lib/Analysis/LoopInfo.cpp
index 057fc931c7f3f..0ffdf4ed76e38 100644
--- a/llvm/lib/Analysis/LoopInfo.cpp
+++ b/llvm/lib/Analysis/LoopInfo.cpp
@@ -203,12 +203,12 @@ Optional<Loop::LoopBounds> Loop::LoopBounds::getBounds(const Loop &L,
ScalarEvolution &SE) {
InductionDescriptor IndDesc;
if (!InductionDescriptor::isInductionPHI(&IndVar, &L, &SE, IndDesc))
- return None;
+ return std::nullopt;
Value *InitialIVValue = IndDesc.getStartValue();
Instruction *StepInst = IndDesc.getInductionBinOp();
if (!InitialIVValue || !StepInst)
- return None;
+ return std::nullopt;
const SCEV *Step = IndDesc.getStep();
Value *StepInstOp1 = StepInst->getOperand(1);
@@ -221,7 +221,7 @@ Optional<Loop::LoopBounds> Loop::LoopBounds::getBounds(const Loop &L,
Value *FinalIVValue = findFinalIVValue(L, IndVar, *StepInst);
if (!FinalIVValue)
- return None;
+ return std::nullopt;
return LoopBounds(L, *InitialIVValue, *StepInst, StepValue, *FinalIVValue,
SE);
@@ -288,7 +288,7 @@ Optional<Loop::LoopBounds> Loop::getBounds(ScalarEvolution &SE) const {
if (PHINode *IndVar = getInductionVariable(SE))
return LoopBounds::getBounds(*this, *IndVar, SE);
- return None;
+ return std::nullopt;
}
PHINode *Loop::getInductionVariable(ScalarEvolution &SE) const {
@@ -1053,7 +1053,7 @@ Optional<const MDOperand *> llvm::findStringMetadataForLoop(const Loop *TheLoop,
StringRef Name) {
MDNode *MD = findOptionMDForLoop(TheLoop, Name);
if (!MD)
- return None;
+ return std::nullopt;
switch (MD->getNumOperands()) {
case 1:
return nullptr;
@@ -1068,7 +1068,7 @@ Optional<bool> llvm::getOptionalBoolLoopAttribute(const Loop *TheLoop,
StringRef Name) {
MDNode *MD = findOptionMDForLoop(TheLoop, Name);
if (!MD)
- return None;
+ return std::nullopt;
switch (MD->getNumOperands()) {
case 1:
// When the value is absent it is interpreted as 'attribute set'.
diff --git a/llvm/lib/Analysis/LoopNestAnalysis.cpp b/llvm/lib/Analysis/LoopNestAnalysis.cpp
index 9c751936bebd3..7e80e2596300f 100644
--- a/llvm/lib/Analysis/LoopNestAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopNestAnalysis.cpp
@@ -128,7 +128,7 @@ LoopNest::LoopNestEnum LoopNest::analyzeLoopNestForPerfectNest(
// Bail out if we cannot retrieve the outer loop bounds.
auto OuterLoopLB = OuterLoop.getBounds(SE);
- if (OuterLoopLB == None) {
+ if (OuterLoopLB == std::nullopt) {
LLVM_DEBUG(dbgs() << "Cannot compute loop bounds of OuterLoop: "
<< OuterLoop << "\n";);
return OuterLoopLowerBoundUnknown;
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 9deb96661f3e9..2d0a3e12c3ca6 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -177,12 +177,12 @@ getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
// Don't perform a slow TLI lookup, if this function doesn't return a pointer
// and thus can't be an allocation function.
if (!Callee->getReturnType()->isPointerTy())
- return None;
+ return std::nullopt;
// Make sure that the function is available.
LibFunc TLIFn;
if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
- return None;
+ return std::nullopt;
const auto *Iter = find_if(
AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
@@ -190,11 +190,11 @@ getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
});
if (Iter == std::end(AllocationFnData))
- return None;
+ return std::nullopt;
const AllocFnsTy *FnData = &Iter->second;
if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
- return None;
+ return std::nullopt;
// Check function prototype.
int FstParam = FnData->FstParam;
@@ -210,7 +210,7 @@ getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
FTy->getParamType(SndParam)->isIntegerTy(32) ||
FTy->getParamType(SndParam)->isIntegerTy(64)))
return *FnData;
- return None;
+ return std::nullopt;
}
static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
@@ -219,7 +219,7 @@ static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
if (!IsNoBuiltinCall)
return getAllocationDataForFunction(Callee, AllocTy, TLI);
- return None;
+ return std::nullopt;
}
static Optional<AllocFnsTy>
@@ -230,7 +230,7 @@ getAllocationData(const Value *V, AllocType AllocTy,
if (!IsNoBuiltinCall)
return getAllocationDataForFunction(
Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
- return None;
+ return std::nullopt;
}
static Optional<AllocFnsTy> getAllocationSize(const Value *V,
@@ -239,7 +239,7 @@ static Optional<AllocFnsTy> getAllocationSize(const Value *V,
const Function *Callee =
getCalledFunction(V, IsNoBuiltinCall);
if (!Callee)
- return None;
+ return std::nullopt;
// Prefer to use existing information over allocsize. This will give us an
// accurate AllocTy.
@@ -250,7 +250,7 @@ static Optional<AllocFnsTy> getAllocationSize(const Value *V,
Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
if (Attr == Attribute())
- return None;
+ return std::nullopt;
std::pair<unsigned, std::optional<unsigned>> Args = Attr.getAllocSizeArgs();
@@ -402,7 +402,7 @@ llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
// allocsize. The code structure could stand to be cleaned up a bit.
Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
if (!FnData)
- return None;
+ return std::nullopt;
// Get the index type for this address space, results and intermediate
// computations are performed at that width.
@@ -413,14 +413,14 @@ llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
if (FnData->AllocTy == StrDupLike) {
APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
if (!Size)
- return None;
+ return std::nullopt;
// Strndup limits strlen.
if (FnData->FstParam > 0) {
const ConstantInt *Arg =
dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
if (!Arg)
- return None;
+ return std::nullopt;
APInt MaxSize = Arg->getValue().zext(IntTyBits);
if (Size.ugt(MaxSize))
@@ -432,11 +432,11 @@ llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
const ConstantInt *Arg =
dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
if (!Arg)
- return None;
+ return std::nullopt;
APInt Size = Arg->getValue();
if (!CheckedZextOrTrunc(Size, IntTyBits))
- return None;
+ return std::nullopt;
// Size is determined by just 1 parameter.
if (FnData->SndParam < 0)
@@ -444,16 +444,16 @@ llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
if (!Arg)
- return None;
+ return std::nullopt;
APInt NumElems = Arg->getValue();
if (!CheckedZextOrTrunc(NumElems, IntTyBits))
- return None;
+ return std::nullopt;
bool Overflow;
Size = Size.umul_ov(NumElems, Overflow);
if (Overflow)
- return None;
+ return std::nullopt;
return Size;
}
@@ -529,7 +529,7 @@ Optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
return P.first == TLIFn;
});
if (Iter == std::end(FreeFnData))
- return None;
+ return std::nullopt;
return Iter->second;
}
@@ -538,7 +538,7 @@ Optional<StringRef> llvm::getAllocationFamily(const Value *I,
bool IsNoBuiltin;
const Function *Callee = getCalledFunction(I, IsNoBuiltin);
if (Callee == nullptr || IsNoBuiltin)
- return None;
+ return std::nullopt;
LibFunc TLIFn;
if (TLI && TLI->getLibFunc(*Callee, TLIFn) && TLI->has(TLIFn)) {
@@ -557,7 +557,7 @@ Optional<StringRef> llvm::getAllocationFamily(const Value *I,
if (Attr.isValid())
return Attr.getValueAsString();
}
- return None;
+ return std::nullopt;
}
/// isLibFreeFunction - Returns true if the function is a builtin free()
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index f6410b1f7af2e..c308005b3cf3a 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -694,7 +694,7 @@ template <class AliasAnalysisType> class ClobberWalker {
addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
}
- return None;
+ return std::nullopt;
}
template <typename T, typename Walker>
@@ -721,7 +721,7 @@ template <class AliasAnalysisType> class ClobberWalker {
T &curNode() const { return W->Paths[*N]; }
Walker *W = nullptr;
- Optional<ListIndex> N = None;
+ Optional<ListIndex> N = std::nullopt;
};
using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
@@ -771,7 +771,7 @@ template <class AliasAnalysisType> class ClobberWalker {
assert(Paths.empty() && VisitedPhis.empty() &&
"Reset the optimization state.");
- Paths.emplace_back(Loc, Start, Phi, None);
+ Paths.emplace_back(Loc, Start, Phi, std::nullopt);
// Stores how many "valid" optimization nodes we had prior to calling
// addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
auto PriorPathsSize = Paths.size();
@@ -947,7 +947,7 @@ template <class AliasAnalysisType> class ClobberWalker {
if (auto *MU = dyn_cast<MemoryUse>(Start))
Current = MU->getDefiningAccess();
- DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
+ DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
// Fast path for the overly-common case (no crazy phi optimization
// necessary)
UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
@@ -1756,7 +1756,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
Def = isa<MemoryDef>(Template);
Use = isa<MemoryUse>(Template);
#if !defined(NDEBUG)
- ModRefInfo ModRef = AAP->getModRefInfo(I, None);
+ ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
bool DefCheck, UseCheck;
DefCheck = isModSet(ModRef) || isOrdered(I);
UseCheck = isRefSet(ModRef);
@@ -1771,7 +1771,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
#endif
} else {
// Find out what affect this instruction has on memory.
- ModRefInfo ModRef = AAP->getModRefInfo(I, None);
+ ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
// The isOrdered check is used to ensure that volatiles end up as defs
// (atomics end up as ModRef right now anyway). Until we separate the
// ordering chain from the memory chain, this enables people to see at least
diff --git a/llvm/lib/Analysis/OptimizationRemarkEmitter.cpp b/llvm/lib/Analysis/OptimizationRemarkEmitter.cpp
index 17b40f03a5a51..d4d13b0b22ff0 100644
--- a/llvm/lib/Analysis/OptimizationRemarkEmitter.cpp
+++ b/llvm/lib/Analysis/OptimizationRemarkEmitter.cpp
@@ -62,7 +62,7 @@ bool OptimizationRemarkEmitter::invalidate(
Optional<uint64_t> OptimizationRemarkEmitter::computeHotness(const Value *V) {
if (!BFI)
- return None;
+ return std::nullopt;
return BFI->getBlockProfileCount(cast<BasicBlock>(V));
}
diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
index c0d9bfb53088a..31ee8137f5862 100644
--- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp
+++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
@@ -87,11 +87,11 @@ Optional<uint64_t> ProfileSummaryInfo::getProfileCount(
uint64_t TotalCount;
if (Call.extractProfTotalWeight(TotalCount))
return TotalCount;
- return None;
+ return std::nullopt;
}
if (BFI)
return BFI->getBlockProfileCount(Call.getParent(), AllowSynthetic);
- return None;
+ return std::nullopt;
}
/// Returns true if the function's entry is hot. If it returns false, it
@@ -267,7 +267,7 @@ void ProfileSummaryInfo::computeThresholds() {
Optional<uint64_t>
ProfileSummaryInfo::computeThreshold(int PercentileCutoff) const {
if (!hasProfileSummary())
- return None;
+ return std::nullopt;
auto iter = ThresholdCache.find(PercentileCutoff);
if (iter != ThresholdCache.end()) {
return iter->second;
diff --git a/llvm/lib/Analysis/ReplayInlineAdvisor.cpp b/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
index afc3d7fc4c358..043dddfdbd1d3 100644
--- a/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
@@ -124,7 +124,7 @@ std::unique_ptr<InlineAdvice> ReplayInlineAdvisor::getAdviceImpl(CallBase &CB) {
LLVM_DEBUG(dbgs() << "Replay Inliner: Not Inlined " << Callee << " @ "
<< CallSiteLoc << "\n");
// A negative inline is conveyed by "None" Optional<InlineCost>
- return std::make_unique<DefaultInlineAdvice>(this, CB, None, ORE,
+ return std::make_unique<DefaultInlineAdvice>(this, CB, std::nullopt, ORE,
EmitRemarks);
}
}
@@ -138,7 +138,7 @@ std::unique_ptr<InlineAdvice> ReplayInlineAdvisor::getAdviceImpl(CallBase &CB) {
else if (ReplaySettings.ReplayFallback ==
ReplayInlinerSettings::Fallback::NeverInline)
// A negative inline is conveyed by "None" Optional<InlineCost>
- return std::make_unique<DefaultInlineAdvice>(this, CB, None, ORE,
+ return std::make_unique<DefaultInlineAdvice>(this, CB, std::nullopt, ORE,
EmitRemarks);
else {
assert(ReplaySettings.ReplayFallback ==
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index b2e1c78097965..2029e47eb1374 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -732,7 +732,7 @@ CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
return 0;
if (Depth > MaxSCEVCompareDepth)
- return None;
+ return std::nullopt;
// Aside from the getSCEVType() ordering, the particular ordering
// isn't very important except that it's beneficial to be consistent,
@@ -2359,7 +2359,7 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
const OverflowingBinaryOperator *OBO) {
// It cannot be done any better.
if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
- return None;
+ return std::nullopt;
SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
@@ -2373,7 +2373,7 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
if (OBO->getOpcode() != Instruction::Add &&
OBO->getOpcode() != Instruction::Sub &&
OBO->getOpcode() != Instruction::Mul)
- return None;
+ return std::nullopt;
const SCEV *LHS = getSCEV(OBO->getOperand(0));
const SCEV *RHS = getSCEV(OBO->getOperand(1));
@@ -2396,7 +2396,7 @@ ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
if (Deduced)
return Flags;
- return None;
+ return std::nullopt;
}
// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
@@ -3966,7 +3966,7 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
if (!Changed)
return S;
if (NewOps.empty())
- return None;
+ return std::nullopt;
return isa<SCEVSequentialMinMaxExpr>(S)
? SE.getSequentialMinMaxExpr(Kind, NewOps)
@@ -3976,7 +3976,7 @@ class SCEVSequentialMinMaxDeduplicatingVisitor final
RetVal visit(const SCEV *S) {
// Has the whole operand been seen already?
if (!SeenOps.insert(S).second)
- return None;
+ return std::nullopt;
return Base::visit(S);
}
@@ -4401,7 +4401,7 @@ bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) {
ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
if (SI == ExprValueMap.end())
- return None;
+ return std::nullopt;
#ifndef NDEBUG
if (VerifySCEVMap) {
// Check there is no dangling Value in the set returned.
@@ -4915,7 +4915,7 @@ SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
if (BackedgeCond == IC)
return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
: SE.getZero(Type::getInt1Ty(SE.getContext()));
- return None;
+ return std::nullopt;
}
class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
@@ -5130,7 +5130,7 @@ struct BinaryOp {
static std::optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
auto *Op = dyn_cast<Operator>(V);
if (!Op)
- return None;
+ return std::nullopt;
// Implementation detail: all the cleverness here should happen without
// creating new SCEV expressions -- our caller knowns tricks to avoid creating
@@ -5209,7 +5209,7 @@ static std::optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
- return None;
+ return std::nullopt;
}
/// Helper function to createAddRecFromPHIWithCasts. We have a phi
@@ -5353,7 +5353,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
}
}
if (!BEValueV || !StartValueV)
- return None;
+ return std::nullopt;
const SCEV *BEValue = getSCEV(BEValueV);
@@ -5362,7 +5362,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// an appropriate runtime guard, then we found a simple induction variable!
const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
if (!Add)
- return None;
+ return std::nullopt;
// If there is a single occurrence of the symbolic value, possibly
// casted, replace it with a recurrence.
@@ -5378,7 +5378,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
}
if (FoundIndex == Add->getNumOperands())
- return None;
+ return std::nullopt;
// Create an add with everything but the specified operand.
SmallVector<const SCEV *, 8> Ops;
@@ -5390,7 +5390,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// The runtime checks will not be valid if the step amount is
// varying inside the loop.
if (!isLoopInvariant(Accum, L))
- return None;
+ return std::nullopt;
// *** Part2: Create the predicates
@@ -5495,7 +5495,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
if (PredIsKnownFalse(StartVal, StartExtended)) {
LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
- return None;
+ return std::nullopt;
}
// The Step is always Signed (because the overflow checks are either
@@ -5503,7 +5503,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
if (PredIsKnownFalse(Accum, AccumExtended)) {
LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
- return None;
+ return std::nullopt;
}
auto AppendPredicate = [&](const SCEV *Expr,
@@ -5537,7 +5537,7 @@ ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
auto *PN = cast<PHINode>(SymbolicPHI->getValue());
const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
if (!L)
- return None;
+ return std::nullopt;
// Check to see if we already analyzed this PHI.
auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
@@ -5546,7 +5546,7 @@ ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
I->second;
// Analysis was done before and failed to create an AddRec:
if (Rewrite.first == SymbolicPHI)
- return None;
+ return std::nullopt;
// Analysis was done before and succeeded to create an AddRec under
// a predicate:
assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
@@ -5561,7 +5561,7 @@ ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
if (!Rewrite) {
SmallVector<const SCEVPredicate *, 3> Predicates;
PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
- return None;
+ return std::nullopt;
}
return Rewrite;
@@ -6127,7 +6127,7 @@ createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr,
// FIXME: while we can't legally model the case where both of the hands
// are fully variable, we only require that the *difference* is constant.
if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
- return None;
+ return std::nullopt;
const SCEV *X, *C;
if (isa<SCEVConstant>(TrueExpr)) {
@@ -6147,7 +6147,7 @@ static Optional<const SCEV *> createNodeForSelectViaUMinSeq(ScalarEvolution *SE,
Value *TrueVal,
Value *FalseVal) {
if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
- return None;
+ return std::nullopt;
const auto *SECond = SE->getSCEV(Cond);
const auto *SETrue = SE->getSCEV(TrueVal);
@@ -6300,7 +6300,7 @@ static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
return getConstantRangeFromMetadata(*MD);
- return None;
+ return std::nullopt;
}
void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
@@ -8600,8 +8600,7 @@ bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
}
ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
- : ExitLimit(E, E, false, None) {
-}
+ : ExitLimit(E, E, false, std::nullopt) {}
ScalarEvolution::ExitLimit::ExitLimit(
const SCEV *E, const SCEV *ConstantMaxNotTaken, bool MaxOrZero,
@@ -8818,7 +8817,7 @@ ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
"Variance in assumed invariant key components!");
auto Itr = TripCountMap.find({ExitCond, ControlsExit});
if (Itr == TripCountMap.end())
- return None;
+ return std::nullopt;
return Itr->second;
}
@@ -8924,7 +8923,7 @@ ScalarEvolution::computeExitLimitFromCondFromBinOp(
else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
IsAnd = false;
else
- return None;
+ return std::nullopt;
// EitherMayExit is true in these two cases:
// br (and Op0 Op1), loop, exit
@@ -10020,7 +10019,7 @@ GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
// We currently can only solve this if the coefficients are constants.
if (!LC || !MC || !NC) {
LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
- return None;
+ return std::nullopt;
}
APInt L = LC->getAPInt();
@@ -10070,7 +10069,7 @@ static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
return XW.slt(YW) ? *X : *Y;
}
if (!X && !Y)
- return None;
+ return std::nullopt;
return X ? *X : *Y;
}
@@ -10087,7 +10086,7 @@ static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
/// the addrec to the equation).
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
if (!X)
- return None;
+ return std::nullopt;
unsigned W = X->getBitWidth();
if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
return X->trunc(BitWidth);
@@ -10114,18 +10113,18 @@ SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
unsigned BitWidth;
auto T = GetQuadraticEquation(AddRec);
if (!T)
- return None;
+ return std::nullopt;
std::tie(A, B, C, M, BitWidth) = *T;
LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
if (!X)
- return None;
+ return std::nullopt;
ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
if (!V->isZero())
- return None;
+ return std::nullopt;
return TruncIfPossible(X, BitWidth);
}
@@ -10156,7 +10155,7 @@ SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
unsigned BitWidth;
auto T = GetQuadraticEquation(AddRec);
if (!T)
- return None;
+ return std::nullopt;
// Be careful about the return value: there can be two reasons for not
// returning an actual number. First, if no solutions to the equations
@@ -10201,7 +10200,7 @@ SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
// be a solution, but the function failed to find it. We cannot treat it
// as "no solution".
if (!SO || !UO)
- return { None, false };
+ return {std::nullopt, false};
// Check the smaller value first to see if it leaves the range.
// At this point, both SO and UO must have values.
@@ -10213,7 +10212,7 @@ SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
return { Max, true };
// Solutions were found, but were eliminated, hence the "true".
- return { None, true };
+ return {std::nullopt, true};
};
std::tie(A, B, C, M, BitWidth) = *T;
@@ -10225,7 +10224,7 @@ SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
// If any of the solutions was unknown, no meaningful conclusions can
// be made.
if (!SL.second || !SU.second)
- return None;
+ return std::nullopt;
// Claim: The correct solution is not some value between Min and Max.
//
@@ -10776,7 +10775,7 @@ Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
return true;
else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
return false;
- return None;
+ return std::nullopt;
}
bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
@@ -10801,7 +10800,7 @@ Optional<bool> ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred,
ICmpInst::getInversePredicate(Pred),
LHS, RHS))
return false;
- return None;
+ return std::nullopt;
}
bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
@@ -10848,7 +10847,7 @@ ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
// Only handle LE/LT/GE/GT predicates.
if (!ICmpInst::isRelational(Pred))
- return None;
+ return std::nullopt;
bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
@@ -10857,13 +10856,13 @@ ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
// Check that AR does not wrap.
if (ICmpInst::isUnsigned(Pred)) {
if (!LHS->hasNoUnsignedWrap())
- return None;
+ return std::nullopt;
return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
} else {
assert(ICmpInst::isSigned(Pred) &&
"Relational predicate is either signed or unsigned!");
if (!LHS->hasNoSignedWrap())
- return None;
+ return std::nullopt;
const SCEV *Step = LHS->getStepRecurrence(*this);
@@ -10873,7 +10872,7 @@ ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
if (isKnownNonPositive(Step))
return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
- return None;
+ return std::nullopt;
}
}
@@ -10885,7 +10884,7 @@ ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
// If there is a loop-invariant, force it into the RHS, otherwise bail out.
if (!isLoopInvariant(RHS, L)) {
if (!isLoopInvariant(LHS, L))
- return None;
+ return std::nullopt;
std::swap(LHS, RHS);
Pred = ICmpInst::getSwappedPredicate(Pred);
@@ -10893,11 +10892,11 @@ ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
if (!ArLHS || ArLHS->getLoop() != L)
- return None;
+ return std::nullopt;
auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
if (!MonotonicType)
- return None;
+ return std::nullopt;
// If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
// true as the loop iterates, and the backedge is control dependent on
// "ArLHS `Pred` RHS" == true then we can reason as follows:
@@ -10923,7 +10922,7 @@ ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
RHS);
if (!CtxI)
- return None;
+ return std::nullopt;
// Try to prove via context.
// TODO: Support other cases.
switch (Pred) {
@@ -10960,7 +10959,7 @@ ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
}
}
- return None;
+ return std::nullopt;
}
Optional<ScalarEvolution::LoopInvariantPredicate>
@@ -10978,7 +10977,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
// If there is a loop-invariant, force it into the RHS, otherwise bail out.
if (!isLoopInvariant(RHS, L)) {
if (!isLoopInvariant(LHS, L))
- return None;
+ return std::nullopt;
std::swap(LHS, RHS);
Pred = ICmpInst::getSwappedPredicate(Pred);
@@ -10986,30 +10985,30 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
if (!AR || AR->getLoop() != L)
- return None;
+ return std::nullopt;
// The predicate must be relational (i.e. <, <=, >=, >).
if (!ICmpInst::isRelational(Pred))
- return None;
+ return std::nullopt;
// TODO: Support steps other than +/- 1.
const SCEV *Step = AR->getStepRecurrence(*this);
auto *One = getOne(Step->getType());
auto *MinusOne = getNegativeSCEV(One);
if (Step != One && Step != MinusOne)
- return None;
+ return std::nullopt;
// Type mismatch here means that MaxIter is potentially larger than max
// unsigned value in start type, which means we cannot prove no wrap for the
// indvar.
if (AR->getType() != MaxIter->getType())
- return None;
+ return std::nullopt;
// Value of IV on suggested last iteration.
const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
// Does it still meet the requirement?
if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
- return None;
+ return std::nullopt;
// Because step is +/- 1 and MaxIter has same type as Start (i.e. it does
// not exceed max unsigned value of this type), this effectively proves
// that there is no wrap during the iteration. To prove that there is no
@@ -11021,7 +11020,7 @@ ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
const SCEV *Start = AR->getStart();
if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
- return None;
+ return std::nullopt;
// Everything is fine.
return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
@@ -11758,15 +11757,15 @@ Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
const auto *MAR = cast<SCEVAddRecExpr>(More);
if (LAR->getLoop() != MAR->getLoop())
- return None;
+ return std::nullopt;
// We look at affine expressions only; not for correctness but to keep
// getStepRecurrence cheap.
if (!LAR->isAffine() || !MAR->isAffine())
- return None;
+ return std::nullopt;
if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
- return None;
+ return std::nullopt;
Less = LAR->getStart();
More = MAR->getStart();
@@ -11800,7 +11799,7 @@ Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
if (C1 && C2 && RLess == RMore)
return C2->getAPInt() - C1->getAPInt();
- return None;
+ return std::nullopt;
}
bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
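Every hunk in this file is the same mechanical substitution, but the SolveQuadraticAddRecRange changes also place std::nullopt inside a braced pair initializer rather than a bare return. A minimal standalone sketch of why that compiles (hypothetical names, not LLVM code): std::nullopt_t converts implicitly to the std::optional element of the pair, so the sentinel participates in aggregate-style returns.

#include <optional>
#include <utility>

// Hypothetical stand-in for SolveQuadraticAddRecRange's return convention:
// first = the usable solution, if any; second = whether any solution
// existed at all before filtering.
static std::pair<std::optional<int>, bool> solveInRange(int X, int Lo, int Hi) {
  if (X < 0)
    return {std::nullopt, false}; // No solution exists.
  if (X < Lo || X > Hi)
    return {std::nullopt, true};  // Solutions were found, but eliminated.
  return {X, true};               // A usable solution.
}
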
diff --git a/llvm/lib/Analysis/StratifiedSets.h b/llvm/lib/Analysis/StratifiedSets.h
index 883ebd24efdcc..4da6d476b1cac 100644
--- a/llvm/lib/Analysis/StratifiedSets.h
+++ b/llvm/lib/Analysis/StratifiedSets.h
@@ -94,7 +94,7 @@ template <typename T> class StratifiedSets {
Optional<StratifiedInfo> find(const T &Elem) const {
auto Iter = Values.find(Elem);
if (Iter == Values.end())
- return None;
+ return std::nullopt;
return Iter->second;
}
@@ -547,21 +547,21 @@ template <typename T> class StratifiedSetsBuilder {
Optional<const StratifiedInfo *> get(const T &Val) const {
auto Result = Values.find(Val);
if (Result == Values.end())
- return None;
+ return std::nullopt;
return &Result->second;
}
Optional<StratifiedInfo *> get(const T &Val) {
auto Result = Values.find(Val);
if (Result == Values.end())
- return None;
+ return std::nullopt;
return &Result->second;
}
Optional<StratifiedIndex> indexOf(const T &Val) {
auto MaybeVal = get(Val);
if (!MaybeVal)
- return None;
+ return std::nullopt;
auto *Info = *MaybeVal;
auto &Link = linksAt(Info->Index);
return Link.Number;
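The find/get/indexOf methods above share one lookup shape: translate a map's end() sentinel into an absent optional. A standalone sketch of that shape (hypothetical key and value types, not the StratifiedSets API):

#include <map>
#include <optional>
#include <string>

// Hypothetical lookup mirroring StratifiedSets::find above: callers get a
// self-describing present-or-absent result instead of an iterator they
// must remember to compare against end().
static std::optional<int> lookup(const std::map<std::string, int> &Values,
                                 const std::string &Key) {
  auto Iter = Values.find(Key);
  if (Iter == Values.end())
    return std::nullopt;
  return Iter->second;
}

A caller then tests presence and value in one step: if (auto V = lookup(M, "key")) use(*V);
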
diff --git a/llvm/lib/Analysis/TensorSpec.cpp b/llvm/lib/Analysis/TensorSpec.cpp
index 4dbab51f9e03a..1ca8de8647bd2 100644
--- a/llvm/lib/Analysis/TensorSpec.cpp
+++ b/llvm/lib/Analysis/TensorSpec.cpp
@@ -47,7 +47,7 @@ Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
llvm::raw_string_ostream OS(S);
OS << Value;
Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
- return None;
+ return std::nullopt;
};
// FIXME: accept a Path as a parameter, and use it for error reporting.
json::Path::Root Root("tensor_spec");
@@ -74,7 +74,7 @@ Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
SUPPORTED_TENSOR_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
- return None;
+ return std::nullopt;
}
} // namespace llvm
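One detail of the getTensorSpecFromJSON change: the returns above sit inside an error-reporting lambda. When such a helper mixes return std::nullopt; with value returns, it needs an explicit trailing return type, since std::nullopt_t and the value type have no deducible common type. A standalone sketch (hypothetical parser, not the TensorSpec API):

#include <iostream>
#include <optional>
#include <string>

static std::optional<int> parseDigit(char C) {
  // Hypothetical error helper in the style of the lambda above; the
  // explicit return type lets it return std::nullopt while callers treat
  // the result as std::optional<int>.
  auto Fail = [&](const std::string &Msg) -> std::optional<int> {
    std::cerr << "parse error: " << Msg << '\n';
    return std::nullopt;
  };
  if (C < '0' || C > '9')
    return Fail("not a digit");
  return C - '0';
}
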
diff --git a/llvm/lib/Analysis/VFABIDemangling.cpp b/llvm/lib/Analysis/VFABIDemangling.cpp
index aa38c39ae71a1..c32ca6aaa858a 100644
--- a/llvm/lib/Analysis/VFABIDemangling.cpp
+++ b/llvm/lib/Analysis/VFABIDemangling.cpp
@@ -324,24 +324,24 @@ Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
// Parse the fixed-size part of the mangled name
if (!MangledName.consume_front("_ZGV"))
- return None;
+ return std::nullopt;
// Extract ISA. An unknown ISA is also supported, so we accept all
// values.
VFISAKind ISA;
if (tryParseISA(MangledName, ISA) != ParseRet::OK)
- return None;
+ return std::nullopt;
// Extract <mask>.
bool IsMasked;
if (tryParseMask(MangledName, IsMasked) != ParseRet::OK)
- return None;
+ return std::nullopt;
// Parse the variable size, starting from <vlen>.
unsigned VF;
bool IsScalable;
if (tryParseVLEN(MangledName, VF, IsScalable) != ParseRet::OK)
- return None;
+ return std::nullopt;
// Parse the <parameters>.
ParseRet ParamFound;
@@ -354,7 +354,7 @@ Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
// Bail out if there is a parsing error in the parameter.
if (ParamFound == ParseRet::Error)
- return None;
+ return std::nullopt;
if (ParamFound == ParseRet::OK) {
Align Alignment;
@@ -362,7 +362,7 @@ Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
const ParseRet AlignFound = tryParseAlign(MangledName, Alignment);
// Bail out if there is a syntax error in the align token.
if (AlignFound == ParseRet::Error)
- return None;
+ return std::nullopt;
// Add the parameter.
Parameters.push_back({ParameterPos, PKind, StepOrPos, Alignment});
@@ -372,12 +372,12 @@ Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
// A valid MangledName must have at least one valid entry in the
// <parameters>.
if (Parameters.empty())
- return None;
+ return std::nullopt;
// Check for the <scalarname> and the optional <redirection>, which
// are separated from the prefix with "_"
if (!MangledName.consume_front("_"))
- return None;
+ return std::nullopt;
// The rest of the string must be in the format:
// <scalarname>[(<redirection>)]
@@ -385,25 +385,25 @@ Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
MangledName.take_while([](char In) { return In != '('; });
if (ScalarName.empty())
- return None;
+ return std::nullopt;
// Reduce MangledName to [(<redirection>)].
MangledName = MangledName.ltrim(ScalarName);
// Find the optional custom name redirection.
if (MangledName.consume_front("(")) {
if (!MangledName.consume_back(")"))
- return None;
+ return std::nullopt;
// Update the vector variant with the one specified by the user.
VectorName = MangledName;
// If the vector name is missing, bail out.
if (VectorName.empty())
- return None;
+ return std::nullopt;
}
// LLVM internal mapping via the TargetLibraryInfo (TLI) must be
// redirected to an existing name.
if (ISA == VFISAKind::LLVM && VectorName == OriginalName)
- return None;
+ return std::nullopt;
// When <mask> is "M", we need to add a parameter that is used as the
// global predicate for the function.
@@ -438,7 +438,7 @@ Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
// The declaration of the function must be present in the module
// to be able to retrieve its signature.
if (!F)
- return None;
+ return std::nullopt;
const ElementCount EC = getECFromSignature(F->getFunctionType());
VF = EC.getKnownMinValue();
}
@@ -447,9 +447,9 @@ Optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
// 2. We don't accept the demangling if the vector function is not
// present in the module.
if (VF == 0)
- return None;
+ return std::nullopt;
if (!M.getFunction(VectorName))
- return None;
+ return std::nullopt;
const VFShape Shape({ElementCount::get(VF, IsScalable), Parameters});
return VFInfo({Shape, std::string(ScalarName), std::string(VectorName), ISA});
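tryDemangleForVFABI above is a chain of token parsers, each bailing out with std::nullopt the moment its piece of the name is malformed, so a single failure propagates to the caller as "not a vector-function ABI name". A standalone sketch of that early-return structure (hypothetical two-token grammar, nothing like the real VFABI one):

#include <optional>
#include <string>

// Hypothetical demangling result; the real VFInfo carries much more.
struct Parsed {
  bool IsMasked;
  std::string ScalarName;
};

static std::optional<Parsed> tryParse(std::string S) {
  // Fixed prefix, in the spirit of consume_front("_ZGV") above.
  const std::string Prefix = "_X";
  if (S.compare(0, Prefix.size(), Prefix) != 0)
    return std::nullopt;
  S.erase(0, Prefix.size());

  // One-character mask token, in the spirit of tryParseMask.
  if (S.empty() || (S[0] != 'M' && S[0] != 'N'))
    return std::nullopt;
  bool IsMasked = S[0] == 'M';
  S.erase(0, 1);

  // The remainder is the scalar name; an empty name is rejected, as with
  // ScalarName above.
  if (S.empty())
    return std::nullopt;
  return Parsed{IsMasked, S};
}
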
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index f29b0f8e634ef..878177e08eabc 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2750,7 +2750,7 @@ static std::optional<std::pair<Value*, Value*>>
getInvertibleOperands(const Operator *Op1,
const Operator *Op2) {
if (Op1->getOpcode() != Op2->getOpcode())
- return None;
+ return std::nullopt;
auto getOperands = [&](unsigned OpNum) -> auto {
return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
@@ -2844,7 +2844,7 @@ getInvertibleOperands(const Operator *Op1,
return std::make_pair(Start1, Start2);
}
}
- return None;
+ return std::nullopt;
}
/// Return true if V2 == V1 + X, where X is known non-zero.
@@ -6664,21 +6664,21 @@ isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
const DataLayout &DL, unsigned Depth) {
switch (Pred) {
default:
- return None;
+ return std::nullopt;
case CmpInst::ICMP_SLT:
case CmpInst::ICMP_SLE:
if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
return true;
- return None;
+ return std::nullopt;
case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE:
if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
return true;
- return None;
+ return std::nullopt;
}
}
@@ -6707,7 +6707,7 @@ static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
return false;
- return None;
+ return std::nullopt;
}
/// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
@@ -6724,7 +6724,7 @@ static Optional<bool> isImpliedCondCommonOperandWithConstants(
return false;
if (Difference.isEmptySet())
return true;
- return None;
+ return std::nullopt;
}
/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
@@ -6757,7 +6757,7 @@ static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
if (LPred == RPred)
return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth);
- return None;
+ return std::nullopt;
}
/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
@@ -6788,9 +6788,9 @@ isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
if (Optional<bool> Implication = isImpliedCondition(
ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
return Implication;
- return None;
+ return std::nullopt;
}
- return None;
+ return std::nullopt;
}
Optional<bool>
@@ -6799,12 +6799,12 @@ llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
// Bail out when we hit the limit.
if (Depth == MaxAnalysisRecursionDepth)
- return None;
+ return std::nullopt;
// A mismatch occurs when we compare a scalar cmp to a vector cmp, for
// example.
if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
- return None;
+ return std::nullopt;
assert(LHS->getType()->isIntOrIntVectorTy(1) &&
"Expected integer type only!");
@@ -6825,7 +6825,7 @@ llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
Depth);
}
- return None;
+ return std::nullopt;
}
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
@@ -6841,7 +6841,7 @@ Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
LHSIsTrue, Depth);
if (Depth == MaxAnalysisRecursionDepth)
- return None;
+ return std::nullopt;
// LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
// LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
@@ -6867,7 +6867,7 @@ Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
return false;
}
- return None;
+ return std::nullopt;
}
// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
@@ -6908,7 +6908,7 @@ Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
auto PredCond = getDomPredecessorCondition(ContextI);
if (PredCond.first)
return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
- return None;
+ return std::nullopt;
}
Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
@@ -6919,7 +6919,7 @@ Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
if (PredCond.first)
return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
PredCond.second);
- return None;
+ return std::nullopt;
}
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
@@ -7335,7 +7335,7 @@ getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (!OpC)
- return None;
+ return std::nullopt;
if (OpC->isZero())
continue; // No offset.
@@ -7349,7 +7349,7 @@ getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
// vector. Multiply the index by the ElementSize.
TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
if (Size.isScalable())
- return None;
+ return std::nullopt;
Offset += Size.getFixedSize() * OpC->getSExtValue();
}
@@ -7377,7 +7377,7 @@ Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
// handle no other case.
if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
GEP1->getSourceElementType() != GEP2->getSourceElementType())
- return None;
+ return std::nullopt;
// Skip any common indices and track the GEP types.
unsigned Idx = 1;
@@ -7388,7 +7388,7 @@ Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
if (!IOffset1 || !IOffset2)
- return None;
+ return std::nullopt;
return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
Offset1.getSExtValue();
}
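The isImpliedCondition family above uses Optional<bool> as three-valued logic: true means the implication is proven, false means its negation is proven, and the sentinel, now std::nullopt, means nothing could be concluded. A standalone sketch (hypothetical predicate, not the ValueTracking API):

#include <optional>

// Does "x < A" imply "x < B" for unsigned x? Proven when A <= B; otherwise
// inconclusive, which is the state std::nullopt encodes, distinct from a
// proven-false implication.
static std::optional<bool> ultImpliesUlt(unsigned A, unsigned B) {
  if (A <= B)
    return true;       // Every x < A is also < B.
  return std::nullopt; // x < A no longer guarantees x < B; nothing proven.
}

// Callers must test the optional itself before its payload:
//   if (std::optional<bool> Imp = ultImpliesUlt(A, B))
//     ... *Imp is a proven result ...
//   else
//     ... nothing is known ...
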