[llvm] 1ec11d2 - [Transforms/Vectorize] llvm::Optional => std::optional
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 12 00:56:41 PST 2022
Author: Fangrui Song
Date: 2022-12-12T08:56:35Z
New Revision: 1ec11d2d48dea8eb669880e9242adab87f134c52
URL: https://github.com/llvm/llvm-project/commit/1ec11d2d48dea8eb669880e9242adab87f134c52
DIFF: https://github.com/llvm/llvm-project/commit/1ec11d2d48dea8eb669880e9242adab87f134c52.diff
LOG: [Transforms/Vectorize] llvm::Optional => std::optional
Added:
Modified:
llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
llvm/lib/Transforms/Vectorize/VPlan.h
Removed:
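The rename is mechanical: by this point in the migration llvm::Optional already exposes the std::optional interface (and llvm::None had been made an alias of std::nullopt, which is why the old lines below already spell std::nullopt in comments), so only the spelling of the type changes. A minimal sketch of the before/after pattern, with illustrative names not taken from the diff:

    #include <optional>

    // Before: llvm::Optional<unsigned> findIndex(bool Known, unsigned I);
    // After, same body and same semantics:
    std::optional<unsigned> findIndex(bool Known, unsigned I) {
      if (Known)
        return I;            // engaged: implicit construction from unsigned
      return std::nullopt;   // disengaged: "no index"
    }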
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index 4c635579b865..8990a65afdb4 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -297,7 +297,7 @@ class LoopVectorizationPlanner {
/// Plan how to best vectorize, return the best VF and its cost, or
/// std::nullopt if vectorization and interleaving should be avoided up front.
- Optional<VectorizationFactor> plan(ElementCount UserVF, unsigned UserIC);
+ std::optional<VectorizationFactor> plan(ElementCount UserVF, unsigned UserIC);
/// Use the VPlan-native path to plan how to best vectorize, return the best
/// VF and its cost.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f366c31a5bbe..e6f3030a7a93 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -402,7 +402,8 @@ static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
/// 2) Returns expected trip count according to profile data if any.
/// 3) Returns upper bound estimate if it is known.
/// 4) Returns std::nullopt if all of the above failed.
-static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
+static std::optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE,
+ Loop *L) {
// Check if exact trip count is known.
if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
return ExpectedTC;
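The std::nullopt return is what lets getSmallBestKnownTC chain its documented fallbacks: each probe either yields a trip count or falls through to the next, and only when all four fail does the caller see an empty optional. Sketched generically below; the helper names are illustrative stand-ins, not the pass's real queries:

    #include <optional>

    unsigned exactTripCount();                  // returns 0 when unknown (illustrative)
    std::optional<unsigned> profileTripCount(); // std::nullopt without profile data
    unsigned upperBoundTripCount();             // returns 0 when unknown (illustrative)

    std::optional<unsigned> smallBestKnownTC() {
      if (unsigned Exact = exactTripCount())       // 1) exact count is known
        return Exact;
      if (auto Profile = profileTripCount())       // 2) profile-based estimate
        return Profile;
      if (unsigned Bound = upperBoundTripCount())  // 3) known upper bound
        return Bound;
      return std::nullopt;                         // 4) all of the above failed
    }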
@@ -1636,7 +1637,7 @@ class LoopVectorizationCostModel {
/// Return the cost of instructions in an inloop reduction pattern, if I is
/// part of that pattern.
- Optional<InstructionCost>
+ std::optional<InstructionCost>
getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
TTI::TargetCostKind CostKind);
@@ -6556,7 +6557,8 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
return Cost;
}
-Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
+std::optional<InstructionCost>
+LoopVectorizationCostModel::getReductionPatternCost(
Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
using namespace llvm::PatternMatch;
// Early exit for no inloop reductions
@@ -6725,7 +6727,7 @@ Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
}
}
- return I == RetI ? Optional<InstructionCost>(BaseCost) : std::nullopt;
+ return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
}
InstructionCost
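The explicit std::optional<InstructionCost>(BaseCost) in the ternary above is forced by the language, not by style: the two arms of a conditional expression must converge to a common type, and a bare std::nullopt is only a std::nullopt_t. A standalone illustration of the same constraint:

    #include <optional>

    std::optional<int> pick(bool Cond, int V) {
      // return Cond ? V : std::nullopt;   // ill-formed: int and std::nullopt_t
      //                                   // have no common type
      return Cond ? std::optional<int>(V)  // OK: the nullopt arm converts to
                  : std::nullopt;          // std::optional<int>
    }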
@@ -7506,7 +7508,7 @@ LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
return VectorizationFactor::Disabled();
}
-Optional<VectorizationFactor>
+std::optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
assert(OrigLoop->isInnermost() && "Inner loop expected.");
FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
@@ -10322,7 +10324,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
unsigned UserIC = Hints.getInterleave();
// Plan how to best vectorize, return the best VF and its cost.
- Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
+ std::optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
VectorizationFactor VF = VectorizationFactor::Disabled();
unsigned IC = 1;
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 42dcb3a58a83..880d1c0a54be 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -287,7 +287,7 @@ static bool isCommutative(Instruction *I) {
/// \returns inserting index of InsertElement or InsertValue instruction,
/// using Offset as base offset for index.
-static Optional<unsigned> getInsertIndex(const Value *InsertInst,
+static std::optional<unsigned> getInsertIndex(const Value *InsertInst,
unsigned Offset = 0) {
int Index = Offset;
if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
@@ -378,7 +378,7 @@ static SmallBitVector isUndefVector(const Value *V,
if (isa<T>(II->getOperand(1)))
continue;
Base = II->getOperand(0);
- Optional<unsigned> Idx = getInsertIndex(II);
+ std::optional<unsigned> Idx = getInsertIndex(II);
if (!Idx)
continue;
if (*Idx < UseMask.size() && !UseMask.test(*Idx))
@@ -447,7 +447,7 @@ static SmallBitVector isUndefVector(const Value *V,
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// ShuffleVectorInst/getShuffleCost?
-static Optional<TargetTransformInfo::ShuffleKind>
+static std::optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
const auto *It =
find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
@@ -764,7 +764,7 @@ static bool allSameType(ArrayRef<Value *> VL) {
}
/// \returns True if Extract{Value,Element} instruction extracts element Idx.
-static Optional<unsigned> getExtractIndex(Instruction *E) {
+static std::optional<unsigned> getExtractIndex(Instruction *E) {
unsigned Opcode = E->getOpcode();
assert((Opcode == Instruction::ExtractElement ||
Opcode == Instruction::ExtractValue) &&
@@ -1069,17 +1069,17 @@ class BoUpSLP {
/// shuffled vector entry + (possibly) permutation with other gathers. It
/// implements the checks only for possibly ordered scalars (Loads,
/// ExtractElement, ExtractValue), which can be part of the graph.
- Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);
+ std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);
/// Sort loads into increasing pointers offsets to allow greater clustering.
- Optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);
+ std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);
/// Gets reordering data for the given tree entry. If the entry is vectorized
/// - just return ReorderIndices, otherwise check if the scalars can be
/// reordered and return the most optimal order.
/// \param TopToBottom If true, include the order of vectorized stores and
/// insertelement nodes, otherwise skip them.
- Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);
+ std::optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);
/// Reorders the current graph to the most profitable order starting from the
/// root node to the leaf nodes. The best order is chosen only from the nodes
@@ -1677,7 +1677,7 @@ class BoUpSLP {
// Search all operands in Ops[*][Lane] for the one that matches best
// Ops[OpIdx][LastLane] and return its operand index.
// If no good match can be found, return std::nullopt.
- Optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane,
+ std::optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane,
ArrayRef<ReorderingMode> ReorderingModes,
ArrayRef<Value *> MainAltOps) {
unsigned NumOperands = getNumOperands();
@@ -1697,7 +1697,7 @@ class BoUpSLP {
// Sometimes we have more than one option (e.g., Opcode and Undefs), so we
// are using the score to differentiate between the two.
struct BestOpData {
- Optional<unsigned> Idx = std::nullopt;
+ std::optional<unsigned> Idx = std::nullopt;
unsigned Score = 0;
} BestOp;
BestOp.Score =
@@ -2086,7 +2086,7 @@ class BoUpSLP {
// Look for a good match for each operand.
for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
// Search for the operand that matches SortedOps[OpIdx][Lane-1].
- Optional<unsigned> BestIdx = getBestOperand(
+ std::optional<unsigned> BestIdx = getBestOperand(
OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
// By not selecting a value, we allow the operands that follow to
// select a better matching value. We will get a non-null value in
@@ -2177,13 +2177,13 @@ class BoUpSLP {
/// root of profitable tree to vectorize. Return std::nullopt if no candidate
/// scored above the LookAheadHeuristics::ScoreFail. \param Limit Lower limit
/// of the cost, considered to be good enough score.
- Optional<int>
+ std::optional<int>
findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates,
int Limit = LookAheadHeuristics::ScoreFail) {
LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2,
RootLookAheadMaxDepth);
int BestScore = Limit;
- Optional<int> Index;
+ std::optional<int> Index;
for (int I : seq<int>(0, Candidates.size())) {
int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
Candidates[I].second,
@@ -2337,7 +2337,7 @@ class BoUpSLP {
/// tree entries.
/// \returns ShuffleKind, if gathered values can be represented as shuffles of
/// previous tree entries. \p Mask is filled with the shuffle mask.
- Optional<TargetTransformInfo::ShuffleKind>
+ std::optional<TargetTransformInfo::ShuffleKind>
isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
SmallVectorImpl<const TreeEntry *> &Entries);
@@ -2687,7 +2687,7 @@ class BoUpSLP {
#endif
/// Create a new VectorizableTree entry.
- TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
+ TreeEntry *newTreeEntry(ArrayRef<Value *> VL, std::optional<ScheduleData *> Bundle,
const InstructionsState &S,
const EdgeInfo &UserTreeIdx,
ArrayRef<int> ReuseShuffleIndices = std::nullopt,
@@ -2700,7 +2700,7 @@ class BoUpSLP {
TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
TreeEntry::EntryState EntryState,
- Optional<ScheduleData *> Bundle,
+ std::optional<ScheduleData *> Bundle,
const InstructionsState &S,
const EdgeInfo &UserTreeIdx,
ArrayRef<int> ReuseShuffleIndices = std::nullopt,
@@ -2822,7 +2822,7 @@ class BoUpSLP {
Instruction *Inst2) {
// First check if the result is already in the cache.
AliasCacheKey key = std::make_pair(Inst1, Inst2);
- Optional<bool> &result = AliasCache[key];
+ std::optional<bool> &result = AliasCache[key];
if (result) {
return result.value();
}
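The AliasCache hunk above keeps the memoization idiom intact across the rename: operator[] default-constructs a disengaged optional on first access, so an empty std::optional<bool> means "not computed yet", and writing through the returned reference fills the cache on a miss. A self-contained sketch, using std::map in place of llvm::DenseMap and a stand-in query:

    #include <map>
    #include <optional>
    #include <utility>

    // Stand-in for the real alias-analysis call (illustrative only).
    bool expensiveAliasQuery(int A, int B) { return A == B; }

    std::map<std::pair<int, int>, std::optional<bool>> Cache;

    bool cachedAlias(int A, int B) {
      std::optional<bool> &Result = Cache[{A, B}]; // disengaged on first access
      if (Result)
        return *Result;                   // hit: reuse the stored answer
      Result = expensiveAliasQuery(A, B); // miss: compute and remember
      return *Result;
    }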
@@ -2838,7 +2838,7 @@ class BoUpSLP {
/// Cache for alias results.
/// TODO: consider moving this to the AliasAnalysis itself.
- DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
+ DenseMap<AliasCacheKey, std::optional<bool>> AliasCache;
// Cache for pointerMayBeCaptured calls inside AA. This is preserved
// globally through SLP because we don't perform any action which
@@ -3292,7 +3292,7 @@ class BoUpSLP {
/// actually moved at this stage.
/// \returns the scheduling bundle. The returned Optional value is not
/// std::nullopt if \p VL is allowed to be scheduled.
- Optional<ScheduleData *>
+ std::optional<ScheduleData *>
tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
const InstructionsState &S);
@@ -3595,7 +3595,7 @@ static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
fixupOrderingIndices(Order);
}
-Optional<BoUpSLP::OrdersType>
+std::optional<BoUpSLP::OrdersType>
BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
unsigned NumScalars = TE.Scalars.size();
@@ -3833,7 +3833,7 @@ bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
return true;
}
-Optional<BoUpSLP::OrdersType>
+std::optional<BoUpSLP::OrdersType>
BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) {
assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
Type *ScalarTy = TE.Scalars[0]->getType();
@@ -3868,8 +3868,8 @@ static bool areTwoInsertFromSameBuildVector(
return false;
auto *IE1 = VU;
auto *IE2 = V;
- Optional<unsigned> Idx1 = getInsertIndex(IE1);
- Optional<unsigned> Idx2 = getInsertIndex(IE2);
+ std::optional<unsigned> Idx1 = getInsertIndex(IE1);
+ std::optional<unsigned> Idx2 = getInsertIndex(IE2);
if (Idx1 == std::nullopt || Idx2 == std::nullopt)
return false;
// Go through the vector operand of insertelement instructions trying to find
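Equality tests such as Idx1 == std::nullopt survive the rename unchanged: std::optional defines comparison against std::nullopt_t, and the test is equivalent to !Idx1.has_value(). A minimal check:

    #include <cassert>
    #include <optional>

    int main() {
      std::optional<unsigned> Idx;      // default-constructed: disengaged
      assert(Idx == std::nullopt);      // same as !Idx.has_value()
      Idx = 3;
      assert(Idx != std::nullopt && *Idx == 3);
      return 0;
    }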
@@ -3898,7 +3898,7 @@ static bool areTwoInsertFromSameBuildVector(
return false;
}
-Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
+std::optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
bool TopToBottom) {
// No need to reorder if need to shuffle reuses, still need to shuffle the
// node.
@@ -3920,7 +3920,7 @@ Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
TE.ReuseShuffleIndices.end());
if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() &&
all_of(TE.Scalars, [Sz](Value *V) {
- Optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V));
+ std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V));
return Idx && *Idx < Sz;
})) {
SmallVector<int> ReorderMask(Sz, UndefMaskElem);
@@ -3933,7 +3933,7 @@ Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
if (Idx == UndefMaskElem)
continue;
Value *V = TE.Scalars[ReorderMask[Idx]];
- Optional<unsigned> EI = getExtractIndex(cast<Instruction>(V));
+ std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V));
Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI));
}
}
@@ -3973,8 +3973,8 @@ Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
IE1, IE2,
[](InsertElementInst *II) { return II->getOperand(0); }))
return false;
- Optional<unsigned> Idx1 = getInsertIndex(IE1);
- Optional<unsigned> Idx2 = getInsertIndex(IE2);
+ std::optional<unsigned> Idx1 = getInsertIndex(IE1);
+ std::optional<unsigned> Idx2 = getInsertIndex(IE2);
if (Idx1 == std::nullopt || Idx2 == std::nullopt)
return false;
return *Idx1 < *Idx2;
@@ -3983,8 +3983,8 @@ Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) {
if (EE1->getOperand(0) != EE2->getOperand(0))
return false;
- Optional<unsigned> Idx1 = getExtractIndex(EE1);
- Optional<unsigned> Idx2 = getExtractIndex(EE2);
+ std::optional<unsigned> Idx1 = getExtractIndex(EE1);
+ std::optional<unsigned> Idx2 = getExtractIndex(EE2);
if (Idx1 == std::nullopt || Idx2 == std::nullopt)
return false;
return *Idx1 < *Idx2;
@@ -4040,10 +4040,10 @@ Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
return CurrentOrder;
}
}
- if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
+ if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
return CurrentOrder;
if (TE.Scalars.size() >= 4)
- if (Optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
+ if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
return Order;
}
return std::nullopt;
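The if (std::optional<OrdersType> CurrentOrder = ...) shape above leans on optional's explicit conversion to bool: the declaration lives in the condition, the branch runs only when the producer returned a value, and the name never escapes the branch. A minimal sketch with an illustrative producer:

    #include <optional>
    #include <string>

    std::optional<std::string> lookup(bool Hit) {  // illustrative producer
      if (Hit)
        return std::string("found");
      return std::nullopt;
    }

    std::string describe(bool Hit) {
      if (std::optional<std::string> S = lookup(Hit)) // true iff S has a value
        return *S;       // engaged: use the contained value
      return "none";     // disengaged: fall through
    }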
@@ -4144,7 +4144,7 @@ void BoUpSLP::reorderTopToBottom() {
// TODO: Check the reverse order too.
}
- if (Optional<OrdersType> CurrentOrder =
+ if (std::optional<OrdersType> CurrentOrder =
getReorderingData(*TE, /*TopToBottom=*/true)) {
// Do not include ordering for nodes used in the alt opcode vectorization,
// better to reorder them during bottom-to-top stage. If follow the order
@@ -4388,7 +4388,7 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
const std::unique_ptr<TreeEntry> &TE) {
if (TE->State != TreeEntry::Vectorize)
NonVectorized.push_back(TE.get());
- if (Optional<OrdersType> CurrentOrder =
+ if (std::optional<OrdersType> CurrentOrder =
getReorderingData(*TE, /*TopToBottom=*/false)) {
OrderedEntries.insert(TE.get());
if (TE->State != TreeEntry::Vectorize || !TE->ReuseShuffleIndices.empty())
@@ -5247,7 +5247,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
BlockScheduling &BS = *BSRef;
- Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
+ std::optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
#ifdef EXPENSIVE_CHECKS
// Make sure we didn't break any internal invariants
BS.verify();
@@ -6027,7 +6027,7 @@ bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
if (auto *EE = dyn_cast<ExtractElementInst>(Inst))
if (isa<UndefValue>(EE->getIndexOperand()))
continue;
- Optional<unsigned> Idx = getExtractIndex(Inst);
+ std::optional<unsigned> Idx = getExtractIndex(Inst);
if (!Idx)
break;
const unsigned ExtIdx = *Idx;
@@ -6333,7 +6333,7 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
(VE && VE != E))
continue;
auto *EE = cast<ExtractElementInst>(V);
- Optional<unsigned> EEIdx = getExtractIndex(EE);
+ std::optional<unsigned> EEIdx = getExtractIndex(EE);
if (!EEIdx)
continue;
unsigned Idx = *EEIdx;
@@ -6398,7 +6398,7 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
return InstructionCost::getInvalid();
SmallVector<int> Mask;
SmallVector<const TreeEntry *> Entries;
- Optional<TargetTransformInfo::ShuffleKind> Shuffle =
+ std::optional<TargetTransformInfo::ShuffleKind> Shuffle =
isGatherShuffledEntry(E, Mask, Entries);
if (Shuffle) {
InstructionCost GatherCost = 0;
@@ -6434,7 +6434,7 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
// Check that gather of extractelements can be represented as just a
// shuffle of a single/two vectors the scalars are extracted from.
SmallVector<int> Mask;
- Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
+ std::optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
isFixedVectorShuffle(VL, Mask);
if (ShuffleKind) {
// Found the bunch of extractelement instructions that must be gathered
@@ -7529,7 +7529,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
// to detect it as a final shuffled/identity match.
if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) {
if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
- Optional<unsigned> InsertIdx = getInsertIndex(VU);
+ std::optional<unsigned> InsertIdx = getInsertIndex(VU);
if (InsertIdx) {
const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar);
auto *It = find_if(
@@ -7699,7 +7699,7 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
return Cost;
}
-Optional<TargetTransformInfo::ShuffleKind>
+std::optional<TargetTransformInfo::ShuffleKind>
BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
SmallVectorImpl<const TreeEntry *> &Entries) {
// TODO: currently checking only for Scalars in the tree entry, need to count
@@ -8394,7 +8394,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Value *Vec;
SmallVector<int> Mask;
SmallVector<const TreeEntry *> Entries;
- Optional<TargetTransformInfo::ShuffleKind> Shuffle =
+ std::optional<TargetTransformInfo::ShuffleKind> Shuffle =
isGatherShuffledEntry(E, Mask, Entries);
if (Shuffle) {
assert((Entries.size() == 1 || Entries.size() == 2) &&
@@ -9185,7 +9185,7 @@ Value *BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues,
// Skip if the scalar is another vector op or Vec is not an instruction.
if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) {
if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) {
- Optional<unsigned> InsertIdx = getInsertIndex(VU);
+ std::optional<unsigned> InsertIdx = getInsertIndex(VU);
if (InsertIdx) {
// Need to use original vector, if the root is truncated.
if (MinBWs.count(Scalar) &&
@@ -9750,7 +9750,7 @@ BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
// Groups the instructions to a bundle (which is then a single scheduling entity)
// and schedules instructions until the bundle gets ready.
-Optional<BoUpSLP::ScheduleData *>
+std::optional<BoUpSLP::ScheduleData *>
BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
const InstructionsState &S) {
// No need to schedule PHIs, insertelement, extractelement and extractvalue
@@ -11144,7 +11144,7 @@ bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
return tryToVectorizePair(Op0, Op1, R);
// We have multiple options. Try to pick the single best.
- Optional<int> BestCandidate = R.findBestRootPair(Candidates);
+ std::optional<int> BestCandidate = R.findBestRootPair(Candidates);
if (!BestCandidate)
return false;
return tryToVectorizePair(Candidates[*BestCandidate].first,
@@ -12316,7 +12316,7 @@ class HorizontalReduction {
} // end anonymous namespace
-static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
+static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) {
if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
return cast<FixedVectorType>(IE->getType())->getNumElements();
@@ -12351,7 +12351,7 @@ static void findBuildAggregate_rec(Instruction *LastInsertInst,
unsigned OperandOffset) {
do {
Value *InsertedOperand = LastInsertInst->getOperand(1);
- Optional<unsigned> OperandIndex =
+ std::optional<unsigned> OperandIndex =
getInsertIndex(LastInsertInst, OperandOffset);
if (!OperandIndex)
return;
@@ -12395,7 +12395,7 @@ static bool findBuildAggregate(Instruction *LastInsertInst,
assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
"Expected empty result vectors!");
- Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
+ std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
if (!AggregateSize)
return false;
BuildVectorOpds.resize(*AggregateSize);
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 17ed42744d3e..c2c495c501d2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -216,7 +216,7 @@ struct VPTransformState {
/// Hold the indices to generate specific scalar instructions. Null indicates
/// that all instances are to be generated, using either scalar or vector
/// instructions.
- Optional<VPIteration> Instance;
+ std::optional<VPIteration> Instance;
struct DataState {
/// A type for vectorized values in the new loop. Each value from the
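The Instance member is the other idiomatic face of the type: here a disengaged optional is not a failure but a deliberate state, "no specific instance requested, generate them all", exactly as the comment above says. Sketched with illustrative stand-ins for the VPlan types:

    #include <optional>

    struct Iteration { unsigned Part, Lane; };  // stand-in for VPIteration

    // An empty optional means no instance filter: act on every instance.
    void generate(std::optional<Iteration> Instance) {
      if (Instance) {
        // Scalar path: emit only (Instance->Part, Instance->Lane).
      } else {
        // Vector path: emit code covering all instances at once.
      }
    }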