[llvm] 4674328 - MemoryLocation: convert Optional to std::optional
Krzysztof Parzyszek via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 1 15:36:44 PST 2022
Author: Krzysztof Parzyszek
Date: 2022-12-01T15:36:20-08:00
New Revision: 467432899bc2f71842ed1b24d24c094da02af7d4
URL: https://github.com/llvm/llvm-project/commit/467432899bc2f71842ed1b24d24c094da02af7d4
DIFF: https://github.com/llvm/llvm-project/commit/467432899bc2f71842ed1b24d24c094da02af7d4.diff
LOG: MemoryLocation: convert Optional to std::optional
Added:
Modified:
llvm/include/llvm/Analysis/MemoryLocation.h
llvm/lib/Analysis/MemoryLocation.cpp
llvm/lib/CodeGen/StackProtector.cpp
llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
llvm/lib/Transforms/IPO/AttributorAttributes.cpp
llvm/lib/Transforms/IPO/FunctionAttrs.cpp
llvm/lib/Transforms/IPO/OpenMPOpt.cpp
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index dfac49445d750..0dba415b78af4 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -16,10 +16,11 @@
#define LLVM_ANALYSIS_MEMORYLOCATION_H
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/TypeSize.h"
+#include <optional>
+
namespace llvm {
class CallBase;
@@ -39,9 +40,9 @@ class VAArgInst;
class Value;
// Represents the size of a MemoryLocation. Logically, it's an
-// Optional<uint63_t> that also carries a bit to represent whether the integer
-// it contains, N, is 'precise'. Precise, in this context, means that we know
-// that the area of storage referenced by the given MemoryLocation must be
+// std::optional<uint63_t> that also carries a bit to represent whether the
+// integer it contains, N, is 'precise'. Precise, in this context, means that we
+// know that the area of storage referenced by the given MemoryLocation must be
// precisely N bytes. An imprecise value is formed as the union of two or more
// precise values, and can conservatively represent all of the values unioned
// into it. Importantly, imprecise values are an *upper-bound* on the size of a
@@ -62,7 +63,7 @@ class Value;
// we'll ever actually do so.
//
// If asked to represent a pathologically large value, this will degrade to
-// None.
+// std::nullopt.
class LocationSize {
enum : uint64_t {
BeforeOrAfterPointer = ~uint64_t(0),
@@ -242,7 +243,7 @@ class MemoryLocation {
static MemoryLocation get(const Instruction *Inst) {
return *MemoryLocation::getOrNone(Inst);
}
- static Optional<MemoryLocation> getOrNone(const Instruction *Inst);
+ static std::optional<MemoryLocation> getOrNone(const Instruction *Inst);
/// Return a location representing the source of a memory transfer.
static MemoryLocation getForSource(const MemTransferInst *MTI);
@@ -254,8 +255,8 @@ class MemoryLocation {
static MemoryLocation getForDest(const MemIntrinsic *MI);
static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
- static Optional<MemoryLocation> getForDest(const CallBase *CI,
- const TargetLibraryInfo &TLI);
+ static std::optional<MemoryLocation> getForDest(const CallBase *CI,
+ const TargetLibraryInfo &TLI);
/// Return a location representing a particular argument of a call.
static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
index fb23f0b4ec01f..e839f9e0dfb2e 100644
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -74,7 +74,8 @@ MemoryLocation MemoryLocation::get(const AtomicRMWInst *RMWI) {
RMWI->getAAMetadata());
}
-Optional<MemoryLocation> MemoryLocation::getOrNone(const Instruction *Inst) {
+std::optional<MemoryLocation>
+MemoryLocation::getOrNone(const Instruction *Inst) {
switch (Inst->getOpcode()) {
case Instruction::Load:
return get(cast<LoadInst>(Inst));
@@ -87,7 +88,7 @@ Optional<MemoryLocation> MemoryLocation::getOrNone(const Instruction *Inst) {
case Instruction::AtomicRMW:
return get(cast<AtomicRMWInst>(Inst));
default:
- return None;
+ return std::nullopt;
}
}
@@ -117,39 +118,39 @@ MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
return getForArgument(MI, 0, nullptr);
}
-Optional<MemoryLocation>
+std::optional<MemoryLocation>
MemoryLocation::getForDest(const CallBase *CB, const TargetLibraryInfo &TLI) {
if (!CB->onlyAccessesArgMemory())
- return None;
+ return std::nullopt;
if (CB->hasOperandBundles())
// TODO: remove implementation restriction
- return None;
+ return std::nullopt;
Value *UsedV = nullptr;
std::optional<unsigned> UsedIdx;
for (unsigned i = 0; i < CB->arg_size(); i++) {
if (!CB->getArgOperand(i)->getType()->isPointerTy())
continue;
- if (CB->onlyReadsMemory(i))
- continue;
+ if (CB->onlyReadsMemory(i))
+ continue;
if (!UsedV) {
// First potentially writing parameter
UsedV = CB->getArgOperand(i);
UsedIdx = i;
continue;
}
- UsedIdx = None;
+ UsedIdx = std::nullopt;
if (UsedV != CB->getArgOperand(i))
// Can't describe writing to two distinct locations.
// TODO: This results in an imprecision when two values derived from the
// same object are passed as arguments to the same function.
- return None;
+ return std::nullopt;
}
if (!UsedV)
// We don't currently have a way to represent a "does not write" result
// and thus have to be conservative and return unknown.
- return None;
+ return std::nullopt;
if (UsedIdx)
return getForArgument(CB, *UsedIdx, &TLI);
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 2eef88bc1c8d0..2f0056f125ca6 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -46,6 +46,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include <optional>
#include <utility>
using namespace llvm;
@@ -166,7 +167,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI,
const auto *I = cast<Instruction>(U);
// If this instruction accesses memory make sure it doesn't access beyond
// the bounds of the allocated object.
- Optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
+ std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
if (MemLoc && MemLoc->Size.hasValue() &&
!TypeSize::isKnownGE(AllocSize,
TypeSize::getFixed(MemLoc->Size.getValue())))
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index 9837ca985c5db..2e350cd86d6fd 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -15,7 +15,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -2391,7 +2390,8 @@ auto HexagonVectorCombine::isSafeToMoveBeforeInBB(const Instruction &In,
BasicBlock::const_iterator To,
const T &IgnoreInsts) const
-> bool {
- auto getLocOrNone = [this](const Instruction &I) -> Optional<MemoryLocation> {
+ auto getLocOrNone =
+ [this](const Instruction &I) -> std::optional<MemoryLocation> {
if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
switch (II->getIntrinsicID()) {
case Intrinsic::masked_load:
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 541975c68c562..875f7e27e966e 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -2292,7 +2292,7 @@ static int64_t getKnownNonNullAndDerefBytesForUse(
return DerefAA.getKnownDereferenceableBytes();
}
- Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
+ std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
return 0;
@@ -4238,7 +4238,7 @@ struct AADereferenceableImpl : AADereferenceable {
if (!UseV->getType()->isPointerTy())
return;
- Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
+ std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
return;
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 3058dc25202ec..3f61dbe3354e7 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -63,6 +63,7 @@
#include <cassert>
#include <iterator>
#include <map>
+#include <optional>
#include <vector>
using namespace llvm;
@@ -211,7 +212,7 @@ static MemoryEffects checkFunctionMemoryAccess(Function &F, bool ThisBody,
if (MR == ModRefInfo::NoModRef)
continue;
- Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(&I);
+ std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(&I);
if (!Loc) {
// If no location is known, conservatively assume anything can be
// accessed.
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 9181ce541a12b..571ce33d9266d 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -50,6 +50,7 @@
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include <algorithm>
+#include <optional>
using namespace llvm;
using namespace omp;
@@ -1508,7 +1509,7 @@ struct OpenMPOpt {
continue;
auto IsPotentiallyAffectedByBarrier =
- [](Optional<MemoryLocation> Loc) {
+ [](std::optional<MemoryLocation> Loc) {
const Value *Obj = (Loc && Loc->Ptr)
? getUnderlyingObject(Loc->Ptr)
: nullptr;
@@ -1538,11 +1539,12 @@ struct OpenMPOpt {
};
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
- Optional<MemoryLocation> Loc = MemoryLocation::getForDest(MI);
+ std::optional<MemoryLocation> Loc =
+ MemoryLocation::getForDest(MI);
if (IsPotentiallyAffectedByBarrier(Loc))
return false;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
- Optional<MemoryLocation> Loc =
+ std::optional<MemoryLocation> Loc =
MemoryLocation::getForSource(MTI);
if (IsPotentiallyAffectedByBarrier(Loc))
return false;
@@ -1554,7 +1556,7 @@ struct OpenMPOpt {
if (LI->hasMetadata(LLVMContext::MD_invariant_load))
continue;
- Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
+ std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
if (IsPotentiallyAffectedByBarrier(Loc))
return false;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 828828e850202..4aefcad69fa9f 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -105,6 +105,7 @@
#include <cassert>
#include <cstdint>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
@@ -2709,7 +2710,7 @@ static bool isRemovableWrite(CallBase &CB, Value *UsedV,
// If the only possible side effect of the call is writing to the alloca,
// and the result isn't used, we can safely remove any reads implied by the
// call including those which might read the alloca itself.
- Optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
+ std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
return Dest && Dest->Ptr == UsedV;
}
@@ -4003,7 +4004,7 @@ static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
// to allow reload along used path as described below. Otherwise, this
// is simply a store to a dead allocation which will be removed.
return false;
- Optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
+ std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
if (!Dest)
return false;
auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 21603c96fecd9..a60f88a95c47d 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -86,6 +86,7 @@
#include <cstdint>
#include <iterator>
#include <map>
+#include <optional>
#include <utility>
using namespace llvm;
@@ -1045,9 +1046,9 @@ struct DSEState {
return !I.first->second;
}
- Optional<MemoryLocation> getLocForWrite(Instruction *I) const {
+ std::optional<MemoryLocation> getLocForWrite(Instruction *I) const {
if (!I->mayWriteToMemory())
- return None;
+ return std::nullopt;
if (auto *CB = dyn_cast<CallBase>(I))
return MemoryLocation::getForDest(CB, TLI);
@@ -1157,7 +1158,7 @@ struct DSEState {
/// If \p I is a memory terminator like llvm.lifetime.end or free, return a
/// pair with the MemoryLocation terminated by \p I and a boolean flag
/// indicating whether \p I is a free-like call.
- Optional<std::pair<MemoryLocation, bool>>
+ std::optional<std::pair<MemoryLocation, bool>>
getLocForTerminator(Instruction *I) const {
uint64_t Len;
Value *Ptr;
@@ -1170,7 +1171,7 @@ struct DSEState {
return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
}
- return None;
+ return std::nullopt;
}
/// Returns true if \p I is a memory terminator instruction like
@@ -1185,7 +1186,7 @@ struct DSEState {
/// instruction \p AccessI.
bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
Instruction *MaybeTerm) {
- Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
+ std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
getLocForTerminator(MaybeTerm);
if (!MaybeTermLoc)
@@ -1296,7 +1297,7 @@ struct DSEState {
!KillingI->mayReadFromMemory();
// Find the next clobbering Mod access for DefLoc, starting at StartAccess.
- Optional<MemoryLocation> CurrentLoc;
+ std::optional<MemoryLocation> CurrentLoc;
for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
LLVM_DEBUG({
dbgs() << " visiting " << *Current;
@@ -2024,12 +2025,13 @@ static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
continue;
Instruction *KillingI = KillingDef->getMemoryInst();
- Optional<MemoryLocation> MaybeKillingLoc;
- if (State.isMemTerminatorInst(KillingI))
- MaybeKillingLoc = State.getLocForTerminator(KillingI).transform(
- [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
- else
+ std::optional<MemoryLocation> MaybeKillingLoc;
+ if (State.isMemTerminatorInst(KillingI)) {
+ if (auto KillingLoc = State.getLocForTerminator(KillingI))
+ MaybeKillingLoc = KillingLoc->first;
+ } else {
MaybeKillingLoc = State.getLocForWrite(KillingI);
+ }
if (!MaybeKillingLoc) {
LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
More information about the llvm-commits
mailing list