[llvm] 9c444f7 - [llvm] Use std::nullopt instead of None (NFC)

Kazu Hirata via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 9 18:32:44 PST 2022


Author: Kazu Hirata
Date: 2022-12-09T18:32:32-08:00
New Revision: 9c444f7021e6fd077b6b8bea4d9ab8883c34c7e5

URL: https://github.com/llvm/llvm-project/commit/9c444f7021e6fd077b6b8bea4d9ab8883c34c7e5
DIFF: https://github.com/llvm/llvm-project/commit/9c444f7021e6fd077b6b8bea4d9ab8883c34c7e5.diff

LOG: [llvm] Use std::nullopt instead of None (NFC)

This is part of an effort to migrate from llvm::Optional to
std::optional:

https://discourse.llvm.org/t/deprecating-llvm-optional-x-hasvalue-getvalue-getvalueor/63716
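
The rewrite is mechanical: each "return None;" (and each None passed where
an empty value is expected) becomes std::nullopt, and <optional> is added to
the includes of every touched file. A minimal standalone sketch of the
before/after, not taken from this commit:

  #include <cstddef>
  #include <optional>

  // Before (llvm::Optional spelling):
  //   Optional<size_t> getEstimate(bool Ready) {
  //     if (!Ready)
  //       return None;
  //     return 42;
  //   }

  // After: identical control flow, std::nullopt as the empty sentinel.
  std::optional<size_t> getEstimate(bool Ready) {
    if (!Ready)
      return std::nullopt;
    return 42;
  }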

Added: 
    

Modified: 
    llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
    llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
    llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
    llvm/lib/Analysis/TFLiteUtils.cpp
    llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 9d86f376e2e48..855eeb9e17bee 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -27,6 +27,7 @@
 #include "llvm/Support/ManagedStatic.h"
 
+#include <optional>
 #include <vector>
 
 using namespace llvm;
 
@@ -355,7 +356,7 @@ DevelopmentModeMLInlineAdvisor::~DevelopmentModeMLInlineAdvisor() {
 Optional<size_t>
 DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
   if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
-    return None;
+    return std::nullopt;
   auto &R =
       FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
   if (!R) {

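Note that the return type above is still Optional<size_t>, yet returning
std::nullopt compiles: by this point in the migration llvm::Optional and None
had been made interoperable with std::optional and std::nullopt, so both
spellings coexist while call sites are converted. A rough sketch of that
coexistence, using a plain alias as a hypothetical stand-in for llvm::Optional:

  #include <cstddef>
  #include <optional>

  // Hypothetical stand-in; the real llvm::Optional (llvm/ADT/Optional.h)
  // wrapped std::optional during the migration window.
  template <typename T> using Optional = std::optional<T>;

  Optional<size_t> nativeSizeEstimate(bool Requested) {
    if (!Requested)
      return std::nullopt; // previously: return None;
    return size_t{128};
  }
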
diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
index 0a8f18489f622..ad7c9b94634b7 100644
--- a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
+++ b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
@@ -35,6 +35,7 @@ AnalysisKey InlineSizeEstimatorAnalysis::Key;
 #include "llvm/Support/CommandLine.h"
 #include <algorithm>
 #include <deque>
+#include <optional>
 
 cl::opt<std::string> TFIR2NativeModelPath(
     "ml-inliner-ir2native-model", cl::Hidden,
@@ -237,14 +238,14 @@ InlineSizeEstimatorAnalysis::Result
 InlineSizeEstimatorAnalysis::run(const Function &F,
                                  FunctionAnalysisManager &FAM) {
   if (!Evaluator)
-    return None;
+    return std::nullopt;
   auto Features = IRToNativeSizeLearning::getFunctionFeatures(
       const_cast<Function &>(F), FAM);
   int32_t *V = Evaluator->getInput<int32_t>(0);
   Features.fillTensor(V);
   auto ER = Evaluator->evaluate();
   if (!ER)
-    return None;
+    return std::nullopt;
   float Ret = *ER->getTensorValue<float>(0);
   if (Ret < 0.0)
     Ret = 0.0;

diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
index c7e1dd26ecf58..59e4666210018 100644
--- a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
+++ b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -17,6 +17,7 @@
 #include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/Path.h"
+#include <optional>
 
 using namespace llvm;
 namespace {
@@ -39,18 +40,18 @@ loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
   if (!BufferOrError) {
     Ctx.emitError("Error opening output specs file: " + FileName + " : " +
                   BufferOrError.getError().message());
-    return None;
+    return std::nullopt;
   }
   auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
   if (!ParsedJSONValues) {
     Ctx.emitError("Could not parse specs file: " + FileName);
-    return None;
+    return std::nullopt;
   }
   auto ValuesArray = ParsedJSONValues->getAsArray();
   if (!ValuesArray) {
     Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
                   "logging_name:<name>} dictionaries");
-    return None;
+    return std::nullopt;
   }
   std::vector<LoggedFeatureSpec> Ret;
   for (const auto &Value : *ValuesArray)
@@ -65,7 +66,7 @@ loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                   "Only int64, int32, and float tensors are supported. "
                   "Found unsupported type for tensor named " +
                   TensorSpec->name());
-              return None;
+              return std::nullopt;
             }
             Ret.push_back({*TensorSpec, LoggingName->str()});
           }
@@ -77,13 +78,13 @@ loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
         "with a json object describing a TensorSpec; and a 'logging_name' key, "
         "which is a string to use as name when logging this tensor in the "
         "training log.");
-    return None;
+    return std::nullopt;
   }
   if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
     Ctx.emitError("The first output spec must describe the decision tensor, "
                   "and must have the logging_name " +
                   StringRef(ExpectedDecisionName));
-    return None;
+    return std::nullopt;
   }
   return Ret;
 }

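The loadOutputSpecs hunks above show the common shape of these call sites:
validate one step at a time, emit a diagnostic, and bail out with std::nullopt
at the first failure. A self-contained sketch of the pattern in plain C++,
with std::cerr standing in for Ctx.emitError and a digit string standing in
for the JSON specs:

  #include <cctype>
  #include <iostream>
  #include <optional>
  #include <string>
  #include <vector>

  std::optional<std::vector<int>> parseSpecs(const std::string &Text) {
    if (Text.empty()) {
      std::cerr << "Error opening output specs\n";
      return std::nullopt; // first bail-out point
    }
    std::vector<int> Ret;
    for (unsigned char C : Text) {
      if (!std::isdigit(C)) {
        std::cerr << "Found unsupported element\n";
        return std::nullopt; // second bail-out point
      }
      Ret.push_back(C - '0');
    }
    return Ret; // success: a populated optional
  }
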
diff --git a/llvm/lib/Analysis/TFLiteUtils.cpp b/llvm/lib/Analysis/TFLiteUtils.cpp
index 1f17cd0efdffa..843411a202b62 100644
--- a/llvm/lib/Analysis/TFLiteUtils.cpp
+++ b/llvm/lib/Analysis/TFLiteUtils.cpp
@@ -31,6 +31,7 @@
 
 #include <cassert>
 #include <numeric>
+#include <optional>
 
 using namespace llvm;
 
@@ -209,7 +210,7 @@ bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TfLiteTensor *Tensor,
 
 Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
   if (!isValid())
-    return None;
+    return std::nullopt;
   return EvaluationResult(Impl->evaluate());
 }
 

diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index 6e12876e1b0b6..fd573e6e9e459 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -23,6 +23,7 @@
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include <assert.h>
 #include <cstdint>
+#include <optional>
 #include <sstream>
 #include <unordered_map>
 
@@ -194,7 +195,7 @@ void FunctionVarLocs::init(FunctionVarLocsBuilder &Builder) {
   // UniqueVectors IDs are one-based (which means the VarLocInfo VarID values
   // are one-based) so reserve an extra and insert a dummy.
   Variables.reserve(Builder.Variables.size() + 1);
-  Variables.push_back(DebugVariable(nullptr, None, nullptr));
+  Variables.push_back(DebugVariable(nullptr, std::nullopt, nullptr));
   Variables.append(Builder.Variables.begin(), Builder.Variables.end());
 }
 
@@ -244,17 +245,17 @@ static Optional<int64_t> getDerefOffsetInBytes(const DIExpression *DIExpr) {
     else if (Elements[2] == dwarf::DW_OP_minus)
       Offset = -Elements[1];
     else
-      return None;
+      return std::nullopt;
   }
 
   // If that's all there is it means there's no deref.
   if (NextElement >= NumElements)
-    return None;
+    return std::nullopt;
 
   // Check the next element is DW_OP_deref - otherwise this is too complex or
   // isn't a deref expression.
   if (Elements[NextElement] != dwarf::DW_OP_deref)
-    return None;
+    return std::nullopt;
 
   // Check the final operation is either the DW_OP_deref or is a fragment.
   if (NumElements == NextElement + 1)
@@ -264,7 +265,7 @@ static Optional<int64_t> getDerefOffsetInBytes(const DIExpression *DIExpr) {
     return Offset; // Ends with deref + fragment.
 
   // Don't bother trying to interpret anything more complex.
-  return None;
+  return std::nullopt;
 }
 
 /// A whole (unfragmented) source variable.
@@ -846,7 +847,7 @@ class MemLocFragmentFill {
         auto &Ctx = Fn.getContext();
 
         for (auto FragMemLoc : FragMemLocs) {
-          DIExpression *Expr = DIExpression::get(Ctx, None);
+          DIExpression *Expr = DIExpression::get(Ctx, std::nullopt);
           Expr = *DIExpression::createFragmentExpression(
               Expr, FragMemLoc.OffsetInBits, FragMemLoc.SizeInBits);
           Expr = DIExpression::prepend(Expr, DIExpression::DerefAfter,
@@ -1339,7 +1340,7 @@ void AssignmentTrackingLowering::processUntaggedInstruction(
     //
     // DIExpression: Add fragment and offset.
     DebugVariable V = FnVarLocs->getVariable(Var);
-    DIExpression *DIE = DIExpression::get(I.getContext(), None);
+    DIExpression *DIE = DIExpression::get(I.getContext(), std::nullopt);
     if (auto Frag = V.getFragment()) {
       auto R = DIExpression::createFragmentExpression(DIE, Frag->OffsetInBits,
                                                       Frag->SizeInBits);
@@ -2058,7 +2059,7 @@ bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) {
         // built appropriately rather than always using an empty DIExpression.
         // The assert below is a reminder.
         assert(Simple);
-        VarLoc.Expr = DIExpression::get(Fn.getContext(), None);
+        VarLoc.Expr = DIExpression::get(Fn.getContext(), std::nullopt);
         DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID);
         FnVarLocs->addSingleLocVar(Var, VarLoc.Expr, VarLoc.DL, VarLoc.V);
         InsertedAnyIntrinsics = true;

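One subtlety in the AssignmentTrackingAnalysis hunks: in
DIExpression::get(Ctx, std::nullopt) the argument is not an optional at all.
DIExpression::get takes an ArrayRef<uint64_t>, and llvm::ArrayRef is
constructible from std::nullopt as the empty list (just as it previously
accepted None), so the substitution reads the same there. A self-contained
sketch of that convention, using a hypothetical miniature of ArrayRef:

  #include <cstdint>
  #include <initializer_list>
  #include <optional>
  #include <vector>

  // Hypothetical miniature of ArrayRef: constructible from std::nullopt
  // (empty) as well as from a braced list of elements.
  struct OpList {
    std::vector<uint64_t> Elts;
    OpList(std::nullopt_t) {}
    OpList(std::initializer_list<uint64_t> IL) : Elts(IL) {}
  };

  // Stand-in for DIExpression::get: returns how many operations it was given.
  static size_t makeExpr(OpList Ops) { return Ops.Elts.size(); }

  // An empty expression vs. one with ops (0x23 is DW_OP_plus_uconst):
  size_t demo() { return makeExpr(std::nullopt) + makeExpr({0x23, 8}); }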
