[clang] [clang] add flow-sensitive nullability analysis for C/C++ (PR #189131)
Chad Smith via cfe-commits
cfe-commits at lists.llvm.org
Fri Mar 27 22:52:47 PDT 2026
https://github.com/cs01 updated https://github.com/llvm/llvm-project/pull/189131
>From b88a7a52fa397f2528c8355efe8a03ae085cc5a9 Mon Sep 17 00:00:00 2001
From: Chad Smith <cssmith at fb.com>
Date: Fri, 27 Mar 2026 22:52:33 -0700
Subject: [PATCH] add flow-sensitive nullability analysis for C/C++
Adds a new compile-time analysis that detects null pointer dereferences
using flow-sensitive dataflow analysis on the CFG. The analysis tracks
nullability state through control flow, supporting null checks, early
returns, assertions, ternary operators, loops, and boolean intermediaries.
New flags:
-fflow-sensitive-nullability enables the analysis
-fnullability-default=<mode> sets default nullability (nullable|nonnull|unspecified)
The analysis follows the same architecture as ThreadSafety and
UninitializedValues: a standalone analysis in lib/Analysis/ invoked
from AnalysisBasedWarnings.cpp, reporting via a handler interface.
---
.../clang/Analysis/Analyses/FlowNullability.h | 39 +
clang/include/clang/Basic/DiagnosticGroups.td | 2 +
.../clang/Basic/DiagnosticSemaKinds.td | 9 +
clang/include/clang/Basic/LangOptions.def | 4 +
clang/include/clang/Basic/LangOptions.h | 2 +
clang/include/clang/Options/Options.td | 12 +
clang/include/clang/Sema/Sema.h | 11 +-
clang/lib/Analysis/CMakeLists.txt | 1 +
clang/lib/Analysis/FlowNullability.cpp | 1195 +++++++++++++++++
clang/lib/Driver/ToolChains/Clang.cpp | 5 +
clang/lib/Sema/AnalysisBasedWarnings.cpp | 64 +-
clang/lib/Sema/Sema.cpp | 92 +-
clang/lib/Sema/SemaDecl.cpp | 19 +
clang/lib/Sema/SemaExprCXX.cpp | 2 +-
clang/lib/Sema/SemaInit.cpp | 5 +-
clang/lib/Sema/SemaOverload.cpp | 4 +-
clang/lib/Sema/SemaType.cpp | 72 +-
clang/test/Driver/nullsafe-flags-negative.c | 31 +
clang/test/Driver/nullsafe-flags.c | 8 +
.../test/Sema/flow-nullability-address-of.cpp | 43 +
.../flow-nullability-and-shortcircuit.cpp | 49 +
.../Sema/flow-nullability-array-subscript.cpp | 36 +
.../Sema/flow-nullability-arrow-deref.cpp | 61 +
.../flow-nullability-bool-intermediary.cpp | 121 ++
.../Sema/flow-nullability-brace-assert.cpp | 124 ++
.../Sema/flow-nullability-builtin-expect.cpp | 87 ++
clang/test/Sema/flow-nullability-c-basic.c | 45 +
.../Sema/flow-nullability-c-comprehensive.c | 211 +++
clang/test/Sema/flow-nullability-c-idioms.c | 301 +++++
.../Sema/flow-nullability-call-invalidation.c | 49 +
.../flow-nullability-cast-propagation.cpp | 116 ++
.../Sema/flow-nullability-chained-deref.cpp | 263 ++++
.../Sema/flow-nullability-complex-cfg.cpp | 190 +++
.../flow-nullability-compound-conditions.cpp | 219 +++
.../Sema/flow-nullability-conversion-op.cpp | 38 +
.../test/Sema/flow-nullability-coroutines.cpp | 117 ++
.../Sema/flow-nullability-default-nonnull.cpp | 48 +
.../Sema/flow-nullability-duplicate-diag.cpp | 37 +
.../Sema/flow-nullability-else-branch.cpp | 65 +
.../test/Sema/flow-nullability-exceptions.cpp | 97 ++
.../Sema/flow-nullability-false-positives.cpp | 225 ++++
clang/test/Sema/flow-nullability-for-loop.cpp | 25 +
.../flow-nullability-gradual-adoption.cpp | 87 ++
.../Sema/flow-nullability-if-constexpr.cpp | 55 +
clang/test/Sema/flow-nullability-lambda.cpp | 148 ++
clang/test/Sema/flow-nullability-new-expr.cpp | 42 +
.../Sema/flow-nullability-nonnull-attr.cpp | 91 ++
...ow-nullability-nonnull-param-narrowing.cpp | 71 +
.../Sema/flow-nullability-nonnull-param.cpp | 43 +
clang/test/Sema/flow-nullability-noreturn.cpp | 81 ++
...-nullability-nullable-default-template.cpp | 43 +
.../Sema/flow-nullability-perf-stress.cpp | 401 ++++++
.../test/Sema/flow-nullability-range-for.cpp | 31 +
.../Sema/flow-nullability-reassignment.cpp | 64 +
.../test/Sema/flow-nullability-smart-ptr.cpp | 224 +++
.../flow-nullability-structured-bindings.cpp | 160 +++
clang/test/Sema/flow-nullability-switch.cpp | 38 +
.../test/Sema/flow-nullability-templates.cpp | 155 +++
.../Sema/flow-nullability-terminators.cpp | 86 ++
clang/test/Sema/flow-nullability-ternary.cpp | 59 +
.../Sema/flow-nullability-type-identity.cpp | 107 ++
.../Sema/flow-nullability-unannotated-fp.cpp | 44 +
.../Sema/flow-nullability-void-star-cast.cpp | 44 +
.../Sema/flow-nullability-warning-groups.cpp | 20 +
.../test/Sema/flow-nullability-while-loop.cpp | 38 +
65 files changed, 6249 insertions(+), 27 deletions(-)
create mode 100644 clang/include/clang/Analysis/Analyses/FlowNullability.h
create mode 100644 clang/lib/Analysis/FlowNullability.cpp
create mode 100644 clang/test/Driver/nullsafe-flags-negative.c
create mode 100644 clang/test/Driver/nullsafe-flags.c
create mode 100644 clang/test/Sema/flow-nullability-address-of.cpp
create mode 100644 clang/test/Sema/flow-nullability-and-shortcircuit.cpp
create mode 100644 clang/test/Sema/flow-nullability-array-subscript.cpp
create mode 100644 clang/test/Sema/flow-nullability-arrow-deref.cpp
create mode 100644 clang/test/Sema/flow-nullability-bool-intermediary.cpp
create mode 100644 clang/test/Sema/flow-nullability-brace-assert.cpp
create mode 100644 clang/test/Sema/flow-nullability-builtin-expect.cpp
create mode 100644 clang/test/Sema/flow-nullability-c-basic.c
create mode 100644 clang/test/Sema/flow-nullability-c-comprehensive.c
create mode 100644 clang/test/Sema/flow-nullability-c-idioms.c
create mode 100644 clang/test/Sema/flow-nullability-call-invalidation.c
create mode 100644 clang/test/Sema/flow-nullability-cast-propagation.cpp
create mode 100644 clang/test/Sema/flow-nullability-chained-deref.cpp
create mode 100644 clang/test/Sema/flow-nullability-complex-cfg.cpp
create mode 100644 clang/test/Sema/flow-nullability-compound-conditions.cpp
create mode 100644 clang/test/Sema/flow-nullability-conversion-op.cpp
create mode 100644 clang/test/Sema/flow-nullability-coroutines.cpp
create mode 100644 clang/test/Sema/flow-nullability-default-nonnull.cpp
create mode 100644 clang/test/Sema/flow-nullability-duplicate-diag.cpp
create mode 100644 clang/test/Sema/flow-nullability-else-branch.cpp
create mode 100644 clang/test/Sema/flow-nullability-exceptions.cpp
create mode 100644 clang/test/Sema/flow-nullability-false-positives.cpp
create mode 100644 clang/test/Sema/flow-nullability-for-loop.cpp
create mode 100644 clang/test/Sema/flow-nullability-gradual-adoption.cpp
create mode 100644 clang/test/Sema/flow-nullability-if-constexpr.cpp
create mode 100644 clang/test/Sema/flow-nullability-lambda.cpp
create mode 100644 clang/test/Sema/flow-nullability-new-expr.cpp
create mode 100644 clang/test/Sema/flow-nullability-nonnull-attr.cpp
create mode 100644 clang/test/Sema/flow-nullability-nonnull-param-narrowing.cpp
create mode 100644 clang/test/Sema/flow-nullability-nonnull-param.cpp
create mode 100644 clang/test/Sema/flow-nullability-noreturn.cpp
create mode 100644 clang/test/Sema/flow-nullability-nullable-default-template.cpp
create mode 100644 clang/test/Sema/flow-nullability-perf-stress.cpp
create mode 100644 clang/test/Sema/flow-nullability-range-for.cpp
create mode 100644 clang/test/Sema/flow-nullability-reassignment.cpp
create mode 100644 clang/test/Sema/flow-nullability-smart-ptr.cpp
create mode 100644 clang/test/Sema/flow-nullability-structured-bindings.cpp
create mode 100644 clang/test/Sema/flow-nullability-switch.cpp
create mode 100644 clang/test/Sema/flow-nullability-templates.cpp
create mode 100644 clang/test/Sema/flow-nullability-terminators.cpp
create mode 100644 clang/test/Sema/flow-nullability-ternary.cpp
create mode 100644 clang/test/Sema/flow-nullability-type-identity.cpp
create mode 100644 clang/test/Sema/flow-nullability-unannotated-fp.cpp
create mode 100644 clang/test/Sema/flow-nullability-void-star-cast.cpp
create mode 100644 clang/test/Sema/flow-nullability-warning-groups.cpp
create mode 100644 clang/test/Sema/flow-nullability-while-loop.cpp
diff --git a/clang/include/clang/Analysis/Analyses/FlowNullability.h b/clang/include/clang/Analysis/Analyses/FlowNullability.h
new file mode 100644
index 0000000000000..ec3749397c96c
--- /dev/null
+++ b/clang/include/clang/Analysis/Analyses/FlowNullability.h
@@ -0,0 +1,39 @@
+//=- FlowNullability.h - Flow-sensitive null dereference checking -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs for invoking flow-sensitive nullability analysis
+// that detects dereferences of nullable pointers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_FLOWNULLABILITY_H
+#define LLVM_CLANG_ANALYSIS_ANALYSES_FLOWNULLABILITY_H
+
+#include "clang/Basic/Specifiers.h"
+
+namespace clang {
+
+class AnalysisDeclContext;
+class Expr;
+class QualType;
+
+class FlowNullabilityHandler {
+public:
+ virtual ~FlowNullabilityHandler();
+ virtual void handleNullableDereference(const Expr *DerefExpr,
+ QualType PtrType) = 0;
+};
+
+void runFlowNullabilityAnalysis(AnalysisDeclContext &AC,
+ FlowNullabilityHandler &Handler,
+ bool StrictMode,
+ NullabilityKind DefaultNullability);
+
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_ANALYSES_FLOWNULLABILITY_H
diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td
index a8d9745d91083..105dfa54c0a68 100644
--- a/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/clang/include/clang/Basic/DiagnosticGroups.td
@@ -526,6 +526,8 @@ def CXX26Compat : DiagGroup<"c++2c-compat", [DeleteIncomplete]>;
def ExitTimeDestructors : DiagGroup<"exit-time-destructors">;
def FlexibleArrayExtensions : DiagGroup<"flexible-array-extensions">;
+def FlowNullableDereference : DiagGroup<"flow-nullable-dereference">;
+def FlowNullability : DiagGroup<"flow-nullability", [FlowNullableDereference]>;
def FourByteMultiChar : DiagGroup<"four-char-constants">;
def GlobalConstructors : DiagGroup<"global-constructors"> {
code Documentation = [{
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index db1e3630435d0..1f728ce572be8 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -12998,6 +12998,15 @@ def warn_nullability_lost : Warning<
"implicit conversion from nullable pointer %0 to non-nullable pointer "
"type %1">,
InGroup<NullableToNonNullConversion>, DefaultIgnore;
+def warn_flow_nullable_dereference : Warning<
+ "dereference of nullable pointer %0">,
+ InGroup<FlowNullableDereference>;
+def warn_null_init_nonnull : Warning<
+ "null assigned to a variable of nonnull type %0">,
+ InGroup<FlowNullableDereference>;
+def note_nullable_dereference_fix : Note<
+ "add a null check before dereferencing, or annotate as '_Nonnull' if this "
+ "pointer cannot be null">;
def warn_zero_as_null_pointer_constant : Warning<
"zero as null pointer constant">,
InGroup<DiagGroup<"zero-as-null-pointer-constant">>, DefaultIgnore;
diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index dd4c5a653d38b..49e4684bc86fc 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -141,6 +141,10 @@ LANGOPT(PointerAuthObjcClassROPointers, 1, 0, Benign, "class_ro_t pointer authen
LANGOPT(PointerAuthBlockDescriptorPointers, 1, 0, NotCompatible, "enable signed block descriptors")
+// Nullability options
+ENUM_LANGOPT(NullabilityDefault, NullabilityKind, 2, NullabilityKind::Unspecified, NotCompatible, "default nullability for unannotated pointers")
+LANGOPT(FlowSensitiveNullability, 1, 0, NotCompatible, "enable flow-sensitive nullability analysis")
+
LANGOPT(DoubleSquareBracketAttributes, 1, 0, NotCompatible, "'[[]]' attributes extension for all language standard modes")
LANGOPT(ExperimentalLateParseAttributes, 1, 0, NotCompatible, "experimental late parsing of attributes")
diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h
index 64b12b6fd72c7..31d4d80b69e9b 100644
--- a/clang/include/clang/Basic/LangOptions.h
+++ b/clang/include/clang/Basic/LangOptions.h
@@ -20,6 +20,7 @@
#include "clang/Basic/LangStandard.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Sanitizers.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/FloatingPointMode.h"
@@ -77,6 +78,7 @@ class LangOptionsBase {
using Visibility = clang::Visibility;
using RoundingMode = llvm::RoundingMode;
using CFBranchLabelSchemeKind = clang::CFBranchLabelSchemeKind;
+ using NullabilityKind = clang::NullabilityKind;
/// For ASTs produced with different option value, signifies their level of
/// compatibility.
diff --git a/clang/include/clang/Options/Options.td b/clang/include/clang/Options/Options.td
index 215d4e885709c..b6cbfe5443ce7 100644
--- a/clang/include/clang/Options/Options.td
+++ b/clang/include/clang/Options/Options.td
@@ -1641,6 +1641,18 @@ defm apple_pragma_pack : BoolFOption<"apple-pragma-pack",
PosFlag<SetTrue, [], [ClangOption, CC1Option],
"Enable Apple gcc-compatible #pragma pack handling">,
NegFlag<SetFalse>>;
+def fnullability_default_EQ : Joined<["-"], "fnullability-default=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set default nullability for unannotated pointers (unspecified, nullable, nonnull)">,
+ Values<"unspecified,nullable,nonnull">, NormalizedValuesScope<"clang::NullabilityKind">,
+ NormalizedValues<["Unspecified", "Nullable", "NonNull"]>,
+ MarshallingInfoEnum<LangOpts<"NullabilityDefault">, "Unspecified">;
+
+defm flow_sensitive_nullability : BoolFOption<"flow-sensitive-nullability",
+ LangOpts<"FlowSensitiveNullability">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable flow-sensitive nullability analysis">,
+ NegFlag<SetFalse>>;
defm xl_pragma_pack : BoolFOption<"xl-pragma-pack",
LangOpts<"XLPragmaPack">, DefaultFalse,
PosFlag<SetTrue, [], [ClangOption, CC1Option],
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 4e6058b8e5f79..7d0c6e6c8ab5c 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -1186,9 +1186,16 @@ class Sema final : public SemaBase {
NamedDecl *getCurFunctionOrMethodDecl() const;
/// Warn if we're implicitly casting from a _Nullable pointer type to a
- /// _Nonnull one.
+ /// _Nonnull one. If \p SrcExpr is provided and flow-sensitive nullability
+ /// is enabled, the warning is suppressed when the expression is provably
+ /// non-null despite its declared type.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
- SourceLocation Loc);
+ SourceLocation Loc,
+ Expr *SrcExpr = nullptr);
+
+ /// Check if a function has any nullability annotations on its
+ /// parameters or return type.
+ bool functionHasNullabilityAnnotations(const FunctionDecl *FD) const;
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
diff --git a/clang/lib/Analysis/CMakeLists.txt b/clang/lib/Analysis/CMakeLists.txt
index c5952dbdad51d..39f6b4fab80c6 100644
--- a/clang/lib/Analysis/CMakeLists.txt
+++ b/clang/lib/Analysis/CMakeLists.txt
@@ -20,6 +20,7 @@ add_clang_library(clangAnalysis
CodeInjector.cpp
Dominators.cpp
ExprMutationAnalyzer.cpp
FixitUtil.cpp
+ FlowNullability.cpp
IntervalPartition.cpp
IssueHash.cpp
diff --git a/clang/lib/Analysis/FlowNullability.cpp b/clang/lib/Analysis/FlowNullability.cpp
new file mode 100644
index 0000000000000..5742ab3c358cf
--- /dev/null
+++ b/clang/lib/Analysis/FlowNullability.cpp
@@ -0,0 +1,1195 @@
+//===- FlowNullability.cpp - Flow-sensitive null dereference checking -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a CFG-based forward dataflow analysis that detects
+// dereferences of nullable pointers, tracking nullability narrowing through
+// control flow (null checks, early returns, assertions, etc.).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/FlowNullability.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <optional>
+#include <utility>
+
+#define DEBUG_TYPE "flow-nullability"
+
+using namespace clang;
+
+FlowNullabilityHandler::~FlowNullabilityHandler() = default;
+
+namespace {
+
+using MemberKey = std::pair<const VarDecl *, const FieldDecl *>;
+
+/// Per-block dataflow lattice tracking which pointers are narrowed (known
+/// non-null) or nullable. Uses DenseSet for simplicity; a BitVector keyed
+/// by variable index would reduce fixpoint comparison cost for functions
+/// with many tracked pointers, but profiling hasn't shown this to be a
+/// bottleneck in practice (the perf stress test passes comfortably).
+struct NullState {
+ // Pointers proven non-null by control flow (null checks, nonnull init, etc.).
+ // A variable should not be in both NarrowedVars and NullableVars — narrowing
+ // is always erased before re-evaluating nullability on reassignment.
+ llvm::DenseSet<const VarDecl *> NarrowedVars;
+ llvm::DenseSet<MemberKey> NarrowedMembers;
+ llvm::DenseSet<const FieldDecl *> NarrowedThisMembers;
+ llvm::DenseSet<const VarDecl *> NullableVars;
+ // Smart pointer this-members known to be nullable in the current function
+ // (e.g., after reset() or std::move()). Used to avoid false positives on
+ // member smart pointers that are always initialized in the constructor.
+ llvm::DenseSet<const FieldDecl *> NullableThisMembers;
+
+ // Maps bool variables to the null-check they capture.
+ // E.g., bool valid = (p != nullptr) → {valid → (p, false)}
+ // The bool is true when the bool being true means the pointer IS null.
+ using BoolGuardMap =
+ llvm::DenseMap<const VarDecl *, std::pair<const VarDecl *, bool>>;
+ BoolGuardMap BoolGuards;
+
+ bool operator==(const NullState &Other) const {
+ return NarrowedVars == Other.NarrowedVars &&
+ NarrowedMembers == Other.NarrowedMembers &&
+ NarrowedThisMembers == Other.NarrowedThisMembers &&
+ NullableVars == Other.NullableVars &&
+ NullableThisMembers == Other.NullableThisMembers &&
+ BoolGuards == Other.BoolGuards;
+ }
+ bool operator!=(const NullState &Other) const { return !(*this == Other); }
+};
+
+static NullState join(const NullState &A, const NullState &B) {
+ NullState Result;
+ // Narrowed = intersection: only narrowed if ALL paths agree.
+ for (const auto *VD : A.NarrowedVars)
+ if (B.NarrowedVars.contains(VD))
+ Result.NarrowedVars.insert(VD);
+ for (const auto &MK : A.NarrowedMembers)
+ if (B.NarrowedMembers.contains(MK))
+ Result.NarrowedMembers.insert(MK);
+ for (const auto *FD : A.NarrowedThisMembers)
+ if (B.NarrowedThisMembers.contains(FD))
+ Result.NarrowedThisMembers.insert(FD);
+ // Nullable = union: if nullable on either path, it's nullable.
+ for (const auto *VD : A.NullableVars)
+ Result.NullableVars.insert(VD);
+ for (const auto *VD : B.NullableVars)
+ Result.NullableVars.insert(VD);
+ for (const auto *FD : A.NullableThisMembers)
+ Result.NullableThisMembers.insert(FD);
+ for (const auto *FD : B.NullableThisMembers)
+ Result.NullableThisMembers.insert(FD);
+ // BoolGuards: keep only entries present in both with the same mapping.
+ for (const auto &[BoolVD, GuardInfo] : A.BoolGuards) {
+ auto It = B.BoolGuards.find(BoolVD);
+ if (It != B.BoolGuards.end() && It->second == GuardInfo)
+ Result.BoolGuards[BoolVD] = GuardInfo;
+ }
+ // Invariant: a variable should not be both narrowed and nullable.
+ // Narrowed takes priority (proven non-null on all paths), so remove
+ // stale nullable entries that conflict. This prevents NullableVars
+ // from accumulating stale entries across fixpoint iterations.
+ for (const auto *VD : Result.NarrowedVars)
+ Result.NullableVars.erase(VD);
+ return Result;
+}
+
+static const Expr *unwrapBuiltinExpect(const Expr *E) {
+ if (const auto *CE = dyn_cast<CallExpr>(E)) {
+ if (const auto *Callee = CE->getDirectCallee()) {
+ unsigned BuiltinID = Callee->getBuiltinID();
+ if ((BuiltinID == Builtin::BI__builtin_expect ||
+ BuiltinID == Builtin::BI__builtin_expect_with_probability) &&
+ CE->getNumArgs() >= 1) {
+ return CE->getArg(0)->IgnoreParenImpCasts();
+ }
+ }
+ }
+ return E;
+}
+
+/// Extract the rightmost leaf of a && / || chain.
+/// The CFG decomposes `a && b && c` into separate blocks — each operand
+/// becomes its own block's terminator condition. So for `if (a && b && c)`,
+/// the block evaluating 'c' has the full `a && b && c` as its terminator,
+/// but 'a' and 'b' are handled by their own blocks. We recurse into the
+/// RHS to find the leaf that's actually being evaluated in this block.
+static const Expr *getTerminalCondition(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr)
+ return getTerminalCondition(BO->getRHS());
+ }
+ return E;
+}
+
+static bool isNullableType(QualType Ty, bool StrictMode,
+ NullabilityKind Default) {
+ std::optional<NullabilityKind> Nullability = Ty->getNullability();
+ if (!Nullability)
+ return false;
+ // Explicit _Nullable always triggers.
+ if (*Nullability == NullabilityKind::Nullable)
+ return true;
+ // _Null_unspecified means "not explicitly annotated — use the default".
+ // Under -fnullability-default=nullable, treat as nullable.
+ // Under -fnullability-default=nonnull, treat as nonnull (no warning).
+ if (*Nullability == NullabilityKind::Unspecified &&
+ Default == NullabilityKind::Nullable)
+ return true;
+ return false;
+}
+
+static bool isNonnullType(QualType Ty) {
+ std::optional<NullabilityKind> Nullability = Ty->getNullability();
+ return Nullability && *Nullability == NullabilityKind::NonNull;
+}
+
+/// Check if a type is std::unique_ptr, std::shared_ptr, or std::weak_ptr.
+/// Uses getAsCXXRecordDecl() which operates on the canonical type, so
+/// type aliases (using/typedef) are handled. Does not match non-std
+/// smart pointers (e.g. boost::shared_ptr).
+static bool isSmartPointerType(QualType Ty) {
+ const auto *RD = Ty->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+ const auto *DC = RD->getDeclContext();
+ if (!DC || !DC->isStdNamespace())
+ return false;
+ StringRef Name = RD->getName();
+ return Name == "unique_ptr" || Name == "shared_ptr" || Name == "weak_ptr";
+}
+
+/// Check if a smart pointer expression (the implicit object of operator->)
+/// is narrowed in the current state.
+static bool isSmartPointerNarrowed(const Expr *E, const NullState &State) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ return State.NarrowedVars.contains(VD);
+ } else if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(Base))
+ return State.NarrowedThisMembers.contains(FD);
+ if (const auto *BaseDRE = dyn_cast<DeclRefExpr>(Base))
+ if (const auto *BaseVD = dyn_cast<VarDecl>(BaseDRE->getDecl()))
+ return State.NarrowedMembers.contains({BaseVD, FD});
+ }
+ }
+ return false;
+}
+
+/// Check if a callee is std::make_unique or std::make_shared.
+static bool isMakeSmartPtrCall(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ // Look through CXXConstructExpr wrapping the call (implicit conversion)
+ if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
+ if (CE->getNumArgs() == 1)
+ return isMakeSmartPtrCall(CE->getArg(0));
+ }
+ if (const auto *CE = dyn_cast<CallExpr>(E)) {
+ if (const auto *Callee = CE->getDirectCallee()) {
+ const auto *DC = Callee->getDeclContext();
+ if (DC && DC->isStdNamespace() && Callee->getDeclName().isIdentifier()) {
+ StringRef Name = Callee->getName();
+ return Name == "make_unique" || Name == "make_shared";
+ }
+ }
+ }
+ return false;
+}
+
+/// Get the VarDecl from a smart pointer expression, if it's a simple
+/// DeclRefExpr to a VarDecl.
+static const VarDecl *getSmartPtrVarDecl(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (isSmartPointerType(VD->getType()))
+ return VD;
+ return nullptr;
+}
+
+/// Get the FieldDecl from a smart pointer this->member expression.
+static const FieldDecl *getSmartPtrThisMemberDecl(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(Base))
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ if (isSmartPointerType(FD->getType()))
+ return FD;
+ }
+ return nullptr;
+}
+
+struct ConditionResult {
+ const VarDecl *VD = nullptr;
+ const FieldDecl *FD = nullptr;
+ bool IsThisMember = false;
+ bool Negated = false;
+};
+
+// Forward declaration — decomposeAnd calls analyzeCondition on leaves.
+static void
+analyzeCondition(const Expr *Cond, ASTContext &Ctx,
+ SmallVectorImpl<ConditionResult> &Results,
+ const NullState::BoolGuardMap *BoolGuards = nullptr);
+
+/// Recursively flatten a chain of && operators and analyze each leaf.
+/// Used by analyzeCondition to handle !(A && B && C).
+static void decomposeAnd(const Expr *E, ASTContext &Ctx,
+ SmallVectorImpl<ConditionResult> &Results,
+ const NullState::BoolGuardMap *BoolGuards) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_LAnd) {
+ decomposeAnd(BO->getLHS(), Ctx, Results, BoolGuards);
+ decomposeAnd(BO->getRHS(), Ctx, Results, BoolGuards);
+ return;
+ }
+ }
+ analyzeCondition(E, Ctx, Results, BoolGuards);
+}
+
+/// Analyze a branch condition to extract pointer null-check information.
+///
+/// Note: We decompose && (via decomposeAnd) but intentionally do NOT
+/// decompose ||. For || the CFG already splits each operand into its own
+/// block, so narrowing on the true-edge of individual operands is handled
+/// naturally. Decomposing || on the false-edge (where all operands are
+/// false) would be possible but adds complexity for limited practical gain
+/// — most real null-checks use && or standalone conditions.
+static void analyzeCondition(const Expr *Cond, ASTContext &Ctx,
+ SmallVectorImpl<ConditionResult> &Results,
+ const NullState::BoolGuardMap *BoolGuards) {
+ if (!Cond)
+ return;
+
+ const Expr *E = Cond->IgnoreParenImpCasts();
+ E = unwrapBuiltinExpect(E);
+
+ bool Negated = false;
+ while (auto *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() != UO_LNot)
+ break;
+ Negated = !Negated;
+ E = UO->getSubExpr()->IgnoreParenImpCasts();
+ }
+
+ // !(A && B): the CFG merges the && operand paths before the if-decision,
+ // so individual narrowing from the && blocks is lost at the merge.
+ // Recursively decompose the && to narrow ALL operands on the false edge
+ // (where && was true → all operands are true → all pointers non-null).
+ if (Negated) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_LAnd) {
+ // Flatten nested && and analyze each leaf
+ decomposeAnd(BO, Ctx, Results, BoolGuards);
+ // Keep only sub-conditions where the pointer is non-null when the
+ // sub-condition is true (Negated=false). Flip to Negated=true so
+ // narrowing lands on the false edge of the outer !.
+ llvm::erase_if(Results,
+ [](const ConditionResult &CR) { return CR.Negated; });
+ for (auto &CR : Results)
+ CR.Negated = true;
+ return;
+ }
+ }
+ }
+
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_NE || BO->getOpcode() == BO_EQ) {
+ const Expr *LHS = BO->getLHS()->IgnoreParenImpCasts();
+ const Expr *RHS = BO->getRHS()->IgnoreParenImpCasts();
+
+ bool LHSIsNull =
+ LHS->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull);
+ bool RHSIsNull =
+ RHS->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull);
+
+ if (LHSIsNull || RHSIsNull) {
+ const Expr *PtrExpr = LHSIsNull ? RHS : LHS;
+ bool EqNegated = Negated;
+ if (BO->getOpcode() == BO_EQ)
+ EqNegated = !EqNegated;
+
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(PtrExpr)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ Results.push_back({VD, nullptr, false, EqNegated});
+ return;
+ }
+ }
+ if (const auto *ME = dyn_cast<MemberExpr>(PtrExpr)) {
+ if (ME->getType()->isPointerType()) {
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(Base)) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ Results.push_back({nullptr, FD, true, EqNegated});
+ return;
+ }
+ }
+ if (const auto *BaseDRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const auto *BaseVD = dyn_cast<VarDecl>(BaseDRE->getDecl())) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ Results.push_back({BaseVD, FD, false, EqNegated});
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ return;
+ }
+ }
+
+ if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Deref) {
+ const Expr *SubExpr = UO->getSubExpr()->IgnoreParenImpCasts();
+ if (auto *DRE = dyn_cast<DeclRefExpr>(SubExpr)) {
+ if (auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (VD->getType()->isPointerType()) {
+ Results.push_back({VD, nullptr, false, Negated});
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (VD->getType()->isPointerType()) {
+ Results.push_back({VD, nullptr, false, Negated});
+ return;
+ }
+ // Bool intermediary: if (valid) where valid = (p != nullptr)
+ if (BoolGuards && VD->getType()->isBooleanType()) {
+ auto It = BoolGuards->find(VD);
+ if (It != BoolGuards->end()) {
+ // XOR: outer ! flips the guard's sense
+ Results.push_back(
+ {It->second.first, nullptr, false, Negated != It->second.second});
+ return;
+ }
+ }
+ }
+ }
+
+ if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ if (ME->getType()->isPointerType()) {
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(Base)) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ Results.push_back({nullptr, FD, true, Negated});
+ return;
+ }
+ }
+ if (const auto *BaseDRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const auto *BaseVD = dyn_cast<VarDecl>(BaseDRE->getDecl())) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ Results.push_back({BaseVD, FD, false, Negated});
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ // Handle smart pointer implicit bool conversion: if (sp) { ... }
+ // The AST represents this as a CXXMemberCallExpr to operator bool().
+ if (const auto *MCE = dyn_cast<CXXMemberCallExpr>(E)) {
+ if (const auto *CD =
+ dyn_cast_or_null<CXXConversionDecl>(MCE->getMethodDecl())) {
+ if (CD->getConversionType()->isBooleanType()) {
+ const Expr *Obj = MCE->getImplicitObjectArgument();
+ if (Obj && isSmartPointerType(Obj->getType())) {
+ Obj = Obj->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Obj)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ Results.push_back({VD, nullptr, false, Negated});
+ return;
+ }
+ }
+ if (const auto *ObjME = dyn_cast<MemberExpr>(Obj)) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ObjME->getMemberDecl())) {
+ const Expr *ObjBase = ObjME->getBase()->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(ObjBase)) {
+ Results.push_back({nullptr, FD, true, Negated});
+ return;
+ }
+ if (const auto *BaseDRE = dyn_cast<DeclRefExpr>(ObjBase)) {
+ if (const auto *BaseVD =
+ dyn_cast<VarDecl>(BaseDRE->getDecl())) {
+ Results.push_back({BaseVD, FD, false, Negated});
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/// Transfer functions for the flow-sensitive nullability dataflow analysis.
+/// Processes each CFG statement to update the NullState lattice — tracking
+/// narrowing from null checks, invalidation from assignments, and reporting
+/// dereferences of nullable pointers via the Handler interface.
+class TransferFunctions {
+ NullState &State;
+ FlowNullabilityHandler &Handler;
+ ASTContext &Ctx;
+ bool StrictMode;
+ NullabilityKind DefaultNullability;
+
+ // True when VD has been proven non-null on the current path.
+ bool isNarrowed(const VarDecl *VD) const {
+ return State.NarrowedVars.contains(VD);
+ }
+
+ // True when BaseVD->FD has been proven non-null on the current path.
+ bool isMemberNarrowed(const VarDecl *BaseVD, const FieldDecl *FD) const {
+ return State.NarrowedMembers.contains({BaseVD, FD});
+ }
+
+ // True when this->FD has been proven non-null on the current path.
+ bool isThisMemberNarrowed(const FieldDecl *FD) const {
+ return State.NarrowedThisMembers.contains(FD);
+ }
+
+ /// Unwrap explicit casts and pointer arithmetic to find the original
+ /// pointer expression and whether a cast was traversed. Template
+ /// instantiations can bake _Nullable into cast result types even when
+ /// the source is unannotated (e.g. reinterpret_cast<T*>(p) where T
+ /// is itself a pointer type). When a cast is found, callers should
+ /// check nullability on the SOURCE type, not the cast result.
+ static const Expr *unwrapCastsAndArithmetic(const Expr *E, bool &FoundCast) {
+ FoundCast = false;
+ for (;;) {
+ if (const auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
+ FoundCast = true;
+ E = CE->getSubExpr()->IgnoreParenImpCasts();
+ } else if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) {
+ // Follow whichever operand is the pointer (handles both
+ // p + i and i + p).
+ E = BO->getLHS()->getType()->isPointerType()
+ ? BO->getLHS()->IgnoreParenImpCasts()
+ : BO->getRHS()->IgnoreParenImpCasts();
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ return E;
+ }
+
+ // Report a dereference if the pointer's declared type is nullable under
+ // the current mode/default. Pure type check — no flow state consulted.
+ void checkDeref(const Expr *DerefExpr, QualType PtrType) {
+ if (isNullableType(PtrType, StrictMode, DefaultNullability)) {
+ LLVM_DEBUG(llvm::dbgs() << "flow-nullability: dereference of nullable "
+ << PtrType.getAsString() << "\n");
+ Handler.handleNullableDereference(DerefExpr, PtrType);
+ }
+ }
+
+ /// Check dereference of a non-variable, non-member expression.
+ /// Unwraps casts/arithmetic to avoid template-instantiation false
+ /// positives where _Nullable is baked into cast result types.
+ void checkExprDeref(const Expr *DerefExpr, const Expr *PtrExpr) {
+ bool FoundCast = false;
+ const Expr *Origin = unwrapCastsAndArithmetic(PtrExpr, FoundCast);
+
+ // If the origin is inherently non-null, skip.
+ if (isa<CXXThisExpr>(Origin))
+ return;
+ if (const auto *UO = dyn_cast<UnaryOperator>(Origin))
+ if (UO->getOpcode() == UO_AddrOf)
+ return;
+
+ // With a cast in the way, the cast SOURCE type is authoritative;
+ // otherwise use the expression's own type.
+ QualType CheckTy = FoundCast ? Origin->getType() : PtrExpr->getType();
+ checkDeref(DerefExpr, CheckTy);
+ }
+
+ // Report a dereference of VD when either its declared type is nullable
+ // or the flow state has marked it nullable (assigned/initialized from a
+ // nullable source). Callers are expected to have ruled out narrowing.
+ void checkVarDeref(const Expr *DerefExpr, const VarDecl *VD) {
+ QualType Ty = VD->getType();
+ if (isNullableType(Ty, StrictMode, DefaultNullability))
+ return Handler.handleNullableDereference(DerefExpr, Ty);
+ if (State.NullableVars.contains(VD))
+ return Handler.handleNullableDereference(DerefExpr, Ty);
+ }
+
+ /// Warn on smart pointer dereference. For local vars/params, always warn
+ /// (they're nullable by default). For this->member smart pointers, only warn
+ /// if there's evidence of nullability in the current function (reset, move,
+ /// or null check) to avoid false positives on members set in constructors.
+ void warnSmartPtrDeref(const Expr *DerefExpr, const Expr *Obj) {
+ Obj = Obj->IgnoreParenImpCasts();
+ // Local variable or parameter — always warn when not narrowed
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Obj)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ Handler.handleNullableDereference(DerefExpr, VD->getType());
+ return;
+ }
+ }
+ // this->member — only warn if known nullable in current function
+ if (const auto *FD = getSmartPtrThisMemberDecl(Obj)) {
+ if (State.NullableThisMembers.contains(FD))
+ Handler.handleNullableDereference(DerefExpr, FD->getType());
+ }
+ }
+
+ /// Remove any BoolGuards that reference the given pointer variable.
+ void invalidateBoolGuardsFor(const VarDecl *VD) {
+ // Collect first, erase second — avoids mutating the map mid-iteration.
+ SmallVector<const VarDecl *, 2> ToRemove;
+ for (const auto &[BoolVD, GuardInfo] : State.BoolGuards)
+ if (GuardInfo.first == VD)
+ ToRemove.push_back(BoolVD);
+ for (const auto *BoolVD : ToRemove)
+ State.BoolGuards.erase(BoolVD);
+ }
+
+ // Drop all member narrowing keyed on VD: once VD itself changes, any
+ // VD->field facts are stale.
+ void invalidateMembersFor(const VarDecl *VD) {
+ SmallVector<MemberKey, 4> ToRemove;
+ for (const auto &MK : State.NarrowedMembers)
+ if (MK.first == VD)
+ ToRemove.push_back(MK);
+ for (const auto &MK : ToRemove)
+ State.NarrowedMembers.erase(MK);
+ }
+
+public:
+ TransferFunctions(NullState &State, FlowNullabilityHandler &Handler,
+ ASTContext &Ctx, bool StrictMode,
+ NullabilityKind DefaultNullability)
+ : State(State), Handler(Handler), Ctx(Ctx), StrictMode(StrictMode),
+ DefaultNullability(DefaultNullability) {}
+
+ // Dispatch on the kind of a single CFG element's statement. Only the
+ // top-level node is matched; subexpressions appear as their own CFG
+ // elements because the caller builds the CFG with setAllAlwaysAdd().
+ void visit(const Stmt *S) {
+ if (!S)
+ return;
+
+ if (const auto *DS = dyn_cast<DeclStmt>(S))
+ handleDeclStmt(DS);
+ else if (const auto *BO = dyn_cast<BinaryOperator>(S))
+ handleBinaryOperator(BO);
+ else if (const auto *UO = dyn_cast<UnaryOperator>(S))
+ handleUnaryOperator(UO);
+ else if (const auto *ME = dyn_cast<MemberExpr>(S))
+ handleMemberExpr(ME);
+ else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(S))
+ handleArraySubscript(ASE);
+ else if (const auto *CE = dyn_cast<CallExpr>(S))
+ handleCallExpr(CE);
+ }
+
+private:
+ // Seed per-variable state from declarations: raw pointers become
+ // Narrowed/Nullable based on type annotations and the initializer;
+ // smart pointers narrow on make_unique/make_shared; bool initializers
+ // of the form (p != nullptr) are recorded as guards.
+ void handleDeclStmt(const DeclStmt *DS) {
+ for (const auto *D : DS->decls()) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ // Track raw pointer initialization
+ if (VD->getType()->isPointerType()) {
+ if (isNonnullType(VD->getType())) {
+ State.NarrowedVars.insert(VD);
+ } else if (VD->hasInit()) {
+ const Expr *Init = VD->getInit()->IgnoreParenImpCasts();
+ if (const auto *UO = dyn_cast<UnaryOperator>(Init)) {
+ if (UO->getOpcode() == UO_AddrOf)
+ State.NarrowedVars.insert(VD);
+ } else if (isNonnullInit(Init) || isNonnullType(Init->getType())) {
+ State.NarrowedVars.insert(VD);
+ } else {
+ // Unwrap explicit casts to check the SOURCE type, not the
+ // cast result type. Template instantiations can bake
+ // _Nullable into cast result types even when the source is
+ // unannotated (e.g. static_cast<T*>(void_ptr)).
+ const Expr *TypeExpr = Init;
+ bool HasCast = false;
+ while (const auto *CE = dyn_cast<ExplicitCastExpr>(TypeExpr)) {
+ HasCast = true;
+ TypeExpr = CE->getSubExpr()->IgnoreParenImpCasts();
+ }
+ if (isNullableType(TypeExpr->getType(), StrictMode,
+ DefaultNullability) ||
+ isNullableInit(Init)) {
+ State.NullableVars.insert(VD);
+ } else if (HasCast) {
+ // The cast source is not nullable — narrow the var to
+ // override any _Nullable baked into the var's own type
+ // by template instantiation.
+ State.NarrowedVars.insert(VD);
+ }
+ }
+ }
+ continue;
+ }
+
+ // Track smart pointer initialization
+ if (isSmartPointerType(VD->getType()) && VD->hasInit()) {
+ const Expr *Init = VD->getInit()->IgnoreParenImpCasts();
+ if (isMakeSmartPtrCall(Init)) {
+ // make_unique/make_shared always return non-null
+ State.NarrowedVars.insert(VD);
+ }
+ // Default-constructed, nullptr, or moved-from → nullable (don't
+ // narrow)
+ }
+
+ // Track bool variables assigned from null-comparisons so that
+ // boolean intermediaries like bool valid = (p != nullptr) can
+ // later narrow p when used as a condition.
+ if (VD->getType()->isBooleanType() && VD->hasInit()) {
+ const Expr *Init = VD->getInit()->IgnoreParenImpCasts();
+ SmallVector<ConditionResult, 2> InitResults;
+ analyzeCondition(Init, Ctx, InitResults);
+ // Only a single, plain-variable fact is trackable as a guard;
+ // member facts and compound conditions are dropped.
+ if (InitResults.size() == 1 && InitResults[0].VD &&
+ !InitResults[0].FD)
+ State.BoolGuards[VD] = {InitResults[0].VD, InitResults[0].Negated};
+ }
+ }
+ }
+ }
+
+ /// Check if an init expression is provably non-null (address-of, new,
+ /// this, _Nonnull typed, narrowed var, cast of non-null, pointer arith).
+ /// See also: isExprProvablyNonnull() in Sema.cpp, which is a similar
+ /// heuristic used to suppress nullable-to-nonnull conversion warnings.
+ bool isNonnullInit(const Expr *Init) const {
+ if (!Init)
+ return false;
+ Init = Init->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Init)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (isNonnullType(VD->getType()) || isNarrowed(VD))
+ return true;
+ }
+ // Throwing operator new never returns null.
+ if (const auto *NE = dyn_cast<CXXNewExpr>(Init)) {
+ if (!NE->shouldNullCheckAllocation())
+ return true;
+ }
+ // Look through explicit casts — they don't change null/nonnull status.
+ if (const auto *CE = dyn_cast<ExplicitCastExpr>(Init))
+ return isNonnullInit(CE->getSubExpr());
+ // this is always non-null.
+ if (isa<CXXThisExpr>(Init))
+ return true;
+ // Pointer arithmetic on a non-null pointer is non-null.
+ if (const auto *BO = dyn_cast<BinaryOperator>(Init)) {
+ if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) {
+ if (BO->getLHS()->getType()->isPointerType())
+ return isNonnullInit(BO->getLHS()->IgnoreParenImpCasts());
+ if (BO->getRHS()->getType()->isPointerType())
+ return isNonnullInit(BO->getRHS()->IgnoreParenImpCasts());
+ }
+ }
+ return false;
+ }
+
+ /// Check if an init expression is nullable — either by type or because it
+ /// refers to a variable known to be nullable. Unwraps casts to propagate
+ /// nullability through cast chains (e.g., `(Derived *)nullableBase`).
+ bool isNullableInit(const Expr *Init) const {
+ if (!Init)
+ return false;
+ Init = Init->IgnoreParenImpCasts();
+ // Unwrap explicit casts first — template instantiations can bake
+ // _Nullable into cast result types. Check the SOURCE type.
+ if (const auto *CE = dyn_cast<ExplicitCastExpr>(Init))
+ return isNullableInit(CE->getSubExpr());
+ if (isNullableType(Init->getType(), StrictMode, DefaultNullability))
+ return true;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Init)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ return State.NullableVars.contains(VD);
+ }
+ // nothrow new can return null.
+ if (const auto *NE = dyn_cast<CXXNewExpr>(Init))
+ return NE->shouldNullCheckAllocation();
+ return false;
+ }
+
+ // Assignments: any assignment operator (plain or compound) drops the
+ // LHS's existing facts; only a plain `=` re-derives a fresh state from
+ // the RHS. Member assignments also clear member narrowing.
+ void handleBinaryOperator(const BinaryOperator *BO) {
+ if (BO->isAssignmentOp()) {
+ const Expr *LHS = BO->getLHS()->IgnoreParenImpCasts();
+
+ // Assignment to a member (this->field or var->field) invalidates
+ // any narrowing on that member.
+ if (const auto *ME = dyn_cast<MemberExpr>(LHS)) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ if (ME->isArrow()) {
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(Base)) {
+ State.NarrowedThisMembers.erase(FD);
+ State.NullableThisMembers.erase(FD);
+ } else if (const auto *BaseDRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const auto *BaseVD = dyn_cast<VarDecl>(BaseDRE->getDecl()))
+ State.NarrowedMembers.erase({BaseVD, FD});
+ }
+ }
+ }
+ }
+
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // Bool reassignment invalidates any stored guard
+ if (VD->getType()->isBooleanType()) {
+ State.BoolGuards.erase(VD);
+ return;
+ }
+ if (!VD->getType()->isPointerType())
+ return;
+ State.NarrowedVars.erase(VD);
+ State.NullableVars.erase(VD);
+ invalidateMembersFor(VD);
+ invalidateBoolGuardsFor(VD);
+
+ if (BO->getOpcode() == BO_Assign) {
+ const Expr *RHS = BO->getRHS()->IgnoreParenImpCasts();
+ // p = &x — address-of is always non-null.
+ if (const auto *RHSUO = dyn_cast<UnaryOperator>(RHS)) {
+ if (RHSUO->getOpcode() == UO_AddrOf) {
+ State.NarrowedVars.insert(VD);
+ return;
+ }
+ }
+ // p = q where q is _Nonnull-typed or currently narrowed.
+ if (const auto *RHSDRE = dyn_cast<DeclRefExpr>(RHS)) {
+ if (const auto *RHSVD = dyn_cast<VarDecl>(RHSDRE->getDecl())) {
+ if (isNonnullType(RHSVD->getType()) || isNarrowed(RHSVD)) {
+ State.NarrowedVars.insert(VD);
+ return;
+ }
+ }
+ }
+ if (isNonnullInit(RHS)) {
+ State.NarrowedVars.insert(VD);
+ return;
+ }
+ if (isNonnullType(BO->getRHS()->getType())) {
+ State.NarrowedVars.insert(VD);
+ } else if (isNullableType(BO->getRHS()->getType(), StrictMode,
+ DefaultNullability) ||
+ isNullableInit(RHS)) {
+ State.NullableVars.insert(VD);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Unary operators: *p checks the dereference against narrowing state;
+ // ++/-- keeps the pointer non-null but invalidates facts derived from
+ // the old pointee.
+ void handleUnaryOperator(const UnaryOperator *UO) {
+ if (UO->getOpcode() == UO_Deref) {
+ const Expr *SubExpr = UO->getSubExpr()->IgnoreParenImpCasts();
+
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(SubExpr)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // Skip implicit (compiler-generated) variables.
+ if (!VD->isImplicit() && !isNarrowed(VD))
+ checkVarDeref(UO, VD);
+ }
+ } else if (const auto *ME = dyn_cast<MemberExpr>(SubExpr)) {
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(Base)) {
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ if (!isThisMemberNarrowed(FD))
+ checkDeref(UO, ME->getType());
+ }
+ } else {
+ checkMemberExprDeref(UO, ME);
+ }
+ } else if (!isa<CXXThisExpr>(SubExpr)) {
+ checkExprDeref(UO, SubExpr);
+ }
+ }
+
+ // Pointer increment/decrement (p++, ++p, p--, --p): the pointer now
+ // points elsewhere, so member narrowing and bool guards are stale.
+ // But the pointer itself is still non-null — arithmetic on a non-null
+ // pointer cannot produce null (matching isNonnullInit's treatment of
+ // pointer arithmetic via BO_Add/BO_Sub).
+ if (UO->getOpcode() == UO_PostInc || UO->getOpcode() == UO_PreInc ||
+ UO->getOpcode() == UO_PostDec || UO->getOpcode() == UO_PreDec) {
+ const Expr *SubExpr = UO->getSubExpr()->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(SubExpr)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (VD->getType()->isPointerType()) {
+ invalidateMembersFor(VD);
+ invalidateBoolGuardsFor(VD);
+ }
+ }
+ }
+ }
+ }
+
+ // base->member accesses: the arrow itself is a dereference of `base`.
+ void handleMemberExpr(const MemberExpr *ME) {
+ if (!ME->isArrow())
+ return;
+
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+
+ // this->x never warns — `this` is non-null.
+ if (isa<CXXThisExpr>(Base))
+ return;
+
+ // Handle overloaded operator-> (smart pointers, iterators, etc.)
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(Base)) {
+ if (OCE->getOperator() == OO_Arrow) {
+ // For smart pointers, warn if not narrowed.
+ // For non-smart-pointer types (iterators etc), skip as before.
+ if (OCE->getNumArgs() >= 1) {
+ const Expr *Obj = OCE->getArg(0);
+ if (isSmartPointerType(Obj->getType())) {
+ if (!isSmartPointerNarrowed(Obj, State))
+ warnSmartPtrDeref(ME, Obj);
+ }
+ }
+ return;
+ }
+ }
+
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (!isNarrowed(VD))
+ checkVarDeref(ME, VD);
+ }
+ } else if (const auto *BaseME = dyn_cast<MemberExpr>(Base)) {
+ checkMemberExprDeref(ME, BaseME);
+ } else {
+ checkExprDeref(ME, Base);
+ }
+ }
+
+ // p[i] dereferences p unless p is a true array (or &x, which cannot be
+ // null).
+ void handleArraySubscript(const ArraySubscriptExpr *ASE) {
+ const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
+ if (const auto *UO = dyn_cast<UnaryOperator>(Base))
+ if (UO->getOpcode() == UO_AddrOf)
+ return;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (!isNarrowed(VD) && !VD->getType()->isArrayType())
+ checkVarDeref(ASE, VD);
+ }
+ } else {
+ QualType BaseTy = Base->getType();
+ if (!BaseTy->isArrayType())
+ checkExprDeref(ASE, Base);
+ }
+ }
+
+ /// Handle function calls. By design, calls do NOT invalidate pointer
+ /// narrowing — even when a pointer's address is taken (&p) and passed as
+ /// a T** argument. This is a pragmatic trade-off: invalidating on
+ /// address-escape would produce excessive false positives on common
+ /// patterns (output parameters, init functions). The same approach is
+ /// used by Clang's ThreadSafety analysis.
+ void handleCallExpr(const CallExpr *CE) {
+ if (const auto *Callee = CE->getDirectCallee()) {
+ // __builtin_assume(cond) narrows pointers mentioned in cond.
+ if (Callee->getBuiltinID() == Builtin::BI__builtin_assume &&
+ CE->getNumArgs() >= 1) {
+ const Expr *Arg = CE->getArg(0)->IgnoreParenImpCasts();
+ SmallVector<ConditionResult, 2> Results;
+ analyzeCondition(Arg, Ctx, Results, &State.BoolGuards);
+ for (const auto &CR : Results) {
+ // A negated fact (assume(p == nullptr)) is not a narrowing.
+ if (CR.Negated)
+ continue;
+ if (CR.IsThisMember) {
+ State.NarrowedThisMembers.insert(CR.FD);
+ } else if (CR.VD) {
+ if (!CR.FD)
+ State.NarrowedVars.insert(CR.VD);
+ else
+ State.NarrowedMembers.insert({CR.VD, CR.FD});
+ }
+ }
+ }
+
+ // Narrow pointers passed to _Nonnull parameters — surviving the call
+ // proves the pointer was non-null. Recognizes both Clang _Nonnull
+ // and GCC-style __attribute__((nonnull)).
+ const auto *NNAttr = Callee->getAttr<NonNullAttr>();
+ for (unsigned I = 0,
+ N = std::min(CE->getNumArgs(), Callee->getNumParams());
+ I < N; ++I) {
+ const ParmVarDecl *Param = Callee->getParamDecl(I);
+ if (!Param->getType()->isPointerType())
+ continue;
+ bool ParamIsNonnull =
+ isNonnullType(Param->getType()) || (NNAttr && NNAttr->isNonNull(I));
+ if (ParamIsNonnull) {
+ const Expr *Arg = CE->getArg(I)->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->getType()->isPointerType())
+ State.NarrowedVars.insert(VD);
+ }
+ }
+ }
+ }
+
+ // Handle sp.reset() / sp.reset(ptr) — CXXMemberCallExpr
+ if (const auto *MCE = dyn_cast<CXXMemberCallExpr>(CE)) {
+ const Expr *Obj = MCE->getImplicitObjectArgument();
+ if (Obj && isSmartPointerType(Obj->getType())) {
+ if (const auto *MD = MCE->getMethodDecl()) {
+ if (MD->getDeclName().isIdentifier() && MD->getName() == "reset") {
+ // reset(nullptr) makes it null; reset(ptr) makes it non-null;
+ // reset() with no args makes it null.
+ bool ResetsToNonnull =
+ MCE->getNumArgs() > 0 &&
+ !MCE->getArg(0)->IgnoreParenImpCasts()->isNullPointerConstant(
+ Ctx, Expr::NPC_ValueDependentIsNotNull);
+ // Local variable
+ if (const auto *VD = getSmartPtrVarDecl(Obj)) {
+ State.NarrowedVars.erase(VD);
+ if (ResetsToNonnull)
+ State.NarrowedVars.insert(VD);
+ }
+ // this->member
+ if (const auto *FD = getSmartPtrThisMemberDecl(Obj)) {
+ State.NarrowedThisMembers.erase(FD);
+ if (ResetsToNonnull) {
+ State.NarrowedThisMembers.insert(FD);
+ State.NullableThisMembers.erase(FD);
+ } else {
+ State.NullableThisMembers.insert(FD);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Handle sp = nullptr / sp = make_unique(...) / sp = std::move(other)
+ // via the smart pointer's overloaded operator=.
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ if (OCE->getOperator() == OO_Equal && OCE->getNumArgs() >= 2) {
+ const VarDecl *LhsVD = getSmartPtrVarDecl(OCE->getArg(0));
+ if (LhsVD) {
+ State.NarrowedVars.erase(LhsVD);
+ const Expr *RHS = OCE->getArg(1)->IgnoreParenImpCasts();
+ // Strip MaterializeTemporaryExpr — move-assignment wraps the
+ // RHS in one (e.g. sp = foo() produces MTE around the call).
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(RHS))
+ RHS = MTE->getSubExpr()->IgnoreParenImpCasts();
+
+ if (isMakeSmartPtrCall(RHS)) {
+ // sp = make_unique<T>(...) — non-null
+ State.NarrowedVars.insert(LhsVD);
+ } else if (const auto *RhsCE = dyn_cast<CallExpr>(RHS)) {
+ if (RhsCE->isCallToStdMove() && RhsCE->getNumArgs() >= 1) {
+ // sp = std::move(other) — LHS inherits source's state
+ if (const auto *SrcVD = getSmartPtrVarDecl(RhsCE->getArg(0))) {
+ // Only narrow LHS if source was narrowed (known non-null)
+ if (State.NarrowedVars.contains(SrcVD))
+ State.NarrowedVars.insert(LhsVD);
+ State.NarrowedVars.erase(SrcVD);
+ }
+ } else if (isNonnullType(RhsCE->getType())) {
+ // sp = someFunction() — only narrow if return type is _Nonnull
+ State.NarrowedVars.insert(LhsVD);
+ }
+ }
+ // sp = nullptr or non-call — remains nullable (erased above)
+ }
+ }
+ }
+
+ // Handle std::move(sp) — marks the source as nullable
+ if (CE->isCallToStdMove() && CE->getNumArgs() >= 1) {
+ if (const auto *VD = getSmartPtrVarDecl(CE->getArg(0))) {
+ State.NarrowedVars.erase(VD);
+ }
+ if (const auto *FD = getSmartPtrThisMemberDecl(CE->getArg(0))) {
+ State.NarrowedThisMembers.erase(FD);
+ State.NullableThisMembers.insert(FD);
+ }
+ }
+ }
+
+ // Dereference whose base is itself a member access: handles smart
+ // pointer operator->, this->field, and var->field, consulting the
+ // corresponding narrowing sets before reporting.
+ void checkMemberExprDeref(const Expr *DerefExpr, const MemberExpr *ME) {
+ const Expr *Base = ME->getBase()->IgnoreParenImpCasts();
+
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(Base)) {
+ if (OCE->getOperator() == OO_Arrow) {
+ if (OCE->getNumArgs() >= 1) {
+ const Expr *Obj = OCE->getArg(0);
+ if (isSmartPointerType(Obj->getType())) {
+ if (!isSmartPointerNarrowed(Obj, State))
+ warnSmartPtrDeref(DerefExpr, Obj);
+ }
+ }
+ return;
+ }
+ }
+
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ if (isa<CXXThisExpr>(Base)) {
+ if (!isThisMemberNarrowed(FD))
+ checkDeref(DerefExpr, ME->getType());
+ } else if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const auto *BaseVD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (!isMemberNarrowed(BaseVD, FD))
+ checkDeref(DerefExpr, ME->getType());
+ }
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+void clang::runFlowNullabilityAnalysis(AnalysisDeclContext &AC,
+ FlowNullabilityHandler &Handler,
+ bool StrictMode,
+ NullabilityKind Default) {
+ CFG *Cfg = AC.getCFG();
+ if (!Cfg)
+ return;
+
+ ASTContext &Ctx = AC.getASTContext();
+
+ // States are keyed per CFG edge (pred block ID → succ block ID), not per
+ // block, so a branch can propagate different narrowing down its true and
+ // false edges. BlockEntryStates caches the joined entry state for the
+ // fixpoint-convergence check below.
+ using EdgeKey = std::pair<unsigned, unsigned>;
+ llvm::DenseMap<EdgeKey, NullState> EdgeStates;
+ llvm::DenseMap<unsigned, NullState> BlockEntryStates;
+
+ ForwardDataflowWorklist Worklist(*Cfg, AC);
+
+ const CFGBlock &Entry = Cfg->getEntry();
+ NullState InitState;
+
+ // Seed the entry state: pointer parameters annotated non-null start out
+ // narrowed.
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(AC.getDecl())) {
+ for (const auto *Param : FD->parameters()) {
+ if (Param->getType()->isPointerType() && isNonnullType(Param->getType()))
+ InitState.NarrowedVars.insert(Param);
+ }
+ }
+
+ BlockEntryStates[Entry.getBlockID()] = InitState;
+ Worklist.enqueueBlock(&Entry);
+
+ // Fixpoint iteration. Termination is guaranteed because the lattice is
+ // finite: NarrowedVars can only shrink (intersection at joins) and
+ // NullableVars can only grow (union at joins), both bounded by the number
+ // of declarations in the function.
+ while (const CFGBlock *Block = Worklist.dequeue()) {
+ unsigned BlockID = Block->getBlockID();
+
+ NullState State;
+ bool FirstPred = true;
+
+ // The entry block has no incoming edges; start from the seeded state.
+ if (BlockID == Entry.getBlockID()) {
+ State = BlockEntryStates[BlockID];
+ FirstPred = false;
+ }
+
+ // Join the states flowing in over every predecessor edge seen so far.
+ for (auto PI = Block->pred_begin(), PE = Block->pred_end(); PI != PE;
+ ++PI) {
+ if (const CFGBlock *Pred = *PI) {
+ EdgeKey EK = {Pred->getBlockID(), BlockID};
+ auto It = EdgeStates.find(EK);
+ if (It != EdgeStates.end()) {
+ if (FirstPred) {
+ State = It->second;
+ FirstPred = false;
+ } else {
+ State = join(State, It->second);
+ }
+ }
+ }
+ }
+
+ // No incoming edge has produced a state yet — the block will be
+ // re-enqueued once a predecessor's exit state is available.
+ if (FirstPred)
+ continue;
+
+ // Standard fixpoint check: skip re-processing if entry state is unchanged.
+ // This prevents duplicate warnings when the worklist re-visits a block.
+ // Skip this check for the entry block — its state is pre-seeded, so it
+ // would always match and prevent the first visit from propagating.
+ if (BlockID != Entry.getBlockID()) {
+ auto OldIt = BlockEntryStates.find(BlockID);
+ if (OldIt != BlockEntryStates.end() && OldIt->second == State)
+ continue;
+ }
+ BlockEntryStates[BlockID] = State;
+
+ // Run the transfer functions over the block's statements. This mutates
+ // State in place and reports dereferences through Handler.
+ TransferFunctions TF(State, Handler, Ctx, StrictMode, Default);
+ for (const auto &Elem : *Block) {
+ if (std::optional<CFGStmt> CS = Elem.getAs<CFGStmt>())
+ TF.visit(CS->getStmt());
+ }
+
+ NullState TrueState = State;
+ NullState FalseState = State;
+
+ // If the block ends in a recognized condition, apply any null-check
+ // facts it establishes to the true/false exit states.
+ if (const Stmt *Term = Block->getTerminatorStmt()) {
+ const Expr *Cond = nullptr;
+ if (const auto *IS = dyn_cast<IfStmt>(Term)) {
+ Cond = getTerminalCondition(IS->getCond());
+ } else if (const auto *WS = dyn_cast<WhileStmt>(Term)) {
+ Cond = getTerminalCondition(WS->getCond());
+ } else if (const auto *FS = dyn_cast<ForStmt>(Term)) {
+ if (FS->getCond())
+ Cond = getTerminalCondition(FS->getCond());
+ } else if (const auto *DS = dyn_cast<DoStmt>(Term)) {
+ Cond = getTerminalCondition(DS->getCond());
+ } else if (const auto *BO = dyn_cast<BinaryOperator>(Term)) {
+ // Short-circuit && / || terminators: the LHS is this block's
+ // branch condition.
+ if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr)
+ Cond = getTerminalCondition(BO->getLHS());
+ } else if (const auto *CO = dyn_cast<ConditionalOperator>(Term)) {
+ Cond = getTerminalCondition(CO->getCond());
+ }
+
+ if (Cond) {
+ SmallVector<ConditionResult, 2> Results;
+ analyzeCondition(Cond, Ctx, Results, &State.BoolGuards);
+ for (const auto &CR : Results) {
+ // Non-negated facts (p != nullptr) narrow the true edge;
+ // negated facts (p == nullptr, !p) narrow the false edge.
+ NullState &Narrow = CR.Negated ? FalseState : TrueState;
+ if (CR.IsThisMember) {
+ Narrow.NarrowedThisMembers.insert(CR.FD);
+ } else if (CR.VD) {
+ if (!CR.FD)
+ Narrow.NarrowedVars.insert(CR.VD);
+ else
+ Narrow.NarrowedMembers.insert({CR.VD, CR.FD});
+ }
+ }
+ }
+ }
+
+ // Propagate exit states. For a two-successor block the first successor
+ // is treated as the true edge and the second as the false edge; all
+ // other shapes get the unrefined State. A successor is re-enqueued
+ // only when its incoming edge state actually changed.
+ unsigned SucIdx = 0;
+ for (auto SI = Block->succ_begin(), SE = Block->succ_end(); SI != SE;
+ ++SI, ++SucIdx) {
+ if (const CFGBlock *Succ = *SI) {
+ const NullState &SuccState =
+ (Block->succ_size() == 2) ? (SucIdx == 0 ? TrueState : FalseState)
+ : State;
+ EdgeKey EK = {BlockID, Succ->getBlockID()};
+ auto It = EdgeStates.find(EK);
+ if (It == EdgeStates.end() || It->second != SuccState) {
+ EdgeStates[EK] = SuccState;
+ Worklist.enqueueBlock(Succ);
+ }
+ }
+ }
+ }
+}
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index db82695f87d6b..c3535c5164825 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -7716,6 +7716,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptInFlag(CmdArgs, options::OPT_fapple_pragma_pack,
options::OPT_fno_apple_pragma_pack);
+ Args.addOptInFlag(CmdArgs, options::OPT_fflow_sensitive_nullability,
+ options::OPT_fno_flow_sensitive_nullability);
+ if (Arg *A = Args.getLastArg(options::OPT_fnullability_default_EQ))
+ A->render(Args, CmdArgs);
+
// Remarks can be enabled with any of the `-f.*optimization-record.*` flags.
if (willEmitRemarks(Args) && checkRemarksOptions(D, Args, Triple))
renderRemarksOptions(Args, CmdArgs, Triple, Input, Output, JA);
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 37ed7488bb927..614db51f64080 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -32,6 +32,7 @@
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/CalledOnceCheck.h"
#include "clang/Analysis/Analyses/Consumed.h"
+#include "clang/Analysis/Analyses/FlowNullability.h"
#include "clang/Analysis/Analyses/LifetimeSafety/LifetimeSafety.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
@@ -2931,8 +2932,24 @@ LifetimeSafetyTUAnalysis(Sema &S, TranslationUnitDecl *TU,
}
}
+namespace {
+class FlowNullabilityReporter : public FlowNullabilityHandler {
+ Sema &S;
+
+public:
+ FlowNullabilityReporter(Sema &S) : S(S) {}
+
+ void handleNullableDereference(const Expr *DerefExpr,
+ QualType PtrType) override {
+ S.Diag(DerefExpr->getExprLoc(), diag::warn_flow_nullable_dereference)
+ << PtrType;
+ S.Diag(DerefExpr->getExprLoc(), diag::note_nullable_dereference_fix);
+ }
+};
+} // anonymous namespace
+
void clang::sema::AnalysisBasedWarnings::IssueWarnings(
- TranslationUnitDecl *TU) {
+ TranslationUnitDecl *TU) {
if (!TU)
return; // This is unexpected, give up quietly.
@@ -3047,19 +3064,24 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
// prototyping, but we need a way for analyses to say what expressions they
// expect to always be CFGElements and then fill in the BuildOptions
// appropriately. This is essentially a layering violation.
+ bool EnableFlowNullability =
+ S.getLangOpts().FlowSensitiveNullability &&
+ !Diags.isIgnored(diag::warn_flow_nullable_dereference, D->getBeginLoc());
+
if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis ||
- P.enableConsumedAnalysis || EnableLifetimeSafetyAnalysis) {
- // Unreachable code analysis and thread safety require a linearized CFG.
+ P.enableConsumedAnalysis || EnableLifetimeSafetyAnalysis ||
+ EnableFlowNullability) {
+ // These analyses require a linearized CFG with all statements visible.
AC.getCFGBuildOptions().setAllAlwaysAdd();
} else {
AC.getCFGBuildOptions()
- .setAlwaysAdd(Stmt::BinaryOperatorClass)
- .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
- .setAlwaysAdd(Stmt::BlockExprClass)
- .setAlwaysAdd(Stmt::CStyleCastExprClass)
- .setAlwaysAdd(Stmt::DeclRefExprClass)
- .setAlwaysAdd(Stmt::ImplicitCastExprClass)
- .setAlwaysAdd(Stmt::UnaryOperatorClass);
+ .setAlwaysAdd(Stmt::BinaryOperatorClass)
+ .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
+ .setAlwaysAdd(Stmt::BlockExprClass)
+ .setAlwaysAdd(Stmt::CStyleCastExprClass)
+ .setAlwaysAdd(Stmt::DeclRefExprClass)
+ .setAlwaysAdd(Stmt::ImplicitCastExprClass)
+ .setAlwaysAdd(Stmt::UnaryOperatorClass);
}
if (EnableLifetimeSafetyAnalysis)
AC.getCFGBuildOptions().AddLifetime = true;
@@ -3117,6 +3139,28 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
Reporter.emitDiagnostics();
}
+ // Gradual adoption: only run flow-sensitive nullability when the function
+ // opts in — either via -fnullability-default, an active assume_nonnull
+ // pragma, or explicit nullability annotations on the function signature.
+ // Computed here (not stored on Sema) to avoid scoping bugs when lambda
+ // bodies interleave with the enclosing function's processing.
+ if (EnableFlowNullability) {
+ bool FlowNullabilityForFunc = S.getLangOpts().getNullabilityDefault() !=
+ NullabilityKind::Unspecified ||
+ S.PP.getPragmaAssumeNonNullLoc().isValid();
+ if (!FlowNullabilityForFunc) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ FlowNullabilityForFunc = S.functionHasNullabilityAnnotations(FD);
+ }
+ if (FlowNullabilityForFunc && AC.getCFG()) {
+ llvm::TimeTraceScope TimeProfile("FlowNullabilityAnalysis");
+ FlowNullabilityReporter Reporter(S);
+ NullabilityKind Default = S.getLangOpts().getNullabilityDefault();
+ bool StrictMode = (Default != NullabilityKind::Unspecified);
+ runFlowNullabilityAnalysis(AC, Reporter, StrictMode, Default);
+ }
+ }
+
// Check for violations of consumed properties.
if (P.enableConsumedAnalysis) {
consumed::ConsumedWarningsHandler WarningHandler(S);
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index 98318fc597f36..7a7caac449778 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -678,9 +678,59 @@ void Sema::PrintStats() const {
AnalysisWarnings.PrintStats();
}
+/// Check if an expression is provably non-null by tracing through casts and
+/// pointer arithmetic back to a known non-null source (CXXThisExpr, _Nonnull
+/// annotated, operator new, address-of). Used to suppress false positive
+/// nullable-to-nonnull warnings on patterns like reinterpret_cast<T*>(this)+n.
+/// See also: isNonnullInit() in FlowNullability.cpp, which is the flow
+/// analysis's version with access to narrowing state.
+static constexpr unsigned MaxProvablyNonnullDepth = 16;
+
+static bool isExprProvablyNonnull(const Expr *E, unsigned Depth = 0) {
+ // Depth limit guards against pathological init chains (a = b; b = c; ...).
+ // Each recursion peels one AST node, so the stack usage is negligible.
+ // Note: this is a best-effort heuristic — it does not account for
+ // reassignment after initialization (the flow analysis handles that).
+ if (!E || Depth > MaxProvablyNonnullDepth)
+ return false;
+ E = E->IgnoreParenImpCasts();
+ if (isa<CXXThisExpr>(E))
+ return true;
+ if (isa<CXXNewExpr>(E))
+ return true;
+ if (const auto *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_AddrOf)
+ return true;
+ // Look through explicit casts (static_cast, reinterpret_cast, C-style).
+ if (const auto *CE = dyn_cast<ExplicitCastExpr>(E))
+ return isExprProvablyNonnull(CE->getSubExpr(), Depth + 1);
+ // Trace through local variable references to their initializer.
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && VD->hasInit())
+ return isExprProvablyNonnull(VD->getInit()->IgnoreParenImpCasts(),
+ Depth + 1);
+ }
+ // Pointer arithmetic on a non-null pointer is non-null.
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) {
+ if (BO->getLHS()->getType()->isPointerType())
+ return isExprProvablyNonnull(BO->getLHS(), Depth + 1);
+ if (BO->getRHS()->getType()->isPointerType())
+ return isExprProvablyNonnull(BO->getRHS(), Depth + 1);
+ }
+ }
+ // Check if the type itself is _Nonnull.
+ if (auto Null = E->getType()->getNullability())
+ if (*Null == NullabilityKind::NonNull)
+ return true;
+ return false;
+}
+
void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
QualType SrcType,
- SourceLocation Loc) {
+ SourceLocation Loc,
+ Expr *SrcExpr) {
std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
*ExprNullability != NullabilityKind::NullableResult))
@@ -690,9 +740,45 @@ void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
return;
+ // Suppress when the expression is provably non-null despite its type.
+ // This handles patterns like reinterpret_cast<T*>(this) + offset where
+ // the type system infers _Nullable on the cast destination but the value
+ // is clearly non-null (derived from this, new, address-of, etc).
+ if (getLangOpts().FlowSensitiveNullability && SrcExpr &&
+ isExprProvablyNonnull(SrcExpr))
+ return;
+
Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}
+bool Sema::functionHasNullabilityAnnotations(const FunctionDecl *FD) const {
+ if (!FD || FD->isInvalidDecl())
+ return false;
+
+ // Check return type
+ QualType ReturnType = FD->getReturnType();
+ if (!ReturnType.isNull() && !ReturnType->isDependentType()) {
+ if (ReturnType->getNullability())
+ return true;
+ }
+
+ // Check parameters — during early function processing, parameters might
+ // not be fully set up, so guard with param_empty().
+ if (!FD->param_empty()) {
+ for (const ParmVarDecl *Param : FD->parameters()) {
+ if (!Param)
+ continue;
+ QualType ParamType = Param->getType();
+ if (!ParamType.isNull() && !ParamType->isDependentType()) {
+ if (ParamType->getNullability())
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
// Generate diagnostics when adding or removing effects in a type conversion.
void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
SourceLocation Loc) {
@@ -782,7 +868,9 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
"can't cast prvalue to glvalue");
#endif
- diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
+  // Pass the source expression so the flow-sensitive analysis can suppress the
+  // warning when the expression is provably non-null despite its declared type.
+ diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc(), E);
diagnoseZeroToNullptrConversion(Kind, E);
if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 2951fd09294d8..2145ff3223518 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -14142,6 +14142,25 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
}
Init = Result.getAs<Expr>();
+
+ // Note: this may fire in constexpr-if discarded branches during template
+ // instantiation. Suppressing that case cleanly requires tracking whether
+ // we're inside a discarded branch at declaration processing time, which
+ // Clang doesn't currently expose here. In practice, the scenario
+ // (explicit _Nonnull p = nullptr in a discarded branch) is rare.
+ if (VDecl && Init && getLangOpts().FlowSensitiveNullability) {
+ QualType VDeclType = VDecl->getType();
+ if (auto Nullability = VDeclType->getNullability()) {
+ if (*Nullability == NullabilityKind::NonNull) {
+ if (Init->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull)) {
+ Diag(Init->getBeginLoc(), diag::warn_null_init_nonnull)
+ << VDeclType << Init->getSourceRange();
+ }
+ }
+ }
+ }
+
IsParenListInit = !InitSeq.steps().empty() &&
InitSeq.step_begin()->Kind ==
InitializationSequence::SK_ParenthesizedListInit;
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index f7e005a40363c..820efcfdf8dc7 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -5431,7 +5431,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// _Nullable type to a _Nonnull one, complain.
if (!isCast(CCK))
diagnoseNullableToNonnullConversion(ToType, InitialFromType,
- From->getBeginLoc());
+ From->getBeginLoc(), From);
return From;
}
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index ede2b9beef49b..13b04aaadd3ee 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -7593,8 +7593,9 @@ PerformConstructorInitialization(Sema &S,
// A smart pointer constructed from a nullable pointer is nullable.
if (NumArgs == 1 && !Kind.isExplicitCast())
- S.diagnoseNullableToNonnullConversion(
- Entity.getType(), Args.front()->getType(), Kind.getLocation());
+ S.diagnoseNullableToNonnullConversion(Entity.getType(),
+ Args.front()->getType(),
+ Kind.getLocation(), Args.front());
// Determine the arguments required to actually perform the constructor
// call.
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 1ca340e8b72c7..6e554368c1789 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -15615,8 +15615,8 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// This won't be caught in the arg's initialization: the parameter to
// the assignment operator is not marked nonnull.
if (Op == OO_Equal)
- diagnoseNullableToNonnullConversion(Args[0]->getType(),
- Args[1]->getType(), OpLoc);
+ diagnoseNullableToNonnullConversion(
+ Args[0]->getType(), Args[1]->getType(), OpLoc, Args[1]);
// Convert the arguments.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 846474fe94adf..6fade9f4fb02a 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -4463,7 +4463,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
} else {
bool isFunctionOrMethod = false;
- switch (auto context = state.getDeclarator().getContext()) {
+ switch (state.getDeclarator().getContext()) {
case DeclaratorContext::ObjCParameter:
case DeclaratorContext::ObjCResult:
case DeclaratorContext::Prototype:
@@ -4504,12 +4504,24 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
case PointerDeclaratorKind::SingleLevelPointer:
- // Infer _Nonnull if we are in an assumes-nonnull region.
- if (inAssumeNonNullRegion) {
+          // Infer nullability based on the pragma or the default mode.
+          // The pragma takes precedence and works in all modes (including
+          // unspecified). Skip -fnullability-default for system headers to avoid
+ // false positives on std library code (e.g. std::chrono, vsnprintf).
+ // Explicit #pragma clang assume_nonnull still works in system headers.
+ if (inAssumeNonNullRegion ||
+ (!S.getSourceManager().isInSystemHeader(D.getBeginLoc()) &&
+ S.getLangOpts().getNullabilityDefault() !=
+ NullabilityKind::Unspecified)) {
complainAboutInferringWithinChunk = wrappingKind;
- inferNullability = NullabilityKind::NonNull;
- inferNullabilityCS = (context == DeclaratorContext::ObjCParameter ||
- context == DeclaratorContext::ObjCResult);
+ if (inAssumeNonNullRegion) {
+ inferNullability = NullabilityKind::NonNull;
+ } else {
+ // Use Unspecified instead of the raw default so the flow checker
+ // can distinguish explicit _Nullable from default-inferred.
+ inferNullability = NullabilityKind::Unspecified;
+ }
+ inferNullabilityCS = false;
}
break;
@@ -4541,6 +4553,15 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
}
+ // For double-pointers (T**) without CF attrs, apply the same
+ // Unspecified default as SingleLevelPointer so the flow checker
+ // doesn't treat them as explicitly _Nullable.
+ if (!inferNullability && !inAssumeNonNullRegion &&
+ !S.getSourceManager().isInSystemHeader(D.getBeginLoc()) &&
+ S.getLangOpts().getNullabilityDefault() !=
+ NullabilityKind::Unspecified) {
+ inferNullability = NullabilityKind::Unspecified;
+ }
break;
}
break;
@@ -4569,7 +4590,37 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::FunctionalCast:
case DeclaratorContext::RequiresExpr:
case DeclaratorContext::Association:
- // Don't infer in these contexts.
+ // Upstream: don't infer nullability in these contexts (locals,
+ // template args, casts, etc.). When flow-sensitive nullability is
+ // active we silently tag single-level pointers as Unspecified so the
+ // flow checker can track them, but we never fire the consistency
+ // warning ("pointer is missing a nullability type specifier") here.
+ if (S.getLangOpts().FlowSensitiveNullability) {
+ auto wrappingKind = PointerWrappingDeclaratorKind::None;
+ switch (classifyPointerDeclarator(S, T, D, wrappingKind)) {
+ case PointerDeclaratorKind::NonPointer:
+ case PointerDeclaratorKind::MultiLevelPointer:
+ case PointerDeclaratorKind::CFErrorRefPointer:
+ case PointerDeclaratorKind::NSErrorPointerPointer:
+ break;
+
+ case PointerDeclaratorKind::SingleLevelPointer:
+ if (!inAssumeNonNullRegion &&
+ !S.getSourceManager().isInSystemHeader(D.getBeginLoc())) {
+ inferNullability = NullabilityKind::Unspecified;
+ }
+ inferNullabilityCS = false;
+ break;
+
+ case PointerDeclaratorKind::MaybePointerToCFRef:
+ if (!inAssumeNonNullRegion &&
+ !S.getSourceManager().isInSystemHeader(D.getBeginLoc())) {
+ inferNullability = NullabilityKind::Unspecified;
+ }
+ inferNullabilityCS = false;
+ break;
+ }
+ }
break;
}
}
@@ -4657,7 +4708,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If the type itself could have nullability but does not, infer pointer
// nullability and perform consistency checking.
if (S.CodeSynthesisContexts.empty()) {
- if (shouldHaveNullability(T) && !T->getNullability()) {
+ // Skip conversion operators (operator T*()) — their return type is
+ // part of the operator's identity, and applying default nullability
+ // would change the type identity, breaking overload resolution and
+ // causing spurious diagnostics on the conversion result type.
+ if (D.getName().getKind() != UnqualifiedIdKind::IK_ConversionFunctionId &&
+ shouldHaveNullability(T) && !T->getNullability()) {
if (isVaList(T)) {
// Record that we've seen a pointer, but do nothing else.
if (NumPointersRemaining > 0)
diff --git a/clang/test/Driver/nullsafe-flags-negative.c b/clang/test/Driver/nullsafe-flags-negative.c
new file mode 100644
index 0000000000000..4255fb162fff1
--- /dev/null
+++ b/clang/test/Driver/nullsafe-flags-negative.c
@@ -0,0 +1,31 @@
+// Negative driver tests for nullsafe flags.
+// Verifies flag interaction patterns and valid/invalid combinations.
+
+// === -fflow-sensitive-nullability without -fnullability-default ===
+// Should be accepted — defaults to unspecified.
+// RUN: %clang -### -fflow-sensitive-nullability %s 2>&1 | FileCheck -check-prefix=FLOW-ONLY %s
+// FLOW-ONLY: "-fflow-sensitive-nullability"
+
+// === All three valid values for -fnullability-default ===
+// RUN: %clang -### -fnullability-default=nullable %s 2>&1 | FileCheck -check-prefix=NULLABLE %s
+// RUN: %clang -### -fnullability-default=nonnull %s 2>&1 | FileCheck -check-prefix=NONNULL %s
+// RUN: %clang -### -fnullability-default=unspecified %s 2>&1 | FileCheck -check-prefix=UNSPEC %s
+// NULLABLE: "-fnullability-default=nullable"
+// NONNULL: "-fnullability-default=nonnull"
+// UNSPEC: "-fnullability-default=unspecified"
+
+// === Invalid -fnullability-default value is passed through to cc1 ===
+// RUN: %clang -### -fnullability-default=invalid %s 2>&1 | FileCheck -check-prefix=INVALID %s
+// INVALID: "-fnullability-default=invalid"
+
+// === cc1 rejects invalid -fnullability-default value ===
+// (tested in Sema/flow-nullability-warning-groups.cpp — cc1 tests can't live in Driver/)
+
+// === -fno-flow-sensitive-nullability disables the flag ===
+// RUN: %clang -### -fflow-sensitive-nullability -fno-flow-sensitive-nullability %s 2>&1 | FileCheck -check-prefix=NO-FLOW %s
+// NO-FLOW-NOT: "-fflow-sensitive-nullability"
+
+// === All flags together ===
+// RUN: %clang -### -fflow-sensitive-nullability -fnullability-default=nullable %s 2>&1 | FileCheck -check-prefix=ALL %s
+// ALL: "-fflow-sensitive-nullability"
+// ALL: "-fnullability-default=nullable"
diff --git a/clang/test/Driver/nullsafe-flags.c b/clang/test/Driver/nullsafe-flags.c
new file mode 100644
index 0000000000000..e73eae4215502
--- /dev/null
+++ b/clang/test/Driver/nullsafe-flags.c
@@ -0,0 +1,8 @@
+// RUN: %clang -### -fflow-sensitive-nullability %s 2>&1 | FileCheck -check-prefix=FLOW %s
+// RUN: %clang -### -fnullability-default=nullable %s 2>&1 | FileCheck -check-prefix=DEFAULT %s
+// RUN: %clang -### -fflow-sensitive-nullability -fnullability-default=nullable %s 2>&1 | FileCheck -check-prefix=BOTH %s
+
+// FLOW: "-fflow-sensitive-nullability"
+// DEFAULT: "-fnullability-default=nullable"
+// BOTH: "-fflow-sensitive-nullability"
+// BOTH: "-fnullability-default=nullable"
diff --git a/clang/test/Sema/flow-nullability-address-of.cpp b/clang/test/Sema/flow-nullability-address-of.cpp
new file mode 100644
index 0000000000000..7890111600bb3
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-address-of.cpp
@@ -0,0 +1,43 @@
+// Tests that address-of (&) expressions produce _Nonnull pointers.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Entity {
+ int x;
+};
+
+int * _Nullable getNullableInt();
+Entity * _Nullable getNullableEntity();
+
+#pragma clang assume_nonnull begin
+
+void test_addr_of_local() {
+ int x = 0;
+ int *p = &x;
+ *p = 1; // OK - &x is nonnull
+}
+
+void test_addr_of_direct() {
+ Entity e;
+ Entity *p = &e;
+ p->x = 1; // OK - &e is nonnull
+}
+
+void test_addr_of_member(Entity *_Nonnull obj) {
+ int *p = &(obj->x);
+ *p = 1; // OK - &(obj->x) is nonnull
+}
+
+void test_reassign_nullable_warns() {
+ int x = 0;
+ int *p = &x;
+ *p = 1; // OK - initially nonnull
+ p = getNullableInt();
+ *p = 2; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_nullable_control() {
+ Entity *e = getNullableEntity();
+ e->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-and-shortcircuit.cpp b/clang/test/Sema/flow-nullability-and-shortcircuit.cpp
new file mode 100644
index 0000000000000..307a3080301cd
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-and-shortcircuit.cpp
@@ -0,0 +1,49 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Node {
+ int value;
+ Node* _Nullable next;
+};
+
+#pragma clang assume_nonnull begin
+
+void test_and_basic(Node* _Nullable p) {
+ if (p && p->value == 42) {
+ p->value = 0; // OK - p narrowed by && LHS
+ }
+}
+
+void test_and_star(Node* _Nullable p) {
+ if (p && (*p).value == 42) {
+ (*p).value = 0; // OK
+ }
+}
+
+// CFG decomposes chained && into separate blocks, so member narrowing
+// works correctly at each stage.
+void test_and_chained_no_warning(Node* _Nullable p) {
+ if (p && p->next && p->next->value > 0) {
+ p->next->value = 0; // OK - member narrowing works throughout
+ }
+}
+
+void test_and_two_vars(Node* _Nullable p, Node* _Nullable q) {
+ if (p && q) {
+ p->value = q->value; // OK - both narrowed
+ }
+}
+
+void test_and_three_vars(Node* _Nullable a, Node* _Nullable b, Node* _Nullable c) {
+ if (a && b && c) {
+ a->value = b->value + c->value; // OK
+ }
+}
+
+void test_and_member_two_part(Node* _Nullable p) {
+ if (p && p->next) {
+ p->next->value = 1; // OK - both p and p->next narrowed
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-array-subscript.cpp b/clang/test/Sema/flow-nullability-array-subscript.cpp
new file mode 100644
index 0000000000000..01663148f1afe
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-array-subscript.cpp
@@ -0,0 +1,36 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// Array subscript (p[0]) is semantically a dereference (*(p + 0)).
+// The CFG-based analysis checks array subscript base pointers.
+
+#pragma clang assume_nonnull begin
+
+void test_subscript_warns(int* _Nullable p) {
+ p[0] = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_subscript_after_check(int* _Nullable p) {
+ if (p) {
+ p[0] = 42; // OK - narrowed by check
+ }
+}
+
+void test_subscript_offset(int* _Nullable p) {
+ p[5] = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_fixed_array_no_warn() {
+ int arr[4] = {1, 2, 3, 4};
+ arr[0] = 10; // OK - fixed-size array, not a pointer
+}
+
+struct S {
+ float gridColor[4];
+ struct { int x; } nested[2];
+};
+
+void test_member_fixed_array_no_warn(S s) {
+ float r = s.gridColor[0]; // OK - fixed-size array member
+ int x = s.nested[1].x; // OK - fixed-size array member
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-arrow-deref.cpp b/clang/test/Sema/flow-nullability-arrow-deref.cpp
new file mode 100644
index 0000000000000..5b49291f9b556
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-arrow-deref.cpp
@@ -0,0 +1,61 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Entity {
+ int x;
+ int value() const { return x; }
+};
+
+Entity* _Nullable getHead();
+Entity* _Nullable getChest();
+
+#pragma clang assume_nonnull begin
+
+void test_arrow_deref_warns(Entity* _Nullable p) {
+ p->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ int v = p->value(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_arrow_after_null_check(Entity* _Nullable p) {
+ if (p) {
+ p->x = 1; // OK - narrowed to nonnull
+ int v = p->value(); // OK - narrowed to nonnull
+ }
+}
+
+void test_arrow_no_check() {
+ Entity* head = getHead();
+ head->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_arrow_with_check() {
+ Entity* head = getHead();
+ if (!head) return;
+ head->x = 1; // OK - narrowed to nonnull
+}
+
+void test_star_still_works(Entity* _Nullable p) {
+ (*p).x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_star_after_check(Entity* _Nullable p) {
+ if (p) {
+ (*p).x = 1; // OK - narrowed to nonnull
+ }
+}
+
+// --- Member field assignment invalidation ---
+// Assigning to a narrowed member should invalidate its narrowing.
+
+struct Container {
+ Entity* _Nullable child;
+
+ void test_member_assign_invalidates() {
+ if (child) {
+ child->x = 1; // OK — narrowed
+ child = nullptr;
+ child->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+ }
+};
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-bool-intermediary.cpp b/clang/test/Sema/flow-nullability-bool-intermediary.cpp
new file mode 100644
index 0000000000000..c9c5cbfa124a8
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-bool-intermediary.cpp
@@ -0,0 +1,121 @@
+// Tests for boolean intermediary narrowing — tracking null-check results
+// stored in bool variables, and for !(p && q) decomposition.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wno-nullable-to-nonnull-conversion -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+};
+
+#pragma clang assume_nonnull begin
+
+// === Basic patterns ===
+
+void test_ne_nullptr(Node * _Nullable p) {
+ bool valid = (p != nullptr);
+ if (valid) {
+ (void)p->value; // OK
+ }
+ // Outside the if, p is still nullable
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_eq_nullptr_negated(Node * _Nullable p) {
+ bool isNull = (p == nullptr);
+ if (!isNull) {
+ (void)p->value; // OK
+ }
+}
+
+void test_pointer_truthiness(Node * _Nullable p) {
+ bool valid = p;
+ if (valid) {
+ (void)p->value; // OK
+ }
+}
+
+void test_negated_pointer(Node * _Nullable p) {
+ bool isNull = !p;
+ if (!isNull) {
+ (void)p->value; // OK
+ }
+}
+
+// === Invalidation ===
+
+void test_pointer_reassigned(Node * _Nullable p, Node * _Nullable q) {
+ bool valid = (p != nullptr);
+ p = q; // reassign pointer — bool guard is stale
+ if (valid) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+void test_bool_reassigned(Node * _Nullable p) {
+ bool valid = (p != nullptr);
+ valid = false;
+ if (valid) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+void test_pointer_incremented(int * _Nullable p) {
+ bool valid = (p != nullptr);
+ p++;
+ if (valid) {
+ (void)*p; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === Negated conjunction ===
+
+void test_negated_and_return(Node * _Nullable p, Node * _Nullable q) {
+ if (!(p && q)) return;
+ (void)p->value; // OK
+ (void)q->value; // OK
+}
+
+void test_negated_and_else(Node * _Nullable p, Node * _Nullable q) {
+ if (!(p && q)) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ } else {
+ (void)p->value; // OK
+ (void)q->value; // OK
+ }
+}
+
+void test_negated_triple_and(Node * _Nullable a, Node * _Nullable b, Node * _Nullable c) {
+ if (!(a && b && c)) return;
+ (void)a->value; // OK
+ (void)b->value; // OK
+ (void)c->value; // OK
+}
+
+void test_negated_and_ne_nullptr(Node * _Nullable p, Node * _Nullable q) {
+ if (!(p != nullptr && q != nullptr)) return;
+ (void)p->value; // OK
+ (void)q->value; // OK
+}
+
+// === Combined: bool guard + negated && ===
+
+void test_bool_guard_in_and(Node * _Nullable p, Node * _Nullable q) {
+ bool pOk = (p != nullptr);
+ if (pOk && q) {
+ (void)p->value; // OK
+ (void)q->value; // OK
+ }
+}
+
+// === Bool guard does not track compound conditions ===
+
+void test_bool_compound_not_tracked(Node * _Nullable p, Node * _Nullable q) {
+ bool both = (p && q);
+ if (both) {
+ // Compound conditions are not decomposed into per-variable guards
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ (void)q->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-brace-assert.cpp b/clang/test/Sema/flow-nullability-brace-assert.cpp
new file mode 100644
index 0000000000000..03b551942a1f7
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-brace-assert.cpp
@@ -0,0 +1,124 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node* _Nullable next;
+};
+
+[[noreturn]] void abort_handler(const char* msg);
+
+#define INVARIANT(cond) \
+ { \
+ if (!(cond)) { \
+ abort_handler("invariant failed"); \
+ } \
+ }
+
+#define INVARIANT_MSG(cond, msg) \
+ { \
+ if (!(cond)) { \
+ abort_handler(msg); \
+ } \
+ }
+
+#pragma clang assume_nonnull begin
+
+void test_basic_brace_assert(Node* _Nullable p) {
+ INVARIANT(p);
+ p->value = 1; // OK - INVARIANT ensures p is non-null
+}
+
+void test_brace_assert_with_message(Node* _Nullable p) {
+ INVARIANT_MSG(p, "p must not be null");
+ p->value = 1; // OK
+}
+
+void test_brace_assert_ne_nullptr(Node* _Nullable p) {
+ INVARIANT(p != nullptr);
+ p->value = 1; // OK - p != nullptr checked
+}
+
+void test_brace_assert_multiple_vars(Node* _Nullable p, Node* _Nullable q) {
+ INVARIANT(p);
+ INVARIANT(q);
+ p->value = q->value; // OK - both narrowed
+}
+
+void test_brace_assert_member(Node* _Nullable p) {
+ INVARIANT(p);
+ INVARIANT(p->next);
+ p->next->value = 1; // OK - both p and p->next narrowed through macro
+}
+
+void test_no_assert_still_warns(Node* _Nullable p) {
+ p->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+void test_manual_bare_brace_noreturn(Node* _Nullable p) {
+ {
+ if (!p) {
+ abort_handler("null");
+ }
+ }
+ p->value = 1; // OK - bare braces with noreturn narrow outward
+}
+
+void test_nested_brace_assert(Node* _Nullable p, Node* _Nullable q) {
+ {
+ if (!p) { abort_handler("p"); }
+ if (!q) { abort_handler("q"); }
+ }
+ p->value = q->value; // OK - both narrowed
+}
+
+void test_brace_assert_does_not_affect_unrelated(Node* _Nullable p, Node* _Nullable q) {
+ INVARIANT(p);
+ q->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+struct Widget {
+ Node* _Nullable data;
+ int x;
+
+ void test_this_arrow() {
+ this->x = 1; // OK - 'this' is never null
+ }
+
+ int test_this_deref() {
+ return (*this).x; // OK - 'this' is never null
+ }
+
+ void test_this_member_narrowing() {
+ INVARIANT(data);
+ data->value = 1; // OK - data narrowed by INVARIANT
+ }
+
+ void test_this_member_if_narrowing() {
+ if (data) {
+ data->value = 1; // OK - data narrowed by if
+ }
+ }
+
+ void test_this_member_no_narrowing() {
+ data->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+ }
+};
+
+void test_and_member_narrowing(Node* _Nullable p) {
+ if (p && p->next) {
+ p->next->value = 1; // OK - both p and p->next narrowed by && condition
+ }
+}
+
+void test_and_member_no_narrowing(Node* _Nullable p) {
+ if (p) {
+ p->next->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+ }
+}
+
+void test_or_member_early_return(Node* _Nullable p) {
+ if (!p || !p->next) return;
+ p->next->value = 1; // OK - both p and p->next narrowed by early return
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-builtin-expect.cpp b/clang/test/Sema/flow-nullability-builtin-expect.cpp
new file mode 100644
index 0000000000000..272ecbc7f0019
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-builtin-expect.cpp
@@ -0,0 +1,87 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Entity {
+ int x;
+};
+
+[[noreturn]] void fatal(const char* msg);
+
+#pragma clang assume_nonnull begin
+
+// === __builtin_expect in conditions ===
+
+void test_builtin_expect_if(Entity* _Nullable p) {
+ if (__builtin_expect(!!(p), 1)) {
+ p->x = 1; // OK - narrowed through __builtin_expect
+ }
+}
+
+void test_builtin_expect_negated(Entity* _Nullable p) {
+ if (__builtin_expect(!!(p == nullptr), 0))
+ return;
+ p->x = 1; // OK - early return narrowing through __builtin_expect
+}
+
+void test_builtin_expect_early_return(Entity* _Nullable p) {
+ if (__builtin_expect(!!(!p), 0))
+ return;
+ p->x = 1; // OK
+}
+
+// === Macro-wrapped __builtin_expect (like LIKELY/UNLIKELY) ===
+
+#define LIKELY(x) __builtin_expect(!!(x), 1)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+
+void test_likely_macro(Entity* _Nullable p) {
+ if (LIKELY(p)) {
+ p->x = 1; // OK - narrowed
+ }
+}
+
+void test_unlikely_null_check(Entity* _Nullable p) {
+ if (UNLIKELY(!p))
+ return;
+ p->x = 1; // OK
+}
+
+// === __builtin_expect in assertion macros ===
+
+#define CHECK(cond) do { if (__builtin_expect(!(cond), 0)) fatal("CHECK failed"); } while(0)
+
+void test_check_macro(Entity* _Nullable p) {
+ CHECK(p);
+ p->x = 1; // OK - CHECK asserted non-null
+}
+
+void test_check_macro_two_vars(Entity* _Nullable p, Entity* _Nullable q) {
+ CHECK(p);
+ CHECK(q);
+ p->x = q->x; // OK
+}
+
+// === __builtin_assume narrows pointers ===
+
+void test_builtin_assume_simple(Entity* _Nullable p) {
+ __builtin_assume(p != nullptr);
+ p->x = 1; // OK - narrowed by __builtin_assume
+}
+
+void test_builtin_assume_truthiness(Entity* _Nullable p) {
+ __builtin_assume(p);
+ p->x = 1; // OK - narrowed by __builtin_assume(p)
+}
+
+void test_builtin_assume_two_vars(Entity* _Nullable p, Entity* _Nullable q) {
+ __builtin_assume(p != nullptr);
+ __builtin_assume(q != nullptr);
+ p->x = q->x; // OK
+}
+
+// === Without __builtin_expect still warns ===
+
+void test_no_narrowing_without_check(Entity* _Nullable p) {
+ p->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-c-basic.c b/clang/test/Sema/flow-nullability-c-basic.c
new file mode 100644
index 0000000000000..a04fab26fb006
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-c-basic.c
@@ -0,0 +1,45 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable %s -verify
+
+struct Node {
+ int value;
+ struct Node* _Nullable next;
+};
+
+struct Node* _Nullable getNode(void);
+
+void test_star_deref_warns(int* p) {
+ *p = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_star_after_check(int* p) {
+ if (p) {
+ *p = 42; // OK - narrowed
+ }
+}
+
+void test_arrow_deref_warns(struct Node* p) {
+ p->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_arrow_after_check(struct Node* p) {
+ if (p) {
+ p->value = 1; // OK
+ }
+}
+
+void test_early_return(struct Node* p) {
+ if (!p) return;
+ p->value = 1; // OK - narrowed by early return
+}
+
+void test_null_comparison(struct Node* p) {
+ if (p != 0) {
+ p->value = 1; // OK
+ }
+}
+
+void test_linked_list(struct Node* _Nullable head) {
+ for (struct Node* _Nullable p = head; p; p = p->next) {
+ p->value = 0; // OK
+ }
+}
diff --git a/clang/test/Sema/flow-nullability-c-comprehensive.c b/clang/test/Sema/flow-nullability-c-comprehensive.c
new file mode 100644
index 0000000000000..1da1b4620320e
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-c-comprehensive.c
@@ -0,0 +1,211 @@
+// Comprehensive C test for flow-sensitive nullability analysis.
+// Covers C-specific patterns: nested structs, restrict, compound literals,
+// designated initializers, and C99/C11 features.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c11 %s -verify
+
+struct Point {
+ int x;
+ int y;
+};
+
+struct Line {
+ struct Point * _Nullable start;
+ struct Point * _Nullable end;
+};
+
+struct Node {
+ int value;
+ struct Node * _Nullable next;
+ struct Node * _Nullable prev;
+};
+
+struct Point * _Nullable getPoint(void);
+struct Node * _Nullable getNode(void);
+int getInt(void);
+
+// === Nested struct pointer access ===
+
+void test_nested_struct(struct Line * _Nullable line) {
+ if (line && line->start) {
+ line->start->x = 1; // OK — both narrowed
+ }
+}
+
+void test_nested_not_checked(struct Line * _Nonnull line) {
+ line->start->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Double-linked list traversal ===
+
+void test_doubly_linked(struct Node * _Nullable head) {
+ for (struct Node * _Nullable p = head; p; p = p->next) {
+ p->value = 0; // OK — narrowed by loop condition
+ if (p->prev) {
+ p->prev->value = -1; // OK — narrowed
+ }
+ }
+}
+
+// === Reverse traversal ===
+
+void test_reverse_traversal(struct Node * _Nullable tail) {
+ struct Node * _Nullable p = tail;
+ while (p) {
+ p->value = 0; // OK
+ p = p->prev;
+ }
+}
+
+// === restrict pointer ===
+
+void test_restrict(int * restrict p) {
+ *p = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_restrict_checked(int * restrict _Nullable p) {
+ if (p)
+ *p = 42; // OK
+}
+
+// === Compound literal ===
+
+void test_compound_literal(void) {
+ int *p = &(int){42};
+ *p = 0; // OK — address-of compound literal is nonnull
+}
+
+// === Designated initializer (struct members are stack-allocated) ===
+
+void test_designated_init(void) {
+ struct Point pt = {.x = 1, .y = 2};
+ struct Point *pp = &pt;
+ pp->x = 3; // OK — address-of
+}
+
+// === Array of pointers ===
+
+void test_pointer_array(struct Node * _Nullable * _Nonnull nodes, int n) {
+ for (int i = 0; i < n; i++) {
+ struct Node * _Nullable node = nodes[i];
+ if (node) {
+ node->value = i; // OK — narrowed via local variable
+ }
+ }
+}
+
+// === Multiple sequential checks ===
+
+void test_sequential_checks(struct Node * _Nullable a,
+ struct Node * _Nullable b,
+ struct Node * _Nullable c) {
+ if (!a) return;
+ if (!b) return;
+ if (!c) return;
+ a->value = b->value + c->value; // OK — all narrowed
+}
+
+// === Null check with comparison operators ===
+
+void test_comparison_styles(struct Node *p) {
+ // All these are equivalent null checks
+ if (p != 0) {
+ p->value = 1; // OK
+ }
+}
+
+void test_comparison_null_macro(struct Node *p) {
+ if (p != ((void*)0)) {
+ p->value = 1; // OK
+ }
+}
+
+// === Function returning _Nonnull ===
+
+struct Node * _Nonnull createNode(void);
+
+void test_nonnull_return(void) {
+ struct Node *n = createNode();
+ n->value = 1; // OK — _Nonnull return
+}
+
+// === Void pointer cast patterns ===
+
+void test_void_ptr_cast(void * _Nonnull raw) {
+ struct Node *n = (struct Node *)raw;
+ // void* is _Nonnull, so cast result is nonnull
+ n->value = 1; // OK — nonnull source
+}
+
+void test_void_ptr_nullable(void * _Nullable raw) {
+ struct Node *n = (struct Node *)raw;
+ n->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Conditional operator ===
+
+void test_cond_op(struct Node * _Nullable p, struct Node * _Nullable q) {
+ struct Node *r = p ? p : q;
+ // r might be q which is nullable
+ if (r)
+ r->value = 1; // OK — narrowed
+}
+
+// === Nested conditionals ===
+
+void test_nested_cond(struct Node * _Nullable p) {
+ if (p) {
+ if (p->next) {
+ if (p->next->next) {
+ p->next->next->value = 0; // OK — all narrowed
+ }
+ }
+ }
+}
+
+// === Goto-based cleanup pattern (common in C) ===
+
+int test_goto_cleanup(struct Node * _Nullable p) {
+ int result = -1;
+ if (!p) goto out;
+ result = p->value; // OK — narrowed
+out:
+ return result;
+}
+
+// === Switch with null check in cases ===
+
+void test_switch_null_check(struct Node * _Nullable p, int choice) {
+ switch (choice) {
+ case 0:
+ if (p)
+ p->value = 0; // OK
+ break;
+ case 1:
+ if (!p) return;
+ p->value = 1; // OK
+ break;
+ default:
+ break;
+ }
+}
+
+// === Comma operator ===
+
+void test_comma(struct Node * _Nullable p) {
+ if (!p) return;
+ (void)p->value; // OK — narrowed (comma test simplified)
+}
+
+// === sizeof does not evaluate ===
+
+void test_sizeof_unevaluated(struct Node * _Nullable p) {
+ int s = sizeof(p->value); // OK — sizeof is unevaluated
+ (void)s;
+}
+
+// === Pointer subtraction ===
+
+void test_ptr_subtraction(int *a, int *b) {
+ long diff = a - b; // OK — subtraction, not dereference
+ (void)diff;
+}
diff --git a/clang/test/Sema/flow-nullability-c-idioms.c b/clang/test/Sema/flow-nullability-c-idioms.c
new file mode 100644
index 0000000000000..a1182bbf5bc2b
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-c-idioms.c
@@ -0,0 +1,301 @@
+// Comprehensive C idiom tests for flow-sensitive nullability analysis.
+// Real C code is macro-heavy, uses malloc/free patterns, errno checks,
+// container_of tricks, callback patterns, and varargs. These patterns
+// must work correctly — C is half the target audience for this feature.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wno-nullable-to-nonnull-conversion -std=c11 %s -verify
+
+typedef __SIZE_TYPE__ size_t;
+typedef _Bool bool;
+#define true 1
+#define false 0
+#define NULL ((void *)0)
+#define offsetof(type, member) __builtin_offsetof(type, member)
+
+// Simulated stdlib declarations
+void * _Nullable malloc(size_t);
+void * _Nullable calloc(size_t, size_t);
+void * _Nullable realloc(void * _Nullable, size_t);
+void free(void * _Nullable);
+void abort(void) __attribute__((noreturn));
+void exit(int) __attribute__((noreturn));
+
+struct Node {
+ int value;
+ struct Node * _Nullable next;
+};
+
+struct Buffer {
+ char * _Nullable data;
+ size_t len;
+ size_t cap;
+};
+
+struct Node * _Nullable getNode(void);
+int getInt(void);
+
+// === Macro-heavy null-check patterns ===
+// Real C code wraps null checks in macros. The analysis should see through them.
+
+#define CHECK_NULL(ptr) do { if (!(ptr)) return; } while(0)
+#define CHECK_NULL_RET(ptr, ret) do { if (!(ptr)) return (ret); } while(0)
+#define ASSERT_NONNULL(ptr) do { if (!(ptr)) abort(); } while(0)
+#define DEREF(p) ((p)->value)
+#define SAFE_DEREF(p, fallback) ((p) ? (p)->value : (fallback))
+
+void test_check_null_macro(struct Node *p) {
+ CHECK_NULL(p);
+ p->value = 1; // OK — macro expanded to if(!p) return
+}
+
+int test_check_null_ret_macro(struct Node *p) {
+ CHECK_NULL_RET(p, -1);
+ return p->value; // OK
+}
+
+void test_assert_nonnull_macro(struct Node *p) {
+ ASSERT_NONNULL(p);
+ p->value = 1; // OK — abort() is noreturn
+}
+
+void test_deref_macro(struct Node *p) {
+ int v = DEREF(p); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ (void)v;
+}
+
+void test_deref_macro_guarded(struct Node *p) {
+ if (p) {
+ int v = DEREF(p); // OK — narrowed before macro
+ (void)v;
+ }
+}
+
+void test_safe_deref_macro(struct Node *p) {
+ int v = SAFE_DEREF(p, -1); // OK — ternary checks p
+ (void)v;
+}
+
+// === malloc/free patterns ===
+// malloc returns nullable (can fail), need to check before use.
+
+void test_malloc_no_check(void) {
+ // In C, void* implicitly converts to struct Node*, preserving _Nullable
+ struct Node * _Nullable n = malloc(sizeof(struct Node));
+ n->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ free(n);
+}
+
+void test_malloc_checked(void) {
+ struct Node *n = (struct Node *)malloc(sizeof(struct Node));
+ if (!n) return;
+ n->value = 1; // OK — narrowed
+ n->next = NULL;
+ free(n);
+}
+
+void test_malloc_abort(void) {
+ struct Node *n = (struct Node *)malloc(sizeof(struct Node));
+ if (!n) abort();
+ n->value = 1; // OK — abort is noreturn
+ free(n);
+}
+
+void test_calloc_checked(void) {
+ struct Node *n = (struct Node *)calloc(1, sizeof(struct Node));
+ if (!n) return;
+ n->value = 1; // OK
+ free(n);
+}
+
+// === realloc pattern (returns nullable) ===
+
+void test_realloc(struct Buffer * _Nonnull buf) {
+ char * _Nullable new_data = (char *)realloc(buf->data, buf->cap * 2);
+ if (!new_data) return;
+ buf->data = new_data;
+ buf->cap *= 2;
+}
+
+// === Linked list construction ===
+
+struct Node * _Nullable list_prepend(struct Node * _Nullable head, int val) {
+ struct Node *n = (struct Node *)malloc(sizeof(struct Node));
+ if (!n) return head;
+ n->value = val; // OK — checked
+ n->next = head;
+ return n;
+}
+
+void list_free(struct Node * _Nullable head) {
+ struct Node * _Nullable p = head;
+ while (p) {
+ struct Node * _Nullable next = p->next; // OK — p narrowed
+ free(p);
+ p = next;
+ }
+}
+
+int list_sum(struct Node * _Nullable head) {
+ int sum = 0;
+ for (struct Node * _Nullable p = head; p; p = p->next) {
+ sum += p->value; // OK — narrowed by loop condition
+ }
+ return sum;
+}
+
+// === Callback / function pointer patterns ===
+
+typedef void (*node_visitor_fn)(struct Node * _Nonnull, void * _Nullable);
+
+void list_foreach(struct Node * _Nullable head, node_visitor_fn _Nonnull fn, void * _Nullable ctx) {
+ for (struct Node * _Nullable p = head; p; p = p->next) {
+ fn(p, ctx); // OK — p narrowed
+ }
+}
+
+// === errno-style error checking ===
+
+struct File;
+struct File * _Nullable file_open(const char * _Nonnull path);
+int file_read(struct File * _Nonnull f, char * _Nonnull buf, int len);
+void file_close(struct File * _Nonnull f);
+
+int test_errno_pattern(void) {
+ struct File * _Nullable f = file_open("/tmp/test");
+ if (!f) return -1;
+ // f is narrowed past early return
+ char buf[256];
+ int n = file_read(f, buf, 256); // OK
+ file_close(f); // OK
+ return n;
+}
+
+// === container_of macro pattern ===
+// A ubiquitous C pattern (Linux kernel, etc.) that computes a container
+// from a member pointer. The result is always nonnull if the member is.
+
+#define container_of(ptr, type, member) \
+ ((type *)((char *)(ptr) - offsetof(type, member)))
+
+struct list_head {
+ struct list_head * _Nullable next;
+ struct list_head * _Nullable prev;
+};
+
+struct my_item {
+ int data;
+ struct list_head link;
+};
+
+void test_container_of(struct list_head * _Nullable pos) {
+ if (!pos) return;
+ // container_of produces a pointer from arithmetic — nonnull if pos is nonnull
+ struct my_item *item = container_of(pos, struct my_item, link);
+ item->data = 42; // OK — arithmetic on non-null pointer
+}
+
+// === Multi-level cleanup with goto ===
+// The goto cleanup pattern is the C equivalent of RAII.
+
+int test_multi_level_cleanup(void) {
+ int ret = -1;
+ struct Node *a = (struct Node *)malloc(sizeof(struct Node));
+ if (!a) goto out;
+
+ struct Node *b = (struct Node *)malloc(sizeof(struct Node));
+ if (!b) goto free_a;
+
+ a->value = 1; // OK — narrowed past goto
+ b->value = 2; // OK — narrowed past goto
+ a->next = b;
+ ret = a->value + b->value;
+
+free_a:
+ free(a);
+out:
+ return ret;
+}
+
+// === Bitfield struct with nullable pointer ===
+
+struct Options {
+ unsigned verbose : 1;
+ unsigned debug : 1;
+ struct Node * _Nullable config;
+};
+
+void test_bitfield_struct(struct Options * _Nonnull opts) {
+ if (opts->config) {
+ opts->config->value = opts->verbose; // OK — narrowed
+ }
+}
+
+// === Null check via helper function result ===
+// The analysis is intraprocedural — can't see inside helper functions.
+
+static bool is_valid(const struct Node * _Nullable p) {
+ return p != NULL;
+}
+
+void test_helper_check(struct Node *p) {
+ // is_valid returns bool but the analysis can't know it checks for null.
+ // This is an accepted limitation of intraprocedural analysis.
+ if (is_valid(p)) {
+ p->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === Nested macro expansion ===
+
+#define LIKELY(x) __builtin_expect(!!(x), 1)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+
+void test_likely_macro(struct Node *p) {
+ if (LIKELY(p != NULL)) {
+ p->value = 1; // OK
+ }
+}
+
+void test_unlikely_null(struct Node *p) {
+ if (UNLIKELY(p == NULL)) return;
+ p->value = 1; // OK
+}
+
+// === Flexible array member ===
+
+struct FlexArray {
+ int count;
+ struct Node * _Nullable items[];
+};
+
+void test_flex_array(struct FlexArray * _Nonnull fa) {
+ for (int i = 0; i < fa->count; i++) {
+ struct Node * _Nullable item = fa->items[i];
+ if (item) {
+ item->value = i; // OK
+ }
+ }
+}
+
+// === void** output parameter pattern ===
+// Common C pattern: function fills in an output pointer.
+
+int get_node_out(struct Node * _Nullable * _Nonnull out);
+
+void test_output_param(void) {
+ struct Node * _Nullable n = NULL;
+ if (get_node_out(&n) == 0 && n) {
+ n->value = 42; // OK — checked via &&
+ }
+}
+
+// === Static assert + null check (compile-time vs runtime) ===
+
+_Static_assert(sizeof(struct Node) > 0, "Node must have size");
+
+void test_with_static_assert(struct Node *p) {
+ _Static_assert(sizeof(*p) == sizeof(struct Node), "size match");
+ // _Static_assert doesn't affect flow, p is still nullable
+ if (p) {
+ p->value = 1; // OK
+ }
+}
diff --git a/clang/test/Sema/flow-nullability-call-invalidation.c b/clang/test/Sema/flow-nullability-call-invalidation.c
new file mode 100644
index 0000000000000..80be05dc9fb15
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-call-invalidation.c
@@ -0,0 +1,49 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable %s -verify
+// expected-no-diagnostics
+
+// Test that function calls do NOT invalidate narrowing.
+// Functions receive a copy of pointer arguments, so they cannot modify
+// the original pointer variable to make it null.
+
+void takes_int(int x);
+void takes_ptr(int *p);
+
+void test_narrowing_preserved_after_call(int *p) {
+ if (p) {
+ // p is narrowed to nonnull
+ takes_int(42); // Function call doesn't invalidate p's narrowing
+ *p = 1; // OK - p is still nonnull
+ }
+}
+
+void test_narrowing_preserved_pass_ptr(int *p) {
+ if (p) {
+ // p is narrowed to nonnull
+ takes_ptr(p); // Even passing p doesn't invalidate (pass by value)
+ *p = 1; // OK - p is still nonnull
+ }
+}
+
+void test_multiple_calls(int *p, int *q) {
+ if (p && q) {
+ // Both p and q narrowed to nonnull
+ takes_ptr(p);
+ takes_ptr(q);
+ takes_int(42);
+ *p = 1; // OK - narrowing preserved through all calls
+ *q = 2; // OK - narrowing preserved through all calls
+ }
+}
+
+// Known false negative: passing a pointer's address to a function lets the
+// callee set *out = NULL, invalidating narrowing. The analysis intentionally
+// does not invalidate on address-taken (matching ThreadSafety's approach)
+// to avoid excessive noise from common output-parameter patterns.
+void nullify(int **out);
+
+void test_address_taken_false_negative(int *p) {
+ if (p) {
+ nullify(&p); // could set p = NULL
+ *p = 1; // no warning — known false negative
+ }
+}
diff --git a/clang/test/Sema/flow-nullability-cast-propagation.cpp b/clang/test/Sema/flow-nullability-cast-propagation.cpp
new file mode 100644
index 0000000000000..cf09de0383b82
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-cast-propagation.cpp
@@ -0,0 +1,116 @@
+// Tests that explicit casts preserve nullability from the source operand.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Base {
+ int x;
+};
+
+struct Derived : Base {
+ int y;
+};
+
+Base * _Nullable getNullable();
+Base * _Nonnull getNonnull();
+
+#pragma clang assume_nonnull begin
+
+void test_c_style_cast_nonnull() {
+ Base *b = getNonnull();
+ Derived *d = (Derived *)b;
+ d->y = 1; // OK - nonnull propagated through C-style cast
+}
+
+void test_static_cast_nonnull() {
+ Base *b = getNonnull();
+ Derived *d = static_cast<Derived *>(b);
+ d->y = 1; // OK - nonnull propagated through static_cast
+}
+
+void test_reinterpret_cast_nonnull() {
+ Base *b = getNonnull();
+ int *ip = reinterpret_cast<int *>(b);
+ *ip = 1; // OK - nonnull propagated through reinterpret_cast
+}
+
+void test_c_style_cast_nullable_warns() {
+ Base *b = getNullable();
+ Derived *d = (Derived *)b;
+ d->y = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_explicit_nonnull_dest() {
+ Base *b = getNullable();
+ Derived * _Nonnull d = (Derived * _Nonnull)b;
+ d->y = 1; // OK - explicit _Nonnull on dest type
+}
+
+// --- reinterpret_cast on this + pointer arithmetic ---
+
+typedef unsigned char uint8_t;
+
+struct Foo {
+ int x;
+ void test_cast_this() {
+ auto* p = reinterpret_cast<uint8_t*>(this) + 4;
+ *p = 0; // OK — this is always non-null, arithmetic preserves it
+ }
+};
+
+void test_ptr_arith_nonnull(int* p) {
+ auto* q = p + 1;
+ *q = 0; // OK — p is nonnull (assume_nonnull), arithmetic preserves it
+}
+
+void test_ptr_arith_nullable(int* _Nullable p) {
+ auto* q = p + 1;
+ *q = 0; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// --- reinterpret_cast from this via Base* should not warn ---
+// ExplicitCastExpr is the base class for CXXStaticCastExpr and
+// CXXReinterpretCastExpr, so the unwrapCastsAndArithmetic helper handles both.
+
+struct DerivedReinterpret : Base {
+ void test_reinterpret_cast_this_to_base() {
+ // reinterpret_cast from this — this is always non-null, cast preserves it
+ Base *b = reinterpret_cast<Base*>(this);
+ b->x = 1; // OK — this is non-null
+
+ // pointer arithmetic on reinterpret_cast<uint8_t*>(this)
+ uint8_t *raw = reinterpret_cast<uint8_t*>(this) + 4;
+ *raw = 0; // OK — this is always non-null
+ }
+};
+
+// --- static_cast from this should not warn ---
+
+struct Bar {
+ int val;
+ void test_deref_static_cast_this() {
+ // static_cast<Base*>(this) — this is always non-null, cast preserves it
+ (*static_cast<Bar*>(this)).val = 42; // OK — this is non-null
+ }
+};
+
+struct DerivedBar : Base {
+ void test_deref_cast_this_to_base() {
+ (*static_cast<Base*>(this)).x = 1; // OK — this is non-null
+ }
+};
+
+// --- address-of through cast should not warn ---
+
+struct Baz {
+ int z;
+};
+
+void test_deref_cast_addr_of(Baz& other) {
+ // &other is always non-null; cast preserves that
+ (*static_cast<Baz*>(&other)).z = 1; // OK — address-of is non-null
+}
+
+void test_deref_cast_addr_of_different_type(Derived& d) {
+ (*static_cast<Base*>(&d)).x = 1; // OK — address-of is non-null
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-chained-deref.cpp b/clang/test/Sema/flow-nullability-chained-deref.cpp
new file mode 100644
index 0000000000000..0cae1c67e7cf8
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-chained-deref.cpp
@@ -0,0 +1,263 @@
+// Tests for chained/nested dereference patterns with nullable pointers.
+// These patterns are common in real codebases: method chains returning
+// nullable, double-dereference through accessor calls, and multi-level
+// member narrowing.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+ Node * _Nullable left;
+ Node * _Nullable right;
+ Node * _Nullable parent;
+};
+
+struct Container {
+ Node * _Nullable root;
+ Node * _Nullable head;
+ int size;
+};
+
+Node * _Nullable getNode();
+Container * _Nullable getContainer();
+int getInt();
+
+#pragma clang assume_nonnull begin
+
+// === Direct chained dereference: getNode()->value ===
+// Calling a function that returns nullable, then immediately accessing a member.
+
+void test_direct_chain_warns() {
+ int v = getNode()->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ (void)v;
+}
+
+void test_direct_chain_guarded() {
+ Node * _Nullable n = getNode();
+ if (n) {
+ int v = n->value; // OK
+ (void)v;
+ }
+}
+
+// === Double chain: getContainer()->root->value ===
+
+void test_double_chain_warns() {
+ (void)getContainer()->root; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_double_chain_partial_guard() {
+ Container * _Nullable c = getContainer();
+ if (c) {
+ c->root->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+void test_double_chain_full_guard() {
+ Container * _Nullable c = getContainer();
+ if (c && c->root) {
+ c->root->value = 1; // OK — both narrowed
+ }
+}
+
+// === Triple chain through linked list ===
+
+void test_triple_chain(Node * _Nullable head) {
+ if (head && head->next && head->next->next) {
+ head->next->next->value = 42; // OK — all three narrowed
+ }
+}
+
+// === Known limitation: multi-level member narrowing ===
+// The analysis tracks (VarDecl, FieldDecl) pairs for member narrowing.
+// When head->next is narrowed, accessing head->next->next is a dereference
+// of the narrowed member. The analysis currently does not re-check that
+// the result (head->next->next) is also nullable. This is a false negative
+// accepted for the current intraprocedural design.
+
+void test_triple_chain_partial(Node * _Nullable head) {
+ if (head && head->next) {
+ // head->next is narrowed, but head->next->next is still nullable.
+ // The analysis currently does not warn here (accepted false negative).
+ head->next->next->value = 42; // no warning (known limitation)
+ }
+}
+
+// === Method return chaining ===
+
+struct Builder {
+ Node * _Nullable node;
+
+ Builder * _Nullable setNode(Node * _Nonnull n) {
+ node = n;
+ return this;
+ }
+
+ Node * _Nullable getResult() {
+ return node;
+ }
+};
+
+Builder * _Nullable getBuilder();
+
+void test_builder_chain_warns() {
+ getBuilder()->getResult(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_builder_chain_guarded() {
+ Builder * _Nullable b = getBuilder();
+ if (b) {
+ Node * _Nullable result = b->getResult();
+ if (result) {
+ (void)result->value; // OK — both guarded
+ }
+ }
+}
+
+// === Pointer-to-pointer (T**) ===
+// Known limitation: the analysis tracks narrowing for VarDecls and
+// (VarDecl, FieldDecl) pairs. It does NOT track *pp as a narrowable
+// expression, so even after checking *pp, dereferences through *pp
+// still warn.
+
+void test_ptr_to_ptr(Node * _Nullable * _Nullable pp) {
+ if (pp && *pp) {
+ // *pp was checked but the analysis can't track it
+ (*pp)->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+void test_ptr_to_ptr_via_local(Node * _Nullable * _Nullable pp) {
+ if (!pp) return;
+ Node * _Nullable p = *pp; // capture into a local variable
+ if (p) {
+ p->value = 1; // OK — local variable is tracked
+ }
+}
+
+// === Array of nullable pointers ===
+
+void test_array_of_nullable(Node * _Nullable nodes[], int n) {
+ for (int i = 0; i < n; i++) {
+ Node * _Nullable cur = nodes[i];
+ if (cur) {
+ cur->value = i; // OK — narrowed via local
+ }
+ }
+}
+
+// === Conditional chain: p ? p->next : nullptr, then use ===
+
+void test_conditional_chain(Node * _Nullable p) {
+ Node * _Nullable next = p ? p->next : nullptr;
+ if (next) {
+ next->value = 1; // OK — narrowed
+ }
+}
+
+// === Assignment from chain ===
+
+void test_assign_from_chain() {
+ Node * _Nullable n = getNode();
+ if (!n) return;
+
+ // n is narrowed, but n->next is still nullable
+ Node * _Nullable child = n->next;
+ if (child) {
+ child->value = 1; // OK
+ }
+}
+
+// === Chained access in loop ===
+
+void test_chain_in_loop() {
+ Node * _Nullable head = getNode();
+ for (Node * _Nullable p = head; p; p = p->next) {
+ // p is narrowed by loop condition
+ if (p->left && p->left->right) {
+ p->left->right->value = 0; // OK — all narrowed
+ }
+ }
+}
+
+// === Chained return value ===
+
+Node * _Nullable get_grandchild(Node * _Nullable n) {
+ if (n && n->next) {
+ return n->next->next; // OK — n->next narrowed; returns nullable
+ }
+ return nullptr;
+}
+
+// === Container accessor pattern ===
+
+void test_container_accessor() {
+ Container * _Nullable c = getContainer();
+ if (!c) return;
+
+ // c->root is nullable, need to check
+ if (!c->root) return;
+ c->root->value = 1; // OK — both narrowed
+
+ // But root->next needs separate check
+ if (c->root->next) {
+ c->root->next->value = 2; // OK
+ }
+}
+
+// === Cascade of null-checked returns ===
+
+Node * _Nullable safe_next(Node * _Nullable n) {
+ if (!n) return nullptr;
+ return n->next; // OK — n narrowed
+}
+
+void test_cascade() {
+ Node * _Nullable n = getNode();
+ Node * _Nullable child = safe_next(n);
+ // child is nullable (return type says so), need to check
+ if (child) {
+ child->value = 1; // OK
+ }
+}
+
+// === Nested struct with multiple nullable fields ===
+
+struct Tree {
+ int data;
+ Tree * _Nullable left;
+ Tree * _Nullable right;
+ Tree * _Nullable parent;
+};
+
+void test_tree_traversal(Tree * _Nullable root) {
+ if (!root) return;
+
+ // Check left subtree
+ if (root->left) {
+ root->left->data = 1; // OK
+ if (root->left->left) {
+ root->left->left->data = 2; // OK — deeply narrowed
+ }
+ }
+
+ // Check right subtree — independent narrowing
+ if (root->right && root->right->parent) {
+ root->right->parent->data = 3; // OK
+ }
+}
+
+// === Chained dereference with reassignment invalidation ===
+
+void test_chain_invalidation(Node * _Nullable p) {
+ if (p && p->next) {
+ p->next->value = 1; // OK — both narrowed
+ p = getNode(); // reassign p — narrowing gone
+ // p is now nullable again
+ if (p) {
+ p->value = 2; // OK — re-narrowed
+ }
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-complex-cfg.cpp b/clang/test/Sema/flow-nullability-complex-cfg.cpp
new file mode 100644
index 0000000000000..e62f888fcc793
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-complex-cfg.cpp
@@ -0,0 +1,190 @@
+// Tests for complex control flow graph patterns and intersect semantics.
+// The analysis intersects narrowing at merge points — a variable is only
+// narrowed after a merge if ALL incoming paths agree it's narrowed.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wno-nullable-to-nonnull-conversion -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node * _Nullable left;
+ Node * _Nullable right;
+};
+
+Node * _Nullable getNode();
+Node * _Nonnull getSafeNode();
+int getInt();
+
+#pragma clang assume_nonnull begin
+
+// === Diamond: both branches narrow — merge keeps narrowing ===
+
+void test_diamond_both_narrow(Node * _Nullable p) {
+ if (getInt()) {
+ if (!p) return;
+ } else {
+ if (!p) return;
+ }
+ (void)p->value; // OK — narrowed on both paths
+}
+
+// === Diamond: only one branch narrows — merge loses narrowing ===
+
+void test_diamond_one_narrows(Node * _Nullable p) {
+ if (getInt()) {
+ if (!p) return;
+ // narrowed here
+ } else {
+ // NOT narrowed here
+ }
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Diamond: both branches assign nonnull ===
+
+void test_diamond_assign_both(int * _Nullable p) {
+ int x = 0, y = 0;
+ if (getInt()) {
+ p = &x;
+ } else {
+ p = &y;
+ }
+ (void)*p; // OK — both branches assign nonnull (address-of)
+}
+
+// === Diamond: one branch narrows via early return, other doesn't ===
+
+void test_diamond_assign_one(Node * _Nullable p) {
+ int x;
+ if (getInt()) {
+ // p unchanged — still nullable
+ } else {
+ if (!p) return;
+ }
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Nested if-else (3 levels) ===
+
+void test_nested_three_levels(Node * _Nullable p, Node * _Nullable q) {
+ if (!p) return;
+ if (getInt()) {
+ if (!q) return;
+ (void)p->value; // OK
+ (void)q->value; // OK
+ } else {
+ (void)p->value; // OK — outer guard still holds
+ (void)q->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === Loop with multiple exit points ===
+
+void test_loop_multi_exit(Node * _Nullable p) {
+ for (int i = 0; i < 10; i++) {
+ if (!p) break;
+ (void)p->value; // OK — narrowed by break guard
+ }
+ // After loop, p may or may not be narrowed (break path vs normal exit)
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Sequential narrowing ===
+
+void test_sequential_narrow(Node * _Nullable p, Node * _Nullable q, Node * _Nullable r) {
+ if (!p) return;
+ if (!q) return;
+ if (!r) return;
+ (void)p->value; // OK
+ (void)q->value; // OK
+ (void)r->value; // OK
+}
+
+// === Narrowing lost after reassignment in one branch ===
+
+void test_reassign_in_branch(Node * _Nullable p) {
+ if (!p) return;
+ // p is narrowed
+ if (getInt()) {
+ p = getNode(); // reassigned to nullable
+ }
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Do-while loop ===
+
+void test_do_while(Node * _Nullable p) {
+ if (!p) return;
+ do {
+ (void)p->value; // OK — narrowed on entry, loop back-edge preserves
+ } while (getInt() && p);
+}
+
+// === Nested loops ===
+
+void test_nested_loops(Node * _Nullable p) {
+ if (!p) return;
+ for (int i = 0; i < 10; i++) {
+ for (int j = 0; j < 10; j++) {
+ (void)p->value; // OK — narrowed, loops don't invalidate
+ }
+ }
+}
+
+// === Switch with fallthrough ===
+
+void test_switch_fallthrough(Node * _Nullable p) {
+ switch (getInt()) {
+ case 0:
+ if (!p) return;
+ // fallthrough — narrowing from case 0
+ [[fallthrough]];
+ case 1:
+ // Reached from case 0 (narrowed) OR case 1 (not narrowed)
+ // Intersect: NOT narrowed
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ break;
+ default:
+ break;
+ }
+}
+
+// === Post-loop narrowing via break vs normal exit ===
+
+void test_post_loop_narrowing(Node * _Nullable p) {
+ while (true) {
+ if (p) break; // exit loop only when p is non-null
+ p = getNode();
+ }
+ // After loop, we only exit via break where p was non-null
+ (void)p->value; // OK — only exit is via break where p is narrowed
+}
+
+// === if-else with return in both branches ===
+
+Node * _Nonnull test_both_return(Node * _Nullable p) {
+ if (p) {
+ return p; // OK
+ } else {
+ return getSafeNode();
+ }
+}
+
+// === Ternary chain ===
+
+void test_ternary_chain(Node * _Nullable a, Node * _Nullable b, Node * _Nullable c) {
+ Node *picked = a ? a : (b ? b : c);
+ // picked may be c which is nullable
+ if (picked)
+ (void)picked->value; // OK — narrowed
+}
+
+// === Back-edge invalidation in while loop ===
+
+void test_while_reassign(Node * _Nullable p) {
+ while (p) {
+ (void)p->value; // OK — narrowed by while condition
+ p = p->left; // reassign — p may become null
+ // Back-edge: p is now potentially null, but while condition re-checks
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-compound-conditions.cpp b/clang/test/Sema/flow-nullability-compound-conditions.cpp
new file mode 100644
index 0000000000000..7929b3bef649d
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-compound-conditions.cpp
@@ -0,0 +1,219 @@
+// Tests for complex boolean expressions in null guards.
+// The CFG decomposes && and || into separate blocks. getTerminalCondition()
+// follows the RHS to find the leaf condition. This tests the full range
+// of compound condition patterns.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wno-nullable-to-nonnull-conversion -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+ Node * _Nullable child;
+};
+
+int getInt();
+Node * _Nullable getNode();
+
+#pragma clang assume_nonnull begin
+
+// === p && q — both narrowed in body ===
+
+void test_and_both_narrowed(Node * _Nullable p, Node * _Nullable q) {
+ if (p && q) {
+ (void)p->value; // OK
+ (void)q->value; // OK
+ }
+}
+
+// === p || q — neither narrowed in body ===
+
+void test_or_neither_narrowed(Node * _Nullable p, Node * _Nullable q) {
+ if (p || q) {
+ // Can't tell which one is non-null
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ (void)q->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === !p || !q early return — both narrowed after ===
+
+void test_demorgan_return(Node * _Nullable p, Node * _Nullable q) {
+ if (!p || !q) return;
+ // De Morgan: both must be non-null to reach here
+ (void)p->value; // OK
+ (void)q->value; // OK
+}
+
+// === p && p->next — chained narrowing ===
+
+void test_chain_narrowing(Node * _Nullable p) {
+ if (p && p->next) {
+ (void)p->value; // OK — p narrowed by first condition
+ (void)p->next->value; // OK — p->next narrowed by second condition
+ }
+}
+
+// === Triple && ===
+
+void test_triple_and(Node * _Nullable a, Node * _Nullable b, Node * _Nullable c) {
+ if (a && b && c) {
+ (void)a->value; // OK
+ (void)b->value; // OK
+ (void)c->value; // OK
+ }
+}
+
+// === Negated && — both narrowed after early return ===
+
+void test_negated_and(Node * _Nullable p, Node * _Nullable q) {
+ if (!(p && q)) return;
+ // De Morgan equivalent: both must be non-null to reach here
+ (void)p->value; // OK
+ (void)q->value; // OK
+}
+
+// === != nullptr with && ===
+
+void test_ne_null_and(Node * _Nullable p, Node * _Nullable q) {
+ if (p != nullptr && q != nullptr) {
+ (void)p->value; // OK
+ (void)q->value; // OK
+ }
+}
+
+// === == nullptr with || early return ===
+
+void test_eq_null_or_return(Node * _Nullable p, Node * _Nullable q) {
+ if (p == nullptr || q == nullptr) return;
+ (void)p->value; // OK
+ (void)q->value; // OK
+}
+
+// === Mixed condition: pointer check && value check ===
+
+void test_mixed_condition(Node * _Nullable p) {
+ if (p && p->value > 0) {
+ (void)p->value; // OK — p narrowed by first part of &&
+ }
+}
+
+// === Condition with function call ===
+
+bool isValid(Node * _Nonnull p);
+
+void test_condition_with_call(Node * _Nullable p) {
+ if (p && isValid(p)) {
+ (void)p->value; // OK — p narrowed by first part of &&
+ }
+}
+
+// === Nested && in while ===
+
+void test_while_and(Node * _Nullable p) {
+ while (p && p->next) {
+ (void)p->value; // OK
+ p = p->next; // p->next is nullable, so p may become null — but the while condition re-checks
+ }
+}
+
+// === Short-circuit in for condition ===
+
+void test_for_and_condition(Node * _Nullable p) {
+ for (int i = 0; p && i < 10; i++) {
+ (void)p->value; // OK — narrowed by for condition
+ }
+}
+
+// === Ternary with null check ===
+
+void test_ternary_null_check(Node * _Nullable p) {
+ int v = p ? p->value : -1; // OK — p narrowed in true branch
+}
+
+// === Multiple ternaries ===
+
+void test_multi_ternary(Node * _Nullable a, Node * _Nullable b) {
+ int v = a ? a->value : (b ? b->value : 0); // OK — each narrowed in its branch
+}
+
+// === Boolean intermediary from null check ===
+
+void test_bool_intermediary(Node * _Nullable p) {
+ bool valid = (p != nullptr);
+ if (valid) {
+ (void)p->value; // OK — tracked through bool guard
+ }
+}
+
+// === Boolean intermediary: equality check ===
+
+void test_bool_eq_null(Node * _Nullable p) {
+ bool isNull = (p == nullptr);
+ if (!isNull) {
+ (void)p->value; // OK — !isNull means p is non-null
+ }
+}
+
+// === Boolean intermediary: pointer truthiness ===
+
+void test_bool_truthiness(Node * _Nullable p) {
+ bool valid = p;
+ if (valid) {
+ (void)p->value; // OK — valid means p is truthy (non-null)
+ }
+}
+
+// === Boolean intermediary: negated pointer ===
+
+void test_bool_negated_ptr(Node * _Nullable p) {
+ bool isNull = !p;
+ if (!isNull) {
+ (void)p->value; // OK — !isNull means p is non-null
+ }
+ if (isNull) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === Boolean intermediary invalidated by pointer reassignment ===
+
+void test_bool_ptr_reassigned(Node * _Nullable p, Node * _Nullable q) {
+ bool valid = (p != nullptr);
+ p = q; // reassign p — guard is now stale
+ if (valid) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === Boolean intermediary invalidated by bool reassignment ===
+
+void test_bool_reassigned(Node * _Nullable p) {
+ bool valid = (p != nullptr);
+ valid = false; // reassign bool — guard is gone
+ if (valid) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === Negated && with triple condition ===
+
+void test_negated_triple_and(Node * _Nullable a, Node * _Nullable b, Node * _Nullable c) {
+ if (!(a && b && c)) return;
+ (void)a->value; // OK
+ (void)b->value; // OK
+ (void)c->value; // OK
+}
+
+// === Negated && in if-else (body should NOT narrow) ===
+
+void test_negated_and_body(Node * _Nullable p, Node * _Nullable q) {
+ if (!(p && q)) {
+ // At least one is null — can't narrow either
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ } else {
+ // Both non-null
+ (void)p->value; // OK
+ (void)q->value; // OK
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-conversion-op.cpp b/clang/test/Sema/flow-nullability-conversion-op.cpp
new file mode 100644
index 0000000000000..a213e8bbf63d3
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-conversion-op.cpp
@@ -0,0 +1,38 @@
+// Tests that conversion operators (operator T*()) don't trigger spurious
+// nullability-inference warnings. Without the IK_ConversionFunctionId
+// exclusion in SemaType.cpp, the compiler would try to infer nullability
+// on the return type of operator void*(), causing errors.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nonnull -std=c++17 %s -verify
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+typedef void* bool_type;
+
+struct ConvertToRawPtr {
+ void* data;
+ operator void*() const { return data; }
+};
+
+struct ConvertToTypedef {
+ bool_type data;
+ operator bool_type() const { return data; }
+};
+
+struct ConvertToNonPointer {
+ int value;
+ operator int() const { return value; }
+};
+
+void test_conversions() {
+ ConvertToRawPtr a;
+ void* p = a;
+
+ ConvertToTypedef b;
+ bool_type q = b;
+
+ ConvertToNonPointer c;
+ int n = c;
+}
+
+void test_deref_still_warns(int* _Nullable p) {
+ (void)*p; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
diff --git a/clang/test/Sema/flow-nullability-coroutines.cpp b/clang/test/Sema/flow-nullability-coroutines.cpp
new file mode 100644
index 0000000000000..e4dd1accd7d38
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-coroutines.cpp
@@ -0,0 +1,117 @@
+// Tests for coroutine interactions with flow-sensitive nullability analysis.
+// Coroutines introduce suspension points where control flow is non-obvious.
+// The analysis should handle co_await/co_yield/co_return without crashing
+// and without producing spurious warnings.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++20 -I%S/../SemaCXX/Inputs %s -verify
+
+#include "std-coroutine.h"
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+};
+
+// --- Generator coroutine type ---
+
+struct Generator {
+ struct promise_type {
+ Node * _Nullable current;
+ Generator get_return_object() { return {}; }
+ std::suspend_always initial_suspend() { return {}; }
+ std::suspend_always final_suspend() noexcept { return {}; }
+ void unhandled_exception() {}
+ std::suspend_always yield_value(Node * _Nullable val) {
+ current = val;
+ return {};
+ }
+ void return_void() {}
+ };
+};
+
+// --- Task coroutine type ---
+
+struct Task {
+ struct promise_type {
+ Task get_return_object() { return {}; }
+ std::suspend_never initial_suspend() { return {}; }
+ std::suspend_always final_suspend() noexcept { return {}; }
+ void unhandled_exception() {}
+ void return_void() {}
+ };
+};
+
+// --- Awaitable that returns a nullable pointer ---
+
+struct NullableAwaitable {
+ bool await_ready() const noexcept { return true; }
+ void await_suspend(std::coroutine_handle<>) const noexcept {}
+ Node * _Nullable await_resume() const noexcept { return nullptr; }
+};
+
+#pragma clang assume_nonnull begin
+
+// === Basic coroutine with nullable check ===
+
+Generator yield_nodes(Node * _Nullable head) {
+ for (Node * _Nullable p = head; p; p = p->next) {
+ (void)p->value; // OK — narrowed by loop condition
+ co_yield p;
+ }
+}
+
+// === co_await returning nullable ===
+
+Task consume_awaitable() {
+ NullableAwaitable awaitable;
+ Node * _Nullable result = co_await awaitable;
+ if (result) {
+ (void)result->value; // OK — narrowed
+ }
+ co_return;
+}
+
+// === Null check before co_yield ===
+
+Generator guarded_yield(Node * _Nullable n) {
+ if (n) {
+ (void)n->value; // OK — narrowed
+ co_yield n;
+ (void)n->value; // OK — still narrowed (no reassignment)
+ }
+}
+
+// === Multiple co_yields with independent checks ===
+
+Generator multi_yield(Node * _Nullable a, Node * _Nullable b) {
+ if (a) {
+ co_yield a;
+ }
+ if (b) {
+ co_yield b;
+ }
+}
+
+// === Coroutine with nonnull parameter ===
+
+Generator nonnull_param(Node * _Nonnull n) {
+ (void)n->value; // OK — _Nonnull
+ co_yield n;
+ (void)n->value; // OK — _Nonnull
+}
+
+// === Unchecked nullable deref in coroutine body — should warn ===
+
+Task test_unchecked_deref_in_coroutine(Node * _Nullable p) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ co_return;
+}
+
+// === co_await result used without check — should warn ===
+
+Task test_unchecked_co_await_result() {
+ NullableAwaitable awaitable;
+ Node * _Nullable result = co_await awaitable;
+ (void)result->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-default-nonnull.cpp b/clang/test/Sema/flow-nullability-default-nonnull.cpp
new file mode 100644
index 0000000000000..8ec744b9c2f24
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-default-nonnull.cpp
@@ -0,0 +1,48 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nonnull -std=c++17 %s -verify
+
+struct Entity {
+ int x;
+};
+
+Entity* _Nullable getNullable();
+Entity* getUnannotated();
+
+#pragma clang assume_nonnull begin
+
+void test_unannotated_param_no_warn(Entity* p) {
+ p->x = 1; // OK - parameter gets _Nonnull from assume_nonnull pragma
+}
+
+void test_unannotated_star(Entity* p) {
+ (*p).x = 1; // OK - parameter gets _Nonnull from pragma
+}
+
+void test_explicit_nullable_warns(Entity* _Nullable p) {
+ p->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_explicit_nullable_after_check(Entity* _Nullable p) {
+ if (p) {
+ p->x = 1; // OK - narrowed
+ }
+}
+
+void test_return_nullable_warns() {
+ Entity* e = getNullable();
+ e->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// With -fnullability-default=nonnull, unannotated pointers are treated as
+// nonnull. getUnannotated() has no _Nullable, so it's safe.
+void test_return_unannotated_ok() {
+ Entity* e = getUnannotated();
+ e->x = 1; // OK - unannotated return treated as nonnull per default
+}
+
+void test_local_nonnull_ok() {
+ Entity stack;
+ Entity* _Nonnull p = &stack;
+ p->x = 1; // OK - explicit _Nonnull
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-duplicate-diag.cpp b/clang/test/Sema/flow-nullability-duplicate-diag.cpp
new file mode 100644
index 0000000000000..6ee029fdf17ea
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-duplicate-diag.cpp
@@ -0,0 +1,37 @@
+// Verify no confusing duplicate diagnostics when both
+// -Wnullable-to-nonnull-conversion and -Wflow-nullable-dereference are active.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wnullable-to-nonnull-conversion -std=c++17 %s -verify
+
+#pragma clang assume_nonnull begin
+
+void take_nonnull(int * _Nonnull p);
+
+// Passing nullable to nonnull param: only the conversion warning fires.
+// The flow analysis then narrows the pointer, so no deref warning after.
+void test_pass_to_nonnull(int * _Nullable p) {
+ take_nonnull(p); // expected-warning{{implicit conversion from nullable pointer 'int * _Nullable' to non-nullable pointer type 'int * _Nonnull'}}
+ *p = 42; // OK — narrowed by nonnull call, no second warning
+}
+
+// Direct dereference of nullable: only the flow warning fires,
+// not the conversion warning (there's no nonnull destination type).
+void test_deref_only(int * _Nullable p) {
+ *p = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// Assignment to nonnull variable from nullable: only the conversion warning.
+void test_assign_to_nonnull(int * _Nullable p) {
+ int * _Nonnull q = p; // expected-warning{{implicit conversion from nullable}}
+}
+
+// After null check: flow analysis knows p is non-null, so no deref warning.
+// But -Wnullable-to-nonnull-conversion is type-based, not flow-based — the
+// declared type is still _Nullable, so the conversion warning persists.
+// This is correct: the two warnings serve different purposes.
+void test_checked(int * _Nullable p) {
+ if (!p) return;
+ take_nonnull(p); // expected-warning{{implicit conversion from nullable}}
+ *p = 42; // OK — narrowed, no deref warning
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-else-branch.cpp b/clang/test/Sema/flow-nullability-else-branch.cpp
new file mode 100644
index 0000000000000..7b2d34fe6ac9f
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-else-branch.cpp
@@ -0,0 +1,65 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Entity {
+ int x;
+};
+
+#pragma clang assume_nonnull begin
+
+// === Else-branch narrowing for single negated check ===
+
+void test_else_simple(Entity* _Nullable p) {
+ if (!p) {
+ return;
+ } else {
+ p->x = 1; // OK - narrowed in else branch
+ }
+}
+
+// === Else-branch narrowing for compound OR conditions ===
+
+void test_else_or_two_vars(Entity* _Nullable p, Entity* _Nullable q) {
+ if (!p || !q) {
+ return;
+ } else {
+ p->x = q->x; // OK - both narrowed in else branch
+ }
+}
+
+void test_else_or_three_vars(Entity* _Nullable p, Entity* _Nullable q, Entity* _Nullable r) {
+ if (!p || !q || !r) {
+ return;
+ } else {
+ p->x = q->x + r->x; // OK - all narrowed in else branch
+ }
+}
+
+// === Early-return (no else) still works for OR ===
+
+void test_early_return_or(Entity* _Nullable p, Entity* _Nullable q) {
+ if (!p || !q)
+ return;
+ p->x = q->x; // OK - both narrowed after early return
+}
+
+// === Positive check with else should NOT narrow in else ===
+
+void test_positive_else_no_narrow(Entity* _Nullable p) {
+ if (p) {
+ p->x = 1; // OK - narrowed in then branch
+ } else {
+ p->x = 2; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// === Member expression narrowing in else ===
+
+void test_else_member_narrowing(Entity* _Nullable p) {
+ if (!p) {
+ // p is null here
+ } else {
+ p->x = 1; // OK - narrowed in else
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-exceptions.cpp b/clang/test/Sema/flow-nullability-exceptions.cpp
new file mode 100644
index 0000000000000..45912c34f9f19
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-exceptions.cpp
@@ -0,0 +1,97 @@
+// Tests for exception handling interactions with flow-sensitive nullability.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 -fcxx-exceptions %s -verify
+
+struct Node {
+ int value;
+};
+
+Node * _Nullable getNode();
+
+#pragma clang assume_nonnull begin
+
+// === Narrowing before try block persists inside ===
+
+void test_narrow_before_try(Node * _Nullable p) {
+ if (!p) return;
+ try {
+ (void)p->value; // OK — narrowed before try
+ } catch (...) {
+ }
+}
+
+// === throw in null guard narrows ===
+
+void test_throw_guard(Node * _Nullable p) {
+ if (!p) throw "null";
+ (void)p->value; // OK — throw terminates null path
+}
+
+// === try body with null check ===
+
+void test_null_check_in_try(Node * _Nullable p) {
+ try {
+ if (!p) throw "null";
+ (void)p->value; // OK — narrowed by throw guard
+ } catch (...) {
+ }
+}
+
+// === catch block should not inherit narrowing from try ===
+// After the try block, if code falls through, the narrowing from
+// inside try may or may not hold depending on CFG edges.
+
+void test_after_try_catch(Node * _Nullable p) {
+ try {
+ if (p)
+ (void)p->value; // OK — narrowed
+ } catch (...) {
+ }
+ // After try/catch, p's narrowing depends on merge of try and catch edges.
+ // Conservative: should warn.
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Narrowing in both try and catch ===
+
+void test_narrow_in_both(Node * _Nullable p) {
+ try {
+ if (!p) throw "null";
+ (void)p->value; // OK
+ } catch (...) {
+ if (!p) return;
+ (void)p->value; // OK — narrowed in catch too
+ }
+ // Both try (throw guard) and catch (early return) narrowed p,
+ // so the merge point should preserve narrowing.
+ (void)p->value; // OK — narrowed on all paths
+}
+
+// === Multiple catch blocks ===
+
+void test_multiple_catch(Node * _Nullable p) {
+ if (!p) return;
+ try {
+ (void)p->value; // OK — narrowed
+ } catch (int) {
+ // narrowing may not persist through exception edges
+ } catch (...) {
+ }
+}
+
+// === throw expression in ternary ===
+
+void test_throw_ternary(Node * _Nullable p) {
+ // throw is a valid expression in the false branch of a ternary.
+ // The CFG models it as a terminating path, so p is narrowed.
+ int v = p ? p->value : throw "null"; // OK — throw terminates null path
+ (void)v;
+}
+
+// === Noexcept function — no exception CFG edges ===
+
+void test_noexcept_narrowing(Node * _Nullable p) noexcept {
+ if (!p) return;
+ (void)p->value; // OK — narrowed, no exception edges to worry about
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-false-positives.cpp b/clang/test/Sema/flow-nullability-false-positives.cpp
new file mode 100644
index 0000000000000..c2e430aa6d715
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-false-positives.cpp
@@ -0,0 +1,225 @@
+// False positive regression suite for flow-sensitive nullability analysis.
+// Every test case in this file must produce NO warnings. These represent
+// common C++ patterns that an overly-aggressive analysis might flag.
+// This file is the most important for reviewer confidence.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+};
+
+int getInt();
+Node * _Nullable getNode();
+
+#pragma clang assume_nonnull begin
+
+// === Conditional initialization on all paths ===
+
+void test_conditional_init(bool cond) {
+ int x = 0, y = 0;
+ int *p;
+ if (cond) {
+ p = &x;
+ } else {
+ p = &y;
+ }
+ (void)*p; // OK — assigned nonnull on both paths
+}
+
+// === Static local variable ===
+
+void test_static_local() {
+ static int x = 42;
+ int *p = &x;
+ (void)*p; // OK — address-of is always nonnull
+}
+
+// === Global variable access ===
+
+int g_value = 0;
+
+void test_global_addr() {
+ int *p = &g_value;
+ (void)*p; // OK — address-of
+}
+
+// === Function pointer call ===
+
+typedef int (*IntFn)(int);
+
+void test_fn_ptr(IntFn fn) {
+ // Function pointers are not subject to null dereference checking
+ // in the same way — they're called, not dereferenced with *
+ int result = fn(42); // OK — no * dereference
+}
+
+// === Chained method calls on nonnull ===
+
+struct Builder {
+ Builder *setX(int) { return this; }
+ Builder *setY(int) { return this; }
+ int build() { return 0; }
+};
+
+void test_builder_pattern() {
+ Builder b;
+ // this is always nonnull, so chained -> returns are fine
+ b.setX(1)->setY(2)->build(); // OK — this is nonnull
+}
+
+// === Address-of array element ===
+
+void test_array_element_addr() {
+ int arr[10];
+ int *p = &arr[5];
+ (void)*p; // OK — address-of
+}
+
+// === Pointer to member of stack object ===
+
+void test_member_addr() {
+ Node n;
+ int *p = &n.value;
+ (void)*p; // OK — address-of
+}
+
+// === Ternary with nonnull on both sides ===
+
+void test_ternary_both_nonnull(bool cond) {
+ int x = 1, y = 2;
+ int *p = cond ? &x : &y;
+ (void)*p; // OK — nonnull on both branches
+}
+
+// === Cast of nonnull ===
+
+void test_cast_nonnull() {
+ int x = 42;
+ void *vp = &x;
+ int *ip = static_cast<int *>(vp);
+ (void)*ip; // OK — source was nonnull (address-of)
+}
+
+// === new expression ===
+
+void test_throwing_new() {
+ int *p = new int(42);
+ (void)*p; // OK — throwing new never returns null
+}
+
+// === Multiple checks, then use ===
+
+void test_multi_check(Node * _Nullable a, Node * _Nullable b, Node * _Nullable c) {
+ if (a && b && c) {
+ (void)a->value; // OK
+ (void)b->value; // OK
+ (void)c->value; // OK
+ }
+}
+
+// === Reassign to nonnull after nullable ===
+
+void test_reassign_nonnull() {
+ int x;
+ int * _Nullable p = nullptr;
+ p = &x;
+ (void)*p; // OK — reassigned to nonnull
+}
+
+// === Loop variable always nonnull ===
+
+void test_loop_var() {
+ int arr[10];
+ for (int i = 0; i < 10; i++) {
+ int *p = &arr[i];
+ (void)*p; // OK — address-of
+ }
+}
+
+// === Nested struct access on nonnull ===
+
+struct Outer {
+ Node node;
+};
+
+void test_nested_nonnull_access() {
+ Outer o;
+ int v = o.node.value; // OK — dot access on stack object, no deref
+}
+
+// === Pointer arithmetic on nonnull ===
+
+void test_ptr_arith() {
+ int arr[10];
+ int *p = arr; // array decays to pointer — nonnull
+ int *q = arr + 5; // arithmetic on nonnull — still nonnull
+ (void)*q; // OK
+}
+
+// === Reference binding ===
+
+void test_reference(int * _Nonnull p) {
+ int &ref = *p; // OK — _Nonnull
+ ref = 42;
+}
+
+// === Comma operator with pointer ===
+
+void test_comma_op() {
+ int x;
+ int *p = (getInt(), &x);
+ (void)*p; // OK — comma evaluates to &x which is nonnull
+}
+
+// === Narrowing survives function calls ===
+// The analysis correctly does NOT invalidate narrowing on function calls:
+// the callee receives a copy of the pointer and cannot modify the caller's local `p`, whose address never escapes.
+
+void external_fn();
+
+void test_narrowing_survives_call(Node * _Nullable p) {
+ if (!p) return;
+ external_fn();
+ (void)p->value; // OK — function call doesn't invalidate narrowing
+}
+
+// === sizeof/alignof don't dereference ===
+
+void test_sizeof_no_deref(Node * _Nullable p) {
+ auto s = sizeof(*p); // OK — sizeof doesn't evaluate its operand
+ (void)s;
+}
+
+// === decltype doesn't dereference ===
+
+void test_decltype_no_deref(Node * _Nullable p) {
+ using T = decltype(p->value); // OK — decltype is unevaluated
+ T x = 0;
+ (void)x;
+}
+
+// === this pointer in member functions ===
+
+struct Obj {
+ int x;
+ void method() {
+ this->x = 1; // OK — this is never null
+ (*this).x = 2; // OK — *this is suppressed
+ }
+};
+
+// === Non-std iterator dereference ===
+
+struct MyIterator {
+ Node *current;
+ Node &operator*() { return *current; }
+ Node *operator->() { return current; }
+};
+
+void test_iterator_deref(MyIterator it) {
+ (void)it->value; // OK — non-std operator-> is not checked
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-for-loop.cpp b/clang/test/Sema/flow-nullability-for-loop.cpp
new file mode 100644
index 0000000000000..1fe387d8661de
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-for-loop.cpp
@@ -0,0 +1,25 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Node {
+ int value;
+ Node* _Nullable next;
+};
+
+#pragma clang assume_nonnull begin
+
+// === For-loop increment under condition narrowing ===
+
+void test_for_loop_linked_list(Node* _Nullable head) {
+ for (Node* _Nullable p = head; p; p = p->next) {
+ p->value = 0; // OK - p narrowed from condition
+ }
+}
+
+void test_for_loop_simple_increment(Node* _Nullable p) {
+ for (; p; p = p->next) {
+ p->value = 0; // OK
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-gradual-adoption.cpp b/clang/test/Sema/flow-nullability-gradual-adoption.cpp
new file mode 100644
index 0000000000000..5d61dc41efc90
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-gradual-adoption.cpp
@@ -0,0 +1,87 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=unspecified -std=c++17 %s -verify
+
+struct Entity {
+ int x;
+ int value() const { return x; }
+};
+
+Entity* _Nullable getHead();
+Entity* _Nullable getChest();
+Entity* getUnannotated();
+
+// === OUTSIDE pragma: warnings on explicit _Nullable ===
+// Functions with any nullability annotation activate the analysis,
+// even without a pragma or -fnullability-default flag.
+
+void test_outside_pragma_explicit_nullable(Entity* _Nullable p) {
+ p->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ (*p).x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ getHead()->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// Unannotated functions outside pragma still don't activate
+void test_outside_pragma_unannotated(Entity* p) {
+ p->x = 1; // OK - no annotations, analysis not active
+}
+
+// === INSIDE pragma: warnings on explicit _Nullable ===
+
+#pragma clang assume_nonnull begin
+
+void test_explicit_nullable_param_arrow_warns(Entity* _Nullable p) {
+ p->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_explicit_nullable_param_arrow_with_check(Entity* _Nullable p) {
+ if (!p) return;
+ p->x = 1; // OK - narrowed to nonnull
+}
+
+void test_explicit_nullable_param_star_warns(Entity* _Nullable p) {
+ (*p).x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_explicit_nullable_param_star_with_check(Entity* _Nullable p) {
+ if (!p) return;
+ (*p).x = 1; // OK - narrowed to nonnull
+}
+
+void test_chained_nullable_arrow_warns() {
+ getHead()->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_chained_nullable_arrow_method_warns() {
+ int v = getHead()->value(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_unannotated_no_warn() {
+ Entity* e = getUnannotated();
+ e->x = 1; // OK - unannotated pointer, unspecified mode
+ (*e).x = 1; // OK - unannotated pointer, unspecified mode
+}
+
+void test_unannotated_param_no_warn(Entity* p) {
+ p->x = 1; // OK - unannotated param, unspecified mode
+}
+
+// === Lambda scoping: analysis must still run for outer function ===
+// Regression test: lambda bodies call ActOnStartOfFunctionDef, which must
+// not clobber the per-function analysis decision for the enclosing function.
+
+void test_lambda_does_not_clobber_outer(Entity* _Nullable p) {
+ // Lambda with no annotations — should not disable outer analysis
+ auto f = [](int x) { return x + 1; };
+ (void)f(1);
+ p->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_nested_lambda_scoping(Entity* _Nullable p) {
+ auto outer = [](int x) {
+ auto inner = [](int y) { return y; };
+ return inner(x);
+ };
+ (void)outer(1);
+ (*p).x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-if-constexpr.cpp b/clang/test/Sema/flow-nullability-if-constexpr.cpp
new file mode 100644
index 0000000000000..65cc15723f762
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-if-constexpr.cpp
@@ -0,0 +1,55 @@
+// Tests for if constexpr interaction with flow-sensitive nullability.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+#pragma clang assume_nonnull begin
+
+// Known limitation: the warn_null_init_nonnull check fires during
+// declaration processing, before if-constexpr discarding. This means
+// _Nonnull p = nullptr in a discarded branch still warns. Suppressing
+// this would require tracking discarded-branch state at decl processing
+// time, which Clang doesn't expose. In practice, writing explicit
+// _Nonnull p = nullptr in a discarded branch is very rare.
+
+void test_if_constexpr_discarded() {
+ if constexpr (false) {
+ int * _Nonnull p = nullptr; // expected-warning{{null assigned to a variable of nonnull type}}
+ }
+}
+
+// Live branch correctly warns
+void test_if_constexpr_live() {
+ if constexpr (true) {
+ int * _Nonnull p = nullptr; // expected-warning{{null assigned to a variable of nonnull type}}
+ }
+}
+
+// Flow analysis narrowing works in live constexpr branches
+void test_if_constexpr_narrowing(int * _Nullable p) {
+ if constexpr (true) {
+ if (p) {
+ *p = 42; // OK — narrowed
+ }
+ }
+}
+
+// Dereference in live branch warns correctly
+void test_if_constexpr_deref(int * _Nullable p) {
+ if constexpr (true) {
+ *p = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+// Template with if constexpr — both instantiations checked
+template<bool B>
+void template_constexpr_branch() {
+ if constexpr (B) {
+ int * _Nonnull p = nullptr; // expected-warning 2{{null assigned to a variable of nonnull type}}
+ }
+}
+
+void instantiate_both() {
+ template_constexpr_branch<false>();
+ template_constexpr_branch<true>(); // expected-note{{in instantiation}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-lambda.cpp b/clang/test/Sema/flow-nullability-lambda.cpp
new file mode 100644
index 0000000000000..2735b6727d31d
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-lambda.cpp
@@ -0,0 +1,148 @@
+// Tests for lambda capture interactions with flow-sensitive nullability.
+// Lambdas create separate function bodies — the analysis is intraprocedural,
+// so each lambda body is analyzed independently.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Node {
+ int value;
+};
+
+Node * _Nullable getNode();
+
+#pragma clang assume_nonnull begin
+
+// === Capture nullable by value — warns inside lambda ===
+
+void test_capture_nullable_by_value(Node * _Nullable p) {
+ auto f = [p]() {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ };
+ f();
+}
+
+// === Capture narrowed by value — still nullable inside lambda ===
+// Even though p was narrowed before the lambda, the capture creates a new
+// copy. The analysis treats each function body independently.
+
+void test_capture_narrowed_by_value(Node * _Nullable p) {
+ if (p) {
+ auto f = [p]() {
+ // p is captured by value from narrowed context, but the lambda
+ // is a separate function body. The analysis sees p as the
+ // lambda's parameter (implicitly nullable in nullable-default).
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ };
+ f();
+ (void)p->value; // OK — still narrowed in outer scope
+ }
+}
+
+// === Capture by reference — narrowing does not propagate ===
+
+void test_capture_by_ref(Node * _Nullable p) {
+ if (p) {
+ auto f = [&p]() {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ };
+ f();
+ }
+}
+
+// === Lambda with its own null check ===
+
+void test_lambda_own_check(Node * _Nullable p) {
+ auto f = [p]() {
+ if (p)
+ (void)p->value; // OK — narrowed inside lambda
+ };
+ f();
+}
+
+// === Immediately-invoked lambda expression (IIFE) — analyzed like any lambda ===
+
+void test_iife(Node * _Nullable p) {
+ [p]() {
+ if (p)
+ (void)p->value; // OK — narrowed
+ }();
+}
+
+// === Lambda capturing nonnull pointer ===
+
+void test_capture_nonnull(Node * _Nonnull p) {
+ auto f = [p]() {
+ (void)p->value; // OK — _Nonnull captured
+ };
+ f();
+}
+
+// === Generic lambda with auto parameter ===
+
+void test_generic_lambda() {
+ auto f = [](auto * _Nullable p) {
+ if (p)
+ (void)p->value; // OK — narrowed
+ };
+ Node * _Nullable n = nullptr;
+ f(n);
+}
+
+// === Lambda returning nullable pointer ===
+
+void test_lambda_return() {
+ Node * _Nullable n = nullptr;
+ auto getter = [&n]() -> Node * _Nullable { return n; };
+ Node * _Nullable result = getter();
+ (void)result->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === Nested lambdas ===
+
+void test_nested_lambda(Node * _Nullable p) {
+ auto outer = [p]() {
+ auto inner = [p]() {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ };
+ inner();
+ };
+ outer();
+}
+
+// === Lambda with no captures — its own parameter is narrowed by early return ===
+
+void test_lambda_no_capture() {
+ auto f = [](Node * _Nullable p) {
+ if (!p) return;
+ (void)p->value; // OK — narrowed by early return
+ };
+ f(nullptr);
+}
+
+// === Mutable lambda modifying captured pointer ===
+
+void test_mutable_capture(Node * _Nullable p) {
+ auto f = [p]() mutable {
+ p = nullptr; // mutate the captured copy
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ };
+ f();
+}
+
+// === Init-capture (C++14) — captures are independent variables ===
+
+void test_init_capture_warns() {
+ auto f = [p = getNode()]() {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ };
+ f();
+}
+
+void test_init_capture_with_check() {
+ auto f = [p = getNode()]() {
+ if (p)
+ (void)p->value; // OK — narrowed inside lambda
+ };
+ f();
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-new-expr.cpp b/clang/test/Sema/flow-nullability-new-expr.cpp
new file mode 100644
index 0000000000000..010b2cb76e664
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-new-expr.cpp
@@ -0,0 +1,42 @@
+// Tests that throwing operator new is treated as _Nonnull (it never returns null),
+// while nothrow operator new is left nullable.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+typedef __SIZE_TYPE__ size_t;
+
+namespace std {
+ struct nothrow_t {};
+ extern const nothrow_t nothrow;
+}
+
+void *operator new(size_t, const std::nothrow_t &) noexcept;
+
+struct Widget {
+ int value;
+};
+
+Widget * _Nullable getNullableWidget();
+
+#pragma clang assume_nonnull begin
+
+void test_new_direct_deref() {
+ Widget *w = new Widget();
+ w->value = 42; // OK - throwing new never returns null
+}
+
+void test_new_var_deref() {
+ Widget *w = new Widget();
+ int v = w->value; // OK - throwing new yields a nonnull result, so w never warns
+}
+
+void test_nothrow_new_warns() {
+ Widget *w = new (std::nothrow) Widget();
+ w->value = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_nullable_control() {
+ Widget *w = getNullableWidget();
+ w->value = 42; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-nonnull-attr.cpp b/clang/test/Sema/flow-nullability-nonnull-attr.cpp
new file mode 100644
index 0000000000000..85dd3dce9441e
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-nonnull-attr.cpp
@@ -0,0 +1,91 @@
+// Tests for __attribute__((nonnull)) interactions with flow-sensitive nullability.
+// The analysis already handles NonNullAttr for parameter narrowing — this
+// provides comprehensive test coverage.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wno-nullable-to-nonnull-conversion -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Node {
+ int value;
+};
+
+#pragma clang assume_nonnull begin
+
+// === Function-level __attribute__((nonnull)) ===
+
+__attribute__((nonnull))
+void consume_all(Node *a, Node *b) {
+ // params are nonnull by attribute
+}
+
+void test_fn_level_nonnull(Node * _Nullable p, Node * _Nullable q) {
+ if (!p || !q) return;
+ consume_all(p, q); // OK — both narrowed
+ // After passing to nonnull function, narrowing is preserved
+ (void)p->value; // OK
+ (void)q->value; // OK
+}
+
+// === Parameter-level __attribute__((nonnull(1,3))) ===
+
+__attribute__((nonnull(1, 3)))
+void consume_specific(Node *a, Node * _Nullable b, Node *c) {
+ // a and c are nonnull by attribute
+}
+
+void test_param_level_nonnull(Node * _Nullable p, Node * _Nullable q, Node * _Nullable r) {
+ consume_specific(p, q, r);
+ // After call: p and r were passed to nonnull params, so they're narrowed
+ (void)p->value; // OK — narrowed by passing to nonnull param 1
+ (void)r->value; // OK — narrowed by passing to nonnull param 3
+}
+
+// === __attribute__((returns_nonnull)) ===
+
+__attribute__((returns_nonnull))
+Node *createSafe();
+
+void test_returns_nonnull() {
+ Node *p = createSafe();
+ (void)p->value; // OK — _Nonnull return type
+}
+
+// === Narrowing via _Nonnull parameter (type qualifier, not attribute) ===
+
+void take_nonnull(Node * _Nonnull p) {}
+
+void test_type_qualifier_narrowing(Node * _Nullable p) {
+ take_nonnull(p);
+ (void)p->value; // OK — narrowed by passing to _Nonnull param
+}
+
+// === Multiple calls to nonnull functions ===
+
+void test_multi_call_narrowing(Node * _Nullable a, Node * _Nullable b) {
+ take_nonnull(a);
+ take_nonnull(b);
+ (void)a->value; // OK
+ (void)b->value; // OK
+}
+
+// === A _Nonnull parameter stays narrowed across calls to unrelated functions ===
+
+void unrelated_fn();
+
+void test_nonnull_survives_calls(Node * _Nonnull p) {
+ unrelated_fn();
+ (void)p->value; // OK — _Nonnull parameter, calls don't invalidate
+}
+
+// === GCC-style nonnull attribute on an extern "C" declaration ===
+
+extern "C" {
+ __attribute__((nonnull(1)))
+ void c_consumer(Node *p, int x);
+}
+
+void test_c_fn_nonnull(Node * _Nullable p) {
+ c_consumer(p, 42);
+ (void)p->value; // OK — narrowed by passing to nonnull param
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-nonnull-param-narrowing.cpp b/clang/test/Sema/flow-nullability-nonnull-param-narrowing.cpp
new file mode 100644
index 0000000000000..fd8a197d53da2
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-nonnull-param-narrowing.cpp
@@ -0,0 +1,71 @@
+// Tests that passing a pointer to a _Nonnull parameter narrows it as non-null.
+// Also tests GCC-style __attribute__((nonnull)) which is used by glibc/bionic.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wnullable-to-nonnull-conversion -std=c++17 %s -verify
+
+typedef __SIZE_TYPE__ size_t; // use the target's size_t (unsigned long is wrong on LLP64)
+
+// Simulate system header declarations with _Nonnull params
+size_t my_strlen(const char * _Nonnull s);
+void my_use(const char * _Nonnull s);
+void unannotated_use(const char *s);
+void two_params(const char * _Nonnull a, const char *b);
+
+// GCC-style nonnull attribute (used by glibc strlen, memcpy, etc.)
+size_t gcc_strlen(const char *s) __attribute__((nonnull(1)));
+void gcc_all_nonnull(const char *a, const char *b) __attribute__((nonnull));
+void gcc_partial_nonnull(const char *a, const char *b) __attribute__((nonnull(1)));
+
+#pragma clang assume_nonnull begin
+
+// After passing to a _Nonnull param, the pointer is narrowed — no warning.
+void test_nonnull_param_narrows(const char * _Nullable filePath) {
+ my_strlen(filePath); // expected-warning{{implicit conversion from nullable}}
+ const char c = *filePath; // OK — narrowed by call above
+}
+
+// Passing to an unannotated param does NOT narrow (unannotated_use's param
+// has no annotation, so inside assume_nonnull it becomes implicitly nonnull —
+// but that's an implicit annotation, not an explicit _Nonnull).
+void test_unannotated_param_no_narrow(const char * _Nullable filePath) {
+ unannotated_use(filePath);
+ const char c = *filePath; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// Multiple args: only _Nonnull ones narrow.
+void test_mixed_params(const char * _Nullable a, const char * _Nullable b) {
+ two_params(a, b); // expected-warning{{implicit conversion from nullable}}
+ const char c1 = *a; // OK — narrowed
+ const char c2 = *b; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// Multiple _Nonnull calls in sequence.
+void test_multiple_calls(const char * _Nullable p, const char * _Nullable q) {
+ my_use(p); // expected-warning{{implicit conversion from nullable}}
+ my_strlen(q); // expected-warning{{implicit conversion from nullable}}
+ const char c1 = *p; // OK
+ const char c2 = *q; // OK
+}
+
+// --- GCC-style __attribute__((nonnull)) ---
+
+// nonnull(1) narrows the first arg, like glibc's strlen.
+void test_gcc_nonnull_attr(const char * _Nullable filePath) {
+ gcc_strlen(filePath);
+ const char c = *filePath; // OK — narrowed by gcc nonnull attr
+}
+
+// nonnull (no args) means ALL pointer params are nonnull.
+void test_gcc_nonnull_all(const char * _Nullable a, const char * _Nullable b) {
+ gcc_all_nonnull(a, b);
+ const char c1 = *a; // OK — narrowed
+ const char c2 = *b; // OK — narrowed
+}
+
+// nonnull(1) only narrows the first arg, not the second.
+void test_gcc_nonnull_partial(const char * _Nullable a, const char * _Nullable b) {
+ gcc_partial_nonnull(a, b);
+ const char c1 = *a; // OK — narrowed (param 1 is nonnull)
+ const char c2 = *b; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-nonnull-param.cpp b/clang/test/Sema/flow-nullability-nonnull-param.cpp
new file mode 100644
index 0000000000000..b0f958bce4eb6
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-nonnull-param.cpp
@@ -0,0 +1,43 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Entity {
+ int x;
+ int value() const { return x; }
+};
+
+#pragma clang assume_nonnull begin
+
+void test_nonnull_star(Entity* _Nonnull p) {
+ (*p).x = 1; // OK - _Nonnull never warns
+}
+
+void test_nonnull_arrow(Entity* _Nonnull p) {
+ p->x = 1; // OK - _Nonnull never warns
+}
+
+void test_nonnull_method(Entity* _Nonnull p) {
+ int v = p->value(); // OK
+}
+
+void test_nonnull_local() {
+ Entity e;
+ Entity* _Nonnull p = &e;
+ p->x = 1; // OK - dereference of a _Nonnull local never warns
+}
+
+void test_mixed_params(Entity* _Nonnull safe, Entity* _Nullable risky) {
+ safe->x = 1; // OK - _Nonnull
+ if (risky) {
+ risky->x = safe->x; // OK - risky narrowed, safe is _Nonnull
+ }
+}
+
+void test_nonnull_after_null_check(Entity* _Nonnull p) {
+ if (p) {
+ p->x = 1; // OK - redundant check, but still fine
+ }
+ p->x = 2; // OK - p is _Nonnull regardless of the earlier check
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-noreturn.cpp b/clang/test/Sema/flow-nullability-noreturn.cpp
new file mode 100644
index 0000000000000..949bc91c942d5
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-noreturn.cpp
@@ -0,0 +1,81 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Entity {
+ int x;
+};
+
+[[noreturn]] void fatal(const char* msg);
+void log(const char* msg);
+
+#pragma clang assume_nonnull begin
+
+// === if-else where both branches terminate ===
+
+void test_if_else_both_return(Entity* _Nullable p) {
+ if (!p) {
+ if (true) { return; }
+ else { return; }
+ }
+ p->x = 1; // OK - if always terminates (both branches return)
+}
+
+void test_if_else_return_and_noreturn(Entity* _Nullable p) {
+ if (!p) {
+ if (true) { return; }
+ else { fatal("unreachable"); }
+ }
+ p->x = 1; // OK - if always terminates
+}
+
+void test_nested_if_else_terminates(Entity* _Nullable p) {
+ if (!p) {
+ if (true) {
+ if (true) { return; }
+ else { return; }
+ } else {
+ fatal("unreachable");
+ }
+ }
+ p->x = 1; // OK - deeply nested, both paths terminate
+}
+
+void test_if_without_else_no_termination(Entity* _Nullable p, bool flag) {
+ if (!p) {
+ if (flag) { return; }
+ }
+ p->x = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// === A [[noreturn]] call terminates the branch, so narrowing survives ===
+
+void test_noreturn_function(Entity* _Nullable p) {
+ if (!p) {
+ fatal("p is null");
+ }
+ p->x = 1; // OK - noreturn guarantees we don't reach here if p was null
+}
+
+void test_noreturn_in_compound(Entity* _Nullable p) {
+ if (!p) {
+ log("about to die");
+ fatal("p is null");
+ }
+ p->x = 1; // OK
+}
+
+// === do-while(0) assertion macro pattern ===
+
+#define MY_ASSERT(cond) do { if (!(cond)) fatal("assertion failed: " #cond); } while(0)
+
+void test_do_while_assert(Entity* _Nullable p) {
+ MY_ASSERT(p);
+ p->x = 1; // OK - asserted non-null
+}
+
+void test_do_while_assert_two_vars(Entity* _Nullable p, Entity* _Nullable q) {
+ MY_ASSERT(p);
+ MY_ASSERT(q);
+ p->x = q->x; // OK - both asserted non-null
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-nullable-default-template.cpp b/clang/test/Sema/flow-nullability-nullable-default-template.cpp
new file mode 100644
index 0000000000000..6aec7052e0f1a
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-nullable-default-template.cpp
@@ -0,0 +1,43 @@
+// Test: Does the checker catch dereferences of explicit _Nullable return types?
+// This mimics a typical getComponent<T>() accessor pattern from ECS frameworks.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Component {
+ int value;
+ void setValue(int v) { value = v; }
+};
+
+struct Entity {
+ // Template method returning T* _Nullable
+ template<typename T>
+ T* _Nullable getComponent() { return nullptr; }
+
+ // Non-template returning pointer — explicitly nullable
+ Component* _Nullable getFirstComponent() { return nullptr; }
+};
+
+// Case 1: Non-template function → local var → arrow deref
+void test_non_template(Entity* e) {
+ Component* c = e->getFirstComponent(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ c->setValue(42); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// Case 2: Template function → local var → arrow deref
+void test_template(Entity* e) {
+ Component* c = e->getComponent<Component>(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ c->setValue(42); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// Case 3: Chained call → local var → data member access
+void test_data_member(Entity* e) {
+ Component* c = e->getComponent<Component>(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ c->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+// Case 4: With null check — should NOT warn for c, still warns for e
+void test_with_check(Entity* e) {
+ Component* c = e->getComponent<Component>(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ if (c != nullptr) {
+ c->setValue(42); // OK — c is narrowed
+ }
+}
diff --git a/clang/test/Sema/flow-nullability-perf-stress.cpp b/clang/test/Sema/flow-nullability-perf-stress.cpp
new file mode 100644
index 0000000000000..7163f95475ae7
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-perf-stress.cpp
@@ -0,0 +1,401 @@
+// Performance regression guard for flow-sensitive nullability analysis.
+// This file generates a large amount of work for the analysis and must
+// compile within the default lit timeout. If the analysis has a complexity
+// regression, this test will time out.
+//
+// Modeled after clang/test/Analysis/runtime-regression.c — the test passes
+// if it finishes; there are no diagnostic expectations beyond that.
+//
+// UNSUPPORTED: asan, msan, ubsan
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+ Node * _Nullable left;
+ Node * _Nullable right;
+};
+
+Node * _Nullable getNode();
+int getInt();
+
+#pragma clang assume_nonnull begin
+
+// --- Pattern 1: Many sequential null checks (tests linear scaling) ---
+// 100 locals, each null-checked before use; the state map should scale linearly.
+
+#define CHECK_AND_USE(N) \
+ { Node * _Nullable p##N = getNode(); if (p##N) p##N->value = N; }
+
+void stress_sequential() {
+ CHECK_AND_USE(0) CHECK_AND_USE(1) CHECK_AND_USE(2) CHECK_AND_USE(3)
+ CHECK_AND_USE(4) CHECK_AND_USE(5) CHECK_AND_USE(6) CHECK_AND_USE(7)
+ CHECK_AND_USE(8) CHECK_AND_USE(9) CHECK_AND_USE(10) CHECK_AND_USE(11)
+ CHECK_AND_USE(12) CHECK_AND_USE(13) CHECK_AND_USE(14) CHECK_AND_USE(15)
+ CHECK_AND_USE(16) CHECK_AND_USE(17) CHECK_AND_USE(18) CHECK_AND_USE(19)
+ CHECK_AND_USE(20) CHECK_AND_USE(21) CHECK_AND_USE(22) CHECK_AND_USE(23)
+ CHECK_AND_USE(24) CHECK_AND_USE(25) CHECK_AND_USE(26) CHECK_AND_USE(27)
+ CHECK_AND_USE(28) CHECK_AND_USE(29) CHECK_AND_USE(30) CHECK_AND_USE(31)
+ CHECK_AND_USE(32) CHECK_AND_USE(33) CHECK_AND_USE(34) CHECK_AND_USE(35)
+ CHECK_AND_USE(36) CHECK_AND_USE(37) CHECK_AND_USE(38) CHECK_AND_USE(39)
+ CHECK_AND_USE(40) CHECK_AND_USE(41) CHECK_AND_USE(42) CHECK_AND_USE(43)
+ CHECK_AND_USE(44) CHECK_AND_USE(45) CHECK_AND_USE(46) CHECK_AND_USE(47)
+ CHECK_AND_USE(48) CHECK_AND_USE(49) CHECK_AND_USE(50) CHECK_AND_USE(51)
+ CHECK_AND_USE(52) CHECK_AND_USE(53) CHECK_AND_USE(54) CHECK_AND_USE(55)
+ CHECK_AND_USE(56) CHECK_AND_USE(57) CHECK_AND_USE(58) CHECK_AND_USE(59)
+ CHECK_AND_USE(60) CHECK_AND_USE(61) CHECK_AND_USE(62) CHECK_AND_USE(63)
+ CHECK_AND_USE(64) CHECK_AND_USE(65) CHECK_AND_USE(66) CHECK_AND_USE(67)
+ CHECK_AND_USE(68) CHECK_AND_USE(69) CHECK_AND_USE(70) CHECK_AND_USE(71)
+ CHECK_AND_USE(72) CHECK_AND_USE(73) CHECK_AND_USE(74) CHECK_AND_USE(75)
+ CHECK_AND_USE(76) CHECK_AND_USE(77) CHECK_AND_USE(78) CHECK_AND_USE(79)
+ CHECK_AND_USE(80) CHECK_AND_USE(81) CHECK_AND_USE(82) CHECK_AND_USE(83)
+ CHECK_AND_USE(84) CHECK_AND_USE(85) CHECK_AND_USE(86) CHECK_AND_USE(87)
+ CHECK_AND_USE(88) CHECK_AND_USE(89) CHECK_AND_USE(90) CHECK_AND_USE(91)
+ CHECK_AND_USE(92) CHECK_AND_USE(93) CHECK_AND_USE(94) CHECK_AND_USE(95)
+ CHECK_AND_USE(96) CHECK_AND_USE(97) CHECK_AND_USE(98) CHECK_AND_USE(99)
+}
+
+// --- Pattern 2: Branch fan-out (tests intersect scaling) ---
+// 50 independent if-branches merging at one point
+
+#define BRANCH(N) if (getInt()) { s##N = &nodes[N]; }
+
+void stress_fanout() {
+ Node nodes[50];
+ Node * _Nullable s0 = nullptr, * _Nullable s1 = nullptr;
+ Node * _Nullable s2 = nullptr, * _Nullable s3 = nullptr;
+ Node * _Nullable s4 = nullptr, * _Nullable s5 = nullptr;
+ Node * _Nullable s6 = nullptr, * _Nullable s7 = nullptr;
+ Node * _Nullable s8 = nullptr, * _Nullable s9 = nullptr;
+ Node * _Nullable s10 = nullptr, * _Nullable s11 = nullptr;
+ Node * _Nullable s12 = nullptr, * _Nullable s13 = nullptr;
+ Node * _Nullable s14 = nullptr, * _Nullable s15 = nullptr;
+ Node * _Nullable s16 = nullptr, * _Nullable s17 = nullptr;
+ Node * _Nullable s18 = nullptr, * _Nullable s19 = nullptr;
+ Node * _Nullable s20 = nullptr, * _Nullable s21 = nullptr;
+ Node * _Nullable s22 = nullptr, * _Nullable s23 = nullptr;
+ Node * _Nullable s24 = nullptr, * _Nullable s25 = nullptr;
+ Node * _Nullable s26 = nullptr, * _Nullable s27 = nullptr;
+ Node * _Nullable s28 = nullptr, * _Nullable s29 = nullptr;
+ Node * _Nullable s30 = nullptr, * _Nullable s31 = nullptr;
+ Node * _Nullable s32 = nullptr, * _Nullable s33 = nullptr;
+ Node * _Nullable s34 = nullptr, * _Nullable s35 = nullptr;
+ Node * _Nullable s36 = nullptr, * _Nullable s37 = nullptr;
+ Node * _Nullable s38 = nullptr, * _Nullable s39 = nullptr;
+ Node * _Nullable s40 = nullptr, * _Nullable s41 = nullptr;
+ Node * _Nullable s42 = nullptr, * _Nullable s43 = nullptr;
+ Node * _Nullable s44 = nullptr, * _Nullable s45 = nullptr;
+ Node * _Nullable s46 = nullptr, * _Nullable s47 = nullptr;
+ Node * _Nullable s48 = nullptr, * _Nullable s49 = nullptr;
+
+ BRANCH(0) BRANCH(1) BRANCH(2) BRANCH(3) BRANCH(4)
+ BRANCH(5) BRANCH(6) BRANCH(7) BRANCH(8) BRANCH(9)
+ BRANCH(10) BRANCH(11) BRANCH(12) BRANCH(13) BRANCH(14)
+ BRANCH(15) BRANCH(16) BRANCH(17) BRANCH(18) BRANCH(19)
+ BRANCH(20) BRANCH(21) BRANCH(22) BRANCH(23) BRANCH(24)
+ BRANCH(25) BRANCH(26) BRANCH(27) BRANCH(28) BRANCH(29)
+ BRANCH(30) BRANCH(31) BRANCH(32) BRANCH(33) BRANCH(34)
+ BRANCH(35) BRANCH(36) BRANCH(37) BRANCH(38) BRANCH(39)
+ BRANCH(40) BRANCH(41) BRANCH(42) BRANCH(43) BRANCH(44)
+ BRANCH(45) BRANCH(46) BRANCH(47) BRANCH(48) BRANCH(49)
+}
+
+// --- Pattern 3: Many small functions (realistic workload) ---
+// 100 functions with typical null-check-and-use patterns
+
+#define SMALL_FN(N) \
+ void small_fn_##N(Node * _Nullable p) { \
+ if (!p) return; \
+ p->value = N; \
+ if (p->next) p->next->value = N + 1; \
+ }
+
+SMALL_FN(0) SMALL_FN(1) SMALL_FN(2) SMALL_FN(3) SMALL_FN(4)
+SMALL_FN(5) SMALL_FN(6) SMALL_FN(7) SMALL_FN(8) SMALL_FN(9)
+SMALL_FN(10) SMALL_FN(11) SMALL_FN(12) SMALL_FN(13) SMALL_FN(14)
+SMALL_FN(15) SMALL_FN(16) SMALL_FN(17) SMALL_FN(18) SMALL_FN(19)
+SMALL_FN(20) SMALL_FN(21) SMALL_FN(22) SMALL_FN(23) SMALL_FN(24)
+SMALL_FN(25) SMALL_FN(26) SMALL_FN(27) SMALL_FN(28) SMALL_FN(29)
+SMALL_FN(30) SMALL_FN(31) SMALL_FN(32) SMALL_FN(33) SMALL_FN(34)
+SMALL_FN(35) SMALL_FN(36) SMALL_FN(37) SMALL_FN(38) SMALL_FN(39)
+SMALL_FN(40) SMALL_FN(41) SMALL_FN(42) SMALL_FN(43) SMALL_FN(44)
+SMALL_FN(45) SMALL_FN(46) SMALL_FN(47) SMALL_FN(48) SMALL_FN(49)
+SMALL_FN(50) SMALL_FN(51) SMALL_FN(52) SMALL_FN(53) SMALL_FN(54)
+SMALL_FN(55) SMALL_FN(56) SMALL_FN(57) SMALL_FN(58) SMALL_FN(59)
+SMALL_FN(60) SMALL_FN(61) SMALL_FN(62) SMALL_FN(63) SMALL_FN(64)
+SMALL_FN(65) SMALL_FN(66) SMALL_FN(67) SMALL_FN(68) SMALL_FN(69)
+SMALL_FN(70) SMALL_FN(71) SMALL_FN(72) SMALL_FN(73) SMALL_FN(74)
+SMALL_FN(75) SMALL_FN(76) SMALL_FN(77) SMALL_FN(78) SMALL_FN(79)
+SMALL_FN(80) SMALL_FN(81) SMALL_FN(82) SMALL_FN(83) SMALL_FN(84)
+SMALL_FN(85) SMALL_FN(86) SMALL_FN(87) SMALL_FN(88) SMALL_FN(89)
+SMALL_FN(90) SMALL_FN(91) SMALL_FN(92) SMALL_FN(93) SMALL_FN(94)
+SMALL_FN(95) SMALL_FN(96) SMALL_FN(97) SMALL_FN(98) SMALL_FN(99)
+
+// --- Pattern 4: Deep nesting (15 nested narrowings on distinct params) ---
+
+void stress_deep_nesting(
+ Node * _Nullable p0, Node * _Nullable p1, Node * _Nullable p2,
+ Node * _Nullable p3, Node * _Nullable p4, Node * _Nullable p5,
+ Node * _Nullable p6, Node * _Nullable p7, Node * _Nullable p8,
+ Node * _Nullable p9, Node * _Nullable p10, Node * _Nullable p11,
+ Node * _Nullable p12, Node * _Nullable p13, Node * _Nullable p14) {
+ if (p0) {
+ if (p1) {
+ if (p2) {
+ if (p3) {
+ if (p4) {
+ if (p5) {
+ if (p6) {
+ if (p7) {
+ if (p8) {
+ if (p9) {
+ if (p10) {
+ if (p11) {
+ if (p12) {
+ if (p13) {
+ if (p14) {
+ p0->value = p1->value + p2->value + p3->value;
+ p4->value = p5->value + p6->value + p7->value;
+ p8->value = p9->value + p10->value + p11->value;
+ p12->value = p13->value + p14->value;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// --- Pattern 5: Linked list traversal with operations ---
+
+void stress_linked_list() {
+ Node * _Nullable head = getNode();
+ int sum = 0;
+ for (Node * _Nullable p = head; p; p = p->next) {
+ sum += p->value;
+ if (p->left) {
+ sum += p->left->value;
+ if (p->left->right) {
+ sum += p->left->right->value;
+ }
+ }
+ if (p->right) {
+ sum += p->right->value;
+ }
+ }
+ (void)sum;
+}
+
+// --- Pattern 6: Diamond CFG merges (tests intersect with divergent narrowing) ---
+// Each diamond: if(cond) { narrow p_i } else { narrow q_i }
+// Then merge point must intersect correctly.
+
+#define DIAMOND(N) \
+ if (getInt()) { \
+ if (p##N) p##N->value = N; \
+ } else { \
+ if (q##N) q##N->value = N; \
+ }
+
+void stress_diamond_merges() {
+ Node * _Nullable p0 = getNode(), * _Nullable q0 = getNode();
+ Node * _Nullable p1 = getNode(), * _Nullable q1 = getNode();
+ Node * _Nullable p2 = getNode(), * _Nullable q2 = getNode();
+ Node * _Nullable p3 = getNode(), * _Nullable q3 = getNode();
+ Node * _Nullable p4 = getNode(), * _Nullable q4 = getNode();
+ Node * _Nullable p5 = getNode(), * _Nullable q5 = getNode();
+ Node * _Nullable p6 = getNode(), * _Nullable q6 = getNode();
+ Node * _Nullable p7 = getNode(), * _Nullable q7 = getNode();
+ Node * _Nullable p8 = getNode(), * _Nullable q8 = getNode();
+ Node * _Nullable p9 = getNode(), * _Nullable q9 = getNode();
+ Node * _Nullable p10 = getNode(), * _Nullable q10 = getNode();
+ Node * _Nullable p11 = getNode(), * _Nullable q11 = getNode();
+ Node * _Nullable p12 = getNode(), * _Nullable q12 = getNode();
+ Node * _Nullable p13 = getNode(), * _Nullable q13 = getNode();
+ Node * _Nullable p14 = getNode(), * _Nullable q14 = getNode();
+ Node * _Nullable p15 = getNode(), * _Nullable q15 = getNode();
+ Node * _Nullable p16 = getNode(), * _Nullable q16 = getNode();
+ Node * _Nullable p17 = getNode(), * _Nullable q17 = getNode();
+ Node * _Nullable p18 = getNode(), * _Nullable q18 = getNode();
+ Node * _Nullable p19 = getNode(), * _Nullable q19 = getNode();
+ Node * _Nullable p20 = getNode(), * _Nullable q20 = getNode();
+ Node * _Nullable p21 = getNode(), * _Nullable q21 = getNode();
+ Node * _Nullable p22 = getNode(), * _Nullable q22 = getNode();
+ Node * _Nullable p23 = getNode(), * _Nullable q23 = getNode();
+ Node * _Nullable p24 = getNode(), * _Nullable q24 = getNode();
+
+ DIAMOND(0) DIAMOND(1) DIAMOND(2) DIAMOND(3) DIAMOND(4)
+ DIAMOND(5) DIAMOND(6) DIAMOND(7) DIAMOND(8) DIAMOND(9)
+ DIAMOND(10) DIAMOND(11) DIAMOND(12) DIAMOND(13) DIAMOND(14)
+ DIAMOND(15) DIAMOND(16) DIAMOND(17) DIAMOND(18) DIAMOND(19)
+ DIAMOND(20) DIAMOND(21) DIAMOND(22) DIAMOND(23) DIAMOND(24)
+}
+
+// --- Pattern 7: Boolean guard stress (tests BoolGuards tracking) ---
+// Many boolean intermediaries checked later — stresses the guard map.
+
+#define BOOL_GUARD(N) \
+ bool valid_##N = (getNode() != nullptr); \
+ Node * _Nullable bg_##N = getNode();
+
+#define BOOL_CHECK(N) \
+ if (valid_##N && bg_##N) { bg_##N->value = N; }
+
+void stress_bool_guards() {
+ BOOL_GUARD(0) BOOL_GUARD(1) BOOL_GUARD(2) BOOL_GUARD(3)
+ BOOL_GUARD(4) BOOL_GUARD(5) BOOL_GUARD(6) BOOL_GUARD(7)
+ BOOL_GUARD(8) BOOL_GUARD(9) BOOL_GUARD(10) BOOL_GUARD(11)
+ BOOL_GUARD(12) BOOL_GUARD(13) BOOL_GUARD(14) BOOL_GUARD(15)
+ BOOL_GUARD(16) BOOL_GUARD(17) BOOL_GUARD(18) BOOL_GUARD(19)
+ BOOL_GUARD(20) BOOL_GUARD(21) BOOL_GUARD(22) BOOL_GUARD(23)
+ BOOL_GUARD(24) BOOL_GUARD(25) BOOL_GUARD(26) BOOL_GUARD(27)
+ BOOL_GUARD(28) BOOL_GUARD(29) BOOL_GUARD(30) BOOL_GUARD(31)
+ BOOL_GUARD(32) BOOL_GUARD(33) BOOL_GUARD(34) BOOL_GUARD(35)
+ BOOL_GUARD(36) BOOL_GUARD(37) BOOL_GUARD(38) BOOL_GUARD(39)
+
+ BOOL_CHECK(0) BOOL_CHECK(1) BOOL_CHECK(2) BOOL_CHECK(3)
+ BOOL_CHECK(4) BOOL_CHECK(5) BOOL_CHECK(6) BOOL_CHECK(7)
+ BOOL_CHECK(8) BOOL_CHECK(9) BOOL_CHECK(10) BOOL_CHECK(11)
+ BOOL_CHECK(12) BOOL_CHECK(13) BOOL_CHECK(14) BOOL_CHECK(15)
+ BOOL_CHECK(16) BOOL_CHECK(17) BOOL_CHECK(18) BOOL_CHECK(19)
+ BOOL_CHECK(20) BOOL_CHECK(21) BOOL_CHECK(22) BOOL_CHECK(23)
+ BOOL_CHECK(24) BOOL_CHECK(25) BOOL_CHECK(26) BOOL_CHECK(27)
+ BOOL_CHECK(28) BOOL_CHECK(29) BOOL_CHECK(30) BOOL_CHECK(31)
+ BOOL_CHECK(32) BOOL_CHECK(33) BOOL_CHECK(34) BOOL_CHECK(35)
+ BOOL_CHECK(36) BOOL_CHECK(37) BOOL_CHECK(38) BOOL_CHECK(39)
+}
+
+// --- Pattern 8: Member narrowing stress (tests NarrowedMembers set) ---
+// Many member accesses through different variables, all checked.
+
+struct Tree {
+ int data;
+ Tree * _Nullable left;
+ Tree * _Nullable right;
+ Tree * _Nullable parent;
+};
+
+#define MEMBER_NARROW(N) \
+ void member_fn_##N(Tree * _Nullable t) { \
+ if (!t) return; \
+ if (t->left) { \
+ t->left->data = N; \
+ if (t->left->right) t->left->right->data = N; \
+ } \
+ if (t->right) { \
+ t->right->data = N; \
+ if (t->right->parent) t->right->parent->data = N; \
+ } \
+ }
+
+MEMBER_NARROW(0) MEMBER_NARROW(1) MEMBER_NARROW(2) MEMBER_NARROW(3)
+MEMBER_NARROW(4) MEMBER_NARROW(5) MEMBER_NARROW(6) MEMBER_NARROW(7)
+MEMBER_NARROW(8) MEMBER_NARROW(9) MEMBER_NARROW(10) MEMBER_NARROW(11)
+MEMBER_NARROW(12) MEMBER_NARROW(13) MEMBER_NARROW(14) MEMBER_NARROW(15)
+MEMBER_NARROW(16) MEMBER_NARROW(17) MEMBER_NARROW(18) MEMBER_NARROW(19)
+MEMBER_NARROW(20) MEMBER_NARROW(21) MEMBER_NARROW(22) MEMBER_NARROW(23)
+MEMBER_NARROW(24) MEMBER_NARROW(25) MEMBER_NARROW(26) MEMBER_NARROW(27)
+MEMBER_NARROW(28) MEMBER_NARROW(29) MEMBER_NARROW(30) MEMBER_NARROW(31)
+MEMBER_NARROW(32) MEMBER_NARROW(33) MEMBER_NARROW(34) MEMBER_NARROW(35)
+MEMBER_NARROW(36) MEMBER_NARROW(37) MEMBER_NARROW(38) MEMBER_NARROW(39)
+MEMBER_NARROW(40) MEMBER_NARROW(41) MEMBER_NARROW(42) MEMBER_NARROW(43)
+MEMBER_NARROW(44) MEMBER_NARROW(45) MEMBER_NARROW(46) MEMBER_NARROW(47)
+MEMBER_NARROW(48) MEMBER_NARROW(49)
+
+// --- Pattern 9: Compound conditions stress ---
+// Many && chains forcing the CFG decomposition to create many basic blocks.
+
+#define AND_CHAIN_3(A, B, C) if (A && B && C) { A->value = B->value + C->value; }
+
+void stress_compound_conditions() {
+ Node * _Nullable a0 = getNode(), * _Nullable b0 = getNode(), * _Nullable c0 = getNode();
+ Node * _Nullable a1 = getNode(), * _Nullable b1 = getNode(), * _Nullable c1 = getNode();
+ Node * _Nullable a2 = getNode(), * _Nullable b2 = getNode(), * _Nullable c2 = getNode();
+ Node * _Nullable a3 = getNode(), * _Nullable b3 = getNode(), * _Nullable c3 = getNode();
+ Node * _Nullable a4 = getNode(), * _Nullable b4 = getNode(), * _Nullable c4 = getNode();
+ Node * _Nullable a5 = getNode(), * _Nullable b5 = getNode(), * _Nullable c5 = getNode();
+ Node * _Nullable a6 = getNode(), * _Nullable b6 = getNode(), * _Nullable c6 = getNode();
+ Node * _Nullable a7 = getNode(), * _Nullable b7 = getNode(), * _Nullable c7 = getNode();
+ Node * _Nullable a8 = getNode(), * _Nullable b8 = getNode(), * _Nullable c8 = getNode();
+ Node * _Nullable a9 = getNode(), * _Nullable b9 = getNode(), * _Nullable c9 = getNode();
+
+ AND_CHAIN_3(a0, b0, c0) AND_CHAIN_3(a1, b1, c1)
+ AND_CHAIN_3(a2, b2, c2) AND_CHAIN_3(a3, b3, c3)
+ AND_CHAIN_3(a4, b4, c4) AND_CHAIN_3(a5, b5, c5)
+ AND_CHAIN_3(a6, b6, c6) AND_CHAIN_3(a7, b7, c7)
+ AND_CHAIN_3(a8, b8, c8) AND_CHAIN_3(a9, b9, c9)
+}
+
+// --- Pattern 10: Large switch statement (tests edge-state scaling) ---
+// A 100-case switch creates ~200 edges. Each case narrows a different var
+// then falls through to the merge point, stressing the per-edge state map.
+
+void stress_switch() {
+ Node * _Nullable p = getNode();
+ int x = getInt();
+ switch (x) {
+ case 0: if (p) p->value = 0; break;
+ case 1: if (p) p->value = 1; break;
+ case 2: if (p) p->value = 2; break;
+ case 3: if (p) p->value = 3; break;
+ case 4: if (p) p->value = 4; break;
+ case 5: if (p) p->value = 5; break;
+ case 6: if (p) p->value = 6; break;
+ case 7: if (p) p->value = 7; break;
+ case 8: if (p) p->value = 8; break;
+ case 9: if (p) p->value = 9; break;
+ case 10: if (p) p->value = 10; break;
+ case 11: if (p) p->value = 11; break;
+ case 12: if (p) p->value = 12; break;
+ case 13: if (p) p->value = 13; break;
+ case 14: if (p) p->value = 14; break;
+ case 15: if (p) p->value = 15; break;
+ case 16: if (p) p->value = 16; break;
+ case 17: if (p) p->value = 17; break;
+ case 18: if (p) p->value = 18; break;
+ case 19: if (p) p->value = 19; break;
+ case 20: if (p) p->value = 20; break;
+ case 21: if (p) p->value = 21; break;
+ case 22: if (p) p->value = 22; break;
+ case 23: if (p) p->value = 23; break;
+ case 24: if (p) p->value = 24; break;
+ case 25: if (p) p->value = 25; break;
+ case 26: if (p) p->value = 26; break;
+ case 27: if (p) p->value = 27; break;
+ case 28: if (p) p->value = 28; break;
+ case 29: if (p) p->value = 29; break;
+ case 30: if (p) p->value = 30; break;
+ case 31: if (p) p->value = 31; break;
+ case 32: if (p) p->value = 32; break;
+ case 33: if (p) p->value = 33; break;
+ case 34: if (p) p->value = 34; break;
+ case 35: if (p) p->value = 35; break;
+ case 36: if (p) p->value = 36; break;
+ case 37: if (p) p->value = 37; break;
+ case 38: if (p) p->value = 38; break;
+ case 39: if (p) p->value = 39; break;
+ case 40: if (p) p->value = 40; break;
+ case 41: if (p) p->value = 41; break;
+ case 42: if (p) p->value = 42; break;
+ case 43: if (p) p->value = 43; break;
+ case 44: if (p) p->value = 44; break;
+ case 45: if (p) p->value = 45; break;
+ case 46: if (p) p->value = 46; break;
+ case 47: if (p) p->value = 47; break;
+ case 48: if (p) p->value = 48; break;
+ case 49: if (p) p->value = 49; break;
+ default: break;
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-range-for.cpp b/clang/test/Sema/flow-nullability-range-for.cpp
new file mode 100644
index 0000000000000..85fb4e389d2f0
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-range-for.cpp
@@ -0,0 +1,31 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nonnull -std=c++17 %s -verify
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Item { int value; };
+
+template <typename T, int N>
+struct Array {
+ T data_[N];
+ T* begin() { return data_; }
+ T* end() { return data_ + N; }
+ const T* begin() const { return data_; }
+ const T* end() const { return data_ + N; }
+};
+
+void test_range_for_no_warn() {
+ Array<Item, 3> arr = {};
+ for (const auto& item : arr) {
+ (void)item.value;
+ }
+}
+
+void test_range_for_c_array() {
+ Item items[4] = {};
+ for (const auto& item : items) {
+ (void)item.value;
+ }
+}
+
+void test_deref_still_warns(int* _Nullable p) {
+ (void)*p; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
diff --git a/clang/test/Sema/flow-nullability-reassignment.cpp b/clang/test/Sema/flow-nullability-reassignment.cpp
new file mode 100644
index 0000000000000..ff167c304ab07
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-reassignment.cpp
@@ -0,0 +1,64 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+struct Entity {
+ int x;
+};
+
+Entity* _Nullable getEntity();
+
+#pragma clang assume_nonnull begin
+
+// Reassignment invalidates narrowing — the CFG-based analysis tracks this correctly.
+
+void test_reassign_invalidates(Entity* _Nullable p, Entity* _Nullable other) {
+ if (p) {
+ p = other;
+ (void)*p; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+void test_deref_before_reassign(Entity* _Nullable p, Entity* _Nullable other) {
+ if (p) {
+ (*p).x = 1; // OK - narrowed
+ p = other;
+ (*p).x = 2; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+void test_reassign_then_recheck(Entity* _Nullable p) {
+ p = getEntity();
+ if (p) {
+ (*p).x = 1; // OK - re-narrowed after reassignment
+ }
+}
+
+// Pointer increment/decrement preserves narrowing — arithmetic on a
+// non-null pointer is still non-null (same as p + 1 in initialization).
+void test_increment_preserves_narrowing(Entity* _Nullable p) {
+ if (p) {
+ p++;
+ (void)*p; // OK — p++ on non-null is still non-null
+ }
+}
+
+void test_decrement_preserves_narrowing(Entity* _Nullable p) {
+ if (p) {
+ --p;
+ (void)*p; // OK — --p on non-null is still non-null
+ }
+}
+
+// But member narrowing IS invalidated by pointer arithmetic,
+// since the pointer now points to a different object.
+struct Chain {
+ int value;
+ Chain * _Nullable next;
+};
+
+void test_increment_invalidates_member(Chain * _Nullable p) {
+ if (p && p->next) {
+ p++;
+ p->next->value = 1; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-smart-ptr.cpp b/clang/test/Sema/flow-nullability-smart-ptr.cpp
new file mode 100644
index 0000000000000..04653291a6c1a
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-smart-ptr.cpp
@@ -0,0 +1,224 @@
+// Tests for smart pointer null dereference detection in flow-sensitive nullability.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node* next;
+};
+
+// Minimal std smart pointer mocks — must be in namespace std for detection.
+namespace std {
+
+template <typename T>
+struct unique_ptr {
+ T* ptr;
+ using pointer = T*;
+ using element_type = T;
+ pointer operator->() { return ptr; }
+ element_type& operator*() { return *ptr; }
+ pointer get() { return ptr; }
+ explicit operator bool() const { return ptr != nullptr; }
+ void reset() { ptr = nullptr; }
+ void reset(T* p) { ptr = p; }
+ unique_ptr() : ptr(nullptr) {}
+ unique_ptr(unique_ptr&& other) : ptr(other.ptr) { other.ptr = nullptr; }
+ unique_ptr& operator=(unique_ptr&& other) { ptr = other.ptr; other.ptr = nullptr; return *this; }
+ unique_ptr(const unique_ptr&) = delete;
+ unique_ptr& operator=(const unique_ptr&) = delete;
+};
+
+template <typename T>
+struct shared_ptr {
+ T* ptr;
+ T* operator->() { return ptr; }
+ T& operator*() { return *ptr; }
+ T* get() { return ptr; }
+ explicit operator bool() const { return ptr != nullptr; }
+ void reset() { ptr = nullptr; }
+ void reset(T* p) { ptr = p; }
+};
+
+template <typename T, typename... Args>
+unique_ptr<T> make_unique(Args&&... args);
+
+template <typename T, typename... Args>
+shared_ptr<T> make_shared(Args&&... args);
+
+template <typename T>
+T&& move(T& t) noexcept;
+
+} // namespace std
+
+#pragma clang assume_nonnull begin
+
+// Non-std smart pointer (should NOT trigger smart pointer warnings)
+template <typename T>
+struct CustomPtr {
+ T* ptr;
+ T* operator->() { return ptr; }
+ T& operator*() { return *ptr; }
+};
+
+// Iterator (should NOT trigger smart pointer warnings)
+struct Container {
+ struct Iterator {
+ Node* ptr;
+ Node* operator->() { return ptr; }
+ Node& operator*() { return *ptr; }
+ };
+ Iterator begin();
+ Iterator end();
+};
+
+// --- Basic dereference warnings ---
+
+void test_sp_deref_warns(std::unique_ptr<Node> sp) {
+ sp->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+void test_shared_ptr_deref_warns(std::shared_ptr<Node> sp) {
+ sp->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+// --- Narrowing via null check ---
+
+void test_sp_narrowed_by_check(std::unique_ptr<Node> sp) {
+ if (sp) {
+ sp->value = 1; // OK — narrowed by bool check
+ }
+}
+
+void test_sp_narrowed_negated(std::unique_ptr<Node> sp) {
+ if (!sp)
+ return;
+ sp->value = 1; // OK — narrowed by early return
+}
+
+// --- make_unique/make_shared narrow ---
+
+void test_make_unique_narrows() {
+ auto sp = std::make_unique<Node>();
+ sp->value = 1; // OK — make_unique always returns non-null
+}
+
+void test_make_shared_narrows() {
+ auto sp = std::make_shared<Node>();
+ sp->value = 1; // OK — make_shared always returns non-null
+}
+
+// --- reset() makes nullable ---
+
+void test_reset_makes_nullable(std::unique_ptr<Node> sp) {
+ if (sp) {
+ sp->value = 1; // OK
+ }
+ sp.reset();
+ sp->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+void test_reset_with_arg_narrows(std::unique_ptr<Node> sp) {
+ sp.reset(new Node());
+ sp->value = 1; // OK — reset(ptr) gives it a value
+}
+
+void test_reset_nullptr_stays_nullable(std::unique_ptr<Node> sp) {
+ if (sp) {
+ sp->value = 1; // OK — narrowed
+ }
+ sp.reset(nullptr);
+ sp->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+// --- std::move makes source nullable ---
+
+void test_move_makes_source_nullable(std::unique_ptr<Node> sp) {
+ if (sp) {
+ sp->value = 1; // OK
+ }
+ auto other = std::move(sp);
+ sp->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+// --- Member smart pointers ---
+// Member smart pointers do NOT warn by default (too many false positives on
+// members initialized in constructors). They only warn when the current
+// function has evidence of nullability (reset, move, etc.).
+
+struct Owner {
+ std::unique_ptr<Node> csm_;
+
+ // No warning — no evidence of nullability within this function.
+ // Most member unique_ptrs are set in the constructor and always valid.
+ void use_no_evidence() {
+ csm_->value = 1; // OK — no evidence of nullability
+ }
+
+ // Warning — reset() without args makes it nullable in this function
+ void use_after_reset() {
+ csm_->value = 1; // OK — before reset
+ csm_.reset();
+ csm_->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+ }
+
+ // OK — reset with arg re-narrows it
+ void use_after_reset_with_arg() {
+ csm_.reset(new Node());
+ csm_->value = 1; // OK — reset(ptr) narrows
+ }
+
+ // Warning — std::move makes it nullable
+ void use_after_move() {
+ auto other = std::move(csm_);
+ csm_->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+ }
+
+ // OK — narrowed by null check after reset
+ void use_safe_after_reset() {
+ csm_.reset();
+ if (csm_) {
+ csm_->value = 1; // OK — narrowed
+ }
+ }
+};
+
+
+// --- Assignment from make_unique re-narrows ---
+
+void test_sp_assign_from_make_unique() {
+ std::unique_ptr<Node> sp;
+ sp->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+ sp = std::make_unique<Node>();
+ sp->value = 1; // OK — assignment from make_unique narrows
+}
+
+// --- Non-std smart pointers should NOT warn (kept as before) ---
+
+void test_custom_ptr_no_warn(CustomPtr<Node> cp) {
+ cp->value = 1; // OK — not a std smart pointer, skip operator->
+}
+
+void test_iterator_no_warn(Container c) {
+ auto it = c.begin();
+ it->value = 1; // OK — iterator, not a smart pointer
+}
+
+// --- .get() returns an unannotated raw pointer, no warning ---
+// (The smart pointer deref check via operator-> is the preferred warning path.)
+
+void test_get_no_warning_unannotated(std::unique_ptr<Node> sp) {
+ sp.get()->value = 1; // OK — get() return type is unannotated
+}
+
+// --- Raw pointers still work as before ---
+
+void test_raw_ptr_still_warns(Node* _Nullable p) {
+ p->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+void test_raw_ptr_narrowed(Node* _Nullable p) {
+ if (p) {
+ p->value = 1; // OK — narrowed
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-structured-bindings.cpp b/clang/test/Sema/flow-nullability-structured-bindings.cpp
new file mode 100644
index 0000000000000..46f9a6feb8e96
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-structured-bindings.cpp
@@ -0,0 +1,160 @@
+// Tests for structured bindings with flow-sensitive nullability analysis.
+// C++17 structured bindings produce BindingDecl nodes, not VarDecls.
+// The analysis currently tracks narrowing on VarDecls, so structured
+// binding variables are not narrowable. This test documents that behavior
+// and verifies no crashes occur.
+//
+// Structured binding pointers that need null-checking should be captured
+// into local variables first — this test also shows that workaround.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+};
+
+Node * _Nullable getNode();
+
+// Pair-like type for structured bindings
+struct PtrPair {
+ Node * _Nullable first;
+ Node * _Nullable second;
+};
+
+PtrPair getPair();
+
+// Tuple-like for testing get<> protocol
+struct Triple {
+ Node * _Nullable a;
+ Node * _Nullable b;
+ int c;
+};
+
+Triple getTriple();
+
+#pragma clang assume_nonnull begin
+
+// === Basic struct decomposition ===
+// Structured binding variables are BindingDecls, not VarDecls.
+// The analysis does not currently track narrowing on BindingDecls,
+// so these accesses do not warn even without null checks.
+// This is a known false negative — documenting that no crash occurs.
+
+void test_struct_decomp() {
+ PtrPair pair = getPair();
+ auto [p, q] = pair;
+ // p and q are BindingDecls — analysis doesn't track them,
+ // so no warning is produced (accepted false negative).
+ if (p) {
+    (void)p->value; // OK — BindingDecls are untracked, so no warning either way
+ }
+ if (q) {
+ (void)q->value; // OK
+ }
+}
+
+// === Decomposition with && guard ===
+
+void test_decomp_both_checked() {
+ auto [p, q] = getPair();
+ if (p && q) {
+ (void)p->value; // OK
+ (void)q->value; // OK
+ }
+}
+
+// === Mixed nullable/non-nullable struct ===
+
+struct MixedPair {
+ Node * _Nonnull safe;
+ Node * _Nullable risky;
+};
+
+MixedPair getMixed();
+
+void test_mixed_decomp() {
+ auto [safe, risky] = getMixed();
+ (void)safe->value; // OK — source is _Nonnull
+ // risky is a BindingDecl — no warning produced (false negative)
+}
+
+void test_mixed_decomp_guarded() {
+ auto [safe, risky] = getMixed();
+ (void)safe->value; // OK
+ if (risky) {
+ (void)risky->value; // OK — checked
+ }
+}
+
+// === Decomposition from triple ===
+
+void test_triple_decomp() {
+ auto [a, b, c] = getTriple();
+ if (a && b) {
+ (void)a->value; // OK
+ (void)b->value; // OK
+ }
+ (void)c; // OK — int, not a pointer
+}
+
+// === Reference binding through structured bindings ===
+
+void test_ref_decomp() {
+ PtrPair pair = getPair();
+ auto &[p, q] = pair;
+ if (p) {
+ (void)p->value; // OK
+ }
+}
+
+// === Workaround: capture into local variable for narrowing ===
+// This is the recommended pattern when you need null-checking with
+// structured bindings.
+
+void test_capture_workaround() {
+ auto [first, second] = getPair();
+ Node * _Nullable p = first;
+ Node * _Nullable q = second;
+ if (p && q) {
+ (void)p->value; // OK — local VarDecl is tracked
+ (void)q->value; // OK
+ }
+}
+
+// === Structured binding in if-init (C++17) ===
+
+void test_if_init_decomp() {
+ if (auto [p, q] = getPair(); p && q) {
+ (void)p->value; // OK
+ (void)q->value; // OK
+ }
+}
+
+// === Structured binding in for-range-init ===
+
+struct PairList {
+ PtrPair pairs[3];
+ PtrPair *begin() { return pairs; }
+ PtrPair *end() { return pairs + 3; }
+};
+
+void test_range_decomp(PairList &list) {
+ for (auto [p, q] : list) {
+ if (p) {
+ (void)p->value; // OK
+ }
+ }
+}
+
+// === Decomposition of stack-allocated struct ===
+
+void test_stack_decomp() {
+ int x = 42;
+ Node node{0, nullptr};
+ struct { Node * _Nonnull p; int *q; } s = {&node, &x};
+ auto [p, q] = s;
+ (void)p->value; // OK — source is _Nonnull
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-switch.cpp b/clang/test/Sema/flow-nullability-switch.cpp
new file mode 100644
index 0000000000000..b2c57526ffe08
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-switch.cpp
@@ -0,0 +1,38 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Entity {
+ int x;
+};
+
+#pragma clang assume_nonnull begin
+
+void test_narrowing_before_switch(Entity* _Nullable p, int kind) {
+ if (!p) return;
+ switch (kind) {
+ case 0:
+ p->x = 0; // OK - narrowed before switch
+ break;
+ case 1:
+ p->x = 1; // OK - narrowing carries into cases
+ break;
+ default:
+ p->x = -1; // OK
+ break;
+ }
+}
+
+void test_null_check_then_switch(Entity* _Nullable p, int kind) {
+ if (p) {
+ switch (kind) {
+ case 0:
+ p->x = 0; // OK
+ break;
+ case 1:
+ p->x = 1; // OK
+ break;
+ }
+ }
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-templates.cpp b/clang/test/Sema/flow-nullability-templates.cpp
new file mode 100644
index 0000000000000..080d01e9a08fa
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-templates.cpp
@@ -0,0 +1,155 @@
+// Tests for template interactions with flow-sensitive nullability analysis.
+// Templates are a major source of false positives in type-based analyses —
+// template instantiation can bake _Nullable into cast result types even when
+// the source is unannotated.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node * _Nullable next;
+};
+
+Node * _Nullable getNode();
+Node * _Nonnull getSafeNode();
+
+#pragma clang assume_nonnull begin
+
+// === Template functions with pointer parameters ===
+
+template <typename T>
+void deref_unchecked(T * _Nullable p) {
+ (void)p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+template <typename T>
+void deref_guarded(T * _Nullable p) {
+ if (p)
+ (void)p->value; // OK — narrowed
+}
+
+template <typename T>
+void deref_nonnull(T * _Nonnull p) {
+ (void)p->value; // OK — _Nonnull
+}
+
+void test_template_functions() {
+ Node * _Nullable n = getNode();
+ deref_unchecked(n); // expected-note{{in instantiation of function template specialization 'deref_unchecked<Node>' requested here}}
+ deref_guarded(n);
+ deref_nonnull(getSafeNode());
+}
+
+// === Template class with nullable member ===
+
+template <typename T>
+struct Wrapper {
+ T * _Nullable ptr;
+
+ void use_unchecked() {
+ (void)ptr->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+
+ void use_guarded() {
+ if (ptr)
+ (void)ptr->value; // OK
+ }
+};
+
+void test_template_class() {
+ Wrapper<Node> w;
+ w.use_unchecked(); // expected-note{{in instantiation of member function 'Wrapper<Node>::use_unchecked' requested here}}
+ w.use_guarded();
+}
+
+// === Template with multiple pointer params of different nullability ===
+
+template <typename T>
+void mixed_nullability(T * _Nonnull safe, T * _Nullable risky) {
+ (void)safe->value; // OK — _Nonnull
+ (void)risky->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_mixed() {
+ mixed_nullability(getSafeNode(), getNode()); // expected-note{{in instantiation of function template specialization 'mixed_nullability<Node>' requested here}}
+}
+
+// === Template that narrows then uses ===
+
+template <typename T>
+T* _Nullable find(T * _Nullable head, int target) {
+ for (T * _Nullable p = head; p; p = p->next) {
+ if (p->value == target) // OK — narrowed by loop condition
+ return p;
+ }
+ return nullptr;
+}
+
+void test_find() {
+ Node * _Nullable head = getNode();
+ find(head, 42);
+}
+
+// === Template with cast — the key false-positive scenario ===
+// Template instantiation can produce casts with _Nullable in the dest type.
+// The analysis should look through these casts to the source type.
+
+template <typename T>
+T* cast_and_use(void *raw) {
+ T *p = static_cast<T *>(raw);
+ // raw is void* (unannotated in nullable-default mode), but static_cast
+ // may bake the template param's nullability into the result.
+ // Should not warn — source (raw) is not explicitly _Nullable.
+ (void)p->value; // OK — unannotated source through cast
+ return p;
+}
+
+void test_template_cast() {
+ int dummy;
+ cast_and_use<Node>(&dummy);
+}
+
+// === Non-type template parameters (no effect on nullability) ===
+
+template <int N>
+void fixed_iteration(Node * _Nullable p) {
+ if (!p) return;
+ for (int i = 0; i < N; i++)
+ (void)p->value; // OK — narrowed
+}
+
+void test_non_type_template() {
+ fixed_iteration<10>(getNode());
+}
+
+// === Template with auto return type ===
+
+template <typename T>
+auto safe_access(T * _Nullable p, int fallback) {
+ if (p)
+ return p->value; // OK
+ return fallback;
+}
+
+void test_auto_return() {
+ safe_access(getNode(), -1);
+}
+
+// === Dependent type that resolves to pointer ===
+
+template <typename T>
+struct PointerHolder {
+ using Ptr = T*;
+ Ptr _Nullable held;
+
+ void use() {
+ if (held)
+ (void)held->value; // OK — narrowed
+ }
+};
+
+void test_dependent_type() {
+ PointerHolder<Node> h;
+ h.use();
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-terminators.cpp b/clang/test/Sema/flow-nullability-terminators.cpp
new file mode 100644
index 0000000000000..1a1682c09350b
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-terminators.cpp
@@ -0,0 +1,86 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 -fcxx-exceptions %s -verify
+// expected-no-diagnostics
+
+struct Entity {
+ int x;
+};
+
+[[noreturn]] void fatal(const char* msg);
+
+#pragma clang assume_nonnull begin
+
+// === throw as terminator ===
+
+void test_throw_narrows(Entity* _Nullable p) {
+ if (!p) throw "null pointer";
+ p->x = 1; // OK - throw terminates
+}
+
+void test_throw_in_compound(Entity* _Nullable p) {
+ if (!p) {
+ throw "null";
+ }
+ p->x = 1; // OK
+}
+
+// === goto as terminator ===
+
+void test_goto_narrows(Entity* _Nullable p) {
+ if (!p) goto cleanup;
+ p->x = 1; // OK - goto terminates
+cleanup:
+ return;
+}
+
+// === break as terminator (in loop) ===
+
+void test_break_narrows(Entity* _Nullable p) {
+ for (int i = 0; i < 10; i++) {
+ if (!p) break;
+ p->x = i; // OK - break terminates
+ }
+}
+
+void test_break_while(Entity* _Nullable p) {
+ while (true) {
+ if (!p) break;
+ p->x = 1; // OK
+ }
+}
+
+// === continue as terminator (in loop) ===
+
+void test_continue_narrows(Entity* _Nullable p) {
+ for (int i = 0; i < 10; i++) {
+ if (!p) continue;
+ p->x = i; // OK - continue terminates
+ }
+}
+
+// === return in else (positive check) ===
+
+// CFG correctly models that when the else-branch returns, post-if code
+// is only reachable from the then-branch where p was narrowed.
+void test_positive_check_else_return(Entity* _Nullable p) {
+ if (p) {
+ // use p
+ } else {
+ return;
+ }
+ p->x = 1; // OK - only reachable when p is non-null
+}
+
+// === combinations ===
+
+void test_noreturn_then_deref(Entity* _Nullable p) {
+ if (!p) fatal("null");
+ p->x = 1; // OK
+}
+
+void test_two_checks_return(Entity* _Nullable p, Entity* _Nullable q) {
+ if (!p) return;
+ if (!q) return;
+ p->x = q->x; // OK - both narrowed
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-ternary.cpp b/clang/test/Sema/flow-nullability-ternary.cpp
new file mode 100644
index 0000000000000..b4c7b872769f4
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-ternary.cpp
@@ -0,0 +1,59 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Node {
+ int value;
+ Node* _Nullable next;
+};
+
+Node* _Nullable getNode();
+
+#pragma clang assume_nonnull begin
+
+// === Basic ternary narrowing ===
+
+void test_ternary_true_branch(Node* _Nullable p) {
+ int x = p ? p->value : 0; // OK - p narrowed to nonnull in true branch
+}
+
+void test_ternary_false_branch_negated(Node* _Nullable p) {
+ int x = !p ? 0 : p->value; // OK - p narrowed to nonnull in false branch
+}
+
+void test_ternary_no_narrowing_false(Node* _Nullable p) {
+ int x = p ? 0 : p->value; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+void test_ternary_deref_star(Node* _Nullable p) {
+ Node n = p ? *p : (Node){0, nullptr}; // OK - p narrowed to nonnull
+ (void)n;
+}
+
+// === Ternary with comparison operators ===
+
+void test_ternary_ne_null(Node* _Nullable p) {
+ int x = (p != nullptr) ? p->value : -1; // OK
+}
+
+void test_ternary_eq_null(Node* _Nullable p) {
+ int x = (p == nullptr) ? -1 : p->value; // OK - narrowed in false branch
+}
+
+// === Ternary with AND conditions ===
+
+void test_ternary_and_both(Node* _Nullable p, Node* _Nullable q) {
+ int x = (p && q) ? p->value + q->value : 0; // OK - both narrowed
+}
+
+// === Nested ternary ===
+
+void test_nested_ternary(Node* _Nullable p, Node* _Nullable q) {
+ int x = p ? (q ? p->value + q->value : p->value) : 0; // OK
+}
+
+// === Ternary warns when it should ===
+
+void test_ternary_unrelated_cond(int flag, Node* _Nullable p) {
+ int x = flag ? p->value : 0; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-type-identity.cpp b/clang/test/Sema/flow-nullability-type-identity.cpp
new file mode 100644
index 0000000000000..7309f785ef011
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-type-identity.cpp
@@ -0,0 +1,107 @@
+// Verify that -fflow-sensitive-nullability does NOT affect type identity.
+// Nullability qualifiers are type sugar in Clang — they don't participate
+// in template argument deduction, std::is_same, decltype, or overload
+// resolution. This must remain true even when the flag infers
+// _Null_unspecified on unannotated pointers.
+//
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+template<typename T, typename U>
+struct is_same { static constexpr bool value = false; };
+template<typename T>
+struct is_same<T, T> { static constexpr bool value = true; };
+
+// Force a diagnostic that prints the type name — used to verify
+// types print as "int *", not "int * _Null_unspecified".
+template<typename T> struct show_type;
+
+#pragma clang assume_nonnull begin
+
+// --- decltype preserves bare pointer type ---
+
+void test_decltype_local() {
+ int x = 0;
+ int *p = &x;
+ static_assert(is_same<decltype(p), int*>::value, "");
+}
+
+void test_decltype_param(int *p) {
+ static_assert(is_same<decltype(p), int*>::value, "");
+}
+
+// --- auto deduction ---
+
+void test_auto_deduction() {
+ int x = 0;
+ auto p = &x;
+ static_assert(is_same<decltype(p), int*>::value, "");
+}
+
+// --- template argument deduction ---
+
+template<typename T>
+void accept(T) {
+ static_assert(is_same<T, int*>::value, "");
+}
+
+void test_template_deduction() {
+ int x = 0;
+ int *p = &x;
+ accept(p);
+}
+
+// --- explicit template argument matching ---
+
+template<typename T> void accept_ptr(T*) {}
+
+void test_explicit_template_arg() {
+ int x = 0;
+ int *p = &x;
+ accept_ptr<int>(p); // must match int*, not int* _Null_unspecified
+}
+
+// --- Nullability qualifiers are sugar: all compare equal ---
+
+void test_nullability_is_sugar() {
+ int x;
+ int *bare = &x;
+ int * _Nullable nullable = &x;
+ int * _Nonnull nonnull = &x;
+ int * _Null_unspecified unspec = &x;
+
+ // All four are the same type for is_same purposes
+ static_assert(is_same<decltype(bare), decltype(nullable)>::value, "");
+ static_assert(is_same<decltype(bare), decltype(nonnull)>::value, "");
+ static_assert(is_same<decltype(bare), decltype(unspec)>::value, "");
+}
+
+// --- Function return type deduction ---
+
+auto make_ptr() {
+ int *p = new int(42);
+ return p;
+}
+
+void test_return_type_deduction() {
+ static_assert(is_same<decltype(make_ptr()), int*>::value, "");
+}
+
+// --- Const pointer ---
+
+void test_const_ptr() {
+ const int x = 0;
+ const int *p = &x;
+ static_assert(is_same<decltype(p), const int*>::value, "");
+}
+
+// --- Pointer to pointer ---
+
+void test_ptr_to_ptr() {
+ int x;
+ int *p = &x;
+ int **pp = &p;
+ static_assert(is_same<decltype(pp), int**>::value, "");
+}
+
+#pragma clang assume_nonnull end
diff --git a/clang/test/Sema/flow-nullability-unannotated-fp.cpp b/clang/test/Sema/flow-nullability-unannotated-fp.cpp
new file mode 100644
index 0000000000000..f9ac1d65dd173
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-unannotated-fp.cpp
@@ -0,0 +1,44 @@
+// Tests for unannotated pointers under nullable-default.
+// Under -fnullability-default=nullable, unannotated pointers are nullable.
+// These patterns correctly warn — the fix is to add null checks or _Nonnull.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+typedef unsigned long size_t;
+typedef unsigned char uint8_t;
+
+// --- Case 1: array subscript on unannotated pointer parameter ---
+// buffers is nullable → subscript is a deref → warns.
+
+inline bool getData(const uint8_t** buffers, int readIndex) {
+ auto buffer = buffers[readIndex]; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ return buffer != nullptr;
+}
+
+// --- Case 2: lambda deleter parameter ---
+// w is nullable under the default → dereference warns.
+
+struct Widget {
+ int x;
+ ~Widget() {}
+};
+
+void test_deleter() {
+ auto* ptr = new Widget;
+ auto deleter = [](Widget* w) {
+ w->~Widget(); // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ };
+ deleter(ptr);
+}
+
+// --- Case 3: return value from function doing pointer arithmetic on this ---
+// getBuffer() return type is nullable → array subscript warns.
+
+struct Buffer {
+ int offset;
+ uint8_t* getBuffer() {
+ return reinterpret_cast<uint8_t*>(this) + offset;
+ }
+ void use() {
+ uint8_t val = getBuffer()[0]; // expected-warning{{dereference of nullable pointer}} expected-note{{add a null check}}
+ }
+};
diff --git a/clang/test/Sema/flow-nullability-void-star-cast.cpp b/clang/test/Sema/flow-nullability-void-star-cast.cpp
new file mode 100644
index 0000000000000..d24bbca27bcd4
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-void-star-cast.cpp
@@ -0,0 +1,44 @@
+// Test: void* casts with nullable-default
+// With -fnullability-default=nullable, unannotated void* params are nullable.
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+
+struct Data {
+ int value;
+};
+
+// void* param is nullable under the default — dereferences warn
+void test_void_star_cast_deref(void* obj) {
+ Data* p = static_cast<Data*>(obj);
+ *p = Data{42}; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+ p->value = 1; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+// reinterpret_cast to void** — source obj is nullable
+void test_reinterpret_cast_void_star_star(void* obj) {
+ *reinterpret_cast<void**>(obj) = nullptr; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+// Explicit _Nullable also warns
+void test_nullable_void_star(void* _Nullable obj) {
+ Data* p = static_cast<Data*>(obj);
+ *p = Data{42}; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+// Double-pointer casts — source obj is nullable
+void test_double_ptr_cast(void* obj) {
+ *reinterpret_cast<void**>(obj) = nullptr; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+ *reinterpret_cast<int**>(obj) = nullptr; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+void test_double_ptr_local(void* obj) {
+ void** pp = reinterpret_cast<void**>(obj);
+ *pp = nullptr; // expected-warning {{dereference of nullable pointer}} expected-note {{add a null check}}
+}
+
+// With null check, no warning
+void test_void_star_checked(void* obj) {
+ if (obj) {
+ Data* p = static_cast<Data*>(obj);
+ *p = Data{42}; // OK — obj was checked
+ }
+}
diff --git a/clang/test/Sema/flow-nullability-warning-groups.cpp b/clang/test/Sema/flow-nullability-warning-groups.cpp
new file mode 100644
index 0000000000000..7d0978977ea0f
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-warning-groups.cpp
@@ -0,0 +1,20 @@
+// Tests for warning group suppression and control.
+//
+// -Wno-flow-nullable-dereference suppresses the warning:
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wno-flow-nullable-dereference -verify=suppressed %s
+//
+// Parent group -Wno-flow-nullability also suppresses:
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Wno-flow-nullability -verify=suppressed %s
+//
+// -Werror=flow-nullable-dereference promotes to error:
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -Werror=flow-nullable-dereference -verify=werror %s
+//
+// cc1 rejects invalid -fnullability-default value:
+// RUN: not %clang_cc1 -fnullability-default=invalid %s 2>&1 | FileCheck %s
+// CHECK: error: invalid value 'invalid' in '-fnullability-default=invalid'
+
+// suppressed-no-diagnostics
+
+void test(int * _Nullable p) {
+ *p = 42; // werror-error {{dereference of nullable pointer}} werror-note {{add a null check}}
+}
diff --git a/clang/test/Sema/flow-nullability-while-loop.cpp b/clang/test/Sema/flow-nullability-while-loop.cpp
new file mode 100644
index 0000000000000..87c78aae3882a
--- /dev/null
+++ b/clang/test/Sema/flow-nullability-while-loop.cpp
@@ -0,0 +1,38 @@
+// RUN: %clang_cc1 -fsyntax-only -fflow-sensitive-nullability -fnullability-default=nullable -std=c++17 %s -verify
+// expected-no-diagnostics
+
+struct Node {
+ int value;
+ Node* _Nullable next;
+};
+
+Node* _Nullable getNode();
+
+#pragma clang assume_nonnull begin
+
+void test_while_basic(Node* _Nullable p) {
+ while (p) {
+ p->value = 1; // OK - p narrowed by while condition
+ }
+}
+
+void test_while_linked_list(Node* _Nullable head) {
+ Node* _Nullable p = head;
+ while (p) {
+ p->value = 0;
+ p = p->next; // OK - p narrowed, so p->next is safe
+ }
+}
+
+void test_while_nested(Node* _Nullable p) {
+ while (p) {
+ Node* _Nullable q = p->next;
+ while (q) {
+ q->value = p->value; // OK - both narrowed
+ q = q->next;
+ }
+ p = p->next;
+ }
+}
+
+#pragma clang assume_nonnull end
More information about the cfe-commits
mailing list