[clang] Thread Safety Analysis: Support reentrant capabilities (PR #137133)
Marco Elver via cfe-commits
cfe-commits at lists.llvm.org
Wed Apr 30 00:28:01 PDT 2025
https://github.com/melver updated https://github.com/llvm/llvm-project/pull/137133
>From a8319028f08192ca6140beed7f27a83a829c6d84 Mon Sep 17 00:00:00 2001
From: Marco Elver <elver at google.com>
Date: Wed, 23 Apr 2025 11:31:25 +0200
Subject: [PATCH 1/2] Thread Safety Analysis: Convert CapabilityExpr::CapExpr
to hold flags
Rather than holding a single bool, switch it to hold a set of flags,
which is more descriptive and simplifies adding more flags in
subsequent changes.
NFC.
---
.../clang/Analysis/Analyses/ThreadSafetyCommon.h | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index e99c5b2466334..6e46a2d721463 100644
--- a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -271,26 +271,28 @@ class CFGWalker {
// translateAttrExpr needs it, but that should be moved too.
class CapabilityExpr {
private:
- /// The capability expression and whether it's negated.
- llvm::PointerIntPair<const til::SExpr *, 1, bool> CapExpr;
+ static constexpr unsigned FlagNegative = 1u << 0;
+
+ /// The capability expression and flags.
+ llvm::PointerIntPair<const til::SExpr *, 1, unsigned> CapExpr;
/// The kind of capability as specified by @ref CapabilityAttr::getName.
StringRef CapKind;
public:
- CapabilityExpr() : CapExpr(nullptr, false) {}
+ CapabilityExpr() : CapExpr(nullptr, 0) {}
CapabilityExpr(const til::SExpr *E, StringRef Kind, bool Neg)
- : CapExpr(E, Neg), CapKind(Kind) {}
+ : CapExpr(E, Neg ? FlagNegative : 0), CapKind(Kind) {}
// Don't allow implicitly-constructed StringRefs since we'll capture them.
template <typename T> CapabilityExpr(const til::SExpr *, T, bool) = delete;
const til::SExpr *sexpr() const { return CapExpr.getPointer(); }
StringRef getKind() const { return CapKind; }
- bool negative() const { return CapExpr.getInt(); }
+ bool negative() const { return CapExpr.getInt() & FlagNegative; }
CapabilityExpr operator!() const {
- return CapabilityExpr(CapExpr.getPointer(), CapKind, !CapExpr.getInt());
+ return CapabilityExpr(CapExpr.getPointer(), CapKind, !negative());
}
bool equals(const CapabilityExpr &other) const {
>From 427dcd2c396fbd454129abb3d3f2d572f87eadbc Mon Sep 17 00:00:00 2001
From: Marco Elver <elver at google.com>
Date: Thu, 24 Apr 2025 09:02:08 +0200
Subject: [PATCH 2/2] Thread Safety Analysis: Support reentrant capabilities
Introduce the `reentrant_capability` attribute, which may be specified
alongside the `capability(..)` attribute to denote that the defined
capability type is reentrant. Marking a capability as reentrant means
that acquiring the same capability multiple times is safe, and the
analysis will not warn on attempted re-acquisition.
The most significant changes required are plumbing to propagate whether
the attribute is present to a CapabilityExpr, and introducing a
reentrancy depth to FactEntry that can be incremented while a fact
remains in the FactSet.
Care was taken to avoid increasing the size of either CapabilityExpr or
FactEntry, by allocating free bits of CapabilityExpr::CapExpr and
FactEntry's bitset respectively.
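To illustrate the intended usage, here is a minimal sketch distilled from the
tests added in this patch, written with GNU-style attribute spellings (the
class and variable names are illustrative only):

  // A capability type annotated as reentrant.
  class __attribute__((capability("mutex"), reentrant_capability)) ReentrantMutex {
  public:
    void Lock() __attribute__((exclusive_lock_function));
    void Unlock() __attribute__((unlock_function));
  };

  ReentrantMutex rmu;
  int data __attribute__((guarded_by(rmu)));

  void example() {
    rmu.Lock();
    rmu.Lock();    // OK: reentrant re-acquisition, no double-lock warning.
    data = 1;
    rmu.Unlock();  // 'rmu' remains held; only the reentrancy depth decreases.
    data = 2;
    rmu.Unlock();  // Fully released; further writes to 'data' would warn.
  }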
---
clang/docs/ReleaseNotes.rst | 1 +
clang/docs/ThreadSafetyAnalysis.rst | 18 +
.../clang/Analysis/Analyses/ThreadSafety.h | 13 +-
.../Analysis/Analyses/ThreadSafetyCommon.h | 21 +-
clang/include/clang/Basic/Attr.td | 7 +
.../clang/Basic/DiagnosticSemaKinds.td | 6 +
clang/lib/Analysis/ThreadSafety.cpp | 134 +++++--
clang/lib/Analysis/ThreadSafetyCommon.cpp | 66 ++--
clang/lib/Sema/AnalysisBasedWarnings.cpp | 3 +
clang/lib/Sema/SemaDeclAttr.cpp | 10 +
...a-attribute-supported-attributes-list.test | 1 +
clang/test/Sema/warn-thread-safety-analysis.c | 20 +
.../test/SemaCXX/thread-safety-annotations.h | 1 +
.../SemaCXX/warn-thread-safety-analysis.cpp | 356 ++++++++++++++++++
.../SemaCXX/warn-thread-safety-parsing.cpp | 7 +
clang/unittests/AST/ASTImporterTest.cpp | 9 +
16 files changed, 598 insertions(+), 75 deletions(-)
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 80399e458aec3..2a6565e37e256 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -369,6 +369,7 @@ Improvements to Clang's diagnostics
as function arguments or return value respectively. Note that
:doc:`ThreadSafetyAnalysis` still does not perform alias analysis. The
feature will be default-enabled with ``-Wthread-safety`` in a future release.
+- The :doc:`ThreadSafetyAnalysis` now supports reentrant capabilities.
- Clang will now do a better job producing common nested names, when producing
common types for ternary operator, template argument deduction and multiple return auto deduction.
- The ``-Wsign-compare`` warning now treats expressions with bitwise not(~) and minus(-) as signed integers
diff --git a/clang/docs/ThreadSafetyAnalysis.rst b/clang/docs/ThreadSafetyAnalysis.rst
index 130069c5659d6..4fc7ff28e9931 100644
--- a/clang/docs/ThreadSafetyAnalysis.rst
+++ b/clang/docs/ThreadSafetyAnalysis.rst
@@ -434,6 +434,21 @@ class can be used as a capability. The string argument specifies the kind of
capability in error messages, e.g. ``"mutex"``. See the ``Container`` example
given above, or the ``Mutex`` class in :ref:`mutexheader`.
+REENTRANT_CAPABILITY
+--------------------
+
+``REENTRANT_CAPABILITY`` is an attribute on capability classes, denoting that
+they are reentrant. Marking a capability as reentrant means that acquiring the
+same capability multiple times is safe. Acquiring the same capability again
+with different access privileges (exclusive vs. shared) is not considered
+reentrant by the analysis.
+
+Note: In many cases this attribute is only required where a capability is
+acquired reentrantly within the same function, such as via macros or other
+helpers. Otherwise, best practice is to avoid explicitly acquiring a capability
+multiple times within the same function, and to let the analysis produce
+warnings on double-acquisition attempts.
+
.. _scoped_capability:
SCOPED_CAPABILITY
@@ -846,6 +861,9 @@ implementation.
#define CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
+ #define REENTRANT_CAPABILITY \
+ THREAD_ANNOTATION_ATTRIBUTE__(reentrant_capability)
+
#define SCOPED_CAPABILITY \
THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
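With this macro in place, a reentrant capability class can be declared using
the macros from the example header, e.g. (a sketch; 'RecursiveMutex' is an
illustrative name, and ACQUIRE/RELEASE are the macros already defined in the
same header):

  class REENTRANT_CAPABILITY CAPABILITY("mutex") RecursiveMutex {
  public:
    void Lock() ACQUIRE();
    void Unlock() RELEASE();
  };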
diff --git a/clang/include/clang/Analysis/Analyses/ThreadSafety.h b/clang/include/clang/Analysis/Analyses/ThreadSafety.h
index 20b75c46593e0..7e0e25b9aab1d 100644
--- a/clang/include/clang/Analysis/Analyses/ThreadSafety.h
+++ b/clang/include/clang/Analysis/Analyses/ThreadSafety.h
@@ -94,16 +94,17 @@ enum AccessKind {
/// This enum distinguishes between different situations where we warn due to
/// inconsistent locking.
-/// \enum SK_LockedSomeLoopIterations -- a mutex is locked for some but not all
-/// loop iterations.
-/// \enum SK_LockedSomePredecessors -- a mutex is locked in some but not all
-/// predecessors of a CFGBlock.
-/// \enum SK_LockedAtEndOfFunction -- a mutex is still locked at the end of a
-/// function.
enum LockErrorKind {
+ /// A capability is locked for some but not all loop iterations.
LEK_LockedSomeLoopIterations,
+ /// A capability is locked in some but not all predecessors of a CFGBlock.
LEK_LockedSomePredecessors,
+ /// A capability is still locked at the end of a function.
LEK_LockedAtEndOfFunction,
+ /// A capability is locked with mismatching reentrancy depth in predecessors
+ /// of a CFGBlock.
+ LEK_LockedReentrancyMismatch,
+  /// Expecting a capability to be held at the end of a function.
LEK_NotLockedAtEndOfFunction
};
diff --git a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index 6e46a2d721463..bebd03deb2872 100644
--- a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -22,6 +22,7 @@
#define LLVM_CLANG_ANALYSIS_ANALYSES_THREADSAFETYCOMMON_H
#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
@@ -272,27 +273,33 @@ class CFGWalker {
class CapabilityExpr {
private:
static constexpr unsigned FlagNegative = 1u << 0;
+ static constexpr unsigned FlagReentrant = 1u << 1;
/// The capability expression and flags.
- llvm::PointerIntPair<const til::SExpr *, 1, unsigned> CapExpr;
+ llvm::PointerIntPair<const til::SExpr *, 2, unsigned> CapExpr;
/// The kind of capability as specified by @ref CapabilityAttr::getName.
StringRef CapKind;
public:
CapabilityExpr() : CapExpr(nullptr, 0) {}
- CapabilityExpr(const til::SExpr *E, StringRef Kind, bool Neg)
- : CapExpr(E, Neg ? FlagNegative : 0), CapKind(Kind) {}
+ CapabilityExpr(const til::SExpr *E, StringRef Kind, bool Neg, bool Reentrant)
+ : CapExpr(E, (Neg ? FlagNegative : 0) | (Reentrant ? FlagReentrant : 0)),
+ CapKind(Kind) {}
+ CapabilityExpr(const til::SExpr *E, QualType QT, bool Neg);
// Don't allow implicitly-constructed StringRefs since we'll capture them.
- template <typename T> CapabilityExpr(const til::SExpr *, T, bool) = delete;
+ template <typename T>
+ CapabilityExpr(const til::SExpr *, T, bool, bool) = delete;
const til::SExpr *sexpr() const { return CapExpr.getPointer(); }
StringRef getKind() const { return CapKind; }
bool negative() const { return CapExpr.getInt() & FlagNegative; }
+ bool reentrant() const { return CapExpr.getInt() & FlagReentrant; }
CapabilityExpr operator!() const {
- return CapabilityExpr(CapExpr.getPointer(), CapKind, !negative());
+ return CapabilityExpr(CapExpr.getPointer(), CapKind, !negative(),
+ reentrant());
}
bool equals(const CapabilityExpr &other) const {
@@ -389,10 +396,6 @@ class SExprBuilder {
// Translate a variable reference.
til::LiteralPtr *createVariable(const VarDecl *VD);
- // Create placeholder for this: we don't know the VarDecl on construction yet.
- std::pair<til::LiteralPtr *, StringRef>
- createThisPlaceholder(const Expr *Exp);
-
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index dcdcff8c46fe2..6e38b11a816af 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -4003,6 +4003,13 @@ def LocksExcluded : InheritableAttr {
let Documentation = [Undocumented];
}
+def ReentrantCapability : InheritableAttr {
+ let Spellings = [Clang<"reentrant_capability">];
+ let Subjects = SubjectList<[Record, TypedefName]>;
+ let Documentation = [Undocumented];
+ let SimpleHandler = 1;
+}
+
// C/C++ consumed attributes.
def Consumable : InheritableAttr {
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index ad5bf26be2590..f6bb0be71ca7e 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -4045,6 +4045,9 @@ def warn_thread_attribute_not_on_scoped_lockable_param : Warning<
"%0 attribute applies to function parameters only if their type is a "
"reference to a 'scoped_lockable'-annotated type">,
InGroup<ThreadSafetyAttributes>, DefaultIgnore;
+def warn_reentrant_capability_without_capability : Warning<
+ "ignoring %0 attribute on %1 without 'capability' attribute">,
+ InGroup<ThreadSafetyAttributes>, DefaultIgnore;
def err_attribute_argument_out_of_bounds_extra_info : Error<
"%0 attribute parameter %1 is out of bounds: "
"%plural{0:no parameters to index into|"
@@ -4073,6 +4076,9 @@ def warn_lock_some_predecessors : Warning<
def warn_expecting_lock_held_on_loop : Warning<
"expecting %0 '%1' to be held at start of each loop">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
+def warn_lock_reentrancy_mismatch : Warning<
+ "expecting %0 '%1' to be held with matching reentrancy depth on every path through here">,
+ InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def note_locked_here : Note<"%0 acquired here">;
def note_unlocked_here : Note<"%0 released here">;
def warn_lock_exclusive_and_shared : Warning<
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index 7e86af6b4a317..e02443a09ff50 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -99,8 +99,6 @@ class FactSet;
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
-///
-/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
public:
enum FactEntryKind { Lockable, ScopedLockable };
@@ -151,6 +149,19 @@ class FactEntry : public CapabilityExpr {
bool FullyRemove,
ThreadSafetyHandler &Handler) const = 0;
+  // Return an updated FactEntry if we can acquire this capability
+  // reentrantly, nullptr otherwise.
+ virtual std::unique_ptr<FactEntry> tryReenter(LockKind ReenterKind) const {
+ return nullptr;
+ }
+
+  // Return an updated FactEntry if we are releasing a capability previously
+  // acquired reentrantly, nullptr otherwise.
+ virtual std::unique_ptr<FactEntry> leaveReentrant() const { return nullptr; }
+
+ // Return the reentrancy depth.
+ virtual unsigned int getReentrancyDepth() const { return 0; }
+
// Return true if LKind >= LK, where exclusive > shared
bool isAtLeast(LockKind LK) const {
return (LKind == LK_Exclusive) || (LK == LK_Shared);
@@ -168,6 +179,8 @@ class FactManager {
public:
FactID newFact(std::unique_ptr<FactEntry> Entry) {
Facts.push_back(std::move(Entry));
+ assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
+ "FactID space exhausted");
return static_cast<unsigned short>(Facts.size() - 1);
}
@@ -235,6 +248,20 @@ class FactSet {
return false;
}
+ std::optional<FactID> replaceLock(FactManager &FM, iterator It,
+ std::unique_ptr<FactEntry> Entry) {
+ if (It == end())
+ return std::nullopt;
+ FactID F = FM.newFact(std::move(Entry));
+ *It = F;
+ return F;
+ }
+
+ std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
+ std::unique_ptr<FactEntry> Entry) {
+ return replaceLock(FM, findLockIter(FM, CapE), std::move(Entry));
+ }
+
iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
return std::find_if(begin(), end(), [&](FactID ID) {
return FM[ID].matches(CapE);
@@ -859,6 +886,10 @@ static void findBlockLocations(CFG *CFGraph,
namespace {
class LockableFactEntry : public FactEntry {
+private:
+ /// Reentrancy depth.
+ unsigned int ReentrancyDepth = 0;
+
public:
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
SourceKind Src = Acquired)
@@ -876,14 +907,25 @@ class LockableFactEntry : public FactEntry {
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
ThreadSafetyHandler &Handler) const override {
- Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
- entry.loc());
+ if (std::unique_ptr<FactEntry> RFact = tryReenter(entry.kind())) {
+ // This capability has been reentrantly acquired.
+ FSet.replaceLock(FactMan, entry, std::move(RFact));
+ } else {
+ Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
+ entry.loc());
+ }
}
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
bool FullyRemove,
ThreadSafetyHandler &Handler) const override {
+ if (std::unique_ptr<FactEntry> RFact = leaveReentrant()) {
+ // This capability remains reentrantly acquired.
+ FSet.replaceLock(FactMan, Cp, std::move(RFact));
+ return;
+ }
+
FSet.removeLock(FactMan, Cp);
if (!Cp.negative()) {
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
@@ -894,6 +936,30 @@ class LockableFactEntry : public FactEntry {
static bool classof(const FactEntry *A) {
return A->getFactEntryKind() == Lockable;
}
+
+ std::unique_ptr<FactEntry> tryReenter(LockKind ReenterKind) const override {
+ if (!reentrant())
+ return nullptr;
+ if (kind() != ReenterKind)
+ return nullptr;
+ auto NewFact = std::make_unique<LockableFactEntry>(*this);
+ NewFact->ReentrancyDepth++;
+ // With ReentrancyDepth being an unsigned int, and FactID being an unsigned
+ // short, it should currently not be possible that ReentrancyDepth would
+ // overflow before FactID overflows.
+ assert(NewFact->ReentrancyDepth != 0 && "Reentrancy depth overflow");
+ return NewFact;
+ }
+
+ std::unique_ptr<FactEntry> leaveReentrant() const override {
+ if (!ReentrancyDepth)
+ return nullptr;
+ auto NewFact = std::make_unique<LockableFactEntry>(*this);
+ NewFact->ReentrancyDepth--;
+ return NewFact;
+ }
+
+ unsigned int getReentrancyDepth() const override { return ReentrancyDepth; }
};
class ScopedLockableFactEntry : public FactEntry {
@@ -996,10 +1062,14 @@ class ScopedLockableFactEntry : public FactEntry {
void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
LockKind kind, SourceLocation loc,
ThreadSafetyHandler *Handler) const {
- if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
- if (Handler)
- Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(),
- loc);
+ if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
+ const FactEntry &Fact = FactMan[*It];
+ if (std::unique_ptr<FactEntry> RFact = Fact.tryReenter(kind)) {
+ // This capability has been reentrantly acquired.
+ FSet.replaceLock(FactMan, It, std::move(RFact));
+ } else if (Handler) {
+ Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
+ }
} else {
FSet.removeLock(FactMan, !Cp);
FSet.addLock(FactMan,
@@ -1009,7 +1079,14 @@ class ScopedLockableFactEntry : public FactEntry {
void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
SourceLocation loc, ThreadSafetyHandler *Handler) const {
- if (FSet.findLock(FactMan, Cp)) {
+ if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
+ const FactEntry &Fact = FactMan[*It];
+ if (std::unique_ptr<FactEntry> RFact = Fact.leaveReentrant()) {
+ // This capability remains reentrantly acquired.
+ FSet.replaceLock(FactMan, It, std::move(RFact));
+ return;
+ }
+
FSet.removeLock(FactMan, Cp);
FSet.addLock(FactMan,
std::make_unique<LockableFactEntry>(!Cp, LK_Exclusive, loc));
@@ -1287,7 +1364,6 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
Entry->loc(), Entry->getKind());
}
- // FIXME: Don't always warn when we have support for reentrant locks.
if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
if (!Entry->asserted())
Cp->handleLock(FSet, FactMan, *Entry, Handler);
@@ -1812,15 +1888,15 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
assert(!Self);
const auto *TagT = Exp->getType()->getAs<TagType>();
if (D->hasAttrs() && TagT && Exp->isPRValue()) {
- std::pair<til::LiteralPtr *, StringRef> Placeholder =
- Analyzer->SxBuilder.createThisPlaceholder(Exp);
+ til::LiteralPtr *Placeholder =
+ Analyzer->SxBuilder.createVariable(nullptr);
[[maybe_unused]] auto inserted =
- Analyzer->ConstructedObjects.insert({Exp, Placeholder.first});
+ Analyzer->ConstructedObjects.insert({Exp, Placeholder});
assert(inserted.second && "Are we visiting the same expression again?");
if (isa<CXXConstructExpr>(Exp))
- Self = Placeholder.first;
+ Self = Placeholder;
if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
- Scp = CapabilityExpr(Placeholder.first, Placeholder.second, false);
+ Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
}
assert(Loc.isInvalid());
@@ -1958,12 +2034,13 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
if (DeclaredLocks.empty())
continue;
CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
- StringRef("mutex"), false);
+ StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
Cp.isInvalid() && CBTE) {
if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
Object != Analyzer->ConstructedObjects.end())
- Cp = CapabilityExpr(Object->second, StringRef("mutex"), false);
+ Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
+ /*Reentrant=*/false);
}
const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
if (!Fact) {
@@ -2303,14 +2380,24 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
FactSet EntrySetOrig = EntrySet;
// Find locks in ExitSet that conflict or are not in EntrySet, and warn.
- for (const auto &Fact : ExitSet) {
- const FactEntry &ExitFact = FactMan[Fact];
+ for (const auto &ExitID : ExitSet) {
+ const FactEntry &ExitFact = FactMan[ExitID];
FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
if (EntryIt != EntrySet.end()) {
- if (join(FactMan[*EntryIt], ExitFact,
- EntryLEK != LEK_LockedSomeLoopIterations))
- *EntryIt = Fact;
+ const FactEntry &EntryFact = FactMan[*EntryIt];
+ if (join(EntryFact, ExitFact, EntryLEK != LEK_LockedSomeLoopIterations))
+ *EntryIt = ExitID;
+
+ if (EntryFact.getReentrancyDepth() != ExitFact.getReentrancyDepth()) {
+ // Pick the FactEntry with the greater reentrancy depth as the "good"
+ // fact to reduce potential later warnings.
+ if (EntryFact.getReentrancyDepth() < ExitFact.getReentrancyDepth())
+ *EntryIt = ExitID;
+ Handler.handleMutexHeldEndOfScope(
+ ExitFact.getKind(), ExitFact.toString(), ExitFact.loc(), JoinLoc,
+ LEK_LockedReentrancyMismatch);
+ }
} else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
EntryLEK, Handler);
@@ -2472,7 +2559,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
}
if (UnderlyingLocks.empty())
continue;
- CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(), false);
+ CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(),
+ /*Neg=*/false, /*Reentrant=*/false);
auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(
Cp, Param->getLocation(), FactEntry::Declared);
for (const CapabilityExpr &M : UnderlyingLocks)
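The join logic above drives the new LEK_LockedReentrancyMismatch diagnostic:
when two predecessors reach a merge point holding the same capability at
different reentrancy depths, a warning is emitted and the deeper fact is kept
to avoid follow-on warnings. A sketch of code that should trigger it, reusing
the ReentrantMutex sketch from above and mirroring the
testReentrantIntersectionBad test added further below:

  void mismatch(ReentrantMutex &rmu, int &val) {
    rmu.Lock();
    if (val)
      rmu.Lock();  // Re-acquired on this path only, so depths differ at the join.
    // warning: expecting mutex 'rmu' to be held with matching reentrancy depth
    // on every path through here
    val = 1;
    rmu.Unlock();  // No further warning: the join kept the deeper fact.
    rmu.Unlock();
  }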
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index 13cd7e26dc16f..623c81b44f488 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -81,28 +81,6 @@ static bool isCalleeArrow(const Expr *E) {
return ME ? ME->isArrow() : false;
}
-static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
- return A->getName();
-}
-
-static StringRef ClassifyDiagnostic(QualType VDT) {
- // We need to look at the declaration of the type of the value to determine
- // which it is. The type should either be a record or a typedef, or a pointer
- // or reference thereof.
- if (const auto *RT = VDT->getAs<RecordType>()) {
- if (const auto *RD = RT->getDecl())
- if (const auto *CA = RD->getAttr<CapabilityAttr>())
- return ClassifyDiagnostic(CA);
- } else if (const auto *TT = VDT->getAs<TypedefType>()) {
- if (const auto *TD = TT->getDecl())
- if (const auto *CA = TD->getAttr<CapabilityAttr>())
- return ClassifyDiagnostic(CA);
- } else if (VDT->isPointerOrReferenceType())
- return ClassifyDiagnostic(VDT->getPointeeType());
-
- return "mutex";
-}
-
/// Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
@@ -170,9 +148,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// If the attribute has no arguments, then assume the argument is "this".
if (!AttrExp)
return CapabilityExpr(
- Self,
- ClassifyDiagnostic(
- cast<CXXMethodDecl>(D)->getFunctionObjectParameterType()),
+ Self, cast<CXXMethodDecl>(D)->getFunctionObjectParameterType(),
false);
else // For most attributes.
return translateAttrExpr(AttrExp, &Ctx);
@@ -197,7 +173,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// The "*" expr is a universal lock, which essentially turns off
// checks until it is removed from the lockset.
return CapabilityExpr(new (Arena) til::Wildcard(), StringRef("wildcard"),
- false);
+ /*Neg=*/false, /*Reentrant=*/false);
else
// Ignore other string literals for now.
return CapabilityExpr();
@@ -217,33 +193,25 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
}
}
- til::SExpr *E = translate(AttrExp, Ctx);
+ const til::SExpr *E = translate(AttrExp, Ctx);
// Trap mutex expressions like nullptr, or 0.
// Any literal value is nonsense.
if (!E || isa<til::Literal>(E))
return CapabilityExpr();
- StringRef Kind = ClassifyDiagnostic(AttrExp->getType());
-
// Hack to deal with smart pointers -- strip off top-level pointer casts.
if (const auto *CE = dyn_cast<til::Cast>(E)) {
if (CE->castOpcode() == til::CAST_objToPtr)
- return CapabilityExpr(CE->expr(), Kind, Neg);
+ E = CE->expr();
}
- return CapabilityExpr(E, Kind, Neg);
+ return CapabilityExpr(E, AttrExp->getType(), Neg);
}
til::LiteralPtr *SExprBuilder::createVariable(const VarDecl *VD) {
return new (Arena) til::LiteralPtr(VD);
}
-std::pair<til::LiteralPtr *, StringRef>
-SExprBuilder::createThisPlaceholder(const Expr *Exp) {
- return {new (Arena) til::LiteralPtr(nullptr),
- ClassifyDiagnostic(Exp->getType())};
-}
-
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.
@@ -1011,6 +979,30 @@ void SExprBuilder::exitCFG(const CFGBlock *Last) {
IncompleteArgs.clear();
}
+static CapabilityExpr makeCapabilityExpr(const til::SExpr *E, QualType VDT,
+ bool Neg) {
+ // We need to look at the declaration of the type of the value to determine
+ // which it is. The type should either be a record or a typedef, or a pointer
+ // or reference thereof.
+ if (const auto *RT = VDT->getAs<RecordType>()) {
+ if (const auto *RD = RT->getDecl())
+ if (const auto *CA = RD->getAttr<CapabilityAttr>())
+ return CapabilityExpr(E, CA->getName(), Neg,
+ RD->hasAttr<ReentrantCapabilityAttr>());
+ } else if (const auto *TT = VDT->getAs<TypedefType>()) {
+ if (const auto *TD = TT->getDecl())
+ if (const auto *CA = TD->getAttr<CapabilityAttr>())
+ return CapabilityExpr(E, CA->getName(), Neg,
+ TD->hasAttr<ReentrantCapabilityAttr>());
+ } else if (VDT->isPointerOrReferenceType())
+ return makeCapabilityExpr(E, VDT->getPointeeType(), Neg);
+
+ return CapabilityExpr(E, StringRef("mutex"), Neg, false);
+}
+
+CapabilityExpr::CapabilityExpr(const til::SExpr *E, QualType QT, bool Neg)
+ : CapabilityExpr(makeCapabilityExpr(E, QT, Neg)) {}
+
#ifndef NDEBUG
namespace {
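Because the kind classification now happens in CapabilityExpr's QualType
constructor, the reentrancy flag is resolved wherever the capability type is
found, including typedef-based capabilities. A sketch mirroring the bitlock
typedef test added in this patch:

  // A reentrant capability declared on a typedef rather than a class.
  typedef int __attribute__((capability("bitlock"), reentrant_capability)) *bitlock_t;

  void bit_lock(bitlock_t l) __attribute__((exclusive_lock_function(l)));
  void bit_unlock(bitlock_t l) __attribute__((unlock_function(l)));

  void locker(bitlock_t bl) {
    bit_lock(bl);
    bit_lock(bl);    // OK: reentrant re-acquisition.
    bit_unlock(bl);
    bit_unlock(bl);  // Fully released.
  }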
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 2418aaf8de8e6..9cbe3e8b615b6 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -1918,6 +1918,9 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
case LEK_LockedAtEndOfFunction:
DiagID = diag::warn_no_unlock;
break;
+ case LEK_LockedReentrancyMismatch:
+ DiagID = diag::warn_lock_reentrancy_mismatch;
+ break;
case LEK_NotLockedAtEndOfFunction:
DiagID = diag::warn_expecting_locked;
break;
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 413999b95b998..92ae99177948c 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -7784,6 +7784,16 @@ void Sema::ProcessDeclAttributeList(
Diag(D->getLocation(), diag::err_designated_init_attr_non_init);
D->dropAttr<ObjCDesignatedInitializerAttr>();
}
+
+ // Do not permit 'reentrant_capability' without 'capability' attribute.
+ if (const auto *A = D->getAttr<ReentrantCapabilityAttr>()) {
+ if (!D->hasAttr<CapabilityAttr>()) {
+ Diag(AttrList.begin()->getLoc(),
+ diag::warn_reentrant_capability_without_capability)
+ << A << cast<NamedDecl>(D);
+ D->dropAttr<ReentrantCapabilityAttr>();
+ }
+ }
}
void Sema::ProcessDeclAttributeDelayed(Decl *D,
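Conversely, the new Sema check above warns and drops the attribute when it is
not paired with a capability attribute (a sketch; the type name is made up):

  // warning: ignoring 'reentrant_capability' attribute on 'NotACapability'
  // without 'capability' attribute
  struct __attribute__((reentrant_capability)) NotACapability {};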
diff --git a/clang/test/Misc/pragma-attribute-supported-attributes-list.test b/clang/test/Misc/pragma-attribute-supported-attributes-list.test
index 55f196625770a..4510c0b0c89c6 100644
--- a/clang/test/Misc/pragma-attribute-supported-attributes-list.test
+++ b/clang/test/Misc/pragma-attribute-supported-attributes-list.test
@@ -174,6 +174,7 @@
// CHECK-NEXT: PreserveNone (SubjectMatchRule_hasType_functionType)
// CHECK-NEXT: RandomizeLayout (SubjectMatchRule_record)
// CHECK-NEXT: ReadOnlyPlacement (SubjectMatchRule_record)
+// CHECK-NEXT: ReentrantCapability (SubjectMatchRule_record, SubjectMatchRule_type_alias)
// CHECK-NEXT: ReleaseHandle (SubjectMatchRule_variable_is_parameter)
// CHECK-NEXT: ReqdWorkGroupSize (SubjectMatchRule_function)
// CHECK-NEXT: Restrict (SubjectMatchRule_function)
diff --git a/clang/test/Sema/warn-thread-safety-analysis.c b/clang/test/Sema/warn-thread-safety-analysis.c
index c58b7bed61183..a974b5e49eafe 100644
--- a/clang/test/Sema/warn-thread-safety-analysis.c
+++ b/clang/test/Sema/warn-thread-safety-analysis.c
@@ -2,6 +2,7 @@
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety -Wthread-safety-pointer -Wthread-safety-beta -fexperimental-late-parse-attributes -DLATE_PARSING %s
#define LOCKABLE __attribute__ ((lockable))
+#define REENTRANT_CAPABILITY __attribute__ ((reentrant_capability))
#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable))
#define GUARDED_BY(...) __attribute__ ((guarded_by(__VA_ARGS__)))
#define GUARDED_VAR __attribute__ ((guarded_var))
@@ -216,6 +217,25 @@ int main(void) {
return 0;
}
+/*** Reentrancy test ***/
+struct REENTRANT_CAPABILITY LOCKABLE ReentrantMutex {};
+void reentrant_mutex_lock(struct ReentrantMutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
+void reentrant_mutex_unlock(struct ReentrantMutex *mu) UNLOCK_FUNCTION(mu);
+
+struct ReentrantMutex rmu;
+int r_ GUARDED_BY(&rmu);
+
+void test_reentrant(void) {
+ reentrant_mutex_lock(&rmu);
+ r_ = 1;
+ reentrant_mutex_lock(&rmu);
+ r_ = 1;
+ reentrant_mutex_unlock(&rmu);
+ r_ = 1;
+ reentrant_mutex_unlock(&rmu);
+ r_ = 1; // expected-warning{{writing variable 'r_' requires holding mutex 'rmu' exclusively}}
+}
+
// We had a problem where we'd skip all attributes that follow a late-parsed
// attribute in a single __attribute__.
void run(void) __attribute__((guarded_by(mu1), guarded_by(mu1))); // expected-warning 2{{only applies to non-static data members and global variables}}
diff --git a/clang/test/SemaCXX/thread-safety-annotations.h b/clang/test/SemaCXX/thread-safety-annotations.h
index d89bcf8ff4706..00d432da4b6f0 100644
--- a/clang/test/SemaCXX/thread-safety-annotations.h
+++ b/clang/test/SemaCXX/thread-safety-annotations.h
@@ -35,6 +35,7 @@
#define PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x)))
// Common
+#define REENTRANT_CAPABILITY __attribute__((reentrant_capability))
#define SCOPED_LOCKABLE __attribute__((scoped_lockable))
#define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
#define ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__)))
diff --git a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
index 2a04e820eb095..1683c5911cfb7 100644
--- a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
+++ b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
@@ -6812,3 +6812,359 @@ class PointerGuard {
}
};
} // namespace Derived_Smart_Pointer
+
+namespace Reentrancy {
+
+class REENTRANT_CAPABILITY LOCKABLE ReentrantMutex {
+ public:
+ void Lock() EXCLUSIVE_LOCK_FUNCTION();
+ void ReaderLock() SHARED_LOCK_FUNCTION();
+ void Unlock() UNLOCK_FUNCTION();
+ void ExclusiveUnlock() EXCLUSIVE_UNLOCK_FUNCTION();
+ void ReaderUnlock() SHARED_UNLOCK_FUNCTION();
+ bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);
+ bool ReaderTryLock() SHARED_TRYLOCK_FUNCTION(true);
+
+ // for negative capabilities
+ const ReentrantMutex& operator!() const { return *this; }
+
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK();
+ void AssertReaderHeld() ASSERT_SHARED_LOCK();
+};
+
+class SCOPED_LOCKABLE ReentrantMutexLock {
+ public:
+ ReentrantMutexLock(ReentrantMutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
+ ~ReentrantMutexLock() UNLOCK_FUNCTION();
+};
+
+class SCOPED_LOCKABLE ReentrantReaderMutexLock {
+ public:
+ ReentrantReaderMutexLock(ReentrantMutex *mu) SHARED_LOCK_FUNCTION(mu);
+ ~ReentrantReaderMutexLock() UNLOCK_FUNCTION();
+};
+
+class SCOPED_LOCKABLE RelockableReentrantMutexLock {
+public:
+ RelockableReentrantMutexLock(ReentrantMutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
+ ~RelockableReentrantMutexLock() EXCLUSIVE_UNLOCK_FUNCTION();
+
+ void Lock() EXCLUSIVE_LOCK_FUNCTION();
+ void Unlock() UNLOCK_FUNCTION();
+};
+
+ReentrantMutex rmu;
+int guard_var __attribute__((guarded_var)) = 0;
+int guardby_var __attribute__((guarded_by(rmu))) = 0;
+
+void testReentrantMany() {
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Unlock();
+ rmu.Unlock();
+ rmu.Unlock();
+ rmu.Unlock();
+ rmu.Unlock();
+ rmu.Unlock();
+ rmu.Unlock();
+ rmu.Unlock();
+}
+
+void testReentrantManyReader() {
+ rmu.ReaderLock();
+ rmu.ReaderLock();
+ rmu.ReaderLock();
+ rmu.ReaderLock();
+ rmu.ReaderLock();
+ rmu.ReaderLock();
+ rmu.ReaderLock();
+ rmu.ReaderLock();
+ rmu.ReaderUnlock();
+ rmu.ReaderUnlock();
+ rmu.ReaderUnlock();
+ rmu.ReaderUnlock();
+ rmu.ReaderUnlock();
+ rmu.ReaderUnlock();
+ rmu.ReaderUnlock();
+ rmu.ReaderUnlock();
+}
+
+void testReentrantLock1() {
+ rmu.Lock();
+ guard_var = 2;
+ rmu.Lock();
+ guard_var = 2;
+ rmu.Unlock();
+ guard_var = 2;
+ rmu.Unlock();
+ guard_var = 2; // expected-warning{{writing variable 'guard_var' requires holding any mutex exclusively}}
+}
+
+void testReentrantReaderLock1() {
+ rmu.ReaderLock();
+ int x = guard_var;
+ rmu.ReaderLock();
+ int y = guard_var;
+ rmu.ReaderUnlock();
+ int z = guard_var;
+ rmu.ReaderUnlock();
+ int a = guard_var; // expected-warning{{reading variable 'guard_var' requires holding any mutex}}
+}
+
+void testReentrantLock2() {
+ rmu.Lock();
+ guardby_var = 2;
+ rmu.Lock();
+ guardby_var = 2;
+ rmu.Unlock();
+ guardby_var = 2;
+ rmu.Unlock();
+ guardby_var = 2; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
+}
+
+void testReentrantReaderLock2() {
+ rmu.ReaderLock();
+ int x = guardby_var;
+ rmu.ReaderLock();
+ int y = guardby_var;
+ rmu.ReaderUnlock();
+ int z = guardby_var;
+ rmu.ReaderUnlock();
+ int a = guardby_var; // expected-warning{{reading variable 'guardby_var' requires holding mutex 'rmu'}}
+}
+
+void testReentrantTryLock1() {
+ if (rmu.TryLock()) {
+ guardby_var = 1;
+ if (rmu.TryLock()) {
+ guardby_var = 1;
+ rmu.Unlock();
+ }
+ guardby_var = 1;
+ rmu.Unlock();
+ }
+ guardby_var = 1; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
+}
+
+void testReentrantTryLock2() {
+ rmu.Lock();
+ guardby_var = 1;
+ if (rmu.TryLock()) {
+ guardby_var = 1;
+ rmu.Unlock();
+ }
+ guardby_var = 1;
+ rmu.Unlock();
+ guardby_var = 1; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
+}
+
+void testReentrantNotHeld() {
+ rmu.Unlock(); // \
+ // expected-warning{{releasing mutex 'rmu' that was not held}}
+}
+
+void testReentrantMissingUnlock() {
+ rmu.Lock(); // expected-note{{mutex acquired here}}
+ rmu.Lock(); // reenter
+ rmu.Unlock();
+} // expected-warning{{mutex 'rmu' is still held at the end of function}}
+
+// Acquiring the same capability with different access privileges is not
+// considered reentrant.
+void testMixedSharedExclusive() {
+ rmu.ReaderLock(); // expected-note{{mutex acquired here}}
+ rmu.Lock(); // expected-warning{{acquiring mutex 'rmu' that is already held}}
+ rmu.Unlock(); // expected-note{{mutex released here}}
+ rmu.ReaderUnlock(); // expected-warning{{releasing mutex 'rmu' that was not held}}
+}
+
+void testReentrantIntersection() {
+ rmu.Lock();
+ if (guardby_var) {
+ rmu.Lock();
+ rmu.Lock();
+ rmu.Unlock();
+ } else {
+ rmu.Lock();
+ guardby_var = 1;
+ }
+ guardby_var = 1;
+ rmu.Unlock();
+ guardby_var = 1;
+ rmu.Unlock();
+ guardby_var = 1; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
+}
+
+void testReentrantIntersectionBad() {
+ rmu.Lock(); // expected-note{{mutex acquired here}}
+ if (guardby_var) {
+ rmu.Lock();
+ rmu.Lock();
+ } else {
+ rmu.Lock();
+ guardby_var = 1;
+ }
+ guardby_var = 1; // expected-warning{{expecting mutex 'rmu' to be held with matching reentrancy depth on every path through here}}
+ rmu.Unlock();
+ guardby_var = 1;
+ rmu.Unlock();
+ guardby_var = 1;
+ rmu.Unlock();
+}
+
+void testLocksRequiredReentrant() EXCLUSIVE_LOCKS_REQUIRED(rmu) {
+ guardby_var = 1;
+ rmu.Lock();
+ rmu.Lock();
+ guardby_var = 1;
+ rmu.Unlock();
+ rmu.Unlock();
+ guardby_var = 1;
+}
+
+void testAssertReentrant() {
+ rmu.AssertHeld();
+ guardby_var = 1;
+ rmu.Lock();
+ guardby_var = 1;
+ rmu.Unlock();
+ guardby_var = 1;
+}
+
+void testAssertReaderReentrant() {
+ rmu.AssertReaderHeld();
+ int x = guardby_var;
+ rmu.ReaderLock();
+ int y = guardby_var;
+ rmu.ReaderUnlock();
+ int z = guardby_var;
+}
+
+struct TestScopedReentrantLockable {
+ ReentrantMutex mu1;
+ ReentrantMutex mu2;
+ int a __attribute__((guarded_by(mu1)));
+ int b __attribute__((guarded_by(mu2)));
+
+ bool getBool();
+
+ void foo1() {
+ ReentrantMutexLock mulock1(&mu1);
+ a = 5;
+ ReentrantMutexLock mulock2(&mu1);
+ a = 5;
+ }
+
+ void foo2() {
+ ReentrantMutexLock mulock1(&mu1);
+ a = 5;
+ mu1.Lock();
+ a = 5;
+ mu1.Unlock();
+ a = 5;
+ }
+
+#ifdef __cpp_guaranteed_copy_elision
+ void const_lock() {
+ const ReentrantMutexLock mulock1 = ReentrantMutexLock(&mu1);
+ a = 5;
+ const ReentrantMutexLock mulock2 = ReentrantMutexLock(&mu1);
+ a = 3;
+ }
+#endif
+
+ void temporary() {
+ ReentrantMutexLock{&mu1}, a = 1, ReentrantMutexLock{&mu1}, a = 5;
+ }
+
+ void lifetime_extension() {
+ const ReentrantMutexLock &mulock1 = ReentrantMutexLock(&mu1);
+ a = 5;
+ const ReentrantMutexLock &mulock2 = ReentrantMutexLock(&mu1);
+ a = 5;
+ }
+
+ void foo3() {
+ ReentrantReaderMutexLock mulock1(&mu1);
+ if (getBool()) {
+ ReentrantMutexLock mulock2a(&mu2);
+ b = a + 1;
+ }
+ else {
+ ReentrantMutexLock mulock2b(&mu2);
+ b = a + 2;
+ }
+ }
+
+ void foo4() {
+ ReentrantMutexLock mulock_a(&mu1);
+ ReentrantMutexLock mulock_b(&mu1);
+ }
+
+ void temporary_double_lock() {
+ ReentrantMutexLock mulock_a(&mu1);
+ ReentrantMutexLock{&mu1};
+ }
+
+ void foo5() {
+ ReentrantMutexLock mulock1(&mu1), mulock2(&mu2);
+ {
+ ReentrantMutexLock mulock3(&mu1), mulock4(&mu2);
+ a = b+1;
+ }
+ b = a+1;
+ }
+};
+
+void scopedDoubleUnlock() {
+ RelockableReentrantMutexLock scope(&rmu);
+ scope.Unlock(); // expected-note{{mutex released here}}
+ scope.Unlock(); // expected-warning {{releasing mutex 'rmu' that was not held}}
+}
+
+void scopedDoubleLock1() {
+ RelockableReentrantMutexLock scope(&rmu);
+ scope.Lock();
+ scope.Unlock();
+}
+
+void scopedDoubleLock2() {
+ RelockableReentrantMutexLock scope(&rmu);
+ scope.Unlock();
+ scope.Lock();
+ scope.Lock();
+ scope.Unlock();
+}
+
+typedef int REENTRANT_CAPABILITY __attribute__((capability("bitlock"))) *bitlock_t;
+void bit_lock(bitlock_t l) EXCLUSIVE_LOCK_FUNCTION(l);
+void bit_unlock(bitlock_t l) UNLOCK_FUNCTION(l);
+bitlock_t bl;
+void testReentrantTypedef() {
+ bit_lock(bl);
+ bit_lock(bl);
+ bit_unlock(bl);
+ bit_unlock(bl);
+}
+
+class TestNegativeWithReentrantMutex {
+ ReentrantMutex rmu;
+ int a GUARDED_BY(rmu);
+
+public:
+ void baz() EXCLUSIVE_LOCKS_REQUIRED(!rmu) {
+ rmu.Lock();
+ rmu.Lock();
+ a = 0;
+ rmu.Unlock();
+ rmu.Unlock();
+ }
+};
+
+} // namespace Reentrancy
diff --git a/clang/test/SemaCXX/warn-thread-safety-parsing.cpp b/clang/test/SemaCXX/warn-thread-safety-parsing.cpp
index 752803e4a0543..f75b82ada323c 100644
--- a/clang/test/SemaCXX/warn-thread-safety-parsing.cpp
+++ b/clang/test/SemaCXX/warn-thread-safety-parsing.cpp
@@ -3,6 +3,7 @@
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety -std=c++11 %s -D CPP11
#define LOCKABLE __attribute__ ((lockable))
+#define REENTRANT_CAPABILITY __attribute__ ((reentrant_capability))
#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable))
#define GUARDED_BY(x) __attribute__ ((guarded_by(x)))
#define GUARDED_VAR __attribute__ ((guarded_var))
@@ -260,6 +261,12 @@ class LFoo {
void l_function_params(int lvar LOCKABLE); // \
// expected-warning {{'lockable' attribute only applies to}}
+class REENTRANT_CAPABILITY LOCKABLE LTestReentrant { // order does not matter
+};
+
+class REENTRANT_CAPABILITY LTestReentrantOnly { // \
+ // expected-warning {{ignoring 'reentrant_capability' attribute on 'LTestReentrantOnly' without 'capability' attribute}}
+};
//-----------------------------------------//
// Scoped Lockable Attribute (sl)
diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp
index 4192faee1af80..843c44a2a1a4a 100644
--- a/clang/unittests/AST/ASTImporterTest.cpp
+++ b/clang/unittests/AST/ASTImporterTest.cpp
@@ -7993,6 +7993,15 @@ TEST_P(ImportAttributes, ImportLocksExcluded) {
checkImportVariadicArg(FromAttr->args(), ToAttr->args());
}
+TEST_P(ImportAttributes, ImportReentrantCapability) {
+ ReentrantCapabilityAttr *FromAttr, *ToAttr;
+ importAttr<CXXRecordDecl>(
+ R"(
+ struct __attribute__((capability("x"), reentrant_capability)) test {};
+ )",
+ FromAttr, ToAttr);
+}
+
TEST_P(ImportAttributes, ImportC99NoThrowAttr) {
NoThrowAttr *FromAttr, *ToAttr;
importAttr<FunctionDecl>("void test () __attribute__ ((__nothrow__));",