[Patch] Clang: allow scoped_lockable to acquire multiple locks
Ed Schouten
ed at 80386.nl
Tue Sep 2 14:19:49 PDT 2014
Hi Delesley, Aaron,
On 2 September 2014 22:44, Delesley Hutchins <delesley at google.com> wrote:
> (1) FactEntry needs a virtual destructor to avoid memory leaks.
Done!
> (2) I would like the different FactEntry classes to be declared together (at
> the top of the file), rather than split as they are currently.
The problem is that the *LockableFactEntry classes depend on FactSet
and FactManager, whereas FactSet and FactManager in turn depend on
FactEntry.
I could possibly declare them together, but in that case, the
implementations of handle*() would need to go elsewhere. I'm usually
not a big fan of that approach. Be sure to let me know if you'd prefer
that anyway.
On 2 September 2014 22:49, Aaron Ballman <aaron at aaronballman.com> wrote:
> I would not reformat the entire file at this point (that tends to be
> pretty noisy for little gain). Instead, you can run clang-format over
> the patch file itself. There's a pretty simple script for doing this
> described in the documentation:
> http://clang.llvm.org/docs/ClangFormat.html
That seems to work perfectly. I've just reformatted the code. Updated patch:
http://80386.nl/pub/20140902-clang-scoped_lockable-2.txt
Be sure to let me know if there's anything else that you'd like to see
done differently. I'll probably push the change in sometime tomorrow
morning/afternoon.
Thanks,
--
Ed Schouten <ed at 80386.nl>
-------------- next part --------------
Index: lib/Analysis/ThreadSafety.cpp
===================================================================
--- lib/Analysis/ThreadSafety.cpp (revision 216834)
+++ lib/Analysis/ThreadSafety.cpp (working copy)
@@ -85,8 +85,9 @@
}
};
+class FactManager;
+class FactSet;
-
/// \brief This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
@@ -99,29 +100,28 @@
private:
LockKind LKind; ///< exclusive or shared
SourceLocation AcquireLoc; ///< where it was acquired.
- bool Managed; ///< for ScopedLockable objects
bool Asserted; ///< true if the lock was asserted
- const til::SExpr* UnderlyingMutex; ///< for ScopedLockable objects
public:
FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
- bool Mng=false, bool Asrt=false)
- : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(Mng),
- Asserted(Asrt), UnderlyingMutex(nullptr)
- {}
+ bool Asrt)
+ : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt) {}
- FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
- const til::SExpr *Mu)
- : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(false),
- Asserted(false), UnderlyingMutex(Mu)
- {}
+ virtual ~FactEntry() {}
LockKind kind() const { return LKind; }
SourceLocation loc() const { return AcquireLoc; }
bool asserted() const { return Asserted; }
- bool managed() const { return Managed; }
- const til::SExpr* underlying() const { return UnderlyingMutex; }
+ virtual void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const = 0;
+ virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const = 0;
+
// Return true if LKind >= LK, where exclusive > shared
bool isAtLeast(LockKind LK) {
return (LKind == LK_Exclusive) || (LK == LK_Shared);
@@ -135,16 +135,16 @@
/// the analysis of a single routine.
class FactManager {
private:
- std::vector<FactEntry> Facts;
+ std::vector<std::unique_ptr<FactEntry>> Facts;
public:
- FactID newFact(const FactEntry &Entry) {
- Facts.push_back(Entry);
+ FactID newFact(std::unique_ptr<FactEntry> Entry) {
+ Facts.push_back(std::move(Entry));
return static_cast<unsigned short>(Facts.size() - 1);
}
- const FactEntry& operator[](FactID F) const { return Facts[F]; }
- FactEntry& operator[](FactID F) { return Facts[F]; }
+ const FactEntry &operator[](FactID F) const { return *Facts[F]; }
+ FactEntry &operator[](FactID F) { return *Facts[F]; }
};
@@ -184,8 +184,8 @@
void addLockByID(FactID ID) { FactIDs.push_back(ID); }
- FactID addLock(FactManager& FM, const FactEntry &Entry) {
- FactID F = FM.newFact(Entry);
+ FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
+ FactID F = FM.newFact(std::move(Entry));
FactIDs.push_back(F);
return F;
}
@@ -758,6 +758,98 @@
}
}
+class LockableFactEntry : public FactEntry {
+private:
+ bool Managed; ///< managed by ScopedLockable object
+
+public:
+ LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+ bool Mng = false, bool Asrt = false)
+ : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
+
+ void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const override {
+ if (!Managed && !asserted() && !negative() && !isUniversal()) {
+ Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+ LEK);
+ }
+ }
+
+ void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ FSet.removeLock(FactMan, Cp);
+ if (!Cp.negative()) {
+ FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ !Cp, LK_Exclusive, UnlockLoc));
+ }
+ }
+};
+
+class ScopedLockableFactEntry : public FactEntry {
+private:
+ SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+
+public:
+ ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
+ const CapExprSet &Excl, const CapExprSet &Shrd)
+ : FactEntry(CE, LK_Exclusive, Loc, false) {
+ for (const auto &M : Excl)
+ UnderlyingMutexes.push_back(M.sexpr());
+ for (const auto &M : Shrd)
+ UnderlyingMutexes.push_back(M.sexpr());
+ }
+
+ void
+ handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+ SourceLocation JoinLoc, LockErrorKind LEK,
+ ThreadSafetyHandler &Handler) const override {
+ for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+ // If this scoped lock manages another mutex, and if the underlying
+ // mutex is still held, then warn about the underlying mutex.
+ Handler.handleMutexHeldEndOfScope(
+ "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+ }
+ }
+ }
+
+ void handleUnlock(FactSet &FSet, FactManager &FactMan,
+ const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+ bool FullyRemove, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ assert(!Cp.negative() && "Managing object cannot be negative.");
+ for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ CapabilityExpr UnderCp(UnderlyingMutex, false);
+ auto UnderEntry = llvm::make_unique<LockableFactEntry>(
+ !UnderCp, LK_Exclusive, UnlockLoc);
+
+ if (FullyRemove) {
+ // We're destroying the managing object.
+ // Remove the underlying mutex if it exists; but don't warn.
+ if (FSet.findLock(FactMan, UnderCp)) {
+ FSet.removeLock(FactMan, UnderCp);
+ FSet.addLock(FactMan, std::move(UnderEntry));
+ }
+ } else {
+ // We're releasing the underlying mutex, but not destroying the
+ // managing object. Warn on dual release.
+ if (!FSet.findLock(FactMan, UnderCp)) {
+ Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
+ UnlockLoc);
+ }
+ FSet.removeLock(FactMan, UnderCp);
+ FSet.addLock(FactMan, std::move(UnderEntry));
+ }
+ }
+ if (FullyRemove)
+ FSet.removeLock(FactMan, Cp);
+ }
+};
+
/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
friend class BuildLockset;
@@ -778,8 +870,8 @@
bool inCurrentScope(const CapabilityExpr &CapE);
- void addLock(FactSet &FSet, const FactEntry &Entry, StringRef DiagKind,
- bool ReqAttr = false);
+ void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
+ StringRef DiagKind, bool ReqAttr = false);
void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
StringRef DiagKind);
@@ -908,32 +1000,33 @@
/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
-void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry &Entry,
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
+ std::unique_ptr<FactEntry> Entry,
StringRef DiagKind, bool ReqAttr) {
- if (Entry.shouldIgnore())
+ if (Entry->shouldIgnore())
return;
- if (!ReqAttr && !Entry.negative()) {
+ if (!ReqAttr && !Entry->negative()) {
// look for the negative capability, and remove it from the fact set.
- CapabilityExpr NegC = !Entry;
+ CapabilityExpr NegC = !*Entry;
FactEntry *Nen = FSet.findLock(FactMan, NegC);
if (Nen) {
FSet.removeLock(FactMan, NegC);
}
else {
- if (inCurrentScope(Entry) && !Entry.asserted())
- Handler.handleNegativeNotHeld(DiagKind, Entry.toString(),
- NegC.toString(), Entry.loc());
+ if (inCurrentScope(*Entry) && !Entry->asserted())
+ Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+ NegC.toString(), Entry->loc());
}
}
// FIXME: deal with acquired before/after annotations.
// FIXME: Don't always warn when we have support for reentrant locks.
- if (FSet.findLock(FactMan, Entry)) {
- if (!Entry.asserted())
- Handler.handleDoubleLock(DiagKind, Entry.toString(), Entry.loc());
+ if (FSet.findLock(FactMan, *Entry)) {
+ if (!Entry->asserted())
+ Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
} else {
- FSet.addLock(FactMan, Entry);
+ FSet.addLock(FactMan, std::move(Entry));
}
}
@@ -960,37 +1053,8 @@
LDat->kind(), ReceivedKind, UnlockLoc);
}
- if (LDat->underlying()) {
- assert(!Cp.negative() && "Managing object cannot be negative.");
- CapabilityExpr UnderCp(LDat->underlying(), false);
- FactEntry UnderEntry(!UnderCp, LK_Exclusive, UnlockLoc);
-
- // This is scoped lockable object, which manages the real mutex.
- if (FullyRemove) {
- // We're destroying the managing object.
- // Remove the underlying mutex if it exists; but don't warn.
- if (FSet.findLock(FactMan, UnderCp)) {
- FSet.removeLock(FactMan, UnderCp);
- FSet.addLock(FactMan, UnderEntry);
- }
- FSet.removeLock(FactMan, Cp);
- } else {
- // We're releasing the underlying mutex, but not destroying the
- // managing object. Warn on dual release.
- if (!FSet.findLock(FactMan, UnderCp)) {
- Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(), UnlockLoc);
- }
- FSet.removeLock(FactMan, UnderCp);
- FSet.addLock(FactMan, UnderEntry);
- }
- return;
- }
- // else !LDat->underlying()
-
- FSet.removeLock(FactMan, Cp);
- if (!Cp.negative()) {
- FSet.addLock(FactMan, FactEntry(!Cp, LK_Exclusive, UnlockLoc));
- }
+ LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
+ DiagKind);
}
@@ -1192,10 +1256,12 @@
// Add and remove locks.
SourceLocation Loc = Exp->getExprLoc();
for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
- addLock(Result, FactEntry(ExclusiveLockToAdd, LK_Exclusive, Loc),
+ addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
+ LK_Exclusive, Loc),
CapDiagKind);
for (const auto &SharedLockToAdd : SharedLocksToAdd)
- addLock(Result, FactEntry(SharedLockToAdd, LK_Shared, Loc),
+ addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
+ LK_Shared, Loc),
CapDiagKind);
}
@@ -1457,8 +1523,9 @@
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet, FactEntry(AssertLock, LK_Exclusive, Loc,
- false, true),
+ Analyzer->addLock(FSet,
+ llvm::make_unique<LockableFactEntry>(
+ AssertLock, LK_Exclusive, Loc, false, true),
ClassifyDiagnostic(A));
break;
}
@@ -1468,8 +1535,8 @@
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet, FactEntry(AssertLock, LK_Shared, Loc,
- false, true),
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ AssertLock, LK_Shared, Loc, false, true),
ClassifyDiagnostic(A));
break;
}
@@ -1523,32 +1590,28 @@
// Add locks.
for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(FSet, FactEntry(M, LK_Exclusive, Loc, isScopedVar),
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ M, LK_Exclusive, Loc, isScopedVar),
CapDiagKind);
for (const auto &M : SharedLocksToAdd)
- Analyzer->addLock(FSet, FactEntry(M, LK_Shared, Loc, isScopedVar),
+ Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+ M, LK_Shared, Loc, isScopedVar),
CapDiagKind);
- // Add the managing object as a dummy mutex, mapped to the underlying mutex.
- // FIXME: this doesn't work if we acquire multiple locks.
if (isScopedVar) {
+ // Add the managing object as a dummy mutex, mapped to the underlying mutex.
SourceLocation MLoc = VD->getLocation();
DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
// FIXME: does this store a pointer to DRE?
CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
- for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc, M.sexpr()),
- CapDiagKind);
- for (const auto &M : SharedLocksToAdd)
- Analyzer->addLock(FSet, FactEntry(Scp, LK_Shared, MLoc, M.sexpr()),
- CapDiagKind);
-
- // handle corner case where the underlying mutex is invalid
- if (ExclusiveLocksToAdd.size() == 0 && SharedLocksToAdd.size() == 0) {
- Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc),
- CapDiagKind);
- }
+ CapExprSet UnderlyingMutexes(ExclusiveLocksToAdd);
+ std::copy(SharedLocksToAdd.begin(), SharedLocksToAdd.end(),
+ std::back_inserter(UnderlyingMutexes));
+ Analyzer->addLock(FSet,
+ llvm::make_unique<ScopedLockableFactEntry>(
+ Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
+ CapDiagKind);
}
// Remove locks.
@@ -1729,22 +1792,8 @@
*Iter1 = Fact;
}
} else {
- if (LDat2->underlying()) {
- if (FSet2.findLock(FactMan, CapabilityExpr(LDat2->underlying(),
- false))) {
- // If this is a scoped lock that manages another mutex, and if the
- // underlying mutex is still held, then warn about the underlying
- // mutex.
- Handler.handleMutexHeldEndOfScope("mutex",
- sx::toString(LDat2->underlying()),
- LDat2->loc(), JoinLoc, LEK1);
- }
- }
- else if (!LDat2->managed() && !LDat2->asserted() &&
- !LDat2->negative() && !LDat2->isUniversal()) {
- Handler.handleMutexHeldEndOfScope("mutex", LDat2->toString(),
- LDat2->loc(), JoinLoc, LEK1);
- }
+ LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
+ Handler);
}
}
@@ -1754,22 +1803,8 @@
const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
if (!LDat2) {
- if (LDat1->underlying()) {
- if (FSet1Orig.findLock(FactMan, CapabilityExpr(LDat1->underlying(),
- false))) {
- // If this is a scoped lock that manages another mutex, and if the
- // underlying mutex is still held, then warn about the underlying
- // mutex.
- Handler.handleMutexHeldEndOfScope("mutex",
- sx::toString(LDat1->underlying()),
- LDat1->loc(), JoinLoc, LEK1);
- }
- }
- else if (!LDat1->managed() && !LDat1->asserted() &&
- !LDat1->negative() && !LDat1->isUniversal()) {
- Handler.handleMutexHeldEndOfScope("mutex", LDat1->toString(),
- LDat1->loc(), JoinLoc, LEK2);
- }
+ LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
+ Handler);
if (Modify)
FSet1.removeLock(FactMan, *LDat1);
}
@@ -1895,10 +1930,12 @@
// FIXME -- Loc can be wrong here.
for (const auto &Mu : ExclusiveLocksToAdd)
- addLock(InitialLockset, FactEntry(Mu, LK_Exclusive, Loc),
+ addLock(InitialLockset,
+ llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc),
CapDiagKind, true);
for (const auto &Mu : SharedLocksToAdd)
- addLock(InitialLockset, FactEntry(Mu, LK_Shared, Loc),
+ addLock(InitialLockset,
+ llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc),
CapDiagKind, true);
}
@@ -2068,11 +2105,11 @@
// issue the appropriate warning.
// FIXME: the location here is not quite right.
for (const auto &Lock : ExclusiveLocksAcquired)
- ExpectedExitSet.addLock(FactMan, FactEntry(Lock, LK_Exclusive,
- D->getLocation()));
+ ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ Lock, LK_Exclusive, D->getLocation()));
for (const auto &Lock : SharedLocksAcquired)
- ExpectedExitSet.addLock(FactMan, FactEntry(Lock, LK_Shared,
- D->getLocation()));
+ ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ Lock, LK_Shared, D->getLocation()));
for (const auto &Lock : LocksReleased)
ExpectedExitSet.removeLock(FactMan, Lock);
Index: test/SemaCXX/warn-thread-safety-analysis.cpp
===================================================================
--- test/SemaCXX/warn-thread-safety-analysis.cpp (revision 216834)
+++ test/SemaCXX/warn-thread-safety-analysis.cpp (working copy)
@@ -63,6 +63,12 @@
void Release() UNLOCK_FUNCTION();
};
+class __attribute__((scoped_lockable)) DoubleMutexLock {
+public:
+ DoubleMutexLock(Mutex *mu1, Mutex *mu2)
+ __attribute__((exclusive_lock_function(mu1, mu2)));
+ ~DoubleMutexLock() __attribute__((unlock_function));
+};
// The universal lock, written "*", allows checking to be selectively turned
// off for a particular piece of code.
@@ -1662,6 +1668,12 @@
a = b+1;
b = a+1;
}
+
+ void foo5() {
+ DoubleMutexLock mulock(&mu1, &mu2);
+ a = b + 1;
+ b = a + 1;
+ }
};
} // end namespace test_scoped_lockable
More information about the cfe-commits
mailing list