[Patch] Clang: allow scoped_lockable to acquire multiple locks
Ed Schouten
ed at 80386.nl
Sat Aug 30 21:53:35 PDT 2014
Hi DeLesley,
First of all, thanks for your work on the thread annotations for
Clang! Impressed by how well they work, I decided to patch up
FreeBSD's pthread library to use these annotations by default.
http://lists.freebsd.org/pipermail/freebsd-toolchain/2014-August/001183.html
While experimenting with using these annotations in a C++ codebase, I
discovered that it's currently not possible to use these for
interlocking, as scoped_lockable does not support acquiring multiple
mutexes. Example:
class SCOPED_LOCKABLE DoubleGuard {
DoubleGuard(Mutex* m1, Mutex* m2) EXCLUSIVE_LOCK_FUNCTION(m1, m2) {
if (m1 == m2) {
m1_ = m1;
m2_ = nullptr;
m1_->Lock();
} else {
if (m1 < m2) {
m1_ = m1;
m2_ = m2;
} else {
m1_ = m2;
m2_ = m1;
}
m1_->Lock();
m2_->Lock();
}
}
~DoubleGuard() UNLOCK_FUNCTION() {
m1_->Unlock();
if (m2_ != nullptr)
m2_->Unlock();
}
private:
Mutex* m1_;
Mutex* m2_;
};
This class cannot be used, as it will cause a warning that the guard
object itself is locked twice. Attached is a patch to fix this. It
changes the lock path to always create exactly one FactEntry for
scoped lockables. Instead of having a single UnderlyingMutex, it now
uses a CapExprSet.
What are your thoughts on this change? Is it all right for me to commit this?
Thanks,
--
Ed Schouten <ed at 80386.nl>
-------------- next part --------------
Index: lib/Analysis/ThreadSafety.cpp
===================================================================
--- lib/Analysis/ThreadSafety.cpp (revision 216834)
+++ lib/Analysis/ThreadSafety.cpp (working copy)
@@ -97,23 +97,23 @@
/// shared.
class FactEntry : public CapabilityExpr {
private:
- LockKind LKind; ///< exclusive or shared
- SourceLocation AcquireLoc; ///< where it was acquired.
- bool Managed; ///< for ScopedLockable objects
- bool Asserted; ///< true if the lock was asserted
- const til::SExpr* UnderlyingMutex; ///< for ScopedLockable objects
+ LockKind LKind; ///< exclusive or shared
+ SourceLocation AcquireLoc; ///< where it was acquired.
+ bool Managed; ///< for ScopedLockable objects
+ bool Asserted; ///< true if the lock was asserted
+ CapExprSet UnderlyingMutexes; ///< for ScopedLockable objects
public:
FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
bool Mng=false, bool Asrt=false)
: CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(Mng),
- Asserted(Asrt), UnderlyingMutex(nullptr)
+ Asserted(Asrt)
{}
FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
- const til::SExpr *Mu)
+ const CapExprSet& UnderlyingMutexes)
: CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(false),
- Asserted(false), UnderlyingMutex(Mu)
+ Asserted(false), UnderlyingMutexes(UnderlyingMutexes)
{}
LockKind kind() const { return LKind; }
@@ -120,7 +120,7 @@
SourceLocation loc() const { return AcquireLoc; }
bool asserted() const { return Asserted; }
bool managed() const { return Managed; }
- const til::SExpr* underlying() const { return UnderlyingMutex; }
+ const CapExprSet* underlying() const { return &UnderlyingMutexes; }
// Return true if LKind >= LK, where exclusive > shared
bool isAtLeast(LockKind LK) {
@@ -960,28 +960,31 @@
LDat->kind(), ReceivedKind, UnlockLoc);
}
- if (LDat->underlying()) {
+ const CapExprSet* UnderlyingMutexes = LDat->underlying();
+ if (!UnderlyingMutexes->empty()) {
assert(!Cp.negative() && "Managing object cannot be negative.");
- CapabilityExpr UnderCp(LDat->underlying(), false);
- FactEntry UnderEntry(!UnderCp, LK_Exclusive, UnlockLoc);
+ for (const CapabilityExpr& UnderlyingMutex : *UnderlyingMutexes) {
+ CapabilityExpr UnderCp(UnderlyingMutex.sexpr(), false);
+ FactEntry UnderEntry(!UnderCp, LK_Exclusive, UnlockLoc);
- // This is scoped lockable object, which manages the real mutex.
- if (FullyRemove) {
- // We're destroying the managing object.
- // Remove the underlying mutex if it exists; but don't warn.
- if (FSet.findLock(FactMan, UnderCp)) {
+      // This is a scoped lockable object, which manages the real mutex.
+ if (FullyRemove) {
+ // We're destroying the managing object.
+ // Remove the underlying mutex if it exists; but don't warn.
+ if (FSet.findLock(FactMan, UnderCp)) {
+ FSet.removeLock(FactMan, UnderCp);
+ FSet.addLock(FactMan, UnderEntry);
+ }
+ FSet.removeLock(FactMan, Cp);
+ } else {
+ // We're releasing the underlying mutex, but not destroying the
+ // managing object. Warn on dual release.
+ if (!FSet.findLock(FactMan, UnderCp)) {
+ Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(), UnlockLoc);
+ }
FSet.removeLock(FactMan, UnderCp);
FSet.addLock(FactMan, UnderEntry);
}
- FSet.removeLock(FactMan, Cp);
- } else {
- // We're releasing the underlying mutex, but not destroying the
- // managing object. Warn on dual release.
- if (!FSet.findLock(FactMan, UnderCp)) {
- Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(), UnlockLoc);
- }
- FSet.removeLock(FactMan, UnderCp);
- FSet.addLock(FactMan, UnderEntry);
}
return;
}
@@ -1530,7 +1533,6 @@
CapDiagKind);
// Add the managing object as a dummy mutex, mapped to the underlying mutex.
- // FIXME: this doesn't work if we acquire multiple locks.
if (isScopedVar) {
SourceLocation MLoc = VD->getLocation();
DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
@@ -1537,18 +1539,12 @@
// FIXME: does this store a pointer to DRE?
CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
- for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc, M.sexpr()),
- CapDiagKind);
- for (const auto &M : SharedLocksToAdd)
- Analyzer->addLock(FSet, FactEntry(Scp, LK_Shared, MLoc, M.sexpr()),
- CapDiagKind);
-
- // handle corner case where the underlying mutex is invalid
- if (ExclusiveLocksToAdd.size() == 0 && SharedLocksToAdd.size() == 0) {
- Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc),
- CapDiagKind);
- }
+ CapExprSet UnderlyingMutexes(ExclusiveLocksToAdd);
+ std::copy(SharedLocksToAdd.begin(), SharedLocksToAdd.end(),
+ std::back_inserter(UnderlyingMutexes));
+ Analyzer->addLock(FSet,
+ FactEntry(Scp, LK_Exclusive, MLoc, UnderlyingMutexes),
+ CapDiagKind);
}
// Remove locks.
@@ -1729,15 +1725,18 @@
*Iter1 = Fact;
}
} else {
- if (LDat2->underlying()) {
- if (FSet2.findLock(FactMan, CapabilityExpr(LDat2->underlying(),
- false))) {
- // If this is a scoped lock that manages another mutex, and if the
- // underlying mutex is still held, then warn about the underlying
- // mutex.
- Handler.handleMutexHeldEndOfScope("mutex",
- sx::toString(LDat2->underlying()),
- LDat2->loc(), JoinLoc, LEK1);
+ const CapExprSet* UnderlyingMutexes = LDat2->underlying();
+ if (!UnderlyingMutexes->empty()) {
+ for (const CapabilityExpr &UnderlyingMutex : *UnderlyingMutexes) {
+ if (FSet2.findLock(FactMan, CapabilityExpr(UnderlyingMutex.sexpr(),
+ false))) {
+ // If this is a scoped lock that manages another mutex, and if the
+ // underlying mutex is still held, then warn about the underlying
+ // mutex.
+ Handler.handleMutexHeldEndOfScope(
+ "mutex", sx::toString(UnderlyingMutex.sexpr()),
+ LDat2->loc(), JoinLoc, LEK1);
+ }
}
}
else if (!LDat2->managed() && !LDat2->asserted() &&
@@ -1754,15 +1753,18 @@
const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
if (!LDat2) {
- if (LDat1->underlying()) {
- if (FSet1Orig.findLock(FactMan, CapabilityExpr(LDat1->underlying(),
- false))) {
- // If this is a scoped lock that manages another mutex, and if the
- // underlying mutex is still held, then warn about the underlying
- // mutex.
- Handler.handleMutexHeldEndOfScope("mutex",
- sx::toString(LDat1->underlying()),
- LDat1->loc(), JoinLoc, LEK1);
+ const CapExprSet* UnderlyingMutexes = LDat1->underlying();
+ if (!UnderlyingMutexes->empty()) {
+ for (const CapabilityExpr &UnderlyingMutex : *UnderlyingMutexes) {
+ if (FSet1Orig.findLock(
+ FactMan, CapabilityExpr(UnderlyingMutex.sexpr(), false))) {
+ // If this is a scoped lock that manages another mutex, and if the
+ // underlying mutex is still held, then warn about the underlying
+ // mutex.
+ Handler.handleMutexHeldEndOfScope(
+ "mutex", sx::toString(UnderlyingMutex.sexpr()),
+ LDat1->loc(), JoinLoc, LEK1);
+ }
}
}
else if (!LDat1->managed() && !LDat1->asserted() &&
Index: test/SemaCXX/warn-thread-safety-analysis.cpp
===================================================================
--- test/SemaCXX/warn-thread-safety-analysis.cpp (revision 216834)
+++ test/SemaCXX/warn-thread-safety-analysis.cpp (working copy)
@@ -63,6 +63,12 @@
void Release() UNLOCK_FUNCTION();
};
+class __attribute__((scoped_lockable)) DoubleMutexLock {
+ public:
+ DoubleMutexLock(Mutex *mu1, Mutex *mu2)
+ __attribute__((exclusive_lock_function(mu1, mu2)));
+ ~DoubleMutexLock() __attribute__((unlock_function));
+};
// The universal lock, written "*", allows checking to be selectively turned
// off for a particular piece of code.
@@ -1662,6 +1668,12 @@
a = b+1;
b = a+1;
}
+
+ void foo5() {
+ DoubleMutexLock mulock(&mu1, &mu2);
+ a = b+1;
+ b = a+1;
+ }
};
} // end namespace test_scoped_lockable
More information about the cfe-commits
mailing list