r217016 - Allow a scoped lockable object to acquire/release multiple locks.

Ed Schouten ed at 80386.nl
Tue Sep 2 23:00:11 PDT 2014


Author: ed
Date: Wed Sep  3 01:00:11 2014
New Revision: 217016

URL: http://llvm.org/viewvc/llvm-project?rev=217016&view=rev
Log:
Allow a scoped lockable object to acquire/release multiple locks.

Scoped lockable objects (mutex guards) are implemented as if it is a
lock itself that is acquired upon construction and unlocked upon
destruction. As it of course needs to be used to actually lock down
something else (a mutex), it keeps track of this knowledge through its
underlying mutex field in its FactEntry.

The problem with this approach is that this only allows us to lock down
a single mutex, so extend the code to use a vector of underlying
mutexes. This, however, makes the code a bit more complex than
necessary, so subclass FactEntry into LockableFactEntry and
ScopedLockableFactEntry and move all the logic that differs between
regular locks and scoped lockables into member functions.

Modified:
    cfe/trunk/lib/Analysis/ThreadSafety.cpp
    cfe/trunk/test/SemaCXX/warn-thread-safety-analysis.cpp

Modified: cfe/trunk/lib/Analysis/ThreadSafety.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Analysis/ThreadSafety.cpp?rev=217016&r1=217015&r2=217016&view=diff
==============================================================================
--- cfe/trunk/lib/Analysis/ThreadSafety.cpp (original)
+++ cfe/trunk/lib/Analysis/ThreadSafety.cpp Wed Sep  3 01:00:11 2014
@@ -85,7 +85,8 @@ public:
   }
 };
 
-
+class FactManager;
+class FactSet;
 
 /// \brief This is a helper class that stores a fact that is known at a
 /// particular point in program execution.  Currently, a fact is a capability,
@@ -99,28 +100,27 @@ class FactEntry : public CapabilityExpr
 private:
   LockKind          LKind;            ///<  exclusive or shared
   SourceLocation    AcquireLoc;       ///<  where it was acquired.
-  bool              Managed;          ///<  for ScopedLockable objects
   bool              Asserted;         ///<  true if the lock was asserted
-  const til::SExpr* UnderlyingMutex;  ///<  for ScopedLockable objects
 
 public:
   FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
-            bool Mng=false, bool Asrt=false)
-    : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(Mng),
-      Asserted(Asrt), UnderlyingMutex(nullptr)
-  {}
+            bool Asrt)
+      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt) {}
 
-  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
-            const til::SExpr *Mu)
-    : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Managed(false),
-      Asserted(false), UnderlyingMutex(Mu)
-  {}
+  virtual ~FactEntry() {}
 
   LockKind          kind()       const { return LKind;    }
   SourceLocation    loc()        const { return AcquireLoc; }
   bool              asserted()   const { return Asserted; }
-  bool              managed()    const { return Managed;  }
-  const til::SExpr* underlying() const { return UnderlyingMutex; }
+
+  virtual void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const = 0;
+  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                            bool FullyRemove, ThreadSafetyHandler &Handler,
+                            StringRef DiagKind) const = 0;
 
   // Return true if LKind >= LK, where exclusive > shared
   bool isAtLeast(LockKind LK) {
@@ -135,16 +135,16 @@ typedef unsigned short FactID;
 /// the analysis of a single routine.
 class FactManager {
 private:
-  std::vector<FactEntry> Facts;
+  std::vector<std::unique_ptr<FactEntry>> Facts;
 
 public:
-  FactID newFact(const FactEntry &Entry) {
-    Facts.push_back(Entry);
+  FactID newFact(std::unique_ptr<FactEntry> Entry) {
+    Facts.push_back(std::move(Entry));
     return static_cast<unsigned short>(Facts.size() - 1);
   }
 
-  const FactEntry& operator[](FactID F) const { return Facts[F]; }
-  FactEntry&       operator[](FactID F)       { return Facts[F]; }
+  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
+  FactEntry &operator[](FactID F) { return *Facts[F]; }
 };
 
 
@@ -184,8 +184,8 @@ public:
 
   void addLockByID(FactID ID) { FactIDs.push_back(ID); }
 
-  FactID addLock(FactManager& FM, const FactEntry &Entry) {
-    FactID F = FM.newFact(Entry);
+  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
+    FactID F = FM.newFact(std::move(Entry));
     FactIDs.push_back(F);
     return F;
   }
@@ -758,6 +758,98 @@ static void findBlockLocations(CFG *CFGr
   }
 }
 
+class LockableFactEntry : public FactEntry {
+private:
+  bool Managed; ///<  managed by ScopedLockable object
+
+public:
+  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
+                    bool Mng = false, bool Asrt = false)
+      : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
+
+  void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const override {
+    if (!Managed && !asserted() && !negative() && !isUniversal()) {
+      Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+                                        LEK);
+    }
+  }
+
+  void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                    bool FullyRemove, ThreadSafetyHandler &Handler,
+                    StringRef DiagKind) const override {
+    FSet.removeLock(FactMan, Cp);
+    if (!Cp.negative()) {
+      FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                !Cp, LK_Exclusive, UnlockLoc));
+    }
+  }
+};
+
+class ScopedLockableFactEntry : public FactEntry {
+private:
+  SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+
+public:
+  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
+                          const CapExprSet &Excl, const CapExprSet &Shrd)
+      : FactEntry(CE, LK_Exclusive, Loc, false) {
+    for (const auto &M : Excl)
+      UnderlyingMutexes.push_back(M.sexpr());
+    for (const auto &M : Shrd)
+      UnderlyingMutexes.push_back(M.sexpr());
+  }
+
+  void
+  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
+                                SourceLocation JoinLoc, LockErrorKind LEK,
+                                ThreadSafetyHandler &Handler) const override {
+    for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+      if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+        // If this scoped lock manages another mutex, and if the underlying
+        // mutex is still held, then warn about the underlying mutex.
+        Handler.handleMutexHeldEndOfScope(
+            "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+      }
+    }
+  }
+
+  void handleUnlock(FactSet &FSet, FactManager &FactMan,
+                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
+                    bool FullyRemove, ThreadSafetyHandler &Handler,
+                    StringRef DiagKind) const override {
+    assert(!Cp.negative() && "Managing object cannot be negative.");
+    for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+      CapabilityExpr UnderCp(UnderlyingMutex, false);
+      auto UnderEntry = llvm::make_unique<LockableFactEntry>(
+          !UnderCp, LK_Exclusive, UnlockLoc);
+
+      if (FullyRemove) {
+        // We're destroying the managing object.
+        // Remove the underlying mutex if it exists; but don't warn.
+        if (FSet.findLock(FactMan, UnderCp)) {
+          FSet.removeLock(FactMan, UnderCp);
+          FSet.addLock(FactMan, std::move(UnderEntry));
+        }
+      } else {
+        // We're releasing the underlying mutex, but not destroying the
+        // managing object.  Warn on dual release.
+        if (!FSet.findLock(FactMan, UnderCp)) {
+          Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
+                                        UnlockLoc);
+        }
+        FSet.removeLock(FactMan, UnderCp);
+        FSet.addLock(FactMan, std::move(UnderEntry));
+      }
+    }
+    if (FullyRemove)
+      FSet.removeLock(FactMan, Cp);
+  }
+};
+
 /// \brief Class which implements the core thread safety analysis routines.
 class ThreadSafetyAnalyzer {
   friend class BuildLockset;
@@ -778,8 +870,8 @@ public:
 
   bool inCurrentScope(const CapabilityExpr &CapE);
 
-  void addLock(FactSet &FSet, const FactEntry &Entry, StringRef DiagKind,
-               bool ReqAttr = false);
+  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
+               StringRef DiagKind, bool ReqAttr = false);
   void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                   SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
                   StringRef DiagKind);
@@ -908,32 +1000,33 @@ inline bool ThreadSafetyAnalyzer::inCurr
 
 /// \brief Add a new lock to the lockset, warning if the lock is already there.
 /// \param ReqAttr -- true if this is part of an initial Requires attribute.
-void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry &Entry,
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
+                                   std::unique_ptr<FactEntry> Entry,
                                    StringRef DiagKind, bool ReqAttr) {
-  if (Entry.shouldIgnore())
+  if (Entry->shouldIgnore())
     return;
 
-  if (!ReqAttr && !Entry.negative()) {
+  if (!ReqAttr && !Entry->negative()) {
     // look for the negative capability, and remove it from the fact set.
-    CapabilityExpr NegC = !Entry;
+    CapabilityExpr NegC = !*Entry;
     FactEntry *Nen = FSet.findLock(FactMan, NegC);
     if (Nen) {
       FSet.removeLock(FactMan, NegC);
     }
     else {
-      if (inCurrentScope(Entry) && !Entry.asserted())
-        Handler.handleNegativeNotHeld(DiagKind, Entry.toString(),
-                                      NegC.toString(), Entry.loc());
+      if (inCurrentScope(*Entry) && !Entry->asserted())
+        Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+                                      NegC.toString(), Entry->loc());
     }
   }
 
   // FIXME: deal with acquired before/after annotations.
   // FIXME: Don't always warn when we have support for reentrant locks.
-  if (FSet.findLock(FactMan, Entry)) {
-    if (!Entry.asserted())
-      Handler.handleDoubleLock(DiagKind, Entry.toString(), Entry.loc());
+  if (FSet.findLock(FactMan, *Entry)) {
+    if (!Entry->asserted())
+      Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
   } else {
-    FSet.addLock(FactMan, Entry);
+    FSet.addLock(FactMan, std::move(Entry));
   }
 }
 
@@ -960,37 +1053,8 @@ void ThreadSafetyAnalyzer::removeLock(Fa
                                       LDat->kind(), ReceivedKind, UnlockLoc);
   }
 
-  if (LDat->underlying()) {
-    assert(!Cp.negative() && "Managing object cannot be negative.");
-    CapabilityExpr UnderCp(LDat->underlying(), false);
-    FactEntry UnderEntry(!UnderCp, LK_Exclusive, UnlockLoc);
-
-    // This is scoped lockable object, which manages the real mutex.
-    if (FullyRemove) {
-      // We're destroying the managing object.
-      // Remove the underlying mutex if it exists; but don't warn.
-      if (FSet.findLock(FactMan, UnderCp)) {
-        FSet.removeLock(FactMan, UnderCp);
-        FSet.addLock(FactMan, UnderEntry);
-      }
-      FSet.removeLock(FactMan, Cp);
-    } else {
-      // We're releasing the underlying mutex, but not destroying the
-      // managing object.  Warn on dual release.
-      if (!FSet.findLock(FactMan, UnderCp)) {
-        Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(), UnlockLoc);
-      }
-      FSet.removeLock(FactMan, UnderCp);
-      FSet.addLock(FactMan, UnderEntry);
-    }
-    return;
-  }
-  // else !LDat->underlying()
-
-  FSet.removeLock(FactMan, Cp);
-  if (!Cp.negative()) {
-    FSet.addLock(FactMan, FactEntry(!Cp, LK_Exclusive, UnlockLoc));
-  }
+  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
+                     DiagKind);
 }
 
 
@@ -1192,10 +1256,12 @@ void ThreadSafetyAnalyzer::getEdgeLockse
   // Add and remove locks.
   SourceLocation Loc = Exp->getExprLoc();
   for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
-    addLock(Result, FactEntry(ExclusiveLockToAdd, LK_Exclusive, Loc),
+    addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
+                                                         LK_Exclusive, Loc),
             CapDiagKind);
   for (const auto &SharedLockToAdd : SharedLocksToAdd)
-    addLock(Result, FactEntry(SharedLockToAdd, LK_Shared, Loc),
+    addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
+                                                         LK_Shared, Loc),
             CapDiagKind);
 }
 
@@ -1457,8 +1523,9 @@ void BuildLockset::handleCall(Expr *Exp,
         CapExprSet AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
         for (const auto &AssertLock : AssertLocks)
-          Analyzer->addLock(FSet, FactEntry(AssertLock, LK_Exclusive, Loc,
-                                            false, true),
+          Analyzer->addLock(FSet,
+                            llvm::make_unique<LockableFactEntry>(
+                                AssertLock, LK_Exclusive, Loc, false, true),
                             ClassifyDiagnostic(A));
         break;
       }
@@ -1468,8 +1535,8 @@ void BuildLockset::handleCall(Expr *Exp,
         CapExprSet AssertLocks;
         Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
         for (const auto &AssertLock : AssertLocks)
-          Analyzer->addLock(FSet, FactEntry(AssertLock, LK_Shared, Loc,
-                                            false, true),
+          Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                      AssertLock, LK_Shared, Loc, false, true),
                             ClassifyDiagnostic(A));
         break;
       }
@@ -1523,32 +1590,28 @@ void BuildLockset::handleCall(Expr *Exp,
 
   // Add locks.
   for (const auto &M : ExclusiveLocksToAdd)
-    Analyzer->addLock(FSet, FactEntry(M, LK_Exclusive, Loc, isScopedVar),
+    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                M, LK_Exclusive, Loc, isScopedVar),
                       CapDiagKind);
   for (const auto &M : SharedLocksToAdd)
-    Analyzer->addLock(FSet, FactEntry(M, LK_Shared, Loc, isScopedVar),
+    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
+                                M, LK_Shared, Loc, isScopedVar),
                       CapDiagKind);
 
-  // Add the managing object as a dummy mutex, mapped to the underlying mutex.
-  // FIXME: this doesn't work if we acquire multiple locks.
   if (isScopedVar) {
+    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
     SourceLocation MLoc = VD->getLocation();
     DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
     // FIXME: does this store a pointer to DRE?
     CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
 
-    for (const auto &M : ExclusiveLocksToAdd)
-      Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc, M.sexpr()),
-                        CapDiagKind);
-    for (const auto &M : SharedLocksToAdd)
-      Analyzer->addLock(FSet, FactEntry(Scp, LK_Shared, MLoc, M.sexpr()),
-                        CapDiagKind);
-
-    // handle corner case where the underlying mutex is invalid
-    if (ExclusiveLocksToAdd.size() == 0 && SharedLocksToAdd.size() == 0) {
-      Analyzer->addLock(FSet, FactEntry(Scp, LK_Exclusive, MLoc),
-                        CapDiagKind);
-    }
+    CapExprSet UnderlyingMutexes(ExclusiveLocksToAdd);
+    std::copy(SharedLocksToAdd.begin(), SharedLocksToAdd.end(),
+              std::back_inserter(UnderlyingMutexes));
+    Analyzer->addLock(FSet,
+                      llvm::make_unique<ScopedLockableFactEntry>(
+                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
+                      CapDiagKind);
   }
 
   // Remove locks.
@@ -1729,22 +1792,8 @@ void ThreadSafetyAnalyzer::intersectAndW
         *Iter1 = Fact;
       }
     } else {
-      if (LDat2->underlying()) {
-        if (FSet2.findLock(FactMan, CapabilityExpr(LDat2->underlying(),
-                                                    false))) {
-          // If this is a scoped lock that manages another mutex, and if the
-          // underlying mutex is still held, then warn about the underlying
-          // mutex.
-          Handler.handleMutexHeldEndOfScope("mutex",
-                                            sx::toString(LDat2->underlying()),
-                                            LDat2->loc(), JoinLoc, LEK1);
-        }
-      }
-      else if (!LDat2->managed() && !LDat2->asserted() &&
-               !LDat2->negative() && !LDat2->isUniversal()) {
-        Handler.handleMutexHeldEndOfScope("mutex", LDat2->toString(),
-                                          LDat2->loc(), JoinLoc, LEK1);
-      }
+      LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
+                                           Handler);
     }
   }
 
@@ -1754,22 +1803,8 @@ void ThreadSafetyAnalyzer::intersectAndW
     const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
 
     if (!LDat2) {
-      if (LDat1->underlying()) {
-        if (FSet1Orig.findLock(FactMan, CapabilityExpr(LDat1->underlying(),
-                                                       false))) {
-          // If this is a scoped lock that manages another mutex, and if the
-          // underlying mutex is still held, then warn about the underlying
-          // mutex.
-          Handler.handleMutexHeldEndOfScope("mutex",
-                                            sx::toString(LDat1->underlying()),
-                                            LDat1->loc(), JoinLoc, LEK1);
-        }
-      }
-      else if (!LDat1->managed() && !LDat1->asserted() &&
-               !LDat1->negative() && !LDat1->isUniversal()) {
-        Handler.handleMutexHeldEndOfScope("mutex", LDat1->toString(),
-                                          LDat1->loc(), JoinLoc, LEK2);
-      }
+      LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
+                                           Handler);
       if (Modify)
         FSet1.removeLock(FactMan, *LDat1);
     }
@@ -1895,10 +1930,12 @@ void ThreadSafetyAnalyzer::runAnalysis(A
 
     // FIXME -- Loc can be wrong here.
     for (const auto &Mu : ExclusiveLocksToAdd)
-      addLock(InitialLockset, FactEntry(Mu, LK_Exclusive, Loc),
+      addLock(InitialLockset,
+              llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc),
               CapDiagKind, true);
     for (const auto &Mu : SharedLocksToAdd)
-      addLock(InitialLockset, FactEntry(Mu, LK_Shared, Loc),
+      addLock(InitialLockset,
+              llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc),
               CapDiagKind, true);
   }
 
@@ -2068,11 +2105,11 @@ void ThreadSafetyAnalyzer::runAnalysis(A
   // issue the appropriate warning.
   // FIXME: the location here is not quite right.
   for (const auto &Lock : ExclusiveLocksAcquired)
-    ExpectedExitSet.addLock(FactMan, FactEntry(Lock, LK_Exclusive,
-                                               D->getLocation()));
+    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                         Lock, LK_Exclusive, D->getLocation()));
   for (const auto &Lock : SharedLocksAcquired)
-    ExpectedExitSet.addLock(FactMan, FactEntry(Lock, LK_Shared,
-                                               D->getLocation()));
+    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+                                         Lock, LK_Shared, D->getLocation()));
   for (const auto &Lock : LocksReleased)
     ExpectedExitSet.removeLock(FactMan, Lock);
 

Modified: cfe/trunk/test/SemaCXX/warn-thread-safety-analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/SemaCXX/warn-thread-safety-analysis.cpp?rev=217016&r1=217015&r2=217016&view=diff
==============================================================================
--- cfe/trunk/test/SemaCXX/warn-thread-safety-analysis.cpp (original)
+++ cfe/trunk/test/SemaCXX/warn-thread-safety-analysis.cpp Wed Sep  3 01:00:11 2014
@@ -63,6 +63,12 @@ class SCOPED_LOCKABLE ReleasableMutexLoc
   void Release() UNLOCK_FUNCTION();
 };
 
+class __attribute__((scoped_lockable)) DoubleMutexLock {
+public:
+  DoubleMutexLock(Mutex *mu1, Mutex *mu2)
+      __attribute__((exclusive_lock_function(mu1, mu2)));
+  ~DoubleMutexLock() __attribute__((unlock_function));
+};
 
 // The universal lock, written "*", allows checking to be selectively turned
 // off for a particular piece of code.
@@ -1662,6 +1668,12 @@ struct TestScopedLockable {
     a = b+1;
     b = a+1;
   }
+
+  void foo5() {
+    DoubleMutexLock mulock(&mu1, &mu2);
+    a = b + 1;
+    b = a + 1;
+  }
 };
 
 } // end namespace test_scoped_lockable





More information about the cfe-commits mailing list