[compiler-rt] ae1bd3a - [scudo] Add thread-safety annotations on TSD data members

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 15 15:46:46 PST 2023


Author: Chia-hung Duan
Date: 2023-02-15T23:44:44Z
New Revision: ae1bd3adf02f4f381b3298b794fab8387d794538

URL: https://github.com/llvm/llvm-project/commit/ae1bd3adf02f4f381b3298b794fab8387d794538
DIFF: https://github.com/llvm/llvm-project/commit/ae1bd3adf02f4f381b3298b794fab8387d794538.diff

LOG: [scudo] Add thread-safety annotations on TSD data members

Ideally, we want to assert that all the operations on
Cache/QuarantineCache always have the `Mutex` acquired. However,
the current architecture of accessing TSD does not cooperate easily
with the thread-safety analysis because of pointer aliasing. As an
alternative, we add getters for accessing the TSD members and attach
the proper thread-safety annotations to them.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D142151

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/combined.h
    compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
    compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
    compiler-rt/lib/scudo/standalone/tsd.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 7812f277a0321..c3079e23c795c 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -249,9 +249,9 @@ class Allocator {
   // - unlinking the local stats from the global ones (destroying the cache does
   //   the last two items).
   void commitBack(TSD<ThisT> *TSD) {
-    Quarantine.drain(&TSD->QuarantineCache,
-                     QuarantineCallback(*this, TSD->Cache));
-    TSD->Cache.destroy(&Stats);
+    Quarantine.drain(&TSD->getQuarantineCache(),
+                     QuarantineCallback(*this, TSD->getCache()));
+    TSD->getCache().destroy(&Stats);
   }
 
   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
@@ -375,14 +375,14 @@ class Allocator {
       DCHECK_NE(ClassId, 0U);
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      Block = TSD->Cache.allocate(ClassId);
+      Block = TSD->getCache().allocate(ClassId);
       // If the allocation failed, the most likely reason with a 32-bit primary
       // is the region being full. In that event, retry in each successively
       // larger class until it fits. If it fails to fit in the largest class,
       // fallback to the Secondary.
       if (UNLIKELY(!Block)) {
         while (ClassId < SizeClassMap::LargestClassId && !Block)
-          Block = TSD->Cache.allocate(++ClassId);
+          Block = TSD->getCache().allocate(++ClassId);
         if (!Block)
           ClassId = 0;
       }
@@ -1168,7 +1168,7 @@ class Allocator {
       if (LIKELY(ClassId)) {
         bool UnlockRequired;
         auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-        TSD->Cache.deallocate(ClassId, BlockBegin);
+        TSD->getCache().deallocate(ClassId, BlockBegin);
         if (UnlockRequired)
           TSD->unlock();
       } else {
@@ -1180,8 +1180,8 @@ class Allocator {
     } else {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      Quarantine.put(&TSD->QuarantineCache,
-                     QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+      Quarantine.put(&TSD->getQuarantineCache(),
+                     QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
       if (UnlockRequired)
         TSD->unlock();
     }

diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index d4c2a23e0bf85..a79469fa81985 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -447,9 +447,9 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  EXPECT_TRUE(!TSD->Cache.isEmpty());
-  TSD->Cache.drain();
-  EXPECT_TRUE(TSD->Cache.isEmpty());
+  EXPECT_TRUE(!TSD->getCache().isEmpty());
+  TSD->getCache().drain();
+  EXPECT_TRUE(TSD->getCache().isEmpty());
   if (UnlockRequired)
     TSD->unlock();
 }
@@ -738,7 +738,7 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->Cache.drain();
+  TSD->getCache().drain();
 
   Allocator->releaseToOS();
 }

diff --git a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
index 44b6bfdf05d6b..a092fdde904ee 100644
--- a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
@@ -102,15 +102,15 @@ template <class AllocatorT> static void testRegistry() {
   bool UnlockRequired;
   auto TSD = Registry->getTSDAndLock(&UnlockRequired);
   EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->Cache.Canary, 0U);
+  EXPECT_EQ(TSD->getCache().Canary, 0U);
   if (UnlockRequired)
     TSD->unlock();
 
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
   TSD = Registry->getTSDAndLock(&UnlockRequired);
   EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->Cache.Canary, 0U);
-  memset(&TSD->Cache, 0x42, sizeof(TSD->Cache));
+  EXPECT_EQ(TSD->getCache().Canary, 0U);
+  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
   if (UnlockRequired)
     TSD->unlock();
 }
@@ -141,14 +141,14 @@ template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
   // For an exclusive TSD, the cache should be empty. We cannot guarantee the
   // same for a shared TSD.
   if (!UnlockRequired)
-    EXPECT_EQ(TSD->Cache.Canary, 0U);
+    EXPECT_EQ(TSD->getCache().Canary, 0U);
   // Transform the thread id to a uptr to use it as canary.
   const scudo::uptr Canary = static_cast<scudo::uptr>(
       std::hash<std::thread::id>{}(std::this_thread::get_id()));
-  TSD->Cache.Canary = Canary;
+  TSD->getCache().Canary = Canary;
   // Loop a few times to make sure that a concurrent thread isn't modifying it.
   for (scudo::uptr I = 0; I < 4096U; I++)
-    EXPECT_EQ(TSD->Cache.Canary, Canary);
+    EXPECT_EQ(TSD->getCache().Canary, Canary);
   if (UnlockRequired)
     TSD->unlock();
 }

diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
index daf2a098f0e5d..c5ed6ddfa124e 100644
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -25,9 +25,6 @@
 namespace scudo {
 
 template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
-  // TODO: Add thread-safety annotation on `Cache` and `QuarantineCache`.
-  typename Allocator::CacheT Cache;
-  typename Allocator::QuarantineCacheT QuarantineCache;
   using ThisT = TSD<Allocator>;
   u8 DestructorIterations = 0;
 
@@ -60,9 +57,29 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
     Instance->commitBack(this);
   }
 
+  // Ideally, we may want to assert that all the operations on
+  // Cache/QuarantineCache always have the `Mutex` acquired. However, the
+  // current architecture of accessing TSD is not easy to cooperate with the
+  // thread-safety analysis because of pointer aliasing. So now we just add the
+  // assertion on the getters of Cache/QuarantineCache.
+  //
+  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
+  // TSD doesn't always require holding the lock. Add this assertion while the
+  // lock is always acquired.
+  typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
+    return Cache;
+  }
+  typename Allocator::QuarantineCacheT &getQuarantineCache()
+      ASSERT_CAPABILITY(Mutex) {
+    return QuarantineCache;
+  }
+
 private:
   HybridMutex Mutex;
   atomic_uptr Precedence = {};
+
+  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
+  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
 };
 
 } // namespace scudo


        


More information about the llvm-commits mailing list