[compiler-rt] f0c9d1e - [tsan] Remove special SyncClock::kInvalidTid

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 30 13:39:26 PDT 2021


Author: Vitaly Buka
Date: 2021-04-30T13:39:15-07:00
New Revision: f0c9d1e95f99f3db21c907e912c966566ae2a4ea

URL: https://github.com/llvm/llvm-project/commit/f0c9d1e95f99f3db21c907e912c966566ae2a4ea
DIFF: https://github.com/llvm/llvm-project/commit/f0c9d1e95f99f3db21c907e912c966566ae2a4ea.diff

LOG: [tsan] Remove special SyncClock::kInvalidTid

Followup for D101428.

Reviewed By: dvyukov

Differential Revision: https://reviews.llvm.org/D101604
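
For context: Dirty::tid is a bitfield of only 64 - kClkBits bits, so the generic
kInvalidTid sentinel does not fit into it directly. Rather than exposing a special
SyncClock::kInvalidTid for that, the field now translates to and from a private,
narrower sentinel behind tid() / set_tid() accessors. Below is a minimal,
self-contained sketch of that pattern; the kClkBits and kInvalidTid values are
illustrative placeholders, not the real tsan constants.

#include <cassert>
#include <cstdint>

// Placeholder values for illustration; the real constants live in the tsan
// runtime headers.
constexpr unsigned kClkBits = 42;
constexpr uint32_t kInvalidTid = static_cast<uint32_t>(-1);

struct Dirty {
  // Translate the stored narrow sentinel back to the generic kInvalidTid.
  uint32_t tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; }
  // Store kInvalidTid as the narrow sentinel that fits into the bitfield.
  void set_tid(uint32_t tid) {
    tid_ = (tid == kInvalidTid) ? kShortInvalidTid : tid;
  }
  uint64_t epoch : kClkBits;

 private:
  // The full kInvalidTid won't fit into the (64 - kClkBits)-bit field, so the
  // all-ones value of that width stands in for it.
  static const uint64_t kShortInvalidTid = (1ull << (64 - kClkBits)) - 1;
  uint64_t tid_ : 64 - kClkBits;
};

static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit");

int main() {
  Dirty d{};
  d.set_tid(kInvalidTid);
  assert(d.tid() == kInvalidTid);  // round-trips through the narrow sentinel
  d.set_tid(7);
  assert(d.tid() == 7);            // ordinary tids are stored unchanged
}

With the translation hidden inside Dirty, the rest of tsan_clock.cpp can compare
against the ordinary kInvalidTid everywhere, which is what the changes below do.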

Added: 
    

Modified: 
    compiler-rt/lib/tsan/rtl/tsan_clock.cpp
    compiler-rt/lib/tsan/rtl/tsan_clock.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.cpp b/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
index c91b29cb22b4..8e5188392caf 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
@@ -150,7 +150,7 @@ void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
   bool acquired = false;
   for (unsigned i = 0; i < kDirtyTids; i++) {
     SyncClock::Dirty dirty = src->dirty_[i];
-    unsigned tid = dirty.tid;
+    unsigned tid = dirty.tid();
     if (tid != kInvalidTid) {
       if (clk_[tid] < dirty.epoch) {
         clk_[tid] = dirty.epoch;
@@ -299,10 +299,10 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
     dst->tab_idx_ = cached_idx_;
     dst->size_ = cached_size_;
     dst->blocks_ = cached_blocks_;
-    CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
+    CHECK_EQ(dst->dirty_[0].tid(), kInvalidTid);
     // The cached clock is shared (immutable),
     // so this is where we store the current clock.
-    dst->dirty_[0].tid = tid_;
+    dst->dirty_[0].set_tid(tid_);
     dst->dirty_[0].epoch = clk_[tid_];
     dst->release_store_tid_ = tid_;
     dst->release_store_reused_ = reused_;
@@ -336,8 +336,7 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
     ce.reused = 0;
     i++;
   }
-  for (uptr i = 0; i < kDirtyTids; i++)
-    dst->dirty_[i].tid = kInvalidTid;
+  for (uptr i = 0; i < kDirtyTids; i++) dst->dirty_[i].set_tid(kInvalidTid);
   dst->release_store_tid_ = tid_;
   dst->release_store_reused_ = reused_;
   // Remember that we don't need to acquire it in future.
@@ -369,10 +368,10 @@ void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
   // Update the threads time, but preserve 'acquired' flag.
   for (unsigned i = 0; i < kDirtyTids; i++) {
     SyncClock::Dirty *dirty = &dst->dirty_[i];
-    const unsigned tid = dirty->tid;
+    const unsigned tid = dirty->tid();
     if (tid == tid_ || tid == kInvalidTid) {
       CPP_STAT_INC(StatClockReleaseFast);
-      dirty->tid = tid_;
+      dirty->set_tid(tid_);
       dirty->epoch = clk_[tid_];
       return;
     }
@@ -393,8 +392,8 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
     return false;
   for (unsigned i = 0; i < kDirtyTids; i++) {
     SyncClock::Dirty dirty = src->dirty_[i];
-    if (dirty.tid != kInvalidTid) {
-      if (clk_[dirty.tid] < dirty.epoch)
+    if (dirty.tid() != kInvalidTid) {
+      if (clk_[dirty.tid()] < dirty.epoch)
         return false;
     }
   }
@@ -453,8 +452,7 @@ void SyncClock::ResetImpl() {
   blocks_ = 0;
   release_store_tid_ = kInvalidTid;
   release_store_reused_ = 0;
-  for (uptr i = 0; i < kDirtyTids; i++)
-    dirty_[i].tid = kInvalidTid;
+  for (uptr i = 0; i < kDirtyTids; i++) dirty_[i].set_tid(kInvalidTid);
 }
 
 void SyncClock::Resize(ClockCache *c, uptr nclk) {
@@ -503,10 +501,10 @@ void SyncClock::Resize(ClockCache *c, uptr nclk) {
 void SyncClock::FlushDirty() {
   for (unsigned i = 0; i < kDirtyTids; i++) {
     Dirty *dirty = &dirty_[i];
-    if (dirty->tid != kInvalidTid) {
-      CHECK_LT(dirty->tid, size_);
-      elem(dirty->tid).epoch = dirty->epoch;
-      dirty->tid = kInvalidTid;
+    if (dirty->tid() != kInvalidTid) {
+      CHECK_LT(dirty->tid(), size_);
+      elem(dirty->tid()).epoch = dirty->epoch;
+      dirty->set_tid(kInvalidTid);
     }
   }
 }
@@ -559,7 +557,7 @@ ALWAYS_INLINE bool SyncClock::Cachable() const {
   if (size_ == 0)
     return false;
   for (unsigned i = 0; i < kDirtyTids; i++) {
-    if (dirty_[i].tid != kInvalidTid)
+    if (dirty_[i].tid() != kInvalidTid)
       return false;
   }
   return atomic_load_relaxed(ref_ptr(tab_)) == 1;
@@ -606,7 +604,7 @@ ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
 u64 SyncClock::get(unsigned tid) const {
   for (unsigned i = 0; i < kDirtyTids; i++) {
     Dirty dirty = dirty_[i];
-    if (dirty.tid == tid)
+    if (dirty.tid() == tid)
       return dirty.epoch;
   }
   return elem(tid).epoch;
@@ -625,9 +623,8 @@ void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
   for (uptr i = 0; i < size_; i++)
     printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
   printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
-      release_store_tid_, release_store_reused_,
-      dirty_[0].tid, dirty_[0].epoch,
-      dirty_[1].tid, dirty_[1].epoch);
+         release_store_tid_, release_store_reused_, dirty_[0].tid(),
+         dirty_[0].epoch, dirty_[1].tid(), dirty_[1].epoch);
 }
 
 void SyncClock::Iter::Next() {

diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.h b/compiler-rt/lib/tsan/rtl/tsan_clock.h
index cae424a388a1..31376a1bc9e2 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_clock.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_clock.h
@@ -63,14 +63,22 @@ class SyncClock {
   friend class ThreadClock;
   friend class Iter;
   static const uptr kDirtyTids = 2;
-  // Full kInvalidTid won't fit into Dirty::tid.
-  static const u64 kInvalidTid = (1ull << (64 - kClkBits)) - 1;
 
   struct Dirty {
-    u64 epoch  : kClkBits;
-    u64 tid : 64 - kClkBits;  // kInvalidId if not active
+    u32 tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; }
+    void set_tid(u32 tid) {
+      tid_ = tid == kInvalidTid ? kShortInvalidTid : tid;
+    }
+    u64 epoch : kClkBits;
+
+   private:
+    // Full kInvalidTid won't fit into Dirty::tid.
+    static const u64 kShortInvalidTid = (1ull << (64 - kClkBits)) - 1;
+    u64 tid_ : 64 - kClkBits;  // kShortInvalidTid if not active
   };
 
+  static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit");
+
   unsigned release_store_tid_;
   unsigned release_store_reused_;
   Dirty dirty_[kDirtyTids];
@@ -148,7 +156,6 @@ class ThreadClock {
 
  private:
   static const uptr kDirtyTids = SyncClock::kDirtyTids;
-  static const u64 kInvalidTid = SyncClock::kInvalidTid;
   // Index of the thread associated with the clock ("current thread").
   const unsigned tid_;
   const unsigned reused_;  // tid_ reuse count.

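A note on the header change above: the narrow sentinel kShortInvalidTid is now a
private detail of Dirty, so every call site in tsan_clock.cpp goes through
tid()/set_tid() and compares against the regular kInvalidTid, and ThreadClock no
longer needs its own kInvalidTid alias. The new static_assert records that Dirty
still packs epoch and tid into a single 64-bit word.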
