[compiler-rt] 7bd81fe - tsan: don't save creation stack for some sync objects

Dmitry Vyukov via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 2 04:30:29 PDT 2021


Author: Dmitry Vyukov
Date: 2021-08-02T13:30:24+02:00
New Revision: 7bd81fe1831e909e762e2f1f5caaba154989d4a1

URL: https://github.com/llvm/llvm-project/commit/7bd81fe1831e909e762e2f1f5caaba154989d4a1
DIFF: https://github.com/llvm/llvm-project/commit/7bd81fe1831e909e762e2f1f5caaba154989d4a1.diff

LOG: tsan: don't save creation stack for some sync objects

Currently we save the creation stack for sync objects always.
But it's not needed for some sync objects, most notably atomics.
We simply don't use the creation stack of atomics anywhere.
Allow callers to control saving of the creation stack
and don't save it for atomics.

Depends on D107257.

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D107258

Added: 
    

Modified: 
    compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
    compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
    compiler-rt/lib/tsan/rtl/tsan_sync.cpp
    compiler-rt/lib/tsan/rtl/tsan_sync.h
    compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp

Removed: 
    


################################################################################
diff  --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index c25de24426f89..42b273250d3ab 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -268,7 +268,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     return;
   }
   __sync_synchronize();
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
   Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
@@ -282,7 +282,7 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   if (LIKELY(mo == mo_relaxed))
     return F(a, v);
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
   Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
@@ -415,7 +415,7 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
   }
 
   bool release = IsReleaseOrder(mo);
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
   RWLock l(&s->mtx, release);
   T cc = *c;
   T pr = func_cas(a, cc, v);

diff  --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 359b15c22ebdd..342d65cf43d46 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -71,9 +71,10 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
     MemoryWrite(thr, pc, addr, kSizeLog1);
     thr->is_freeing = false;
   }
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
   Lock l(&s->mtx);
   s->SetFlags(flagz & MutexCreationFlagMask);
+  // Save stack in the case the sync object was created before as atomic.
   if (!SANITIZER_GO && s->creation_stack_id == 0)
     s->creation_stack_id = CurrentStackId(thr, pc);
 }
@@ -143,7 +144,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
 void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
-    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
     {
       ReadLock l(&s->mtx);
       s->UpdateFlags(flagz);
@@ -171,7 +172,7 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
   bool first = false;
   bool report_double_lock = false;
   {
-    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
     Lock l(&s->mtx);
     s->UpdateFlags(flagz);
     thr->fast_state.IncrementEpoch();
@@ -221,7 +222,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   bool report_bad_unlock = false;
   int rec = 0;
   {
-    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
     Lock l(&s->mtx);
     thr->fast_state.IncrementEpoch();
     TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
@@ -260,7 +261,7 @@ void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
     {
-      SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+      SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
       ReadLock l(&s->mtx);
       s->UpdateFlags(flagz);
       Callback cb(thr, pc);
@@ -279,7 +280,7 @@ void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   bool report_bad_lock = false;
   bool pre_lock = false;
   {
-    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
     ReadLock l(&s->mtx);
     s->UpdateFlags(flagz);
     thr->fast_state.IncrementEpoch();
@@ -318,7 +319,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   u64 mid = 0;
   bool report_bad_unlock = false;
   {
-    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
     Lock l(&s->mtx);
     thr->fast_state.IncrementEpoch();
     TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
@@ -351,7 +352,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
   u64 mid = 0;
   bool report_bad_unlock = false;
   {
-    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
     Lock l(&s->mtx);
     bool write = true;
     if (s->owner_tid == kInvalidTid) {
@@ -392,7 +393,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
 
 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
   Lock l(&s->mtx);
   s->owner_tid = kInvalidTid;
   s->recursion = 0;
@@ -400,7 +401,7 @@ void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
 
 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
   ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId());
 }
 
@@ -438,7 +439,7 @@ void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
   Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
@@ -450,7 +451,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
   Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
@@ -462,7 +463,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
   Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.

diff  --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index a1bcdcf9cfdb0..1773cb30afae9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -20,13 +20,14 @@ void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
 
 SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
 
-void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid,
+                   bool save_stack) {
   this->addr = addr;
   this->uid = uid;
   this->next = 0;
 
   creation_stack_id = 0;
-  if (!SANITIZER_GO)  // Go does not use them
+  if (save_stack && !SANITIZER_GO)  // Go does not use them
     creation_stack_id = CurrentStackId(thr, pc);
   if (common_flags()->detect_deadlocks)
     DDMutexInit(thr, pc, this);
@@ -190,7 +191,8 @@ MBlock* MetaMap::GetBlock(uptr p) {
   }
 }
 
-SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create) {
+SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+                          bool save_stack) {
   u32 *meta = MemToMeta(addr);
   u32 idx0 = *meta;
   u32 myidx = 0;
@@ -219,7 +221,7 @@ SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create) {
       const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
       myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
       mys = sync_alloc_.Map(myidx);
-      mys->Init(thr, pc, addr, uid);
+      mys->Init(thr, pc, addr, uid, save_stack);
     }
     mys->next = idx0;
     if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,

diff  --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h
index 73556dd71246d..79a2f8dd13395 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -64,7 +64,7 @@ struct SyncVar {
   // with the mtx. This reduces contention for hot sync objects.
   SyncClock clock;
 
-  void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+  void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid, bool save_stack);
   void Reset(Processor *proc);
 
   u64 GetId() const {
@@ -115,11 +115,12 @@ class MetaMap {
   void ResetRange(Processor *proc, uptr p, uptr sz);
   MBlock* GetBlock(uptr p);
 
-  SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr) {
-    return GetSync(thr, pc, addr, true);
+  SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr,
+                           bool save_stack) {
+    return GetSync(thr, pc, addr, true, save_stack);
   }
   SyncVar *GetSyncIfExists(uptr addr) {
-    return GetSync(nullptr, 0, addr, false);
+    return GetSync(nullptr, 0, addr, false, false);
   }
 
   void MoveMemory(uptr src, uptr dst, uptr sz);
@@ -136,7 +137,8 @@ class MetaMap {
   SyncAlloc sync_alloc_;
   atomic_uint64_t uid_gen_;
 
-  SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create);
+  SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+                   bool save_stack);
 };
 
 }  // namespace __tsan

diff  --git a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp
index 324f4f61200fa..f902b4956788a 100644
--- a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp
+++ b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp
@@ -57,10 +57,10 @@ TEST(MetaMap, Sync) {
   m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
   SyncVar *s1 = m->GetSyncIfExists((uptr)&block[0]);
   EXPECT_EQ(s1, (SyncVar*)0);
-  s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block[0]);
+  s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block[0], false);
   EXPECT_NE(s1, (SyncVar*)0);
   EXPECT_EQ(s1->addr, (uptr)&block[0]);
-  SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block[1]);
+  SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block[1], false);
   EXPECT_NE(s2, (SyncVar*)0);
   EXPECT_EQ(s2->addr, (uptr)&block[1]);
   m->FreeBlock(thr->proc(), (uptr)&block[0]);
@@ -79,8 +79,8 @@ TEST(MetaMap, MoveMemory) {
   u64 block2[4] = {};  // fake malloc block
   m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64));
   m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64));
-  SyncVar *s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[0]);
-  SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[1]);
+  SyncVar *s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[0], false);
+  SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[1], false);
   m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64));
   MBlock *mb1 = m->GetBlock((uptr)&block1[0]);
   EXPECT_EQ(mb1, (MBlock*)0);
@@ -111,7 +111,7 @@ TEST(MetaMap, ResetSync) {
   MetaMap *m = &ctx->metamap;
   u64 block[1] = {};  // fake malloc block
   m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
-  SyncVar *s = m->GetSyncOrCreate(thr, 0, (uptr)&block[0]);
+  SyncVar *s = m->GetSyncOrCreate(thr, 0, (uptr)&block[0], false);
   s->Reset(thr->proc());
   uptr sz = m->FreeBlock(thr->proc(), (uptr)&block[0]);
   EXPECT_EQ(sz, 1 * sizeof(u64));


        


More information about the llvm-commits mailing list