[compiler-rt] [llvm] [tsan] Introduce Adaptive Delay Scheduling to TSAN (PR #178836)

Chris Cotter via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 31 18:06:01 PST 2026


https://github.com/ccotter updated https://github.com/llvm/llvm-project/pull/178836

>From 16956114f94ea5bac1d5bb127d8f75537d33f818 Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Fri, 23 Aug 2024 11:08:52 -0400
Subject: [PATCH] [tsan] Introduce Adaptive Delay Scheduling to TSAN

This commit introduces an "adaptive delay scheduler" to the TSAN
runtime. At various synchronization points, like mutex or atomic
operations, the scheduler may sleep or do small spin loops to expose
additional thread interleavings in the execution.

This change is inspired by prior work, which is discussed in more detail
on https://discourse.llvm.org/t/rfc-tsan-implementing-a-fuzz-scheduler-for-tsan/80969.
In short, https://reviews.llvm.org/D65383 was an earlier unmerged
attempt at adding a random delay scheduler. Feedback on the RFC
led to the version in this commit, aiming to limit the amount of delay.

This change aims to limit the overhead when the fuzzing scheduler is not
enabled (empty TSAN_OPTIONS). Nearly all checks are guarded by an
inlined check on a global variable that is only set when the fuzzing
scheduler is active. I ran some contrived benchmarks with empty
TSAN_OPTIONS with an unmodified TSAN runtime and with the TSAN runtime
with these changes. For example, a simple main() that creates one
background thread and runs millions of atomic ops (relaxed, acq/rel,
seq_cst) in a tight loop), and did not find meaningful (or unmeaningful)
differences in the performance.

The scheduler is added as a virtual interface to allow other behaviors
discussed in https://ceur-ws.org/Vol-2344/paper9.pdf to be added in the
future (for example, a scheduler that only allows one thread to run at
once).

An LLM helped write the adaptive behavior, in particular, the TimeBudget
class for program-level delay tracking, the tiering concept for how much
to delay the program based on type of synchronization operation, the
address sampler, and the per-thread quota. I reviewed the output and
amended the code to reduce some duplication and simplify some of the
behavior. I also used the LLM to replace its original `double`
based calculation logic with the Percent class, allowing integer based
arithmetic. Finally, the LLM helped write the unit test cases for
Percent.
---
 .../sanitizer_common/sanitizer_allocator.h    |   6 -
 .../lib/sanitizer_common/sanitizer_common.h   |   6 +
 compiler-rt/lib/tsan/rtl/CMakeLists.txt       |   1 +
 compiler-rt/lib/tsan/rtl/tsan_flags.inc       |  20 +
 .../lib/tsan/rtl/tsan_fuzzing_scheduler.cpp   | 380 ++++++++++++++++++
 .../lib/tsan/rtl/tsan_fuzzing_scheduler.h     | 113 ++++++
 .../lib/tsan/rtl/tsan_interceptors_posix.cpp  |  44 +-
 .../lib/tsan/rtl/tsan_interface_atomic.cpp    |  14 +
 compiler-rt/lib/tsan/rtl/tsan_rtl.cpp         |   5 +
 .../lib/tsan/tests/unit/CMakeLists.txt        |   1 +
 .../lib/tsan/tests/unit/tsan_percent_test.cpp | 152 +++++++
 llvm/docs/ReleaseNotes.md                     |   2 +
 12 files changed, 737 insertions(+), 7 deletions(-)
 create mode 100644 compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
 create mode 100644 compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
 create mode 100644 compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 0b28f86d14084..6154f7810334b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -47,12 +47,6 @@ void PrintHintAllocatorCannotReturnNull();
 // Callback type for iterating over chunks.
 typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
 
-inline u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
-  return (*state = *state * 1103515245 + 12345) >> 16;
-}
-
-inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)
-
 template<typename T>
 inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
   if (n <= 1) return;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 515a7c9cdf60f..564ae301475a9 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -1100,6 +1100,12 @@ inline u32 GetNumberOfCPUsCached() {
   return NumberOfCPUsCached;
 }
 
+inline u32 Rand(u32* state) {  // ANSI C linear congruential PRNG.
+  return (*state = *state * 1103515245 + 12345) >> 16;
+}
+
+inline u32 RandN(u32* state, u32 n) { return Rand(state) % n; }  // [0, n)
+
 }  // namespace __sanitizer
 
 inline void *operator new(__sanitizer::usize size,
diff --git a/compiler-rt/lib/tsan/rtl/CMakeLists.txt b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
index d7d84706bfd58..c0b6b1cf6842c 100644
--- a/compiler-rt/lib/tsan/rtl/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
@@ -26,6 +26,7 @@ set(TSAN_SOURCES
   tsan_external.cpp
   tsan_fd.cpp
   tsan_flags.cpp
+  tsan_fuzzing_scheduler.cpp
   tsan_ignoreset.cpp
   tsan_interceptors_memintrinsics.cpp
   tsan_interceptors_posix.cpp
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 77ab910f08fbc..18394e2be7dde 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -92,3 +92,23 @@ TSAN_FLAG(LockDuringWriteSetting, lock_during_write, kLockDuringAllWrites,
           "\"disable_for_all_processes\" - don't lock during all writes in "
           "the current process and it's children processes.")
 #endif
+
+TSAN_FLAG(const char*, fuzzing_scheduler, "",
+          "Choosing fuzzing scheduler: '', 'adaptive'")
+
+TSAN_FLAG(int, adaptive_delay_target_overhead_pct, 25,
+          "Target percentage overhead for adaptive delay injection")
+TSAN_FLAG(int, adaptive_delay_relaxed_sample_rate, 10000,
+          "Sample 1 in N relaxed atomic operations for delay")
+TSAN_FLAG(int, adaptive_delay_sync_atomic_sample_rate, 100,
+          "Sample 1 in N acquire/release/seq_cst atomic operations for delay")
+TSAN_FLAG(int, adaptive_delay_mutex_sample_rate, 10,
+          "Sample 1 in N mutex/cv operations for delay")
+TSAN_FLAG(int, adaptive_delay_max_atomic_us, 50,
+          "Maximum delay in microseconds for atomic operations")
+TSAN_FLAG(int, adaptive_delay_max_sync_us, 500,
+          "Maximum delay in microseconds for mutex/cv/thread operations")
+TSAN_FLAG(int, adaptive_delay_window_ms, 100,
+          "Time window in milliseconds for delay budget calculation")
+TSAN_FLAG(int, adaptive_delay_random_seed, 0,
+          "Random seed for delay injection (0 = use time-based seed)")
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
new file mode 100644
index 0000000000000..5ac2d51b2dfe7
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -0,0 +1,380 @@
+//===-- tsan_fuzzing_scheduler.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_fuzzing_scheduler.h"
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno_codes.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+extern "C" int pthread_detach(void*);
+
+namespace __interception {
+extern int (*real_pthread_detach)(void*);
+}  // namespace __interception
+
+namespace __tsan {
+
+namespace {
+
+struct NullFuzzingScheduler : IFuzzingScheduler {
+  void Init() override {}
+  void AtomicOpFence(int mo) override {}
+  void AtomicOpAddr(uptr addr, int mo) override {}
+  void MutexCvOp() override {}
+  int DetachThread(void* th) override { return REAL(pthread_detach)(th); }
+  void BeforeChildThreadRuns() override {}
+  void AfterThreadCreation() override {}
+  void JoinOp() override {}
+};
+
+static constexpr u64 microseconds_per_second = 1000000ULL;
+
+// =============================================================================
+// AdaptiveDelayScheduler: Time-budget aware delay injection for race exposure
+// =============================================================================
+//
+// This scheduler injects delays to expose data races while maintaining a
+// configurable overhead target. It uses several strategies:
+//
+// 1. Time-Budget Controller: Tracks cumulative delays vs wall-clock time
+//    and adjusts delay probability to maintain target overhead.
+//
+// 2. Tiered Delays: Different delay strategies for different op types:
+//    - Relaxed atomics: Very rare sampling, tiny spin delays
+//    - Sync atomics (acq/rel/seq_cst): Moderate sampling, small usleep
+//    - Mutex/CV ops: Higher sampling, larger delays
+//    - Thread create/join: Always delay (rare but high value)
+//
+// 3. Address-based Sampling: Exponential backoff per address to avoid
+//    repeatedly delaying hot atomics.
+//
+// 4. Per-thread Quotas: Each thread has a delay budget per time window.
+
+struct AdaptiveDelayScheduler : NullFuzzingScheduler {
+  struct TimeBudget {
+    atomic_uint64_t total_delay_ns_;
+    u64 program_start_ns_;
+    int target_overhead_pct_;
+    Percent target_low_;
+    Percent target_high_;
+
+    void Init(int target_pct) {
+      atomic_store(&total_delay_ns_, 0, memory_order_relaxed);
+      program_start_ns_ = NanoTime();
+      target_overhead_pct_ = target_pct;
+      target_low_ = Percent::FromPct(
+          target_overhead_pct_ >= 5 ? target_overhead_pct_ - 5 : 0);
+      target_high_ = Percent::FromPct(target_overhead_pct_ + 5);
+    }
+
+    void RecordDelay(u64 delay_ns) {
+      atomic_fetch_add(&total_delay_ns_, delay_ns, memory_order_relaxed);
+    }
+
+    Percent GetOverheadPercent() {
+      u64 elapsed = NanoTime() - program_start_ns_;
+      u64 one_millisecond = microseconds_per_second;
+      if (elapsed < one_millisecond)
+        return Percent::FromPct(0);
+      u64 delay = atomic_load(&total_delay_ns_, memory_order_relaxed);
+      return Percent::FromRatio(delay, elapsed);
+    }
+
+    bool ShouldDelay(unsigned int* seed) {
+      Percent ratio = GetOverheadPercent();
+
+      if (ratio < target_low_)
+        return true;
+      if (ratio > target_high_)
+        return false;
+
+      // Linear interpolation: at target_low -> 100%, at target_high -> 0%
+      Percent prob = (target_high_ - ratio) / (target_high_ - target_low_);
+      return prob.RandomCheck(seed);
+    }
+  };
+
+  struct ThreadDelayState {
+    u64 window_start_ns_;
+    u32 delays_this_window_;
+    u32 max_delays_per_window_;
+    u64 window_duration_ns_;
+
+    void Init(u64 window_ms) {
+      window_start_ns_ = NanoTime();
+      delays_this_window_ = 0;
+      static constexpr int max_delays_per_window_default = 500;
+      max_delays_per_window_ = max_delays_per_window_default;
+      window_duration_ns_ = window_ms * microseconds_per_second;
+    }
+
+    bool CanDelay() {
+      u64 now = NanoTime();
+      bool needs_reset = now - window_start_ns_ > window_duration_ns_;
+      if (needs_reset) {
+        window_start_ns_ = now;
+        delays_this_window_ = 0;
+      }
+
+      if (delays_this_window_ >= max_delays_per_window_)
+        return false;
+      return true;
+    }
+
+    void RecordOneDelay() { ++delays_this_window_; }
+  };
+
+  // Address Sampler with Exponential Backoff
+  struct AddressSampler {
+    static constexpr u64 TABLE_SIZE = 2048;
+    struct Entry {
+      atomic_uintptr_t addr_;
+      atomic_uint32_t count_;
+    };
+    Entry table_[TABLE_SIZE];
+    static constexpr u32 ExponentialBackoffCap = 128;
+
+    void Init() {
+      for (u64 i = 0; i < TABLE_SIZE; ++i) {
+        atomic_store(&table_[i].addr_, 0, memory_order_relaxed);
+        atomic_store(&table_[i].count_, 0, memory_order_relaxed);
+      }
+    }
+
+    static ALWAYS_INLINE u64 splitmix64(u64 x) {
+      x = (x ^ (x >> 30)) * 0xBF58476D1CE4E5B9ULL;
+      x = (x ^ (x >> 27)) * 0x94D049BB133111EBULL;
+      x = x ^ (x >> 31);
+      return x;
+    }
+
+    // Uses exponential backoff: delay on 1st, 2nd, 4th, 8th, 16th, ...
+    bool ShouldDelayAddr(uptr addr) {
+      u64 idx = splitmix64(addr >> 3) & (TABLE_SIZE - 1);
+      Entry& e = table_[idx];
+
+      // This function is intentionally not thread safe.
+      // If two threads access the same hashed table entry in parallel,
+      // worst case we may return true too often. That is an acceptable
+      // trade-off compared to the cost of full locking.
+
+      uptr stored_addr = atomic_load(&e.addr_, memory_order_relaxed);
+      if (stored_addr != addr) {
+        // Hash Collision - reset
+        atomic_store(&e.addr_, addr, memory_order_relaxed);
+        atomic_store(&e.count_, 1, memory_order_relaxed);
+        return true;
+      }
+
+      u32 count = atomic_fetch_add(&e.count_, 1, memory_order_relaxed) + 1;
+
+      if ((count & (count - 1)) == 0 && count <= ExponentialBackoffCap)
+        return true;
+      return false;
+    }
+  };
+
+  TimeBudget budget_;
+  AddressSampler sampler_;
+
+  int relaxed_sample_rate_;
+  int sync_atomic_sample_rate_;
+  int mutex_sample_rate_;
+  int max_atomic_delay_us_;
+  int max_sync_delay_us_;
+  u64 window_ms_;
+
+  static thread_local unsigned int tls_random_seed_;
+
+  static thread_local ThreadDelayState tls_state_;
+  static thread_local bool tls_initialized_;
+
+  void Init() override { InitTls(); }
+
+  void InitTls() {
+    tls_state_.Init(window_ms_);
+    tls_random_seed_ = flags()->adaptive_delay_random_seed;
+    if (tls_random_seed_ == 0)
+      tls_random_seed_ = NanoTime();
+    tls_initialized_ = true;
+  }
+
+  AdaptiveDelayScheduler() {
+    relaxed_sample_rate_ = flags()->adaptive_delay_relaxed_sample_rate;
+    sync_atomic_sample_rate_ = flags()->adaptive_delay_sync_atomic_sample_rate;
+    mutex_sample_rate_ = flags()->adaptive_delay_mutex_sample_rate;
+    max_atomic_delay_us_ = flags()->adaptive_delay_max_atomic_us;
+    max_sync_delay_us_ = flags()->adaptive_delay_max_sync_us;
+    window_ms_ = flags()->adaptive_delay_window_ms;
+
+    int target_pct = flags()->adaptive_delay_target_overhead_pct;
+    if (target_pct < 1)
+      target_pct = 1;
+
+    budget_.Init(target_pct);
+    sampler_.Init();
+
+    Printf("INFO: ThreadSanitizer AdaptiveDelayScheduler initialized\n");
+    Printf("  Target overhead: %d%%\n", target_pct);
+    Printf("  Random seed: %u\n", tls_random_seed_);
+    Printf("  Relaxed atomic sample rate: 1/%d\n", relaxed_sample_rate_);
+    Printf("  Sync atomic sample rate: 1/%d\n", sync_atomic_sample_rate_);
+    Printf("  Mutex sample rate: 1/%d\n", mutex_sample_rate_);
+    Printf("  Max atomic delay: %d us\n", max_atomic_delay_us_);
+    Printf("  Max sync delay: %d us\n", max_sync_delay_us_);
+    Printf("  Delay window: %llu ms\n", window_ms_);
+  }
+
+  void DoSpinDelay(int cycles) {
+    volatile int v = 0;
+    for (int i = 0; i < cycles; ++i) v = i;
+    (void)v;
+  }
+
+  void DoYieldDelay() { internal_sched_yield(); }
+
+  void UsleepDelay(int max_us, unsigned int* seed) {
+    int delay_us = 1 + (Rand(seed) % max_us);
+    internal_usleep(delay_us);
+    budget_.RecordDelay(delay_us * 1000ULL);
+  }
+
+  void AtomicRelaxedOpDelay() {
+    if ((Rand(&tls_random_seed_) % relaxed_sample_rate_) != 0)
+      return;
+    if (!budget_.ShouldDelay(&tls_random_seed_))
+      return;
+    if (!tls_state_.CanDelay())
+      return;
+
+    DoSpinDelay(10 + (Rand(&tls_random_seed_) % 10));
+    tls_state_.RecordOneDelay();
+    static constexpr int spin_delay_estimate_ns = 50;
+    budget_.RecordDelay(spin_delay_estimate_ns);
+  }
+
+  void AtomicSyncOpDelay(uptr* addr) {
+    if ((Rand(&tls_random_seed_) % sync_atomic_sample_rate_) != 0)
+      return;
+    if (!budget_.ShouldDelay(&tls_random_seed_))
+      return;
+    if (!tls_state_.CanDelay())
+      return;
+
+    if (addr && !sampler_.ShouldDelayAddr(*addr))
+      return;
+
+    if (max_atomic_delay_us_ <= 1) {
+      DoYieldDelay();
+      static constexpr int yield_delay_estimate_ns = 100;
+      budget_.RecordDelay(yield_delay_estimate_ns);
+    } else
+      UsleepDelay(max_atomic_delay_us_, &tls_random_seed_);
+    tls_state_.RecordOneDelay();
+  }
+
+  void AtomicOpFence(int mo) override {
+    CHECK(tls_initialized_);
+
+    if (mo < mo_acquire)
+      AtomicRelaxedOpDelay();
+    else
+      AtomicSyncOpDelay(nullptr);
+  }
+
+  void AtomicOpAddr(uptr addr, int mo) override {
+    CHECK(tls_initialized_);
+
+    if (mo < mo_acquire)
+      AtomicRelaxedOpDelay();
+    else
+      AtomicSyncOpDelay(&addr);
+  }
+
+  void UnsampledDelay() {
+    CHECK(tls_initialized_);
+
+    if (!budget_.ShouldDelay(&tls_random_seed_))
+      return;
+    if (!tls_state_.CanDelay())
+      return;
+
+    UsleepDelay(max_sync_delay_us_, &tls_random_seed_);
+    tls_state_.RecordOneDelay();
+  }
+
+  void MutexCvOp() override {
+    CHECK(tls_initialized_);
+
+    if ((Rand(&tls_random_seed_) % mutex_sample_rate_) != 0)
+      return;
+    if (!budget_.ShouldDelay(&tls_random_seed_))
+      return;
+    if (!tls_state_.CanDelay())
+      return;
+
+    UsleepDelay(max_sync_delay_us_, &tls_random_seed_);
+    tls_state_.RecordOneDelay();
+  }
+
+  void JoinOp() override { UnsampledDelay(); }
+
+  void BeforeChildThreadRuns() override {
+    InitTls();
+    UnsampledDelay();
+  }
+
+  void AfterThreadCreation() override { UnsampledDelay(); }
+
+  int DetachThread(void* th) override {
+    int res = REAL(pthread_detach)(th);
+    UnsampledDelay();
+    return res;
+  }
+};
+
+thread_local unsigned int AdaptiveDelayScheduler::tls_random_seed_;
+thread_local AdaptiveDelayScheduler::ThreadDelayState
+    AdaptiveDelayScheduler::tls_state_;
+thread_local bool AdaptiveDelayScheduler::tls_initialized_ = false;
+
+IFuzzingScheduler& FuzzingSchedulerDispatcher() {
+  if (!internal_strcmp(flags()->fuzzing_scheduler, "")) {
+    is_fuzz_scheduler_enabled = false;
+    static NullFuzzingScheduler scheduler;
+    return scheduler;
+  } else if (!internal_strcmp(flags()->fuzzing_scheduler, "adaptive")) {
+    is_fuzz_scheduler_enabled = true;
+    static AdaptiveDelayScheduler scheduler;
+    return scheduler;
+  } else {
+    Printf(
+        "FATAL: ThreadSanitizer invalid fuzzing scheduler. Please check "
+        "TSAN_OPTIONS!\n");
+    Die();
+  }
+}
+
+}  // namespace
+
+bool is_fuzz_scheduler_enabled;
+
+IFuzzingScheduler& GetFuzzingScheduler() {
+  static IFuzzingScheduler& scheduler = FuzzingSchedulerDispatcher();
+  return scheduler;
+}
+
+}  // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
new file mode 100644
index 0000000000000..3c91bc534ba23
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
@@ -0,0 +1,113 @@
+//===-- tsan_fuzzing_scheduler.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FUZZING_SCHEDULER_H
+#define TSAN_FUZZING_SCHEDULER_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __tsan {
+
+// Fixed-point arithmetic type that mimics floating point operations
+class Percent {
+  using u32 = __sanitizer::u32;
+  using u64 = __sanitizer::u64;
+
+  u32 bp_{};  // basis points (0-10000 represents 0.0-1.0)
+  bool is_valid_{};
+
+  static constexpr u32 kBasisPointsPerUnit = 10000;
+
+  Percent(u32 bp, bool is_valid) : bp_(bp), is_valid_(is_valid) {}
+
+ public:
+  Percent() = default;
+  Percent(const Percent&) = default;
+  Percent& operator=(const Percent&) = default;
+  Percent(Percent&&) = default;
+  Percent& operator=(Percent&&) = default;
+
+  static Percent FromPct(u32 pct) { return Percent{pct * 100, true}; }
+  static Percent FromRatio(u64 numerator, u64 denominator) {
+    if (denominator == 0)
+      return Percent{0, false};
+    // Avoid overflow: scale down if needed
+    if (numerator > UINT64_MAX / kBasisPointsPerUnit) {
+      return Percent{(u32)((numerator / denominator) * kBasisPointsPerUnit),
+                     true};
+    }
+    return Percent{(u32)((numerator * kBasisPointsPerUnit) / denominator),
+                   true};
+  }
+
+  bool IsValid() const { return is_valid_; }
+
+  // Returns true with probability equal to the percentage.
+  bool RandomCheck(u32* seed) const {
+    return (Rand(seed) % kBasisPointsPerUnit) < bp_;
+  }
+
+  int GetPct() const { return bp_ / 100; }
+  int GetBasisPoints() const { return bp_; }
+
+  bool operator==(const Percent& other) const { return bp_ == other.bp_; }
+  bool operator!=(const Percent& other) const { return bp_ != other.bp_; }
+  bool operator<(const Percent& other) const { return bp_ < other.bp_; }
+  bool operator>(const Percent& other) const { return bp_ > other.bp_; }
+  bool operator<=(const Percent& other) const { return bp_ <= other.bp_; }
+  bool operator>=(const Percent& other) const { return bp_ >= other.bp_; }
+
+  Percent operator-(const Percent& other) const {
+    if (!is_valid_ || !other.is_valid_)
+      return Percent{0, false};
+    if (bp_ < other.bp_)
+      return Percent{0, false};
+    return Percent{bp_ - other.bp_, true};
+  }
+
+  Percent operator/(const Percent& other) const {
+    if (!is_valid_ || !other.is_valid_)
+      return Percent{0, false};
+    if (other.bp_ == 0)
+      return Percent{0, false};
+    return Percent{(bp_ * kBasisPointsPerUnit) / other.bp_, true};
+  }
+};
+
+struct IFuzzingScheduler {
+  virtual void Init() = 0;
+
+  virtual void MutexCvOp() = 0;
+  virtual void AtomicOpFence(int mo) = 0;
+  virtual void AtomicOpAddr(__sanitizer::uptr addr, int mo) = 0;
+
+  virtual int DetachThread(void* th) = 0;
+  virtual void AfterThreadCreation() = 0;
+  virtual void BeforeChildThreadRuns() = 0;
+  virtual void JoinOp() = 0;
+
+ protected:
+  IFuzzingScheduler() = default;
+  ~IFuzzingScheduler() = default;
+};
+
+IFuzzingScheduler& GetFuzzingScheduler();
+
+extern bool is_fuzz_scheduler_enabled;
+
+ALWAYS_INLINE bool IsFuzzSchedulerEnabled() {
+  return is_fuzz_scheduler_enabled;
+}
+
+}  // namespace __tsan
+
+#endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 714220a0109a8..f34aa338f5aa1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -34,6 +34,7 @@
 #if SANITIZER_APPLE && !SANITIZER_GO
 #  include "tsan_flags.h"
 #endif
+#include "tsan_fuzzing_scheduler.h"
 #include "tsan_interceptors.h"
 #include "tsan_interface.h"
 #include "tsan_mman.h"
@@ -1065,6 +1066,9 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
     ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
     p->started.Post();
   }
+
+  GetFuzzingScheduler().BeforeChildThreadRuns();
+
   void *res = callback(param);
   // Prevent the callback from being tail called,
   // it mixes up stack traces.
@@ -1128,11 +1132,13 @@ TSAN_INTERCEPTOR(int, pthread_create,
   }
   if (attr == &myattr)
     pthread_attr_destroy(&myattr);
+  GetFuzzingScheduler().AfterThreadCreation();
   return res;
 }
 
 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+  GetFuzzingScheduler().JoinOp();
 #if SANITIZER_ANDROID
   {
     // In Bionic, if the target thread has already exited when pthread_detach is
@@ -1175,7 +1181,7 @@ int internal_pthread_join(void *th, void **ret) {
 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
   SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
-  int res = REAL(pthread_detach)(th);
+  int res = GetFuzzingScheduler().DetachThread(th);
   if (res == 0) {
     ThreadDetach(thr, pc, tid);
   }
@@ -1197,6 +1203,7 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
   SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
   ThreadIgnoreBegin(thr, pc);
+  GetFuzzingScheduler().JoinOp();
   int res = REAL(pthread_tryjoin_np)(th, ret);
   ThreadIgnoreEnd(thr);
   if (res == 0)
@@ -1211,6 +1218,7 @@ TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
   SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
   ThreadIgnoreBegin(thr, pc);
+  GetFuzzingScheduler().JoinOp();
   int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
   ThreadIgnoreEnd(thr);
   if (res == 0)
@@ -1324,6 +1332,7 @@ int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+  GetFuzzingScheduler().MutexCvOp();
   return cond_wait(
       thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
       m);
@@ -1332,6 +1341,7 @@ INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+  GetFuzzingScheduler().MutexCvOp();
   return cond_wait(
       thr, pc, &si,
       [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
@@ -1343,6 +1353,7 @@ INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
             __sanitizer_clockid_t clock, void *abstime) {
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
+  GetFuzzingScheduler().MutexCvOp();
   return cond_wait(
       thr, pc, &si,
       [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
@@ -1358,6 +1369,7 @@ INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
             void *reltime) {
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
+  GetFuzzingScheduler().MutexCvOp();
   return cond_wait(
       thr, pc, &si,
       [=]() {
@@ -1371,6 +1383,7 @@ INTERCEPTOR(int, pthread_cond_signal, void *c) {
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+  GetFuzzingScheduler().MutexCvOp();
   return REAL(pthread_cond_signal)(cond);
 }
 
@@ -1378,6 +1391,7 @@ INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+  GetFuzzingScheduler().MutexCvOp();
   return REAL(pthread_cond_broadcast)(cond);
 }
 
@@ -1385,6 +1399,7 @@ INTERCEPTOR(int, pthread_cond_destroy, void *c) {
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_cond_destroy)(cond);
   if (common_flags()->legacy_pthread_cond) {
     // Free our aux cond and zero the pointer to not leave dangling pointers.
@@ -1413,6 +1428,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
 
 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_mutex_destroy)(m);
   if (res == 0 || res == errno_EBUSY) {
     MutexDestroy(thr, pc, (uptr)m);
@@ -1423,6 +1439,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
 TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
   MutexPreLock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = BLOCK_REAL(pthread_mutex_lock)(m);
   if (res == errno_EOWNERDEAD)
     MutexRepair(thr, pc, (uptr)m);
@@ -1435,6 +1452,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
 
 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_mutex_trylock)(m);
   if (res == errno_EOWNERDEAD)
     MutexRepair(thr, pc, (uptr)m);
@@ -1446,6 +1464,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
 #if !SANITIZER_APPLE
 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_mutex_timedlock)(m, abstime);
   if (res == 0) {
     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1457,6 +1476,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
 TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
   MutexUnlock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_mutex_unlock)(m);
   if (res == errno_EINVAL)
     MutexInvalidAccess(thr, pc, (uptr)m);
@@ -1468,6 +1488,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
                  __sanitizer_clockid_t clock, void *abstime) {
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
   MutexPreLock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
   if (res == errno_EOWNERDEAD)
     MutexRepair(thr, pc, (uptr)m);
@@ -1486,6 +1507,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
 TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
   MutexPreLock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = BLOCK_REAL(__pthread_mutex_lock)(m);
   if (res == errno_EOWNERDEAD)
     MutexRepair(thr, pc, (uptr)m);
@@ -1499,6 +1521,7 @@ TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
 TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
   MutexUnlock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(__pthread_mutex_unlock)(m);
   if (res == errno_EINVAL)
     MutexInvalidAccess(thr, pc, (uptr)m);
@@ -1510,6 +1533,7 @@ TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
 #if !SANITIZER_APPLE
 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_spin_init)(m, pshared);
   if (res == 0) {
     MutexCreate(thr, pc, (uptr)m);
@@ -1519,6 +1543,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
 
 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_spin_destroy)(m);
   if (res == 0) {
     MutexDestroy(thr, pc, (uptr)m);
@@ -1529,6 +1554,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
   MutexPreLock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = BLOCK_REAL(pthread_spin_lock)(m);
   if (res == 0) {
     MutexPostLock(thr, pc, (uptr)m);
@@ -1538,6 +1564,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
 
 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_spin_trylock)(m);
   if (res == 0) {
     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1548,6 +1575,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
   MutexUnlock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_spin_unlock)(m);
   return res;
 }
@@ -1555,6 +1583,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_init)(m, a);
   if (res == 0) {
     MutexCreate(thr, pc, (uptr)m);
@@ -1564,6 +1593,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_destroy)(m);
   if (res == 0) {
     MutexDestroy(thr, pc, (uptr)m);
@@ -1574,6 +1604,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
   MutexPreReadLock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_rdlock)(m);
   if (res == 0) {
     MutexPostReadLock(thr, pc, (uptr)m);
@@ -1583,6 +1614,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_tryrdlock)(m);
   if (res == 0) {
     MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1593,6 +1625,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
 #if !SANITIZER_APPLE
 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
   if (res == 0) {
     MutexPostReadLock(thr, pc, (uptr)m);
@@ -1604,6 +1637,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
   MutexPreLock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
   if (res == 0) {
     MutexPostLock(thr, pc, (uptr)m);
@@ -1613,6 +1647,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_trywrlock)(m);
   if (res == 0) {
     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1623,6 +1658,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
 #if !SANITIZER_APPLE
 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
   if (res == 0) {
     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1634,6 +1670,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
   MutexReadOrWriteUnlock(thr, pc, (uptr)m);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_rwlock_unlock)(m);
   return res;
 }
@@ -1642,6 +1679,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_barrier_init)(b, a, count);
   return res;
 }
@@ -1649,6 +1687,7 @@ TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_barrier_destroy)(b);
   return res;
 }
@@ -1657,6 +1696,7 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
   Release(thr, pc, (uptr)b);
   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
+  GetFuzzingScheduler().MutexCvOp();
   int res = REAL(pthread_barrier_wait)(b);
   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -1702,6 +1742,7 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
   SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
   if (fd > 0)
     FdAccess(thr, pc, fd);
+  GetFuzzingScheduler().MutexCvOp();
   return REAL(__fxstat)(version, fd, buf);
 }
 
@@ -2143,6 +2184,7 @@ TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
     __sanitizer_sigset_t *oldset) {
   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+  GetFuzzingScheduler().MutexCvOp();
   return REAL(pthread_sigmask)(how, set, oldset);
 }
 
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 527e5a9b4a8d8..c06e25e2a6afe 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -22,6 +22,7 @@
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
 #include "tsan_flags.h"
+#include "tsan_fuzzing_scheduler.h"
 #include "tsan_interface.h"
 #include "tsan_rtl.h"
 
@@ -522,6 +523,8 @@ static morder to_morder(int mo) {
 
 template <class Op, class... Types>
 ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
+  if (IsFuzzSchedulerEnabled())
+    GetFuzzingScheduler().AtomicOpFence(mo);
   ThreadState *const thr = cur_thread();
   ProcessPendingSignals(thr);
   if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
@@ -529,6 +532,17 @@ ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
   return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
 }
 
+template <class Op, class AddrType, class... Types>
+ALWAYS_INLINE auto AtomicImpl(morder mo, AddrType addr, Types... args) {
+  if (IsFuzzSchedulerEnabled())
+    GetFuzzingScheduler().AtomicOpAddr((uptr)addr, (int)mo);
+  ThreadState *const thr = cur_thread();
+  ProcessPendingSignals(thr);
+  if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
+    return Op::NoTsanAtomic(mo, addr, args...);
+  return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), addr, args...);
+}
+
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_load(const volatile a8 *a, int mo) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index feee566f44829..75d679c74382f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -22,6 +22,7 @@
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 #include "tsan_defs.h"
+#include "tsan_fuzzing_scheduler.h"
 #include "tsan_interface.h"
 #include "tsan_mman.h"
 #include "tsan_platform.h"
@@ -775,6 +776,10 @@ void Initialize(ThreadState *thr) {
     while (__tsan_resumed == 0) {}
   }
 
+#if !SANITIZER_GO
+  GetFuzzingScheduler().Init();
+#endif
+
   OnInitialize();
 }
 
diff --git a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
index 005457e374c40..d183c3071cff8 100644
--- a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
@@ -3,6 +3,7 @@ set(TSAN_UNIT_TEST_SOURCES
   tsan_flags_test.cpp
   tsan_ilist_test.cpp
   tsan_mman_test.cpp
+  tsan_percent_test.cpp
   tsan_shadow_test.cpp
   tsan_stack_test.cpp
   tsan_sync_test.cpp
diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp
new file mode 100644
index 0000000000000..2fe6db7086905
--- /dev/null
+++ b/compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp
@@ -0,0 +1,152 @@
+//===-- tsan_percent_test.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "gtest/gtest.h"
+#include "tsan_fuzzing_scheduler.h"
+
+namespace __tsan {
+
+TEST(Percent, DefaultedObject) {
+  Percent defaulted;
+  EXPECT_FALSE(defaulted.IsValid());
+}
+
+TEST(Percent, FromPct) {
+  Percent p0 = Percent::FromPct(0);
+  Percent p50 = Percent::FromPct(50);
+  Percent p100 = Percent::FromPct(100);
+  Percent p150 = Percent::FromPct(150);
+
+  EXPECT_TRUE(p0.IsValid());
+  EXPECT_TRUE(p50.IsValid());
+  EXPECT_TRUE(p100.IsValid());
+  EXPECT_TRUE(p150.IsValid());
+
+  EXPECT_EQ(p0, p0);
+  EXPECT_NE(p0, p50);
+  EXPECT_NE(p50, p100);
+
+  EXPECT_EQ(p0.GetBasisPoints(), 0);
+  EXPECT_EQ(p50.GetBasisPoints(), 5000);
+  EXPECT_EQ(p100.GetBasisPoints(), 10000);
+  EXPECT_EQ(p150.GetBasisPoints(), 15000);
+
+  EXPECT_EQ(p0.GetPct(), 0);
+  EXPECT_EQ(p50.GetPct(), 50);
+  EXPECT_EQ(p100.GetPct(), 100);
+  EXPECT_EQ(p150.GetPct(), 150);
+}
+
+TEST(Percent, FromRatio) {
+  Percent half = Percent::FromRatio(1, 2);
+  Percent expected_half = Percent::FromPct(50);
+  EXPECT_TRUE(half.IsValid());
+  EXPECT_EQ(half, expected_half);
+
+  Percent quarter = Percent::FromRatio(1, 4);
+  Percent expected_quarter = Percent::FromPct(25);
+  EXPECT_EQ(quarter, expected_quarter);
+
+  Percent full = Percent::FromRatio(100, 100);
+  Percent expected_full = Percent::FromPct(100);
+  EXPECT_EQ(full, expected_full);
+
+  Percent div_zero = Percent::FromRatio(50, 0);
+  EXPECT_FALSE(div_zero.IsValid());
+}
+
+TEST(Percent, Comparisons) {
+  Percent low = Percent::FromPct(20);
+  Percent p20 = Percent::FromPct(20);
+  Percent mid = Percent::FromPct(50);
+  Percent high = Percent::FromPct(80);
+
+  EXPECT_TRUE(low == p20);
+  EXPECT_FALSE(low != p20);
+  EXPECT_FALSE(low == mid);
+  EXPECT_TRUE(low != mid);
+
+  EXPECT_TRUE(low < mid);
+  EXPECT_TRUE(mid < high);
+  EXPECT_FALSE(high < low);
+
+  EXPECT_TRUE(high > mid);
+  EXPECT_TRUE(mid > low);
+  EXPECT_FALSE(low > high);
+
+  EXPECT_TRUE(low <= mid);
+  EXPECT_TRUE(low <= low);
+
+  EXPECT_TRUE(high >= mid);
+  EXPECT_TRUE(high >= high);
+}
+
+TEST(Percent, Subtraction) {
+  Percent a = Percent::FromPct(75);
+  Percent b = Percent::FromPct(25);
+  Percent result = a - b;
+
+  Percent expected = Percent::FromPct(50);
+  EXPECT_TRUE(result.IsValid());
+  EXPECT_EQ(result, expected);
+
+  // Underflow
+  Percent low = Percent::FromPct(20);
+  Percent high = Percent::FromPct(80);
+  Percent underflow = low - high;
+  EXPECT_FALSE(underflow.IsValid());
+
+  Percent result_invalid = underflow - low;
+  EXPECT_FALSE(result_invalid.IsValid());
+}
+
+TEST(Percent, Division) {
+  Percent numerator = Percent::FromPct(100);
+  Percent denominator = Percent::FromPct(50);
+  Percent result = numerator / denominator;
+
+  Percent expected = Percent::FromPct(200);
+  EXPECT_TRUE(result.IsValid());
+  EXPECT_EQ(result, expected);
+
+  Percent zero = Percent::FromPct(0);
+  Percent non_zero = Percent::FromPct(50);
+  Percent div_zero_result = non_zero / zero;
+  EXPECT_FALSE(div_zero_result.IsValid());
+
+  Percent invalid = Percent::FromRatio(10, 0);
+  Percent valid = Percent::FromPct(50);
+  Percent result_invalid = valid / invalid;
+  EXPECT_FALSE(result_invalid.IsValid());
+}
+
+TEST(Percent, RandomCheck) {
+  unsigned int seed = 0;
+
+  Percent p0 = Percent::FromPct(0);
+  for (int i = 0; i < 100; ++i) {
+    EXPECT_FALSE(p0.RandomCheck(&seed));
+  }
+
+  Percent p50 = Percent::FromPct(50);
+  for (int i = 0; i < 100; ++i) {
+    p50.RandomCheck(&seed);
+    // The outcome for a mid-range percentage is probabilistic, so no value is
+    // asserted; this only checks that RandomCheck executes without crashing.
+  }
+
+  Percent p150 = Percent::FromPct(150);
+  for (int i = 0; i < 100; ++i) {
+    EXPECT_TRUE(p150.RandomCheck(&seed));
+  }
+}
+
+}  // namespace __tsan
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index c3ff79e5422ab..e814c8b17f62f 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -176,6 +176,8 @@ Changes to BOLT
 Changes to Sanitizers
 ---------------------
 
+* Added an adaptive delay scheduler to ThreadSanitizer that injects small delays at synchronization points to help expose rare thread interleavings.
+
 Other Changes
 -------------
 



More information about the llvm-commits mailing list