[compiler-rt] [llvm] [tsan] Introduce Adaptive Delay Scheduling to TSAN (PR #178836)
Chris Cotter via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 13 12:06:11 PST 2026
https://github.com/ccotter updated https://github.com/llvm/llvm-project/pull/178836
>From becd76e1017e545089b957287511c9e753b403d8 Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Fri, 23 Aug 2024 11:08:52 -0400
Subject: [PATCH 1/7] [tsan] Introduce Adaptive Delay Scheduling to TSAN
This commit introduces an "adaptive delay scheduler" to the TSAN
runtime. At various synchronization points, such as mutex or atomic
operations, the scheduler may sleep or run small spin loops to expose
additional thread interleavings during execution.
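As a motivating sketch (hypothetical user code, not part of this patch),
consider a race that is only reported when the reader happens to
interleave with the writer; injected delays widen that window:

  // Hypothetical example: TSAN reports the race on `data` only when the
  // relaxed load observes `true`, i.e. under a particular interleaving.
  #include <atomic>
  #include <cstdio>
  #include <thread>
  int data = 0;
  std::atomic<bool> ready{false};
  int main() {
    std::thread t([] {
      data = 42;                                     // plain write
      ready.store(true, std::memory_order_relaxed);  // no release
    });
    if (ready.load(std::memory_order_relaxed))       // no acquire
      std::printf("%d\n", data);                     // racy read
    t.join();
  }

With this patch, running under TSAN_OPTIONS=fuzzing_scheduler=adaptive
makes such interleavings more likely.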
This change is inspired by prior work, which is discussed in more detail
on https://discourse.llvm.org/t/rfc-tsan-implementing-a-fuzz-scheduler-for-tsan/80969.
In short, https://reviews.llvm.org/D65383 was an earlier unmerged
attempt at adding a random delay scheduler. Feedback on the RFC led to
the version in this commit, which aims to limit the amount of delay.
This change aims to limit the overhead when the fuzzing scheduler is not
enabled (empty TSAN_OPTIONS). Nearly all checks are guarded by an
inlined check of a global variable that is only set when the fuzzing
scheduler is active. I ran some contrived benchmarks with empty
TSAN_OPTIONS, comparing an unmodified TSAN runtime against the TSAN
runtime with these changes. For example, a simple main() that creates
one background thread and runs millions of atomic ops (relaxed, acq/rel,
seq_cst) in a tight loop showed no meaningful difference in performance.
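The guard is the pattern used in tsan_interface_atomic.cpp below; on the
fast path it costs a single inlined load of a global:

  // When TSAN_OPTIONS names no scheduler, IsFuzzSchedulerEnabled() is an
  // inlined read of a global bool and the virtual call is never reached.
  if (IsFuzzSchedulerEnabled())
    GetFuzzingScheduler().AtomicOpFence(mo);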
The scheduler is added as a virtual interface so that other behaviors
discussed in https://ceur-ws.org/Vol-2344/paper9.pdf can be added in the
future (for example, a scheduler that only allows one thread to run at
a time), as sketched below.
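For illustration only (SerializingScheduler is an invented name, not
part of this patch), such a scheduler would derive from the same
interface:

  // Sketch of a possible future scheduler built on IFuzzingScheduler:
  // one that serializes execution by handing a token between threads.
  struct SerializingScheduler : IFuzzingScheduler {
    void Init() override { /* acquire the run token */ }
    void AtomicOpFence(int mo) override { /* maybe pass the token */ }
    void AtomicOpAddr(uptr addr, int mo) override { /* likewise */ }
    void MutexCvOp() override { /* likewise */ }
    int DetachThread(void* th) override { return REAL(pthread_detach)(th); }
    void BeforeChildThreadRuns() override { /* wait for the token */ }
    void AfterThreadCreation() override {}
    void JoinOp() override {}
  };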
An LLM helped write the adaptive behavior, in particular, the TimeBudget
class for program-level delay tracking, the tiering concept for how much
to delay the program based on the type of synchronization operation, the
address sampler, and the per-thread quota. I reviewed the output and
amended the code to reduce some duplication and simplify some of the
behavior. I also used the LLM to replace its original `double`-based
calculation logic with the Percent class, allowing integer-based
arithmetic. Finally, the LLM helped write the unit test cases for
Percent.
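For reference, Percent stores basis points (100 bp == 1%), so the budget
math stays in integers; a worked example using the API added below:

  // 25 ms of injected delay over 100 ms elapsed => 2500 bp == 25%.
  Percent overhead = Percent::FromRatio(25000000 /*ns*/, 100000000 /*ns*/);
  // overhead.GetPct() == 25, overhead.GetBasisPoints() == 2500
  // Linear interpolation as in TimeBudget::ShouldDelay():
  Percent high = Percent::FromPct(30), low = Percent::FromPct(20);
  Percent prob = (high - overhead) / (high - low);  // 5000 bp == 50%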
---
.../sanitizer_common/sanitizer_allocator.h | 6 -
.../lib/sanitizer_common/sanitizer_common.h | 6 +
compiler-rt/lib/tsan/rtl/CMakeLists.txt | 1 +
compiler-rt/lib/tsan/rtl/tsan_flags.inc | 20 +
.../lib/tsan/rtl/tsan_fuzzing_scheduler.cpp | 387 ++++++++++++++++++
.../lib/tsan/rtl/tsan_fuzzing_scheduler.h | 131 ++++++
.../tsan/rtl/tsan_fuzzing_scheduler_data.h | 36 ++
.../lib/tsan/rtl/tsan_interceptors_posix.cpp | 44 +-
.../lib/tsan/rtl/tsan_interface_atomic.cpp | 14 +
compiler-rt/lib/tsan/rtl/tsan_rtl.cpp | 5 +
compiler-rt/lib/tsan/rtl/tsan_rtl.h | 3 +
.../lib/tsan/tests/unit/CMakeLists.txt | 1 +
.../lib/tsan/tests/unit/tsan_percent_test.cpp | 152 +++++++
llvm/docs/ReleaseNotes.md | 2 +
14 files changed, 801 insertions(+), 7 deletions(-)
create mode 100644 compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
create mode 100644 compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
create mode 100644 compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
create mode 100644 compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 0b28f86d14084..6154f7810334b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -47,12 +47,6 @@ void PrintHintAllocatorCannotReturnNull();
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-inline u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
- return (*state = *state * 1103515245 + 12345) >> 16;
-}
-
-inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
-
template<typename T>
inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
if (n <= 1) return;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 515a7c9cdf60f..564ae301475a9 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -1100,6 +1100,12 @@ inline u32 GetNumberOfCPUsCached() {
return NumberOfCPUsCached;
}
+inline u32 Rand(u32* state) { // ANSI C linear congruential PRNG.
+ return (*state = *state * 1103515245 + 12345) >> 16;
+}
+
+inline u32 RandN(u32* state, u32 n) { return Rand(state) % n; } // [0, n)
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::usize size,
diff --git a/compiler-rt/lib/tsan/rtl/CMakeLists.txt b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
index d7d84706bfd58..c0b6b1cf6842c 100644
--- a/compiler-rt/lib/tsan/rtl/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
@@ -26,6 +26,7 @@ set(TSAN_SOURCES
tsan_external.cpp
tsan_fd.cpp
tsan_flags.cpp
+ tsan_fuzzing_scheduler.cpp
tsan_ignoreset.cpp
tsan_interceptors_memintrinsics.cpp
tsan_interceptors_posix.cpp
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 77ab910f08fbc..18394e2be7dde 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -92,3 +92,23 @@ TSAN_FLAG(LockDuringWriteSetting, lock_during_write, kLockDuringAllWrites,
"\"disable_for_all_processes\" - don't lock during all writes in "
"the current process and it's children processes.")
#endif
+
+TSAN_FLAG(const char*, fuzzing_scheduler, "",
+ "Choosing fuzzing scheduler: '', 'adaptive'")
+
+TSAN_FLAG(int, adaptive_delay_target_overhead_pct, 25,
+ "Target percentage overhead for adaptive delay injection")
+TSAN_FLAG(int, adaptive_delay_relaxed_sample_rate, 10000,
+ "Sample 1 in N relaxed atomic operations for delay")
+TSAN_FLAG(int, adaptive_delay_sync_atomic_sample_rate, 100,
+ "Sample 1 in N acquire/release/seq_cst atomic operations for delay")
+TSAN_FLAG(int, adaptive_delay_mutex_sample_rate, 10,
+ "Sample 1 in N mutex/cv operations for delay")
+TSAN_FLAG(int, adaptive_delay_max_atomic_us, 50,
+ "Maximum delay in microseconds for atomic operations")
+TSAN_FLAG(int, adaptive_delay_max_sync_us, 500,
+ "Maximum delay in microseconds for mutex/cv/thread operations")
+TSAN_FLAG(int, adaptive_delay_window_ms, 100,
+ "Time window in milliseconds for delay budget calculation")
+TSAN_FLAG(int, adaptive_delay_random_seed, 0,
+ "Random seed for delay injection (0 = use time-based seed)")
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
new file mode 100644
index 0000000000000..780d104720da1
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -0,0 +1,387 @@
+//===-- tsan_fuzzing_scheduler.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_fuzzing_scheduler.h"
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno_codes.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+extern "C" int pthread_detach(void*);
+
+namespace __interception {
+extern int (*real_pthread_detach)(void*);
+} // namespace __interception
+
+namespace __tsan {
+
+namespace {
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+struct NullFuzzingScheduler : IFuzzingScheduler {
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+ void Init() override {}
+ void AtomicOpFence(int mo) override {}
+ void AtomicOpAddr(uptr addr, int mo) override {}
+ void MutexCvOp() override {}
+ int DetachThread(void* th) override { return REAL(pthread_detach)(th); }
+ void BeforeChildThreadRuns() override {}
+ void AfterThreadCreation() override {}
+ void JoinOp() override {}
+};
+
+static constexpr u64 microseconds_per_second = 1000000ULL;
+
+// =============================================================================
+// AdaptiveDelayScheduler: Time-budget aware delay injection for race exposure
+// =============================================================================
+//
+// This scheduler injects delays to expose data races while maintaining a
+// configurable overhead target. It uses several strategies:
+//
+// 1. Time-Budget Controller: Tracks cumulative delays vs wall-clock time
+// and adjusts delay probability to maintain target overhead.
+//
+// 2. Tiered Delays: Different delay strategies for different op types:
+// - Relaxed atomics: Very rare sampling, tiny spin delays
+// - Sync atomics (acq/rel/seq_cst): Moderate sampling, small usleep
+// - Mutex/CV ops: Higher sampling, larger delays
+// - Thread create/join: Always delay (rare but high value)
+//
+// 3. Address-based Sampling: Exponential backoff per address to avoid
+// repeatedly delaying hot atomics.
+//
+// 4. Per-thread Quotas: Each thread has a delay budget per time window.
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+struct AdaptiveDelayScheduler : NullFuzzingScheduler {
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+ struct TimeBudget {
+ atomic_uint64_t total_delay_ns_;
+ u64 program_start_ns_;
+ int target_overhead_pct_;
+ Percent target_low_;
+ Percent target_high_;
+
+ void Init(int target_pct) {
+ atomic_store(&total_delay_ns_, 0, memory_order_relaxed);
+ program_start_ns_ = NanoTime();
+ target_overhead_pct_ = target_pct;
+ target_low_ = Percent::FromPct(
+ target_overhead_pct_ >= 5 ? target_overhead_pct_ - 5 : 0);
+ target_high_ = Percent::FromPct(target_overhead_pct_ + 5);
+ }
+
+ void RecordDelay(u64 delay_ns) {
+ atomic_fetch_add(&total_delay_ns_, delay_ns, memory_order_relaxed);
+ }
+
+ Percent GetOverheadPercent() {
+ u64 elapsed = NanoTime() - program_start_ns_;
+ u64 one_millisecond = microseconds_per_second;
+ if (elapsed < one_millisecond)
+ return Percent::FromPct(0);
+ u64 delay = atomic_load(&total_delay_ns_, memory_order_relaxed);
+ return Percent::FromRatio(delay, elapsed);
+ }
+
+ bool ShouldDelay() {
+ Percent ratio = GetOverheadPercent();
+
+ if (ratio < target_low_)
+ return true;
+ if (ratio > target_high_)
+ return false;
+
+ // Linear interpolation: at target_low -> 100%, at target_high -> 0%
+ Percent prob = (target_high_ - ratio) / (target_high_ - target_low_);
+ return prob.RandomCheck(GetRandomSeed());
+ }
+ };
+
+ // Address Sampler with Exponential Backoff
+ struct AddressSampler {
+ static constexpr u64 TABLE_SIZE = 2048;
+ struct Entry {
+ atomic_uintptr_t addr_;
+ atomic_uint32_t count_;
+ };
+ Entry table_[TABLE_SIZE];
+ static constexpr u32 ExponentialBackoffCap = 128;
+
+ void Init() {
+ for (u64 i = 0; i < TABLE_SIZE; ++i) {
+ atomic_store(&table_[i].addr_, 0, memory_order_relaxed);
+ atomic_store(&table_[i].count_, 0, memory_order_relaxed);
+ }
+ }
+
+ static ALWAYS_INLINE u64 splitmix64(u64 x) {
+ x = (x ^ (x >> 30)) * 0xBF58476D1CE4E5B9ULL;
+ x = (x ^ (x >> 27)) * 0x94D049BB133111EBULL;
+ x = x ^ (x >> 31);
+ return x;
+ }
+
+ // Uses exponential backoff: delay on 1st, 2nd, 4th, 8th, 16th, ...
+ bool ShouldDelayAddr(uptr addr) {
+ u64 idx = splitmix64(addr >> 3) & (TABLE_SIZE - 1);
+ Entry& e = table_[idx];
+
+      // This function is not thread safe.
+      // If two threads access the same hashed entry in parallel, the
+      // worst case is that we return true too often. This is
+      // acceptable, and cheaper than full locking.
+
+ uptr stored_addr = atomic_load(&e.addr_, memory_order_relaxed);
+ if (stored_addr != addr) {
+ // Hash Collision - reset
+ atomic_store(&e.addr_, addr, memory_order_relaxed);
+ atomic_store(&e.count_, 1, memory_order_relaxed);
+ return true;
+ }
+
+ u32 count = atomic_fetch_add(&e.count_, 1, memory_order_relaxed) + 1;
+
+ if ((count & (count - 1)) == 0 && count <= ExponentialBackoffCap)
+ return true;
+ return false;
+ }
+ };
+
+ TimeBudget budget_;
+ AddressSampler sampler_;
+
+ int relaxed_sample_rate_;
+ int sync_atomic_sample_rate_;
+ int mutex_sample_rate_;
+ int max_atomic_delay_us_;
+ int max_sync_delay_us_;
+ u64 window_ms_;
+
+ ALWAYS_INLINE static FuzzingSchedulerTlsData* TLS() {
+ return &cur_thread()->fuzzingSchedulerTlsData;
+ }
+ ALWAYS_INLINE static unsigned int* GetRandomSeed() {
+ return &cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_;
+ }
+ ALWAYS_INLINE static void SetRandomSeed(unsigned int seed) {
+ cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_ = seed;
+ }
+
+ bool CanDelayThread() {
+ u64 now = NanoTime();
+ bool needs_reset =
+ now - TLS()->window_start_ns_ > TLS()->window_duration_ns_;
+ if (needs_reset) {
+ TLS()->window_start_ns_ = now;
+ TLS()->delays_this_window_ = 0;
+ }
+
+ if (TLS()->delays_this_window_ >= TLS()->max_delays_per_window_)
+ return false;
+ return true;
+ }
+
+ void RecordOneDelayThisThread() { ++TLS()->delays_this_window_; }
+
+ void Init() override { InitTls(); }
+
+ void InitTls() {
+ TLS()->window_start_ns_ = NanoTime();
+ TLS()->delays_this_window_ = 0;
+ static constexpr int max_delays_per_window_default = 500;
+ TLS()->max_delays_per_window_ = max_delays_per_window_default;
+ TLS()->window_duration_ns_ = window_ms_ * microseconds_per_second;
+
+ SetRandomSeed(flags()->adaptive_delay_random_seed);
+ if (*GetRandomSeed() == 0)
+ SetRandomSeed(NanoTime());
+ TLS()->tls_initialized_ = true;
+ }
+
+ bool IsTlsInitialized() const { return TLS()->tls_initialized_; }
+
+ AdaptiveDelayScheduler() {
+ relaxed_sample_rate_ = flags()->adaptive_delay_relaxed_sample_rate;
+ sync_atomic_sample_rate_ = flags()->adaptive_delay_sync_atomic_sample_rate;
+ mutex_sample_rate_ = flags()->adaptive_delay_mutex_sample_rate;
+ max_atomic_delay_us_ = flags()->adaptive_delay_max_atomic_us;
+ max_sync_delay_us_ = flags()->adaptive_delay_max_sync_us;
+ window_ms_ = flags()->adaptive_delay_window_ms;
+
+ int target_pct = flags()->adaptive_delay_target_overhead_pct;
+ if (target_pct < 1)
+ target_pct = 1;
+
+ budget_.Init(target_pct);
+ sampler_.Init();
+
+ Printf("INFO: ThreadSanitizer AdaptiveDelayScheduler initialized\n");
+ Printf(" Target overhead: %d%%\n", target_pct);
+ Printf(" Random seed: %u\n", *GetRandomSeed());
+ Printf(" Relaxed atomic sample rate: 1/%d\n", relaxed_sample_rate_);
+ Printf(" Sync atomic sample rate: 1/%d\n", sync_atomic_sample_rate_);
+ Printf(" Mutex sample rate: 1/%d\n", mutex_sample_rate_);
+ Printf(" Max atomic delay: %d us\n", max_atomic_delay_us_);
+ Printf(" Max sync delay: %d us\n", max_sync_delay_us_);
+ Printf(" Delay window: %llu ms\n", window_ms_);
+ }
+
+ void DoSpinDelay(int cycles) {
+ volatile int v = 0;
+ for (int i = 0; i < cycles; ++i) v = i;
+ (void)v;
+ }
+
+ void DoYieldDelay() { internal_sched_yield(); }
+
+ void UsleepDelay(int max_us) {
+ int delay_us = 1 + (Rand(GetRandomSeed()) % max_us);
+ internal_usleep(delay_us);
+ budget_.RecordDelay(delay_us * 1000ULL);
+ }
+
+ void AtomicRelaxedOpDelay() {
+ if ((Rand(GetRandomSeed()) % relaxed_sample_rate_) != 0)
+ return;
+ if (!budget_.ShouldDelay())
+ return;
+ if (!CanDelayThread())
+ return;
+
+ DoSpinDelay(10 + (Rand(GetRandomSeed()) % 10));
+ RecordOneDelayThisThread();
+ static constexpr int spin_delay_estimate_ns = 50;
+ budget_.RecordDelay(spin_delay_estimate_ns);
+ }
+
+ void AtomicSyncOpDelay(uptr* addr) {
+ if ((Rand(GetRandomSeed()) % sync_atomic_sample_rate_) != 0)
+ return;
+ if (!budget_.ShouldDelay())
+ return;
+ if (!CanDelayThread())
+ return;
+
+ if (addr && !sampler_.ShouldDelayAddr(*addr))
+ return;
+
+ if (max_atomic_delay_us_ <= 1) {
+ DoYieldDelay();
+ static constexpr int yield_delay_estimate_ns = 100;
+ budget_.RecordDelay(yield_delay_estimate_ns);
+ } else
+ UsleepDelay(max_atomic_delay_us_);
+ RecordOneDelayThisThread();
+ }
+
+ void AtomicOpFence(int mo) override {
+ CHECK(IsTlsInitialized());
+
+ if (mo < mo_acquire)
+ AtomicRelaxedOpDelay();
+ else
+ AtomicSyncOpDelay(nullptr);
+ }
+
+ void AtomicOpAddr(uptr addr, int mo) override {
+ CHECK(IsTlsInitialized());
+
+ if (mo < mo_acquire)
+ AtomicRelaxedOpDelay();
+ else
+ AtomicSyncOpDelay(&addr);
+ }
+
+ void UnsampledDelay() {
+ CHECK(IsTlsInitialized());
+
+ if (!budget_.ShouldDelay())
+ return;
+ if (!CanDelayThread())
+ return;
+
+ UsleepDelay(max_sync_delay_us_);
+ RecordOneDelayThisThread();
+ }
+
+ void MutexCvOp() override {
+ CHECK(IsTlsInitialized());
+
+ if ((Rand(GetRandomSeed()) % mutex_sample_rate_) != 0)
+ return;
+ if (!budget_.ShouldDelay())
+ return;
+ if (!CanDelayThread())
+ return;
+
+ UsleepDelay(max_sync_delay_us_);
+ RecordOneDelayThisThread();
+ }
+
+ void JoinOp() override { UnsampledDelay(); }
+
+ void BeforeChildThreadRuns() override {
+ InitTls();
+ UnsampledDelay();
+ }
+
+ void AfterThreadCreation() override { UnsampledDelay(); }
+
+ int DetachThread(void* th) override {
+ int res = REAL(pthread_detach)(th);
+ UnsampledDelay();
+ return res;
+ }
+};
+
+IFuzzingScheduler& FuzzingSchedulerDispatcher() {
+ if (!internal_strcmp(flags()->fuzzing_scheduler, "")) {
+ is_fuzz_scheduler_enabled = false;
+ static NullFuzzingScheduler scheduler;
+ return scheduler;
+ } else if (!internal_strcmp(flags()->fuzzing_scheduler, "adaptive")) {
+ is_fuzz_scheduler_enabled = true;
+ static AdaptiveDelayScheduler scheduler;
+ return scheduler;
+ } else {
+ Printf(
+ "FATAL: ThreadSanitizer invalid fuzzing scheduler. Please check "
+ "TSAN_OPTIONS!\n");
+ Die();
+ }
+}
+
+} // namespace
+
+bool is_fuzz_scheduler_enabled;
+
+IFuzzingScheduler& GetFuzzingScheduler() {
+ static IFuzzingScheduler& scheduler = FuzzingSchedulerDispatcher();
+ return scheduler;
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
new file mode 100644
index 0000000000000..590848269a572
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
@@ -0,0 +1,131 @@
+//===-- tsan_fuzzing_scheduler.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_FUZZING_SCHEDULER_H
+#define TSAN_FUZZING_SCHEDULER_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __tsan {
+
+// Fixed-point arithmetic type that mimics floating point operations
+class Percent {
+ using u32 = __sanitizer::u32;
+ using u64 = __sanitizer::u64;
+
+ u32 bp_{}; // basis points (0-10000 represents 0.0-1.0)
+ bool is_valid_{};
+
+ static constexpr u32 kBasisPointsPerUnit = 10000;
+
+ Percent(u32 bp, bool is_valid) : bp_(bp), is_valid_(is_valid) {}
+
+ public:
+ Percent() = default;
+ Percent(const Percent&) = default;
+ Percent& operator=(const Percent&) = default;
+ Percent(Percent&&) = default;
+ Percent& operator=(Percent&&) = default;
+
+ static Percent FromPct(u32 pct) { return Percent{pct * 100, true}; }
+ static Percent FromRatio(u64 numerator, u64 denominator) {
+ if (denominator == 0)
+ return Percent{0, false};
+ // Avoid overflow: scale down if needed
+ if (numerator > UINT64_MAX / kBasisPointsPerUnit) {
+ return Percent{(u32)((numerator / denominator) * kBasisPointsPerUnit),
+ true};
+ }
+ return Percent{(u32)((numerator * kBasisPointsPerUnit) / denominator),
+ true};
+ }
+
+ bool IsValid() const { return is_valid_; }
+
+ // Returns true with probability equal to the percentage.
+ bool RandomCheck(u32* seed) const {
+ return (Rand(seed) % kBasisPointsPerUnit) < bp_;
+ }
+
+ int GetPct() const { return bp_ / 100; }
+ int GetBasisPoints() const { return bp_; }
+
+ bool operator==(const Percent& other) const { return bp_ == other.bp_; }
+ bool operator!=(const Percent& other) const { return bp_ != other.bp_; }
+ bool operator<(const Percent& other) const { return bp_ < other.bp_; }
+ bool operator>(const Percent& other) const { return bp_ > other.bp_; }
+ bool operator<=(const Percent& other) const { return bp_ <= other.bp_; }
+ bool operator>=(const Percent& other) const { return bp_ >= other.bp_; }
+
+ Percent operator-(const Percent& other) const {
+ if (!is_valid_ || !other.is_valid_)
+ return Percent{0, false};
+ if (bp_ < other.bp_)
+ return Percent{0, false};
+ return Percent{bp_ - other.bp_, true};
+ }
+
+ Percent operator/(const Percent& other) const {
+ if (!is_valid_ || !other.is_valid_)
+ return Percent{0, false};
+ if (other.bp_ == 0)
+ return Percent{0, false};
+ return Percent{(bp_ * kBasisPointsPerUnit) / other.bp_, true};
+ }
+};
+
+struct IFuzzingScheduler {
+ virtual void Init() = 0;
+
+ virtual void MutexCvOp() = 0;
+ virtual void AtomicOpFence(int mo) = 0;
+ virtual void AtomicOpAddr(__sanitizer::uptr addr, int mo) = 0;
+
+ virtual int DetachThread(void* th) = 0;
+ virtual void AfterThreadCreation() = 0;
+ virtual void BeforeChildThreadRuns() = 0;
+ virtual void JoinOp() = 0;
+
+ protected:
+ IFuzzingScheduler() = default;
+
+ // Derived types of IFuzzingScheduler are only constructed on the stack.
+ // No code ever deletes a base pointer, so a non-virtual destructor is OK.
+ // There is a separate clang warning, -Wdelete-non-abstract-non-virtual-dtor,
+ // that catches deleting pointers of types with virtual methods but a
+ // non-virtual destructor.
+ //
+ // The destructor cannot be virtual, otherwise it would emit references to
+ // operator delete, which the TSAN runtime cannot depend on in some
+ // environments.
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+ ~IFuzzingScheduler() = default;
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+};
+
+IFuzzingScheduler& GetFuzzingScheduler();
+
+extern bool is_fuzz_scheduler_enabled;
+
+ALWAYS_INLINE bool IsFuzzSchedulerEnabled() {
+ return is_fuzz_scheduler_enabled;
+}
+
+} // namespace __tsan
+
+#endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
new file mode 100644
index 0000000000000..f4895fabd2194
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
@@ -0,0 +1,36 @@
+//===-- tsan_fuzzing_scheduler_data.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_FUZZING_SCHEDULER_DATA_H
+#define TSAN_FUZZING_SCHEDULER_DATA_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __tsan {
+
+// The TSAN Runtime defines cur_thread() to retrieve TLS thread state, and it
+// takes care of platform specific implementation details. Rather than the
+// IFuzzingScheduler derived types reinventing the wheel, we define all possible
+// TLS data in this type, which will be available in cur_thread().
+struct FuzzingSchedulerTlsData {
+ // For the adaptive scheduler
+ u64 window_start_ns_;
+ u32 delays_this_window_;
+ u32 max_delays_per_window_;
+ u64 window_duration_ns_;
+ unsigned int tls_random_seed_;
+ bool tls_initialized_;
+};
+
+} // namespace __tsan
+
+#endif // TSAN_FUZZING_SCHEDULER_DATA_H
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 714220a0109a8..f34aa338f5aa1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -34,6 +34,7 @@
#if SANITIZER_APPLE && !SANITIZER_GO
# include "tsan_flags.h"
#endif
+#include "tsan_fuzzing_scheduler.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
@@ -1065,6 +1066,9 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
p->started.Post();
}
+
+ GetFuzzingScheduler().BeforeChildThreadRuns();
+
void *res = callback(param);
// Prevent the callback from being tail called,
// it mixes up stack traces.
@@ -1128,11 +1132,13 @@ TSAN_INTERCEPTOR(int, pthread_create,
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
+ GetFuzzingScheduler().AfterThreadCreation();
return res;
}
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+ GetFuzzingScheduler().JoinOp();
#if SANITIZER_ANDROID
{
// In Bionic, if the target thread has already exited when pthread_detach is
@@ -1175,7 +1181,7 @@ int internal_pthread_join(void *th, void **ret) {
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
- int res = REAL(pthread_detach)(th);
+ int res = GetFuzzingScheduler().DetachThread(th);
if (res == 0) {
ThreadDetach(thr, pc, tid);
}
@@ -1197,6 +1203,7 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
+ GetFuzzingScheduler().JoinOp();
int res = REAL(pthread_tryjoin_np)(th, ret);
ThreadIgnoreEnd(thr);
if (res == 0)
@@ -1211,6 +1218,7 @@ TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
+ GetFuzzingScheduler().JoinOp();
int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
ThreadIgnoreEnd(thr);
if (res == 0)
@@ -1324,6 +1332,7 @@ int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ GetFuzzingScheduler().MutexCvOp();
return cond_wait(
thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
m);
@@ -1332,6 +1341,7 @@ INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ GetFuzzingScheduler().MutexCvOp();
return cond_wait(
thr, pc, &si,
[=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
@@ -1343,6 +1353,7 @@ INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
__sanitizer_clockid_t clock, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
+ GetFuzzingScheduler().MutexCvOp();
return cond_wait(
thr, pc, &si,
[=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
@@ -1358,6 +1369,7 @@ INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
void *reltime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
+ GetFuzzingScheduler().MutexCvOp();
return cond_wait(
thr, pc, &si,
[=]() {
@@ -1371,6 +1383,7 @@ INTERCEPTOR(int, pthread_cond_signal, void *c) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ GetFuzzingScheduler().MutexCvOp();
return REAL(pthread_cond_signal)(cond);
}
@@ -1378,6 +1391,7 @@ INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ GetFuzzingScheduler().MutexCvOp();
return REAL(pthread_cond_broadcast)(cond);
}
@@ -1385,6 +1399,7 @@ INTERCEPTOR(int, pthread_cond_destroy, void *c) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_cond_destroy)(cond);
if (common_flags()->legacy_pthread_cond) {
// Free our aux cond and zero the pointer to not leave dangling pointers.
@@ -1413,6 +1428,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_mutex_destroy)(m);
if (res == 0 || res == errno_EBUSY) {
MutexDestroy(thr, pc, (uptr)m);
@@ -1423,6 +1439,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
MutexPreLock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = BLOCK_REAL(pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1435,6 +1452,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_mutex_trylock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1446,6 +1464,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_mutex_timedlock)(m, abstime);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1457,6 +1476,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
MutexUnlock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_mutex_unlock)(m);
if (res == errno_EINVAL)
MutexInvalidAccess(thr, pc, (uptr)m);
@@ -1468,6 +1488,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
__sanitizer_clockid_t clock, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
MutexPreLock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1486,6 +1507,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
MutexPreLock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = BLOCK_REAL(__pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1499,6 +1521,7 @@ TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
MutexUnlock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(__pthread_mutex_unlock)(m);
if (res == errno_EINVAL)
MutexInvalidAccess(thr, pc, (uptr)m);
@@ -1510,6 +1533,7 @@ TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_spin_init)(m, pshared);
if (res == 0) {
MutexCreate(thr, pc, (uptr)m);
@@ -1519,6 +1543,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_spin_destroy)(m);
if (res == 0) {
MutexDestroy(thr, pc, (uptr)m);
@@ -1529,6 +1554,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
MutexPreLock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = BLOCK_REAL(pthread_spin_lock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m);
@@ -1538,6 +1564,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1548,6 +1575,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
MutexUnlock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_spin_unlock)(m);
return res;
}
@@ -1555,6 +1583,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_rwlock_init)(m, a);
if (res == 0) {
MutexCreate(thr, pc, (uptr)m);
@@ -1564,6 +1593,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_rwlock_destroy)(m);
if (res == 0) {
MutexDestroy(thr, pc, (uptr)m);
@@ -1574,6 +1604,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
MutexPreReadLock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_rwlock_rdlock)(m);
if (res == 0) {
MutexPostReadLock(thr, pc, (uptr)m);
@@ -1583,6 +1614,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
+ GetFuzzingScheduler().JoinOp();
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1593,6 +1625,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
if (res == 0) {
MutexPostReadLock(thr, pc, (uptr)m);
@@ -1604,6 +1637,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
MutexPreLock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m);
@@ -1613,6 +1647,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1623,6 +1658,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1634,6 +1670,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
MutexReadOrWriteUnlock(thr, pc, (uptr)m);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_rwlock_unlock)(m);
return res;
}
@@ -1642,6 +1679,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_barrier_init)(b, a, count);
return res;
}
@@ -1649,6 +1687,7 @@ TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_barrier_destroy)(b);
return res;
}
@@ -1657,6 +1696,7 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
Release(thr, pc, (uptr)b);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
+ GetFuzzingScheduler().MutexCvOp();
int res = REAL(pthread_barrier_wait)(b);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -1702,6 +1742,7 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
+ GetFuzzingScheduler().MutexCvOp();
return REAL(__fxstat)(version, fd, buf);
}
@@ -2143,6 +2184,7 @@ TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+ GetFuzzingScheduler().MutexCvOp();
return REAL(pthread_sigmask)(how, set, oldset);
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 527e5a9b4a8d8..c06e25e2a6afe 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -22,6 +22,7 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_flags.h"
+#include "tsan_fuzzing_scheduler.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"
@@ -522,6 +523,8 @@ static morder to_morder(int mo) {
template <class Op, class... Types>
ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
+ if (IsFuzzSchedulerEnabled())
+ GetFuzzingScheduler().AtomicOpFence(mo);
ThreadState *const thr = cur_thread();
ProcessPendingSignals(thr);
if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
@@ -529,6 +532,17 @@ ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
}
+template <class Op, class AddrType, class... Types>
+ALWAYS_INLINE auto AtomicImpl(morder mo, AddrType addr, Types... args) {
+ if (IsFuzzSchedulerEnabled())
+ GetFuzzingScheduler().AtomicOpAddr((uptr)addr, (int)mo);
+ ThreadState* const thr = cur_thread();
+ ProcessPendingSignals(thr);
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
+ return Op::NoTsanAtomic(mo, addr, args...);
+ return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), addr, args...);
+}
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, int mo) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index feee566f44829..75d679c74382f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -22,6 +22,7 @@
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
+#include "tsan_fuzzing_scheduler.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
@@ -775,6 +776,10 @@ void Initialize(ThreadState *thr) {
while (__tsan_resumed == 0) {}
}
+#if !SANITIZER_GO
+ GetFuzzingScheduler().Init();
+#endif
+
OnInitialize();
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 635654616b781..3fada642ce770 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -36,6 +36,7 @@
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
+#include "tsan_fuzzing_scheduler_data.h"
#include "tsan_ignoreset.h"
#include "tsan_ilist.h"
#include "tsan_mman.h"
@@ -240,6 +241,8 @@ struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState {
bool in_internal_write_call;
#endif
+ FuzzingSchedulerTlsData fuzzingSchedulerTlsData;
+
explicit ThreadState(Tid tid);
};
diff --git a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
index 005457e374c40..d183c3071cff8 100644
--- a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
@@ -3,6 +3,7 @@ set(TSAN_UNIT_TEST_SOURCES
tsan_flags_test.cpp
tsan_ilist_test.cpp
tsan_mman_test.cpp
+ tsan_percent_test.cpp
tsan_shadow_test.cpp
tsan_stack_test.cpp
tsan_sync_test.cpp
diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp
new file mode 100644
index 0000000000000..2fe6db7086905
--- /dev/null
+++ b/compiler-rt/lib/tsan/tests/unit/tsan_percent_test.cpp
@@ -0,0 +1,152 @@
+//===-- tsan_percent_test.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "gtest/gtest.h"
+#include "tsan_fuzzing_scheduler.h"
+
+namespace __tsan {
+
+TEST(Percent, DefaultedObject) {
+ Percent defaulted;
+ EXPECT_FALSE(defaulted.IsValid());
+}
+
+TEST(Percent, FromPct) {
+ Percent p0 = Percent::FromPct(0);
+ Percent p50 = Percent::FromPct(50);
+ Percent p100 = Percent::FromPct(100);
+ Percent p150 = Percent::FromPct(150);
+
+ EXPECT_TRUE(p0.IsValid());
+ EXPECT_TRUE(p50.IsValid());
+ EXPECT_TRUE(p100.IsValid());
+ EXPECT_TRUE(p150.IsValid());
+
+ EXPECT_EQ(p0, p0);
+ EXPECT_NE(p0, p50);
+ EXPECT_NE(p50, p100);
+
+ EXPECT_EQ(p0.GetBasisPoints(), 0);
+ EXPECT_EQ(p50.GetBasisPoints(), 5000);
+ EXPECT_EQ(p100.GetBasisPoints(), 10000);
+ EXPECT_EQ(p150.GetBasisPoints(), 15000);
+
+ EXPECT_EQ(p0.GetPct(), 0);
+ EXPECT_EQ(p50.GetPct(), 50);
+ EXPECT_EQ(p100.GetPct(), 100);
+ EXPECT_EQ(p150.GetPct(), 150);
+}
+
+TEST(Percent, FromRatio) {
+ Percent half = Percent::FromRatio(1, 2);
+ Percent expected_half = Percent::FromPct(50);
+ EXPECT_TRUE(half.IsValid());
+ EXPECT_EQ(half, expected_half);
+
+ Percent quarter = Percent::FromRatio(1, 4);
+ Percent expected_quarter = Percent::FromPct(25);
+ EXPECT_EQ(quarter, expected_quarter);
+
+ Percent full = Percent::FromRatio(100, 100);
+ Percent expected_full = Percent::FromPct(100);
+ EXPECT_EQ(full, expected_full);
+
+ Percent div_zero = Percent::FromRatio(50, 0);
+ EXPECT_FALSE(div_zero.IsValid());
+}
+
+TEST(Percent, Comparisons) {
+ Percent low = Percent::FromPct(20);
+ Percent p20 = Percent::FromPct(20);
+ Percent mid = Percent::FromPct(50);
+ Percent high = Percent::FromPct(80);
+
+ EXPECT_TRUE(low == p20);
+ EXPECT_FALSE(low != p20);
+ EXPECT_FALSE(low == mid);
+ EXPECT_TRUE(low != mid);
+
+ EXPECT_TRUE(low < mid);
+ EXPECT_TRUE(mid < high);
+ EXPECT_FALSE(high < low);
+
+ EXPECT_TRUE(high > mid);
+ EXPECT_TRUE(mid > low);
+ EXPECT_FALSE(low > high);
+
+ EXPECT_TRUE(low <= mid);
+ EXPECT_TRUE(low <= low);
+
+ EXPECT_TRUE(high >= mid);
+ EXPECT_TRUE(high >= high);
+}
+
+TEST(Percent, Subtraction) {
+ Percent a = Percent::FromPct(75);
+ Percent b = Percent::FromPct(25);
+ Percent result = a - b;
+
+ Percent expected = Percent::FromPct(50);
+ EXPECT_TRUE(result.IsValid());
+ EXPECT_EQ(result, expected);
+
+ // Underflow
+ Percent low = Percent::FromPct(20);
+ Percent high = Percent::FromPct(80);
+ Percent underflow = low - high;
+ EXPECT_FALSE(underflow.IsValid());
+
+ Percent result_invalid = underflow - low;
+ EXPECT_FALSE(result_invalid.IsValid());
+}
+
+TEST(Percent, Division) {
+ Percent numerator = Percent::FromPct(100);
+ Percent denominator = Percent::FromPct(50);
+ Percent result = numerator / denominator;
+
+ Percent expected = Percent::FromPct(200);
+ EXPECT_TRUE(result.IsValid());
+ EXPECT_EQ(result, expected);
+
+ Percent zero = Percent::FromPct(0);
+ Percent non_zero = Percent::FromPct(50);
+ Percent div_zero_result = non_zero / zero;
+ EXPECT_FALSE(div_zero_result.IsValid());
+
+ Percent invalid = Percent::FromRatio(10, 0);
+ Percent valid = Percent::FromPct(50);
+ Percent result_invalid = valid / invalid;
+ EXPECT_FALSE(result_invalid.IsValid());
+}
+
+TEST(Percent, RandomCheck) {
+ unsigned int seed = 0;
+
+ Percent p0 = Percent::FromPct(0);
+ for (int i = 0; i < 100; ++i) {
+ EXPECT_FALSE(p0.RandomCheck(&seed));
+ }
+
+ Percent p50 = Percent::FromPct(50);
+ for (int i = 0; i < 100; ++i) {
+ p50.RandomCheck(&seed);
+ // No verification since we cannot guarantee the random result.
+ // Just verify the code does not crash...
+ }
+
+ Percent p150 = Percent::FromPct(150);
+ for (int i = 0; i < 100; ++i) {
+ EXPECT_TRUE(p150.RandomCheck(&seed));
+ }
+}
+
+} // namespace __tsan
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index c3ff79e5422ab..e814c8b17f62f 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -176,6 +176,8 @@ Changes to BOLT
Changes to Sanitizers
---------------------
+* Add an opt-in adaptive delay scheduler to ThreadSanitizer (`fuzzing_scheduler=adaptive` in `TSAN_OPTIONS`) that injects random delays to help find rare thread interleavings.
+
Other Changes
-------------
>From 3c2666b95a10e2a23279b1a1e29db426f0f61325 Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Thu, 5 Feb 2026 20:02:07 +0000
Subject: [PATCH 2/7] Update TSAN adaptive delay behavior
Change the delay flags to accept a new delay spec, which can be
* spin=N - spin loop N times
* yield - a single sched_yield
* sleep_us=N - usleep for N microseconds
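For example (illustrative values),
TSAN_OPTIONS="fuzzing_scheduler=adaptive:adaptive_delay_max_atomic=spin=20"
selects a spin delay for atomics. Parsing behaves as follows (per
DelaySpec::Parse below):

  DelaySpec a = DelaySpec::Parse("spin=20");      // type=Spin,    value=20
  DelaySpec b = DelaySpec::Parse("yield");        // type=Yield,   value=0
  DelaySpec c = DelaySpec::Parse("sleep_us=200"); // type=SleepUs, value=200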
Use per-thread delay calculation instead of a global program-level
calculation, to avoid contention on an internal atomic. Per-thread
tracking should be *roughly* the same as global tracking.
Change the delay from a percent slowdown to an opaque "aggressiveness"
parameter in the user-facing options. It would not be trivial, or even
reasonable, to hold to an actual percent wall-clock slowdown, given the
difficulty of truly tracking the actual slowdown vs the calculated
slowdown. The implementation is still internally based on elapsed-time
slowdown.
Remove the per-thread limit on the number of delays, and instead use a
sliding window. Short-lived unit tests will behave the same as before,
but long-lived programs (running for many minutes, hours, or days) will
only use recent history for the delay target calculation.
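A minimal sketch of the two-bucket window (per-thread state; 30 second
buckets as in the patch):

  // bucket_ns[1] accumulates the current interval; on rollover it shifts
  // into bucket_ns[0] and the stale previous interval is discarded.
  struct DelayWindow {
    static constexpr u64 kBucketNs = 30000000000ULL;  // 30s
    u64 start_ns = 0;
    u64 bucket_ns[2] = {0, 0};  // [0] = previous, [1] = current
    void Record(u64 now, u64 delay_ns) {
      if (now - start_ns >= kBucketNs) {
        bucket_ns[0] = bucket_ns[1];
        bucket_ns[1] = 0;
        start_ns = now;
      }
      bucket_ns[1] += delay_ns;
    }
  };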
---
compiler-rt/lib/tsan/rtl/tsan_flags.inc | 23 +-
.../lib/tsan/rtl/tsan_fuzzing_scheduler.cpp | 240 ++++++++++++------
.../tsan/rtl/tsan_fuzzing_scheduler_data.h | 7 +-
3 files changed, 177 insertions(+), 93 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 18394e2be7dde..86d0f6a645996 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -94,21 +94,26 @@ TSAN_FLAG(LockDuringWriteSetting, lock_during_write, kLockDuringAllWrites,
#endif
TSAN_FLAG(const char*, fuzzing_scheduler, "",
- "Choosing fuzzing scheduler: '', 'adaptive'")
+ "Fuzzing scheduler: '' (default; none), 'adaptive'")
-TSAN_FLAG(int, adaptive_delay_target_overhead_pct, 25,
- "Target percentage overhead for adaptive delay injection")
+TSAN_FLAG(
+ int, adaptive_delay_aggressiveness, 25,
+ "Controls delay injection intensity for race detection. Higher values "
+ "inject more delays to expose races. Suggested values: 10 (minimal delay), "
+ "50 (moderate delay), 200 (aggressive). "
+ "This is a tuning parameter; actual overhead varies by workload and "
+ "platform.")
TSAN_FLAG(int, adaptive_delay_relaxed_sample_rate, 10000,
"Sample 1 in N relaxed atomic operations for delay")
TSAN_FLAG(int, adaptive_delay_sync_atomic_sample_rate, 100,
"Sample 1 in N acquire/release/seq_cst atomic operations for delay")
TSAN_FLAG(int, adaptive_delay_mutex_sample_rate, 10,
"Sample 1 in N mutex/cv operations for delay")
-TSAN_FLAG(int, adaptive_delay_max_atomic_us, 50,
- "Maximum delay in microseconds for atomic operations")
-TSAN_FLAG(int, adaptive_delay_max_sync_us, 500,
- "Maximum delay in microseconds for mutex/cv/thread operations")
-TSAN_FLAG(int, adaptive_delay_window_ms, 100,
- "Time window in milliseconds for delay budget calculation")
+TSAN_FLAG(const char*, adaptive_delay_max_atomic, "sleep_us=50",
+ "Delay for atomic operations: 'spin=N' (max N spins), 'yield', or "
+ "'sleep_us=N' (max N us sleep)")
+TSAN_FLAG(const char*, adaptive_delay_max_sync, "sleep_us=500",
+          "Delay for mutex/cv/thread operations: 'spin=N' (max N spins), "
+          "'yield', or 'sleep_us=N' (max N us sleep)")
TSAN_FLAG(int, adaptive_delay_random_seed, 0,
"Random seed for delay injection (0 = use time-based seed)")
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
index 780d104720da1..14f47cb7702da 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -47,7 +47,75 @@ struct NullFuzzingScheduler : IFuzzingScheduler {
void JoinOp() override {}
};
-static constexpr u64 microseconds_per_second = 1000000ULL;
+// =============================================================================
+// DelaySpec: Represents a delay configuration parsed from flag strings
+// =============================================================================
+//
+// Delay can be specified as:
+// - "spin=N" : Spin for up to N cycles (very short delays)
+// - "yield" : Call sched_yield() once
+// - "sleep_us=N" : Sleep for up to N microseconds
+
+enum class DelayType { Spin, Yield, SleepUs };
+
+struct DelaySpec {
+ DelayType type;
+ int value; // spin cycles or sleep_us value; ignored for yield
+
+ // Estimated nanoseconds per spin cycle (volatile loop iteration)
+ static constexpr u64 kNsPerSpinCycle = 5;
+ // Estimated nanoseconds for a yield (context switch overhead)
+ static constexpr u64 kNsPerYield = 500;
+
+ static DelaySpec Parse(const char* str) {
+ DelaySpec spec;
+ if (internal_strncmp(str, "spin=", 5) == 0) {
+ spec.type = DelayType::Spin;
+ spec.value = internal_atoll(str + 5);
+ if (spec.value <= 0)
+ spec.value = 10;
+ } else if (internal_strcmp(str, "yield") == 0) {
+ spec.type = DelayType::Yield;
+ spec.value = 0;
+ } else if (internal_strncmp(str, "sleep_us=", 9) == 0) {
+ spec.type = DelayType::SleepUs;
+ spec.value = internal_atoll(str + 9);
+ if (spec.value <= 0)
+ spec.value = 1;
+ } else {
+ // Default to yield if unrecognized
+ Printf("WARNING: Unrecognized delay spec '%s', defaulting to yield\n",
+ str);
+ spec.type = DelayType::Yield;
+ spec.value = 0;
+ }
+ return spec;
+ }
+
+ u64 EstimatedNs() const {
+ switch (type) {
+ case DelayType::Spin:
+ return value * kNsPerSpinCycle;
+ case DelayType::Yield:
+ return kNsPerYield;
+ case DelayType::SleepUs:
+ return value * 1000ULL;
+ }
+ return 0;
+ }
+
+ const char* TypeName() const {
+ switch (type) {
+ case DelayType::Spin:
+ return "spin";
+ case DelayType::Yield:
+ return "yield";
+ case DelayType::SleepUs:
+ return "sleep_us";
+ }
+ return "unknown";
+ }
+};
// =============================================================================
// AdaptiveDelayScheduler: Time-budget aware delay injection for race exposure
@@ -67,8 +135,6 @@ static constexpr u64 microseconds_per_second = 1000000ULL;
//
// 3. Address-based Sampling: Exponential backoff per address to avoid
// repeatedly delaying hot atomics.
-//
-// 4. Per-thread Quotas: Each thread has a delay budget per time window.
#ifdef __clang__
# pragma clang diagnostic push
@@ -78,33 +144,75 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
#ifdef __clang__
# pragma clang diagnostic pop
#endif
+
+ ALWAYS_INLINE static FuzzingSchedulerTlsData* TLS() {
+ return &cur_thread()->fuzzingSchedulerTlsData;
+ }
+ ALWAYS_INLINE static unsigned int* GetRandomSeed() {
+ return &cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_;
+ }
+ ALWAYS_INLINE static void SetRandomSeed(unsigned int seed) {
+ cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_ = seed;
+ }
+
+ // The public facing option is adaptive_delay_aggressiveness, which is an
+ // opaque value for the user to tune the amount of delay injected into the
+ // program. Internally, the implementation maps the aggressiveness to a target
+ // percent delay for the overall program runtime. It's not easy to implement
+ // a true wall clock delay target (e.g., 25% program wall time slowdown)
+ // because 1) spin loops and yield are hard to calculate actual wall time
+ // slowness and 2) usleep(N) is often slower than advertised. Thus, we keep
+ // the user facing parameter opaque to not under deliver on a promise of
+ // percent wall time slowdown.
struct TimeBudget {
- atomic_uint64_t total_delay_ns_;
- u64 program_start_ns_;
int target_overhead_pct_;
Percent target_low_;
Percent target_high_;
void Init(int target_pct) {
- atomic_store(&total_delay_ns_, 0, memory_order_relaxed);
- program_start_ns_ = NanoTime();
target_overhead_pct_ = target_pct;
target_low_ = Percent::FromPct(
target_overhead_pct_ >= 5 ? target_overhead_pct_ - 5 : 0);
target_high_ = Percent::FromPct(target_overhead_pct_ + 5);
}
+ static constexpr u64 BucketDurationNs = 30'000'000'000ULL;
+
void RecordDelay(u64 delay_ns) {
- atomic_fetch_add(&total_delay_ns_, delay_ns, memory_order_relaxed);
+ u64 now = NanoTime();
+ u64 elapsed_ns = now - TLS()->bucket_start_ns_;
+
+ if (elapsed_ns >= BucketDurationNs) {
+ // Shift: old bucket is discarded, new becomes old, start fresh new
+ TLS()->delay_buckets_ns_[0] = TLS()->delay_buckets_ns_[1];
+ TLS()->delay_buckets_ns_[1] = 0;
+ TLS()->bucket_start_ns_ = now;
+ }
+
+ TLS()->delay_buckets_ns_[1] += delay_ns;
}
Percent GetOverheadPercent() {
- u64 elapsed = NanoTime() - program_start_ns_;
- u64 one_millisecond = microseconds_per_second;
- if (elapsed < one_millisecond)
+ u64 now = NanoTime();
+ u64 elapsed_ns = now - TLS()->bucket_start_ns_;
+
+ // Need at least 1ms to calculate
+ if (elapsed_ns < 1'000'000ULL)
return Percent::FromPct(0);
- u64 delay = atomic_load(&total_delay_ns_, memory_order_relaxed);
- return Percent::FromRatio(delay, elapsed);
+
+ if (elapsed_ns > BucketDurationNs * 2) {
+ // Both buckets are stale
+ return Percent::FromPct(0);
+ } else if (elapsed_ns > BucketDurationNs) {
+ // bucket[0] is stale, use only bucket[1] (current bucket)
+ u64 total_delay_ns = TLS()->delay_buckets_ns_[1];
+ return Percent::FromRatio(total_delay_ns, elapsed_ns);
+ } else {
+ u64 total_delay_ns =
+ TLS()->delay_buckets_ns_[0] + TLS()->delay_buckets_ns_[1];
+ u64 window_ns = BucketDurationNs + elapsed_ns;
+ return Percent::FromRatio(total_delay_ns, window_ns);
+ }
}
bool ShouldDelay() {
@@ -129,7 +237,7 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
atomic_uint32_t count_;
};
Entry table_[TABLE_SIZE];
- static constexpr u32 ExponentialBackoffCap = 128;
+ static constexpr u32 ExponentialBackoffCap = 64;
void Init() {
for (u64 i = 0; i < TABLE_SIZE; ++i) {
@@ -177,44 +285,15 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
int relaxed_sample_rate_;
int sync_atomic_sample_rate_;
int mutex_sample_rate_;
- int max_atomic_delay_us_;
- int max_sync_delay_us_;
- u64 window_ms_;
-
- ALWAYS_INLINE static FuzzingSchedulerTlsData* TLS() {
- return &cur_thread()->fuzzingSchedulerTlsData;
- }
- ALWAYS_INLINE static unsigned int* GetRandomSeed() {
- return &cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_;
- }
- ALWAYS_INLINE static void SetRandomSeed(unsigned int seed) {
- cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_ = seed;
- }
-
- bool CanDelayThread() {
- u64 now = NanoTime();
- bool needs_reset =
- now - TLS()->window_start_ns_ > TLS()->window_duration_ns_;
- if (needs_reset) {
- TLS()->window_start_ns_ = now;
- TLS()->delays_this_window_ = 0;
- }
-
- if (TLS()->delays_this_window_ >= TLS()->max_delays_per_window_)
- return false;
- return true;
- }
-
- void RecordOneDelayThisThread() { ++TLS()->delays_this_window_; }
+ DelaySpec atomic_delay_;
+ DelaySpec sync_delay_;
void Init() override { InitTls(); }
void InitTls() {
- TLS()->window_start_ns_ = NanoTime();
- TLS()->delays_this_window_ = 0;
- static constexpr int max_delays_per_window_default = 500;
- TLS()->max_delays_per_window_ = max_delays_per_window_default;
- TLS()->window_duration_ns_ = window_ms_ * microseconds_per_second;
+ TLS()->bucket_start_ns_ = NanoTime();
+ TLS()->delay_buckets_ns_[0] = 0;
+ TLS()->delay_buckets_ns_[1] = 0;
SetRandomSeed(flags()->adaptive_delay_random_seed);
if (*GetRandomSeed() == 0)
@@ -228,26 +307,26 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
relaxed_sample_rate_ = flags()->adaptive_delay_relaxed_sample_rate;
sync_atomic_sample_rate_ = flags()->adaptive_delay_sync_atomic_sample_rate;
mutex_sample_rate_ = flags()->adaptive_delay_mutex_sample_rate;
- max_atomic_delay_us_ = flags()->adaptive_delay_max_atomic_us;
- max_sync_delay_us_ = flags()->adaptive_delay_max_sync_us;
- window_ms_ = flags()->adaptive_delay_window_ms;
+ atomic_delay_ = DelaySpec::Parse(flags()->adaptive_delay_max_atomic);
+ sync_delay_ = DelaySpec::Parse(flags()->adaptive_delay_max_sync);
- int target_pct = flags()->adaptive_delay_target_overhead_pct;
- if (target_pct < 1)
- target_pct = 1;
+ int delay_aggressiveness = flags()->adaptive_delay_aggressiveness;
+ if (delay_aggressiveness < 1)
+ delay_aggressiveness = 1;
- budget_.Init(target_pct);
+ budget_.Init(delay_aggressiveness);
sampler_.Init();
Printf("INFO: ThreadSanitizer AdaptiveDelayScheduler initialized\n");
- Printf(" Target overhead: %d%%\n", target_pct);
+ Printf(" Delay aggressiveness: %d\n", delay_aggressiveness);
Printf(" Random seed: %u\n", *GetRandomSeed());
Printf(" Relaxed atomic sample rate: 1/%d\n", relaxed_sample_rate_);
Printf(" Sync atomic sample rate: 1/%d\n", sync_atomic_sample_rate_);
Printf(" Mutex sample rate: 1/%d\n", mutex_sample_rate_);
- Printf(" Max atomic delay: %d us\n", max_atomic_delay_us_);
- Printf(" Max sync delay: %d us\n", max_sync_delay_us_);
- Printf(" Delay window: %llu ms\n", window_ms_);
+ Printf(" Atomic delay: %s=%d (~%llu ns)\n", atomic_delay_.TypeName(),
+ atomic_delay_.value, atomic_delay_.EstimatedNs());
+ Printf(" Sync delay: %s=%d (~%llu ns)\n", sync_delay_.TypeName(),
+ sync_delay_.value, sync_delay_.EstimatedNs());
}
void DoSpinDelay(int cycles) {
@@ -258,22 +337,37 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
void DoYieldDelay() { internal_sched_yield(); }
- void UsleepDelay(int max_us) {
+ void DoSleepUsDelay(int max_us) {
int delay_us = 1 + (Rand(GetRandomSeed()) % max_us);
internal_usleep(delay_us);
budget_.RecordDelay(delay_us * 1000ULL);
}
+ void ExecuteDelay(const DelaySpec& spec) {
+ switch (spec.type) {
+ case DelayType::Spin: {
+ int cycles = 1 + (Rand(GetRandomSeed()) % spec.value);
+ DoSpinDelay(cycles);
+ budget_.RecordDelay(cycles * DelaySpec::kNsPerSpinCycle);
+ break;
+ }
+ case DelayType::Yield:
+ DoYieldDelay();
+ budget_.RecordDelay(DelaySpec::kNsPerYield);
+ break;
+ case DelayType::SleepUs:
+ DoSleepUsDelay(spec.value);
+ break;
+ }
+ }
+
void AtomicRelaxedOpDelay() {
if ((Rand(GetRandomSeed()) % relaxed_sample_rate_) != 0)
return;
if (!budget_.ShouldDelay())
return;
- if (!CanDelayThread())
- return;
DoSpinDelay(10 + (Rand(GetRandomSeed()) % 10));
- RecordOneDelayThisThread();
static constexpr int spin_delay_estimate_ns = 50;
budget_.RecordDelay(spin_delay_estimate_ns);
}
@@ -283,19 +377,11 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
return;
if (!budget_.ShouldDelay())
return;
- if (!CanDelayThread())
- return;
if (addr && !sampler_.ShouldDelayAddr(*addr))
return;
- if (max_atomic_delay_us_ <= 1) {
- DoYieldDelay();
- static constexpr int yield_delay_estimate_ns = 100;
- budget_.RecordDelay(yield_delay_estimate_ns);
- } else
- UsleepDelay(max_atomic_delay_us_);
- RecordOneDelayThisThread();
+ ExecuteDelay(atomic_delay_);
}
void AtomicOpFence(int mo) override {
@@ -321,11 +407,8 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
if (!budget_.ShouldDelay())
return;
- if (!CanDelayThread())
- return;
- UsleepDelay(max_sync_delay_us_);
- RecordOneDelayThisThread();
+ ExecuteDelay(sync_delay_);
}
void MutexCvOp() override {
@@ -335,11 +418,8 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
return;
if (!budget_.ShouldDelay())
return;
- if (!CanDelayThread())
- return;
- UsleepDelay(max_sync_delay_us_);
- RecordOneDelayThisThread();
+ ExecuteDelay(sync_delay_);
}
void JoinOp() override { UnsampledDelay(); }
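
(Aside for reviewers: the address sampler's body is mostly elided from the hunks above. A hedged sketch of the per-address exponential backoff it implements; the table size, hash, and exact delay schedule below are illustrative assumptions, not the patch's actual values:)

#include <atomic>
#include <cstdint>

// Sketch of per-address exponential backoff sampling: an address gets
// delayed on its 1st, 2nd, 4th, 8th, ... visit, capped so that hot
// atomics are still delayed occasionally rather than eating the budget.
struct AddrSamplerSketch {
  static constexpr uint64_t kTableSize = 4096;
  static constexpr uint32_t kBackoffCap = 64;  // matches the new cap above
  std::atomic<uint32_t> counts[kTableSize];

  void Init() {
    for (uint64_t i = 0; i < kTableSize; ++i)
      counts[i].store(0, std::memory_order_relaxed);
  }

  bool ShouldDelayAddr(uint64_t addr) {
    uint64_t slot = (addr >> 3) % kTableSize;  // assumed hash: drop low bits
    uint32_t n = counts[slot].fetch_add(1, std::memory_order_relaxed);
    if (n >= kBackoffCap)
      return n % kBackoffCap == 0;   // past the cap: every kBackoffCap-th
    return (n & (n - 1)) == 0;       // visits 0, 1, 2, 4, 8, ...
  }
};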
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
index f4895fabd2194..3f9c77c9c0879 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
@@ -23,10 +23,9 @@ namespace __tsan {
// TLS data in this type, which will be available in cur_thread().
struct FuzzingSchedulerTlsData {
// For the adaptive scheduler
- u64 window_start_ns_;
- u32 delays_this_window_;
- u32 max_delays_per_window_;
- u64 window_duration_ns_;
+ // Sliding window delay tracking: 2 buckets of 30 seconds each
+ u64 delay_buckets_ns_[2]; // [0] = older 30s, [1] = newer 30s
+ u64 bucket_start_ns_; // When current bucket (index 1) started
unsigned int tls_random_seed_;
bool tls_initialized_;
};
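
To make the new bucketed budget easier to review, here is a minimal standalone sketch of the two-bucket sliding window (names simplified; timestamps are passed in explicitly instead of calling NanoTime()):

#include <cstdint>
#include <cstdio>

// Sketch of the two-bucket sliding window behind TimeBudget: only delays
// recorded in roughly the last 30-60 seconds count toward the estimate.
struct SlidingDelayWindow {
  static constexpr uint64_t kBucketNs = 30'000'000'000ULL;  // 30s per bucket
  uint64_t buckets_ns[2] = {0, 0};  // [0] = older bucket, [1] = current
  uint64_t bucket_start_ns = 0;     // when the current bucket started

  void Record(uint64_t now_ns, uint64_t delay_ns) {
    if (now_ns - bucket_start_ns >= kBucketNs) {
      // Roll: current becomes old, the stale old bucket is dropped.
      buckets_ns[0] = buckets_ns[1];
      buckets_ns[1] = 0;
      bucket_start_ns = now_ns;
    }
    buckets_ns[1] += delay_ns;
  }

  uint64_t OverheadPct(uint64_t now_ns) const {
    uint64_t elapsed = now_ns - bucket_start_ns;
    if (elapsed < 1'000'000ULL) return 0;   // need >= 1ms to be meaningful
    if (elapsed > 2 * kBucketNs) return 0;  // both buckets are stale
    if (elapsed > kBucketNs)                // old bucket is stale
      return 100 * buckets_ns[1] / elapsed;
    // Note: during the very first window this denominator overcounts by up
    // to kBucketNs; a later commit in this series fixes that with
    // bucket0_window_ns.
    return 100 * (buckets_ns[0] + buckets_ns[1]) / (kBucketNs + elapsed);
  }
};

int main() {
  SlidingDelayWindow w;
  w.Record(31'000'000'000ULL, 3'000'000'000ULL);  // rolls, then 3s of delay
  // 2s into the fresh bucket: 3s of delay over a (30s + 2s) window ~= 9%.
  std::printf("overhead: %llu%%\n",
              (unsigned long long)w.OverheadPct(33'000'000'000ULL));
}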
>From 6eb85ff24d0414521669e62c4025a63f3cfe9b3b Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Fri, 13 Feb 2026 03:53:14 +0000
Subject: [PATCH 3/7] Review comments
- remove seed flag
- simplify AtomicImpl
- use VPrintf
---
compiler-rt/lib/tsan/rtl/tsan_flags.inc | 2 -
.../lib/tsan/rtl/tsan_fuzzing_scheduler.cpp | 19 ++---
.../lib/tsan/rtl/tsan_fuzzing_scheduler.h | 84 +++++++++----------
.../tsan/rtl/tsan_fuzzing_scheduler_data.h | 2 +-
.../lib/tsan/rtl/tsan_interface_atomic.cpp | 24 +++---
5 files changed, 63 insertions(+), 68 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 86d0f6a645996..2b45ec6ad6f92 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -115,5 +115,3 @@ TSAN_FLAG(const char*, adaptive_delay_max_atomic, "sleep_us=50",
TSAN_FLAG(const char*, adaptive_delay_max_sync, "sleep_us=500",
"Delay for atomic operations: 'spin=N' (max N spins), 'yield', or "
"'sleep_us=N' (max N us sleep)")
-TSAN_FLAG(int, adaptive_delay_random_seed, 0,
- "Random seed for delay injection (0 = use time-based seed)")
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
index 14f47cb7702da..51ab8a537a5d1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -295,9 +295,7 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
TLS()->delay_buckets_ns_[0] = 0;
TLS()->delay_buckets_ns_[1] = 0;
- SetRandomSeed(flags()->adaptive_delay_random_seed);
- if (*GetRandomSeed() == 0)
- SetRandomSeed(NanoTime());
+ SetRandomSeed(NanoTime());
TLS()->tls_initialized_ = true;
}
@@ -317,15 +315,14 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
budget_.Init(delay_aggressiveness);
sampler_.Init();
- Printf("INFO: ThreadSanitizer AdaptiveDelayScheduler initialized\n");
- Printf(" Delay aggressiveness: %d\n", delay_aggressiveness);
- Printf(" Random seed: %u\n", *GetRandomSeed());
- Printf(" Relaxed atomic sample rate: 1/%d\n", relaxed_sample_rate_);
- Printf(" Sync atomic sample rate: 1/%d\n", sync_atomic_sample_rate_);
- Printf(" Mutex sample rate: 1/%d\n", mutex_sample_rate_);
- Printf(" Atomic delay: %s=%d (~%llu ns)\n", atomic_delay_.TypeName(),
+ VPrintf(1, "INFO: ThreadSanitizer AdaptiveDelayScheduler initialized\n");
+ VPrintf(1, " Delay aggressiveness: %d\n", delay_aggressiveness);
+ VPrintf(1, " Relaxed atomic sample rate: 1/%d\n", relaxed_sample_rate_);
+ VPrintf(1, " Sync atomic sample rate: 1/%d\n", sync_atomic_sample_rate_);
+ VPrintf(1, " Mutex sample rate: 1/%d\n", mutex_sample_rate_);
+ VPrintf(1, " Atomic delay: %s=%d (~%llu ns)\n", atomic_delay_.TypeName(),
atomic_delay_.value, atomic_delay_.EstimatedNs());
- Printf(" Sync delay: %s=%d (~%llu ns)\n", sync_delay_.TypeName(),
+ VPrintf(1, " Sync delay: %s=%d (~%llu ns)\n", sync_delay_.TypeName(),
sync_delay_.value, sync_delay_.EstimatedNs());
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
index 590848269a572..d6458c9c1c633 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
@@ -18,6 +18,48 @@
namespace __tsan {
+struct IFuzzingScheduler {
+ virtual void Init() = 0;
+
+ virtual void MutexCvOp() = 0;
+ virtual void AtomicOpFence(int mo) = 0;
+ virtual void AtomicOpAddr(__sanitizer::uptr addr, int mo) = 0;
+
+ virtual int DetachThread(void* th) = 0;
+ virtual void AfterThreadCreation() = 0;
+ virtual void BeforeChildThreadRuns() = 0;
+ virtual void JoinOp() = 0;
+
+ protected:
+ IFuzzingScheduler() = default;
+
+ // Derived types of IFuzzingScheduler are only constructed on the stack.
+ // No code ever deletes a base pointer, so a non-virtual destructor is OK.
+ // There is a separate clang warning, -Wdelete-non-abstract-non-virtual-dtor,
+ // that catches deleting pointers of types with virtual methods but a
+ // non-virtual destructor.
+ //
+ // The destructor cannot be virtual, otherwise it would emit references to
+ // operator delete, which the TSAN runtime cannot depend on in some
+ // environments.
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+ ~IFuzzingScheduler() = default;
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+};
+
+IFuzzingScheduler& GetFuzzingScheduler();
+
+extern bool is_fuzz_scheduler_enabled;
+
+ALWAYS_INLINE bool IsFuzzSchedulerEnabled() {
+ return is_fuzz_scheduler_enabled;
+}
+
// Fixed-point arithmetic type that mimics floating point operations
class Percent {
using u32 = __sanitizer::u32;
@@ -84,48 +126,6 @@ class Percent {
}
};
-struct IFuzzingScheduler {
- virtual void Init() = 0;
-
- virtual void MutexCvOp() = 0;
- virtual void AtomicOpFence(int mo) = 0;
- virtual void AtomicOpAddr(__sanitizer::uptr addr, int mo) = 0;
-
- virtual int DetachThread(void* th) = 0;
- virtual void AfterThreadCreation() = 0;
- virtual void BeforeChildThreadRuns() = 0;
- virtual void JoinOp() = 0;
-
- protected:
- IFuzzingScheduler() = default;
-
- // Derived types of IFuzzingScheduler are only constructed on the stack.
- // No code ever deletes a base pointer, so a non-virtual destructor is OK.
- // There is a separate clang warning, -Wdelete-non-abstract-non-virtual-dtor,
- // that catches deleting pointers of types with virtual methods but a
- // non-virtual destructor.
- //
- // The destructor cannot be virtual, otherwise it would emit references to
- // operator delete, which the TSAN runtime cannot depend on in some
- // environments.
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
-#endif
- ~IFuzzingScheduler() = default;
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
-};
-
-IFuzzingScheduler& GetFuzzingScheduler();
-
-extern bool is_fuzz_scheduler_enabled;
-
-ALWAYS_INLINE bool IsFuzzSchedulerEnabled() {
- return is_fuzz_scheduler_enabled;
-}
-
} // namespace __tsan
#endif
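
(The Percent class body is unchanged and elided above; as a rough illustration of the fixed-point idea, a sketch along these lines would work. The scale factor and the operator set are assumptions for illustration, not the patch's actual implementation:)

#include <cstdint>

// Sketch only: a fixed-point percent type with FromPct/FromRatio
// factories, using integer arithmetic throughout.
class PercentSketch {
  uint32_t scaled_;  // percent * kScale
  static constexpr uint32_t kScale = 10'000;  // assumed resolution
  explicit PercentSketch(uint32_t scaled) : scaled_(scaled) {}

 public:
  static PercentSketch FromPct(uint32_t pct) {
    return PercentSketch(pct * kScale);
  }
  // percent = 100 * num / den; the intermediate fits in u64 for
  // nanosecond-scale inputs.
  static PercentSketch FromRatio(uint64_t num, uint64_t den) {
    if (den == 0)
      return PercentSketch(0);
    return PercentSketch(static_cast<uint32_t>(num * 100 * kScale / den));
  }
  bool operator<(PercentSketch o) const { return scaled_ < o.scaled_; }
  bool operator>(PercentSketch o) const { return scaled_ > o.scaled_; }
};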
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
index 3f9c77c9c0879..d5eb4a677bbec 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
@@ -17,7 +17,7 @@
namespace __tsan {
-// The TSAN Runtime defines cur_thread() to retrieve TLS thread state, and it
+// The runtime defines cur_thread() to retrieve TLS thread state, and it
// takes care of platform specific implementation details. Rather than the
// IFuzzingScheduler derived types reinventing the wheel, we define all possible
// TLS data in this type, which will be available in cur_thread().
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index c06e25e2a6afe..62d45ce4899f6 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -521,26 +521,26 @@ static morder to_morder(int mo) {
return res;
}
-template <class Op, class... Types>
-ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
+template <class... Types>
+ALWAYS_INLINE auto AtomicDelayImpl(morder mo, Types... args) {
if (IsFuzzSchedulerEnabled())
GetFuzzingScheduler().AtomicOpFence(mo);
- ThreadState *const thr = cur_thread();
- ProcessPendingSignals(thr);
- if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
- return Op::NoTsanAtomic(mo, args...);
- return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
}
-template <class Op, class AddrType, class... Types>
-ALWAYS_INLINE auto AtomicImpl(morder mo, AddrType addr, Types... args) {
+template <class AddrType, class... Types>
+ALWAYS_INLINE auto AtomicDelayImpl(morder mo, AddrType addr, Types... args) {
if (IsFuzzSchedulerEnabled())
GetFuzzingScheduler().AtomicOpAddr((uptr)addr, (int)mo);
- ThreadState* const thr = cur_thread();
+}
+
+template <class Op, class... Types>
+ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
+ AtomicDelayImpl(mo, args...);
+ ThreadState *const thr = cur_thread();
ProcessPendingSignals(thr);
if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
- return Op::NoTsanAtomic(mo, addr, args...);
- return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), addr, args...);
+ return Op::NoTsanAtomic(mo, args...);
+ return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
}
extern "C" {
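
The AtomicDelayImpl split relies on overload partial ordering: a call that passes an address picks the more specialized (AddrType, Types...) overload, while fence-style calls with only a memory order match the pure parameter pack. A minimal sketch of the same dispatch pattern outside the runtime:

#include <cstdio>

enum morder { mo_relaxed, mo_acquire, mo_seq_cst };

// Fence-style calls (memory order only) match the pure parameter pack.
template <class... Types>
void DelayHook(morder mo, Types...) {
  std::printf("fence hook, mo=%d\n", (int)mo);
}

// Calls carrying an address prefer this overload: partial ordering
// treats a named leading parameter as more specialized than the pack.
template <class AddrType, class... Types>
void DelayHook(morder mo, AddrType addr, Types...) {
  std::printf("addr hook, addr=%p, mo=%d\n", (void*)addr, (int)mo);
}

int main() {
  int x = 0;
  DelayHook(mo_seq_cst);         // no address -> fence overload
  DelayHook(mo_acquire, &x, 1);  // address first -> addr overload
}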
>From 31d9f0db44525485b1316971636dc057363aee37 Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Fri, 13 Feb 2026 05:10:55 +0000
Subject: [PATCH 4/7] Remove vtable
---
compiler-rt/lib/tsan/rtl/tsan_flags.inc | 8 +-
.../lib/tsan/rtl/tsan_fuzzing_scheduler.cpp | 115 ++++++++----------
.../lib/tsan/rtl/tsan_fuzzing_scheduler.h | 97 +++++++++------
.../tsan/rtl/tsan_fuzzing_scheduler_data.h | 10 +-
.../lib/tsan/rtl/tsan_interceptors_posix.cpp | 81 ++++++------
.../lib/tsan/rtl/tsan_interface_atomic.cpp | 8 +-
compiler-rt/lib/tsan/rtl/tsan_rtl.cpp | 2 +-
compiler-rt/lib/tsan/rtl/tsan_rtl.h | 2 +-
8 files changed, 169 insertions(+), 154 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 2b45ec6ad6f92..6c007e4dbfe85 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -93,8 +93,12 @@ TSAN_FLAG(LockDuringWriteSetting, lock_during_write, kLockDuringAllWrites,
"the current process and it's children processes.")
#endif
-TSAN_FLAG(const char*, fuzzing_scheduler, "",
- "Fuzzing scheduler: '' (default; none), 'adaptive'")
+TSAN_FLAG(bool, enable_adaptive_delay, false,
+ "Enable adaptive delay injection to expose data races. When "
+ "enabled, delays are strategically injected at synchronization "
+ "points, atomic operations, and thread lifecycle events to increase "
+ "the likelihood of exposing races while maintaining a configurable "
+ "overhead budget.")
TSAN_FLAG(
int, adaptive_delay_aggressiveness, 25,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
index 51ab8a537a5d1..198d89d6e9939 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -29,24 +29,6 @@ namespace __tsan {
namespace {
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
-#endif
-struct NullFuzzingScheduler : IFuzzingScheduler {
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
- void Init() override {}
- void AtomicOpFence(int mo) override {}
- void AtomicOpAddr(uptr addr, int mo) override {}
- void MutexCvOp() override {}
- int DetachThread(void* th) override { return REAL(pthread_detach)(th); }
- void BeforeChildThreadRuns() override {}
- void AfterThreadCreation() override {}
- void JoinOp() override {}
-};
-
// =============================================================================
// DelaySpec: Represents a delay configuration parsed from flag strings
// =============================================================================
@@ -118,10 +100,10 @@ struct DelaySpec {
};
// =============================================================================
-// AdaptiveDelayScheduler: Time-budget aware delay injection for race exposure
+// AdaptiveDelay: Time-budget aware delay injection for race exposure
// =============================================================================
//
-// This scheduler injects delays to expose data races while maintaining a
+// This implementation injects delays to expose data races while maintaining a
// configurable overhead target. It uses several strategies:
//
// 1. Time-Budget Controller: Tracks cumulative delays vs wall-clock time
@@ -136,23 +118,15 @@ struct DelaySpec {
// 3. Address-based Sampling: Exponential backoff per address to avoid
// repeatedly delaying hot atomics.
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
-#endif
-struct AdaptiveDelayScheduler : NullFuzzingScheduler {
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
-
- ALWAYS_INLINE static FuzzingSchedulerTlsData* TLS() {
- return &cur_thread()->fuzzingSchedulerTlsData;
+struct AdaptiveDelayImpl {
+ ALWAYS_INLINE static AdaptiveDelayTlsData* TLS() {
+ return &cur_thread()->adaptiveDelayTlsData;
}
ALWAYS_INLINE static unsigned int* GetRandomSeed() {
- return &cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_;
+ return &cur_thread()->adaptiveDelayTlsData.tls_random_seed_;
}
ALWAYS_INLINE static void SetRandomSeed(unsigned int seed) {
- cur_thread()->fuzzingSchedulerTlsData.tls_random_seed_ = seed;
+ cur_thread()->adaptiveDelayTlsData.tls_random_seed_ = seed;
}
// The public facing option is adaptive_delay_aggressiveness, which is an
@@ -288,7 +262,9 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
DelaySpec atomic_delay_;
DelaySpec sync_delay_;
- void Init() override { InitTls(); }
+ void Init() {
+ InitTls();
+ }
void InitTls() {
TLS()->bucket_start_ns_ = NanoTime();
@@ -301,7 +277,7 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
bool IsTlsInitialized() const { return TLS()->tls_initialized_; }
- AdaptiveDelayScheduler() {
+ AdaptiveDelayImpl() {
relaxed_sample_rate_ = flags()->adaptive_delay_relaxed_sample_rate;
sync_atomic_sample_rate_ = flags()->adaptive_delay_sync_atomic_sample_rate;
mutex_sample_rate_ = flags()->adaptive_delay_mutex_sample_rate;
@@ -315,7 +291,7 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
budget_.Init(delay_aggressiveness);
sampler_.Init();
- VPrintf(1, "INFO: ThreadSanitizer AdaptiveDelayScheduler initialized\n");
+ VPrintf(1, "INFO: ThreadSanitizer AdaptiveDelay initialized\n");
VPrintf(1, " Delay aggressiveness: %d\n", delay_aggressiveness);
VPrintf(1, " Relaxed atomic sample rate: 1/%d\n", relaxed_sample_rate_);
VPrintf(1, " Sync atomic sample rate: 1/%d\n", sync_atomic_sample_rate_);
@@ -381,7 +357,7 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
ExecuteDelay(atomic_delay_);
}
- void AtomicOpFence(int mo) override {
+ void AtomicOpFence(int mo) {
CHECK(IsTlsInitialized());
if (mo < mo_acquire)
@@ -390,7 +366,7 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
AtomicSyncOpDelay(nullptr);
}
- void AtomicOpAddr(uptr addr, int mo) override {
+ void AtomicOpAddr(uptr addr, int mo) {
CHECK(IsTlsInitialized());
if (mo < mo_acquire)
@@ -408,7 +384,7 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
ExecuteDelay(sync_delay_);
}
- void MutexCvOp() override {
+ void MutexCvOp() {
CHECK(IsTlsInitialized());
if ((Rand(GetRandomSeed()) % mutex_sample_rate_) != 0)
@@ -419,46 +395,57 @@ struct AdaptiveDelayScheduler : NullFuzzingScheduler {
ExecuteDelay(sync_delay_);
}
- void JoinOp() override { UnsampledDelay(); }
+ void JoinOp() {
+ UnsampledDelay();
+ }
- void BeforeChildThreadRuns() override {
+ void BeforeChildThreadRuns() {
InitTls();
UnsampledDelay();
}
- void AfterThreadCreation() override { UnsampledDelay(); }
+ void AfterThreadCreation() {
+ UnsampledDelay();
+ }
- int DetachThread(void* th) override {
- int res = REAL(pthread_detach)(th);
+ void DetachThread() {
UnsampledDelay();
- return res;
}
};
-IFuzzingScheduler& FuzzingSchedulerDispatcher() {
- if (!internal_strcmp(flags()->fuzzing_scheduler, "")) {
- is_fuzz_scheduler_enabled = false;
- static NullFuzzingScheduler scheduler;
- return scheduler;
- } else if (!internal_strcmp(flags()->fuzzing_scheduler, "adaptive")) {
- is_fuzz_scheduler_enabled = true;
- static AdaptiveDelayScheduler scheduler;
- return scheduler;
- } else {
- Printf(
- "FATAL: ThreadSanitizer invalid fuzzing scheduler. Please check "
- "TSAN_OPTIONS!\n");
- Die();
- }
+AdaptiveDelayImpl& GetImpl() {
+ static AdaptiveDelayImpl impl;
+ return impl;
}
} // namespace
-bool is_fuzz_scheduler_enabled;
+bool is_adaptive_delay_enabled;
-IFuzzingScheduler& GetFuzzingScheduler() {
- static IFuzzingScheduler& scheduler = FuzzingSchedulerDispatcher();
- return scheduler;
+void AdaptiveDelay::InitImpl() {
+ GetImpl().Init();
+}
+
+void AdaptiveDelay::MutexCvOpImpl() {
+ GetImpl().MutexCvOp();
+}
+void AdaptiveDelay::AtomicOpFenceImpl(int mo) {
+ GetImpl().AtomicOpFence(mo);
+}
+void AdaptiveDelay::AtomicOpAddrImpl(__sanitizer::uptr addr, int mo) {
+ GetImpl().AtomicOpAddr(addr, mo);
+}
+void AdaptiveDelay::DetachThreadImpl() {
+ GetImpl().DetachThread();
+}
+void AdaptiveDelay::AfterThreadCreationImpl() {
+ GetImpl().AfterThreadCreation();
+}
+void AdaptiveDelay::BeforeChildThreadRunsImpl() {
+ GetImpl().BeforeChildThreadRuns();
+}
+void AdaptiveDelay::JoinOpImpl() {
+ GetImpl().JoinOp();
}
} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
index d6458c9c1c633..9e6d24127ddc0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
@@ -18,46 +18,69 @@
namespace __tsan {
-struct IFuzzingScheduler {
- virtual void Init() = 0;
-
- virtual void MutexCvOp() = 0;
- virtual void AtomicOpFence(int mo) = 0;
- virtual void AtomicOpAddr(__sanitizer::uptr addr, int mo) = 0;
-
- virtual int DetachThread(void* th) = 0;
- virtual void AfterThreadCreation() = 0;
- virtual void BeforeChildThreadRuns() = 0;
- virtual void JoinOp() = 0;
-
- protected:
- IFuzzingScheduler() = default;
-
- // Derived types of IFuzzingScheduler are only constructed on the stack.
- // No code ever deletes a base pointer, so a non-virtual destructor is OK.
- // There is a separate clang warning, -Wdelete-non-abstract-non-virtual-dtor,
- // that catches deleting pointers of types with virtual methods but a
- // non-virtual destructor.
- //
- // The destructor cannot be virtual, otherwise it would emit references to
- // operator delete, which the TSAN runtime cannot depend on in some
- // environments.
-#ifdef __clang__
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wnon-virtual-dtor"
-#endif
- ~IFuzzingScheduler() = default;
-#ifdef __clang__
-# pragma clang diagnostic pop
-#endif
-};
+extern bool is_adaptive_delay_enabled;
+
+// AdaptiveDelay injects delays at synchronization points, atomic operations,
+// and thread lifecycle events to increase the likelihood of exposing data
+// races. The delay injection is controlled by a time budget to maintain a
+// configurable overhead target.
+struct AdaptiveDelay {
+ ALWAYS_INLINE static void Init() {
+ InitImpl();
+ }
+
+ ALWAYS_INLINE static void MutexCvOp() {
+ if (!is_adaptive_delay_enabled) return;
+ MutexCvOpImpl();
+ }
-IFuzzingScheduler& GetFuzzingScheduler();
+ ALWAYS_INLINE static void AtomicOpFence(int mo) {
+ if (!is_adaptive_delay_enabled) return;
+ AtomicOpFenceImpl(mo);
+ }
+
+ ALWAYS_INLINE static void AtomicOpAddr(__sanitizer::uptr addr, int mo) {
+ if (!is_adaptive_delay_enabled) return;
+ AtomicOpAddrImpl(addr, mo);
+ }
+
+ ALWAYS_INLINE static void DetachThread() {
+ if (!is_adaptive_delay_enabled) return;
+ DetachThreadImpl();
+ }
+
+ ALWAYS_INLINE static void AfterThreadCreation() {
+ if (!is_adaptive_delay_enabled) return;
+ AfterThreadCreationImpl();
+ }
+
+ ALWAYS_INLINE static void BeforeChildThreadRuns() {
+ if (!is_adaptive_delay_enabled) return;
+ BeforeChildThreadRunsImpl();
+ }
+
+ ALWAYS_INLINE static void JoinOp() {
+ if (!is_adaptive_delay_enabled) return;
+ JoinOpImpl();
+ }
+
+private:
+
+ static void InitImpl();
+
+ static void MutexCvOpImpl();
+ static void AtomicOpFenceImpl(int mo);
+ static void AtomicOpAddrImpl(__sanitizer::uptr addr, int mo);
+ static void DetachThreadImpl();
+ static void AfterThreadCreationImpl();
+ static void BeforeChildThreadRunsImpl();
+ static void JoinOpImpl();
+};
-extern bool is_fuzz_scheduler_enabled;
+AdaptiveDelay& GetAdaptiveDelay();
-ALWAYS_INLINE bool IsFuzzSchedulerEnabled() {
- return is_fuzz_scheduler_enabled;
+ALWAYS_INLINE bool IsAdaptiveDelayEnabled() {
+ return is_adaptive_delay_enabled;
}
// Fixed-point arithmetic type that mimics floating point operations
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
index d5eb4a677bbec..553dd483aa158 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
@@ -18,11 +18,11 @@
namespace __tsan {
// The runtime defines cur_thread() to retrieve TLS thread state, and it
-// takes care of platform specific implementation details. Rather than the
-// IFuzzingScheduler derived types reinventing the wheel, we define all possible
-// TLS data in this type, which will be available in cur_thread().
-struct FuzzingSchedulerTlsData {
- // For the adaptive scheduler
+// takes care of platform specific implementation details. The AdaptiveDelay
+// implementation stores per-thread data in this struct, which is embedded
+// in cur_thread().
+struct AdaptiveDelayTlsData {
+ // For the adaptive delay implementation
// Sliding window delay tracking: 2 buckets of 30 seconds each
u64 delay_buckets_ns_[2]; // [0] = older 30s, [1] = newer 30s
u64 bucket_start_ns_; // When current bucket (index 1) started
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index f34aa338f5aa1..3c21e621b13cb 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -1067,7 +1067,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
p->started.Post();
}
- GetFuzzingScheduler().BeforeChildThreadRuns();
+ AdaptiveDelay::BeforeChildThreadRuns();
void *res = callback(param);
// Prevent the callback from being tail called,
@@ -1132,13 +1132,13 @@ TSAN_INTERCEPTOR(int, pthread_create,
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
- GetFuzzingScheduler().AfterThreadCreation();
+ AdaptiveDelay::AfterThreadCreation();
return res;
}
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
- GetFuzzingScheduler().JoinOp();
+ AdaptiveDelay::JoinOp();
#if SANITIZER_ANDROID
{
// In Bionic, if the target thread has already exited when pthread_detach is
@@ -1181,7 +1181,8 @@ int internal_pthread_join(void *th, void **ret) {
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
- int res = GetFuzzingScheduler().DetachThread(th);
+ AdaptiveDelay::DetachThread();
+ int res = REAL(pthread_detach)(th);
if (res == 0) {
ThreadDetach(thr, pc, tid);
}
@@ -1203,7 +1204,7 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
- GetFuzzingScheduler().JoinOp();
+ AdaptiveDelay::JoinOp();
int res = REAL(pthread_tryjoin_np)(th, ret);
ThreadIgnoreEnd(thr);
if (res == 0)
@@ -1218,7 +1219,7 @@ TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
- GetFuzzingScheduler().JoinOp();
+ AdaptiveDelay::JoinOp();
int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
ThreadIgnoreEnd(thr);
if (res == 0)
@@ -1332,7 +1333,7 @@ int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return cond_wait(
thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
m);
@@ -1341,7 +1342,7 @@ INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return cond_wait(
thr, pc, &si,
[=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
@@ -1353,7 +1354,7 @@ INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
__sanitizer_clockid_t clock, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return cond_wait(
thr, pc, &si,
[=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
@@ -1369,7 +1370,7 @@ INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
void *reltime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return cond_wait(
thr, pc, &si,
[=]() {
@@ -1383,7 +1384,7 @@ INTERCEPTOR(int, pthread_cond_signal, void *c) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return REAL(pthread_cond_signal)(cond);
}
@@ -1391,7 +1392,7 @@ INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return REAL(pthread_cond_broadcast)(cond);
}
@@ -1399,7 +1400,7 @@ INTERCEPTOR(int, pthread_cond_destroy, void *c) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_cond_destroy)(cond);
if (common_flags()->legacy_pthread_cond) {
// Free our aux cond and zero the pointer to not leave dangling pointers.
@@ -1428,7 +1429,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_mutex_destroy)(m);
if (res == 0 || res == errno_EBUSY) {
MutexDestroy(thr, pc, (uptr)m);
@@ -1439,7 +1440,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
MutexPreLock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = BLOCK_REAL(pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1452,7 +1453,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_mutex_trylock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1464,7 +1465,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_mutex_timedlock)(m, abstime);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1476,7 +1477,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
MutexUnlock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_mutex_unlock)(m);
if (res == errno_EINVAL)
MutexInvalidAccess(thr, pc, (uptr)m);
@@ -1488,7 +1489,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
__sanitizer_clockid_t clock, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
MutexPreLock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1507,7 +1508,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
MutexPreLock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = BLOCK_REAL(__pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
@@ -1521,7 +1522,7 @@ TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
MutexUnlock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(__pthread_mutex_unlock)(m);
if (res == errno_EINVAL)
MutexInvalidAccess(thr, pc, (uptr)m);
@@ -1533,7 +1534,7 @@ TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_spin_init)(m, pshared);
if (res == 0) {
MutexCreate(thr, pc, (uptr)m);
@@ -1543,7 +1544,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_spin_destroy)(m);
if (res == 0) {
MutexDestroy(thr, pc, (uptr)m);
@@ -1554,7 +1555,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
MutexPreLock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = BLOCK_REAL(pthread_spin_lock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m);
@@ -1564,7 +1565,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1575,7 +1576,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
MutexUnlock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_spin_unlock)(m);
return res;
}
@@ -1583,7 +1584,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_rwlock_init)(m, a);
if (res == 0) {
MutexCreate(thr, pc, (uptr)m);
@@ -1593,7 +1594,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_rwlock_destroy)(m);
if (res == 0) {
MutexDestroy(thr, pc, (uptr)m);
@@ -1604,7 +1605,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
MutexPreReadLock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_rwlock_rdlock)(m);
if (res == 0) {
MutexPostReadLock(thr, pc, (uptr)m);
@@ -1614,7 +1615,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
- GetFuzzingScheduler().JoinOp();
+ AdaptiveDelay::JoinOp();
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1625,7 +1626,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
if (res == 0) {
MutexPostReadLock(thr, pc, (uptr)m);
@@ -1637,7 +1638,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
MutexPreLock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m);
@@ -1647,7 +1648,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1658,7 +1659,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
@@ -1670,7 +1671,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
MutexReadOrWriteUnlock(thr, pc, (uptr)m);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_rwlock_unlock)(m);
return res;
}
@@ -1679,7 +1680,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_barrier_init)(b, a, count);
return res;
}
@@ -1687,7 +1688,7 @@ TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_barrier_destroy)(b);
return res;
}
@@ -1696,7 +1697,7 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
Release(thr, pc, (uptr)b);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
int res = REAL(pthread_barrier_wait)(b);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -1742,7 +1743,7 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return REAL(__fxstat)(version, fd, buf);
}
@@ -2184,7 +2185,7 @@ TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
- GetFuzzingScheduler().MutexCvOp();
+ AdaptiveDelay::MutexCvOp();
return REAL(pthread_sigmask)(how, set, oldset);
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 62d45ce4899f6..0959a5839ef40 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -523,14 +523,14 @@ static morder to_morder(int mo) {
template <class... Types>
ALWAYS_INLINE auto AtomicDelayImpl(morder mo, Types... args) {
- if (IsFuzzSchedulerEnabled())
- GetFuzzingScheduler().AtomicOpFence(mo);
+ if (IsAdaptiveDelayEnabled())
+ AdaptiveDelay::AtomicOpFence(mo);
}
template <class AddrType, class... Types>
ALWAYS_INLINE auto AtomicDelayImpl(morder mo, AddrType addr, Types... args) {
- if (IsFuzzSchedulerEnabled())
- GetFuzzingScheduler().AtomicOpAddr((uptr)addr, (int)mo);
+ if (IsAdaptiveDelayEnabled())
+ AdaptiveDelay::AtomicOpAddr((uptr)addr, (int)mo);
}
template <class Op, class... Types>
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 75d679c74382f..6cc3b615dd217 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -777,7 +777,7 @@ void Initialize(ThreadState *thr) {
}
#if !SANITIZER_GO
- GetFuzzingScheduler().Init();
+ AdaptiveDelay::Init();
#endif
OnInitialize();
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 3fada642ce770..dfcfde64c3ed5 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -241,7 +241,7 @@ struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState {
bool in_internal_write_call;
#endif
- FuzzingSchedulerTlsData fuzzingSchedulerTlsData;
+ AdaptiveDelayTlsData adaptiveDelayTlsData;
explicit ThreadState(Tid tid);
};
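
With the vtable gone, the disabled fast path is a single global load and a well-predicted branch in the inlined wrapper, and the cold path stays out of line. A minimal sketch of the pattern (plain member-function inlining stands in for ALWAYS_INLINE):

#include <cstdio>

extern bool g_feature_enabled;  // analogous to is_adaptive_delay_enabled

// Sketch of the devirtualized guard: the inline wrapper tests a plain
// global and bails out when the feature is off; the cold *Impl body is
// defined out of line so callers do not inline it.
struct Feature {
  static void Op() {
    if (!g_feature_enabled)
      return;
    OpImpl();
  }

 private:
  static void OpImpl();  // out of line, like AdaptiveDelay::*Impl
};

bool g_feature_enabled = false;
void Feature::OpImpl() { std::printf("feature op ran\n"); }

int main() {
  Feature::Op();             // disabled: no output, near-zero cost
  g_feature_enabled = true;
  Feature::Op();             // enabled: calls out of line
}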
>From e3f133f8b6982d63b8318b4d38d724f9bc8a501b Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Fri, 13 Feb 2026 17:10:11 +0000
Subject: [PATCH 5/7] Add back enablement flag logic
---
compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
index 198d89d6e9939..1910451868e24 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -19,12 +19,6 @@
#include "tsan_interface.h"
#include "tsan_rtl.h"
-extern "C" int pthread_detach(void*);
-
-namespace __interception {
-extern int (*real_pthread_detach)(void*);
-} // namespace __interception
-
namespace __tsan {
namespace {
@@ -264,6 +258,8 @@ struct AdaptiveDelayImpl {
void Init() {
InitTls();
+
+ is_adaptive_delay_enabled = flags()->enable_adaptive_delay;
}
void InitTls() {
>From 1e7b83702b8fed0d06ef81f5d8548f53fee021d6 Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Fri, 13 Feb 2026 19:49:41 +0000
Subject: [PATCH 6/7] Fix delay logic to handle first window
---
compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp | 7 ++++---
compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h | 3 +++
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
index 1910451868e24..806ed0318b370 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -155,6 +155,7 @@ struct AdaptiveDelayImpl {
TLS()->delay_buckets_ns_[0] = TLS()->delay_buckets_ns_[1];
TLS()->delay_buckets_ns_[1] = 0;
TLS()->bucket_start_ns_ = now;
+ TLS()->bucket0_window_ns = BucketDurationNs;
}
TLS()->delay_buckets_ns_[1] += delay_ns;
@@ -176,9 +177,8 @@ struct AdaptiveDelayImpl {
u64 total_delay_ns = TLS()->delay_buckets_ns_[1];
return Percent::FromRatio(total_delay_ns, elapsed_ns);
} else {
- u64 total_delay_ns =
- TLS()->delay_buckets_ns_[0] + TLS()->delay_buckets_ns_[1];
- u64 window_ns = BucketDurationNs + elapsed_ns;
+ u64 total_delay_ns = TLS()->delay_buckets_ns_[0] + TLS()->delay_buckets_ns_[1];
+ u64 window_ns = TLS()->bucket0_window_ns + elapsed_ns;
return Percent::FromRatio(total_delay_ns, window_ns);
}
}
@@ -266,6 +266,7 @@ struct AdaptiveDelayImpl {
TLS()->bucket_start_ns_ = NanoTime();
TLS()->delay_buckets_ns_[0] = 0;
TLS()->delay_buckets_ns_[1] = 0;
+ TLS()->bucket0_window_ns = 0;
SetRandomSeed(NanoTime());
TLS()->tls_initialized_ = true;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
index 553dd483aa158..6e6e51440f42d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
@@ -26,6 +26,9 @@ struct AdaptiveDelayTlsData {
// Sliding window delay tracking: 2 buckets of 30 seconds each
u64 delay_buckets_ns_[2]; // [0] = older 30s, [1] = newer 30s
u64 bucket_start_ns_; // When current bucket (index 1) started
+  u64 bucket0_window_ns;  // 0 before the first bucket has rolled; set to the bucket window duration after.
+  // Before the program has run for one full bucket window, we should not count
+  // the previous bucket's duration in the overhead percent calculation.
unsigned int tls_random_seed_;
bool tls_initialized_;
};
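
A worked example of why the first window needs bucket0_window_ns: 2 seconds into the run with 200ms of recorded delay, the true overhead is 10%, but the old denominator of BucketDurationNs + elapsed (32s) reports 0%, letting the scheduler keep injecting well past its budget:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kBucketNs = 30'000'000'000ULL;
  uint64_t delay_ns = 200'000'000ULL;      // 200ms of injected delay
  uint64_t elapsed_ns = 2'000'000'000ULL;  // 2s into the program

  uint64_t old_denom = kBucketNs + elapsed_ns;  // pre-fix: always 30s + t
  uint64_t bucket0_window_ns = 0;               // first bucket not yet rolled
  uint64_t new_denom = bucket0_window_ns + elapsed_ns;

  std::printf("pre-fix overhead:  %llu%%\n",
              (unsigned long long)(100 * delay_ns / old_denom));  // 0%
  std::printf("post-fix overhead: %llu%%\n",
              (unsigned long long)(100 * delay_ns / new_denom));  // 10%
}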
>From 4ed283be2b8d7ba4b596d5049a5837d21d791c5e Mon Sep 17 00:00:00 2001
From: Chris Cotter <ccotter14 at bloomberg.net>
Date: Fri, 13 Feb 2026 20:03:56 +0000
Subject: [PATCH 7/7] format
---
.../lib/tsan/rtl/tsan_fuzzing_scheduler.cpp | 45 +++++++------------
.../lib/tsan/rtl/tsan_fuzzing_scheduler.h | 28 +++++++-----
.../tsan/rtl/tsan_fuzzing_scheduler_data.h | 8 ++--
3 files changed, 36 insertions(+), 45 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
index 806ed0318b370..bad0218ac26ff 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.cpp
@@ -177,7 +177,8 @@ struct AdaptiveDelayImpl {
u64 total_delay_ns = TLS()->delay_buckets_ns_[1];
return Percent::FromRatio(total_delay_ns, elapsed_ns);
} else {
- u64 total_delay_ns = TLS()->delay_buckets_ns_[0] + TLS()->delay_buckets_ns_[1];
+ u64 total_delay_ns =
+ TLS()->delay_buckets_ns_[0] + TLS()->delay_buckets_ns_[1];
u64 window_ns = TLS()->bucket0_window_ns + elapsed_ns;
return Percent::FromRatio(total_delay_ns, window_ns);
}
@@ -294,9 +295,9 @@ struct AdaptiveDelayImpl {
VPrintf(1, " Sync atomic sample rate: 1/%d\n", sync_atomic_sample_rate_);
VPrintf(1, " Mutex sample rate: 1/%d\n", mutex_sample_rate_);
VPrintf(1, " Atomic delay: %s=%d (~%llu ns)\n", atomic_delay_.TypeName(),
- atomic_delay_.value, atomic_delay_.EstimatedNs());
+ atomic_delay_.value, atomic_delay_.EstimatedNs());
VPrintf(1, " Sync delay: %s=%d (~%llu ns)\n", sync_delay_.TypeName(),
- sync_delay_.value, sync_delay_.EstimatedNs());
+ sync_delay_.value, sync_delay_.EstimatedNs());
}
void DoSpinDelay(int cycles) {
@@ -392,22 +393,16 @@ struct AdaptiveDelayImpl {
ExecuteDelay(sync_delay_);
}
- void JoinOp() {
- UnsampledDelay();
- }
+ void JoinOp() { UnsampledDelay(); }
void BeforeChildThreadRuns() {
InitTls();
UnsampledDelay();
}
- void AfterThreadCreation() {
- UnsampledDelay();
- }
+ void AfterThreadCreation() { UnsampledDelay(); }
- void DetachThread() {
- UnsampledDelay();
- }
+ void DetachThread() { UnsampledDelay(); }
};
AdaptiveDelayImpl& GetImpl() {
@@ -419,30 +414,20 @@ AdaptiveDelayImpl& GetImpl() {
bool is_adaptive_delay_enabled;
-void AdaptiveDelay::InitImpl() {
- GetImpl().Init();
-}
+void AdaptiveDelay::InitImpl() { GetImpl().Init(); }
-void AdaptiveDelay::MutexCvOpImpl() {
- GetImpl().MutexCvOp();
-}
-void AdaptiveDelay::AtomicOpFenceImpl(int mo) {
- GetImpl().AtomicOpFence(mo);
-}
+void AdaptiveDelay::MutexCvOpImpl() { GetImpl().MutexCvOp(); }
+void AdaptiveDelay::AtomicOpFenceImpl(int mo) { GetImpl().AtomicOpFence(mo); }
void AdaptiveDelay::AtomicOpAddrImpl(__sanitizer::uptr addr, int mo) {
- GetImpl().AtomicOpAddr(addr, mo);
-}
-void AdaptiveDelay::DetachThreadImpl() {
- GetImpl().DetachThread();
+ GetImpl().AtomicOpAddr(addr, mo);
}
+void AdaptiveDelay::DetachThreadImpl() { GetImpl().DetachThread(); }
void AdaptiveDelay::AfterThreadCreationImpl() {
- GetImpl().AfterThreadCreation();
+ GetImpl().AfterThreadCreation();
}
void AdaptiveDelay::BeforeChildThreadRunsImpl() {
- GetImpl().BeforeChildThreadRuns();
-}
-void AdaptiveDelay::JoinOpImpl() {
- GetImpl().JoinOp();
+ GetImpl().BeforeChildThreadRuns();
}
+void AdaptiveDelay::JoinOpImpl() { GetImpl().JoinOp(); }
} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
index 9e6d24127ddc0..8751de104cc0c 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler.h
@@ -25,47 +25,51 @@ extern bool is_adaptive_delay_enabled;
// races. The delay injection is controlled by a time budget to maintain a
// configurable overhead target.
struct AdaptiveDelay {
- ALWAYS_INLINE static void Init() {
- InitImpl();
- }
+ ALWAYS_INLINE static void Init() { InitImpl(); }
ALWAYS_INLINE static void MutexCvOp() {
- if (!is_adaptive_delay_enabled) return;
+ if (!is_adaptive_delay_enabled)
+ return;
MutexCvOpImpl();
}
ALWAYS_INLINE static void AtomicOpFence(int mo) {
- if (!is_adaptive_delay_enabled) return;
+ if (!is_adaptive_delay_enabled)
+ return;
AtomicOpFenceImpl(mo);
}
ALWAYS_INLINE static void AtomicOpAddr(__sanitizer::uptr addr, int mo) {
- if (!is_adaptive_delay_enabled) return;
+ if (!is_adaptive_delay_enabled)
+ return;
AtomicOpAddrImpl(addr, mo);
}
ALWAYS_INLINE static void DetachThread() {
- if (!is_adaptive_delay_enabled) return;
+ if (!is_adaptive_delay_enabled)
+ return;
DetachThreadImpl();
}
ALWAYS_INLINE static void AfterThreadCreation() {
- if (!is_adaptive_delay_enabled) return;
+ if (!is_adaptive_delay_enabled)
+ return;
AfterThreadCreationImpl();
}
ALWAYS_INLINE static void BeforeChildThreadRuns() {
- if (!is_adaptive_delay_enabled) return;
+ if (!is_adaptive_delay_enabled)
+ return;
BeforeChildThreadRunsImpl();
}
ALWAYS_INLINE static void JoinOp() {
- if (!is_adaptive_delay_enabled) return;
+ if (!is_adaptive_delay_enabled)
+ return;
JoinOpImpl();
}
-private:
-
+ private:
static void InitImpl();
static void MutexCvOpImpl();
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
index 6e6e51440f42d..78d052ffdcf8f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_fuzzing_scheduler_data.h
@@ -26,9 +26,11 @@ struct AdaptiveDelayTlsData {
// Sliding window delay tracking: 2 buckets of 30 seconds each
u64 delay_buckets_ns_[2]; // [0] = older 30s, [1] = newer 30s
u64 bucket_start_ns_; // When current bucket (index 1) started
-  u64 bucket0_window_ns;  // 0 before the first bucket has rolled; set to the bucket window duration after.
-  // Before the program has run for one full bucket window, we should not count
-  // the previous bucket's duration in the overhead percent calculation.
+  u64 bucket0_window_ns;  // 0 before the first bucket has rolled; set to the
+                          // bucket window duration after. Before the program
+                          // has run for one full bucket window, we should not
+                          // count the previous bucket's duration in the
+                          // overhead percent calculation.
unsigned int tls_random_seed_;
bool tls_initialized_;
};
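
For anyone who wants to try the series locally, a small reproducer of the kind of race the delays aim to widen. The TSAN_OPTIONS shown use the flags added above (verbosity=1 gates the VPrintf banner); whether this particular race fires on a given run is still timing-dependent:

// race_demo.cpp: tiny reproducer to exercise the adaptive delay path.
// Build: clang++ -fsanitize=thread -g -O1 race_demo.cpp -o race_demo
// Run:   TSAN_OPTIONS="enable_adaptive_delay=1 adaptive_delay_aggressiveness=25 verbosity=1" ./race_demo
#include <thread>

int shared = 0;  // written by two threads with no synchronization

int main() {
  std::thread t([] { shared = 1; });
  shared = 2;  // races with the store in t
  t.join();
}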
More information about the llvm-commits
mailing list