[compiler-rt] r221194 - [TSan] Use StackTrace from sanitizer_common where applicable

Alexey Samsonov vonosmas at gmail.com
Mon Nov 3 14:23:45 PST 2014


Author: samsonov
Date: Mon Nov  3 16:23:44 2014
New Revision: 221194

URL: http://llvm.org/viewvc/llvm-project?rev=221194&view=rev
Log:
[TSan] Use StackTrace from sanitizer_common where applicable

Summary:
This change removes the `__tsan::StackTrace` class. There are
now three alternatives:
  1. Lightweight `__sanitizer::StackTrace`, which doesn't own a buffer
  of PCs. It is used by functions that need stack traces in read-only
  mode, and helps avoid unnecessary allocations/copies (e.g. for
  stack traces fetched from the StackDepot).
  2. `__sanitizer::BufferedStackTrace`, which stores a buffer of PCs in
  a fixed-size member array. It is used in TraceHeader (non-Go version).
  3. `__tsan::VarSizeStackTrace`, which owns a buffer of PCs dynamically
  allocated via the TSan internal allocator.
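
A minimal usage sketch of the three flavors (not part of the patch; the
wrapper function and its arguments are hypothetical, and it assumes the
compiler-rt internal headers are on the include path — class and function
names come from the files modified below):

  #include "sanitizer_common/sanitizer_stackdepot.h"   // StackDepotGet
  #include "sanitizer_common/sanitizer_stacktrace.h"   // StackTrace, BufferedStackTrace
  #include "tsan_rtl.h"                                 // ThreadState, ObtainCurrentStack
  #include "tsan_stack_trace.h"                         // VarSizeStackTrace

  namespace __tsan {

  void StackTraceFlavorsSketch(ThreadState *thr, uptr pc, u32 depot_id) {
    // 1. Read-only view: StackTrace does not own its PC buffer, so fetching
    //    a trace from the stack depot involves no allocation or copy.
    StackTrace depot_stack = StackDepotGet(depot_id);
    (void)depot_stack.size;  // depot_stack.trace[i] holds the PCs

    // 2. Fixed-capacity buffer: BufferedStackTrace keeps up to kStackTraceMax
    //    PCs in a member array (used for TraceHeader::stack0, non-Go build).
    BufferedStackTrace buffered;
    ObtainCurrentStack(thr, pc, &buffered);

    // 3. Variable-size buffer: VarSizeStackTrace allocates a buffer sized to
    //    the captured stack via TSan's internal allocator and frees it in
    //    its destructor.
    VarSizeStackTrace var_size;
    ObtainCurrentStack(thr, pc, &var_size);
  }

  }  // namespace __tsan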

Test Plan: compiler-rt test suite

Reviewers: dvyukov, kcc

Reviewed By: kcc

Subscribers: llvm-commits, kcc

Differential Revision: http://reviews.llvm.org/D6004

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h
    compiler-rt/trunk/lib/tsan/tests/unit/tsan_stack_test.cc

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.cc Mon Nov  3 16:23:44 2014
@@ -36,6 +36,15 @@ uptr StackTrace::GetCurrentPc() {
   return GET_CALLER_PC();
 }
 
+void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+  size = cnt + !!extra_top_pc;
+  CHECK_LE(size, kStackTraceMax);
+  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+  if (extra_top_pc)
+    trace_buffer[cnt] = extra_top_pc;
+  top_frame_bp = 0;
+}
+
 // Check if given pointer points into allocated stack area.
 static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
   return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.h?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_stacktrace.h Mon Nov  3 16:23:44 2014
@@ -64,6 +64,7 @@ struct BufferedStackTrace : public Stack
 
   BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
 
+  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
   void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
               uptr stack_bottom, bool request_fast_unwind);
 
@@ -75,6 +76,9 @@ struct BufferedStackTrace : public Stack
                                   uptr max_depth);
   void PopStackFrames(uptr count);
   uptr LocatePcInTrace(uptr pc);
+
+  BufferedStackTrace(const BufferedStackTrace &);
+  void operator=(const BufferedStackTrace &);
 };
 
 }  // namespace __sanitizer

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_defs.h Mon Nov  3 16:23:44 2014
@@ -43,7 +43,6 @@ const unsigned kMaxTidInClock = kMaxTid
 const int kClkBits = 42;
 const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
 const uptr kShadowStackSize = 64 * 1024;
-const uptr kTraceStackSize = 256;
 
 #ifdef TSAN_SHADOW_COUNT
 # if TSAN_SHADOW_COUNT == 2 \
@@ -174,7 +173,6 @@ struct Context;
 struct ReportStack;
 class ReportDesc;
 class RegionAlloc;
-class StackTrace;
 
 // Descriptor of user's memory block.
 struct MBlock {

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc Mon Nov  3 16:23:44 2014
@@ -216,7 +216,7 @@ ScopedInterceptor::~ScopedInterceptor()
     ThreadState *thr = cur_thread(); \
     const uptr caller_pc = GET_CALLER_PC(); \
     ScopedInterceptor si(thr, #func, caller_pc); \
-    const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+    const uptr pc = StackTrace::GetCurrentPc(); \
     (void)pc; \
 /**/
 
@@ -1884,12 +1884,12 @@ static void CallUserSignalHandler(Thread
   // from rtl_generic_sighandler) we have not yet received the reraised
   // signal; and it looks too fragile to intercept all ways to reraise a signal.
   if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
-    __tsan::StackTrace stack;
-    stack.ObtainCurrent(thr, pc);
+    VarSizeStackTrace stack;
+    ObtainCurrentStack(thr, pc, &stack);
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeErrnoInSignal);
     if (!IsFiredSuppression(ctx, rep, stack)) {
-      rep.AddStack(&stack, true);
+      rep.AddStack(stack, true);
       OutputReport(thr, rep);
     }
   }

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc Mon Nov  3 16:23:44 2014
@@ -54,7 +54,7 @@ class ScopedAnnotation {
     StatInc(thr, StatAnnotation); \
     StatInc(thr, Stat##typ); \
     ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
-    const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+    const uptr pc = StackTrace::GetCurrentPc(); \
     (void)pc; \
 /**/
 

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_atomic.cc Mon Nov  3 16:23:44 2014
@@ -474,7 +474,7 @@ static void AtomicFence(ThreadState *thr
 
 #define SCOPED_ATOMIC(func, ...) \
     const uptr callpc = (uptr)__builtin_return_address(0); \
-    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+    uptr pc = StackTrace::GetCurrentPc(); \
     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
     ThreadState *const thr = cur_thread(); \
     if (thr->ignore_interceptors) \

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interface_java.cc Mon Nov  3 16:23:44 2014
@@ -61,7 +61,7 @@ static JavaContext *jctx;
 #define SCOPED_JAVA_FUNC(func) \
   ThreadState *thr = cur_thread(); \
   const uptr caller_pc = GET_CALLER_PC(); \
-  const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+  const uptr pc = StackTrace::GetCurrentPc(); \
   (void)pc; \
   ScopedJavaFunc scoped(thr, caller_pc); \
 /**/

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc Mon Nov  3 16:23:44 2014
@@ -66,12 +66,12 @@ static void SignalUnsafeCall(ThreadState
   if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
       !flags()->report_signal_unsafe)
     return;
-  StackTrace stack;
-  stack.ObtainCurrent(thr, pc);
+  VarSizeStackTrace stack;
+  ObtainCurrentStack(thr, pc, &stack);
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(ReportTypeSignalUnsafe);
   if (!IsFiredSuppression(ctx, rep, stack)) {
-    rep.AddStack(&stack, true);
+    rep.AddStack(stack, true);
     OutputReport(thr, rep);
   }
 }

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc Mon Nov  3 16:23:44 2014
@@ -462,8 +462,8 @@ u32 CurrentStackId(ThreadState *thr, upt
     thr->shadow_stack_pos[0] = pc;
     thr->shadow_stack_pos++;
   }
-  u32 id = StackDepotPut(__sanitizer::StackTrace(
-      thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+  u32 id = StackDepotPut(
+      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
   if (pc != 0)
     thr->shadow_stack_pos--;
   return id;
@@ -476,7 +476,7 @@ void TraceSwitch(ThreadState *thr) {
   unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
   TraceHeader *hdr = &thr_trace->headers[trace];
   hdr->epoch0 = thr->fast_state.epoch();
-  hdr->stack0.ObtainCurrent(thr, 0);
+  ObtainCurrentStack(thr, 0, &hdr->stack0);
   hdr->mset0 = thr->mset;
   thr->nomalloc--;
 }

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h Mon Nov  3 16:23:44 2014
@@ -498,9 +498,9 @@ class ScopedReport {
   explicit ScopedReport(ReportType typ);
   ~ScopedReport();
 
-  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                        const MutexSet *mset);
-  void AddStack(const StackTrace *stack, bool suppressable = false);
+  void AddStack(StackTrace stack, bool suppressable = false);
   void AddThread(const ThreadContext *tctx, bool suppressable = false);
   void AddThread(int unique_tid, bool suppressable = false);
   void AddUniqueTid(int unique_tid);
@@ -524,7 +524,20 @@ class ScopedReport {
   void operator = (const ScopedReport&);
 };
 
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+                  MutexSet *mset);
+
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
+  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+  uptr start = 0;
+  if (size + !!toppc > kStackTraceMax) {
+    start = size + !!toppc - kStackTraceMax;
+    size = kStackTraceMax - !!toppc;
+  }
+  stack->Init(&thr->shadow_stack[start], size, toppc);
+}
+
 
 void StatAggregate(u64 *dst, u64 *src);
 void StatOutput(u64 *stat);
@@ -551,9 +564,8 @@ void ForkChildAfter(ThreadState *thr, up
 
 void ReportRace(ThreadState *thr);
 bool OutputReport(ThreadState *thr, const ScopedReport &srep);
-bool IsFiredSuppression(Context *ctx,
-                        const ScopedReport &srep,
-                        const StackTrace &trace);
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+                        StackTrace trace);
 bool IsExpectedReport(uptr addr, uptr size);
 void PrintMatchedBenignRaces();
 bool FrameIsInternal(const ReportStack *frame);

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_mutex.cc Mon Nov  3 16:23:44 2014
@@ -59,9 +59,9 @@ static void ReportMutexMisuse(ThreadStat
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(typ);
   rep.AddMutex(mid);
-  StackTrace trace;
-  trace.ObtainCurrent(thr, pc);
-  rep.AddStack(&trace, true);
+  VarSizeStackTrace trace;
+  ObtainCurrentStack(thr, pc, &trace);
+  rep.AddStack(trace, true);
   rep.AddLocation(addr, 1);
   OutputReport(thr, rep);
 }
@@ -124,12 +124,12 @@ void MutexDestroy(ThreadState *thr, uptr
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeMutexDestroyLocked);
     rep.AddMutex(mid);
-    StackTrace trace;
-    trace.ObtainCurrent(thr, pc);
-    rep.AddStack(&trace);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep.AddStack(trace);
     FastState last(last_lock);
     RestoreStack(last.tid(), last.epoch(), &trace, 0);
-    rep.AddStack(&trace, true);
+    rep.AddStack(trace, true);
     rep.AddLocation(addr, 1);
     OutputReport(thr, rep);
   }
@@ -472,20 +472,17 @@ void ReportDeadlock(ThreadState *thr, up
     rep.AddUniqueTid((int)r->loop[i].thr_ctx);
     rep.AddThread((int)r->loop[i].thr_ctx);
   }
-  InternalScopedBuffer<StackTrace> stacks(2 * DDReport::kMaxLoopSize);
   uptr dummy_pc = 0x42;
   for (int i = 0; i < r->n; i++) {
     for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
       u32 stk = r->loop[i].stk[j];
       if (stk) {
-        __sanitizer::StackTrace stack = StackDepotGet(stk);
-        stacks[i].Init(const_cast<uptr *>(stack.trace), stack.size);
+        rep.AddStack(StackDepotGet(stk), true);
       } else {
         // Sometimes we fail to extract the stack trace (FIXME: investigate),
         // but we should still produce some stack trace in the report.
-        stacks[i].Init(&dummy_pc, 1);
+        rep.AddStack(StackTrace(&dummy_pc, 1), true);
       }
-      rep.AddStack(&stacks[i], true);
     }
   }
   OutputReport(thr, rep);

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc Mon Nov  3 16:23:44 2014
@@ -30,7 +30,7 @@ namespace __tsan {
 
 using namespace __sanitizer;  // NOLINT
 
-static ReportStack *SymbolizeStack(const StackTrace& trace);
+static ReportStack *SymbolizeStack(StackTrace trace);
 
 void TsanCheckFailed(const char *file, int line, const char *cond,
                      u64 v1, u64 v2) {
@@ -107,28 +107,26 @@ static void StackStripMain(ReportStack *
 ReportStack *SymbolizeStackId(u32 stack_id) {
   if (stack_id == 0)
     return 0;
-  __sanitizer::StackTrace stack = StackDepotGet(stack_id);
+  StackTrace stack = StackDepotGet(stack_id);
   if (stack.trace == nullptr)
-    return 0;
-  StackTrace trace;
-  trace.Init(stack.trace, stack.size);
-  return SymbolizeStack(trace);
+    return nullptr;
+  return SymbolizeStack(stack);
 }
 
-static ReportStack *SymbolizeStack(const StackTrace& trace) {
-  if (trace.IsEmpty())
+static ReportStack *SymbolizeStack(StackTrace trace) {
+  if (trace.size == 0)
     return 0;
   ReportStack *stack = 0;
-  for (uptr si = 0; si < trace.Size(); si++) {
-    const uptr pc = trace.Get(si);
+  for (uptr si = 0; si < trace.size; si++) {
+    const uptr pc = trace.trace[si];
 #ifndef TSAN_GO
     // We obtain the return address, that is, address of the next instruction,
     // so offset it by 1 byte.
-    const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
+    const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
 #else
     // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
     uptr pc1 = pc;
-    if (si != trace.Size() - 1)
+    if (si != trace.size - 1)
       pc1 -= 1;
 #endif
     ReportStack *ent = SymbolizeCode(pc1);
@@ -161,14 +159,14 @@ ScopedReport::~ScopedReport() {
   DestroyAndFree(rep_);
 }
 
-void ScopedReport::AddStack(const StackTrace *stack, bool suppressable) {
+void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
   ReportStack **rs = rep_->stacks.PushBack();
-  *rs = SymbolizeStack(*stack);
+  *rs = SymbolizeStack(stack);
   (*rs)->suppressable = suppressable;
 }
 
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
-    const StackTrace *stack, const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+                                   const MutexSet *mset) {
   void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
   ReportMop *mop = new(mem) ReportMop;
   rep_->mops.PushBack(mop);
@@ -177,7 +175,7 @@ void ScopedReport::AddMemoryAccess(uptr
   mop->size = s.size();
   mop->write = s.IsWrite();
   mop->atomic = s.IsAtomic();
-  mop->stack = SymbolizeStack(*stack);
+  mop->stack = SymbolizeStack(stack);
   if (mop->stack)
     mop->stack->suppressable = true;
   for (uptr i = 0; i < mset->Size(); i++) {
@@ -385,7 +383,8 @@ const ReportDesc *ScopedReport::GetRepor
   return rep_;
 }
 
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+                  MutexSet *mset) {
   // This function restores stack trace and mutex set for the thread/epoch.
   // It does so by getting stack trace and mutex set at the beginning of
   // trace part, and then replaying the trace till the given epoch.
@@ -410,13 +409,13 @@ void RestoreStack(int tid, const u64 epo
   DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
           tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
   InternalScopedBuffer<uptr> stack(kShadowStackSize);
-  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
-    stack[i] = hdr->stack0.Get(i);
+  for (uptr i = 0; i < hdr->stack0.size; i++) {
+    stack[i] = hdr->stack0.trace[i];
     DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
   }
   if (mset)
     *mset = hdr->mset0;
-  uptr pos = hdr->stack0.Size();
+  uptr pos = hdr->stack0.size;
   Event *events = (Event*)GetThreadTrace(tid);
   for (uptr i = ebegin; i <= eend; i++) {
     Event ev = events[i];
@@ -451,13 +450,13 @@ void RestoreStack(int tid, const u64 epo
   stk->Init(stack.data(), pos);
 }
 
-static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
-    uptr addr_min, uptr addr_max) {
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+                             uptr addr_min, uptr addr_max) {
   bool equal_stack = false;
   RacyStacks hash;
   if (flags()->suppress_equal_stacks) {
-    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
-    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
     for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
       if (hash == ctx->racy_stacks[i]) {
         DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
@@ -490,12 +489,12 @@ static bool HandleRacyStacks(ThreadState
   return false;
 }
 
-static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
-    uptr addr_min, uptr addr_max) {
+static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+                          uptr addr_min, uptr addr_max) {
   if (flags()->suppress_equal_stacks) {
     RacyStacks hash;
-    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
-    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
     ctx->racy_stacks.PushBack(hash);
   }
   if (flags()->suppress_equal_addresses) {
@@ -536,15 +535,14 @@ bool OutputReport(ThreadState *thr, cons
   return true;
 }
 
-bool IsFiredSuppression(Context *ctx,
-                        const ScopedReport &srep,
-                        const StackTrace &trace) {
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+                        StackTrace trace) {
   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
     if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
       continue;
-    for (uptr j = 0; j < trace.Size(); j++) {
+    for (uptr j = 0; j < trace.size; j++) {
       FiredSuppression *s = &ctx->fired_suppressions[k];
-      if (trace.Get(j) == s->pc) {
+      if (trace.trace[j] == s->pc) {
         if (s->supp)
           s->supp->hit_count++;
         return true;
@@ -636,9 +634,9 @@ void ReportRace(ThreadState *thr) {
   if (IsFiredSuppression(ctx, rep, addr))
     return;
   const uptr kMop = 2;
-  StackTrace traces[kMop];
+  VarSizeStackTrace traces[kMop];
   const uptr toppc = TraceTopPC(thr);
-  traces[0].ObtainCurrent(thr, toppc);
+  ObtainCurrentStack(thr, toppc, &traces[0]);
   if (IsFiredSuppression(ctx, rep, traces[0]))
     return;
   InternalScopedBuffer<MutexSet> mset2(1);
@@ -653,7 +651,7 @@ void ReportRace(ThreadState *thr) {
 
   for (uptr i = 0; i < kMop; i++) {
     Shadow s(thr->racy_state[i]);
-    rep.AddMemoryAccess(addr, s, &traces[i],
+    rep.AddMemoryAccess(addr, s, traces[i],
                         i == 0 ? &thr->mset : mset2.data());
   }
 
@@ -683,26 +681,23 @@ void ReportRace(ThreadState *thr) {
 }
 
 void PrintCurrentStack(ThreadState *thr, uptr pc) {
-  StackTrace trace;
-  trace.ObtainCurrent(thr, pc);
+  VarSizeStackTrace trace;
+  ObtainCurrentStack(thr, pc, &trace);
   PrintStack(SymbolizeStack(trace));
 }
 
 void PrintCurrentStackSlow() {
 #ifndef TSAN_GO
-  __sanitizer::BufferedStackTrace *ptrace = new(
-      internal_alloc(MBlockStackTrace, sizeof(__sanitizer::BufferedStackTrace)))
-      __sanitizer::BufferedStackTrace();
-  ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
-                 0, 0, false);
+  BufferedStackTrace *ptrace =
+      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+          BufferedStackTrace();
+  ptrace->Unwind(kStackTraceMax, StackTrace::GetCurrentPc(), 0, 0, 0, 0, false);
   for (uptr i = 0; i < ptrace->size / 2; i++) {
     uptr tmp = ptrace->trace_buffer[i];
     ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
     ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
   }
-  StackTrace trace;
-  trace.Init(ptrace->trace, ptrace->size);
-  PrintStack(SymbolizeStack(trace));
+  PrintStack(SymbolizeStack(*ptrace));
 #endif
 }
 

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.cc Mon Nov  3 16:23:44 2014
@@ -16,91 +16,31 @@
 
 namespace __tsan {
 
-StackTrace::StackTrace()
-    : n_()
-    , s_()
-    , c_() {
-}
-
-StackTrace::StackTrace(uptr *buf, uptr cnt)
-    : n_()
-    , s_(buf)
-    , c_(cnt) {
-  CHECK_NE(buf, 0);
-  CHECK_NE(cnt, 0);
-}
-
-StackTrace::~StackTrace() {
-  Reset();
-}
-
-void StackTrace::Reset() {
-  if (s_ && !c_) {
-    CHECK_NE(n_, 0);
-    internal_free(s_);
-    s_ = 0;
-  }
-  n_ = 0;
-}
+VarSizeStackTrace::VarSizeStackTrace()
+    : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
 
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
-  Reset();
-  if (cnt == 0)
-    return;
-  if (c_) {
-    CHECK_NE(s_, 0);
-    CHECK_LE(cnt, c_);
-  } else {
-    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
-  }
-  n_ = cnt;
-  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+VarSizeStackTrace::~VarSizeStackTrace() {
+  ResizeBuffer(0);
 }
 
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
-  Reset();
-  n_ = thr->shadow_stack_pos - thr->shadow_stack;
-  if (n_ + !!toppc == 0)
-    return;
-  uptr start = 0;
-  if (c_) {
-    CHECK_NE(s_, 0);
-    if (n_ + !!toppc > c_) {
-      start = n_ - c_ + !!toppc;
-      n_ = c_ - !!toppc;
-    }
-  } else {
-    // Cap potentially huge stacks.
-    if (n_ + !!toppc > kTraceStackSize) {
-      start = n_ - kTraceStackSize + !!toppc;
-      n_ = kTraceStackSize - !!toppc;
-    }
-    s_ = (uptr*)internal_alloc(MBlockStackTrace,
-                               (n_ + !!toppc) * sizeof(s_[0]));
-  }
-  for (uptr i = 0; i < n_; i++)
-    s_[i] = thr->shadow_stack[start + i];
-  if (toppc) {
-    s_[n_] = toppc;
-    n_++;
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+  if (trace_buffer) {
+    internal_free(trace_buffer);
   }
+  trace_buffer =
+      (new_size > 0)
+          ? (uptr *)internal_alloc(MBlockStackTrace,
+                                   new_size * sizeof(trace_buffer[0]))
+          : nullptr;
+  trace = trace_buffer;
+  size = new_size;
 }
 
-bool StackTrace::IsEmpty() const {
-  return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
-  return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
-  CHECK_LT(i, n_);
-  return s_[i];
-}
-
-const uptr *StackTrace::Begin() const {
-  return s_;
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+  ResizeBuffer(cnt + !!extra_top_pc);
+  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+  if (extra_top_pc)
+    trace_buffer[cnt] = extra_top_pc;
 }
 
 }  // namespace __tsan

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_stack_trace.h Mon Nov  3 16:23:44 2014
@@ -13,34 +13,25 @@
 #ifndef TSAN_STACK_TRACE_H
 #define TSAN_STACK_TRACE_H
 
+#include "sanitizer_common/sanitizer_stacktrace.h"
 #include "tsan_defs.h"
 
 namespace __tsan {
 
-// FIXME: Delete this class in favor of __sanitizer::StackTrace.
-class StackTrace {
- public:
-  StackTrace();
-  // Initialized the object in "static mode",
-  // in this mode it never calls malloc/free but uses the provided buffer.
-  StackTrace(uptr *buf, uptr cnt);
-  ~StackTrace();
-  void Reset();
-
-  void Init(const uptr *pcs, uptr cnt);
-  void ObtainCurrent(ThreadState *thr, uptr toppc);
-  bool IsEmpty() const;
-  uptr Size() const;
-  uptr Get(uptr i) const;
-  const uptr *Begin() const;
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+  uptr *trace_buffer;  // Owned.
+
+  VarSizeStackTrace();
+  ~VarSizeStackTrace();
+  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
 
  private:
-  uptr n_;
-  uptr *s_;
-  const uptr c_;
+  void ResizeBuffer(uptr new_size);
 
-  StackTrace(const StackTrace&);
-  void operator = (const StackTrace&);
+  VarSizeStackTrace(const VarSizeStackTrace &);
+  void operator=(const VarSizeStackTrace &);
 };
 
 }  // namespace __tsan

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_trace.h Mon Nov  3 16:23:44 2014
@@ -42,21 +42,15 @@ enum EventType {
 typedef u64 Event;
 
 struct TraceHeader {
-  StackTrace stack0;  // Start stack for the trace.
-  u64        epoch0;  // Start epoch for the trace.
-  MutexSet   mset0;
-#ifndef TSAN_GO
-  uptr       stack0buf[kTraceStackSize];
-#endif
-
-  TraceHeader()
 #ifndef TSAN_GO
-      : stack0(stack0buf, kTraceStackSize)
+  BufferedStackTrace stack0;  // Start stack for the trace.
 #else
-      : stack0()
+  VarSizeStackTrace stack0;
 #endif
-      , epoch0() {
-  }
+  u64        epoch0;  // Start epoch for the trace.
+  MutexSet   mset0;
+
+  TraceHeader() : stack0(), epoch0() {}
 };
 
 struct Trace {

Modified: compiler-rt/trunk/lib/tsan/tests/unit/tsan_stack_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/tests/unit/tsan_stack_test.cc?rev=221194&r1=221193&r2=221194&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/tests/unit/tsan_stack_test.cc (original)
+++ compiler-rt/trunk/lib/tsan/tests/unit/tsan_stack_test.cc Mon Nov  3 16:23:44 2014
@@ -17,70 +17,79 @@
 
 namespace __tsan {
 
-static void TestStackTrace(StackTrace *trace) {
+template <typename StackTraceTy>
+static void TestStackTrace(StackTraceTy *trace) {
   ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
   uptr stack[128];
   thr.shadow_stack = &stack[0];
   thr.shadow_stack_pos = &stack[0];
   thr.shadow_stack_end = &stack[128];
 
-  trace->ObtainCurrent(&thr, 0);
-  EXPECT_EQ(trace->Size(), (uptr)0);
+  ObtainCurrentStack(&thr, 0, trace);
+  EXPECT_EQ(0U, trace->size);
 
-  trace->ObtainCurrent(&thr, 42);
-  EXPECT_EQ(trace->Size(), (uptr)1);
-  EXPECT_EQ(trace->Get(0), (uptr)42);
+  ObtainCurrentStack(&thr, 42, trace);
+  EXPECT_EQ(1U, trace->size);
+  EXPECT_EQ(42U, trace->trace[0]);
 
   *thr.shadow_stack_pos++ = 100;
   *thr.shadow_stack_pos++ = 101;
-  trace->ObtainCurrent(&thr, 0);
-  EXPECT_EQ(trace->Size(), (uptr)2);
-  EXPECT_EQ(trace->Get(0), (uptr)100);
-  EXPECT_EQ(trace->Get(1), (uptr)101);
-
-  trace->ObtainCurrent(&thr, 42);
-  EXPECT_EQ(trace->Size(), (uptr)3);
-  EXPECT_EQ(trace->Get(0), (uptr)100);
-  EXPECT_EQ(trace->Get(1), (uptr)101);
-  EXPECT_EQ(trace->Get(2), (uptr)42);
+  ObtainCurrentStack(&thr, 0, trace);
+  EXPECT_EQ(2U, trace->size);
+  EXPECT_EQ(100U, trace->trace[0]);
+  EXPECT_EQ(101U, trace->trace[1]);
+
+  ObtainCurrentStack(&thr, 42, trace);
+  EXPECT_EQ(3U, trace->size);
+  EXPECT_EQ(100U, trace->trace[0]);
+  EXPECT_EQ(101U, trace->trace[1]);
+  EXPECT_EQ(42U, trace->trace[2]);
 }
 
-TEST(StackTrace, Basic) {
-  StackTrace trace;
-  TestStackTrace(&trace);
-}
+template<typename StackTraceTy>
+static void TestTrim(StackTraceTy *trace) {
+  ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
+  const uptr kShadowStackSize = 2 * kStackTraceMax;
+  uptr stack[kShadowStackSize];
+  thr.shadow_stack = &stack[0];
+  thr.shadow_stack_pos = &stack[0];
+  thr.shadow_stack_end = &stack[kShadowStackSize];
 
-TEST(StackTrace, StaticBasic) {
-  uptr buf[10];
-  StackTrace trace1(buf, 10);
-  TestStackTrace(&trace1);
-  StackTrace trace2(buf, 3);
-  TestStackTrace(&trace2);
+  for (uptr i = 0; i < kShadowStackSize; ++i)
+    *thr.shadow_stack_pos++ = 100 + i;
+
+  ObtainCurrentStack(&thr, 0, trace);
+  EXPECT_EQ(kStackTraceMax, trace->size);
+  for (uptr i = 0; i < kStackTraceMax; i++) {
+    EXPECT_EQ(100 + kStackTraceMax + i, trace->trace[i]);
+  }
+
+  ObtainCurrentStack(&thr, 42, trace);
+  EXPECT_EQ(kStackTraceMax, trace->size);
+  for (uptr i = 0; i < kStackTraceMax - 1; i++) {
+    EXPECT_EQ(101 + kStackTraceMax + i, trace->trace[i]);
+  }
+  EXPECT_EQ(42U, trace->trace[kStackTraceMax - 1]);
 }
 
-TEST(StackTrace, StaticTrim) {
-  uptr buf[2];
-  StackTrace trace(buf, 2);
+TEST(StackTrace, BasicVarSize) {
+  VarSizeStackTrace trace;
+  TestStackTrace(&trace);
+}
 
-  ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
-  uptr stack[128];
-  thr.shadow_stack = &stack[0];
-  thr.shadow_stack_pos = &stack[0];
-  thr.shadow_stack_end = &stack[128];
+TEST(StackTrace, BasicBuffered) {
+  BufferedStackTrace trace;
+  TestStackTrace(&trace);
+}
 
-  *thr.shadow_stack_pos++ = 100;
-  *thr.shadow_stack_pos++ = 101;
-  *thr.shadow_stack_pos++ = 102;
-  trace.ObtainCurrent(&thr, 0);
-  EXPECT_EQ(trace.Size(), (uptr)2);
-  EXPECT_EQ(trace.Get(0), (uptr)101);
-  EXPECT_EQ(trace.Get(1), (uptr)102);
-
-  trace.ObtainCurrent(&thr, 42);
-  EXPECT_EQ(trace.Size(), (uptr)2);
-  EXPECT_EQ(trace.Get(0), (uptr)102);
-  EXPECT_EQ(trace.Get(1), (uptr)42);
+TEST(StackTrace, TrimVarSize) {
+  VarSizeStackTrace trace;
+  TestTrim(&trace);
 }
 
+TEST(StackTrace, TrimBuffered) {
+  BufferedStackTrace trace;
+  TestTrim(&trace);
+}
 
 }  // namespace __tsan




