[compiler-rt] r209284 - [msan] Chained origins re-design.

Evgeniy Stepanov eugeni.stepanov at gmail.com
Wed May 21 02:02:14 PDT 2014


Author: eugenis
Date: Wed May 21 04:02:13 2014
New Revision: 209284

URL: http://llvm.org/viewvc/llvm-project?rev=209284&view=rev
Log:
[msan] Chained origins re-design.

Generalize StackDepot and create a new specialized instance of it to
efficiently (i.e. without duplicating stack trace data) store the
origin history tree.

This reduces memory usage for chained origins roughly by an order of
magnitude.

Most importantly, the new design lets us impose two limits on stored
history data (exposed in MSAN_OPTIONS) that help avoid exponential
growth in memory usage on certain workloads.

See comments in lib/msan/msan_origin.h for more details.
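
For illustration, a minimal sketch of a program that exercises the new
limits (a hypothetical test; the flag names come from this patch, and the
chained-origins instrumentation level -fsanitize-memory-track-origins=2
is assumed):

    // Build (sketch): clang++ -fsanitize=memory -fsanitize-memory-track-origins=2 t.cc
    // Run with the limits added in this patch, e.g.:
    //   MSAN_OPTIONS=origin_history_size=7,origin_history_per_stack_limit=20000 ./a.out
    #include <stdio.h>
    int main() {
      int *p = new int[10];   // uninitialized heap memory: the heap root of the chain
      int q[10];
      q[0] = p[0];            // store of an uninitialized value: adds one chain link
      if (q[0])               // use of the value: MSan reports the whole chain
        printf("set\n");
      delete[] p;
      return 0;
    }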


Added:
    compiler-rt/trunk/lib/msan/msan_chained_origin_depot.cc   (with props)
    compiler-rt/trunk/lib/msan/msan_chained_origin_depot.h   (with props)
    compiler-rt/trunk/lib/msan/msan_origin.h   (with props)
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.cc   (with props)
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.h
      - copied, changed from r209116, compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepotbase.h
      - copied, changed from r209116, compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc
    compiler-rt/trunk/test/msan/chained_origin_limits.cc   (with props)
Modified:
    compiler-rt/trunk/include/sanitizer/msan_interface.h
    compiler-rt/trunk/lib/asan/asan_stats.cc
    compiler-rt/trunk/lib/msan/CMakeLists.txt
    compiler-rt/trunk/lib/msan/msan.cc
    compiler-rt/trunk/lib/msan/msan.h
    compiler-rt/trunk/lib/msan/msan_allocator.cc
    compiler-rt/trunk/lib/msan/msan_flags.h
    compiler-rt/trunk/lib/msan/msan_interceptors.cc
    compiler-rt/trunk/lib/msan/msan_interface_internal.h
    compiler-rt/trunk/lib/msan/msan_report.cc
    compiler-rt/trunk/lib/msan/tests/msan_test.cc
    compiler-rt/trunk/lib/sanitizer_common/CMakeLists.txt
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h

Modified: compiler-rt/trunk/include/sanitizer/msan_interface.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/include/sanitizer/msan_interface.h?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/include/sanitizer/msan_interface.h (original)
+++ compiler-rt/trunk/include/sanitizer/msan_interface.h Wed May 21 04:02:13 2014
@@ -19,10 +19,6 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-  /* Returns a string describing a stack origin.
-     Return NULL if the origin is invalid, or is not a stack origin. */
-  const char *__msan_get_origin_descr_if_stack(uint32_t id);
-
   /* Set raw origin for the memory range. */
   void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
 

Modified: compiler-rt/trunk/lib/asan/asan_stats.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_stats.cc?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_stats.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_stats.cc Wed May 21 04:02:13 2014
@@ -129,8 +129,8 @@ static void PrintAccumulatedStats() {
   BlockingMutexLock lock(&print_lock);
   stats.Print();
   StackDepotStats *stack_depot_stats = StackDepotGetStats();
-  Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
-         stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+  Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
+         stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
   PrintInternalAllocatorStats();
 }
 

Modified: compiler-rt/trunk/lib/msan/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/CMakeLists.txt?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/CMakeLists.txt (original)
+++ compiler-rt/trunk/lib/msan/CMakeLists.txt Wed May 21 04:02:13 2014
@@ -4,6 +4,7 @@ include_directories(..)
 set(MSAN_RTL_SOURCES
   msan.cc
   msan_allocator.cc
+  msan_chained_origin_depot.cc
   msan_interceptors.cc
   msan_linux.cc
   msan_new_delete.cc

Modified: compiler-rt/trunk/lib/msan/msan.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan.cc?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan.cc (original)
+++ compiler-rt/trunk/lib/msan/msan.cc Wed May 21 04:02:13 2014
@@ -13,6 +13,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
 #include "msan_thread.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_common.h"
@@ -117,6 +119,28 @@ static void ParseFlagsFromString(Flags *
     Printf("Exit code not in [0, 128) range: %d\n", f->exit_code);
     Die();
   }
+  ParseFlag(str, &f->origin_history_size, "origin_history_size", "");
+  if (f->origin_history_size < 0 ||
+      f->origin_history_size > Origin::kMaxDepth) {
+    Printf(
+        "Origin history size invalid: %d. Must be 0 (unlimited) or in [1, %d] "
+        "range.\n",
+        f->origin_history_size, Origin::kMaxDepth);
+    Die();
+  }
+  ParseFlag(str, &f->origin_history_per_stack_limit,
+            "origin_history_per_stack_limit", "");
+  // Limiting to kStackDepotMaxUseCount / 2 to avoid overflow in
+  // StackDepotHandle::inc_use_count_unsafe.
+  if (f->origin_history_per_stack_limit < 0 ||
+      f->origin_history_per_stack_limit > kStackDepotMaxUseCount / 2) {
+    Printf(
+        "Origin per-stack limit invalid: %d. Must be 0 (unlimited) or in [1, "
+        "%d] range.\n",
+        f->origin_history_per_stack_limit, kStackDepotMaxUseCount / 2);
+    Die();
+  }
+
   ParseFlag(str, &f->report_umrs, "report_umrs", "");
   ParseFlag(str, &f->wrap_signals, "wrap_signals", "");
 
@@ -143,6 +167,8 @@ static void InitializeFlags(Flags *f, co
   f->poison_in_malloc = true;
   f->poison_in_free = true;
   f->exit_code = 77;
+  f->origin_history_size = Origin::kMaxDepth;
+  f->origin_history_per_stack_limit = 20000;
   f->report_umrs = true;
   f->wrap_signals = true;
   f->halt_on_error = !&__msan_keep_going;
@@ -230,9 +256,7 @@ void ScopedThreadLocalStateBackup::Resto
 void UnpoisonThreadLocalState() {
 }
 
-const char *GetOriginDescrIfStack(u32 id, uptr *pc) {
-  if ((id >> 31) == 0) return 0;
-  id &= (1U << 31) - 1;
+const char *GetStackOriginDescr(u32 id, uptr *pc) {
   CHECK_LT(id, kNumStackOriginDescrs);
   if (pc) *pc = StackOriginPC[id];
   return StackOriginDescr[id];
@@ -242,10 +266,30 @@ u32 ChainOrigin(u32 id, StackTrace *stac
   MsanThread *t = GetCurrentThread();
   if (t && t->InSignalHandler())
     return id;
-  uptr idx = Min(stack->size, kStackTraceMax - 1);
-  stack->trace[idx] = TRACE_MAKE_CHAINED(id);
-  u32 new_id = StackDepotPut(stack->trace, idx + 1);
-  return new_id;
+
+  Origin o(id);
+  int depth = o.depth();
+  // 0 means unlimited depth.
+  if (flags()->origin_history_size > 0 && depth > 0) {
+    if (depth >= flags()->origin_history_size) {
+      return id;
+    } else {
+      ++depth;
+    }
+  }
+
+  StackDepotHandle h = StackDepotPut_WithHandle(stack->trace, stack->size);
+  if (!h.valid()) return id;
+  int use_count = h.use_count();
+  if (use_count > flags()->origin_history_per_stack_limit)
+    return id;
+
+  u32 chained_id;
+  bool inserted = ChainedOriginDepotPut(h.id(), o.id(), &chained_id);
+
+  if (inserted) h.inc_use_count_unsafe();
+
+  return Origin(chained_id, depth).raw_id();
 }
 
 }  // namespace __msan
@@ -514,16 +558,15 @@ void __msan_set_alloca_origin4(void *a,
   bool print = false;  // internal_strstr(descr + 4, "AllocaTOTest") != 0;
   u32 id = *id_ptr;
   if (id == first_timer) {
-    id = atomic_fetch_add(&NumStackOriginDescrs,
-                          1, memory_order_relaxed);
+    u32 idx = atomic_fetch_add(&NumStackOriginDescrs, 1, memory_order_relaxed);
+    CHECK_LT(idx, kNumStackOriginDescrs);
+    StackOriginDescr[idx] = descr + 4;
+    StackOriginPC[idx] = pc;
+    ChainedOriginDepotPut(idx, Origin::kStackRoot, &id);
     *id_ptr = id;
-    CHECK_LT(id, kNumStackOriginDescrs);
-    StackOriginDescr[id] = descr + 4;
-    StackOriginPC[id] = pc;
     if (print)
-      Printf("First time: id=%d %s %p \n", id, descr + 4, pc);
+      Printf("First time: idx=%d id=%d %s %p \n", idx, id, descr + 4, pc);
   }
-  id |= 1U << 31;
   if (print)
     Printf("__msan_set_alloca_origin: descr=%s id=%x\n", descr + 4, id);
   __msan_set_origin(a, size, id);
@@ -536,10 +579,6 @@ u32 __msan_chain_origin(u32 id) {
   return ChainOrigin(id, &stack);
 }
 
-const char *__msan_get_origin_descr_if_stack(u32 id) {
-  return GetOriginDescrIfStack(id, 0);
-}
-
 u32 __msan_get_origin(const void *a) {
   if (!__msan_get_track_origins()) return 0;
   uptr x = (uptr)a;

Modified: compiler-rt/trunk/lib/msan/msan.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan.h?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan.h (original)
+++ compiler-rt/trunk/lib/msan/msan.h Wed May 21 04:02:13 2014
@@ -32,12 +32,6 @@
 #define MEM_IS_SHADOW(mem) \
   ((uptr)mem >= 0x200000000000ULL && (uptr)mem <= 0x400000000000ULL)
 
-// Chained stack trace format.
-#define TRACE_MAGIC_MASK 0xFFFFFFFF00000000LLU
-#define TRACE_MAKE_CHAINED(id) ((uptr)id | TRACE_MAGIC_MASK)
-#define TRACE_TO_CHAINED_ID(u) ((uptr)u & (~TRACE_MAGIC_MASK))
-#define TRACE_IS_CHAINED(u) ((((uptr)u) & TRACE_MAGIC_MASK) == TRACE_MAGIC_MASK)
-
 const int kMsanParamTlsSizeInWords = 100;
 const int kMsanRetvalTlsSizeInWords = 100;
 
@@ -59,7 +53,7 @@ void InstallTrapHandler();
 void InstallAtExitHandler();
 void ReplaceOperatorsNewAndDelete();
 
-const char *GetOriginDescrIfStack(u32 id, uptr *pc);
+const char *GetStackOriginDescr(u32 id, uptr *pc);
 
 void EnterSymbolizer();
 void ExitSymbolizer();

Modified: compiler-rt/trunk/lib/msan/msan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_allocator.cc?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_allocator.cc (original)
+++ compiler-rt/trunk/lib/msan/msan_allocator.cc Wed May 21 04:02:13 2014
@@ -16,6 +16,8 @@
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "msan.h"
 #include "msan_allocator.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
 #include "msan_thread.h"
 
 namespace __msan {
@@ -101,9 +103,9 @@ static void *MsanAllocate(StackTrace *st
     if (__msan_get_track_origins()) {
       u32 stack_id = StackDepotPut(stack->trace, stack->size);
       CHECK(stack_id);
-      CHECK_EQ((stack_id >> 31),
-               0);  // Higher bit is occupied by stack origins.
-      __msan_set_origin(allocated, size, stack_id);
+      u32 id;
+      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
+      __msan_set_origin(allocated, size, Origin(id, 1).raw_id());
     }
   }
   MSAN_MALLOC_HOOK(allocated, size);
@@ -124,9 +126,9 @@ void MsanDeallocate(StackTrace *stack, v
     if (__msan_get_track_origins()) {
       u32 stack_id = StackDepotPut(stack->trace, stack->size);
       CHECK(stack_id);
-      CHECK_EQ((stack_id >> 31),
-               0);  // Higher bit is occupied by stack origins.
-      __msan_set_origin(p, size, stack_id);
+      u32 id;
+      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
+      __msan_set_origin(p, size, Origin(id, 1).raw_id());
     }
   }
   MsanThread *t = GetCurrentThread();

Added: compiler-rt/trunk/lib/msan/msan_chained_origin_depot.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_chained_origin_depot.cc?rev=209284&view=auto
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_chained_origin_depot.cc (added)
+++ compiler-rt/trunk/lib/msan/msan_chained_origin_depot.cc Wed May 21 04:02:13 2014
@@ -0,0 +1,82 @@
+//===-- msan_chained_origin_depot.cc -----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#include "msan_chained_origin_depot.h"
+
+#include "sanitizer_common/sanitizer_stackdepotbase.h"
+
+namespace __msan {
+
+struct ChainedOriginDepotDesc {
+  u32 here_id;
+  u32 prev_id;
+  u32 hash() const { return here_id ^ prev_id; }
+  bool is_valid() { return true; }
+};
+
+struct ChainedOriginDepotNode {
+  ChainedOriginDepotNode *link;
+  u32 id;
+  u32 here_id;
+  u32 prev_id;
+
+  typedef ChainedOriginDepotDesc args_type;
+  bool eq(u32 hash, const args_type &args) const {
+    return here_id == args.here_id && prev_id == args.prev_id;
+  }
+  static uptr storage_size(const args_type &args) {
+    return sizeof(ChainedOriginDepotNode);
+  }
+  void store(const args_type &args, u32 other_hash) {
+    here_id = args.here_id;
+    prev_id = args.prev_id;
+  }
+  args_type load() const {
+    args_type ret = {here_id, prev_id};
+    return ret;
+  }
+  struct Handle {
+    ChainedOriginDepotNode *node_;
+    Handle() : node_(0) {}
+    explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
+    bool valid() { return node_; }
+    u32 id() { return node_->id; }
+    int here_id() { return node_->here_id; }
+    int prev_id() { return node_->prev_id; }
+  };
+  Handle get_handle() { return Handle(this); }
+
+  typedef Handle handle_type;
+};
+
+static StackDepotBase<ChainedOriginDepotNode, 3> chainedOriginDepot;
+
+StackDepotStats *ChainedOriginDepotGetStats() {
+  return chainedOriginDepot.GetStats();
+}
+
+bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id) {
+  ChainedOriginDepotDesc desc = {here_id, prev_id};
+  bool inserted;
+  ChainedOriginDepotNode::Handle h = chainedOriginDepot.Put(desc, &inserted);
+  *new_id = h.valid() ? h.id() : 0;
+  return inserted;
+}
+
+// Retrieves the stored (here_id, prev_id) pair by id: returns here_id and
+// writes prev_id to *other.
+u32 ChainedOriginDepotGet(u32 id, u32 *other) {
+  ChainedOriginDepotDesc desc = chainedOriginDepot.Get(id);
+  *other = desc.prev_id;
+  return desc.here_id;
+}
+
+}  // namespace __msan

Propchange: compiler-rt/trunk/lib/msan/msan_chained_origin_depot.cc
------------------------------------------------------------------------------
    svn:eol-style = LF

Added: compiler-rt/trunk/lib/msan/msan_chained_origin_depot.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_chained_origin_depot.h?rev=209284&view=auto
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_chained_origin_depot.h (added)
+++ compiler-rt/trunk/lib/msan/msan_chained_origin_depot.h Wed May 21 04:02:13 2014
@@ -0,0 +1,26 @@
+//===-- msan_chained_origin_depot.h --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Storage for chained origins.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_CHAINED_ORIGIN_DEPOT_H
+#define MSAN_CHAINED_ORIGIN_DEPOT_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __msan {
+
+StackDepotStats *ChainedOriginDepotGetStats();
+bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id);
+// Retrieves the stored (here_id, prev_id) pair by id: returns here_id and
+// writes prev_id to *other.
+u32 ChainedOriginDepotGet(u32 id, u32 *other);
+
+}  // namespace __msan
+
+#endif  // MSAN_CHAINED_ORIGIN_DEPOT_H

Propchange: compiler-rt/trunk/lib/msan/msan_chained_origin_depot.h
------------------------------------------------------------------------------
    svn:eol-style = LF
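
As a sketch of how the two-field records are consumed, the loop below
mirrors DescribeOrigin in msan_report.cc (a hedged illustration, not a
new runtime API; it would live inside namespace __msan with
msan_origin.h and msan_chained_origin_depot.h included):

    // Follow prev_id links from a raw origin id until a special root is hit.
    static void WalkOriginChain(u32 raw_id) {
      Origin o(raw_id);
      while (true) {
        u32 prev_id;
        // One edge of the history tree: stack_id names the event (a StackDepot
        // id, or a stack-origin index when prev_id == Origin::kStackRoot).
        u32 stack_id = ChainedOriginDepotGet(o.id(), &prev_id);
        (void)stack_id;
        Origin prev(prev_id);
        if (prev.isHeapRoot() || prev.isStackRoot()) break;  // reached a root
        o = prev;  // intermediate prev_ids carry no depth bits, so id() == raw
      }
    }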

Modified: compiler-rt/trunk/lib/msan/msan_flags.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_flags.h?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_flags.h (original)
+++ compiler-rt/trunk/lib/msan/msan_flags.h Wed May 21 04:02:13 2014
@@ -19,6 +19,8 @@ namespace __msan {
 // Flags.
 struct Flags {
   int exit_code;
+  int origin_history_size;
+  int origin_history_per_stack_limit;
   bool poison_heap_with_zeroes;  // default: false
   bool poison_stack_with_zeroes;  // default: false
   bool poison_in_malloc;  // default: true

Modified: compiler-rt/trunk/lib/msan/msan_interceptors.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_interceptors.cc?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_interceptors.cc (original)
+++ compiler-rt/trunk/lib/msan/msan_interceptors.cc Wed May 21 04:02:13 2014
@@ -16,6 +16,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
 #include "msan_thread.h"
 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
 #include "sanitizer_common/sanitizer_allocator.h"
@@ -817,9 +819,9 @@ void __msan_allocated_memory(const void*
     __msan_poison(data, size);
   if (__msan_get_track_origins()) {
     u32 stack_id = StackDepotPut(stack.trace, stack.size);
-    CHECK(stack_id);
-    CHECK_EQ((stack_id >> 31), 0);  // Higher bit is occupied by stack origins.
-    __msan_set_origin(data, size, stack_id);
+    u32 id;
+    ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
+    __msan_set_origin(data, size, Origin(id, 1).raw_id());
   }
 }
 

Modified: compiler-rt/trunk/lib/msan/msan_interface_internal.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_interface_internal.h?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_interface_internal.h (original)
+++ compiler-rt/trunk/lib/msan/msan_interface_internal.h Wed May 21 04:02:13 2014
@@ -136,8 +136,6 @@ bool __msan_is_in_loader();
 SANITIZER_INTERFACE_ATTRIBUTE
 u32 __msan_get_umr_origin();
 SANITIZER_INTERFACE_ATTRIBUTE
-const char *__msan_get_origin_descr_if_stack(u32 id);
-SANITIZER_INTERFACE_ATTRIBUTE
 void __msan_partial_poison(const void* data, void* shadow, uptr size);
 
 // Tell MSan about newly allocated memory (ex.: custom allocator).

Added: compiler-rt/trunk/lib/msan/msan_origin.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_origin.h?rev=209284&view=auto
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_origin.h (added)
+++ compiler-rt/trunk/lib/msan/msan_origin.h Wed May 21 04:02:13 2014
@@ -0,0 +1,76 @@
+//===-- msan_origin.h ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Origin id utils.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_ORIGIN_H
+#define MSAN_ORIGIN_H
+
+namespace __msan {
+
+// Origin handling.
+//
+// Origin is a 32-bit identifier that is attached to any uninitialized value in
+// the program and describes, more or less exactly, how this memory came to be
+// uninitialized.
+//
+// Origin ids are values of ChainedOriginDepot, which is a mapping of (stack_id,
+// prev_id) -> id, where
+//  * stack_id describes an event in the program, usually a memory store.
+//    StackDepot keeps a mapping between those and corresponding stack traces.
+//  * prev_id is another origin id that describes the earlier part of the
+//    uninitialized value history.
+// Following a chain of prev_id provides the full recorded history of an
+// uninitialized value.
+//
+// This, effectively, defines a tree (or 2 trees, see below) where nodes are
+// points in value history marked with origin ids, and edges are events that are
+// marked with stack_id.
+//
+// There are 2 special root origin ids:
+// * kHeapRoot - an origin with prev_id == kHeapRoot describes an event of
+//   allocating memory from the heap.
+// * kStackRoot - an origin with prev_id == kStackRoot describes an event of
+//   allocating memory from the stack (i.e. on function entry).
+// Note that ChainedOriginDepot does not store any node for kHeapRoot or
+// kStackRoot. These are just special id values.
+//
+// The three highest bits of an origin id store the length (or depth) of the
+// origin chain. The special depth value 0 means unlimited.
+
+class Origin {
+ public:
+  static const int kDepthBits = 3;
+  static const int kDepthShift = 32 - kDepthBits;
+  static const u32 kIdMask = ((u32)-1) >> (32 - kDepthShift);
+  static const u32 kDepthMask = ~kIdMask;
+
+  static const int kMaxDepth = (1 << kDepthBits) - 1;
+
+  static const u32 kHeapRoot = (u32)-1;
+  static const u32 kStackRoot = (u32)-2;
+
+  explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
+  Origin(u32 id, u32 depth) : raw_id_((depth << kDepthShift) | id) {
+    CHECK_EQ(this->depth(), depth);
+    CHECK_EQ(this->id(), id);
+  }
+  int depth() const { return raw_id_ >> kDepthShift; }
+  u32 id() const { return raw_id_ & kIdMask; }
+  u32 raw_id() const { return raw_id_; }
+  bool isStackRoot() const { return raw_id_ == kStackRoot; }
+  bool isHeapRoot() const { return raw_id_ == kHeapRoot; }
+
+ private:
+  u32 raw_id_;
+};
+
+}  // namespace __msan
+
+#endif  // MSAN_ORIGIN_H

Propchange: compiler-rt/trunk/lib/msan/msan_origin.h
------------------------------------------------------------------------------
    svn:eol-style = LF
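
A small sketch of the bit packing described above (a hypothetical helper,
using only the constants and accessors from this header):

    // With kDepthBits == 3, kDepthShift == 29: the depth lives in the top
    // 3 bits, the ChainedOriginDepot id in the low 29 bits.
    void OriginLayoutExample() {
      Origin o(/*id=*/0x1234, /*depth=*/2);  // packs (2u << 29) | 0x1234
      CHECK_EQ(o.raw_id(), 0x40001234u);
      Origin back(o.raw_id());               // round-trips through the raw id
      CHECK_EQ(back.depth(), 2);
      CHECK_EQ(back.id(), 0x1234u);
    }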

Modified: compiler-rt/trunk/lib/msan/msan_report.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_report.cc?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_report.cc (original)
+++ compiler-rt/trunk/lib/msan/msan_report.cc Wed May 21 04:02:13 2014
@@ -13,6 +13,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
 #include "sanitizer_common/sanitizer_allocator_internal.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
@@ -56,30 +58,36 @@ static void DescribeStackOrigin(const ch
   }
 }
 
-static void DescribeOrigin(u32 origin) {
-  VPrintf(1, "  raw origin id: %d\n", origin);
-  uptr pc;
+static void DescribeOrigin(u32 id) {
+  VPrintf(1, "  raw origin id: %d\n", id);
+  Decorator d;
   while (true) {
-    if (const char *so = GetOriginDescrIfStack(origin, &pc)) {
+    Origin o(id);
+    u32 prev_id;
+    u32 stack_id = ChainedOriginDepotGet(o.id(), &prev_id);
+    Origin prev_o(prev_id);
+
+    if (prev_o.isStackRoot()) {
+      uptr pc;
+      const char *so = GetStackOriginDescr(stack_id, &pc);
       DescribeStackOrigin(so, pc);
       break;
-    }
-    Decorator d;
-    uptr size = 0;
-    const uptr *trace = StackDepotGet(origin, &size);
-    CHECK_GT(size, 0);
-    if (TRACE_IS_CHAINED(trace[size - 1])) {
-      // Linked origin.
-      // FIXME: copied? modified? passed through? observed?
-      Printf("  %sUninitialized value was stored to memory at%s\n", d.Origin(),
-             d.End());
-      StackTrace::PrintStack(trace, size - 1);
-      origin = TRACE_TO_CHAINED_ID(trace[size - 1]);
-    } else {
+    } else if (prev_o.isHeapRoot()) {
+      uptr size = 0;
+      const uptr *trace = StackDepotGet(stack_id, &size);
       Printf("  %sUninitialized value was created by a heap allocation%s\n",
              d.Origin(), d.End());
       StackTrace::PrintStack(trace, size);
       break;
+    } else {
+      // chained origin
+      uptr size = 0;
+      const uptr *trace = StackDepotGet(stack_id, &size);
+      // FIXME: copied? modified? passed through? observed?
+      Printf("  %sUninitialized value was stored to memory at%s\n", d.Origin(),
+             d.End());
+      StackTrace::PrintStack(trace, size - 1);
+      id = prev_id;
     }
   }
 }
@@ -121,7 +129,13 @@ void ReportAtExitStatistics() {
   // FIXME: we want this at normal exit, too!
   // FIXME: but only with verbosity=1 or something
   Printf("Unique heap origins: %zu\n", stack_depot_stats->n_uniq_ids);
-  Printf("Stack depot mapped bytes: %zu\n", stack_depot_stats->mapped);
+  Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats->allocated);
+
+  StackDepotStats *chained_origin_depot_stats = ChainedOriginDepotGetStats();
+  Printf("Unique origin histories: %zu\n",
+         chained_origin_depot_stats->n_uniq_ids);
+  Printf("History depot allocated bytes: %zu\n",
+         chained_origin_depot_stats->allocated);
 }
 
 class OriginSet {

Modified: compiler-rt/trunk/lib/msan/tests/msan_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/tests/msan_test.cc?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/tests/msan_test.cc (original)
+++ compiler-rt/trunk/lib/msan/tests/msan_test.cc Wed May 21 04:02:13 2014
@@ -105,20 +105,6 @@ static bool TrackingOrigins() {
         EXPECT_EQ(origin, __msan_get_umr_origin()); \
     } while (0)
 
-#define EXPECT_UMR_S(action, stack_origin) \
-    do {                                            \
-      __msan_set_expect_umr(1);                     \
-      action;                                       \
-      __msan_set_expect_umr(0);                     \
-      U4 id = __msan_get_umr_origin();             \
-      const char *str = __msan_get_origin_descr_if_stack(id); \
-      if (!str || strcmp(str, stack_origin)) {      \
-        fprintf(stderr, "EXPECT_POISONED_S: id=%u %s, %s", \
-                id, stack_origin, str);  \
-        EXPECT_EQ(1, 0);                            \
-      }                                             \
-    } while (0)
-
 #define EXPECT_POISONED(x) ExpectPoisoned(x)
 
 template<typename T>
@@ -136,21 +122,6 @@ void ExpectPoisonedWithOrigin(const T& t
     EXPECT_EQ(origin, __msan_get_origin((void*)&t));
 }
 
-#define EXPECT_POISONED_S(x, stack_origin) \
-  ExpectPoisonedWithStackOrigin(x, stack_origin)
-
-template<typename T>
-void ExpectPoisonedWithStackOrigin(const T& t, const char *stack_origin) {
-  EXPECT_NE(-1, __msan_test_shadow((void*)&t, sizeof(t)));
-  U4 id = __msan_get_origin((void*)&t);
-  const char *str = __msan_get_origin_descr_if_stack(id);
-  if (!str || strcmp(str, stack_origin)) {
-    fprintf(stderr, "EXPECT_POISONED_S: id=%u %s, %s",
-        id, stack_origin, str);
-    EXPECT_EQ(1, 0);
-  }
-}
-
 #define EXPECT_NOT_POISONED(x) ExpectNotPoisoned(x)
 
 template<typename T>
@@ -3885,29 +3856,6 @@ TEST(MemorySanitizerOrigins, Select) {
   EXPECT_POISONED_O(g_0 ? 1 : *GetPoisonedO<S4>(0, __LINE__), __LINE__);
 }
 
-extern "C"
-NOINLINE char AllocaTO() {
-  int ar[100];
-  break_optimization(ar);
-  return ar[10];
-  // fprintf(stderr, "Descr: %s\n",
-  //        __msan_get_origin_descr_if_stack(__msan_get_origin_tls()));
-}
-
-TEST(MemorySanitizerOrigins, Alloca) {
-  if (!TrackingOrigins()) return;
-  EXPECT_POISONED_S(AllocaTO(), "ar at AllocaTO");
-  EXPECT_POISONED_S(AllocaTO(), "ar at AllocaTO");
-  EXPECT_POISONED_S(AllocaTO(), "ar at AllocaTO");
-  EXPECT_POISONED_S(AllocaTO(), "ar at AllocaTO");
-}
-
-// FIXME: replace with a lit-like test.
-TEST(MemorySanitizerOrigins, DISABLED_AllocaDeath) {
-  if (!TrackingOrigins()) return;
-  EXPECT_DEATH(AllocaTO(), "ORIGIN: stack allocation: ar at AllocaTO");
-}
-
 NOINLINE int RetvalOriginTest(U4 origin) {
   int *a = new int;
   break_optimization(a);

Modified: compiler-rt/trunk/lib/sanitizer_common/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/CMakeLists.txt?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/CMakeLists.txt (original)
+++ compiler-rt/trunk/lib/sanitizer_common/CMakeLists.txt Wed May 21 04:02:13 2014
@@ -12,6 +12,7 @@ set(SANITIZER_SOURCES
   sanitizer_libignore.cc
   sanitizer_linux.cc
   sanitizer_mac.cc
+  sanitizer_persistent_allocator.cc
   sanitizer_platform_limits_linux.cc
   sanitizer_platform_limits_posix.cc
   sanitizer_posix.cc
@@ -65,6 +66,7 @@ set(SANITIZER_HEADERS
   sanitizer_list.h
   sanitizer_mac.h
   sanitizer_mutex.h
+  sanitizer_persistent_allocator.h
   sanitizer_placement_new.h
   sanitizer_platform.h
   sanitizer_platform_interceptors.h
@@ -73,6 +75,7 @@ set(SANITIZER_HEADERS
   sanitizer_quarantine.h
   sanitizer_report_decorator.h
   sanitizer_stackdepot.h
+  sanitizer_stackdepotbase.h
   sanitizer_stacktrace.h
   sanitizer_stoptheworld.h
   sanitizer_suppressions.h

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h Wed May 21 04:02:13 2014
@@ -542,4 +542,9 @@ inline void *operator new(__sanitizer::o
   return alloc.Allocate(size);
 }
 
+struct StackDepotStats {
+  uptr n_uniq_ids;
+  uptr allocated;
+};
+
 #endif  // SANITIZER_COMMON_H

Added: compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.cc?rev=209284&view=auto
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.cc (added)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.cc Wed May 21 04:02:13 2014
@@ -0,0 +1,19 @@
+//===-- sanitizer_persistent_allocator.cc -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+PersistentAllocator thePersistentAllocator;
+
+}  // namespace __sanitizer

Propchange: compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.cc
------------------------------------------------------------------------------
    svn:eol-style = LF

Copied: compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.h (from r209116, compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc)
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.h?p2=compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.h&p1=compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc&r1=209116&r2=209284&rev=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_persistent_allocator.h Wed May 21 04:02:13 2014
@@ -1,4 +1,4 @@
-//===-- sanitizer_stackdepot.cc -------------------------------------------===//
+//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -7,232 +7,65 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
+// A fast memory allocator that does not support free() or realloc().
+// All allocations are forever.
 //===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
+#define SANITIZER_PERSISTENT_ALLOCATOR_H
 
-#include "sanitizer_stackdepot.h"
-#include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_mutex.h"
 #include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
 
 namespace __sanitizer {
 
-const int kTabSize = 1024 * 1024;  // Hash table size.
-const int kPartBits = 8;
-const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
-const int kPartCount = 1 << kPartBits;  // Number of subparts in the table.
-const int kPartSize = kTabSize / kPartCount;
-const int kMaxId = 1 << kPartShift;
-
-struct StackDesc {
-  StackDesc *link;
-  u32 id;
-  u32 hash;
-  uptr size;
-  uptr stack[1];  // [size]
-};
+class PersistentAllocator {
+ public:
+  void *alloc(uptr size);
 
-static struct {
+ private:
+  void *tryAlloc(uptr size);
   StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
-  atomic_uintptr_t region_pos;  // Region allocator for StackDesc's.
+  atomic_uintptr_t region_pos;  // Region allocator for Node's.
   atomic_uintptr_t region_end;
-  atomic_uintptr_t tab[kTabSize];  // Hash table of StackDesc's.
-  atomic_uint32_t seq[kPartCount];  // Unique id generators.
-} depot;
-
-static StackDepotStats stats;
-
-StackDepotStats *StackDepotGetStats() {
-  return &stats;
-}
-
-static u32 hash(const uptr *stack, uptr size) {
-  // murmur2
-  const u32 m = 0x5bd1e995;
-  const u32 seed = 0x9747b28c;
-  const u32 r = 24;
-  u32 h = seed ^ (size * sizeof(uptr));
-  for (uptr i = 0; i < size; i++) {
-    u32 k = stack[i];
-    k *= m;
-    k ^= k >> r;
-    k *= m;
-    h *= m;
-    h ^= k;
-  }
-  h ^= h >> 13;
-  h *= m;
-  h ^= h >> 15;
-  return h;
-}
+};
 
-static StackDesc *tryallocDesc(uptr memsz) {
+inline void *PersistentAllocator::tryAlloc(uptr size) {
   // Optimistic lock-free allocation, essentially try to bump the region ptr.
   for (;;) {
-    uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
-    uptr end = atomic_load(&depot.region_end, memory_order_acquire);
-    if (cmp == 0 || cmp + memsz > end)
-      return 0;
-    if (atomic_compare_exchange_weak(
-        &depot.region_pos, &cmp, cmp + memsz,
-        memory_order_acquire))
-      return (StackDesc*)cmp;
+    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+    uptr end = atomic_load(&region_end, memory_order_acquire);
+    if (cmp == 0 || cmp + size > end) return 0;
+    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+                                     memory_order_acquire))
+      return (void *)cmp;
   }
 }
 
-static StackDesc *allocDesc(uptr size) {
+inline void *PersistentAllocator::alloc(uptr size) {
   // First, try to allocate optimistically.
-  uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
-  StackDesc *s = tryallocDesc(memsz);
-  if (s)
-    return s;
+  void *s = tryAlloc(size);
+  if (s) return s;
   // If failed, lock, retry and alloc new superblock.
-  SpinMutexLock l(&depot.mtx);
+  SpinMutexLock l(&mtx);
   for (;;) {
-    s = tryallocDesc(memsz);
-    if (s)
-      return s;
-    atomic_store(&depot.region_pos, 0, memory_order_relaxed);
+    s = tryAlloc(size);
+    if (s) return s;
+    atomic_store(&region_pos, 0, memory_order_relaxed);
     uptr allocsz = 64 * 1024;
-    if (allocsz < memsz)
-      allocsz = memsz;
+    if (allocsz < size) allocsz = size;
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
-    stats.mapped += allocsz;
-    atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
-    atomic_store(&depot.region_pos, mem, memory_order_release);
+    atomic_store(&region_end, mem + allocsz, memory_order_release);
+    atomic_store(&region_pos, mem, memory_order_release);
   }
 }
 
-static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
-  // Searches linked list s for the stack, returns its id.
-  for (; s; s = s->link) {
-    if (s->hash == hash && s->size == size) {
-      uptr i = 0;
-      for (; i < size; i++) {
-        if (stack[i] != s->stack[i])
-          break;
-      }
-      if (i == size)
-        return s->id;
-    }
-  }
-  return 0;
+extern PersistentAllocator thePersistentAllocator;
+inline void *PersistentAlloc(uptr sz) {
+  return thePersistentAllocator.alloc(sz);
 }
 
-static StackDesc *lock(atomic_uintptr_t *p) {
-  // Uses the pointer lsb as mutex.
-  for (int i = 0;; i++) {
-    uptr cmp = atomic_load(p, memory_order_relaxed);
-    if ((cmp & 1) == 0
-        && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
-                                        memory_order_acquire))
-      return (StackDesc*)cmp;
-    if (i < 10)
-      proc_yield(10);
-    else
-      internal_sched_yield();
-  }
-}
-
-static void unlock(atomic_uintptr_t *p, StackDesc *s) {
-  DCHECK_EQ((uptr)s & 1, 0);
-  atomic_store(p, (uptr)s, memory_order_release);
-}
-
-u32 StackDepotPut(const uptr *stack, uptr size) {
-  if (stack == 0 || size == 0)
-    return 0;
-  uptr h = hash(stack, size);
-  atomic_uintptr_t *p = &depot.tab[h % kTabSize];
-  uptr v = atomic_load(p, memory_order_consume);
-  StackDesc *s = (StackDesc*)(v & ~1);
-  // First, try to find the existing stack.
-  u32 id = find(s, stack, size, h);
-  if (id)
-    return id;
-  // If failed, lock, retry and insert new.
-  StackDesc *s2 = lock(p);
-  if (s2 != s) {
-    id = find(s2, stack, size, h);
-    if (id) {
-      unlock(p, s2);
-      return id;
-    }
-  }
-  uptr part = (h % kTabSize) / kPartSize;
-  id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
-  stats.n_uniq_ids++;
-  CHECK_LT(id, kMaxId);
-  id |= part << kPartShift;
-  CHECK_NE(id, 0);
-  CHECK_EQ(id & (1u << 31), 0);
-  s = allocDesc(size);
-  s->id = id;
-  s->hash = h;
-  s->size = size;
-  internal_memcpy(s->stack, stack, size * sizeof(uptr));
-  s->link = s2;
-  unlock(p, s);
-  return id;
-}
-
-const uptr *StackDepotGet(u32 id, uptr *size) {
-  if (id == 0)
-    return 0;
-  CHECK_EQ(id & (1u << 31), 0);
-  // High kPartBits contain part id, so we need to scan at most kPartSize lists.
-  uptr part = id >> kPartShift;
-  for (int i = 0; i != kPartSize; i++) {
-    uptr idx = part * kPartSize + i;
-    CHECK_LT(idx, kTabSize);
-    atomic_uintptr_t *p = &depot.tab[idx];
-    uptr v = atomic_load(p, memory_order_consume);
-    StackDesc *s = (StackDesc*)(v & ~1);
-    for (; s; s = s->link) {
-      if (s->id == id) {
-        *size = s->size;
-        return s->stack;
-      }
-    }
-  }
-  *size = 0;
-  return 0;
-}
-
-bool StackDepotReverseMap::IdDescPair::IdComparator(
-    const StackDepotReverseMap::IdDescPair &a,
-    const StackDepotReverseMap::IdDescPair &b) {
-  return a.id < b.id;
-}
-
-StackDepotReverseMap::StackDepotReverseMap()
-    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
-  for (int idx = 0; idx < kTabSize; idx++) {
-    atomic_uintptr_t *p = &depot.tab[idx];
-    uptr v = atomic_load(p, memory_order_consume);
-    StackDesc *s = (StackDesc*)(v & ~1);
-    for (; s; s = s->link) {
-      IdDescPair pair = {s->id, s};
-      map_.push_back(pair);
-    }
-  }
-  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
-}
-
-const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
-  if (!map_.size()) return 0;
-  IdDescPair pair = {id, 0};
-  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
-                                  IdDescPair::IdComparator);
-  if (idx > map_.size()) {
-    *size = 0;
-    return 0;
-  }
-  StackDesc *desc = map_[idx].desc;
-  *size = desc->size;
-  return desc->stack;
-}
+} // namespace __sanitizer
 
-}  // namespace __sanitizer
+#endif  // SANITIZER_PERSISTENT_ALLOCATOR_H
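
The allocator's contract, condensed (a sketch; PersistentAlloc is the
only public entry point, and there is deliberately no way to free):

    // Bump-pointer allocation: lock-free from the current region, falling
    // back to mapping a fresh >= 64K region under the spin mutex.
    void PersistentAllocExample() {
      void *p = PersistentAlloc(128);  // never fails: MmapOrDie aborts on OOM
      (void)p;  // allocations are permanent; depot nodes live forever
    }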

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc Wed May 21 04:02:13 2014
@@ -12,193 +12,116 @@
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_stackdepot.h"
+
 #include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
+#include "sanitizer_stackdepotbase.h"
 
 namespace __sanitizer {
 
-const int kTabSize = 1024 * 1024;  // Hash table size.
-const int kPartBits = 8;
-const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
-const int kPartCount = 1 << kPartBits;  // Number of subparts in the table.
-const int kPartSize = kTabSize / kPartCount;
-const int kMaxId = 1 << kPartShift;
+struct StackDepotDesc {
+  const uptr *stack;
+  uptr size;
+  u32 hash() const {
+    // murmur2
+    const u32 m = 0x5bd1e995;
+    const u32 seed = 0x9747b28c;
+    const u32 r = 24;
+    u32 h = seed ^ (size * sizeof(uptr));
+    for (uptr i = 0; i < size; i++) {
+      u32 k = stack[i];
+      k *= m;
+      k ^= k >> r;
+      k *= m;
+      h *= m;
+      h ^= k;
+    }
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+    return h;
+  }
+  bool is_valid() { return size > 0 && stack; }
+};
 
-struct StackDesc {
-  StackDesc *link;
+struct StackDepotNode {
+  StackDepotNode *link;
   u32 id;
-  u32 hash;
+  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
   uptr size;
   uptr stack[1];  // [size]
-};
-
-static struct {
-  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
-  atomic_uintptr_t region_pos;  // Region allocator for StackDesc's.
-  atomic_uintptr_t region_end;
-  atomic_uintptr_t tab[kTabSize];  // Hash table of StackDesc's.
-  atomic_uint32_t seq[kPartCount];  // Unique id generators.
-} depot;
-
-static StackDepotStats stats;
 
-StackDepotStats *StackDepotGetStats() {
-  return &stats;
-}
-
-static u32 hash(const uptr *stack, uptr size) {
-  // murmur2
-  const u32 m = 0x5bd1e995;
-  const u32 seed = 0x9747b28c;
-  const u32 r = 24;
-  u32 h = seed ^ (size * sizeof(uptr));
-  for (uptr i = 0; i < size; i++) {
-    u32 k = stack[i];
-    k *= m;
-    k ^= k >> r;
-    k *= m;
-    h *= m;
-    h ^= k;
+  static const u32 kUseCountBits = 20;
+  static const u32 kMaxUseCount = 1 << kUseCountBits;
+  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+  static const u32 kHashMask = ~kUseCountMask;
+
+  typedef StackDepotDesc args_type;
+  bool eq(u32 hash, const args_type &args) const {
+    u32 hash_bits =
+        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
+    if ((hash & kHashMask) != hash_bits || args.size != size) return false;
+    uptr i = 0;
+    for (; i < size; i++) {
+      if (stack[i] != args.stack[i]) return false;
+    }
+    return true;
   }
-  h ^= h >> 13;
-  h *= m;
-  h ^= h >> 15;
-  return h;
-}
-
-static StackDesc *tryallocDesc(uptr memsz) {
-  // Optimisic lock-free allocation, essentially try to bump the region ptr.
-  for (;;) {
-    uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
-    uptr end = atomic_load(&depot.region_end, memory_order_acquire);
-    if (cmp == 0 || cmp + memsz > end)
-      return 0;
-    if (atomic_compare_exchange_weak(
-        &depot.region_pos, &cmp, cmp + memsz,
-        memory_order_acquire))
-      return (StackDesc*)cmp;
+  static uptr storage_size(const args_type &args) {
+    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
   }
-}
-
-static StackDesc *allocDesc(uptr size) {
-  // First, try to allocate optimisitically.
-  uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
-  StackDesc *s = tryallocDesc(memsz);
-  if (s)
-    return s;
-  // If failed, lock, retry and alloc new superblock.
-  SpinMutexLock l(&depot.mtx);
-  for (;;) {
-    s = tryallocDesc(memsz);
-    if (s)
-      return s;
-    atomic_store(&depot.region_pos, 0, memory_order_relaxed);
-    uptr allocsz = 64 * 1024;
-    if (allocsz < memsz)
-      allocsz = memsz;
-    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
-    stats.mapped += allocsz;
-    atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
-    atomic_store(&depot.region_pos, mem, memory_order_release);
+  void store(const args_type &args, u32 hash) {
+    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+    size = args.size;
+    internal_memcpy(stack, args.stack, size * sizeof(uptr));
   }
-}
-
-static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
-  // Searches linked list s for the stack, returns its id.
-  for (; s; s = s->link) {
-    if (s->hash == hash && s->size == size) {
-      uptr i = 0;
-      for (; i < size; i++) {
-        if (stack[i] != s->stack[i])
-          break;
-      }
-      if (i == size)
-        return s->id;
-    }
+  args_type load() const {
+    args_type ret = {&stack[0], size};
+    return ret;
   }
-  return 0;
-}
+  StackDepotHandle get_handle() { return StackDepotHandle(this); }
 
-static StackDesc *lock(atomic_uintptr_t *p) {
-  // Uses the pointer lsb as mutex.
-  for (int i = 0;; i++) {
-    uptr cmp = atomic_load(p, memory_order_relaxed);
-    if ((cmp & 1) == 0
-        && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
-                                        memory_order_acquire))
-      return (StackDesc*)cmp;
-    if (i < 10)
-      proc_yield(10);
-    else
-      internal_sched_yield();
-  }
-}
+  typedef StackDepotHandle handle_type;
+};
 
-static void unlock(atomic_uintptr_t *p, StackDesc *s) {
-  DCHECK_EQ((uptr)s & 1, 0);
-  atomic_store(p, (uptr)s, memory_order_release);
+COMPILER_CHECK(StackDepotNode::kMaxUseCount == kStackDepotMaxUseCount);
+
+u32 StackDepotHandle::id() { return node_->id; }
+int StackDepotHandle::use_count() {
+  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+         StackDepotNode::kUseCountMask;
+}
+void StackDepotHandle::inc_use_count_unsafe() {
+  u32 prev =
+      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+      StackDepotNode::kUseCountMask;
+  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
+}
+uptr StackDepotHandle::size() { return node_->size; }
+uptr *StackDepotHandle::stack() { return &node_->stack[0]; }
+
+// FIXME(dvyukov): this single reserved bit is used in TSan.
+typedef StackDepotBase<StackDepotNode, 1> StackDepot;
+static StackDepot theDepot;
+
+StackDepotStats *StackDepotGetStats() {
+  return theDepot.GetStats();
 }
 
 u32 StackDepotPut(const uptr *stack, uptr size) {
-  if (stack == 0 || size == 0)
-    return 0;
-  uptr h = hash(stack, size);
-  atomic_uintptr_t *p = &depot.tab[h % kTabSize];
-  uptr v = atomic_load(p, memory_order_consume);
-  StackDesc *s = (StackDesc*)(v & ~1);
-  // First, try to find the existing stack.
-  u32 id = find(s, stack, size, h);
-  if (id)
-    return id;
-  // If failed, lock, retry and insert new.
-  StackDesc *s2 = lock(p);
-  if (s2 != s) {
-    id = find(s2, stack, size, h);
-    if (id) {
-      unlock(p, s2);
-      return id;
-    }
-  }
-  uptr part = (h % kTabSize) / kPartSize;
-  id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
-  stats.n_uniq_ids++;
-  CHECK_LT(id, kMaxId);
-  id |= part << kPartShift;
-  CHECK_NE(id, 0);
-  CHECK_EQ(id & (1u << 31), 0);
-  s = allocDesc(size);
-  s->id = id;
-  s->hash = h;
-  s->size = size;
-  internal_memcpy(s->stack, stack, size * sizeof(uptr));
-  s->link = s2;
-  unlock(p, s);
-  return id;
+  StackDepotDesc desc = {stack, size};
+  StackDepotHandle h = theDepot.Put(desc);
+  return h.valid() ? h.id() : 0;
+}
+
+StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) {
+  StackDepotDesc desc = {stack, size};
+  return theDepot.Put(desc);
 }
 
 const uptr *StackDepotGet(u32 id, uptr *size) {
-  if (id == 0)
-    return 0;
-  CHECK_EQ(id & (1u << 31), 0);
-  // High kPartBits contain part id, so we need to scan at most kPartSize lists.
-  uptr part = id >> kPartShift;
-  for (int i = 0; i != kPartSize; i++) {
-    uptr idx = part * kPartSize + i;
-    CHECK_LT(idx, kTabSize);
-    atomic_uintptr_t *p = &depot.tab[idx];
-    uptr v = atomic_load(p, memory_order_consume);
-    StackDesc *s = (StackDesc*)(v & ~1);
-    for (; s; s = s->link) {
-      if (s->id == id) {
-        *size = s->size;
-        return s->stack;
-      }
-    }
-  }
-  *size = 0;
-  return 0;
+  StackDepotDesc desc = theDepot.Get(id);
+  *size = desc.size;
+  return desc.stack;
 }
 
 bool StackDepotReverseMap::IdDescPair::IdComparator(
@@ -209,10 +132,10 @@ bool StackDepotReverseMap::IdDescPair::I
 
 StackDepotReverseMap::StackDepotReverseMap()
     : map_(StackDepotGetStats()->n_uniq_ids + 100) {
-  for (int idx = 0; idx < kTabSize; idx++) {
-    atomic_uintptr_t *p = &depot.tab[idx];
+  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
+    atomic_uintptr_t *p = &theDepot.tab[idx];
     uptr v = atomic_load(p, memory_order_consume);
-    StackDesc *s = (StackDesc*)(v & ~1);
+    StackDepotNode *s = (StackDepotNode*)(v & ~1);
     for (; s; s = s->link) {
       IdDescPair pair = {s->id, s};
       map_.push_back(pair);
@@ -230,7 +153,7 @@ const uptr *StackDepotReverseMap::Get(u3
     *size = 0;
     return 0;
   }
-  StackDesc *desc = map_[idx].desc;
+  StackDepotNode *desc = map_[idx].desc;
   *size = desc->size;
   return desc->stack;
 }

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h?rev=209284&r1=209283&r2=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h Wed May 21 04:02:13 2014
@@ -19,21 +19,27 @@
 namespace __sanitizer {
 
 // StackDepot efficiently stores huge amounts of stack traces.
+struct StackDepotNode;
+struct StackDepotHandle {
+  StackDepotNode *node_;
+  StackDepotHandle() : node_(0) {}
+  explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
+  bool valid() { return node_; }
+  u32 id();
+  int use_count();
+  void inc_use_count_unsafe();
+  uptr size();
+  uptr *stack();
+};
+
+const int kStackDepotMaxUseCount = 1U << 20;
 
-// Maps stack trace to an unique id.
+StackDepotStats *StackDepotGetStats();
 u32 StackDepotPut(const uptr *stack, uptr size);
+StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size);
 // Retrieves a stored stack trace by the id.
 const uptr *StackDepotGet(u32 id, uptr *size);
 
-struct StackDepotStats {
-  uptr n_uniq_ids;
-  uptr mapped;
-};
-
-StackDepotStats *StackDepotGetStats();
-
-struct StackDesc;
-
 // Instantiating this class creates a snapshot of StackDepot which can be
 // efficiently queried with StackDepotGet(). You can use it concurrently with
 // StackDepot, but the snapshot is only guaranteed to contain those stack traces
@@ -46,7 +52,7 @@ class StackDepotReverseMap {
  private:
   struct IdDescPair {
     u32 id;
-    StackDesc *desc;
+    StackDepotNode *desc;
 
     static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
   };
@@ -57,6 +63,7 @@ class StackDepotReverseMap {
   StackDepotReverseMap(const StackDepotReverseMap&);
   void operator=(const StackDepotReverseMap&);
 };
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_STACKDEPOT_H
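
The handle interface exists so that MSan can rate-limit chaining per
stack (see ChainOrigin in msan.cc above); a condensed caller-side
sketch:

    void UseCountSketch(const uptr *trace, uptr size) {
      StackDepotHandle h = StackDepotPut_WithHandle(trace, size);
      if (!h.valid()) return;  // null or empty trace
      // Cap at half of kStackDepotMaxUseCount to leave headroom against
      // overflow in the relaxed counter (mirrors the check in msan.cc).
      if (h.use_count() > kStackDepotMaxUseCount / 2) return;
      h.inc_use_count_unsafe();  // racy by design; bound enforced by callers
    }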

Copied: compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepotbase.h (from r209116, compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc)
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepotbase.h?p2=compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepotbase.h&p1=compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc&r1=209116&r2=209284&rev=209284&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepotbase.h Wed May 21 04:02:13 2014
@@ -1,4 +1,4 @@
-//===-- sanitizer_stackdepot.cc -------------------------------------------===//
+//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -7,128 +7,72 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
+// Implementation of a mapping from arbitrary values to unique 32-bit
+// identifiers.
 //===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
 
-#include "sanitizer_stackdepot.h"
-#include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_mutex.h"
 #include "sanitizer_atomic.h"
+#include "sanitizer_persistent_allocator.h"
 
 namespace __sanitizer {
 
-const int kTabSize = 1024 * 1024;  // Hash table size.
-const int kPartBits = 8;
-const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
-const int kPartCount = 1 << kPartBits;  // Number of subparts in the table.
-const int kPartSize = kTabSize / kPartCount;
-const int kMaxId = 1 << kPartShift;
-
-struct StackDesc {
-  StackDesc *link;
-  u32 id;
-  u32 hash;
-  uptr size;
-  uptr stack[1];  // [size]
-};
+template <class Node, int kReservedBits>
+class StackDepotBase {
+ public:
+  typedef typename Node::args_type args_type;
+  typedef typename Node::handle_type handle_type;
+  // Maps a stack trace to a unique id.
+  handle_type Put(args_type args, bool *inserted = 0);
+  // Retrieves the stored args by id.
+  args_type Get(u32 id);
+
+  StackDepotStats *GetStats() { return &stats; }
+
+ private:
+  static Node *find(Node *s, args_type args, u32 hash);
+  static Node *lock(atomic_uintptr_t *p);
+  static void unlock(atomic_uintptr_t *p, Node *s);
+
+  static const int kTabSize = 1024 * 1024;  // Hash table size.
+  static const int kPartBits = 8;
+  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+  static const int kPartCount =
+      1 << kPartBits;  // Number of subparts in the table.
+  static const int kPartSize = kTabSize / kPartCount;
+  static const int kMaxId = 1 << kPartShift;
 
-static struct {
-  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
-  atomic_uintptr_t region_pos;  // Region allocator for StackDesc's.
-  atomic_uintptr_t region_end;
-  atomic_uintptr_t tab[kTabSize];  // Hash table of StackDesc's.
+  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
   atomic_uint32_t seq[kPartCount];  // Unique id generators.
-} depot;
-
-static StackDepotStats stats;
-
-StackDepotStats *StackDepotGetStats() {
-  return &stats;
-}
-
-static u32 hash(const uptr *stack, uptr size) {
-  // murmur2
-  const u32 m = 0x5bd1e995;
-  const u32 seed = 0x9747b28c;
-  const u32 r = 24;
-  u32 h = seed ^ (size * sizeof(uptr));
-  for (uptr i = 0; i < size; i++) {
-    u32 k = stack[i];
-    k *= m;
-    k ^= k >> r;
-    k *= m;
-    h *= m;
-    h ^= k;
-  }
-  h ^= h >> 13;
-  h *= m;
-  h ^= h >> 15;
-  return h;
-}
 
-static StackDesc *tryallocDesc(uptr memsz) {
-  // Optimisic lock-free allocation, essentially try to bump the region ptr.
-  for (;;) {
-    uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
-    uptr end = atomic_load(&depot.region_end, memory_order_acquire);
-    if (cmp == 0 || cmp + memsz > end)
-      return 0;
-    if (atomic_compare_exchange_weak(
-        &depot.region_pos, &cmp, cmp + memsz,
-        memory_order_acquire))
-      return (StackDesc*)cmp;
-  }
-}
+  StackDepotStats stats;
 
-static StackDesc *allocDesc(uptr size) {
-  // First, try to allocate optimistically.
-  uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
-  StackDesc *s = tryallocDesc(memsz);
-  if (s)
-    return s;
-  // If failed, lock, retry and alloc new superblock.
-  SpinMutexLock l(&depot.mtx);
-  for (;;) {
-    s = tryallocDesc(memsz);
-    if (s)
-      return s;
-    atomic_store(&depot.region_pos, 0, memory_order_relaxed);
-    uptr allocsz = 64 * 1024;
-    if (allocsz < memsz)
-      allocsz = memsz;
-    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
-    stats.mapped += allocsz;
-    atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
-    atomic_store(&depot.region_pos, mem, memory_order_release);
-  }
-}
+  friend class StackDepotReverseMap;
+};
 
-static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
+template <class Node, int kReservedBits>
+Node *StackDepotBase<Node, kReservedBits>::find(Node *s, args_type args,
+                                                u32 hash) {
-  // Searches linked list s for the stack, returns its id.
+  // Searches the linked list s for a matching node; returns it, or 0.
   for (; s; s = s->link) {
-    if (s->hash == hash && s->size == size) {
-      uptr i = 0;
-      for (; i < size; i++) {
-        if (stack[i] != s->stack[i])
-          break;
-      }
-      if (i == size)
-        return s->id;
+    if (s->eq(hash, args)) {
+      return s;
     }
   }
   return 0;
 }
 
-static StackDesc *lock(atomic_uintptr_t *p) {
+template <class Node, int kReservedBits>
+Node *StackDepotBase<Node, kReservedBits>::lock(atomic_uintptr_t *p) {
   // Uses the pointer lsb as mutex.
   for (int i = 0;; i++) {
     uptr cmp = atomic_load(p, memory_order_relaxed);
-    if ((cmp & 1) == 0
-        && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
-                                        memory_order_acquire))
-      return (StackDesc*)cmp;
+    if ((cmp & 1) == 0 &&
+        atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+      return (Node *)cmp;
     if (i < 10)
       proc_yield(10);
     else
@@ -136,103 +80,74 @@ static StackDesc *lock(atomic_uintptr_t
   }
 }
 
-static void unlock(atomic_uintptr_t *p, StackDesc *s) {
+template <class Node, int kReservedBits>
+void StackDepotBase<Node, kReservedBits>::unlock(atomic_uintptr_t *p, Node *s) {
   DCHECK_EQ((uptr)s & 1, 0);
   atomic_store(p, (uptr)s, memory_order_release);
 }
 
-u32 StackDepotPut(const uptr *stack, uptr size) {
-  if (stack == 0 || size == 0)
-    return 0;
-  uptr h = hash(stack, size);
-  atomic_uintptr_t *p = &depot.tab[h % kTabSize];
+template <class Node, int kReservedBits>
+typename StackDepotBase<Node, kReservedBits>::handle_type
+StackDepotBase<Node, kReservedBits>::Put(args_type args, bool *inserted) {
+  if (inserted) *inserted = false;
+  if (!args.is_valid()) return handle_type();
+  uptr h = args.hash();
+  atomic_uintptr_t *p = &tab[h % kTabSize];
   uptr v = atomic_load(p, memory_order_consume);
-  StackDesc *s = (StackDesc*)(v & ~1);
+  Node *s = (Node *)(v & ~1);
   // First, try to find the existing stack.
-  u32 id = find(s, stack, size, h);
-  if (id)
-    return id;
+  Node *node = find(s, args, h);
+  if (node) return node->get_handle();
-  // If failed, lock, retry and insert new.
+  // If that fails: lock, retry the search, and insert a new node.
-  StackDesc *s2 = lock(p);
+  Node *s2 = lock(p);
   if (s2 != s) {
-    id = find(s2, stack, size, h);
-    if (id) {
+    node = find(s2, args, h);
+    if (node) {
       unlock(p, s2);
-      return id;
+      return node->get_handle();
     }
   }
   uptr part = (h % kTabSize) / kPartSize;
-  id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
   stats.n_uniq_ids++;
   CHECK_LT(id, kMaxId);
   id |= part << kPartShift;
   CHECK_NE(id, 0);
-  CHECK_EQ(id & (1u << 31), 0);
-  s = allocDesc(size);
+  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+  uptr memsz = Node::storage_size(args);
+  s = (Node *)PersistentAlloc(memsz);
+  stats.allocated += memsz;
   s->id = id;
-  s->hash = h;
-  s->size = size;
-  internal_memcpy(s->stack, stack, size * sizeof(uptr));
+  s->store(args, h);
   s->link = s2;
   unlock(p, s);
-  return id;
+  if (inserted) *inserted = true;
+  return s->get_handle();
 }
 
-const uptr *StackDepotGet(u32 id, uptr *size) {
-  if (id == 0)
-    return 0;
-  CHECK_EQ(id & (1u << 31), 0);
+template <class Node, int kReservedBits>
+typename StackDepotBase<Node, kReservedBits>::args_type
+StackDepotBase<Node, kReservedBits>::Get(u32 id) {
+  if (id == 0) {
+    return args_type();
+  }
+  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
   // High kPartBits contain part id, so we need to scan at most kPartSize lists.
   uptr part = id >> kPartShift;
   for (int i = 0; i != kPartSize; i++) {
     uptr idx = part * kPartSize + i;
     CHECK_LT(idx, kTabSize);
-    atomic_uintptr_t *p = &depot.tab[idx];
+    atomic_uintptr_t *p = &tab[idx];
     uptr v = atomic_load(p, memory_order_consume);
-    StackDesc *s = (StackDesc*)(v & ~1);
+    Node *s = (Node *)(v & ~1);
     for (; s; s = s->link) {
       if (s->id == id) {
-        *size = s->size;
-        return s->stack;
+        return s->load();
       }
     }
   }
-  *size = 0;
-  return 0;
-}
-
-bool StackDepotReverseMap::IdDescPair::IdComparator(
-    const StackDepotReverseMap::IdDescPair &a,
-    const StackDepotReverseMap::IdDescPair &b) {
-  return a.id < b.id;
-}
-
-StackDepotReverseMap::StackDepotReverseMap()
-    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
-  for (int idx = 0; idx < kTabSize; idx++) {
-    atomic_uintptr_t *p = &depot.tab[idx];
-    uptr v = atomic_load(p, memory_order_consume);
-    StackDesc *s = (StackDesc*)(v & ~1);
-    for (; s; s = s->link) {
-      IdDescPair pair = {s->id, s};
-      map_.push_back(pair);
-    }
-  }
-  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
-}
-
-const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
-  if (!map_.size()) return 0;
-  IdDescPair pair = {id, 0};
-  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
-                                  IdDescPair::IdComparator);
-  if (idx > map_.size()) {
-    *size = 0;
-    return 0;
-  }
-  StackDesc *desc = map_[idx].desc;
-  *size = desc->size;
-  return desc->stack;
+  return args_type();
 }
 
-}  // namespace __sanitizer
+} // namespace __sanitizer
+#endif  // SANITIZER_STACKDEPOTBASE_H

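A note on the contract implied by the new template: StackDepotBase<Node,
kReservedBits> fills in a Node's public "link" and "id" fields itself, and
otherwise relies only on the nested args_type/handle_type types and the
eq(), store(), load(), storage_size() and get_handle() members that Put()
and Get() call. Below is a minimal hypothetical node, for illustration only
(it assumes the sanitizer_common u32/u64/uptr typedefs; the real
instantiations added by this commit are in sanitizer_stackdepot.cc and
msan_chained_origin_depot.cc):

    // Toy node mapping a u64 key to a unique u32 id. The member names match
    // what StackDepotBase actually calls; everything else here is made up.
    struct ToyNode {
      ToyNode *link;  // hash-bucket chain, written by StackDepotBase
      u32 id;         // unique id, assigned by StackDepotBase
      u32 hash;
      u64 key;

      struct args_type {
        u64 key;
        bool is_valid() const { return key != 0; }
        u32 hash() const { return (u32)(key * 0x9E3779B9u); }
      };
      typedef u32 handle_type;

      // Bytes PersistentAlloc must provide to store these args.
      static uptr storage_size(const args_type &args) {
        return sizeof(ToyNode);
      }
      bool eq(u32 h, const args_type &args) const {
        return hash == h && key == args.key;
      }
      void store(const args_type &args, u32 h) { hash = h; key = args.key; }
      args_type load() const { args_type a = { key }; return a; }
      handle_type get_handle() const { return id; }
    };

    // Usage sketch: a depot instance with no reserved id bits.
    //   static StackDepotBase<ToyNode, 0> toy_depot;
    //   ToyNode::args_type args = { 42 };
    //   u32 id = toy_depot.Put(args);

kReservedBits trims the usable id space from the top, so a client can pack
its own tag bits into the returned 32-bit id and still round-trip it through
Get(); the chained-origin depot relies on this (see msan_origin.h for the
encoding).
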
Added: compiler-rt/trunk/test/msan/chained_origin_limits.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/msan/chained_origin_limits.cc?rev=209284&view=auto
==============================================================================
--- compiler-rt/trunk/test/msan/chained_origin_limits.cc (added)
+++ compiler-rt/trunk/test/msan/chained_origin_limits.cc Wed May 21 04:02:13 2014
@@ -0,0 +1,101 @@
+// This test program creates a very large number of unique histories.
+
+// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -m64 -O3 %s -o %t
+
+// RUN: MSAN_OPTIONS=origin_history_size=7 not %run %t >%t.out 2>&1
+// RUN: FileCheck %s --check-prefix=CHECK7 < %t.out
+
+// RUN: MSAN_OPTIONS=origin_history_size=2 not %run %t >%t.out 2>&1
+// RUN: FileCheck %s --check-prefix=CHECK2 < %t.out
+
+// RUN: MSAN_OPTIONS=origin_history_per_stack_limit=1 not %run %t >%t.out 2>&1
+// RUN: FileCheck %s --check-prefix=CHECK-PER-STACK < %t.out
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static char *buf, *cur, *end;
+void init() {
+  buf = new char[1000];
+  cur = buf;
+  end = buf + 1000;
+}
+
+void line_flush() {
+  char *p;
+  for (p = cur - 1; p >= buf; --p)
+    if (*p == '\n')
+      break;
+  if (p >= buf) {
+    size_t write_sz = p - buf + 1;
+    // write(2, buf, write_sz);
+    memmove(buf, p + 1, end - p - 1);
+    cur -= write_sz;
+  }
+}
+
+void buffered_write(const char *p, size_t sz) {
+  while (sz > 0) {
+    size_t copy_sz = end - cur;
+    if (sz < copy_sz) copy_sz = sz;
+    memcpy(cur, p, copy_sz);
+    cur += copy_sz;
+    sz -= copy_sz;
+    line_flush();
+  }
+}
+
+void fn1() {
+  buffered_write("a\n", 2);
+}
+
+void fn2() {
+  buffered_write("a\n", 2);
+}
+
+void fn3() {
+  buffered_write("a\n", 2);
+}
+
+int main(void) {
+  init();
+  for (int i = 0; i < 2000; ++i) {
+    fn1();
+    fn2();
+    fn3();
+  }
+  return buf[50];
+}
+
+// CHECK7: WARNING: MemorySanitizer: use-of-uninitialized-value
+// CHECK7-NOT: Uninitialized value was stored to memory at
+// CHECK7: Uninitialized value was stored to memory at
+// CHECK7-NOT: Uninitialized value was stored to memory at
+// CHECK7: Uninitialized value was stored to memory at
+// CHECK7-NOT: Uninitialized value was stored to memory at
+// CHECK7: Uninitialized value was stored to memory at
+// CHECK7-NOT: Uninitialized value was stored to memory at
+// CHECK7: Uninitialized value was stored to memory at
+// CHECK7-NOT: Uninitialized value was stored to memory at
+// CHECK7: Uninitialized value was stored to memory at
+// CHECK7-NOT: Uninitialized value was stored to memory at
+// CHECK7: Uninitialized value was stored to memory at
+// CHECK7-NOT: Uninitialized value was stored to memory at
+// CHECK7: Uninitialized value was created by a heap allocation
+
+// CHECK2: WARNING: MemorySanitizer: use-of-uninitialized-value
+// CHECK2-NOT: Uninitialized value was stored to memory at
+// CHECK2: Uninitialized value was stored to memory at
+// CHECK2-NOT: Uninitialized value was stored to memory at
+// CHECK2: Uninitialized value was created by a heap allocation
+
+// CHECK-PER-STACK: WARNING: MemorySanitizer: use-of-uninitialized-value
+// CHECK-PER-STACK: Uninitialized value was stored to memory at
+// CHECK-PER-STACK: in fn3
+// CHECK-PER-STACK: Uninitialized value was stored to memory at
+// CHECK-PER-STACK: in fn2
+// CHECK-PER-STACK: Uninitialized value was stored to memory at
+// CHECK-PER-STACK: in fn1
+// CHECK-PER-STACK: Uninitialized value was created by a heap allocation

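What the test above does: buf starts as uninitialized heap memory, and each
line_flush() memmove drags the still-uninitialized tail of the buffer toward
its front. Under -fsanitize-memory-track-origins=2, every store of
uninitialized bytes chains a fresh origin link (current stack plus previous
origin), so 2000 iterations through fn1/fn2/fn3 would build enormous
histories if nothing limited them. The CHECK lines then verify that
origin_history_size=N truncates the report to N-1 "stored to" frames plus
the original allocation, and that origin_history_per_stack_limit caps how
many chains a single store site may create. A standalone sketch of the same
chaining effect, illustrative only (exact report wording may differ):

    // Compile with:
    //   clang++ -fsanitize=memory -fsanitize-memory-track-origins=2 chain.cc
    #include <string.h>

    int main() {
      int *p = new int;             // link 0: created by a heap allocation
      int a, b;
      memcpy(&a, p, sizeof(int));   // link 1: uninit value stored to memory
      memcpy(&b, &a, sizeof(int));  // link 2: another store, another link
      return b;                     // use-of-uninitialized-value; the report
                                    // walks links 2 -> 1 -> 0
    }
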
Propchange: compiler-rt/trunk/test/msan/chained_origin_limits.cc
------------------------------------------------------------------------------
    svn:eol-style = LF

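The two options exercised by the RUN lines above are the new user-facing
limits: origin_history_size bounds how long any one origin chain may grow,
and origin_history_per_stack_limit bounds how many distinct chains a single
store stack may spawn. Purely as a sketch of that bounding idea -- all names
below are hypothetical, and the real logic lives in msan_origin.h -- the
shape is to reuse the previous origin id once a limit is hit, instead of
allocating another chained node:

    typedef unsigned int u32;

    struct HistoryLimits {
      int history_size;     // MSAN_OPTIONS=origin_history_size (0 = no limit)
      int per_stack_limit;  // MSAN_OPTIONS=origin_history_per_stack_limit
    };

    // Hypothetical sketch, not the actual msan code: pick the id to store.
    u32 MaybeChainOrigin(u32 prev_id, int prev_depth, int chains_from_stack,
                         u32 fresh_chained_id, const HistoryLimits &lim) {
      if (lim.history_size > 0 && prev_depth + 1 >= lim.history_size)
        return prev_id;  // chain already as long as allowed; stop growing it
      if (lim.per_stack_limit > 0 && chains_from_stack >= lim.per_stack_limit)
        return prev_id;  // this store site used up its quota of new chains
      return fresh_chained_id;
    }

Either way the history data stays bounded, which is the exponential-growth
failure mode this test guards against.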