[compiler-rt] r286478 - [lsan] Fix a rare LSan false positive: ensure that we don't re-sort the chunks_ array while iterating over it. A test is hard to create, but I've added a consistency check that fires without the fix on existing tests. The bug analysis and the initial patch were provided by Pierre Bourdon

Kostya Serebryany via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 10 09:27:29 PST 2016


Author: kcc
Date: Thu Nov 10 11:27:28 2016
New Revision: 286478

URL: http://llvm.org/viewvc/llvm-project?rev=286478&view=rev
Log:
[lsan] Fix a rare LSan false positive: ensure that we don't re-sort the chunks_ array while iterating over it. A test is hard to create, but I've added a consistency check that fires without the fix on existing tests. The bug analysis and the initial patch were provided by Pierre Bourdon
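
For context, here is a minimal, self-contained sketch of the failure mode (toy code with illustrative names, not the sanitizer sources): the old ForEachChunk iterated chunks_ by index, and a callback could trigger the lazy one-time sort inside GetBlockBeginFastLocked, reordering entries the loop had already passed. The loop can then skip some chunks and visit others twice; a skipped reachable chunk is later reported as a leak.

#include <algorithm>
#include <cstdio>

// Toy stand-ins for chunks_ / n_chunks_ / chunks_sorted_.
static int chunks[] = {30, 10, 40, 20};  // unsorted, as after fresh allocations
static const int n = 4;
static bool sorted = false;

// Stands in for GetBlockBeginFastLocked's lazy one-time sort.
static void LazyLookup() {
  if (!sorted) {
    std::sort(chunks, chunks + n);
    sorted = true;
  }
}

int main() {
  // Stands in for the old ForEachChunk: visit by index, then run a
  // callback that may sort the array underneath the loop.
  for (int i = 0; i < n; i++) {
    std::printf("visiting %d\n", chunks[i]);
    LazyLookup();  // first call reorders entries already behind the loop
  }
  // Prints 30, 20, 30, 40: chunk 10 is never visited (a false "leak"),
  // while chunk 30 is visited twice.
  return 0;
}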

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h?rev=286478&r1=286477&r2=286478&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h Thu Nov 10 11:27:28 2016
@@ -161,6 +161,14 @@ class LargeMmapAllocator {
     return GetUser(h);
   }
 
+  void EnsureSortedChunks() {
+    if (chunks_sorted_) return;
+    SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
+    for (uptr i = 0; i < n_chunks_; i++)
+      chunks_[i]->chunk_idx = i;
+    chunks_sorted_ = true;
+  }
+
   // This function does the same as GetBlockBegin, but is much faster.
   // Must be called with the allocator locked.
   void *GetBlockBeginFastLocked(void *ptr) {
@@ -168,16 +176,10 @@ class LargeMmapAllocator {
     uptr p = reinterpret_cast<uptr>(ptr);
     uptr n = n_chunks_;
     if (!n) return nullptr;
-    if (!chunks_sorted_) {
-      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
-      SortArray(reinterpret_cast<uptr*>(chunks_), n);
-      for (uptr i = 0; i < n; i++)
-        chunks_[i]->chunk_idx = i;
-      chunks_sorted_ = true;
-      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
-      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
-          chunks_[n - 1]->map_size;
-    }
+    EnsureSortedChunks();
+    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+    auto max_mmap_ =
+        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
     if (p < min_mmap_ || p >= max_mmap_)
       return nullptr;
     uptr beg = 0, end = n - 1;
@@ -230,8 +232,14 @@ class LargeMmapAllocator {
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.
   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-    for (uptr i = 0; i < n_chunks_; i++)
+    EnsureSortedChunks();  // Avoid doing the sort while iterating.
+    for (uptr i = 0; i < n_chunks_; i++) {
+      auto t = chunks_[i];
       callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+      // Consistency check: verify that the array did not change.
+      CHECK_EQ(chunks_[i], t);
+      CHECK_EQ(chunks_[i]->chunk_idx, i);
+    }
   }
 
  private:
@@ -263,7 +271,6 @@ class LargeMmapAllocator {
   uptr page_size_;
   Header *chunks_[kMaxNumChunks];
   uptr n_chunks_;
-  uptr min_mmap_, max_mmap_;
   bool chunks_sorted_;
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
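
Applied to the toy example above, the fix pattern looks like this (again a sketch with illustrative names, not the sanitizer sources): do the one-time sort before the loop starts, and assert during iteration that the array has not been reshuffled. This mirrors the new EnsureSortedChunks() plus the CHECK_EQ consistency checks, which fire on existing tests if the up-front sort is removed.

#include <algorithm>
#include <cassert>
#include <cstdio>

static int chunks[] = {30, 10, 40, 20};
static const int n = 4;
static bool sorted = false;

// Mirrors EnsureSortedChunks(): perform the lazy sort up front, once.
static void EnsureSorted() {
  if (sorted) return;
  std::sort(chunks, chunks + n);
  sorted = true;
}

int main() {
  EnsureSorted();  // sort before iterating, never during
  for (int i = 0; i < n; i++) {
    int t = chunks[i];
    std::printf("visiting %d\n", chunks[i]);
    EnsureSorted();          // a callback-triggered sort is now a no-op
    assert(chunks[i] == t);  // mirrors the CHECK_EQ consistency check
  }
  // Prints 10, 20, 30, 40: every chunk is visited exactly once.
  return 0;
}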

More information about the llvm-commits mailing list