[PATCH] D40754: [sanitizer] 64-bit allocator's PopulateFreeArray partial refactor

Kostya Kortchinsky via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 1 14:19:45 PST 2017


cryptoad created this revision.
Herald added a subscriber: kubamracek.

This is an attempt at making `PopulateFreeArray` less obscure, more consistent,
and a tiny bit faster in some circumstances:

- use more consistent variable names, that work both for the user & the metadata portions of the code; the purpose of the code is mostly the same for both regions, so it makes sense that the code should be mostly similar as well;
- replace the incremental `while`-loop summations with a single `RoundUpTo`;
- gate most of the metadata computations behind kMetadataSize, allowing some blocks to be completely optimized out when metadata is not used;


https://reviews.llvm.org/D40754

Files:
  lib/sanitizer_common/sanitizer_allocator_primary64.h


Index: lib/sanitizer_common/sanitizer_allocator_primary64.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -664,13 +664,13 @@
   NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                   RegionInfo *region, uptr requested_count) {
     // region->mutex is held.
-    const uptr size = ClassIdToSize(class_id);
-    const uptr new_space_beg = region->allocated_user;
-    const uptr new_space_end = new_space_beg + requested_count * size;
     const uptr region_beg = GetRegionBeginBySizeClass(class_id);
+    const uptr size = ClassIdToSize(class_id);
 
+    const uptr total_user_bytes =
+        region->allocated_user + requested_count * size;
     // Map more space for chunks, if necessary.
-    if (new_space_end > region->mapped_user) {
+    if (LIKELY(total_user_bytes > region->mapped_user)) {
       if (UNLIKELY(region->mapped_user == 0)) {
         if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
           // The random state is initialized from ASLR.
@@ -685,26 +685,25 @@
           region->rtoi.last_release_at_ns = NanoTime();
       }
       // Do the mmap for the user memory.
-      uptr map_size = kUserMapSize;
-      while (new_space_end > region->mapped_user + map_size)
-        map_size += kUserMapSize;
-      CHECK_GE(region->mapped_user + map_size, new_space_end);
+      const uptr user_map_size =
+          RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
       if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
-                                    map_size)))
+                                    user_map_size)))
         return false;
-      stat->Add(AllocatorStatMapped, map_size);
-      region->mapped_user += map_size;
+      stat->Add(AllocatorStatMapped, user_map_size);
+      region->mapped_user += user_map_size;
     }
-    const uptr new_chunks_count = (region->mapped_user - new_space_beg) / size;
+    const uptr new_chunks_count =
+        (region->mapped_user - region->allocated_user) / size;
 
     // Calculate the required space for metadata.
-    const uptr requested_allocated_meta =
-        region->allocated_meta + new_chunks_count * kMetadataSize;
-    uptr requested_mapped_meta = region->mapped_meta;
-    while (requested_allocated_meta > requested_mapped_meta)
-      requested_mapped_meta += kMetaMapSize;
+    const uptr total_meta_bytes = kMetadataSize ?
+        region->allocated_meta + new_chunks_count * kMetadataSize : 0;
+    const uptr meta_map_size =
+        (kMetadataSize && total_meta_bytes > region->mapped_meta) ?
+        RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
     // Check whether this size class is exhausted.
-    if (region->mapped_user + requested_mapped_meta >
+    if (region->mapped_user + region->mapped_meta + meta_map_size >
         kRegionSize - kFreeArraySize) {
       if (!region->exhausted) {
         region->exhausted = true;
@@ -715,21 +714,21 @@
       return false;
     }
     // Map more space for metadata, if necessary.
-    if (requested_mapped_meta > region->mapped_meta) {
+    if (meta_map_size) {
       if (UNLIKELY(!MapWithCallback(
-              GetMetadataEnd(region_beg) - requested_mapped_meta,
-              requested_mapped_meta - region->mapped_meta)))
+          GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
+          meta_map_size)))
         return false;
-      region->mapped_meta = requested_mapped_meta;
+      region->mapped_meta += meta_map_size;
     }
 
     // If necessary, allocate more space for the free array and populate it with
     // newly allocated chunks.
     const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
     if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
       return false;
     CompactPtrT *free_array = GetFreeArray(region_beg);
-    for (uptr i = 0, chunk = new_space_beg; i < new_chunks_count;
+    for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
          i++, chunk += size)
       free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
     if (kRandomShuffleChunks)
@@ -741,7 +740,7 @@
     region->num_freed_chunks += new_chunks_count;
     region->allocated_user += new_chunks_count * size;
     CHECK_LE(region->allocated_user, region->mapped_user);
-    region->allocated_meta = requested_allocated_meta;
+    region->allocated_meta = total_meta_bytes;
     CHECK_LE(region->allocated_meta, region->mapped_meta);
     region->exhausted = false;
 


-------------- next part --------------
A non-text attachment was scrubbed...
Name: D40754.125225.patch
Type: text/x-patch
Size: 4688 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20171201/92e6c3c4/attachment.bin>


More information about the llvm-commits mailing list