[compiler-rt] r301015 - [scudo] Bypass Quarantine if its size is set to 0

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 21 11:10:53 PDT 2017


Author: cryptoad
Date: Fri Apr 21 13:10:53 2017
New Revision: 301015

URL: http://llvm.org/viewvc/llvm-project?rev=301015&view=rev
Log:
[scudo] Bypass Quarantine if its size is set to 0

Summary:
Currently, the deallocation path puts a chunk in the Quarantine whether the
Quarantine is enabled or not (that is, even when its size is set to 0). When
the Quarantine is disabled, this results in the header being loaded (and
checked) twice, and stored (and checksummed) once, across `deallocate` and
`Recycle`.

This change introduces a `quarantineOrDeallocateChunk` function with a fast
path straight to deallocation when the Quarantine is disabled. Even though
this is not the preferred configuration security-wise, it saves a sizeable
amount of processing in that particular situation (a configuration that could
be adopted by low-memory devices). Additionally, it simplifies `deallocate`
and `reallocate` a bit.
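
To illustrate the shape of the fast path outside of the Scudo internals, here
is a minimal standalone C++ sketch of the pattern; the names ToyQuarantine,
releaseToBackend and quarantineOrDeallocate are hypothetical, and the actual
implementation is in the diff below:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <deque>
    #include <utility>

    // Hypothetical stand-in for returning a block to the backend allocator.
    static void releaseToBackend(void *Ptr) { std::free(Ptr); }

    // Hypothetical toy quarantine: freed blocks are held for a while before
    // being recycled, so use-after-free accesses hit quarantined memory.
    struct ToyQuarantine {
      std::size_t MaxSize = 0;  // A size of 0 means the quarantine is disabled.
      std::size_t CurrentSize = 0;
      std::deque<std::pair<void *, std::size_t>> Blocks;

      void put(void *Ptr, std::size_t Size) {
        Blocks.emplace_back(Ptr, Size);
        CurrentSize += Size;
        // Recycle the oldest blocks once the quarantine overflows.
        while (CurrentSize > MaxSize && !Blocks.empty()) {
          std::pair<void *, std::size_t> Old = Blocks.front();
          Blocks.pop_front();
          CurrentSize -= Old.second;
          releaseToBackend(Old.first);
        }
      }
    };

    // The pattern introduced by this change: when the quarantine size is 0,
    // skip it entirely and hand the block straight back to the backend,
    // avoiding the extra header load/check/store round-trips described above.
    static void quarantineOrDeallocate(ToyQuarantine &Q, void *Ptr,
                                       std::size_t Size) {
      if (Q.MaxSize == 0) {
        releaseToBackend(Ptr);  // Fast path: quarantine disabled.
      } else {
        Q.put(Ptr, Size);       // Regular path: delay the actual free.
      }
    }

    int main() {
      ToyQuarantine Disabled;          // MaxSize == 0: bypassed.
      ToyQuarantine Enabled{1 << 20};  // 1 MB quarantine.
      quarantineOrDeallocate(Disabled, std::malloc(64), 64);  // freed now
      quarantineOrDeallocate(Enabled, std::malloc(64), 64);   // held
      std::printf("quarantined bytes: %zu\n", Enabled.CurrentSize);
      return 0;
    }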

Reviewers: dvyukov, kcc, alekseyshl

Reviewed By: dvyukov

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D32310

Modified:
    compiler-rt/trunk/lib/scudo/scudo_allocator.cpp

Modified: compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_allocator.cpp?rev=301015&r1=301014&r2=301015&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator.cpp (original)
+++ compiler-rt/trunk/lib/scudo/scudo_allocator.cpp Fri Apr 21 13:10:53 2017
@@ -460,6 +460,38 @@ struct ScudoAllocator {
     return UserPtr;
   }
 
+  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
+  // we directly deallocate the chunk, otherwise the flow would lead to the
+  // chunk being checksummed twice, once before Put and once in Recycle, with
+  // no additional security value.
+  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
+                                   uptr Size) {
+    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
+    if (BypassQuarantine) {
+      Chunk->eraseHeader();
+      void *Ptr = Chunk->getAllocBeg(Header);
+      if (LIKELY(!ThreadTornDown)) {
+        getBackendAllocator().Deallocate(&Cache, Ptr);
+      } else {
+        SpinMutexLock Lock(&FallbackMutex);
+        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
+      }
+    } else {
+      UnpackedHeader NewHeader = *Header;
+      NewHeader.State = ChunkQuarantine;
+      Chunk->compareExchangeHeader(&NewHeader, Header);
+      if (LIKELY(!ThreadTornDown)) {
+        AllocatorQuarantine.Put(&ThreadQuarantineCache,
+                                QuarantineCallback(&Cache), Chunk, Size);
+      } else {
+        SpinMutexLock l(&FallbackMutex);
+        AllocatorQuarantine.Put(&FallbackQuarantineCache,
+                                QuarantineCallback(&FallbackAllocatorCache),
+                                Chunk, Size);
+      }
+    }
+  }
+
   // Deallocates a Chunk, which means adding it to the delayed free list (or
   // Quarantine).
   void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
@@ -499,24 +531,12 @@ struct ScudoAllocator {
       }
     }
 
-    UnpackedHeader NewHeader = OldHeader;
-    NewHeader.State = ChunkQuarantine;
-    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
-
     // If a small memory amount was allocated with a larger alignment, we want
     // to take that into account. Otherwise the Quarantine would be filled with
-    // tiny chunks, taking a lot of VA memory. This an approximation of the
+    // tiny chunks, taking a lot of VA memory. This is an approximation of the
     // usable size, that allows us to not call GetActuallyAllocatedSize.
     uptr LiableSize = Size + (OldHeader.Offset << MinAlignment);
-    if (LIKELY(!ThreadTornDown)) {
-      AllocatorQuarantine.Put(&ThreadQuarantineCache,
-                              QuarantineCallback(&Cache), Chunk, LiableSize);
-    } else {
-      SpinMutexLock l(&FallbackMutex);
-      AllocatorQuarantine.Put(&FallbackQuarantineCache,
-                              QuarantineCallback(&FallbackAllocatorCache),
-                              Chunk, LiableSize);
-    }
+    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
   }
 
   // Reallocates a chunk. We can save on a new allocation if the new requested
@@ -541,11 +561,11 @@ struct ScudoAllocator {
                      OldPtr);
     }
     uptr UsableSize = Chunk->getUsableSize(&OldHeader);
-    UnpackedHeader NewHeader = OldHeader;
     // The new size still fits in the current chunk, and the size difference
     // is reasonable.
     if (NewSize <= UsableSize &&
         (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
+      UnpackedHeader NewHeader = OldHeader;
       NewHeader.SizeOrUnusedBytes =
                 OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
       Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
@@ -558,17 +578,7 @@ struct ScudoAllocator {
       uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
           UsableSize - OldHeader.SizeOrUnusedBytes;
       memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
-      NewHeader.State = ChunkQuarantine;
-      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
-      if (LIKELY(!ThreadTornDown)) {
-        AllocatorQuarantine.Put(&ThreadQuarantineCache,
-                                QuarantineCallback(&Cache), Chunk, UsableSize);
-      } else {
-        SpinMutexLock l(&FallbackMutex);
-        AllocatorQuarantine.Put(&FallbackQuarantineCache,
-                                QuarantineCallback(&FallbackAllocatorCache),
-                                Chunk, UsableSize);
-      }
+      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
     }
     return NewPtr;
   }


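For completeness, the zero-sized configuration that triggers this fast path is
selected through Scudo's runtime flags, either via the SCUDO_OPTIONS
environment variable or by defining __scudo_default_options in the binary. The
flag name QuarantineSizeMb below is an assumption based on the Scudo options
of this period; see scudo_flags.inc for the authoritative name and default.

    // Hypothetical example of opting into a disabled (zero-sized) Quarantine
    // from the instrumented binary itself. The option string is an assumption;
    // consult scudo_flags.inc for the exact flag name.
    extern "C" const char *__scudo_default_options() {
      // A Quarantine size of 0 makes deallocations take the new fast path in
      // quarantineOrDeallocateChunk instead of going through Put/Recycle.
      return "QuarantineSizeMb=0";
    }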