[compiler-rt] r366918 - [scudo][standalone] Optimization pass

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 24 09:36:01 PDT 2019


Author: cryptoad
Date: Wed Jul 24 09:36:01 2019
New Revision: 366918

URL: http://llvm.org/viewvc/llvm-project?rev=366918&view=rev
Log:
[scudo][standalone] Optimization pass

Summary:
This introduces a bunch of small optimizations with the purpose of
making the fastpath tighter:
- tag more conditions as `LIKELY`/`UNLIKELY`: as a rule of thumb we
  consider that every operation related to the secondary is unlikely
- attempt to reduce the number of potentially extraneous instructions
- reorganize the `Chunk` header to not straddle a word boundary and
  use more appropriate types

Note that some `LIKELY`/`UNLIKELY` impact might be less obvious as
they are in slow paths (for example in `secondary.cc`), but at this
point I am throwing a pretty wide net, and it's consistent and doesn't
hurt.

This was mostly done for the benefit of Android, but other platforms
benefit from it too. An aarch64 Android benchmark gives:
- before:
```
  BM_youtube/min_time:15.000/repeats:4/manual_time_mean              445244 us       659385 us            4
  BM_youtube/min_time:15.000/repeats:4/manual_time_median            445007 us       658970 us            4
  BM_youtube/min_time:15.000/repeats:4/manual_time_stddev               885 us         1332 us            4
```
- after:
```
  BM_youtube/min_time:15.000/repeats:4/manual_time_mean       415697 us       621925 us            4
  BM_youtube/min_time:15.000/repeats:4/manual_time_median     415913 us       622061 us            4
  BM_youtube/min_time:15.000/repeats:4/manual_time_stddev        990 us         1163 us            4
```

Additionally, since `-Werror=conversion` is enabled on some platforms we
are built on, enable it upstream to catch things early: a few sign
conversions had slipped through and needed additional casting.

Reviewers: hctim, morehouse, eugenis, vitalybuka

Reviewed By: vitalybuka

Subscribers: srhines, mgorny, javed.absar, kristof.beyls, delcypher, #sanitizers, llvm-commits

Tags: #llvm, #sanitizers

Differential Revision: https://reviews.llvm.org/D64664

Modified:
    compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt
    compiler-rt/trunk/lib/scudo/standalone/chunk.h
    compiler-rt/trunk/lib/scudo/standalone/combined.h
    compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
    compiler-rt/trunk/lib/scudo/standalone/local_cache.h
    compiler-rt/trunk/lib/scudo/standalone/mutex.h
    compiler-rt/trunk/lib/scudo/standalone/primary32.h
    compiler-rt/trunk/lib/scudo/standalone/primary64.h
    compiler-rt/trunk/lib/scudo/standalone/secondary.cc
    compiler-rt/trunk/lib/scudo/standalone/secondary.h
    compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
    compiler-rt/trunk/lib/scudo/standalone/string_utils.cc
    compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
    compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h

Modified: compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt (original)
+++ compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt Wed Jul 24 09:36:01 2019
@@ -5,6 +5,7 @@ include_directories(../..)
 set(SCUDO_CFLAGS)
 
 list(APPEND SCUDO_CFLAGS
+  -Werror=conversion
   -Wall
   -nostdinc++)
 

Modified: compiler-rt/trunk/lib/scudo/standalone/chunk.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/chunk.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/chunk.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/chunk.h Wed Jul 24 09:36:01 2019
@@ -29,15 +29,15 @@ INLINE u16 computeChecksum(u32 Seed, upt
   u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
   for (uptr I = 0; I < ArraySize; I++)
     Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
-  return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+  return static_cast<u16>(Crc ^ (Crc >> 16));
 #else
   if (HashAlgorithm == Checksum::HardwareCRC32) {
     u32 Crc = computeHardwareCRC32(Seed, Value);
     for (uptr I = 0; I < ArraySize; I++)
       Crc = computeHardwareCRC32(Crc, Array[I]);
-    return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+    return static_cast<u16>(Crc ^ (Crc >> 16));
   } else {
-    u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed & 0xffff), Value);
+    u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed), Value);
     for (uptr I = 0; I < ArraySize; I++)
       Checksum = computeBSDChecksum(Checksum, Array[I]);
     return Checksum;
@@ -63,24 +63,24 @@ enum State : u8 { Available = 0, Allocat
 typedef u64 PackedHeader;
 // Update the 'Mask' constants to reflect changes in this structure.
 struct UnpackedHeader {
-  u64 Checksum : 16;
-  u64 ClassId : 8;
-  u64 SizeOrUnusedBytes : 20;
+  uptr ClassId : 8;
   u8 State : 2;
   u8 Origin : 2;
-  u64 Offset : 16;
+  uptr SizeOrUnusedBytes : 20;
+  uptr Offset : 16;
+  uptr Checksum : 16;
 };
 typedef atomic_u64 AtomicPackedHeader;
 COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
 
 // Those constants are required to silence some -Werror=conversion errors when
 // assigning values to the related bitfield variables.
-constexpr uptr ChecksumMask = (1UL << 16) - 1;
 constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr u8 StateMask = (1U << 2) - 1;
+constexpr u8 OriginMask = (1U << 2) - 1;
 constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
-constexpr uptr StateMask = (1UL << 2) - 1;
-constexpr uptr OriginMask = (1UL << 2) - 1;
 constexpr uptr OffsetMask = (1UL << 16) - 1;
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
 
 constexpr uptr getHeaderSize() {
   return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);

Modified: compiler-rt/trunk/lib/scudo/standalone/combined.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/combined.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/combined.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/combined.h Wed Jul 24 09:36:01 2019
@@ -46,8 +46,8 @@ public:
       Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
 
       void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
-      const uptr ClassId = Header.ClassId;
-      if (ClassId)
+      const uptr ClassId = NewHeader.ClassId;
+      if (LIKELY(ClassId))
         Cache.deallocate(ClassId, BlockBegin);
       else
         Allocator.Secondary.deallocate(BlockBegin);
@@ -123,14 +123,16 @@ public:
     Options.ZeroContents = getFlags()->zero_contents;
     Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
     Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
-    Options.QuarantineMaxChunkSize = getFlags()->quarantine_max_chunk_size;
+    Options.QuarantineMaxChunkSize =
+        static_cast<u32>(getFlags()->quarantine_max_chunk_size);
 
     Stats.initLinkerInitialized();
     Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
     Secondary.initLinkerInitialized(&Stats);
 
-    Quarantine.init(getFlags()->quarantine_size_kb << 10,
-                    getFlags()->thread_local_quarantine_size_kb << 10);
+    Quarantine.init(
+        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
   }
 
   void reset() { memset(this, 0, sizeof(*this)); }
@@ -165,16 +167,17 @@ public:
         return nullptr;
       reportAlignmentTooBig(Alignment, MaxAlignment);
     }
-    if (UNLIKELY(Alignment < MinAlignment))
+    if (Alignment < MinAlignment)
       Alignment = MinAlignment;
 
     // If the requested size happens to be 0 (more common than you might think),
-    // allocate 1 byte on top of the header. Then add the extra bytes required
-    // to fulfill the alignment requirements: we allocate enough to be sure that
-    // there will be an address in the block that will satisfy the alignment.
+    // allocate MinAlignment bytes on top of the header. Then add the extra
+    // bytes required to fulfill the alignment requirements: we allocate enough
+    // to be sure that there will be an address in the block that will satisfy
+    // the alignment.
     const uptr NeededSize =
-        Chunk::getHeaderSize() + roundUpTo(Size ? Size : 1, MinAlignment) +
-        ((Alignment > MinAlignment) ? (Alignment - Chunk::getHeaderSize()) : 0);
+        roundUpTo(Size, MinAlignment) +
+        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
 
     // Takes care of extravagantly large sizes as well as integer overflows.
     if (UNLIKELY(Size >= MaxAllowedMallocSize ||
@@ -186,9 +189,10 @@ public:
 
     void *Block;
     uptr ClassId;
-    uptr BlockEnd = 0;
-    if (PrimaryT::canAllocate(NeededSize)) {
+    uptr BlockEnd;
+    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+      DCHECK_NE(ClassId, 0U);
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
       Block = TSD->Cache.allocate(ClassId);
@@ -205,17 +209,17 @@ public:
       reportOutOfMemory(NeededSize);
     }
 
-    // We only need to zero the contents for Primary backed allocations.
-    if ((ZeroContents || Options.ZeroContents) && ClassId)
+    // We only need to zero the contents for Primary backed allocations. This
+    // condition is not necessarily unlikely, but since memset is costly, we
+    // might as well mark it as such.
+    if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
       memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
 
     Chunk::UnpackedHeader Header = {};
     uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
-    // The following condition isn't necessarily "UNLIKELY".
-    if (!isAligned(UserPtr, Alignment)) {
+    if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
       const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
       const uptr Offset = AlignedUserPtr - UserPtr;
-      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
       DCHECK_GT(Offset, 2 * sizeof(u32));
       // The BlockMarker has no security purpose, but is specifically meant for
       // the chunk iteration function that can be used in debugging situations.
@@ -224,16 +228,13 @@ public:
       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
       UserPtr = AlignedUserPtr;
+      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
     }
+    Header.ClassId = ClassId & Chunk::ClassIdMask;
     Header.State = Chunk::State::Allocated;
     Header.Origin = Origin & Chunk::OriginMask;
-    if (ClassId) {
-      Header.ClassId = ClassId & Chunk::ClassIdMask;
-      Header.SizeOrUnusedBytes = Size & Chunk::SizeOrUnusedBytesMask;
-    } else {
-      Header.SizeOrUnusedBytes =
-          (BlockEnd - (UserPtr + Size)) & Chunk::SizeOrUnusedBytesMask;
-    }
+    Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
+                               Chunk::SizeOrUnusedBytesMask;
     void *Ptr = reinterpret_cast<void *>(UserPtr);
     Chunk::storeHeader(Cookie, Ptr, &Header);
 
@@ -313,7 +314,7 @@ public:
     const uptr OldSize = getSize(OldPtr, &OldHeader);
     // If the new size is identical to the old one, or lower but within an
     // acceptable range, we just keep the old chunk, and update its header.
-    if (NewSize == OldSize)
+    if (UNLIKELY(NewSize == OldSize))
       return OldPtr;
     if (NewSize < OldSize) {
       const uptr Delta = OldSize - NewSize;
@@ -471,8 +472,7 @@ private:
     // last and last class sizes, as well as the dynamic base for the Primary.
     // The following is an over-approximation that works for our needs.
     const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
-    Header.SizeOrUnusedBytes =
-        MaxSizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
     if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
       reportSanityCheckError("size (or unused bytes)");
 
@@ -484,15 +484,15 @@ private:
 
   static INLINE void *getBlockBegin(const void *Ptr,
                                     Chunk::UnpackedHeader *Header) {
-    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
-                                    Chunk::getHeaderSize() -
-                                    (Header->Offset << MinAlignmentLog));
+    return reinterpret_cast<void *>(
+        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
+        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
   }
 
   // Return the size of a chunk as requested during its allocation.
   INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
-    if (Header->ClassId)
+    if (LIKELY(Header->ClassId))
       return SizeOrUnusedBytes;
     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
@@ -514,7 +514,7 @@ private:
       Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
       void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
       const uptr ClassId = NewHeader.ClassId;
-      if (ClassId) {
+      if (LIKELY(ClassId)) {
         bool UnlockRequired;
         auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
         TSD->Cache.deallocate(ClassId, BlockBegin);

Modified: compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc Wed Jul 24 09:36:01 2019
@@ -40,7 +40,7 @@ static void *allocateVmar(uptr Size, Map
       _zx_vmar_root_self(),
       ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
       Size, &Data->Vmar, &Data->VmarBase);
-  if (Status != ZX_OK) {
+  if (UNLIKELY(Status != ZX_OK)) {
     if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
@@ -78,7 +78,7 @@ void *map(void *Addr, uptr Size, const c
   } else {
     // Otherwise, create a Vmo and set its name.
     Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
-    if (Status != ZX_OK) {
+    if (UNLIKELY(Status != ZX_OK)) {
       if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
         dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
       return nullptr;
@@ -102,7 +102,7 @@ void *map(void *Addr, uptr Size, const c
   } else {
     CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
   }
-  if (Status != ZX_OK) {
+  if (UNLIKELY(Status != ZX_OK)) {
     if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
@@ -125,7 +125,7 @@ void unmap(void *Addr, uptr Size, uptr F
     const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
     const zx_status_t Status =
         _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
-    if (Status != ZX_OK)
+    if (UNLIKELY(Status != ZX_OK))
       dieOnMapUnmapError();
   }
   if (Data) {
@@ -172,7 +172,7 @@ u32 getNumberOfCPUs() { return _zx_syste
 
 bool getRandom(void *Buffer, uptr Length, bool Blocking) {
   COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
-  if (!Buffer || !Length || Length > MaxRandomLength)
+  if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
     return false;
   _zx_cprng_draw(Buffer, Length);
   return true;

Modified: compiler-rt/trunk/lib/scudo/standalone/local_cache.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/local_cache.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/local_cache.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/local_cache.h Wed Jul 24 09:36:01 2019
@@ -22,9 +22,8 @@ template <class SizeClassAllocator> stru
     static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
     void setFromArray(void **Array, u32 N) {
       DCHECK_LE(N, MaxNumCached);
-      for (u32 I = 0; I < N; I++)
-        Batch[I] = Array[I];
       Count = N;
+      memcpy(Batch, Array, sizeof(void *) * Count);
     }
     void clear() { Count = 0; }
     void add(void *P) {
@@ -32,8 +31,7 @@ template <class SizeClassAllocator> stru
       Batch[Count++] = P;
     }
     void copyToArray(void **Array) const {
-      for (u32 I = 0; I < Count; I++)
-        Array[I] = Batch[I];
+      memcpy(Array, Batch, sizeof(void *) * Count);
     }
     u32 getCount() const { return Count; }
     void *get(u32 I) const {
@@ -52,7 +50,7 @@ template <class SizeClassAllocator> stru
 
   void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
     Stats.initLinkerInitialized();
-    if (S)
+    if (LIKELY(S))
       S->link(&Stats);
     Allocator = A;
   }
@@ -64,12 +62,12 @@ template <class SizeClassAllocator> stru
 
   void destroy(GlobalStats *S) {
     drain();
-    if (S)
+    if (LIKELY(S))
       S->unlink(&Stats);
   }
 
   void *allocate(uptr ClassId) {
-    CHECK_LT(ClassId, NumClasses);
+    DCHECK_LT(ClassId, NumClasses);
     PerClass *C = &PerClassArray[ClassId];
     if (C->Count == 0) {
       if (UNLIKELY(!refill(C, ClassId)))
@@ -157,8 +155,8 @@ private:
     if (UNLIKELY(!B))
       return false;
     DCHECK_GT(B->getCount(), 0);
-    B->copyToArray(C->Chunks);
     C->Count = B->getCount();
+    B->copyToArray(C->Chunks);
     destroyBatch(ClassId, B);
     return true;
   }

Modified: compiler-rt/trunk/lib/scudo/standalone/mutex.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/mutex.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/mutex.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/mutex.h Wed Jul 24 09:36:01 2019
@@ -25,12 +25,12 @@ public:
   void init() { memset(this, 0, sizeof(*this)); }
   bool tryLock();
   NOINLINE void lock() {
-    if (tryLock())
+    if (LIKELY(tryLock()))
       return;
-      // The compiler may try to fully unroll the loop, ending up in a
-      // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
-      // is large, ugly and unneeded, a compact loop is better for our purpose
-      // here. Use a pragma to tell the compiler not to unroll the loop.
+    // The compiler may try to fully unroll the loop, ending up in a
+    // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+    // is large, ugly and unneeded, a compact loop is better for our purpose
+    // here. Use a pragma to tell the compiler not to unroll the loop.
 #ifdef __clang__
 #pragma nounroll
 #endif
@@ -44,8 +44,8 @@ public:
   void unlock();
 
 private:
-  static constexpr u8 NumberOfTries = 10U;
-  static constexpr u8 NumberOfYields = 10U;
+  static constexpr u8 NumberOfTries = 8U;
+  static constexpr u8 NumberOfYields = 8U;
 
 #if SCUDO_LINUX
   atomic_u32 M;

Modified: compiler-rt/trunk/lib/scudo/standalone/primary32.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/primary32.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary32.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/primary32.h Wed Jul 24 09:36:01 2019
@@ -74,7 +74,7 @@ public:
       // See comment in the 64-bit primary about releasing smaller size classes.
       Sci->CanRelease = (ReleaseToOsInterval > 0) &&
                         (I != SizeClassMap::BatchClassId) &&
-                        (getSizeByClassId(I) >= (PageSize / 32));
+                        (getSizeByClassId(I) >= (PageSize / 16));
     }
     ReleaseToOsIntervalMs = ReleaseToOsInterval;
   }
@@ -99,9 +99,9 @@ public:
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
     ScopedLock L(Sci->Mutex);
     TransferBatch *B = Sci->FreeList.front();
-    if (B)
+    if (B) {
       Sci->FreeList.pop_front();
-    else {
+    } else {
       B = populateFreeList(C, ClassId, Sci);
       if (UNLIKELY(!B))
         return nullptr;
@@ -129,7 +129,7 @@ public:
 
   void enable() {
     for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
-      getSizeClassInfo(I)->Mutex.unlock();
+      getSizeClassInfo(static_cast<uptr>(I))->Mutex.unlock();
   }
 
   template <typename F> void iterateOverBlocks(F Callback) {
@@ -356,7 +356,8 @@ private:
       const s32 IntervalMs = ReleaseToOsIntervalMs;
       if (IntervalMs < 0)
         return;
-      if (Sci->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+      if (Sci->ReleaseInfo.LastReleaseAtNs +
+              static_cast<uptr>(IntervalMs) * 1000000ULL >
           getMonotonicTime()) {
         return; // Memory was returned recently.
       }

Modified: compiler-rt/trunk/lib/scudo/standalone/primary64.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/primary64.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary64.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/primary64.h Wed Jul 24 09:36:01 2019
@@ -81,7 +81,7 @@ public:
       // TODO(kostyak): make the lower limit a runtime option
       Region->CanRelease = (ReleaseToOsInterval > 0) &&
                            (I != SizeClassMap::BatchClassId) &&
-                           (getSizeByClassId(I) >= (PageSize / 32));
+                           (getSizeByClassId(I) >= (PageSize / 16));
       Region->RandState = getRandomU32(&Seed);
     }
     ReleaseToOsIntervalMs = ReleaseToOsInterval;
@@ -102,9 +102,9 @@ public:
     RegionInfo *Region = getRegionInfo(ClassId);
     ScopedLock L(Region->Mutex);
     TransferBatch *B = Region->FreeList.front();
-    if (B)
+    if (B) {
       Region->FreeList.pop_front();
-    else {
+    } else {
       B = populateFreeList(C, ClassId, Region);
       if (UNLIKELY(!B))
         return nullptr;
@@ -131,7 +131,7 @@ public:
 
   void enable() {
     for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
-      getRegionInfo(I)->Mutex.unlock();
+      getRegionInfo(static_cast<uptr>(I))->Mutex.unlock();
   }
 
   template <typename F> void iterateOverBlocks(F Callback) const {
@@ -181,7 +181,7 @@ private:
   static const uptr PrimarySize = RegionSize * NumClasses;
 
   // Call map for user memory with at least this size.
-  static const uptr MapSizeIncrement = 1UL << 16;
+  static const uptr MapSizeIncrement = 1UL << 17;
 
   struct RegionStats {
     uptr PoppedBlocks;
@@ -272,7 +272,7 @@ private:
         }
         return nullptr;
       }
-      if (MappedUser == 0)
+      if (UNLIKELY(MappedUser == 0))
         Region->Data = Data;
       if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                         UserMapSize, "scudo:primary",
@@ -307,7 +307,7 @@ private:
         return nullptr;
     }
     DCHECK(B);
-    CHECK_GT(B->getCount(), 0);
+    DCHECK_GT(B->getCount(), 0);
 
     Region->AllocatedUser += AllocatedUser;
     Region->Exhausted = false;
@@ -355,7 +355,8 @@ private:
       const s32 IntervalMs = ReleaseToOsIntervalMs;
       if (IntervalMs < 0)
         return;
-      if (Region->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+      if (Region->ReleaseInfo.LastReleaseAtNs +
+              static_cast<uptr>(IntervalMs) * 1000000ULL >
           getMonotonicTime()) {
         return; // Memory was returned recently.
       }

Modified: compiler-rt/trunk/lib/scudo/standalone/secondary.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/secondary.cc?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.cc Wed Jul 24 09:36:01 2019
@@ -32,14 +32,14 @@ void *MapAllocator::allocate(uptr Size,
   uptr MapBase =
       reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
                                  MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
-  if (!MapBase)
+  if (UNLIKELY(!MapBase))
     return nullptr;
   uptr CommitBase = MapBase + PageSize;
   uptr MapEnd = MapBase + MapSize;
 
   // In the unlikely event of alignments larger than a page, adjust the amount
   // of memory we want to commit, and trim the extra memory.
-  if (AlignmentHint >= PageSize) {
+  if (UNLIKELY(AlignmentHint >= PageSize)) {
     // For alignments greater than or equal to a page, the user pointer (eg: the
     // pointer that is returned by the C or C++ allocation APIs) ends up on a
     // page boundary , and our headers will live in the preceding page.
@@ -73,13 +73,11 @@ void *MapAllocator::allocate(uptr Size,
   H->Data = Data;
   {
     ScopedLock L(Mutex);
-    if (!Tail) {
-      Tail = H;
-    } else {
+    if (LIKELY(Tail)) {
       Tail->Next = H;
       H->Prev = Tail;
-      Tail = H;
     }
+    Tail = H;
     AllocatedBytes += CommitSize;
     if (LargestSize < CommitSize)
       LargestSize = CommitSize;
@@ -106,7 +104,7 @@ void MapAllocator::deallocate(void *Ptr)
       CHECK_EQ(Next->Prev, H);
       Next->Prev = Prev;
     }
-    if (Tail == H) {
+    if (UNLIKELY(Tail == H)) {
       CHECK(!Next);
       Tail = Prev;
     } else {

Modified: compiler-rt/trunk/lib/scudo/standalone/secondary.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/secondary.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.h Wed Jul 24 09:36:01 2019
@@ -50,7 +50,7 @@ class MapAllocator {
 public:
   void initLinkerInitialized(GlobalStats *S) {
     Stats.initLinkerInitialized();
-    if (S)
+    if (LIKELY(S))
       S->link(&Stats);
   }
   void init(GlobalStats *S) {

Modified: compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/size_class_map.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/size_class_map.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/size_class_map.h Wed Jul 24 09:36:01 2019
@@ -138,10 +138,10 @@ typedef SizeClassMap<3, 5, 8, 17, 8, 10>
 // TODO(kostyak): further tune class maps for Android & Fuchsia.
 #if SCUDO_WORDSIZE == 64U
 typedef SizeClassMap<3, 5, 8, 15, 8, 10> SvelteSizeClassMap;
-typedef SizeClassMap<3, 5, 8, 16, 14, 12> AndroidSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
 #else
 typedef SizeClassMap<3, 4, 7, 15, 8, 10> SvelteSizeClassMap;
-typedef SizeClassMap<3, 4, 7, 16, 14, 12> AndroidSizeClassMap;
+typedef SizeClassMap<3, 4, 7, 17, 14, 14> AndroidSizeClassMap;
 #endif
 
 } // namespace scudo

Modified: compiler-rt/trunk/lib/scudo/standalone/string_utils.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/string_utils.cc?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/string_utils.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/string_utils.cc Wed Jul 24 09:36:01 2019
@@ -9,7 +9,6 @@
 #include "string_utils.h"
 #include "common.h"
 
-#include <ctype.h>
 #include <stdarg.h>
 #include <string.h>
 
@@ -44,7 +43,7 @@ static int appendNumber(char **Buffer, c
   do {
     RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
                   "appendNumber buffer overflow");
-    NumBuffer[Pos++] = AbsoluteValue % Base;
+    NumBuffer[Pos++] = static_cast<uptr>(AbsoluteValue % Base);
     AbsoluteValue /= Base;
   } while (AbsoluteValue > 0);
   if (Pos < MinNumberLength) {
@@ -117,7 +116,7 @@ static int appendPointer(char **Buffer,
 
 int formatString(char *Buffer, uptr BufferLength, const char *Format,
                  va_list Args) {
-  UNUSED static const char *PrintfFormatsHelp =
+  static const char *PrintfFormatsHelp =
       "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
       "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
   RAW_CHECK(Format);

Modified: compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h Wed Jul 24 09:36:01 2019
@@ -61,7 +61,7 @@ template <class Allocator> struct TSDReg
 private:
   void initOnceMaybe(Allocator *Instance) {
     ScopedLock L(Mutex);
-    if (Initialized)
+    if (LIKELY(Initialized))
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
   }
@@ -71,7 +71,7 @@ private:
   // used instead.
   NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
     initOnceMaybe(Instance);
-    if (MinimalInit)
+    if (UNLIKELY(MinimalInit))
       return;
     CHECK_EQ(
         pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);

Modified: compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h?rev=366918&r1=366917&r2=366918&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h Wed Jul 24 09:36:01 2019
@@ -95,7 +95,7 @@ private:
 
   void initOnceMaybe(Allocator *Instance) {
     ScopedLock L(Mutex);
-    if (Initialized)
+    if (LIKELY(Initialized))
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
   }
@@ -112,8 +112,7 @@ private:
       // Use the Precedence of the current TSD as our random seed. Since we are
       // in the slow path, it means that tryLock failed, and as a result it's
       // very likely that said Precedence is non-zero.
-      u32 RandState = static_cast<u32>(CurrentTSD->getPrecedence());
-      const u32 R = getRandomU32(&RandState);
+      const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
       const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
       u32 Index = R % NumberOfTSDs;
       uptr LowestPrecedence = UINTPTR_MAX;




More information about the llvm-commits mailing list