[compiler-rt] r335054 - [scudo] Move noinline functions definitions out of line

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 19 08:36:30 PDT 2018


Author: cryptoad
Date: Tue Jun 19 08:36:30 2018
New Revision: 335054

URL: http://llvm.org/viewvc/llvm-project?rev=335054&view=rev
Log:
[scudo] Move noinline functions definitions out of line

Summary:
Mark `isRssLimitExceeded` as `NOINLINE`, and move its definition, as well as
that of `performSanityChecks`, out of the class definition, as requested.
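
For illustration, a minimal standalone sketch of the pattern this change applies
(hypothetical `ToyAllocator`, not the actual scudo class): the `NOINLINE`
attribute stays on the in-class declaration, and the body is defined out of line
below the class, as in the diff further down.

  // Hypothetical example, assuming a GCC/Clang-style noinline attribute.
  #if defined(__GNUC__) || defined(__clang__)
  # define NOINLINE __attribute__((noinline))
  #else
  # define NOINLINE
  #endif

  struct ToyAllocator {
    NOINLINE void performSanityChecks();  // declaration only
    NOINLINE bool isRssLimitExceeded();   // body moved out of line
  };

  // Out-of-line definitions keep the class body compact while still
  // preventing the compiler from inlining these cold paths at call sites.
  NOINLINE void ToyAllocator::performSanityChecks() {}
  NOINLINE bool ToyAllocator::isRssLimitExceeded() { return false; }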

Reviewers: filcab, alekseyshl

Reviewed By: alekseyshl

Subscribers: delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D48228

Modified:
    compiler-rt/trunk/lib/scudo/scudo_allocator.cpp

Modified: compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_allocator.cpp?rev=335054&r1=335053&r2=335054&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator.cpp (original)
+++ compiler-rt/trunk/lib/scudo/scudo_allocator.cpp Tue Jun 19 08:36:30 2018
@@ -243,38 +243,7 @@ struct ScudoAllocator {
   explicit ScudoAllocator(LinkerInitialized)
     : AllocatorQuarantine(LINKER_INITIALIZED) {}
 
-  NOINLINE void performSanityChecks() {
-    // Verify that the header offset field can hold the maximum offset. In the
-    // case of the Secondary allocator, it takes care of alignment and the
-    // offset will always be 0. In the case of the Primary, the worst case
-    // scenario happens in the last size class, when the backend allocation
-    // would already be aligned on the requested alignment, which would happen
-    // to be the maximum alignment that would fit in that size class. As a
-    // result, the maximum offset will be at most the maximum alignment for the
-    // last size class minus the header size, in multiples of MinAlignment.
-    UnpackedHeader Header = {};
-    const uptr MaxPrimaryAlignment =
-        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
-    const uptr MaxOffset =
-        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
-    Header.Offset = MaxOffset;
-    if (Header.Offset != MaxOffset)
-      dieWithMessage("maximum possible offset doesn't fit in header\n");
-    // Verify that we can fit the maximum size or amount of unused bytes in the
-    // header. Given that the Secondary fits the allocation to a page, the worst
-    // case scenario happens in the Primary. It will depend on the second to
-    // last and last class sizes, as well as the dynamic base for the Primary.
-    // The following is an over-approximation that works for our needs.
-    const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
-    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
-    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
-      dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
-
-    const uptr LargestClassId = SizeClassMap::kLargestClassID;
-    Header.ClassId = LargestClassId;
-    if (Header.ClassId != LargestClassId)
-      dieWithMessage("largest class ID doesn't fit in header\n");
-  }
+  NOINLINE void performSanityChecks();
 
   void init() {
     SanitizerToolName = "Scudo";
@@ -323,37 +292,7 @@ struct ScudoAllocator {
     return Chunk::isValid(Ptr);
   }
 
-  // Opportunistic RSS limit check. This will update the RSS limit status, if
-  // it can, every 100ms, otherwise it will just return the current one.
-  bool isRssLimitExceeded() {
-    u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
-    const u64 CurrentCheck = MonotonicNanoTime();
-    if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
-      return atomic_load_relaxed(&RssLimitExceeded);
-    if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
-                                      CurrentCheck, memory_order_relaxed))
-      return atomic_load_relaxed(&RssLimitExceeded);
-    // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
-    //                RSS from /proc/self/statm by default. We might want to
-    //                call getrusage directly, even if it's less accurate.
-    const uptr CurrentRssMb = GetRSS() >> 20;
-    if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
-      dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
-                     HardRssLimitMb, CurrentRssMb);
-    if (SoftRssLimitMb) {
-      if (atomic_load_relaxed(&RssLimitExceeded)) {
-        if (CurrentRssMb <= SoftRssLimitMb)
-          atomic_store_relaxed(&RssLimitExceeded, false);
-      } else {
-        if (CurrentRssMb > SoftRssLimitMb) {
-          atomic_store_relaxed(&RssLimitExceeded, true);
-          Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
-                 SoftRssLimitMb, CurrentRssMb);
-        }
-      }
-    }
-    return atomic_load_relaxed(&RssLimitExceeded);
-  }
+  NOINLINE bool isRssLimitExceeded();
 
   // Allocates a chunk.
   void *allocate(uptr Size, uptr Alignment, AllocType Type,
@@ -622,6 +561,71 @@ struct ScudoAllocator {
   }
 };
 
+NOINLINE void ScudoAllocator::performSanityChecks() {
+  // Verify that the header offset field can hold the maximum offset. In the
+  // case of the Secondary allocator, it takes care of alignment and the
+  // offset will always be 0. In the case of the Primary, the worst case
+  // scenario happens in the last size class, when the backend allocation
+  // would already be aligned on the requested alignment, which would happen
+  // to be the maximum alignment that would fit in that size class. As a
+  // result, the maximum offset will be at most the maximum alignment for the
+  // last size class minus the header size, in multiples of MinAlignment.
+  UnpackedHeader Header = {};
+  const uptr MaxPrimaryAlignment =
+      1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
+  const uptr MaxOffset =
+      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+  Header.Offset = MaxOffset;
+  if (Header.Offset != MaxOffset)
+    dieWithMessage("maximum possible offset doesn't fit in header\n");
+  // Verify that we can fit the maximum size or amount of unused bytes in the
+  // header. Given that the Secondary fits the allocation to a page, the worst
+  // case scenario happens in the Primary. It will depend on the second to
+  // last and last class sizes, as well as the dynamic base for the Primary.
+  // The following is an over-approximation that works for our needs.
+  const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
+  Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
+  if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
+    dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
+
+  const uptr LargestClassId = SizeClassMap::kLargestClassID;
+  Header.ClassId = LargestClassId;
+  if (Header.ClassId != LargestClassId)
+    dieWithMessage("largest class ID doesn't fit in header\n");
+}
+
+// Opportunistic RSS limit check. This will update the RSS limit status, if
+// it can, every 100ms, otherwise it will just return the current one.
+NOINLINE bool ScudoAllocator::isRssLimitExceeded() {
+  u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
+  const u64 CurrentCheck = MonotonicNanoTime();
+  if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
+    return atomic_load_relaxed(&RssLimitExceeded);
+  if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
+                                    CurrentCheck, memory_order_relaxed))
+    return atomic_load_relaxed(&RssLimitExceeded);
+  // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
+  //                RSS from /proc/self/statm by default. We might want to
+  //                call getrusage directly, even if it's less accurate.
+  const uptr CurrentRssMb = GetRSS() >> 20;
+  if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
+    dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
+                   HardRssLimitMb, CurrentRssMb);
+  if (SoftRssLimitMb) {
+    if (atomic_load_relaxed(&RssLimitExceeded)) {
+      if (CurrentRssMb <= SoftRssLimitMb)
+        atomic_store_relaxed(&RssLimitExceeded, false);
+    } else {
+      if (CurrentRssMb > SoftRssLimitMb) {
+        atomic_store_relaxed(&RssLimitExceeded, true);
+        Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
+               SoftRssLimitMb, CurrentRssMb);
+      }
+    }
+  }
+  return atomic_load_relaxed(&RssLimitExceeded);
+}
+
 static ScudoAllocator Instance(LINKER_INITIALIZED);
 
 static ScudoBackendAllocator &getBackendAllocator() {
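
The two moved functions illustrate patterns worth calling out. First,
`isRssLimitExceeded` is an opportunistic, rate-limited check: a relaxed atomic
timestamp is compare-exchanged so that at most one thread per 100ms window pays
for the expensive RSS query, while all other callers return the cached flag.
A standalone, hedged sketch of that pattern (using std::atomic and std::chrono
rather than sanitizer_common primitives, and a stand-in `expensiveRefresh`
instead of GetRSS):

  #include <atomic>
  #include <chrono>
  #include <cstdint>

  namespace {
  std::atomic<uint64_t> LastCheckedAtNS{0};
  std::atomic<bool> LimitExceeded{false};

  uint64_t monotonicNanoTime() {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
               std::chrono::steady_clock::now().time_since_epoch())
        .count();
  }

  // Stand-in for the real GetRSS()-based computation.
  bool expensiveRefresh() { return false; }

  bool isLimitExceeded() {
    uint64_t LastCheck = LastCheckedAtNS.load(std::memory_order_relaxed);
    const uint64_t Now = monotonicNanoTime();
    // Fast path: within the 100ms window, just return the cached status.
    if (Now < LastCheck + 100ULL * 1000000ULL)
      return LimitExceeded.load(std::memory_order_relaxed);
    // Only the thread that wins the CAS refreshes; losers use the cached value.
    if (!LastCheckedAtNS.compare_exchange_weak(LastCheck, Now,
                                               std::memory_order_relaxed))
      return LimitExceeded.load(std::memory_order_relaxed);
    LimitExceeded.store(expensiveRefresh(), std::memory_order_relaxed);
    return LimitExceeded.load(std::memory_order_relaxed);
  }
  }  // anonymous namespace

Second, `performSanityChecks` relies on a round-trip test for packed header
fields: write the largest value a bitfield may ever need to hold, read it back,
and die at init time if it was truncated. A hedged sketch with hypothetical
field widths (not scudo's actual header layout):

  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>

  struct PackedHeader {
    uint64_t Offset : 16;  // hypothetical width
    uint64_t ClassId : 8;  // hypothetical width
  };

  void checkHeaderFits() {
    PackedHeader Header = {};
    const uint64_t MaxOffset = (1ULL << 16) - 1;  // largest value we expect
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      std::fputs("maximum possible offset doesn't fit in header\n", stderr);
      std::abort();
    }
  }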



