[compiler-rt] 87303fd - scudo: Fix various test failures, mostly on 32-bit.

Peter Collingbourne via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 11 12:19:51 PST 2020


Author: Peter Collingbourne
Date: 2020-02-11T12:18:35-08:00
New Revision: 87303fd9171199ac3082e17d4a91304bf82baeea

URL: https://github.com/llvm/llvm-project/commit/87303fd9171199ac3082e17d4a91304bf82baeea
DIFF: https://github.com/llvm/llvm-project/commit/87303fd9171199ac3082e17d4a91304bf82baeea.diff

LOG: scudo: Fix various test failures, mostly on 32-bit.

Differential Revision: https://reviews.llvm.org/D74429

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/size_class_map.h
    compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/size_class_map.h b/compiler-rt/lib/scudo/standalone/size_class_map.h
index 151f4f95f541..3bbd165289e6 100644
--- a/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -24,7 +24,6 @@ inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
 
 template <typename Config> struct SizeClassMapBase {
   static u32 getMaxCachedHint(uptr Size) {
-    DCHECK_LE(Size, (1UL << Config::MaxSizeLog) + Chunk::getHeaderSize());
     DCHECK_NE(Size, 0);
     u32 N;
     // Force a 32-bit division if the template parameters allow for it.
@@ -95,10 +94,17 @@ class FixedSizeClassMap : public SizeClassMapBase<Config> {
       return (Size + MinSize - 1) >> Config::MinSizeLog;
     return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
   }
+
+  static u32 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
 };
 
 template <typename Config>
 class TableSizeClassMap : public SizeClassMapBase<Config> {
+  typedef SizeClassMapBase<Config> Base;
+
   static const u8 S = Config::NumBits - 1;
   static const uptr M = (1UL << S) - 1;
   static const uptr ClassesSize =
@@ -156,8 +162,10 @@ class TableSizeClassMap : public SizeClassMapBase<Config> {
     return Table.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
   }
 
-  static void print() {}
-  static void validate() {}
+  static u32 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
 };
 
 struct AndroidSizeClassConfig {
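
Note on the size_class_map.h hunks above: the Size upper-bound check moves
out of the shared SizeClassMapBase::getMaxCachedHint and into each concrete
map, which now asserts Size <= its own MaxSize before delegating to the base
implementation. Below is a minimal standalone sketch of that
check-then-delegate pattern; it is not the Scudo sources, and ExampleConfig
and the hint computation are simplified placeholders.

  // Sketch only: simplified stand-ins for the Scudo size class maps.
  #include <cassert>
  #include <cstdint>

  typedef uintptr_t uptr;
  typedef uint32_t u32;

  template <typename Config> struct SizeClassMapBase {
    // Shared computation; the bound check now lives in the derived maps.
    static u32 getMaxCachedHint(uptr Size) {
      assert(Size != 0);
      // Hypothetical hint: cache fewer blocks as the size grows.
      return Size <= 256 ? Config::MaxNumCachedHint
                         : Config::MaxNumCachedHint / 2;
    }
  };

  template <typename Config>
  struct FixedSizeClassMap : SizeClassMapBase<Config> {
    typedef SizeClassMapBase<Config> Base;
    static constexpr uptr MaxSize = uptr(1) << Config::MaxSizeLog;

    static u32 getMaxCachedHint(uptr Size) {
      assert(Size <= MaxSize); // this map's own bound, as in the patch
      return Base::getMaxCachedHint(Size);
    }
  };

  struct ExampleConfig { // hypothetical config for the sketch
    static constexpr uptr MaxSizeLog = 16;
    static constexpr u32 MaxNumCachedHint = 8;
  };

  int main() {
    // 128 bytes is within MaxSize (64 KiB), so the shared computation runs.
    return FixedSizeClassMap<ExampleConfig>::getMaxCachedHint(128) == 8 ? 0 : 1;
  }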

diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
index d4ba7d7138ab..8b2bc6ecbd5b 100644
--- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -268,10 +268,26 @@ TEST(ScudoWrappersCTest, MallocIterateBoundary) {
   const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
   const size_t SpecialSize = PageSize - BlockDelta;
 
-  void *P = malloc(SpecialSize);
-  EXPECT_NE(P, nullptr);
-  BoundaryP = reinterpret_cast<uintptr_t>(P);
-  const uintptr_t Block = BoundaryP - BlockDelta;
+  // We aren't guaranteed that any size class is exactly a page wide. So we need
+  // to keep making allocations until we succeed.
+  //
+  // With a 16-byte block alignment and 4096-byte page size, each allocation has
+  // a probability of (1 - (16/4096)) of failing to meet the alignment
+  // requirements, and the probability of failing 65536 times is
+  // (1 - (16/4096))^65536 < 10^-112. So if we still haven't succeeded after
+  // 65536 tries, give up.
+  uintptr_t Block;
+  void *P = nullptr;
+  for (unsigned I = 0; I != 65536; ++I) {
+    void *PrevP = P;
+    P = malloc(SpecialSize);
+    EXPECT_NE(P, nullptr);
+    *reinterpret_cast<void **>(P) = PrevP;
+    BoundaryP = reinterpret_cast<uintptr_t>(P);
+    Block = BoundaryP - BlockDelta;
+    if ((Block & (PageSize - 1)) == 0U)
+      break;
+  }
   EXPECT_EQ((Block & (PageSize - 1)), 0U);
 
   Count = 0U;
@@ -281,7 +297,11 @@ TEST(ScudoWrappersCTest, MallocIterateBoundary) {
   malloc_enable();
   EXPECT_EQ(Count, 1U);
 
-  free(P);
+  while (P) {
+    void *NextP = *reinterpret_cast<void **>(P);
+    free(P);
+    P = NextP;
+  }
 }
 
 // We expect heap operations within a disable/enable scope to deadlock.
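
Note on the MallocIterateBoundary change above: the rewritten loop threads
every attempt through an in-place singly linked list (the previous pointer
is stored in the first bytes of each new allocation) so that all of them can
be freed at the end, and it stops as soon as the block preceding the returned
pointer lands on a page boundary. With 16-byte-aligned blocks and 4096-byte
pages, each attempt hits a page boundary with probability 16/4096, so 65536
consecutive misses occur with probability (255/256)^65536, roughly e^-256,
which is why the bounded loop is effectively certain to terminate early.
Below is a minimal standalone sketch of the same retry-and-chain pattern
against the system malloc; the 4096-byte page size and 16-byte BlockDelta
are illustrative assumptions, not values taken from Scudo.

  // Sketch only: retry until a page-aligned block is found, chaining the
  // attempts so they can all be freed afterwards.
  #include <cstddef>
  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>

  int main() {
    const uintptr_t PageSize = 4096; // assumed page size
    const size_t BlockDelta = 16;    // assumed block header/alignment delta
    const size_t SpecialSize = PageSize - BlockDelta;

    void *P = nullptr;
    uintptr_t Block = 0;
    for (unsigned I = 0; I != 65536; ++I) {
      void *PrevP = P;
      P = malloc(SpecialSize);
      if (!P) {
        P = PrevP; // keep the chain intact so the cleanup below still works
        break;
      }
      // Store the previous allocation at the start of the new one so the
      // whole chain of attempts can be released later.
      *reinterpret_cast<void **>(P) = PrevP;
      Block = reinterpret_cast<uintptr_t>(P) - BlockDelta;
      if ((Block & (PageSize - 1)) == 0)
        break; // this allocation's block starts on a page boundary
    }
    printf("page-aligned block found: %s\n",
           (Block != 0 && (Block & (PageSize - 1)) == 0) ? "yes" : "no");

    // Free every allocation made above by walking the chain.
    while (P) {
      void *NextP = *reinterpret_cast<void **>(P);
      free(P);
      P = NextP;
    }
    return 0;
  }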


        

