[llvm] r205143 - [Allocator] Lift the slab size and size threshold into template

Chandler Carruth chandlerc at gmail.com
Sun Mar 30 05:07:07 PDT 2014


Author: chandlerc
Date: Sun Mar 30 07:07:07 2014
New Revision: 205143

URL: http://llvm.org/viewvc/llvm-project?rev=205143&view=rev
Log:
[Allocator] Lift the slab size and size threshold into template
parameters rather than runtime parameters.

There is only one user of these parameters and they are compile time for
that user. Making these compile time seems to better reflect their
intended usage as well.

Modified:
    llvm/trunk/include/llvm/Support/Allocator.h
    llvm/trunk/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
    llvm/trunk/lib/Support/Allocator.cpp
    llvm/trunk/unittests/Support/AllocatorTest.cpp

Modified: llvm/trunk/include/llvm/Support/Allocator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Support/Allocator.h?rev=205143&r1=205142&r2=205143&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Support/Allocator.h (original)
+++ llvm/trunk/include/llvm/Support/Allocator.h Sun Mar 30 07:07:07 2014
@@ -17,6 +17,7 @@
 #include "llvm/Support/AlignOf.h"
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Memory.h"
 #include <algorithm>
 #include <cassert>
 #include <cstddef>
@@ -87,6 +88,27 @@ public:
   void Deallocate(MemSlab *Slab) override;
 };
 
+/// \brief Non-templated base class for the \c BumpPtrAllocatorImpl template.
+class BumpPtrAllocatorBase {
+public:
+  void Deallocate(const void * /*Ptr*/) {}
+  void PrintStats() const;
+
+  /// \brief Returns the total physical memory allocated by this allocator.
+  size_t getTotalMemory() const;
+
+protected:
+  /// \brief The slab that we are currently allocating into.
+  MemSlab *CurSlab;
+
+  /// \brief How many bytes we've allocated.
+  ///
+  /// Used so that we can compute how much space was wasted.
+  size_t BytesAllocated;
+
+  BumpPtrAllocatorBase() : CurSlab(0), BytesAllocated(0) {}
+};
+
 /// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
 ///
 /// This isn't strictly a bump-pointer allocator as it uses backing slabs of
@@ -97,21 +119,85 @@ public:
 ///
 /// Note that this also has a threshold for forcing allocations above a certain
 /// size into their own slab.
-class BumpPtrAllocator {
-  BumpPtrAllocator(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
-  void operator=(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
+template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
+class BumpPtrAllocatorImpl : public BumpPtrAllocatorBase {
+  BumpPtrAllocatorImpl(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
+  void operator=(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
 
 public:
-  BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096);
-  BumpPtrAllocator(size_t size, size_t threshold, SlabAllocator &allocator);
-  ~BumpPtrAllocator();
+  static_assert(SizeThreshold <= SlabSize,
+                "The SizeThreshold must be at most the SlabSize to ensure "
+                "that objects larger than a slab go into their own memory "
+                "allocation.");
+
+  BumpPtrAllocatorImpl()
+      : Allocator(DefaultSlabAllocator), NumSlabs(0) {}
+  BumpPtrAllocatorImpl(SlabAllocator &Allocator)
+      : Allocator(Allocator), NumSlabs(0) {}
+  ~BumpPtrAllocatorImpl() { DeallocateSlabs(CurSlab); }
 
   /// \brief Deallocate all but the current slab and reset the current pointer
   /// to the beginning of it, freeing all memory allocated so far.
-  void Reset();
+  void Reset() {
+    if (!CurSlab)
+      return;
+    DeallocateSlabs(CurSlab->NextPtr);
+    CurSlab->NextPtr = 0;
+    CurPtr = (char *)(CurSlab + 1);
+    End = ((char *)CurSlab) + CurSlab->Size;
+    BytesAllocated = 0;
+  }
 
   /// \brief Allocate space at the specified alignment.
-  void *Allocate(size_t Size, size_t Alignment);
+  void *Allocate(size_t Size, size_t Alignment) {
+    if (!CurSlab) // Start a new slab if we haven't allocated one already.
+      StartNewSlab();
+
+    // Keep track of how many bytes we've allocated.
+    BytesAllocated += Size;
+
+    // 0-byte alignment means 1-byte alignment.
+    if (Alignment == 0)
+      Alignment = 1;
+
+    // Allocate the aligned space, going forwards from CurPtr.
+    char *Ptr = alignPtr(CurPtr, Alignment);
+
+    // Check if we can hold it.
+    if (Ptr + Size <= End) {
+      CurPtr = Ptr + Size;
+      // Update the allocation point of this memory block in MemorySanitizer.
+      // Without this, MemorySanitizer messages for values originated from here
+      // will point to the allocation of the entire slab.
+      __msan_allocated_memory(Ptr, Size);
+      return Ptr;
+    }
+
+    // If Size is really big, allocate a separate slab for it.
+    size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
+    if (PaddedSize > SizeThreshold) {
+      ++NumSlabs;
+      MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
+
+      // Put the new slab after the current slab, since we are not allocating
+      // into it.
+      NewSlab->NextPtr = CurSlab->NextPtr;
+      CurSlab->NextPtr = NewSlab;
+
+      Ptr = alignPtr((char *)(NewSlab + 1), Alignment);
+      assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
+      __msan_allocated_memory(Ptr, Size);
+      return Ptr;
+    }
+
+    // Otherwise, start a new slab and try again.
+    StartNewSlab();
+    Ptr = alignPtr(CurPtr, Alignment);
+    CurPtr = Ptr + Size;
+    assert(CurPtr <= End && "Unable to allocate memory!");
+    __msan_allocated_memory(Ptr, Size);
+    return Ptr;
+  }
 
   /// \brief Allocate space for one object without constructing it.
   template <typename T> T *Allocate() {
@@ -131,22 +217,9 @@ public:
     return static_cast<T *>(Allocate(Num * EltSize, Alignment));
   }
 
-  void Deallocate(const void * /*Ptr*/) {}
-
   size_t GetNumSlabs() const { return NumSlabs; }
 
-  void PrintStats() const;
-
-  /// \brief Returns the total physical memory allocated by this allocator.
-  size_t getTotalMemory() const;
-
 private:
-  /// \brief Allocate at least this many bytes of memory in a slab.
-  size_t SlabSize;
-
-  /// \brief Threshold above which allocations to go into a dedicated slab.
-  size_t SizeThreshold;
-
   /// \brief The default allocator used if one is not provided.
   MallocSlabAllocator DefaultSlabAllocator;
 
@@ -156,9 +229,6 @@ private:
   /// changed to use a custom allocator.
   SlabAllocator &Allocator;
 
-  /// \brief The slab that we are currently allocating into.
-  MemSlab *CurSlab;
-
   /// \brief The current pointer into the current slab.
   ///
   /// This points to the next free byte in the slab.
@@ -167,11 +237,6 @@ private:
   /// \brief The end of the current slab.
   char *End;
 
-  /// \brief How many bytes we've allocated.
-  ///
-  /// Used so that we can compute how much space was wasted.
-  size_t BytesAllocated;
-
   /// \brief How many slabs we've allocated.
   ///
   /// Used to scale the size of each slab and reduce the number of allocations
@@ -180,14 +245,48 @@ private:
 
   /// \brief Allocate a new slab and move the bump pointers over into the new
   /// slab, modifying CurPtr and End.
-  void StartNewSlab();
+  void StartNewSlab() {
+    ++NumSlabs;
+    // Scale the actual allocated slab size based on the number of slabs
+    // allocated. Every 128 slabs allocated, we double the allocated size to
+    // reduce allocation frequency, but saturate at multiplying the slab size by
+    // 2^30.
+    // FIXME: Currently, this count includes special slabs for objects above the
+    // size threshold. That will be fixed in a subsequent commit to make the
+    // growth even more predictable.
+    size_t AllocatedSlabSize =
+        SlabSize * (1 << std::min<size_t>(30, NumSlabs / 128));
+
+    MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
+    NewSlab->NextPtr = CurSlab;
+    CurSlab = NewSlab;
+    CurPtr = (char *)(CurSlab + 1);
+    End = ((char *)CurSlab) + CurSlab->Size;
+  }
 
   /// \brief Deallocate all memory slabs after and including this one.
-  void DeallocateSlabs(MemSlab *Slab);
+  void DeallocateSlabs(MemSlab *Slab) {
+    while (Slab) {
+      MemSlab *NextSlab = Slab->NextPtr;
+#ifndef NDEBUG
+      // Poison the memory so stale pointers crash sooner.  Note we must
+      // preserve the Size and NextPtr fields at the beginning.
+      sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
+      memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
+#endif
+      Allocator.Deallocate(Slab);
+      Slab = NextSlab;
+      --NumSlabs;
+    }
+  }
 
   template <typename T> friend class SpecificBumpPtrAllocator;
 };
 
+/// \brief The standard BumpPtrAllocator which just uses the default template
+/// parameters.
+typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
+
 /// \brief A BumpPtrAllocator that allows only elements of a specific type to be
 /// allocated.
 ///
@@ -197,11 +296,8 @@ template <typename T> class SpecificBump
   BumpPtrAllocator Allocator;
 
 public:
-  SpecificBumpPtrAllocator(size_t size = 4096, size_t threshold = 4096)
-      : Allocator(size, threshold) {}
-  SpecificBumpPtrAllocator(size_t size, size_t threshold,
-                           SlabAllocator &allocator)
-      : Allocator(size, threshold, allocator) {}
+  SpecificBumpPtrAllocator() : Allocator() {}
+  SpecificBumpPtrAllocator(SlabAllocator &allocator) : Allocator(allocator) {}
 
   ~SpecificBumpPtrAllocator() { DestroyAll(); }
 
@@ -229,7 +325,10 @@ public:
 
 }  // end namespace llvm
 
-inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
+template <size_t SlabSize, size_t SizeThreshold>
+void *
+operator new(size_t Size,
+             llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &Allocator) {
   struct S {
     char c;
     union {
@@ -239,10 +338,12 @@ inline void *operator new(size_t Size, l
       void *P;
     } x;
   };
-  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
-                                           offsetof(S, x)));
+  return Allocator.Allocate(
+      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
 }
 
-inline void operator delete(void *, llvm::BumpPtrAllocator &) {}
+template <size_t SlabSize, size_t SizeThreshold>
+void operator delete(void *,
+                     llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &) {}
 
 #endif // LLVM_SUPPORT_ALLOCATOR_H

Modified: llvm/trunk/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/JIT/JITMemoryManager.cpp?rev=205143&r1=205142&r2=205143&view=diff
==============================================================================
--- llvm/trunk/lib/ExecutionEngine/JIT/JITMemoryManager.cpp (original)
+++ llvm/trunk/lib/ExecutionEngine/JIT/JITMemoryManager.cpp Sun Mar 30 07:07:07 2014
@@ -314,8 +314,8 @@ namespace {
     // confuse them with the blocks of memory described above.
     std::vector<sys::MemoryBlock> CodeSlabs;
     JITSlabAllocator BumpSlabAllocator;
-    BumpPtrAllocator StubAllocator;
-    BumpPtrAllocator DataAllocator;
+    BumpPtrAllocatorImpl<DefaultSlabSize, DefaultSizeThreshold> StubAllocator;
+    BumpPtrAllocatorImpl<DefaultSlabSize, DefaultSizeThreshold> DataAllocator;
 
     // Circular list of free blocks.
     FreeRangeHeader *FreeMemoryList;
@@ -590,8 +590,8 @@ DefaultJITMemoryManager::DefaultJITMemor
 #endif
     LastSlab(0, 0),
     BumpSlabAllocator(*this),
-    StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
-    DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {
+    StubAllocator(BumpSlabAllocator),
+    DataAllocator(BumpSlabAllocator) {
 
   // Allocate space for code.
   sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);

Modified: llvm/trunk/lib/Support/Allocator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Support/Allocator.cpp?rev=205143&r1=205142&r2=205143&view=diff
==============================================================================
--- llvm/trunk/lib/Support/Allocator.cpp (original)
+++ llvm/trunk/lib/Support/Allocator.cpp Sun Mar 30 07:07:07 2014
@@ -21,130 +21,22 @@
 
 namespace llvm {
 
-BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
-                                   SlabAllocator &allocator)
-    : SlabSize(size), SizeThreshold(std::min(size, threshold)),
-      Allocator(allocator), CurSlab(0), BytesAllocated(0), NumSlabs(0) {}
-
-BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold)
-    : SlabSize(size), SizeThreshold(std::min(size, threshold)),
-      Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0),
-      NumSlabs(0) {}
-
-BumpPtrAllocator::~BumpPtrAllocator() {
-  DeallocateSlabs(CurSlab);
-}
-
-/// StartNewSlab - Allocate a new slab and move the bump pointers over into
-/// the new slab.  Modifies CurPtr and End.
-void BumpPtrAllocator::StartNewSlab() {
-  ++NumSlabs;
-  // Scale the actual allocated slab size based on the number of slabs
-  // allocated. Every 128 slabs allocated, we double the allocated size to
-  // reduce allocation frequency, but saturate at multiplying the slab size by
-  // 2^30.
-  // FIXME: Currently, this count includes special slabs for objects above the
-  // size threshold. That will be fixed in a subsequent commit to make the
-  // growth even more predictable.
-  size_t AllocatedSlabSize =
-      SlabSize * (1 << std::min<size_t>(30, NumSlabs / 128));
-
-  MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
-  NewSlab->NextPtr = CurSlab;
-  CurSlab = NewSlab;
-  CurPtr = (char*)(CurSlab + 1);
-  End = ((char*)CurSlab) + CurSlab->Size;
-}
+SlabAllocator::~SlabAllocator() { }
 
-/// DeallocateSlabs - Deallocate all memory slabs after and including this
-/// one.
-void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
-  while (Slab) {
-    MemSlab *NextSlab = Slab->NextPtr;
-#ifndef NDEBUG
-    // Poison the memory so stale pointers crash sooner.  Note we must
-    // preserve the Size and NextPtr fields at the beginning.
-    sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
-    memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
-#endif
-    Allocator.Deallocate(Slab);
-    Slab = NextSlab;
-    --NumSlabs;
-  }
-}
+MallocSlabAllocator::~MallocSlabAllocator() { }
 
-/// Reset - Deallocate all but the current slab and reset the current pointer
-/// to the beginning of it, freeing all memory allocated so far.
-void BumpPtrAllocator::Reset() {
-  if (!CurSlab)
-    return;
-  DeallocateSlabs(CurSlab->NextPtr);
-  CurSlab->NextPtr = 0;
-  CurPtr = (char*)(CurSlab + 1);
-  End = ((char*)CurSlab) + CurSlab->Size;
-  BytesAllocated = 0;
+MemSlab *MallocSlabAllocator::Allocate(size_t Size) {
+  MemSlab *Slab = (MemSlab*)Allocator.Allocate(Size, 0);
+  Slab->Size = Size;
+  Slab->NextPtr = 0;
+  return Slab;
 }
 
-/// Allocate - Allocate space at the specified alignment.
-///
-void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
-  if (!CurSlab) // Start a new slab if we haven't allocated one already.
-    StartNewSlab();
-
-  // Keep track of how many bytes we've allocated.
-  BytesAllocated += Size;
-
-  // 0-byte alignment means 1-byte alignment.
-  if (Alignment == 0) Alignment = 1;
-
-  // Allocate the aligned space, going forwards from CurPtr.
-  char *Ptr = alignPtr(CurPtr, Alignment);
-
-  // Check if we can hold it.
-  if (Ptr + Size <= End) {
-    CurPtr = Ptr + Size;
-    // Update the allocation point of this memory block in MemorySanitizer.
-    // Without this, MemorySanitizer messages for values originated from here
-    // will point to the allocation of the entire slab.
-    __msan_allocated_memory(Ptr, Size);
-    return Ptr;
-  }
-
-  // If Size is really big, allocate a separate slab for it.
-  size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
-  if (PaddedSize > SizeThreshold) {
-    ++NumSlabs;
-    MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
-
-    // Put the new slab after the current slab, since we are not allocating
-    // into it.
-    NewSlab->NextPtr = CurSlab->NextPtr;
-    CurSlab->NextPtr = NewSlab;
-
-    Ptr = alignPtr((char*)(NewSlab + 1), Alignment);
-    assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
-    __msan_allocated_memory(Ptr, Size);
-    return Ptr;
-  }
-
-  // Otherwise, start a new slab and try again.
-  StartNewSlab();
-  Ptr = alignPtr(CurPtr, Alignment);
-  CurPtr = Ptr + Size;
-  assert(CurPtr <= End && "Unable to allocate memory!");
-  __msan_allocated_memory(Ptr, Size);
-  return Ptr;
+void MallocSlabAllocator::Deallocate(MemSlab *Slab) {
+  Allocator.Deallocate(Slab);
 }
 
-size_t BumpPtrAllocator::getTotalMemory() const {
-  size_t TotalMemory = 0;
-  for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
-    TotalMemory += Slab->Size;
-  }
-  return TotalMemory;
-}
-  
-void BumpPtrAllocator::PrintStats() const {
+void BumpPtrAllocatorBase::PrintStats() const {
   unsigned NumSlabs = 0;
   size_t TotalMemory = 0;
   for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
@@ -159,19 +51,12 @@ void BumpPtrAllocator::PrintStats() cons
          << " (includes alignment, etc)\n";
 }
 
-SlabAllocator::~SlabAllocator() { }
-
-MallocSlabAllocator::~MallocSlabAllocator() { }
-
-MemSlab *MallocSlabAllocator::Allocate(size_t Size) {
-  MemSlab *Slab = (MemSlab*)Allocator.Allocate(Size, 0);
-  Slab->Size = Size;
-  Slab->NextPtr = 0;
-  return Slab;
-}
-
-void MallocSlabAllocator::Deallocate(MemSlab *Slab) {
-  Allocator.Deallocate(Slab);
+size_t BumpPtrAllocatorBase::getTotalMemory() const {
+  size_t TotalMemory = 0;
+  for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
+    TotalMemory += Slab->Size;
+  }
+  return TotalMemory;
 }
 
 void PrintRecyclerStats(size_t Size,

Modified: llvm/trunk/unittests/Support/AllocatorTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Support/AllocatorTest.cpp?rev=205143&r1=205142&r2=205143&view=diff
==============================================================================
--- llvm/trunk/unittests/Support/AllocatorTest.cpp (original)
+++ llvm/trunk/unittests/Support/AllocatorTest.cpp Sun Mar 30 07:07:07 2014
@@ -141,7 +141,7 @@ public:
 // will not.
 TEST(AllocatorTest, TestBigAlignment) {
   MockSlabAllocator SlabAlloc;
-  BumpPtrAllocator Alloc(4096, 4096, SlabAlloc);
+  BumpPtrAllocator Alloc(SlabAlloc);
   uintptr_t Ptr = (uintptr_t)Alloc.Allocate(3000, 2048);
   MemSlab *Slab = SlabAlloc.GetLastSlab();
   EXPECT_LE(Ptr + 3000, ((uintptr_t)Slab) + Slab->Size);





More information about the llvm-commits mailing list