[llvm-branch-commits] [llvm] a7130d8 - [ADT][NFC] Use empty base optimisation in BumpPtrAllocatorImpl

Nathan James via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Jan 12 14:47:44 PST 2021


Author: Nathan James
Date: 2021-01-12T22:43:48Z
New Revision: a7130d85e4b9e47b18a89eac3d47fd8c19d449c1

URL: https://github.com/llvm/llvm-project/commit/a7130d85e4b9e47b18a89eac3d47fd8c19d449c1
DIFF: https://github.com/llvm/llvm-project/commit/a7130d85e4b9e47b18a89eac3d47fd8c19d449c1.diff

LOG: [ADT][NFC] Use empty base optimisation in BumpPtrAllocatorImpl

Most uses of this class just use the default MallocAllocator.
As MallocAllocator contains no fields, we can use the empty base optimisation for BumpPtrAllocatorImpl and save 8 bytes of padding in most use cases.
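
For reference, a minimal sketch of what the empty base optimisation buys; the names here are illustrative and not part of the patch:

    #include <cstddef>

    struct Empty {}; // stateless, like MallocAllocator

    struct AsMember {
      Empty E;   // an empty member still occupies storage, plus padding
      void *Ptr;
    };

    struct AsBase : Empty { // an empty base can occupy no storage at all
      void *Ptr;
    };

    // On a typical 64-bit ABI: sizeof(AsMember) == 16, sizeof(AsBase) == 8.
    static_assert(sizeof(AsBase) < sizeof(AsMember),
                  "the empty base optimisation removes the padding");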

This prevents using a class that is marked as `final` as the `AllocatorT` template argument.
If one must use an allocator that has been marked as `final`, the simplest way around this is a proxy class, sketched below.
The proxy should have all the methods that `AllocatorBase` expects and should forward the calls to your own allocator instance.
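
A minimal sketch of such a proxy, assuming a hypothetical `MyFinalAllocator`
with LLVM-style Allocate/Deallocate signatures (none of these names are from
the patch):

    #include "llvm/Support/Allocator.h"
    #include <cstddef>

    // Hypothetical allocator that we cannot derive from.
    class MyFinalAllocator final {
    public:
      void *Allocate(size_t Size, size_t Alignment) {
        return llvm::allocate_buffer(Size, Alignment);
      }
      void Deallocate(const void *Ptr, size_t Size, size_t Alignment) {
        llvm::deallocate_buffer(const_cast<void *>(Ptr), Size, Alignment);
      }
    };

    // Proxy with the interface AllocatorBase expects, forwarding every
    // call to the wrapped allocator instance.
    class MyAllocatorProxy : public llvm::AllocatorBase<MyAllocatorProxy> {
      MyFinalAllocator *Impl;

    public:
      explicit MyAllocatorProxy(MyFinalAllocator &A) : Impl(&A) {}

      void *Allocate(size_t Size, size_t Alignment) {
        return Impl->Allocate(Size, Alignment);
      }
      void Deallocate(const void *Ptr, size_t Size, size_t Alignment) {
        Impl->Deallocate(Ptr, Size, Alignment);
      }

      // Pull in the convenience overloads (e.g. the typed Allocate<T>()).
      using llvm::AllocatorBase<MyAllocatorProxy>::Allocate;
      using llvm::AllocatorBase<MyAllocatorProxy>::Deallocate;
    };

    // Usage:
    //   MyFinalAllocator A;
    //   llvm::BumpPtrAllocatorImpl<MyAllocatorProxy> BPA{MyAllocatorProxy(A)};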

Reviewed By: dblaikie

Differential Revision: https://reviews.llvm.org/D94439

Added: 
    

Modified: 
    llvm/include/llvm/Support/Allocator.h

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/Support/Allocator.h b/llvm/include/llvm/Support/Allocator.h
index 40c967ccc485..245432debce6 100644
--- a/llvm/include/llvm/Support/Allocator.h
+++ b/llvm/include/llvm/Support/Allocator.h
@@ -66,7 +66,8 @@ template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
           size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
 class BumpPtrAllocatorImpl
     : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
-                                                SizeThreshold, GrowthDelay>> {
+                                                SizeThreshold, GrowthDelay>>,
+      private AllocatorT {
 public:
   static_assert(SizeThreshold <= SlabSize,
                 "The SizeThreshold must be at most the SlabSize to ensure "
@@ -80,15 +81,15 @@ class BumpPtrAllocatorImpl
 
   template <typename T>
   BumpPtrAllocatorImpl(T &&Allocator)
-      : Allocator(std::forward<T &&>(Allocator)) {}
+      : AllocatorT(std::forward<T &&>(Allocator)) {}
 
   // Manually implement a move constructor as we must clear the old allocator's
   // slabs as a matter of correctness.
   BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
-      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+      : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
+        End(Old.End), Slabs(std::move(Old.Slabs)),
         CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
-        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize),
-        Allocator(std::move(Old.Allocator)) {
+        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
     Old.CurPtr = Old.End = nullptr;
     Old.BytesAllocated = 0;
     Old.Slabs.clear();
@@ -110,7 +111,7 @@ class BumpPtrAllocatorImpl
     RedZoneSize = RHS.RedZoneSize;
     Slabs = std::move(RHS.Slabs);
     CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
-    Allocator = std::move(RHS.Allocator);
+    AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));
 
     RHS.CurPtr = RHS.End = nullptr;
     RHS.BytesAllocated = 0;
@@ -170,7 +171,8 @@ class BumpPtrAllocatorImpl
     // If Size is really big, allocate a separate slab for it.
     size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
     if (PaddedSize > SizeThreshold) {
-      void *NewSlab = Allocator.Allocate(PaddedSize, alignof(std::max_align_t));
+      void *NewSlab =
+          AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
       // We own the new slab and don't want anyone reading anything other than
       // pieces returned from this method.  So poison the whole slab.
       __asan_poison_memory_region(NewSlab, PaddedSize);
@@ -315,9 +317,6 @@ class BumpPtrAllocatorImpl
   /// a sanitizer.
   size_t RedZoneSize = 1;
 
-  /// The allocator instance we use to get slabs of memory.
-  AllocatorT Allocator;
-
   static size_t computeSlabSize(unsigned SlabIdx) {
     // Scale the actual allocated slab size based on the number of slabs
     // allocated. Every GrowthDelay slabs allocated, we double
@@ -333,7 +332,7 @@ class BumpPtrAllocatorImpl
     size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
 
     void *NewSlab =
-        Allocator.Allocate(AllocatedSlabSize, alignof(std::max_align_t));
+        AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
     // We own the new slab and don't want anyone reading anything other than
     // pieces returned from this method.  So poison the whole slab.
     __asan_poison_memory_region(NewSlab, AllocatedSlabSize);
@@ -349,7 +348,7 @@ class BumpPtrAllocatorImpl
     for (; I != E; ++I) {
       size_t AllocatedSlabSize =
           computeSlabSize(std::distance(Slabs.begin(), I));
-      Allocator.Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
+      AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
     }
   }
 
@@ -358,7 +357,7 @@ class BumpPtrAllocatorImpl
     for (auto &PtrAndSize : CustomSizedSlabs) {
       void *Ptr = PtrAndSize.first;
       size_t Size = PtrAndSize.second;
-      Allocator.Deallocate(Ptr, Size, alignof(std::max_align_t));
+      AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
     }
   }
 

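A note on `AllocatorT(static_cast<AllocatorT &&>(Old))` in the move
constructor above: with the allocator now a base subobject instead of a
member, there is no `Old.Allocator` field left to `std::move` from, so the
base is moved by casting the whole object to an rvalue reference of the base
type. A minimal sketch of the pattern, with illustrative names:

    struct Base {
      Base() = default;
      Base(Base &&) = default;
    };

    struct Derived : private Base {
      Derived() = default;
      // Explicitly move only the Base subobject of Old.
      Derived(Derived &&Old) : Base(static_cast<Base &&>(Old)) {}
    };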