[libc-commits] [libc] [libc] Add aligned_alloc (PR #96586)

via libc-commits libc-commits at lists.llvm.org
Tue Jun 25 16:34:27 PDT 2024


https://github.com/PiJoules updated https://github.com/llvm/llvm-project/pull/96586

From 7b1d4935ea499fa529fa2b1383300aa262f86190 Mon Sep 17 00:00:00 2001
From: Leonard Chan <leonardchan at google.com>
Date: Mon, 24 Jun 2024 20:09:35 -0700
Subject: [PATCH] [libc] Add aligned_alloc

This adds support for aligned_alloc with the freelist allocator. It
works by finding a block large enough to hold the requested size plus a
shift of at most the requested alignment. A block that meets this
requirement but whose usable_space isn't properly aligned can be split
so that the usable_space of the new block is aligned. The "padding"
block created by the split is merged with the previous block if one
exists.
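As a rough illustration of the padding computation described above, here is a
minimal standalone sketch (BLOCK_OVERHEAD, ALIGNMENT, and align_up are assumed
stand-ins for this example, not the Block implementation's own definitions):

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>

  // Assumed placeholder constants standing in for Block's own.
  constexpr std::size_t BLOCK_OVERHEAD = 16;
  constexpr std::size_t ALIGNMENT = 16;

  // Round `p` up to the next multiple of `a` (`a` must be a power of two).
  constexpr std::uintptr_t align_up(std::uintptr_t p, std::size_t a) {
    return (p + a - 1) & ~static_cast<std::uintptr_t>(a - 1);
  }

  // Bytes by which the usable space must shift so it becomes aligned, while
  // reserving room for the header of the split-off "padding" block.
  constexpr std::size_t padding_for(std::uintptr_t start, std::size_t align) {
    if (start % align == 0)
      return 0;
    return align_up(start + BLOCK_OVERHEAD, std::max(align, ALIGNMENT)) - start;
  }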
---
 libc/src/__support/block.h                    | 113 ++++++++++
 libc/src/__support/fixedvector.h              |   5 +
 libc/src/__support/freelist.h                 |  18 ++
 libc/src/__support/freelist_heap.h            |  50 ++++-
 libc/src/stdlib/aligned_alloc.h               |  20 ++
 libc/src/stdlib/freelist_malloc.cpp           |   5 +
 libc/test/src/__support/CMakeLists.txt        |   1 +
 libc/test/src/__support/block_test.cpp        | 196 ++++++++++++++++++
 .../test/src/__support/freelist_heap_test.cpp |  80 ++++++-
 .../src/__support/freelist_malloc_test.cpp    |  18 ++
 10 files changed, 495 insertions(+), 11 deletions(-)
 create mode 100644 libc/src/stdlib/aligned_alloc.h

diff --git a/libc/src/__support/block.h b/libc/src/__support/block.h
index 580f20e1ec4a4..161fd1eadc9b6 100644
--- a/libc/src/__support/block.h
+++ b/libc/src/__support/block.h
@@ -16,6 +16,7 @@
 #include "src/__support/CPP/optional.h"
 #include "src/__support/CPP/span.h"
 #include "src/__support/CPP/type_traits.h"
+#include "src/__support/libc_assert.h"
 
 #include <stdint.h>
 
@@ -261,6 +262,58 @@ class Block {
 
   constexpr Block(size_t prev_outer_size, size_t outer_size);
 
+  bool is_usable_space_aligned(size_t alignment) const {
+    return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
+  }
+
+  size_t padding_for_alignment(size_t alignment) const {
+    if (is_usable_space_aligned(alignment))
+      return 0;
+
+    // We need to ensure we can always split this block into a "padding" block
+    // and the aligned block. To do this, we need enough extra space for at
+    // least one block.
+    //
+    // |block   |usable_space                          |
+    // |........|......................................|
+    //                            ^
+    //                            Alignment requirement
+    //
+    //
+    // |block   |space   |block   |usable_space        |
+    // |........|........|........|....................|
+    //                            ^
+    //                            Alignment requirement
+    //
+    uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
+    alignment = cpp::max(alignment, ALIGNMENT);
+    return align_up(start + BLOCK_OVERHEAD, alignment) - start;
+  }
+
+  bool can_allocate(size_t alignment, size_t size) const;
+
+  // This is the return type for `allocate`, which can split one block into
+  // up to three blocks.
+  struct BlockInfo {
+    // This is the newly aligned block. It will have the alignment requested
+    // by a call to `allocate` and an inner size of at least `size`.
+    Block *block;
+
+    // If the usable_space of the original block was not aligned to the
+    // requested `alignment`, the block is split so that the usable_space of
+    // `block` is properly aligned. In that case, `prev` points to the new
+    // "padding" block created by the split. `prev` is nullptr if no new
+    // block was created or if the padding block could be merged into the
+    // block preceding the original one.
+    Block *prev;
+
+    // This is the remainder left over after trimming `block` down to `size`.
+    // It is only created if there's enough trailing space, and is nullptr
+    // otherwise.
+    Block *next;
+  };
+  static BlockInfo allocate(Block *block, size_t alignment, size_t size);
+
 private:
   /// Consumes the block and returns as a span of bytes.
   static ByteSpan as_bytes(Block *&&block);
@@ -357,6 +410,66 @@ void Block<OffsetType, kAlign>::free(Block *&block) {
   merge_next(block);
 }
 
+template <typename OffsetType, size_t kAlign>
+bool Block<OffsetType, kAlign>::can_allocate(size_t alignment,
+                                             size_t size) const {
+  if (is_usable_space_aligned(alignment) && inner_size() >= size)
+    return true; // Size and alignment constraints met.
+
+  // Either the alignment isn't met or we don't have enough space. If the
+  // alignment isn't met, we can always pad so that it is; if the alignment
+  // is met but there isn't enough space, this check will fail anyway.
+  size_t adjustment = padding_for_alignment(alignment);
+  return inner_size() >= size + adjustment;
+}
+
+template <typename OffsetType, size_t kAlign>
+typename Block<OffsetType, kAlign>::BlockInfo
+Block<OffsetType, kAlign>::allocate(Block *block, size_t alignment,
+                                    size_t size) {
+  BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};
+
+  if (!info.block->is_usable_space_aligned(alignment)) {
+    size_t adjustment = info.block->padding_for_alignment(alignment);
+    size_t new_inner_size = adjustment - BLOCK_OVERHEAD;
+    LIBC_ASSERT(new_inner_size % ALIGNMENT == 0 &&
+                "The adjustment calculation should always return a new size "
+                "that's a multiple of ALIGNMENT");
+
+    Block *original = info.block;
+    optional<Block *> maybe_aligned_block =
+        Block::split(original, new_inner_size);
+    LIBC_ASSERT(maybe_aligned_block.has_value() &&
+                "This split should always result in a new block. The check in "
+                "`can_allocate` ensures that we have enough space here to make "
+                "two blocks.");
+
+    Block *aligned_block = *maybe_aligned_block;
+    LIBC_ASSERT(aligned_block->is_usable_space_aligned(alignment) &&
+                "The aligned block isn't aligned somehow.");
+
+    Block *prev = original->prev();
+    if (prev) {
+      // If there is a block before the original one, merge the newly
+      // created "padding" block into it.
+      merge_next(prev);
+    } else {
+      // Otherwise, the padding block is the new first block in the chain;
+      // report it to the caller via `info.prev`.
+      info.prev = original;
+    }
+
+    info.block = aligned_block;
+  }
+
+  // Now trim the block to the requested size, splitting off any remainder.
+  if (optional<Block *> next = Block::split(info.block, size))
+    info.next = *next;
+
+  return info;
+}
+
 template <typename OffsetType, size_t kAlign>
 optional<Block<OffsetType, kAlign> *>
 Block<OffsetType, kAlign>::split(Block *&block, size_t new_inner_size) {
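To make the alignment path in `allocate` concrete, a hedged worked example
(the numbers assume BLOCK_OVERHEAD == 16, ALIGNMENT == 16, and usable space
starting at 0x1010; all illustrative, not the real constants):

  // allocate(block, /*alignment=*/64, size) with usable space at 0x1010:
  //   padding_for_alignment(64) = align_up(0x1010 + 16, 64) - 0x1010
  //                             = 0x1040 - 0x1010 = 0x30 (48 bytes)
  //   split(original, 48 - 16) carves off a "padding" block with a 32-byte
  //   inner size, so the aligned block's usable space lands at
  //   0x1010 + 0x30 == 0x1040, which is 64-byte aligned. A final
  //   split(info.block, size) then trims the aligned block and yields
  //   info.next if a remainder fits.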
diff --git a/libc/src/__support/fixedvector.h b/libc/src/__support/fixedvector.h
index 403b1620d20df..5161c0d7a533c 100644
--- a/libc/src/__support/fixedvector.h
+++ b/libc/src/__support/fixedvector.h
@@ -90,6 +90,11 @@ template <typename T, size_t CAPACITY> class FixedVector {
 
   LIBC_INLINE constexpr iterator begin() { return store.begin(); }
   LIBC_INLINE constexpr iterator end() { return iterator{&store[item_count]}; }
+
+  LIBC_INLINE constexpr const_iterator begin() const { return store.begin(); }
+  LIBC_INLINE constexpr const_iterator end() const {
+    return const_iterator{&store[item_count]};
+  }
 };
 
 } // namespace LIBC_NAMESPACE
diff --git a/libc/src/__support/freelist.h b/libc/src/__support/freelist.h
index 0641ba93807d6..b2eb84a85dbce 100644
--- a/libc/src/__support/freelist.h
+++ b/libc/src/__support/freelist.h
@@ -66,6 +66,8 @@ template <size_t NUM_BUCKETS = 6> class FreeList {
   ///   A span with a size of 0.
   cpp::span<cpp::byte> find_chunk(size_t size) const;
 
+  /// Finds and returns the first chunk for which `op` returns true, or a
+  /// span with a size of 0 if no chunk matches.
+  template <typename Cond> cpp::span<cpp::byte> find_chunk_if(Cond op) const;
+
   /// Removes a chunk from this freelist.
   bool remove_chunk(cpp::span<cpp::byte> chunk);
 
@@ -111,6 +113,22 @@ bool FreeList<NUM_BUCKETS>::add_chunk(span<cpp::byte> chunk) {
   return true;
 }
 
+template <size_t NUM_BUCKETS>
+template <typename Cond>
+span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk_if(Cond op) const {
+  for (FreeListNode *node : chunks_) {
+    while (node != nullptr) {
+      span<cpp::byte> chunk(reinterpret_cast<cpp::byte *>(node), node->size);
+      if (op(chunk))
+        return chunk;
+
+      node = node->next;
+    }
+  }
+
+  return {};
+}
+
 template <size_t NUM_BUCKETS>
 span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk(size_t size) const {
   if (size == 0)
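A usage sketch of the new find_chunk_if (hypothetical snippet; allocate_impl
in freelist_heap.h below uses the same pattern):

  // Accept the first free chunk of at least 128 bytes; any predicate works.
  auto chunk = freelist.find_chunk_if(
      [](cpp::span<cpp::byte> chunk) { return chunk.size() >= 128; });
  if (chunk.data() != nullptr)
    freelist.remove_chunk(chunk);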
diff --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h
index 3569baf27bdaa..7516dc7e1891f 100644
--- a/libc/src/__support/freelist_heap.h
+++ b/libc/src/__support/freelist_heap.h
@@ -24,6 +24,8 @@ namespace LIBC_NAMESPACE {
 using cpp::optional;
 using cpp::span;
 
+inline constexpr bool IsPow2(size_t x) { return x && (x & (x - 1)) == 0; }
+
 static constexpr cpp::array<size_t, 6> DEFAULT_BUCKETS{16,  32,  64,
                                                        128, 256, 512};
 
@@ -32,6 +34,9 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
   using BlockType = Block<>;
   using FreeListType = FreeList<NUM_BUCKETS>;
 
+  static constexpr size_t MIN_ALIGNMENT =
+      cpp::max(BlockType::ALIGNMENT, alignof(max_align_t));
+
   struct HeapStats {
     size_t total_bytes;
     size_t bytes_allocated;
@@ -55,6 +60,7 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
   }
 
   void *allocate(size_t size);
+  void *aligned_allocate(size_t alignment, size_t size);
   void free(void *ptr);
   void *realloc(void *ptr, size_t size);
   void *calloc(size_t num, size_t size);
@@ -74,6 +80,8 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
     freelist_.set_freelist_node(node, chunk);
   }
 
+  void *allocate_impl(size_t alignment, size_t size);
+
 private:
   span<cpp::byte> block_to_span(BlockType *block) {
     return span<cpp::byte>(block->usable_space(), block->inner_size());
@@ -109,20 +117,31 @@ struct FreeListHeapBuffer : public FreeListHeap<NUM_BUCKETS> {
 };
 
 template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
-  // Find a chunk in the freelist. Split it if needed, then return
-  auto chunk = freelist_.find_chunk(size);
+void *FreeListHeap<NUM_BUCKETS>::allocate_impl(size_t alignment, size_t size) {
+  if (size == 0)
+    return nullptr;
+
+  // Find a chunk in the freelist. Split it if needed, then return.
+  auto chunk =
+      freelist_.find_chunk_if([alignment, size](span<cpp::byte> chunk) {
+        BlockType *block = BlockType::from_usable_space(chunk.data());
+        return block->can_allocate(alignment, size);
+      });
 
   if (chunk.data() == nullptr)
     return nullptr;
   freelist_.remove_chunk(chunk);
 
   BlockType *chunk_block = BlockType::from_usable_space(chunk.data());
+  LIBC_ASSERT(!chunk_block->used());
 
   // Split that chunk. If there's a leftover chunk, add it to the freelist
-  optional<BlockType *> result = BlockType::split(chunk_block, size);
-  if (result)
-    freelist_.add_chunk(block_to_span(*result));
+  auto block_info = BlockType::allocate(chunk_block, alignment, size);
+  if (block_info.next)
+    freelist_.add_chunk(block_to_span(block_info.next));
+  if (block_info.prev)
+    freelist_.add_chunk(block_to_span(block_info.prev));
+  chunk_block = block_info.block;
 
   chunk_block->mark_used();
 
@@ -133,6 +152,25 @@ void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
   return chunk_block->usable_space();
 }
 
+template <size_t NUM_BUCKETS>
+void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
+  return allocate_impl(MIN_ALIGNMENT, size);
+}
+
+template <size_t NUM_BUCKETS>
+void *FreeListHeap<NUM_BUCKETS>::aligned_allocate(size_t alignment,
+                                                  size_t size) {
+  // The alignment must be an integral power of two.
+  if (!IsPow2(alignment))
+    return nullptr;
+
+  // The size parameter must be an integral multiple of alignment.
+  if (size % alignment != 0)
+    return nullptr;
+
+  return allocate_impl(alignment, size);
+}
+
 template <size_t NUM_BUCKETS> void FreeListHeap<NUM_BUCKETS>::free(void *ptr) {
   cpp::byte *bytes = static_cast<cpp::byte *>(ptr);
 
diff --git a/libc/src/stdlib/aligned_alloc.h b/libc/src/stdlib/aligned_alloc.h
new file mode 100644
index 0000000000000..7f294c8114d49
--- /dev/null
+++ b/libc/src/stdlib/aligned_alloc.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for aligned_alloc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
+#define LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
+
+#include <stddef.h>
+
+namespace LIBC_NAMESPACE {
+
+void *aligned_alloc(size_t alignment, size_t size);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
diff --git a/libc/src/stdlib/freelist_malloc.cpp b/libc/src/stdlib/freelist_malloc.cpp
index 4d3c42ca90bab..684c447a204e4 100644
--- a/libc/src/stdlib/freelist_malloc.cpp
+++ b/libc/src/stdlib/freelist_malloc.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "src/__support/freelist_heap.h"
+#include "src/stdlib/aligned_alloc.h"
 #include "src/stdlib/calloc.h"
 #include "src/stdlib/free.h"
 #include "src/stdlib/malloc.h"
@@ -42,4 +43,8 @@ LLVM_LIBC_FUNCTION(void *, realloc, (void *ptr, size_t size)) {
   return freelist_heap->realloc(ptr, size);
 }
 
+LLVM_LIBC_FUNCTION(void *, aligned_alloc, (size_t alignment, size_t size)) {
+  return freelist_heap->aligned_allocate(alignment, size);
+}
+
 } // namespace LIBC_NAMESPACE
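For reference, a minimal usage sketch of the new entrypoint (hypothetical
snippet; the MallocStats test further below exercises the same path):

  #include "src/stdlib/aligned_alloc.h"
  #include "src/stdlib/free.h"

  void demo() {
    // 64 is a power of two and 512 is a multiple of 64, so this succeeds
    // and the result is 64-byte aligned.
    void *p = LIBC_NAMESPACE::aligned_alloc(64, 512);
    LIBC_NAMESPACE::free(p);
  }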
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index ce8413fed7172..98f263b74b759 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -8,6 +8,7 @@ add_libc_test(
     block_test.cpp
   DEPENDS
     libc.src.__support.CPP.array
+    libc.src.__support.CPP.bit
     libc.src.__support.CPP.span
     libc.src.__support.block
     libc.src.string.memcpy
diff --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp
index 6614e4b583d3f..70ff9776b4274 100644
--- a/libc/test/src/__support/block_test.cpp
+++ b/libc/test/src/__support/block_test.cpp
@@ -8,6 +8,7 @@
 #include <stddef.h>
 
 #include "src/__support/CPP/array.h"
+#include "src/__support/CPP/bit.h"
 #include "src/__support/CPP/span.h"
 #include "src/__support/block.h"
 #include "src/string/memcpy.h"
@@ -36,6 +37,7 @@ using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;
   template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()
 
 using LIBC_NAMESPACE::cpp::array;
+using LIBC_NAMESPACE::cpp::bit_ceil;
 using LIBC_NAMESPACE::cpp::byte;
 using LIBC_NAMESPACE::cpp::span;
 
@@ -567,3 +569,197 @@ TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
   const BlockType *block2 = BlockType::from_usable_space(ptr);
   EXPECT_EQ(block1, block2);
 }
+
+TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
+  constexpr size_t kN = 1024;
+
+  // Ensure we can allocate everything up to the block size within this block.
+  for (size_t i = 0; i < kN - BlockType::BLOCK_OVERHEAD; ++i) {
+    alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+    auto result = BlockType::init(bytes);
+    ASSERT_TRUE(result.has_value());
+    BlockType *block = *result;
+
+    constexpr size_t kAlign = 1; // Effectively ignores alignment.
+    EXPECT_TRUE(block->can_allocate(kAlign, i));
+
+    // Whenever can_allocate succeeds, a call to allocate should also
+    // succeed.
+    auto info = BlockType::allocate(block, kAlign, i);
+    EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
+  }
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  // Given a block of size kN (assuming it's also a power of two), we should be
+  // able to allocate a block within it that's aligned to half its size. This is
+  // because regardless of where the buffer is located, we can always find a
+  // starting location within it that meets this alignment.
+  EXPECT_TRUE(block->can_allocate(kN / 2, 1));
+  auto info = BlockType::allocate(block, kN / 2, 1);
+  EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  // This should result in no new blocks.
+  constexpr size_t kAlignment = BlockType::ALIGNMENT;
+  constexpr size_t kExpectedSize = BlockType::ALIGNMENT;
+  EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));
+
+  auto [aligned_block, prev, next] =
+      BlockType::allocate(block, BlockType::ALIGNMENT, kExpectedSize);
+
+  // Since this is already aligned, there should be no previous block.
+  EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+
+  // Ensure the block is aligned and has the size we expect.
+  EXPECT_NE(aligned_block, static_cast<BlockType *>(nullptr));
+  EXPECT_TRUE(aligned_block->is_usable_space_aligned(BlockType::ALIGNMENT));
+  EXPECT_EQ(aligned_block->inner_size(), kExpectedSize);
+
+  // Check the next block.
+  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(aligned_block->next(), next);
+  EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(), &*bytes.end());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
+  constexpr size_t kN = 1024;
+
+  alignas(kN) array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  // First ensure the usable space is only aligned to the block alignment.
+  ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
+  ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+
+  // Now pick an alignment such that the usable space is not already aligned
+  // to it. We want to explicitly test that a padding block is split off
+  // before the aligned one.
+  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
+
+  constexpr size_t kSize = BlockType::ALIGNMENT;
+  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+
+  auto [aligned_block, prev, next] =
+      BlockType::allocate(block, kAlignment, kSize);
+
+  // Check that the previous block was created appropriately. Since the
+  // original block was the first in the chain, a new padding block should be
+  // created before the aligned one.
+  EXPECT_NE(prev, static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(aligned_block->prev(), prev);
+  EXPECT_EQ(prev->next(), aligned_block);
+  EXPECT_EQ(prev->outer_size(), reinterpret_cast<uintptr_t>(aligned_block) -
+                                    reinterpret_cast<uintptr_t>(prev));
+
+  // Ensure the block is aligned.
+  EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment));
+
+  // Check the next block.
+  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(aligned_block->next(), next);
+  EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(), &*bytes.end());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
+  constexpr size_t kN = 1024;
+
+  alignas(kN) array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  // Split the block roughly halfway and work on the second half.
+  auto result2 = BlockType::split(block, kN / 2);
+  ASSERT_TRUE(result2.has_value());
+  BlockType *newblock = *result2;
+  ASSERT_EQ(newblock->prev(), block);
+  size_t old_prev_size = block->outer_size();
+
+  // Now pick an alignment such that the usable space is not already aligned
+  // to it. We want to explicitly test that the padding is merged into the
+  // previous block rather than creating a new one.
+  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+  ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment));
+
+  // Ensure we can allocate in the new block.
+  constexpr size_t kSize = BlockType::ALIGNMENT;
+  EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));
+
+  auto [aligned_block, prev, next] =
+      BlockType::allocate(newblock, kAlignment, kSize);
+
+  // Now there should be no new previous block. Instead, the padding block
+  // we created should be merged into the original previous block.
+  EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(aligned_block->prev(), block);
+  EXPECT_EQ(block->next(), aligned_block);
+  EXPECT_GT(block->outer_size(), old_prev_size);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
+  // Finally, ensure the blocks made by allocate were split correctly: we
+  // should be able to reconstruct the original block from the pieces.
+  //
+  // This is the same setup as with the `AllocateNeedsAlignment` test case.
+  constexpr size_t kN = 1024;
+
+  alignas(kN) array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  // First ensure the usable space is only aligned to the block alignment.
+  ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
+  ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+
+  // Now pick an alignment such that the usable space is not already aligned
+  // to it. We want to explicitly test that a padding block is split off
+  // before the aligned one.
+  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
+
+  constexpr size_t kSize = BlockType::ALIGNMENT;
+  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+
+  auto [aligned_block, prev, next] =
+      BlockType::allocate(block, kAlignment, kSize);
+
+  // Check we have the appropriate blocks.
+  ASSERT_NE(prev, static_cast<BlockType *>(nullptr));
+  ASSERT_FALSE(prev->last());
+  ASSERT_EQ(aligned_block->prev(), prev);
+  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(aligned_block->next(), next);
+  EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+  ASSERT_TRUE(next->last());
+
+  // Now check for successful merges.
+  EXPECT_TRUE(BlockType::merge_next(prev));
+  EXPECT_EQ(prev->next(), next);
+  EXPECT_TRUE(BlockType::merge_next(prev));
+  EXPECT_EQ(prev->next(), static_cast<BlockType *>(nullptr));
+  EXPECT_TRUE(prev->last());
+
+  // We should have the original buffer.
+  EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
+  EXPECT_EQ(prev->outer_size(), bytes.size());
+  EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(), &*bytes.end());
+}
diff --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp
index a35cb5589ed62..add590f5c6d31 100644
--- a/libc/test/src/__support/freelist_heap_test.cpp
+++ b/libc/test/src/__support/freelist_heap_test.cpp
@@ -47,10 +47,6 @@ TEST_FOR_EACH_ALLOCATOR(CanAllocate, 2048) {
   void *ptr = allocator.allocate(ALLOC_SIZE);
 
   ASSERT_NE(ptr, static_cast<void *>(nullptr));
-  // In this case, the allocator should be returning us the start of the chunk.
-  EXPECT_EQ(ptr, static_cast<void *>(
-                     reinterpret_cast<cpp::byte *>(allocator.region_start()) +
-                     FreeListHeap<>::BlockType::BLOCK_OVERHEAD));
 }
 
 TEST_FOR_EACH_ALLOCATOR(AllocationsDontOverlap, 2048) {
@@ -94,7 +90,10 @@ TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
 
   FreeListHeap<> allocator(buf);
 
-  EXPECT_NE(allocator.allocate(N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
+  // Use aligned_allocate so we don't need to worry about ensuring `buf` is
+  // aligned to max_align_t.
+  EXPECT_NE(allocator.aligned_allocate(
+                1, N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
             static_cast<void *>(nullptr));
   EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
 }
@@ -214,4 +213,75 @@ TEST_FOR_EACH_ALLOCATOR(CallocTooLarge, 2048) {
   EXPECT_EQ(allocator.calloc(1, ALLOC_SIZE), static_cast<void *>(nullptr));
 }
 
+TEST_FOR_EACH_ALLOCATOR(AllocateZero, 2048) {
+  void *ptr = allocator.allocate(0);
+  ASSERT_EQ(ptr, static_cast<void *>(nullptr));
+}
+
+TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
+  constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
+  constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
+
+  for (size_t alignment : ALIGNMENTS) {
+    for (size_t scale : SIZE_SCALES) {
+      size_t size = alignment * scale;
+      void *ptr = allocator.aligned_allocate(alignment, size);
+      EXPECT_NE(ptr, static_cast<void *>(nullptr));
+      EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
+      allocator.free(ptr);
+    }
+  }
+}
+
+// This test is not part of TEST_FOR_EACH_ALLOCATOR since we want to
+// explicitly ensure that the allocator can still return aligned allocations
+// even when the underlying buffer is only aligned to the BlockType
+// alignment, and not to the larger alignments we request.
+TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockTypeAligned) {
+  constexpr size_t BUFFER_SIZE = 4096;
+  constexpr size_t BUFFER_ALIGNMENT = alignof(FreeListHeap<>::BlockType) * 2;
+  alignas(BUFFER_ALIGNMENT) cpp::byte buf[BUFFER_SIZE] = {cpp::byte(0)};
+
+  // Offset the buffer so it is aligned to the block type, but no more.
+  FreeListHeap<> allocator(
+      span<cpp::byte>(buf).subspan(alignof(FreeListHeap<>::BlockType)));
+
+  constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
+  constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
+
+  for (size_t alignment : ALIGNMENTS) {
+    for (size_t scale : SIZE_SCALES) {
+      size_t size = alignment * scale;
+      void *ptr = allocator.aligned_allocate(alignment, size);
+      EXPECT_NE(ptr, static_cast<void *>(nullptr));
+      EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
+      allocator.free(ptr);
+    }
+  }
+}
+
+TEST_FOR_EACH_ALLOCATOR(InvalidAlignedAllocAlignment, 2048) {
+  // Must be a power of 2.
+  constexpr size_t ALIGNMENTS[] = {4, 8, 16, 32, 64, 128, 256};
+  for (size_t alignment : ALIGNMENTS) {
+    void *ptr = allocator.aligned_allocate(alignment - 1, alignment - 1);
+    EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+  }
+
+  // Size must be a multiple of alignment.
+  for (size_t alignment : ALIGNMENTS) {
+    void *ptr = allocator.aligned_allocate(alignment, alignment + 1);
+    EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+  }
+
+  // Don't accept zero size.
+  void *ptr = allocator.aligned_allocate(1, 0);
+  EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+
+  // Don't accept zero alignment.
+  ptr = allocator.aligned_allocate(0, 8);
+  EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+}
+
 } // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/__support/freelist_malloc_test.cpp b/libc/test/src/__support/freelist_malloc_test.cpp
index 989e9548fa26d..e8d5c5a22c157 100644
--- a/libc/test/src/__support/freelist_malloc_test.cpp
+++ b/libc/test/src/__support/freelist_malloc_test.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "src/__support/freelist_heap.h"
+#include "src/stdlib/aligned_alloc.h"
 #include "src/stdlib/calloc.h"
 #include "src/stdlib/free.h"
 #include "src/stdlib/malloc.h"
@@ -53,4 +54,21 @@ TEST(LlvmLibcFreeListMalloc, MallocStats) {
             kAllocSize + kCallocNum * kCallocSize);
   EXPECT_EQ(freelist_heap_stats.cumulative_freed,
             kAllocSize + kCallocNum * kCallocSize);
+
+  constexpr size_t kAlign = kAllocSize;
+  void *ptr3 = LIBC_NAMESPACE::aligned_alloc(kAlign, kAllocSize);
+  EXPECT_NE(ptr3, static_cast<void *>(nullptr));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % kAlign, size_t(0));
+  EXPECT_EQ(freelist_heap_stats.bytes_allocated, kAllocSize);
+  EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+            kAllocSize + kCallocNum * kCallocSize + kAllocSize);
+  EXPECT_EQ(freelist_heap_stats.cumulative_freed,
+            kAllocSize + kCallocNum * kCallocSize);
+
+  LIBC_NAMESPACE::free(ptr3);
+  EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
+  EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+            kAllocSize + kCallocNum * kCallocSize + kAllocSize);
+  EXPECT_EQ(freelist_heap_stats.cumulative_freed,
+            kAllocSize + kCallocNum * kCallocSize + kAllocSize);
 }


