[libc-commits] [libc] [libc] Add aligned_alloc (PR #96586)
via libc-commits
libc-commits at lists.llvm.org
Mon Jun 24 20:16:28 PDT 2024
https://github.com/PiJoules created https://github.com/llvm/llvm-project/pull/96586
This adds support for aligned_alloc with the freelist allocator. This works by finding blocks large enough to hold the requested size plus some shift amount that's at most the requested alignment. Blocks that meet this requirement but aren't properly aligned can be split such that the usable_space of a new block is aligned properly. The "padding" block created will be merged with the previous block if one exists.
>From 1d17a6c0f471471856fcc6a514d691edabe29566 Mon Sep 17 00:00:00 2001
From: Leonard Chan <leonardchan at google.com>
Date: Mon, 24 Jun 2024 20:09:35 -0700
Subject: [PATCH] [libc] Add aligned_alloc
This adds support for aligned_alloc with the freelist allocator. This
works by finding blocks large enough to hold the requested size plus
some shift amount that's at most the requested alignment. Blocks that
meet this requirement but aren't properly aligned can be split such that
the usable_space of a new block is aligned properly. The "padding" block
created will be merged with the previous block if one exists.
---
libc/src/__support/block.h | 87 ++++++++
libc/src/__support/freelist.h | 20 ++
libc/src/__support/freelist_heap.h | 51 ++++-
libc/src/stdlib/aligned_alloc.h | 20 ++
libc/src/stdlib/freelist_malloc.cpp | 5 +
libc/test/src/__support/CMakeLists.txt | 1 +
libc/test/src/__support/block_test.cpp | 200 ++++++++++++++++++
.../test/src/__support/freelist_heap_test.cpp | 86 +++++++-
.../src/__support/freelist_malloc_test.cpp | 18 ++
9 files changed, 477 insertions(+), 11 deletions(-)
create mode 100644 libc/src/stdlib/aligned_alloc.h
diff --git a/libc/src/__support/block.h b/libc/src/__support/block.h
index 580f20e1ec4a4..8a7608e479e61 100644
--- a/libc/src/__support/block.h
+++ b/libc/src/__support/block.h
@@ -16,6 +16,7 @@
#include "src/__support/CPP/optional.h"
#include "src/__support/CPP/span.h"
#include "src/__support/CPP/type_traits.h"
+#include "src/__support/libc_assert.h"
#include <stdint.h>
@@ -261,6 +262,40 @@ class Block {
constexpr Block(size_t prev_outer_size, size_t outer_size);
+ bool usable_space_is_aligned(size_t alignment) const {
+ return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
+ }
+
+ size_t extra_space_for_adjustment(size_t alignment) const {
+ if (usable_space_is_aligned(alignment))
+ return 0;
+
+ // We need to ensure we can always split this block into a "padding" block
+ // and the aligned block. To do this, we need enough extra space for at
+ // least one block.
+ //
+ // |block |usable_space |
+ // |........|......................................|
+ // ^
+ // Alignment requirement
+ //
+ //
+ // |block |space |block |usable_space |
+ // |........|........|........|....................|
+ // ^
+ // Alignment requirement
+ //
+ uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
+ alignment = cpp::max(alignment, ALIGNMENT);
+ size_t adjustment = align_up(start + BLOCK_OVERHEAD, alignment) - start;
+ return adjustment;
+ }
+
+ bool can_allocate(size_t alignment, size_t size) const;
+
+ static void allocate(Block *&block, size_t alignment, size_t size,
+ Block *&new_prev, Block *&new_next);
+
private:
/// Consumes the block and returns as a span of bytes.
static ByteSpan as_bytes(Block *&&block);
@@ -357,6 +392,58 @@ void Block<OffsetType, kAlign>::free(Block *&block) {
merge_next(block);
}
+template <typename OffsetType, size_t kAlign>
+bool Block<OffsetType, kAlign>::can_allocate(size_t alignment,
+ size_t size) const {
+ if (usable_space_is_aligned(alignment) && inner_size() >= size)
+ return true; // Size and alignment constraints met.
+
+ // Either the alignment isn't met or we don't have enough size.
+ // If we don't meet the alignment, we can always adjust such that we do
+ // meet the alignment. If we meet the alignment but just don't have enough
+ // size, this check will fail anyway.
+ size_t adjustment = extra_space_for_adjustment(alignment);
+ if (inner_size() >= size + adjustment)
+ return true;
+
+ return false;
+}
+
+template <typename OffsetType, size_t kAlign>
+void Block<OffsetType, kAlign>::allocate(Block *&block, size_t alignment,
+ size_t size, Block *&new_prev,
+ Block *&new_next) {
+ if (!block->usable_space_is_aligned(alignment)) {
+ size_t adjustment = block->extra_space_for_adjustment(alignment);
+ size_t new_inner_size = adjustment - BLOCK_OVERHEAD;
+ LIBC_ASSERT(new_inner_size % ALIGNMENT == 0 &&
+ "The adjustment calculation should always return a new size "
+ "that's a multiple of ALIGNMENT");
+
+ Block *aligned_block = *Block::split(block, adjustment - BLOCK_OVERHEAD);
+ LIBC_ASSERT(aligned_block->usable_space_is_aligned(alignment) &&
+ "The aligned block isn't aligned somehow.");
+
+ Block *prev = block->prev();
+ if (prev) {
+ // If there is a block before this, we can merge the current one with the
+ // newly created one.
+ merge_next(prev);
+ } else {
+ // Otherwise, this was the very first block in the chain. Now we can make
+ // it the new first block.
+ new_prev = block;
+ }
+
+ block = aligned_block;
+ }
+
+ // Now get a block for the requested size.
+ optional<Block *> next = Block::split(block, size);
+ if (next)
+ new_next = *next;
+}
+
template <typename OffsetType, size_t kAlign>
optional<Block<OffsetType, kAlign> *>
Block<OffsetType, kAlign>::split(Block *&block, size_t new_inner_size) {
diff --git a/libc/src/__support/freelist.h b/libc/src/__support/freelist.h
index 0641ba93807d6..f8f5cd1878a32 100644
--- a/libc/src/__support/freelist.h
+++ b/libc/src/__support/freelist.h
@@ -66,6 +66,8 @@ template <size_t NUM_BUCKETS = 6> class FreeList {
/// A span with a size of 0.
cpp::span<cpp::byte> find_chunk(size_t size) const;
+ template <typename Cond> cpp::span<cpp::byte> find_chunk_if(Cond op) const;
+
/// Removes a chunk from this freelist.
bool remove_chunk(cpp::span<cpp::byte> chunk);
@@ -111,6 +113,24 @@ bool FreeList<NUM_BUCKETS>::add_chunk(span<cpp::byte> chunk) {
return true;
}
+template <size_t NUM_BUCKETS>
+template <typename Cond>
+span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk_if(Cond op) const {
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ FreeListNode *node = chunks_[i];
+
+ while (node != nullptr) {
+ span<cpp::byte> chunk(reinterpret_cast<cpp::byte *>(node), node->size);
+ if (op(chunk))
+ return chunk;
+
+ node = node->next;
+ }
+ }
+
+ return {};
+}
+
template <size_t NUM_BUCKETS>
span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk(size_t size) const {
if (size == 0)
diff --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h
index 3569baf27bdaa..100b800f84ae6 100644
--- a/libc/src/__support/freelist_heap.h
+++ b/libc/src/__support/freelist_heap.h
@@ -24,6 +24,8 @@ namespace LIBC_NAMESPACE {
using cpp::optional;
using cpp::span;
+inline constexpr bool IsPow2(size_t x) { return x && (x & (x - 1)) == 0; }
+
static constexpr cpp::array<size_t, 6> DEFAULT_BUCKETS{16, 32, 64,
128, 256, 512};
@@ -32,6 +34,9 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
using BlockType = Block<>;
using FreeListType = FreeList<NUM_BUCKETS>;
+ static constexpr size_t MIN_ALIGNMENT =
+ cpp::max(BlockType::ALIGNMENT, alignof(max_align_t));
+
struct HeapStats {
size_t total_bytes;
size_t bytes_allocated;
@@ -55,6 +60,7 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
}
void *allocate(size_t size);
+ void *aligned_allocate(size_t alignment, size_t size);
void free(void *ptr);
void *realloc(void *ptr, size_t size);
void *calloc(size_t num, size_t size);
@@ -74,6 +80,8 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
freelist_.set_freelist_node(node, chunk);
}
+ void *allocate_impl(size_t alignment, size_t size);
+
private:
span<cpp::byte> block_to_span(BlockType *block) {
return span<cpp::byte>(block->usable_space(), block->inner_size());
@@ -109,20 +117,29 @@ struct FreeListHeapBuffer : public FreeListHeap<NUM_BUCKETS> {
};
template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
- // Find a chunk in the freelist. Split it if needed, then return
- auto chunk = freelist_.find_chunk(size);
+void *FreeListHeap<NUM_BUCKETS>::allocate_impl(size_t alignment, size_t size) {
+ // Find a chunk in the freelist. Split it if needed, then return.
+ auto chunk =
+ freelist_.find_chunk_if([alignment, size](span<cpp::byte> chunk) {
+ BlockType *block = BlockType::from_usable_space(chunk.data());
+ return block->can_allocate(alignment, size);
+ });
if (chunk.data() == nullptr)
return nullptr;
freelist_.remove_chunk(chunk);
BlockType *chunk_block = BlockType::from_usable_space(chunk.data());
+ LIBC_ASSERT(!chunk_block->used());
// Split that chunk. If there's a leftover chunk, add it to the freelist
- optional<BlockType *> result = BlockType::split(chunk_block, size);
- if (result)
- freelist_.add_chunk(block_to_span(*result));
+ BlockType *prev = nullptr;
+ BlockType *next = nullptr;
+ BlockType::allocate(chunk_block, alignment, size, prev, next);
+ if (next)
+ freelist_.add_chunk(block_to_span(next));
+ if (prev)
+ freelist_.add_chunk(block_to_span(prev));
chunk_block->mark_used();
@@ -133,6 +150,28 @@ void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
return chunk_block->usable_space();
}
+template <size_t NUM_BUCKETS>
+void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
+ return allocate_impl(MIN_ALIGNMENT, size);
+}
+
+template <size_t NUM_BUCKETS>
+void *FreeListHeap<NUM_BUCKETS>::aligned_allocate(size_t alignment,
+ size_t size) {
+ if (size == 0)
+ return nullptr;
+
+ // The alignment must be an integral power of two.
+ if (!IsPow2(alignment))
+ return nullptr;
+
+ // The size parameter must be an integral multiple of alignment.
+ if (size % alignment != 0)
+ return nullptr;
+
+ return allocate_impl(alignment, size);
+}
+
template <size_t NUM_BUCKETS> void FreeListHeap<NUM_BUCKETS>::free(void *ptr) {
cpp::byte *bytes = static_cast<cpp::byte *>(ptr);
diff --git a/libc/src/stdlib/aligned_alloc.h b/libc/src/stdlib/aligned_alloc.h
new file mode 100644
index 0000000000000..7f294c8114d49
--- /dev/null
+++ b/libc/src/stdlib/aligned_alloc.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for aligned_alloc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+
+#ifndef LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
+#define LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
+
+namespace LIBC_NAMESPACE {
+
+void *aligned_alloc(size_t alignment, size_t size);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_ALIGNED_ALLOC_H
diff --git a/libc/src/stdlib/freelist_malloc.cpp b/libc/src/stdlib/freelist_malloc.cpp
index 4d3c42ca90bab..684c447a204e4 100644
--- a/libc/src/stdlib/freelist_malloc.cpp
+++ b/libc/src/stdlib/freelist_malloc.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "src/__support/freelist_heap.h"
+#include "src/stdlib/aligned_alloc.h"
#include "src/stdlib/calloc.h"
#include "src/stdlib/free.h"
#include "src/stdlib/malloc.h"
@@ -42,4 +43,8 @@ LLVM_LIBC_FUNCTION(void *, realloc, (void *ptr, size_t size)) {
return freelist_heap->realloc(ptr, size);
}
+LLVM_LIBC_FUNCTION(void *, aligned_alloc, (size_t alignment, size_t size)) {
+ return freelist_heap->aligned_allocate(alignment, size);
+}
+
} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index 082a002959c95..bbe856de39bea 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -8,6 +8,7 @@ add_libc_test(
block_test.cpp
DEPENDS
libc.src.__support.CPP.array
+ libc.src.__support.CPP.bit
libc.src.__support.CPP.span
libc.src.__support.block
libc.src.string.memcpy
diff --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp
index 6614e4b583d3f..020f0ee5d910b 100644
--- a/libc/test/src/__support/block_test.cpp
+++ b/libc/test/src/__support/block_test.cpp
@@ -8,6 +8,7 @@
#include <stddef.h>
#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/span.h"
#include "src/__support/block.h"
#include "src/string/memcpy.h"
@@ -36,6 +37,7 @@ using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;
template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()
using LIBC_NAMESPACE::cpp::array;
+using LIBC_NAMESPACE::cpp::bit_ceil;
using LIBC_NAMESPACE::cpp::byte;
using LIBC_NAMESPACE::cpp::span;
@@ -567,3 +569,201 @@ TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
const BlockType *block2 = BlockType::from_usable_space(ptr);
EXPECT_EQ(block1, block2);
}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
+ constexpr size_t kN = 1024;
+
+ // Ensure we can allocate everything up to the block size within this block.
+ for (size_t i = 0; i < kN - BlockType::BLOCK_OVERHEAD; ++i) {
+ alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ constexpr size_t kAlign = 1; // Effectively ignores alignment.
+ EXPECT_TRUE(block->can_allocate(kAlign, i));
+
+ // For each can_allocate, we should be able to do a successful call to
+ // allocate.
+ BlockType *prev = nullptr;
+ BlockType *next = nullptr;
+ BlockType::allocate(block, kAlign, i, prev, next);
+ EXPECT_NE(block, static_cast<BlockType *>(nullptr));
+ }
+
+ alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // Given a block of size kN (assuming it's also a power of two), we should be
+ // able to allocate a block within it that's aligned to half its size. This is
+ // because regardless of where the buffer is located, we can always find a
+ // starting location within it that meets this alignment.
+ EXPECT_TRUE(block->can_allocate(kN / 2, 1));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
+ constexpr size_t kN = 1024;
+
+ alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // This should result in no new blocks.
+ constexpr size_t kAlignment = BlockType::ALIGNMENT;
+ constexpr size_t kExpectedSize = BlockType::ALIGNMENT;
+ EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));
+
+ BlockType *prev = nullptr;
+ BlockType *next = nullptr;
+ BlockType::allocate(block, BlockType::ALIGNMENT, kExpectedSize, prev, next);
+
+ // Since this is already aligned, there should be no previous block.
+ EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+
+ // Ensure the block is aligned and has the size we expect.
+ EXPECT_NE(block, static_cast<BlockType *>(nullptr));
+ EXPECT_TRUE(block->usable_space_is_aligned(BlockType::ALIGNMENT));
+ EXPECT_EQ(block->inner_size(), kExpectedSize);
+
+ // Check the next block.
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(block->next(), next);
+ EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(), &*bytes.end());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
+ constexpr size_t kN = 1024;
+
+ alignas(kN) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // Ensure first that the usable space is only aligned to the block alignment.
+ ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
+ ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+
+ // Now pick an alignment such that the usable space is not already aligned to
+ // it. We want to explicitly test that the block will split into one before
+ // it.
+ constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+ ASSERT_FALSE(block->usable_space_is_aligned(kAlignment));
+
+ constexpr size_t kSize = BlockType::ALIGNMENT;
+ EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+
+ BlockType *prev = nullptr;
+ BlockType *next = nullptr;
+ BlockType::allocate(block, kAlignment, kSize, prev, next);
+
+ // Check the previous block was created appropriately. Since this block is the
+ // first block, a new one should be made before this.
+ EXPECT_NE(prev, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(block->prev(), prev);
+ EXPECT_EQ(prev->next(), block);
+ EXPECT_EQ(prev->outer_size(), reinterpret_cast<uintptr_t>(block) -
+ reinterpret_cast<uintptr_t>(prev));
+
+ // Ensure the block is aligned and has the size we expect.
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_TRUE(block->usable_space_is_aligned(kAlignment));
+
+ // Check the next block.
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(block->next(), next);
+ EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(), &*bytes.end());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
+ constexpr size_t kN = 1024;
+
+ alignas(kN) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // Split the block roughly halfway and work on the second half.
+ auto result2 = BlockType::split(block, kN / 2);
+ ASSERT_TRUE(result2.has_value());
+ BlockType *newblock = *result2;
+ ASSERT_EQ(newblock->prev(), block);
+ size_t old_prev_size = block->outer_size();
+
+ // Now pick an alignment such that the usable space is not already aligned to
+ // it. We want to explicitly test that the block will split into one before
+ // it.
+ constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+ ASSERT_FALSE(newblock->usable_space_is_aligned(kAlignment));
+
+ // Ensure we can allocate in the new block.
+ constexpr size_t kSize = BlockType::ALIGNMENT;
+ EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));
+
+ BlockType *prev = nullptr;
+ BlockType *next = nullptr;
+ BlockType::allocate(newblock, kAlignment, kSize, prev, next);
+
+ // Now there should be no new previous block. Instead, the padding we did
+ // create should be merged into the original previous block.
+ EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(newblock->prev(), block);
+ EXPECT_EQ(block->next(), newblock);
+ EXPECT_GT(block->outer_size(), old_prev_size);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
+ // Finally, to ensure we made the split blocks correctly via allocate, we
+ // should be able to reconstruct the original block from the blocklets.
+ //
+ // This is the same setup as with the `AllocateNeedsAlignment` test case.
+ constexpr size_t kN = 1024;
+
+ alignas(kN) array<byte, kN> bytes{};
+ auto result = BlockType::init(bytes);
+ ASSERT_TRUE(result.has_value());
+ BlockType *block = *result;
+
+ // Ensure first that the usable space is only aligned to the block alignment.
+ ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
+ ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+
+ // Now pick an alignment such that the usable space is not already aligned to
+ // it. We want to explicitly test that the block will split into one before
+ // it.
+ constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+ ASSERT_FALSE(block->usable_space_is_aligned(kAlignment));
+
+ constexpr size_t kSize = BlockType::ALIGNMENT;
+ EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+
+ BlockType *prev = nullptr;
+ BlockType *next = nullptr;
+ BlockType::allocate(block, kAlignment, kSize, prev, next);
+
+ // Check we have the appropriate blocks.
+ ASSERT_NE(prev, static_cast<BlockType *>(nullptr));
+ ASSERT_FALSE(prev->last());
+ ASSERT_EQ(block->prev(), prev);
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+ EXPECT_EQ(block->next(), next);
+ EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
+ ASSERT_TRUE(next->last());
+
+ // Now check for successful merges.
+ EXPECT_TRUE(BlockType::merge_next(prev));
+ EXPECT_EQ(prev->next(), next);
+ EXPECT_TRUE(BlockType::merge_next(prev));
+ EXPECT_EQ(prev->next(), static_cast<BlockType *>(nullptr));
+ EXPECT_TRUE(prev->last());
+
+ // We should have the original buffer.
+ EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
+ EXPECT_EQ(prev->outer_size(), bytes.size());
+ EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(), &*bytes.end());
+}
diff --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp
index a35cb5589ed62..16e283a4a8759 100644
--- a/libc/test/src/__support/freelist_heap_test.cpp
+++ b/libc/test/src/__support/freelist_heap_test.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include "src/__support/CPP/array.h"
#include "src/__support/CPP/span.h"
#include "src/__support/freelist_heap.h"
#include "src/string/memcmp.h"
@@ -47,10 +48,16 @@ TEST_FOR_EACH_ALLOCATOR(CanAllocate, 2048) {
void *ptr = allocator.allocate(ALLOC_SIZE);
ASSERT_NE(ptr, static_cast<void *>(nullptr));
- // In this case, the allocator should be returning us the start of the chunk.
- EXPECT_EQ(ptr, static_cast<void *>(
- reinterpret_cast<cpp::byte *>(allocator.region_start()) +
- FreeListHeap<>::BlockType::BLOCK_OVERHEAD));
+
+ if (&allocator != freelist_heap) {
+ // In this case, the allocator should be returning us the start of the
+ // chunk. This will really only happen if this is the very first `allocate`
+ // call to the allocator since prior malloc calls to this could make this
+ // anywhere in the buffer.
+ EXPECT_EQ(ptr, static_cast<void *>(
+ reinterpret_cast<cpp::byte *>(allocator.region_start()) +
+ FreeListHeap<>::BlockType::BLOCK_OVERHEAD));
+ }
}
TEST_FOR_EACH_ALLOCATOR(AllocationsDontOverlap, 2048) {
@@ -94,7 +101,10 @@ TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
FreeListHeap<> allocator(buf);
- EXPECT_NE(allocator.allocate(N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
+ // Use aligned_allocate so we don't need to worry about ensuring that `buf`
+ // is aligned to max_align_t.
+ EXPECT_NE(allocator.aligned_allocate(
+ 1, N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
static_cast<void *>(nullptr));
EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
}
@@ -214,4 +224,70 @@ TEST_FOR_EACH_ALLOCATOR(CallocTooLarge, 2048) {
EXPECT_EQ(allocator.calloc(1, ALLOC_SIZE), static_cast<void *>(nullptr));
}
+TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
+ constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
+ constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
+
+ for (size_t alignment : ALIGNMENTS) {
+ for (size_t scale : SIZE_SCALES) {
+ size_t size = alignment * scale;
+ void *ptr = allocator.aligned_allocate(alignment, size);
+ EXPECT_NE(ptr, static_cast<void *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
+ allocator.free(ptr);
+ }
+ }
+}
+
+// This test is not part of the TEST_FOR_EACH_ALLOCATOR since we want to
+// explicitly ensure that the buffer can still return aligned allocations even
+// if the underlying buffer is at most aligned to the BlockType alignment. This
+// is so we can check that we can still get aligned allocations even if the
+// underlying buffer is not aligned to the alignments we request.
+TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockTypeAligned) {
+ constexpr size_t BUFFER_SIZE = 4096;
+ constexpr size_t BUFFER_ALIGNMENT = alignof(FreeListHeap<>::BlockType) * 2;
+ alignas(BUFFER_ALIGNMENT) cpp::byte buf[BUFFER_SIZE] = {cpp::byte(0)};
+
+ // Ensure the underlying buffer is at most aligned to the block type.
+ FreeListHeap<> allocator(
+ span<cpp::byte>(buf).subspan(alignof(FreeListHeap<>::BlockType)));
+
+ constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
+ constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
+
+ for (size_t alignment : ALIGNMENTS) {
+ for (size_t scale : SIZE_SCALES) {
+ size_t size = alignment * scale;
+ void *ptr = allocator.aligned_allocate(alignment, size);
+ EXPECT_NE(ptr, static_cast<void *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
+ allocator.free(ptr);
+ }
+ }
+}
+
+TEST_FOR_EACH_ALLOCATOR(InvalidAlignedAllocAlignment, 2048) {
+ // Must be a power of 2.
+ constexpr size_t ALIGNMENTS[] = {4, 8, 16, 32, 64, 128, 256};
+ for (size_t alignment : ALIGNMENTS) {
+ void *ptr = allocator.aligned_allocate(alignment - 1, alignment - 1);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+ }
+
+ // Size must be a multiple of alignment
+ for (size_t alignment : ALIGNMENTS) {
+ void *ptr = allocator.aligned_allocate(alignment, alignment + 1);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+ }
+
+ // Don't accept zero size.
+ void *ptr = allocator.aligned_allocate(1, 0);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+
+ // Don't accept zero alignment.
+ ptr = allocator.aligned_allocate(0, 8);
+ EXPECT_EQ(ptr, static_cast<void *>(nullptr));
+}
+
} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/__support/freelist_malloc_test.cpp b/libc/test/src/__support/freelist_malloc_test.cpp
index 989e9548fa26d..e8d5c5a22c157 100644
--- a/libc/test/src/__support/freelist_malloc_test.cpp
+++ b/libc/test/src/__support/freelist_malloc_test.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "src/__support/freelist_heap.h"
+#include "src/stdlib/aligned_alloc.h"
#include "src/stdlib/calloc.h"
#include "src/stdlib/free.h"
#include "src/stdlib/malloc.h"
@@ -53,4 +54,21 @@ TEST(LlvmLibcFreeListMalloc, MallocStats) {
kAllocSize + kCallocNum * kCallocSize);
EXPECT_EQ(freelist_heap_stats.cumulative_freed,
kAllocSize + kCallocNum * kCallocSize);
+
+ constexpr size_t kAlign = kAllocSize;
+ void *ptr3 = LIBC_NAMESPACE::aligned_alloc(kAlign, kAllocSize);
+ EXPECT_NE(ptr3, static_cast<void *>(nullptr));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % kAlign, size_t(0));
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+ kAllocSize + kCallocNum * kCallocSize + kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed,
+ kAllocSize + kCallocNum * kCallocSize);
+
+ LIBC_NAMESPACE::free(ptr3);
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+ kAllocSize + kCallocNum * kCallocSize + kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed,
+ kAllocSize + kCallocNum * kCallocSize + kAllocSize);
}
More information about the libc-commits
mailing list