[libc-commits] [libc] 60de7dc - [libc] Fix malloc Block alignment issue on riscv32 (#117815)

via libc-commits libc-commits at lists.llvm.org
Thu Jan 16 11:56:49 PST 2025


Author: Daniel Thornburgh
Date: 2025-01-16T11:56:46-08:00
New Revision: 60de7dc886b9d83b0e2b6c9d7b73173d5d870388

URL: https://github.com/llvm/llvm-project/commit/60de7dc886b9d83b0e2b6c9d7b73173d5d870388
DIFF: https://github.com/llvm/llvm-project/commit/60de7dc886b9d83b0e2b6c9d7b73173d5d870388.diff

LOG: [libc] Fix malloc Block alignment issue on riscv32 (#117815)

Aligning blocks to max_align_t is neither necessary nor sufficient to
ensure that the usable_space() is so aligned. Instead, we make this an
invariant of Block and maintain it in init() and split().

This allows targets like riscv32 with small pointers and large
max_align_t to maintain the property that all available blocks are
aligned for malloc(). This change adjusts the tests to match and also
brings them closer to llvm-libc style.

Added: 
    

Modified: 
    libc/src/__support/CMakeLists.txt
    libc/src/__support/block.h
    libc/src/__support/freelist_heap.h
    libc/test/src/__support/block_test.cpp
    libc/test/src/__support/freelist_heap_test.cpp

Removed: 
    


################################################################################
diff  --git a/libc/src/__support/CMakeLists.txt b/libc/src/__support/CMakeLists.txt
index 5090dc218cda4a..148484052dcad5 100644
--- a/libc/src/__support/CMakeLists.txt
+++ b/libc/src/__support/CMakeLists.txt
@@ -12,6 +12,7 @@ add_header_library(
     libc.src.__support.CPP.optional
     libc.src.__support.CPP.span
     libc.src.__support.CPP.type_traits
+    libc.src.__support.math_extras
 )
 
 add_object_library(

diff  --git a/libc/src/__support/block.h b/libc/src/__support/block.h
index 9ca3f11530c4ba..1ec5fde77de0ce 100644
--- a/libc/src/__support/block.h
+++ b/libc/src/__support/block.h
@@ -18,6 +18,7 @@
 #include "src/__support/CPP/type_traits.h"
 #include "src/__support/libc_assert.h"
 #include "src/__support/macros/config.h"
+#include "src/__support/math_extras.h"
 
 #include <stdint.h>
 
@@ -40,24 +41,10 @@ LIBC_INLINE constexpr size_t align_down(size_t value, size_t alignment) {
   return (value / alignment) * alignment;
 }
 
-/// Returns the value rounded down to the nearest multiple of alignment.
-template <typename T>
-LIBC_INLINE constexpr T *align_down(T *value, size_t alignment) {
-  return reinterpret_cast<T *>(
-      align_down(reinterpret_cast<size_t>(value), alignment));
-}
-
-/// Returns the value rounded up to the nearest multiple of alignment.
+/// Returns the value rounded up to the nearest multiple of alignment. May wrap
+/// around.
 LIBC_INLINE constexpr size_t align_up(size_t value, size_t alignment) {
-  __builtin_add_overflow(value, alignment - 1, &value);
-  return align_down(value, alignment);
-}
-
-/// Returns the value rounded up to the nearest multiple of alignment.
-template <typename T>
-LIBC_INLINE constexpr T *align_up(T *value, size_t alignment) {
-  return reinterpret_cast<T *>(
-      align_up(reinterpret_cast<size_t>(value), alignment));
+  return align_down(value + alignment - 1, alignment);
 }
 
 using ByteSpan = cpp::span<LIBC_NAMESPACE::cpp::byte>;
@@ -68,8 +55,8 @@ using cpp::optional;
 /// The blocks store their offsets to the previous and next blocks. The latter
 /// is also the block's size.
 ///
-/// Blocks will always be aligned to a `ALIGNMENT` boundary. Block sizes will
-/// always be rounded up to a multiple of `ALIGNMENT`.
+/// All blocks have their usable space aligned to some multiple of max_align_t.
+/// This also implies that block outer sizes are aligned to max_align_t.
 ///
 /// As an example, the diagram below represents two contiguous `Block`s. The
 /// indices indicate byte offsets:
@@ -129,8 +116,9 @@ class Block {
   Block(const Block &other) = delete;
   Block &operator=(const Block &other) = delete;
 
-  /// Creates the first block for a given memory region, followed by a sentinel
-  /// last block. Returns the first block.
+  /// Initializes a given memory region into a first block and a sentinel last
+  /// block. Returns the first block, which has its usable space aligned to
+  /// max_align_t.
   static optional<Block *> init(ByteSpan region);
 
   /// @returns  A pointer to a `Block`, given a pointer to the start of the
@@ -186,11 +174,19 @@ class Block {
   }
 
   /// @returns A pointer to the usable space inside this block.
+  ///
+  /// Aligned to some multiple of max_align_t.
   LIBC_INLINE cpp::byte *usable_space() {
-    return reinterpret_cast<cpp::byte *>(this) + BLOCK_OVERHEAD;
+    auto *s = reinterpret_cast<cpp::byte *>(this) + BLOCK_OVERHEAD;
+    LIBC_ASSERT(reinterpret_cast<uintptr_t>(s) % alignof(max_align_t) == 0 &&
+                "usable space must be aligned to a multiple of max_align_t");
+    return s;
   }
   LIBC_INLINE const cpp::byte *usable_space() const {
-    return reinterpret_cast<const cpp::byte *>(this) + BLOCK_OVERHEAD;
+    const auto *s = reinterpret_cast<const cpp::byte *>(this) + BLOCK_OVERHEAD;
+    LIBC_ASSERT(reinterpret_cast<uintptr_t>(s) % alignof(max_align_t) == 0 &&
+                "usable space must be aligned to a multiple of max_align_t");
+    return s;
   }
 
   // @returns The region of memory the block manages, including the header.
@@ -201,11 +197,12 @@ class Block {
   /// Attempts to split this block.
   ///
   /// If successful, the block will have an inner size of at least
-  /// `new_inner_size`, rounded to ensure that the split point is on an
-  /// ALIGNMENT boundary. The remaining space will be returned as a new block.
-  /// Note that the prev_ field of the next block counts as part of the inner
-  /// size of the returnd block.
-  optional<Block *> split(size_t new_inner_size);
+  /// `new_inner_size`. The remaining space will be returned as a new block,
+  /// with usable space aligned to `usable_space_alignment`. Note that the prev_
+  /// field of the next block counts as part of the inner size of the block.
+  /// `usable_space_alignment` must be a multiple of max_align_t.
+  optional<Block *> split(size_t new_inner_size,
+                          size_t usable_space_alignment = alignof(max_align_t));
 
   /// Merges this block with the one that comes after it.
   bool merge_next();
@@ -248,46 +245,56 @@ class Block {
   /// nullptr.
   LIBC_INLINE void mark_last() { next_ |= LAST_MASK; }
 
-  LIBC_INLINE constexpr Block(size_t outer_size) : next_(outer_size) {
+  LIBC_INLINE Block(size_t outer_size) : next_(outer_size) {
     LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned");
+    LIBC_ASSERT(is_usable_space_aligned(alignof(max_align_t)) &&
+                "usable space must be aligned to a multiple of max_align_t");
   }
 
   LIBC_INLINE bool is_usable_space_aligned(size_t alignment) const {
     return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
   }
 
-  /// @returns The new inner size of this block that would give the usable
-  /// space of the next block the given alignment.
-  LIBC_INLINE size_t padding_for_alignment(size_t alignment) const {
-    if (is_usable_space_aligned(alignment))
+  // Returns the minimum inner size necessary for a block of that size to
+  // always be able to allocate at the given size and alignment.
+  //
+  // Returns 0 if there is no such size.
+  LIBC_INLINE static size_t min_size_for_allocation(size_t alignment,
+                                                    size_t size) {
+    LIBC_ASSERT(alignment >= alignof(max_align_t) &&
+                alignment % alignof(max_align_t) == 0 &&
+                "alignment must be multiple of max_align_t");
+
+    if (alignment == alignof(max_align_t))
+      return size;
+
+    // We must create a new block inside this one (splitting). This requires a
+    // block header in addition to the requested size.
+    if (add_overflow(size, sizeof(Block), size))
       return 0;
 
-    // We need to ensure we can always split this block into a "padding" block
-    // and the aligned block. To do this, we need enough extra space for at
-    // least one block.
-    //
-    // |block   |usable_space                          |
-    // |........|......................................|
-    //                            ^
-    //                            Alignment requirement
+    // Beyond that, padding space may need to remain in this block to ensure
+    // that the usable space of the next block is aligned.
     //
+    // Consider a position P of some lesser alignment, L, with maximal distance
+    // to the next position of some greater alignment, G, where G is a multiple
+    // of L. P must be one L unit past a G-aligned point. If it were one L-unit
+    // earlier, its distance would be zero. If it were one L-unit later, its
+    // distance would not be maximal. If it were not some integral number of L
+    // units away, it would not be L-aligned.
     //
-    // |block   |space   |block   |usable_space        |
-    // |........|........|........|....................|
-    //                            ^
-    //                            Alignment requirement
+    // So the maximum distance would be G - L. As a special case, if L is 1
+    // (unaligned), the max distance is G - 1.
     //
-    alignment = cpp::max(alignment, ALIGNMENT);
-    uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
-    uintptr_t next_usable_space = align_up(start + BLOCK_OVERHEAD, alignment);
-    uintptr_t next_block = next_usable_space - BLOCK_OVERHEAD;
-    return next_block - start + sizeof(prev_);
+    // This block's usable space is aligned to max_align_t >= Block. With zero
+    // padding, the next block's usable space is sizeof(Block) past it, which is
+    // a point aligned to Block. Thus the max padding needed is alignment -
+    // alignof(Block).
+    if (add_overflow(size, alignment - alignof(Block), size))
+      return 0;
+    return size;
   }
 
-  // Check that we can `allocate` a block with a given alignment and size from
-  // this existing block.
-  bool can_allocate(size_t alignment, size_t size) const;
-
   // This is the return type for `allocate` which can split one block into up to
   // three blocks.
   struct BlockInfo {
@@ -309,21 +316,31 @@ class Block {
     Block *next;
   };
 
-  // Divide a block into up to 3 blocks according to `BlockInfo`. This should
-  // only be called if `can_allocate` returns true.
+  // Divide a block into up to 3 blocks according to `BlockInfo`. Behavior is
+  // undefined if allocation is not possible for the given size and alignment.
   static BlockInfo allocate(Block *block, size_t alignment, size_t size);
 
+  // These two functions may wrap around.
+  LIBC_INLINE static uintptr_t next_possible_block_start(
+      uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) {
+    return align_up(ptr + sizeof(Block), usable_space_alignment) -
+           sizeof(Block);
+  }
+  LIBC_INLINE static uintptr_t prev_possible_block_start(
+      uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) {
+    return align_down(ptr, usable_space_alignment) - sizeof(Block);
+  }
+
 private:
   /// Construct a block to represent a span of bytes. Overwrites only enough
   /// memory for the block header; the rest of the span is left alone.
   LIBC_INLINE static Block *as_block(ByteSpan bytes) {
+    LIBC_ASSERT(reinterpret_cast<uintptr_t>(bytes.data()) % alignof(Block) ==
+                    0 &&
+                "block start must be suitably aligned");
     return ::new (bytes.data()) Block(bytes.size());
   }
 
-  /// Like `split`, but assumes the caller has already checked to parameters to
-  /// ensure the split will succeed.
-  Block *split_impl(size_t new_inner_size);
-
   /// Offset from this block to the previous block. 0 if this is the first
   /// block. This field is only alive when the previous block is free;
   /// otherwise, its memory is reused as part of the previous block's usable
@@ -343,81 +360,54 @@ class Block {
   ///   previous block is free.
   /// * If the `last` flag is set, the block is the sentinel last block. It is
   ///   summarily considered used and has no next block.
-} __attribute__((packed, aligned(cpp::max(alignof(max_align_t), size_t{4}))));
+};
 
 inline constexpr size_t Block::BLOCK_OVERHEAD =
     align_up(sizeof(Block), ALIGNMENT);
 
-LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) {
-  if (bytes.data() == nullptr)
-    return ByteSpan();
-
-  auto unaligned_start = reinterpret_cast<uintptr_t>(bytes.data());
-  auto aligned_start = align_up(unaligned_start, alignment);
-  auto unaligned_end = unaligned_start + bytes.size();
-  auto aligned_end = align_down(unaligned_end, alignment);
-
-  if (aligned_end <= aligned_start)
-    return ByteSpan();
-
-  return bytes.subspan(aligned_start - unaligned_start,
-                       aligned_end - aligned_start);
-}
-
 LIBC_INLINE
 optional<Block *> Block::init(ByteSpan region) {
-  optional<ByteSpan> result = get_aligned_subspan(region, ALIGNMENT);
-  if (!result)
+  if (!region.data())
+    return {};
+
+  uintptr_t start = reinterpret_cast<uintptr_t>(region.data());
+  uintptr_t end = start + region.size();
+  if (end < start)
+    return {};
+
+  uintptr_t block_start = next_possible_block_start(start);
+  if (block_start < start)
     return {};
 
-  region = result.value();
-  // Two blocks are allocated: a free block and a sentinel last block.
-  if (region.size() < 2 * BLOCK_OVERHEAD)
+  uintptr_t last_start = prev_possible_block_start(end);
+  if (last_start >= end)
     return {};
 
-  if (cpp::numeric_limits<size_t>::max() < region.size())
+  if (block_start + sizeof(Block) > last_start)
     return {};
 
-  Block *block = as_block(region.first(region.size() - BLOCK_OVERHEAD));
-  Block *last = as_block(region.last(BLOCK_OVERHEAD));
+  auto *last_start_ptr = reinterpret_cast<cpp::byte *>(last_start);
+  Block *block =
+      as_block({reinterpret_cast<cpp::byte *>(block_start), last_start_ptr});
+  Block *last = as_block({last_start_ptr, sizeof(Block)});
   block->mark_free();
   last->mark_last();
   return block;
 }
 
-LIBC_INLINE
-bool Block::can_allocate(size_t alignment, size_t size) const {
-  if (inner_size() < size)
-    return false;
-  if (is_usable_space_aligned(alignment))
-    return true;
-
-  // Alignment isn't met, so a padding block is needed. Determine amount of
-  // inner_size() consumed by the padding block.
-  size_t padding_size = padding_for_alignment(alignment) - sizeof(prev_);
-
-  // Check that there is room for the allocation in the following aligned block.
-  size_t aligned_inner_size = inner_size() - padding_size - BLOCK_OVERHEAD;
-  return size <= aligned_inner_size;
-}
-
 LIBC_INLINE
 Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
-  LIBC_ASSERT(
-      block->can_allocate(alignment, size) &&
-      "Calls to this function for a given alignment and size should only be "
-      "done if `can_allocate` for these parameters returns true.");
+  LIBC_ASSERT(alignment % alignof(max_align_t) == 0 &&
+              "alignment must be a multiple of max_align_t");
 
   BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};
 
   if (!info.block->is_usable_space_aligned(alignment)) {
     Block *original = info.block;
-    optional<Block *> maybe_aligned_block =
-        original->split(info.block->padding_for_alignment(alignment));
+    // The padding block has no minimum size requirement.
+    optional<Block *> maybe_aligned_block = original->split(0, alignment);
     LIBC_ASSERT(maybe_aligned_block.has_value() &&
-                "This split should always result in a new block. The check in "
-                "`can_allocate` ensures that we have enough space here to make "
-                "two blocks.");
+                "it should always be possible to split for alignment");
 
     if (Block *prev = original->prev_free()) {
       // If there is a free block before this, we can merge the current one with
@@ -441,37 +431,40 @@ Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
 }
 
 LIBC_INLINE
-optional<Block *> Block::split(size_t new_inner_size) {
+optional<Block *> Block::split(size_t new_inner_size,
+                               size_t usable_space_alignment) {
+  LIBC_ASSERT(usable_space_alignment % alignof(max_align_t) == 0 &&
+              "alignment must be a multiple of max_align_t");
   if (used())
     return {};
-  // The prev_ field of the next block is always available, so there is a
-  // minimum size to a block created through splitting.
-  if (new_inner_size < sizeof(prev_))
-    new_inner_size = sizeof(prev_);
-
-  size_t old_inner_size = inner_size();
-  new_inner_size =
-      align_up(new_inner_size - sizeof(prev_), ALIGNMENT) + sizeof(prev_);
-  if (old_inner_size < new_inner_size)
-    return {};
 
-  if (old_inner_size - new_inner_size < BLOCK_OVERHEAD)
+  // Compute the minimum outer size that produces a block of at least
+  // `new_inner_size`.
+  size_t min_outer_size = outer_size(cpp::max(new_inner_size, sizeof(prev_)));
+
+  uintptr_t start = reinterpret_cast<uintptr_t>(this);
+  uintptr_t next_block_start =
+      next_possible_block_start(start + min_outer_size, usable_space_alignment);
+  if (next_block_start < start)
     return {};
+  size_t new_outer_size = next_block_start - start;
+  LIBC_ASSERT(new_outer_size % alignof(max_align_t) == 0 &&
+              "new size must be aligned to max_align_t");
 
-  return split_impl(new_inner_size);
-}
+  if (outer_size() < new_outer_size ||
+      outer_size() - new_outer_size < sizeof(Block))
+    return {};
 
-LIBC_INLINE
-Block *Block::split_impl(size_t new_inner_size) {
-  size_t outer_size1 = outer_size(new_inner_size);
-  LIBC_ASSERT(outer_size1 % ALIGNMENT == 0 && "new size must be aligned");
-  ByteSpan new_region = region().subspan(outer_size1);
+  ByteSpan new_region = region().subspan(new_outer_size);
   next_ &= ~SIZE_MASK;
-  next_ |= outer_size1;
+  next_ |= new_outer_size;
 
   Block *new_block = as_block(new_region);
   mark_free(); // Free status for this block is now stored in new_block.
   new_block->next()->prev_ = new_region.size();
+
+  LIBC_ASSERT(new_block->is_usable_space_aligned(usable_space_alignment) &&
+              "usable space must have requested alignment");
   return new_block;
 }
 

diff  --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h
index 8fa36257cb91ae..d58685194aeb81 100644
--- a/libc/src/__support/freelist_heap.h
+++ b/libc/src/__support/freelist_heap.h
@@ -89,28 +89,14 @@ LIBC_INLINE void *FreeListHeap::allocate_impl(size_t alignment, size_t size) {
   if (!is_initialized)
     init();
 
-  size_t request_size = size;
-
-  // TODO: usable_space should always be aligned to max_align_t.
-  if (alignment > alignof(max_align_t) ||
-      (Block::BLOCK_OVERHEAD % alignof(max_align_t) != 0)) {
-    // TODO: This bound isn't precisely calculated yet. It assumes one extra
-    // Block::ALIGNMENT to accomodate the possibility for padding block
-    // overhead. (alignment - 1) ensures that there is an aligned point
-    // somewhere in usable_space, but this isn't tight either, since
-    // usable_space is also already somewhat aligned.
-    if (add_overflow(size, (alignment - 1) + Block::ALIGNMENT, request_size))
-      return nullptr;
-  }
+  size_t request_size = Block::min_size_for_allocation(alignment, size);
+  if (!request_size)
+    return nullptr;
 
   Block *block = free_store.remove_best_fit(request_size);
   if (!block)
     return nullptr;
 
-  LIBC_ASSERT(block->can_allocate(alignment, size) &&
-              "block should always be large enough to allocate at the correct "
-              "alignment");
-
   auto block_info = Block::allocate(block, alignment, size);
   if (block_info.next)
     free_store.insert(block_info.next);
@@ -135,6 +121,9 @@ LIBC_INLINE void *FreeListHeap::aligned_allocate(size_t alignment,
   if (size % alignment != 0)
     return nullptr;
 
+  // The minimum alignment supported by Block is max_align_t.
+  alignment = cpp::max(alignment, alignof(max_align_t));
+
   return allocate_impl(alignment, size);
 }
 

diff  --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp
index 5e437db51b6092..1da811544a949c 100644
--- a/libc/test/src/__support/block_test.cpp
+++ b/libc/test/src/__support/block_test.cpp
@@ -28,17 +28,22 @@ TEST(LlvmLibcBlockTest, CanCreateSingleAlignedBlock) {
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block->is_usable_space_aligned(alignof(max_align_t)));
+
   Block *last = block->next();
   ASSERT_NE(last, static_cast<Block *>(nullptr));
-  constexpr size_t last_outer_size = Block::BLOCK_OVERHEAD;
-  EXPECT_EQ(last->outer_size(), last_outer_size);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(last) % alignof(Block), size_t{0});
+
+  EXPECT_EQ(last->outer_size(), sizeof(Block));
   EXPECT_EQ(last->prev_free(), block);
   EXPECT_TRUE(last->used());
 
-  EXPECT_EQ(block->outer_size(), kN - last_outer_size);
-  constexpr size_t last_prev_field_size = sizeof(size_t);
-  EXPECT_EQ(block->inner_size(), kN - last_outer_size - Block::BLOCK_OVERHEAD +
-                                     last_prev_field_size);
+  size_t block_outer_size =
+      reinterpret_cast<uintptr_t>(last) - reinterpret_cast<uintptr_t>(block);
+  EXPECT_EQ(block->outer_size(), block_outer_size);
+  EXPECT_EQ(block->inner_size(),
+            block_outer_size - sizeof(Block) + sizeof(size_t));
   EXPECT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
   EXPECT_FALSE(block->used());
 }
@@ -52,6 +57,14 @@ TEST(LlvmLibcBlockTest, CanCreateUnalignedSingleBlock) {
 
   auto result = Block::init(aligned.subspan(1));
   EXPECT_TRUE(result.has_value());
+
+  Block *block = *result;
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block->is_usable_space_aligned(alignof(max_align_t)));
+
+  Block *last = block->next();
+  ASSERT_NE(last, static_cast<Block *>(nullptr));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(last) % alignof(Block), size_t{0});
 }
 
 TEST(LlvmLibcBlockTest, CannotCreateTooSmallBlock) {
@@ -66,7 +79,7 @@ TEST(LlvmLibcBlockTest, CanSplitBlock) {
   // Give the split position a large alignment.
   constexpr size_t kSplitN = 512 + prev_field_size;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   auto *block1 = *result;
@@ -82,6 +95,8 @@ TEST(LlvmLibcBlockTest, CanSplitBlock) {
 
   EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
   EXPECT_FALSE(block2->used());
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block2) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block2->is_usable_space_aligned(alignof(max_align_t)));
 
   EXPECT_EQ(block1->next(), block2);
   EXPECT_EQ(block2->prev_free(), block1);
@@ -90,28 +105,24 @@ TEST(LlvmLibcBlockTest, CanSplitBlock) {
 TEST(LlvmLibcBlockTest, CanSplitBlockUnaligned) {
   constexpr size_t kN = 1024;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
   size_t orig_size = block1->outer_size();
 
   constexpr size_t kSplitN = 513;
-  constexpr size_t prev_field_size = sizeof(size_t);
-  uintptr_t split_addr =
-      reinterpret_cast<uintptr_t>(block1) + (kSplitN - prev_field_size);
-  // Round split_addr up to a multiple of the alignment.
-  split_addr += alignof(Block) - (split_addr % alignof(Block));
-  uintptr_t split_len = split_addr - (uintptr_t)&bytes + prev_field_size;
 
   result = block1->split(kSplitN);
   ASSERT_TRUE(result.has_value());
   Block *block2 = *result;
 
-  EXPECT_EQ(block1->inner_size(), split_len);
+  EXPECT_GE(block1->inner_size(), kSplitN);
 
   EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
   EXPECT_FALSE(block2->used());
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block2) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block2->is_usable_space_aligned(alignof(max_align_t)));
 
   EXPECT_EQ(block1->next(), block2);
   EXPECT_EQ(block2->prev_free(), block1);
@@ -131,7 +142,7 @@ TEST(LlvmLibcBlockTest, CanSplitMidBlock) {
   constexpr size_t kSplit1 = 512;
   constexpr size_t kSplit2 = 256;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
@@ -152,27 +163,25 @@ TEST(LlvmLibcBlockTest, CanSplitMidBlock) {
 
 TEST(LlvmLibcBlockTest, CannotSplitTooSmallBlock) {
   constexpr size_t kN = 64;
-  constexpr size_t kSplitN = kN + 1;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
-  result = block->split(kSplitN);
+  result = block->split(block->inner_size() + 1);
   ASSERT_FALSE(result.has_value());
 }
 
 TEST(LlvmLibcBlockTest, CannotSplitBlockWithoutHeaderSpace) {
   constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = kN - 2 * Block::BLOCK_OVERHEAD - 1;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
-  result = block->split(kSplitN);
+  result = block->split(block->inner_size() - sizeof(Block) + 1);
   ASSERT_FALSE(result.has_value());
 }
 
@@ -180,7 +189,7 @@ TEST(LlvmLibcBlockTest, CannotMakeBlockLargerInSplit) {
   // Ensure that we can't ask for more space than the block actually has...
   constexpr size_t kN = 1024;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
@@ -189,55 +198,41 @@ TEST(LlvmLibcBlockTest, CannotMakeBlockLargerInSplit) {
   ASSERT_FALSE(result.has_value());
 }
 
-TEST(LlvmLibcBlockTest, CannotMakeSecondBlockLargerInSplit) {
-  // Ensure that the second block in split is at least of the size of header.
-  constexpr size_t kN = 1024;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
-  auto result = Block::init(bytes);
-  ASSERT_TRUE(result.has_value());
-  Block *block = *result;
-
-  result = block->split(block->inner_size() - Block::BLOCK_OVERHEAD + 1);
-  ASSERT_FALSE(result.has_value());
-}
-
 TEST(LlvmLibcBlockTest, CanMakeMinimalSizeFirstBlock) {
   // This block does support splitting with minimal payload size.
   constexpr size_t kN = 1024;
-  constexpr size_t minimal_size = sizeof(size_t);
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
-  result = block->split(minimal_size);
+  result = block->split(0);
   ASSERT_TRUE(result.has_value());
-  EXPECT_EQ(block->inner_size(), minimal_size);
+  EXPECT_LE(block->outer_size(), sizeof(Block) + alignof(max_align_t));
 }
 
 TEST(LlvmLibcBlockTest, CanMakeMinimalSizeSecondBlock) {
   // Likewise, the split block can be minimal-width.
   constexpr size_t kN = 1024;
-  constexpr size_t minimal_size = sizeof(size_t);
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
 
-  result = block1->split(block1->inner_size() - Block::BLOCK_OVERHEAD);
+  result = block1->split(Block::prev_possible_block_start(
+                             reinterpret_cast<uintptr_t>(block1->next())) -
+                         reinterpret_cast<uintptr_t>(block1->usable_space()) +
+                         sizeof(size_t));
   ASSERT_TRUE(result.has_value());
-  Block *block2 = *result;
-
-  EXPECT_EQ(block2->inner_size(), minimal_size);
+  EXPECT_LE((*result)->outer_size(), sizeof(Block) + alignof(max_align_t));
 }
 
 TEST(LlvmLibcBlockTest, CanMarkBlockUsed) {
   constexpr size_t kN = 1024;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
@@ -255,7 +250,7 @@ TEST(LlvmLibcBlockTest, CannotSplitUsedBlock) {
   constexpr size_t kN = 1024;
   constexpr size_t kSplitN = 512;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
@@ -269,21 +264,19 @@ TEST(LlvmLibcBlockTest, CanMergeWithNextBlock) {
   // Do the three way merge from "CanSplitMidBlock", and let's
   // merge block 3 and 2
   constexpr size_t kN = 1024;
-  // Give the split positions large alignments.
-  constexpr size_t prev_field_size = sizeof(size_t);
-  constexpr size_t kSplit1 = 512 + prev_field_size;
-  constexpr size_t kSplit2 = 256 + prev_field_size;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
-  size_t orig_size = block1->outer_size();
+  size_t total_size = block1->outer_size();
 
   result = block1->split(kSplit1);
   ASSERT_TRUE(result.has_value());
 
   result = block1->split(kSplit2);
+  size_t block1_size = block1->outer_size();
   ASSERT_TRUE(result.has_value());
   Block *block3 = *result;
 
@@ -291,15 +284,15 @@ TEST(LlvmLibcBlockTest, CanMergeWithNextBlock) {
 
   EXPECT_EQ(block1->next(), block3);
   EXPECT_EQ(block3->prev_free(), block1);
-  EXPECT_EQ(block1->inner_size(), kSplit2);
-  EXPECT_EQ(block3->outer_size(), orig_size - block1->outer_size());
+  EXPECT_EQ(block1->outer_size(), block1_size);
+  EXPECT_EQ(block3->outer_size(), total_size - block1->outer_size());
 }
 
 TEST(LlvmLibcBlockTest, CannotMergeWithFirstOrLastBlock) {
   constexpr size_t kN = 1024;
   constexpr size_t kSplitN = 512;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
@@ -316,7 +309,7 @@ TEST(LlvmLibcBlockTest, CannotMergeUsedBlock) {
   constexpr size_t kN = 1024;
   constexpr size_t kSplitN = 512;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
@@ -330,9 +323,7 @@ TEST(LlvmLibcBlockTest, CannotMergeUsedBlock) {
 }
 
 TEST(LlvmLibcBlockTest, CanGetBlockFromUsableSpace) {
-  constexpr size_t kN = 1024;
-
-  array<byte, kN> bytes{};
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
@@ -355,93 +346,86 @@ TEST(LlvmLibcBlockTest, CanGetConstBlockFromUsableSpace) {
   EXPECT_EQ(block1, block2);
 }
 
-TEST(LlvmLibcBlockTest, CanAllocate) {
-  constexpr size_t kN = 1024 + Block::BLOCK_OVERHEAD;
+TEST(LlvmLibcBlockTest, Allocate) {
+  constexpr size_t kN = 1024;
 
   // Ensure we can allocate everything up to the block size within this block.
-  for (size_t i = 0; i < kN - 2 * Block::BLOCK_OVERHEAD; ++i) {
-    alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
+  for (size_t i = 0; i < kN; ++i) {
+    array<byte, kN> bytes;
     auto result = Block::init(bytes);
     ASSERT_TRUE(result.has_value());
     Block *block = *result;
 
-    constexpr size_t ALIGN = 1; // Effectively ignores alignment.
-    EXPECT_TRUE(block->can_allocate(ALIGN, i));
+    if (i > block->inner_size())
+      continue;
 
-    // For each can_allocate, we should be able to do a successful call to
-    // allocate.
-    auto info = Block::allocate(block, ALIGN, i);
+    auto info = Block::allocate(block, alignof(max_align_t), i);
     EXPECT_NE(info.block, static_cast<Block *>(nullptr));
   }
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
-  auto result = Block::init(bytes);
-  ASSERT_TRUE(result.has_value());
-  Block *block = *result;
+  // Ensure we can allocate a byte at every guaranteeable alignment.
+  for (size_t i = 1; i < kN / alignof(max_align_t); ++i) {
+    array<byte, kN> bytes;
+    auto result = Block::init(bytes);
+    ASSERT_TRUE(result.has_value());
+    Block *block = *result;
+
+    size_t alignment = i * alignof(max_align_t);
+    if (Block::min_size_for_allocation(alignment, 1) > block->inner_size())
+      continue;
 
-  // Given a block of size N (assuming it's also a power of two), we should be
-  // able to allocate a block within it that's aligned to N/2. This is
-  // because regardless of where the buffer is located, we can always find a
-  // starting location within it that meets this alignment.
-  EXPECT_TRUE(block->can_allocate(block->outer_size() / 2, 1));
-  auto info = Block::allocate(block, block->outer_size() / 2, 1);
-  EXPECT_NE(info.block, static_cast<Block *>(nullptr));
+    auto info = Block::allocate(block, alignment, 1);
+    EXPECT_NE(info.block, static_cast<Block *>(nullptr));
+  }
 }
 
 TEST(LlvmLibcBlockTest, AllocateAlreadyAligned) {
   constexpr size_t kN = 1024;
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
+  uintptr_t orig_end = reinterpret_cast<uintptr_t>(block) + block->outer_size();
 
-  // This should result in no new blocks.
-  constexpr size_t kAlignment = Block::ALIGNMENT;
-  constexpr size_t prev_field_size = sizeof(size_t);
-  constexpr size_t kExpectedSize = Block::ALIGNMENT + prev_field_size;
-  EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));
+  // Request a size one byte more than the prev_ field.
+  constexpr size_t SIZE = sizeof(size_t) + 1;
 
   auto [aligned_block, prev, next] =
-      Block::allocate(block, Block::ALIGNMENT, kExpectedSize);
+      Block::allocate(block, alignof(max_align_t), SIZE);
 
   // Since this is already aligned, there should be no previous block.
   EXPECT_EQ(prev, static_cast<Block *>(nullptr));
 
-  // Ensure we the block is aligned and the size we expect.
+  // Ensure the block is aligned and large enough.
   EXPECT_NE(aligned_block, static_cast<Block *>(nullptr));
-  EXPECT_TRUE(aligned_block->is_usable_space_aligned(Block::ALIGNMENT));
-  EXPECT_EQ(aligned_block->inner_size(), kExpectedSize);
+  EXPECT_TRUE(aligned_block->is_usable_space_aligned(alignof(max_align_t)));
+  EXPECT_GE(aligned_block->inner_size(), SIZE);
 
   // Check the next block.
   EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
-  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
-            bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(next) + next->outer_size(), orig_end);
 }
 
 TEST(LlvmLibcBlockTest, AllocateNeedsAlignment) {
   constexpr size_t kN = 1024;
 
-  alignas(kN) array<byte, kN> bytes{};
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
-  // Ensure first the usable_data is only aligned to the block alignment.
-  ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD);
-  ASSERT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
+  uintptr_t orig_end = reinterpret_cast<uintptr_t>(block) + block->outer_size();
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
-  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
-
-  constexpr size_t kSize = 10;
-  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+  size_t alignment = alignof(max_align_t);
+  while (block->is_usable_space_aligned(alignment))
+    alignment += alignof(max_align_t);
 
-  auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(block, alignment, 10);
 
   // Check the previous block was created appropriately. Since this block is the
   // first block, a new one should be made before this.
@@ -453,19 +437,18 @@ TEST(LlvmLibcBlockTest, AllocateNeedsAlignment) {
 
   // Ensure we the block is aligned and the size we expect.
   EXPECT_NE(next, static_cast<Block *>(nullptr));
-  EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment));
+  EXPECT_TRUE(aligned_block->is_usable_space_aligned(alignment));
 
   // Check the next block.
   EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
-  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
-            bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(next) + next->outer_size(), orig_end);
 }
 
 TEST(LlvmLibcBlockTest, PreviousBlockMergedIfNotFirst) {
   constexpr size_t kN = 1024;
 
-  alignas(kN) array<byte, kN> bytes{};
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
@@ -480,15 +463,12 @@ TEST(LlvmLibcBlockTest, PreviousBlockMergedIfNotFirst) {
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
-  ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment));
+  size_t alignment = alignof(max_align_t);
+  while (newblock->is_usable_space_aligned(alignment))
+    alignment += alignof(max_align_t);
 
   // Ensure we can allocate in the new block.
-  constexpr size_t kSize = Block::ALIGNMENT;
-  EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));
-
-  auto [aligned_block, prev, next] =
-      Block::allocate(newblock, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(newblock, alignment, 1);
 
   // Now there should be no new previous block. Instead, the padding we did
   // create should be merged into the original previous block.
@@ -505,26 +485,26 @@ TEST(LlvmLibcBlockTest, CanRemergeBlockAllocations) {
   // This is the same setup as with the `AllocateNeedsAlignment` test case.
   constexpr size_t kN = 1024;
 
-  alignas(kN) array<byte, kN> bytes{};
+  array<byte, kN> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
+
+  Block *orig_block = block;
+  size_t orig_size = orig_block->outer_size();
+
   Block *last = block->next();
 
-  // Ensure first the usable_data is only aligned to the block alignment.
-  ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD);
   ASSERT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
-  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
-
-  constexpr size_t kSize = Block::ALIGNMENT;
-  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+  size_t alignment = alignof(max_align_t);
+  while (block->is_usable_space_aligned(alignment))
+    alignment += alignof(max_align_t);
 
-  auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(block, alignment, 1);
 
   // Check we have the appropriate blocks.
   ASSERT_NE(prev, static_cast<Block *>(nullptr));
@@ -540,8 +520,6 @@ TEST(LlvmLibcBlockTest, CanRemergeBlockAllocations) {
   EXPECT_EQ(prev->next(), last);
 
   // We should have the original buffer.
-  EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
-  EXPECT_EQ(prev->outer_size(), bytes.size() - Block::BLOCK_OVERHEAD);
-  EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(),
-            &*bytes.end() - Block::BLOCK_OVERHEAD);
+  EXPECT_EQ(prev, orig_block);
+  EXPECT_EQ(prev->outer_size(), orig_size);
 }

diff  --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp
index 991c158825a888..a3d4713e89e44e 100644
--- a/libc/test/src/__support/freelist_heap_test.cpp
+++ b/libc/test/src/__support/freelist_heap_test.cpp
@@ -42,7 +42,7 @@ using LIBC_NAMESPACE::cpp::span;
     void RunTest(FreeListHeap &allocator, [[maybe_unused]] size_t N);          \
   };                                                                           \
   TEST_F(LlvmLibcFreeListHeapTest##TestCase, TestCase) {                       \
-    alignas(Block) byte buf[BufferSize] = {byte(0)};                           \
+    byte buf[BufferSize] = {byte(0)};                                          \
     FreeListHeap allocator(buf);                                               \
     RunTest(allocator, BufferSize);                                            \
     RunTest(*freelist_heap, freelist_heap->region().size());                   \
@@ -95,30 +95,31 @@ TEST_FOR_EACH_ALLOCATOR(ReturnsNullWhenAllocationTooLarge, 2048) {
 // is used for other test cases and we don't explicitly free them.
 TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
   constexpr size_t N = 2048;
-  alignas(Block) byte buf[N] = {byte(0)};
+  byte buf[N];
 
   FreeListHeap allocator(buf);
 
-  // Use aligned_allocate so we don't need to worry about ensuring the `buf`
-  // being aligned to max_align_t.
-  EXPECT_NE(allocator.aligned_allocate(1, N - 2 * Block::BLOCK_OVERHEAD),
-            static_cast<void *>(nullptr));
+  bool went_null = false;
+  for (int i = 0; i < N; i++) {
+    if (!allocator.allocate(1)) {
+      went_null = true;
+      break;
+    }
+  }
+  EXPECT_TRUE(went_null);
   EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
 }
 
 TEST_FOR_EACH_ALLOCATOR(ReturnedPointersAreAligned, 2048) {
   void *ptr1 = allocator.allocate(1);
 
-  // Should be aligned to native pointer alignment
   uintptr_t ptr1_start = reinterpret_cast<uintptr_t>(ptr1);
-  size_t alignment = alignof(void *);
-
-  EXPECT_EQ(ptr1_start % alignment, static_cast<size_t>(0));
+  EXPECT_EQ(ptr1_start % alignof(max_align_t), static_cast<size_t>(0));
 
   void *ptr2 = allocator.allocate(1);
   uintptr_t ptr2_start = reinterpret_cast<uintptr_t>(ptr2);
 
-  EXPECT_EQ(ptr2_start % alignment, static_cast<size_t>(0));
+  EXPECT_EQ(ptr2_start % alignof(max_align_t), static_cast<size_t>(0));
 }
 
 TEST_FOR_EACH_ALLOCATOR(CanRealloc, 2048) {
@@ -241,16 +242,15 @@ TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
 
 // This test is not part of the TEST_FOR_EACH_ALLOCATOR since we want to
 // explicitly ensure that the buffer can still return aligned allocations even
-// if the underlying buffer is at most aligned to the Block alignment. This
-// is so we can check that we can still get aligned allocations even if the
-// underlying buffer is not aligned to the alignments we request.
-TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockAligned) {
+// if the underlying buffer is unaligned. This is so we can check that we can
+// still get aligned allocations even if the underlying buffer is not aligned to
+// the alignments we request.
+TEST(LlvmLibcFreeListHeap, AlignedAllocUnalignedBuffer) {
   constexpr size_t BUFFER_SIZE = 4096;
-  constexpr size_t BUFFER_ALIGNMENT = alignof(Block) * 2;
-  alignas(BUFFER_ALIGNMENT) byte buf[BUFFER_SIZE] = {byte(0)};
+  byte buf[4096] = {byte(0)};
 
-  // Ensure the underlying buffer is at most aligned to the block type.
-  FreeListHeap allocator(span<byte>(buf).subspan(alignof(Block)));
+  // Ensure the underlying buffer is poorly aligned.
+  FreeListHeap allocator(span<byte>(buf).subspan(1));
 
   constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
   constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};


        


More information about the libc-commits mailing list