[libc-commits] [libc] [libc] Fix malloc Block alignment issue on riscv32 (PR #117815)

Daniel Thornburgh via libc-commits libc-commits at lists.llvm.org
Wed Nov 27 12:21:23 PST 2024


https://github.com/mysterymath updated https://github.com/llvm/llvm-project/pull/117815

>From 996928e76a698e3f55b9c1213cae3c500ca310fa Mon Sep 17 00:00:00 2001
From: Daniel Thornburgh <mysterymath at gmail.com>
Date: Sat, 23 Nov 2024 11:30:28 -0800
Subject: [PATCH] [libc] Fix malloc Block alignment issue on riscv32

Aligning blocks to max_align_t is neither necessary nor sufficient to
ensure that the usable_space() is so aligned. Instead, we make this an
invariant of Block and maintain it in init() and split().

This allows targets like riscv32 with small pointers and large
max_align_t to maintain the property that all available blocks are
aligned for malloc(). This change adjusts the tests to match and also
updates them to be closer to llvm-libc style.
---
 libc/fuzzing/__support/freelist_heap_fuzz.cpp |   6 +-
 libc/src/__support/CMakeLists.txt             |   1 +
 libc/src/__support/block.h                    | 244 +++++++-------
 libc/src/__support/freelist_heap.h            |  23 +-
 libc/src/__support/freestore.h                |  10 +-
 libc/test/src/__support/block_test.cpp        | 317 +++++++-----------
 .../test/src/__support/freelist_heap_test.cpp | 106 +++---
 .../src/__support/freelist_malloc_test.cpp    |  21 +-
 8 files changed, 298 insertions(+), 430 deletions(-)

diff --git a/libc/fuzzing/__support/freelist_heap_fuzz.cpp b/libc/fuzzing/__support/freelist_heap_fuzz.cpp
index d152d0f35499f8..d5764adcbb2642 100644
--- a/libc/fuzzing/__support/freelist_heap_fuzz.cpp
+++ b/libc/fuzzing/__support/freelist_heap_fuzz.cpp
@@ -100,13 +100,13 @@ optional<AllocType> choose<AllocType>(const uint8_t *&data, size_t &remainder) {
       *raw % static_cast<uint8_t>(AllocType::NUM_ALLOC_TYPES));
 }
 
-constexpr size_t heap_size = 64 * 1024;
+constexpr size_t HEAP_SIZE = 64 * 1024;
 
 optional<size_t> choose_size(const uint8_t *&data, size_t &remainder) {
   auto raw = choose<size_t>(data, remainder);
   if (!raw)
     return nullopt;
-  return *raw % heap_size;
+  return *raw % HEAP_SIZE;
 }
 
 optional<size_t> choose_alloc_idx(const AllocVec &allocs, const uint8_t *&data,
@@ -126,7 +126,7 @@ optional<size_t> choose_alloc_idx(const AllocVec &allocs, const uint8_t *&data,
   TYPE NAME = *maybe_##NAME
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t remainder) {
-  LIBC_NAMESPACE::FreeListHeapBuffer<heap_size> heap;
+  LIBC_NAMESPACE::FreeListHeapBuffer<HEAP_SIZE> heap;
   AllocVec allocs(heap);
 
   uint8_t canary = 0;
diff --git a/libc/src/__support/CMakeLists.txt b/libc/src/__support/CMakeLists.txt
index 8f85740f70a06e..f11ea3e84af559 100644
--- a/libc/src/__support/CMakeLists.txt
+++ b/libc/src/__support/CMakeLists.txt
@@ -12,6 +12,7 @@ add_header_library(
     libc.src.__support.CPP.optional
     libc.src.__support.CPP.span
     libc.src.__support.CPP.type_traits
+    libc.src.__support.math_extras
 )
 
 add_object_library(
diff --git a/libc/src/__support/block.h b/libc/src/__support/block.h
index 9ca3f11530c4ba..7cdad47b29be1d 100644
--- a/libc/src/__support/block.h
+++ b/libc/src/__support/block.h
@@ -18,6 +18,7 @@
 #include "src/__support/CPP/type_traits.h"
 #include "src/__support/libc_assert.h"
 #include "src/__support/macros/config.h"
+#include "src/__support/math_extras.h"
 
 #include <stdint.h>
 
@@ -49,8 +50,7 @@ LIBC_INLINE constexpr T *align_down(T *value, size_t alignment) {
 
 /// Returns the value rounded up to the nearest multiple of alignment.
 LIBC_INLINE constexpr size_t align_up(size_t value, size_t alignment) {
-  __builtin_add_overflow(value, alignment - 1, &value);
-  return align_down(value, alignment);
+  return align_down(value + alignment - 1, alignment);
 }
 
 /// Returns the value rounded up to the nearest multiple of alignment.
@@ -68,8 +68,8 @@ using cpp::optional;
 /// The blocks store their offsets to the previous and next blocks. The latter
 /// is also the block's size.
 ///
-/// Blocks will always be aligned to a `ALIGNMENT` boundary. Block sizes will
-/// always be rounded up to a multiple of `ALIGNMENT`.
+/// All blocks have their usable space aligned to some multiple of max_align_t.
+/// This also implies that block outer sizes are aligned to max_align_t.
 ///
 /// As an example, the diagram below represents two contiguous `Block`s. The
 /// indices indicate byte offsets:
@@ -122,15 +122,13 @@ class Block {
   static constexpr size_t SIZE_MASK = ~(PREV_FREE_MASK | LAST_MASK);
 
 public:
-  static constexpr size_t ALIGNMENT = cpp::max(alignof(max_align_t), size_t{4});
-  static const size_t BLOCK_OVERHEAD;
-
   // No copy or move.
   Block(const Block &other) = delete;
   Block &operator=(const Block &other) = delete;
 
-  /// Creates the first block for a given memory region, followed by a sentinel
-  /// last block. Returns the first block.
+  /// Initializes a given memory region into a first block and a sentinel last
+  /// block. Returns the first block, which has its usable space aligned to
+  /// max_align_t.
   static optional<Block *> init(ByteSpan region);
 
   /// @returns  A pointer to a `Block`, given a pointer to the start of the
@@ -142,11 +140,11 @@ class Block {
   ///           pointer will return a non-null pointer.
   LIBC_INLINE static Block *from_usable_space(void *usable_space) {
     auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
-    return reinterpret_cast<Block *>(bytes - BLOCK_OVERHEAD);
+    return reinterpret_cast<Block *>(bytes - sizeof(Block));
   }
   LIBC_INLINE static const Block *from_usable_space(const void *usable_space) {
     const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
-    return reinterpret_cast<const Block *>(bytes - BLOCK_OVERHEAD);
+    return reinterpret_cast<const Block *>(bytes - sizeof(Block));
   }
 
   /// @returns The total size of the block in bytes, including the header.
@@ -154,7 +152,7 @@ class Block {
 
   LIBC_INLINE static size_t outer_size(size_t inner_size) {
     // The usable region includes the prev_ field of the next block.
-    return inner_size - sizeof(prev_) + BLOCK_OVERHEAD;
+    return inner_size - sizeof(prev_) + sizeof(Block);
   }
 
   /// @returns The number of usable bytes inside the block were it to be
@@ -182,15 +180,18 @@ class Block {
   /// @returns The number of usable bytes inside a block with the given outer
   /// size if it remains free.
   LIBC_INLINE static size_t inner_size_free(size_t outer_size) {
-    return outer_size - BLOCK_OVERHEAD;
+    return outer_size - sizeof(Block);
   }
 
   /// @returns A pointer to the usable space inside this block.
+  ///
+  /// Unless specifically requested otherwise, this will be aligned to
+  /// max_align_t.
   LIBC_INLINE cpp::byte *usable_space() {
-    return reinterpret_cast<cpp::byte *>(this) + BLOCK_OVERHEAD;
+    return reinterpret_cast<cpp::byte *>(this) + sizeof(Block);
   }
   LIBC_INLINE const cpp::byte *usable_space() const {
-    return reinterpret_cast<const cpp::byte *>(this) + BLOCK_OVERHEAD;
+    return reinterpret_cast<const cpp::byte *>(this) + sizeof(Block);
   }
 
   // @returns The region of memory the block manages, including the header.
@@ -201,11 +202,12 @@ class Block {
   /// Attempts to split this block.
   ///
   /// If successful, the block will have an inner size of at least
-  /// `new_inner_size`, rounded to ensure that the split point is on an
-  /// ALIGNMENT boundary. The remaining space will be returned as a new block.
-  /// Note that the prev_ field of the next block counts as part of the inner
-  /// size of the returnd block.
-  optional<Block *> split(size_t new_inner_size);
+  /// `new_inner_size`. The remaining space will be returned as a new block,
+  /// with usable space aligned to `usable_space_alignment`. Note that the prev_
+  /// field of the next block counts as part of the inner size of the block.
+  /// `usable_space_alignment` must be a multiple of max_align_t.
+  optional<Block *> split(size_t new_inner_size,
+                          size_t usable_space_alignment = alignof(max_align_t));
 
   /// Merges this block with the one that comes after it.
   bool merge_next();
@@ -249,45 +251,49 @@ class Block {
   LIBC_INLINE void mark_last() { next_ |= LAST_MASK; }
 
   LIBC_INLINE constexpr Block(size_t outer_size) : next_(outer_size) {
-    LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned");
+    LIBC_ASSERT(outer_size % alignof(Block) == 0 &&
+                "block sizes must be aligned");
+    LIBC_ASSERT(is_usable_space_aligned(alignof(max_align_t)) &&
+                "usable space must be aligned to a multiple of max_align_t");
   }
 
   LIBC_INLINE bool is_usable_space_aligned(size_t alignment) const {
     return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
   }
 
-  /// @returns The new inner size of this block that would give the usable
-  /// space of the next block the given alignment.
-  LIBC_INLINE size_t padding_for_alignment(size_t alignment) const {
-    if (is_usable_space_aligned(alignment))
-      return 0;
-
-    // We need to ensure we can always split this block into a "padding" block
-    // and the aligned block. To do this, we need enough extra space for at
-    // least one block.
-    //
-    // |block   |usable_space                          |
-    // |........|......................................|
-    //                            ^
-    //                            Alignment requirement
+  // Returns the minimum inner size necessary for a block of that size to
+  // always be able to allocate at the given size and alignment.
+  //
+  // Returns 0 if there is no such size.
+  LIBC_INLINE static size_t min_size_for_allocation(size_t alignment,
+                                                    size_t size) {
+    LIBC_ASSERT(alignment >= alignof(max_align_t) &&
+                alignment % alignof(max_align_t) == 0 &&
+                "alignment must be multiple of max_align_t");
+
+    if (alignment == alignof(max_align_t))
+      return size;
+
+    // We must create a padding block to align the usable space of the next
+    // block. The next block's usable space can be found by advancing by
+    // sizeof(Block) then aligning up. The amount advanced is the amount of
+    // additional inner size required.
     //
-    //
-    // |block   |space   |block   |usable_space        |
-    // |........|........|........|....................|
-    //                            ^
-    //                            Alignment requirement
-    //
-    alignment = cpp::max(alignment, ALIGNMENT);
-    uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
-    uintptr_t next_usable_space = align_up(start + BLOCK_OVERHEAD, alignment);
-    uintptr_t next_block = next_usable_space - BLOCK_OVERHEAD;
-    return next_block - start + sizeof(prev_);
+    // The minimum advancement is sizeof(Block), since the resulting position
+    // may happen to be aligned. What is the maximum? Advancing by
+    // sizeof(Block) leaves the result still aligned to max_align_t. So the
+    // most additional advancement required would be if the point is exactly
+    // alignof(max_align_t) past alignment. The remaining size to the next
+    // alignment would then be alignment - alignof(max_align_t). So the total
+    // maximum advancement required is sizeof(Block) + alignment -
+    // alignof(max_align_t).
+    if (add_overflow(size, sizeof(Block), size))
+      return 0;
+    if (add_overflow(size, alignment - alignof(max_align_t), size))
+      return 0;
+    return size;
   }
 
-  // Check that we can `allocate` a block with a given alignment and size from
-  // this existing block.
-  bool can_allocate(size_t alignment, size_t size) const;
-
   // This is the return type for `allocate` which can split one block into up to
   // three blocks.
   struct BlockInfo {
@@ -309,21 +315,30 @@ class Block {
     Block *next;
   };
 
-  // Divide a block into up to 3 blocks according to `BlockInfo`. This should
-  // only be called if `can_allocate` returns true.
+  // Divide a block into up to 3 blocks according to `BlockInfo`. Behavior is
+  // undefined if allocation is not possible for the given size and alignment.
   static BlockInfo allocate(Block *block, size_t alignment, size_t size);
 
+  LIBC_INLINE static uintptr_t next_possible_block_start(
+      uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) {
+    return align_up(ptr + sizeof(Block), usable_space_alignment) -
+           sizeof(Block);
+  }
+  LIBC_INLINE static uintptr_t prev_possible_block_start(
+      uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) {
+    return align_down(ptr, usable_space_alignment) - sizeof(Block);
+  }
+
 private:
   /// Construct a block to represent a span of bytes. Overwrites only enough
   /// memory for the block header; the rest of the span is left alone.
   LIBC_INLINE static Block *as_block(ByteSpan bytes) {
+    LIBC_ASSERT(reinterpret_cast<uintptr_t>(bytes.data()) % alignof(Block) ==
+                    0 &&
+                "block start must be suitably aligned");
     return ::new (bytes.data()) Block(bytes.size());
   }
 
-  /// Like `split`, but assumes the caller has already checked to parameters to
-  /// ensure the split will succeed.
-  Block *split_impl(size_t new_inner_size);
-
   /// Offset from this block to the previous block. 0 if this is the first
   /// block. This field is only alive when the previous block is free;
   /// otherwise, its memory is reused as part of the previous block's usable
@@ -343,81 +358,46 @@ class Block {
   ///   previous block is free.
   /// * If the `last` flag is set, the block is the sentinel last block. It is
   ///   summarily considered used and has no next block.
-} __attribute__((packed, aligned(cpp::max(alignof(max_align_t), size_t{4}))));
-
-inline constexpr size_t Block::BLOCK_OVERHEAD =
-    align_up(sizeof(Block), ALIGNMENT);
-
-LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) {
-  if (bytes.data() == nullptr)
-    return ByteSpan();
-
-  auto unaligned_start = reinterpret_cast<uintptr_t>(bytes.data());
-  auto aligned_start = align_up(unaligned_start, alignment);
-  auto unaligned_end = unaligned_start + bytes.size();
-  auto aligned_end = align_down(unaligned_end, alignment);
-
-  if (aligned_end <= aligned_start)
-    return ByteSpan();
+};
 
-  return bytes.subspan(aligned_start - unaligned_start,
-                       aligned_end - aligned_start);
-}
+static_assert(alignof(max_align_t) >= 4,
+              "the low two bits of block sizes must be free for use as flags");
 
 LIBC_INLINE
 optional<Block *> Block::init(ByteSpan region) {
-  optional<ByteSpan> result = get_aligned_subspan(region, ALIGNMENT);
-  if (!result)
+  if (!region.data())
     return {};
 
-  region = result.value();
-  // Two blocks are allocated: a free block and a sentinel last block.
-  if (region.size() < 2 * BLOCK_OVERHEAD)
-    return {};
+  uintptr_t start = reinterpret_cast<uintptr_t>(region.data());
+  uintptr_t end = start + region.size();
 
-  if (cpp::numeric_limits<size_t>::max() < region.size())
+  uintptr_t block_start = next_possible_block_start(start);
+  uintptr_t last_start = prev_possible_block_start(end);
+  if (block_start + sizeof(Block) > last_start)
     return {};
 
-  Block *block = as_block(region.first(region.size() - BLOCK_OVERHEAD));
-  Block *last = as_block(region.last(BLOCK_OVERHEAD));
+  auto *last_start_ptr = reinterpret_cast<cpp::byte *>(last_start);
+  Block *block =
+      as_block({reinterpret_cast<cpp::byte *>(block_start), last_start_ptr});
+  Block *last = as_block({last_start_ptr, sizeof(Block)});
   block->mark_free();
   last->mark_last();
   return block;
 }
 
-LIBC_INLINE
-bool Block::can_allocate(size_t alignment, size_t size) const {
-  if (inner_size() < size)
-    return false;
-  if (is_usable_space_aligned(alignment))
-    return true;
-
-  // Alignment isn't met, so a padding block is needed. Determine amount of
-  // inner_size() consumed by the padding block.
-  size_t padding_size = padding_for_alignment(alignment) - sizeof(prev_);
-
-  // Check that there is room for the allocation in the following aligned block.
-  size_t aligned_inner_size = inner_size() - padding_size - BLOCK_OVERHEAD;
-  return size <= aligned_inner_size;
-}
-
 LIBC_INLINE
 Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
-  LIBC_ASSERT(
-      block->can_allocate(alignment, size) &&
-      "Calls to this function for a given alignment and size should only be "
-      "done if `can_allocate` for these parameters returns true.");
+  LIBC_ASSERT(alignment % alignof(max_align_t) == 0 &&
+              "alignment must be a multiple of max_align_t");
 
   BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};
 
   if (!info.block->is_usable_space_aligned(alignment)) {
     Block *original = info.block;
-    optional<Block *> maybe_aligned_block =
-        original->split(info.block->padding_for_alignment(alignment));
+    // The padding block has no minimum size requirement.
+    optional<Block *> maybe_aligned_block = original->split(0, alignment);
     LIBC_ASSERT(maybe_aligned_block.has_value() &&
-                "This split should always result in a new block. The check in "
-                "`can_allocate` ensures that we have enough space here to make "
-                "two blocks.");
+                "it should always be possible to split for alignment");
 
     if (Block *prev = original->prev_free()) {
       // If there is a free block before this, we can merge the current one with
@@ -428,8 +408,6 @@ Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
     }
 
     Block *aligned_block = *maybe_aligned_block;
-    LIBC_ASSERT(aligned_block->is_usable_space_aligned(alignment) &&
-                "The aligned block isn't aligned somehow.");
     info.block = aligned_block;
   }
 
@@ -441,33 +419,35 @@ Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
 }
 
 LIBC_INLINE
-optional<Block *> Block::split(size_t new_inner_size) {
+optional<Block *> Block::split(size_t new_inner_size,
+                               size_t usable_space_alignment) {
+
+  LIBC_ASSERT(usable_space_alignment % alignof(max_align_t) == 0 &&
+              "alignment must be a multiple of max_align_t");
+
   if (used())
     return {};
-  // The prev_ field of the next block is always available, so there is a
-  // minimum size to a block created through splitting.
-  if (new_inner_size < sizeof(prev_))
-    new_inner_size = sizeof(prev_);
-
-  size_t old_inner_size = inner_size();
-  new_inner_size =
-      align_up(new_inner_size - sizeof(prev_), ALIGNMENT) + sizeof(prev_);
-  if (old_inner_size < new_inner_size)
-    return {};
 
-  if (old_inner_size - new_inner_size < BLOCK_OVERHEAD)
-    return {};
+  size_t old_outer_size = outer_size();
 
-  return split_impl(new_inner_size);
-}
+  // Compute the minimum outer size that produces a block of at least
+  // `new_inner_size`.
+  size_t min_outer_size = outer_size(cpp::max(new_inner_size, sizeof(prev_)));
 
-LIBC_INLINE
-Block *Block::split_impl(size_t new_inner_size) {
-  size_t outer_size1 = outer_size(new_inner_size);
-  LIBC_ASSERT(outer_size1 % ALIGNMENT == 0 && "new size must be aligned");
-  ByteSpan new_region = region().subspan(outer_size1);
+  uintptr_t start = reinterpret_cast<uintptr_t>(this);
+  uintptr_t next_block_start =
+      next_possible_block_start(start + min_outer_size, usable_space_alignment);
+  size_t new_outer_size = next_block_start - start;
+  LIBC_ASSERT(new_outer_size % alignof(max_align_t) == 0 &&
+              "new size must be aligned to max_align_t");
+
+  if (old_outer_size < new_outer_size ||
+      old_outer_size - new_outer_size < sizeof(Block))
+    return {};
+
+  ByteSpan new_region = region().subspan(new_outer_size);
   next_ &= ~SIZE_MASK;
-  next_ |= outer_size1;
+  next_ |= new_outer_size;
 
   Block *new_block = as_block(new_region);
   mark_free(); // Free status for this block is now stored in new_block.
diff --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h
index 8fa36257cb91ae..d58685194aeb81 100644
--- a/libc/src/__support/freelist_heap.h
+++ b/libc/src/__support/freelist_heap.h
@@ -89,28 +89,14 @@ LIBC_INLINE void *FreeListHeap::allocate_impl(size_t alignment, size_t size) {
   if (!is_initialized)
     init();
 
-  size_t request_size = size;
-
-  // TODO: usable_space should always be aligned to max_align_t.
-  if (alignment > alignof(max_align_t) ||
-      (Block::BLOCK_OVERHEAD % alignof(max_align_t) != 0)) {
-    // TODO: This bound isn't precisely calculated yet. It assumes one extra
-    // Block::ALIGNMENT to accomodate the possibility for padding block
-    // overhead. (alignment - 1) ensures that there is an aligned point
-    // somewhere in usable_space, but this isn't tight either, since
-    // usable_space is also already somewhat aligned.
-    if (add_overflow(size, (alignment - 1) + Block::ALIGNMENT, request_size))
-      return nullptr;
-  }
+  size_t request_size = Block::min_size_for_allocation(alignment, size);
+  if (!request_size)
+    return nullptr;
 
   Block *block = free_store.remove_best_fit(request_size);
   if (!block)
     return nullptr;
 
-  LIBC_ASSERT(block->can_allocate(alignment, size) &&
-              "block should always be large enough to allocate at the correct "
-              "alignment");
-
   auto block_info = Block::allocate(block, alignment, size);
   if (block_info.next)
     free_store.insert(block_info.next);
@@ -135,6 +121,9 @@ LIBC_INLINE void *FreeListHeap::aligned_allocate(size_t alignment,
   if (size % alignment != 0)
     return nullptr;
 
+  // The minimum alignment supported by Block is max_align_t.
+  alignment = cpp::max(alignment, alignof(max_align_t));
+
   return allocate_impl(alignment, size);
 }
 
diff --git a/libc/src/__support/freestore.h b/libc/src/__support/freestore.h
index 97197dda4b546b..09f2479debb36a 100644
--- a/libc/src/__support/freestore.h
+++ b/libc/src/__support/freestore.h
@@ -40,13 +40,12 @@ class FreeStore {
   Block *remove_best_fit(size_t size);
 
 private:
-  static constexpr size_t ALIGNMENT = alignof(max_align_t);
   static constexpr size_t MIN_OUTER_SIZE =
-      align_up(Block::BLOCK_OVERHEAD + sizeof(FreeList::Node), ALIGNMENT);
+      align_up(sizeof(Block) + sizeof(FreeList::Node), alignof(max_align_t));
   static constexpr size_t MIN_LARGE_OUTER_SIZE =
-      align_up(Block::BLOCK_OVERHEAD + sizeof(FreeTrie::Node), ALIGNMENT);
+      align_up(sizeof(Block) + sizeof(FreeTrie::Node), alignof(max_align_t));
   static constexpr size_t NUM_SMALL_SIZES =
-      (MIN_LARGE_OUTER_SIZE - MIN_OUTER_SIZE) / ALIGNMENT;
+      (MIN_LARGE_OUTER_SIZE - MIN_OUTER_SIZE) / alignof(max_align_t);
 
   LIBC_INLINE static bool too_small(Block *block) {
     return block->outer_size() < MIN_OUTER_SIZE;
@@ -99,7 +98,8 @@ LIBC_INLINE Block *FreeStore::remove_best_fit(size_t size) {
 
 LIBC_INLINE FreeList &FreeStore::small_list(Block *block) {
   LIBC_ASSERT(is_small(block) && "only legal for small blocks");
-  return small_lists[(block->outer_size() - MIN_OUTER_SIZE) / ALIGNMENT];
+  return small_lists[(block->outer_size() - MIN_OUTER_SIZE) /
+                     alignof(max_align_t)];
 }
 
 LIBC_INLINE FreeList *FreeStore::find_best_small_fit(size_t size) {
diff --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp
index 5e437db51b6092..f39daac88726dc 100644
--- a/libc/test/src/__support/block_test.cpp
+++ b/libc/test/src/__support/block_test.cpp
@@ -21,37 +21,47 @@ using LIBC_NAMESPACE::cpp::byte;
 using LIBC_NAMESPACE::cpp::span;
 
 TEST(LlvmLibcBlockTest, CanCreateSingleAlignedBlock) {
-  constexpr size_t kN = 1024;
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  alignas(alignof(Block)) array<byte, 1024> bytes;
 
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block->is_usable_space_aligned(alignof(max_align_t)));
+
   Block *last = block->next();
   ASSERT_NE(last, static_cast<Block *>(nullptr));
-  constexpr size_t last_outer_size = Block::BLOCK_OVERHEAD;
-  EXPECT_EQ(last->outer_size(), last_outer_size);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(last) % alignof(Block), size_t{0});
+
+  EXPECT_EQ(last->outer_size(), sizeof(Block));
   EXPECT_EQ(last->prev_free(), block);
   EXPECT_TRUE(last->used());
 
-  EXPECT_EQ(block->outer_size(), kN - last_outer_size);
-  constexpr size_t last_prev_field_size = sizeof(size_t);
-  EXPECT_EQ(block->inner_size(), kN - last_outer_size - Block::BLOCK_OVERHEAD +
-                                     last_prev_field_size);
+  size_t block_outer_size =
+      reinterpret_cast<uintptr_t>(last) - reinterpret_cast<uintptr_t>(block);
+  EXPECT_EQ(block->outer_size(), block_outer_size);
+  EXPECT_EQ(block->inner_size(),
+            block_outer_size - sizeof(Block) + sizeof(size_t));
   EXPECT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
   EXPECT_FALSE(block->used());
 }
 
 TEST(LlvmLibcBlockTest, CanCreateUnalignedSingleBlock) {
-  constexpr size_t kN = 1024;
-
   // Force alignment, so we can un-force it below
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  alignas(alignof(Block)) array<byte, 1024> bytes;
   span<byte> aligned(bytes);
 
   auto result = Block::init(aligned.subspan(1));
   EXPECT_TRUE(result.has_value());
+
+  Block *block = *result;
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block->is_usable_space_aligned(alignof(max_align_t)));
+
+  Block *last = block->next();
+  ASSERT_NE(last, static_cast<Block *>(nullptr));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(last) % alignof(Block), size_t{0});
 }
 
 TEST(LlvmLibcBlockTest, CannotCreateTooSmallBlock) {
@@ -61,57 +71,50 @@ TEST(LlvmLibcBlockTest, CannotCreateTooSmallBlock) {
 }
 
 TEST(LlvmLibcBlockTest, CanSplitBlock) {
-  constexpr size_t kN = 1024;
-  constexpr size_t prev_field_size = sizeof(size_t);
   // Give the split position a large alignment.
-  constexpr size_t kSplitN = 512 + prev_field_size;
+  constexpr size_t SPLIT = 512 + sizeof(size_t);
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   auto *block1 = *result;
   size_t orig_size = block1->outer_size();
 
-  result = block1->split(kSplitN);
+  result = block1->split(SPLIT);
   ASSERT_TRUE(result.has_value());
   auto *block2 = *result;
 
-  EXPECT_EQ(block1->inner_size(), kSplitN);
-  EXPECT_EQ(block1->outer_size(),
-            kSplitN - prev_field_size + Block::BLOCK_OVERHEAD);
+  EXPECT_EQ(block1->inner_size(), SPLIT);
+  EXPECT_EQ(block1->outer_size(), SPLIT - sizeof(size_t) + sizeof(Block));
 
   EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
   EXPECT_FALSE(block2->used());
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block2) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block2->is_usable_space_aligned(alignof(max_align_t)));
 
   EXPECT_EQ(block1->next(), block2);
   EXPECT_EQ(block2->prev_free(), block1);
 }
 
 TEST(LlvmLibcBlockTest, CanSplitBlockUnaligned) {
-  constexpr size_t kN = 1024;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
   size_t orig_size = block1->outer_size();
 
-  constexpr size_t kSplitN = 513;
-  constexpr size_t prev_field_size = sizeof(size_t);
-  uintptr_t split_addr =
-      reinterpret_cast<uintptr_t>(block1) + (kSplitN - prev_field_size);
-  // Round split_addr up to a multiple of the alignment.
-  split_addr += alignof(Block) - (split_addr % alignof(Block));
-  uintptr_t split_len = split_addr - (uintptr_t)&bytes + prev_field_size;
+  constexpr size_t SPLIT = 513;
 
-  result = block1->split(kSplitN);
+  result = block1->split(SPLIT);
   ASSERT_TRUE(result.has_value());
   Block *block2 = *result;
 
-  EXPECT_EQ(block1->inner_size(), split_len);
+  EXPECT_GE(block1->inner_size(), SPLIT);
 
   EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
   EXPECT_FALSE(block2->used());
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(block2) % alignof(Block), size_t{0});
+  EXPECT_TRUE(block2->is_usable_space_aligned(alignof(max_align_t)));
 
   EXPECT_EQ(block1->next(), block2);
   EXPECT_EQ(block2->prev_free(), block1);
@@ -127,20 +130,16 @@ TEST(LlvmLibcBlockTest, CanSplitMidBlock) {
   // block1->split()
   // [[ BLOCK1 ]][[ BLOCK3 ]][[ BLOCK2 ]]
 
-  constexpr size_t kN = 1024;
-  constexpr size_t kSplit1 = 512;
-  constexpr size_t kSplit2 = 256;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  alignas(alignof(Block)) array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
 
-  result = block1->split(kSplit1);
+  result = block1->split(512);
   ASSERT_TRUE(result.has_value());
   Block *block2 = *result;
 
-  result = block1->split(kSplit2);
+  result = block1->split(256);
   ASSERT_TRUE(result.has_value());
   Block *block3 = *result;
 
@@ -151,36 +150,28 @@ TEST(LlvmLibcBlockTest, CanSplitMidBlock) {
 }
 
 TEST(LlvmLibcBlockTest, CannotSplitTooSmallBlock) {
-  constexpr size_t kN = 64;
-  constexpr size_t kSplitN = kN + 1;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 64> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
-  result = block->split(kSplitN);
+  result = block->split(block->inner_size() + 1);
   ASSERT_FALSE(result.has_value());
 }
 
 TEST(LlvmLibcBlockTest, CannotSplitBlockWithoutHeaderSpace) {
-  constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = kN - 2 * Block::BLOCK_OVERHEAD - 1;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
-  result = block->split(kSplitN);
+  result = block->split(block->inner_size() - sizeof(Block) + 1);
   ASSERT_FALSE(result.has_value());
 }
 
+// Ensure that we can't ask for more space than the block actually has...
 TEST(LlvmLibcBlockTest, CannotMakeBlockLargerInSplit) {
-  // Ensure that we can't ask for more space than the block actually has...
-  constexpr size_t kN = 1024;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
@@ -189,55 +180,35 @@ TEST(LlvmLibcBlockTest, CannotMakeBlockLargerInSplit) {
   ASSERT_FALSE(result.has_value());
 }
 
-TEST(LlvmLibcBlockTest, CannotMakeSecondBlockLargerInSplit) {
-  // Ensure that the second block in split is at least of the size of header.
-  constexpr size_t kN = 1024;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
-  auto result = Block::init(bytes);
-  ASSERT_TRUE(result.has_value());
-  Block *block = *result;
-
-  result = block->split(block->inner_size() - Block::BLOCK_OVERHEAD + 1);
-  ASSERT_FALSE(result.has_value());
-}
-
+// This block does support splitting with minimal payload size.
 TEST(LlvmLibcBlockTest, CanMakeMinimalSizeFirstBlock) {
-  // This block does support splitting with minimal payload size.
-  constexpr size_t kN = 1024;
-  constexpr size_t minimal_size = sizeof(size_t);
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
-  result = block->split(minimal_size);
+  result = block->split(0);
   ASSERT_TRUE(result.has_value());
-  EXPECT_EQ(block->inner_size(), minimal_size);
+  EXPECT_LE(block->outer_size(), sizeof(Block) + alignof(max_align_t));
 }
 
+// Likewise, the split block can be minimal-width.
 TEST(LlvmLibcBlockTest, CanMakeMinimalSizeSecondBlock) {
-  // Likewise, the split block can be minimal-width.
-  constexpr size_t kN = 1024;
-  constexpr size_t minimal_size = sizeof(size_t);
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
 
-  result = block1->split(block1->inner_size() - Block::BLOCK_OVERHEAD);
+  result = block1->split(Block::prev_possible_block_start(
+                             reinterpret_cast<uintptr_t>(block1->next())) -
+                         reinterpret_cast<uintptr_t>(block1->usable_space()) +
+                         sizeof(size_t));
   ASSERT_TRUE(result.has_value());
-  Block *block2 = *result;
-
-  EXPECT_EQ(block2->inner_size(), minimal_size);
+  EXPECT_LE((*result)->outer_size(), sizeof(Block) + alignof(max_align_t));
 }
 
 TEST(LlvmLibcBlockTest, CanMarkBlockUsed) {
-  constexpr size_t kN = 1024;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
@@ -252,38 +223,30 @@ TEST(LlvmLibcBlockTest, CanMarkBlockUsed) {
 }
 
 TEST(LlvmLibcBlockTest, CannotSplitUsedBlock) {
-  constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = 512;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
   block->mark_used();
-  result = block->split(kSplitN);
+  result = block->split(512);
   ASSERT_FALSE(result.has_value());
 }
 
 TEST(LlvmLibcBlockTest, CanMergeWithNextBlock) {
   // Do the three way merge from "CanSplitMidBlock", and let's
   // merge block 3 and 2
-  constexpr size_t kN = 1024;
-  // Give the split positions large alignments.
-  constexpr size_t prev_field_size = sizeof(size_t);
-  constexpr size_t kSplit1 = 512 + prev_field_size;
-  constexpr size_t kSplit2 = 256 + prev_field_size;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
-  size_t orig_size = block1->outer_size();
+  size_t total_size = block1->outer_size();
 
-  result = block1->split(kSplit1);
+  result = block1->split(512);
   ASSERT_TRUE(result.has_value());
 
-  result = block1->split(kSplit2);
+  result = block1->split(256);
+  size_t block1_size = block1->outer_size();
   ASSERT_TRUE(result.has_value());
   Block *block3 = *result;
 
@@ -291,21 +254,18 @@ TEST(LlvmLibcBlockTest, CanMergeWithNextBlock) {
 
   EXPECT_EQ(block1->next(), block3);
   EXPECT_EQ(block3->prev_free(), block1);
-  EXPECT_EQ(block1->inner_size(), kSplit2);
-  EXPECT_EQ(block3->outer_size(), orig_size - block1->outer_size());
+  EXPECT_EQ(block1->outer_size(), block1_size);
+  EXPECT_EQ(block3->outer_size(), total_size - block1->outer_size());
 }
 
 TEST(LlvmLibcBlockTest, CannotMergeWithFirstOrLastBlock) {
-  constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = 512;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
 
   // Do a split, just to check that the checks on next/prev are different...
-  result = block1->split(kSplitN);
+  result = block1->split(512);
   ASSERT_TRUE(result.has_value());
   Block *block2 = *result;
 
@@ -313,16 +273,13 @@ TEST(LlvmLibcBlockTest, CannotMergeWithFirstOrLastBlock) {
 }
 
 TEST(LlvmLibcBlockTest, CannotMergeUsedBlock) {
-  constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = 512;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
   // Do a split, just to check that the checks on next/prev are different...
-  result = block->split(kSplitN);
+  result = block->split(512);
   ASSERT_TRUE(result.has_value());
 
   block->mark_used();
@@ -330,9 +287,7 @@ TEST(LlvmLibcBlockTest, CannotMergeUsedBlock) {
 }
 
 TEST(LlvmLibcBlockTest, CanGetBlockFromUsableSpace) {
-  constexpr size_t kN = 1024;
-
-  array<byte, kN> bytes{};
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block1 = *result;
@@ -343,9 +298,7 @@ TEST(LlvmLibcBlockTest, CanGetBlockFromUsableSpace) {
 }
 
 TEST(LlvmLibcBlockTest, CanGetConstBlockFromUsableSpace) {
-  constexpr size_t kN = 1024;
-
-  array<byte, kN> bytes{};
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   const Block *block1 = *result;
@@ -355,93 +308,79 @@ TEST(LlvmLibcBlockTest, CanGetConstBlockFromUsableSpace) {
   EXPECT_EQ(block1, block2);
 }
 
-TEST(LlvmLibcBlockTest, CanAllocate) {
-  constexpr size_t kN = 1024 + Block::BLOCK_OVERHEAD;
-
+TEST(LlvmLibcBlockTest, Allocate) {
   // Ensure we can allocate everything up to the block size within this block.
-  for (size_t i = 0; i < kN - 2 * Block::BLOCK_OVERHEAD; ++i) {
-    alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
+  for (size_t i = 0; i < 1024; ++i) {
+    array<byte, 1024> bytes;
     auto result = Block::init(bytes);
     ASSERT_TRUE(result.has_value());
     Block *block = *result;
 
-    constexpr size_t ALIGN = 1; // Effectively ignores alignment.
-    EXPECT_TRUE(block->can_allocate(ALIGN, i));
+    if (i > block->inner_size())
+      continue;
 
-    // For each can_allocate, we should be able to do a successful call to
-    // allocate.
-    auto info = Block::allocate(block, ALIGN, i);
+    auto info = Block::allocate(block, alignof(max_align_t), i);
     EXPECT_NE(info.block, static_cast<Block *>(nullptr));
   }
 
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
-  auto result = Block::init(bytes);
-  ASSERT_TRUE(result.has_value());
-  Block *block = *result;
+  // Ensure we can allocate a byte at every guaranteeable alignment.
+  for (size_t i = 1; i < 1024 / alignof(max_align_t); ++i) {
+    array<byte, 1024> bytes;
+    auto result = Block::init(bytes);
+    ASSERT_TRUE(result.has_value());
+    Block *block = *result;
+
+    size_t alignment = i * alignof(max_align_t);
+    if (Block::min_size_for_allocation(alignment, 1) > block->inner_size())
+      continue;
 
-  // Given a block of size N (assuming it's also a power of two), we should be
-  // able to allocate a block within it that's aligned to N/2. This is
-  // because regardless of where the buffer is located, we can always find a
-  // starting location within it that meets this alignment.
-  EXPECT_TRUE(block->can_allocate(block->outer_size() / 2, 1));
-  auto info = Block::allocate(block, block->outer_size() / 2, 1);
-  EXPECT_NE(info.block, static_cast<Block *>(nullptr));
+    auto info = Block::allocate(block, alignment, 1);
+    EXPECT_NE(info.block, static_cast<Block *>(nullptr));
+  }
 }
 
 TEST(LlvmLibcBlockTest, AllocateAlreadyAligned) {
-  constexpr size_t kN = 1024;
-
-  alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
+  uintptr_t orig_end = reinterpret_cast<uintptr_t>(block) + block->outer_size();
 
-  // This should result in no new blocks.
-  constexpr size_t kAlignment = Block::ALIGNMENT;
-  constexpr size_t prev_field_size = sizeof(size_t);
-  constexpr size_t kExpectedSize = Block::ALIGNMENT + prev_field_size;
-  EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));
+  // Request a size one byte more than the prev_ field.
+  constexpr size_t SIZE = sizeof(size_t) + 1;
 
   auto [aligned_block, prev, next] =
-      Block::allocate(block, Block::ALIGNMENT, kExpectedSize);
+      Block::allocate(block, alignof(max_align_t), SIZE);
 
   // Since this is already aligned, there should be no previous block.
   EXPECT_EQ(prev, static_cast<Block *>(nullptr));
 
-  // Ensure we the block is aligned and the size we expect.
+  // Ensure the block is aligned and large enough.
   EXPECT_NE(aligned_block, static_cast<Block *>(nullptr));
-  EXPECT_TRUE(aligned_block->is_usable_space_aligned(Block::ALIGNMENT));
-  EXPECT_EQ(aligned_block->inner_size(), kExpectedSize);
+  EXPECT_TRUE(aligned_block->is_usable_space_aligned(alignof(max_align_t)));
+  EXPECT_GE(aligned_block->inner_size(), SIZE);
 
   // Check the next block.
   EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
-  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
-            bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(next) + next->outer_size(), orig_end);
 }
 
 TEST(LlvmLibcBlockTest, AllocateNeedsAlignment) {
-  constexpr size_t kN = 1024;
-
-  alignas(kN) array<byte, kN> bytes{};
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
-
-  // Ensure first the usable_data is only aligned to the block alignment.
-  ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD);
-  ASSERT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
+  uintptr_t orig_end = reinterpret_cast<uintptr_t>(block) + block->outer_size();
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
-  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
-
-  constexpr size_t kSize = 10;
-  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+  size_t alignment = alignof(max_align_t);
+  while (block->is_usable_space_aligned(alignment))
+    alignment += alignof(max_align_t);
 
-  auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(block, alignment, 10);
 
   // Check the previous block was created appropriately. Since this block is the
   // first block, a new one should be made before this.
@@ -453,25 +392,22 @@ TEST(LlvmLibcBlockTest, AllocateNeedsAlignment) {
 
   // Ensure we the block is aligned and the size we expect.
   EXPECT_NE(next, static_cast<Block *>(nullptr));
-  EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment));
+  EXPECT_TRUE(aligned_block->is_usable_space_aligned(alignment));
 
   // Check the next block.
   EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
-  EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
-            bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(next) + next->outer_size(), orig_end);
 }
 
 TEST(LlvmLibcBlockTest, PreviousBlockMergedIfNotFirst) {
-  constexpr size_t kN = 1024;
-
-  alignas(kN) array<byte, kN> bytes{};
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
 
   // Split the block roughly halfway and work on the second half.
-  auto result2 = block->split(kN / 2);
+  auto result2 = block->split(512);
   ASSERT_TRUE(result2.has_value());
   Block *newblock = *result2;
   ASSERT_EQ(newblock->prev_free(), block);
@@ -480,15 +416,12 @@ TEST(LlvmLibcBlockTest, PreviousBlockMergedIfNotFirst) {
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
-  ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment));
+  size_t alignment = alignof(max_align_t);
+  while (newblock->is_usable_space_aligned(alignment))
+    alignment += alignof(max_align_t);
 
   // Ensure we can allocate in the new block.
-  constexpr size_t kSize = Block::ALIGNMENT;
-  EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));
-
-  auto [aligned_block, prev, next] =
-      Block::allocate(newblock, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(newblock, alignment, 1);
 
   // Now there should be no new previous block. Instead, the padding we did
   // create should be merged into the original previous block.
@@ -503,28 +436,26 @@ TEST(LlvmLibcBlockTest, CanRemergeBlockAllocations) {
   // should be able to reconstruct the original block from the blocklets.
   //
   // This is the same setup as with the `AllocateNeedsAlignment` test case.
-  constexpr size_t kN = 1024;
-
-  alignas(kN) array<byte, kN> bytes{};
+  array<byte, 1024> bytes;
   auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   Block *block = *result;
+
+  Block *orig_block = block;
+  size_t orig_size = orig_block->outer_size();
+
   Block *last = block->next();
 
-  // Ensure first the usable_data is only aligned to the block alignment.
-  ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD);
   ASSERT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
-  ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
-
-  constexpr size_t kSize = Block::ALIGNMENT;
-  EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
+  size_t alignment = alignof(max_align_t);
+  while (block->is_usable_space_aligned(alignment))
+    alignment += alignof(max_align_t);
 
-  auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(block, alignment, 1);
 
   // Check we have the appropriate blocks.
   ASSERT_NE(prev, static_cast<Block *>(nullptr));
@@ -540,8 +471,6 @@ TEST(LlvmLibcBlockTest, CanRemergeBlockAllocations) {
   EXPECT_EQ(prev->next(), last);
 
   // We should have the original buffer.
-  EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
-  EXPECT_EQ(prev->outer_size(), bytes.size() - Block::BLOCK_OVERHEAD);
-  EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(),
-            &*bytes.end() - Block::BLOCK_OVERHEAD);
+  EXPECT_EQ(prev, orig_block);
+  EXPECT_EQ(prev->outer_size(), orig_size);
 }
diff --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp
index 991c158825a888..e66333a2171ca6 100644
--- a/libc/test/src/__support/freelist_heap_test.cpp
+++ b/libc/test/src/__support/freelist_heap_test.cpp
@@ -42,7 +42,7 @@ using LIBC_NAMESPACE::cpp::span;
     void RunTest(FreeListHeap &allocator, [[maybe_unused]] size_t N);          \
   };                                                                           \
   TEST_F(LlvmLibcFreeListHeapTest##TestCase, TestCase) {                       \
-    alignas(Block) byte buf[BufferSize] = {byte(0)};                           \
+    byte buf[BufferSize] = {byte(0)};                                          \
     FreeListHeap allocator(buf);                                               \
     RunTest(allocator, BufferSize);                                            \
     RunTest(*freelist_heap, freelist_heap->region().size());                   \
@@ -51,24 +51,20 @@ using LIBC_NAMESPACE::cpp::span;
                                                    size_t N)
 
 TEST_FOR_EACH_ALLOCATOR(CanAllocate, 2048) {
-  constexpr size_t ALLOC_SIZE = 512;
-
-  void *ptr = allocator.allocate(ALLOC_SIZE);
+  void *ptr = allocator.allocate(512);
 
   ASSERT_NE(ptr, static_cast<void *>(nullptr));
 }
 
 TEST_FOR_EACH_ALLOCATOR(AllocationsDontOverlap, 2048) {
-  constexpr size_t ALLOC_SIZE = 512;
-
-  void *ptr1 = allocator.allocate(ALLOC_SIZE);
-  void *ptr2 = allocator.allocate(ALLOC_SIZE);
+  void *ptr1 = allocator.allocate(512);
+  void *ptr2 = allocator.allocate(512);
 
   ASSERT_NE(ptr1, static_cast<void *>(nullptr));
   ASSERT_NE(ptr2, static_cast<void *>(nullptr));
 
   uintptr_t ptr1_start = reinterpret_cast<uintptr_t>(ptr1);
-  uintptr_t ptr1_end = ptr1_start + ALLOC_SIZE;
+  uintptr_t ptr1_end = ptr1_start + 512;
   uintptr_t ptr2_start = reinterpret_cast<uintptr_t>(ptr2);
 
   EXPECT_GT(ptr2_start, ptr1_end);
@@ -77,11 +73,9 @@ TEST_FOR_EACH_ALLOCATOR(AllocationsDontOverlap, 2048) {
 TEST_FOR_EACH_ALLOCATOR(CanFreeAndRealloc, 2048) {
   // There's not really a nice way to test that free works, apart from to try
   // and get that value back again.
-  constexpr size_t ALLOC_SIZE = 512;
-
-  void *ptr1 = allocator.allocate(ALLOC_SIZE);
+  void *ptr1 = allocator.allocate(512);
   allocator.free(ptr1);
-  void *ptr2 = allocator.allocate(ALLOC_SIZE);
+  void *ptr2 = allocator.allocate(512);
 
   EXPECT_EQ(ptr1, ptr2);
 }
@@ -94,39 +88,36 @@ TEST_FOR_EACH_ALLOCATOR(ReturnsNullWhenAllocationTooLarge, 2048) {
 // here will likely actually return a nullptr since the same global allocator
 // is used for other test cases and we don't explicitly free them.
 TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
-  constexpr size_t N = 2048;
-  alignas(Block) byte buf[N] = {byte(0)};
+  byte buf[2048];
 
   FreeListHeap allocator(buf);
 
-  // Use aligned_allocate so we don't need to worry about ensuring the `buf`
-  // being aligned to max_align_t.
-  EXPECT_NE(allocator.aligned_allocate(1, N - 2 * Block::BLOCK_OVERHEAD),
-            static_cast<void *>(nullptr));
+  bool went_null = false;
+  for (int i = 0; i < 2048; i++) {
+    if (!allocator.allocate(1)) {
+      went_null = true;
+      break;
+    }
+  }
+  EXPECT_TRUE(went_null);
   EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
 }
 
 TEST_FOR_EACH_ALLOCATOR(ReturnedPointersAreAligned, 2048) {
   void *ptr1 = allocator.allocate(1);
 
-  // Should be aligned to native pointer alignment
   uintptr_t ptr1_start = reinterpret_cast<uintptr_t>(ptr1);
-  size_t alignment = alignof(void *);
-
-  EXPECT_EQ(ptr1_start % alignment, static_cast<size_t>(0));
+  EXPECT_EQ(ptr1_start % alignof(max_align_t), static_cast<size_t>(0));
 
   void *ptr2 = allocator.allocate(1);
   uintptr_t ptr2_start = reinterpret_cast<uintptr_t>(ptr2);
 
-  EXPECT_EQ(ptr2_start % alignment, static_cast<size_t>(0));
+  EXPECT_EQ(ptr2_start % alignof(max_align_t), static_cast<size_t>(0));
 }
 
 TEST_FOR_EACH_ALLOCATOR(CanRealloc, 2048) {
-  constexpr size_t ALLOC_SIZE = 512;
-  constexpr size_t kNewAllocSize = 768;
-
-  void *ptr1 = allocator.allocate(ALLOC_SIZE);
-  void *ptr2 = allocator.realloc(ptr1, kNewAllocSize);
+  void *ptr1 = allocator.allocate(512);
+  void *ptr2 = allocator.realloc(ptr1, 768);
 
   ASSERT_NE(ptr1, static_cast<void *>(nullptr));
   ASSERT_NE(ptr2, static_cast<void *>(nullptr));
@@ -134,7 +125,6 @@ TEST_FOR_EACH_ALLOCATOR(CanRealloc, 2048) {
 
 TEST_FOR_EACH_ALLOCATOR(ReallocHasSameContent, 2048) {
   constexpr size_t ALLOC_SIZE = sizeof(int);
-  constexpr size_t kNewAllocSize = sizeof(int) * 2;
   // Data inside the allocated block.
   byte data1[ALLOC_SIZE];
   // Data inside the reallocated block.
@@ -143,7 +133,7 @@ TEST_FOR_EACH_ALLOCATOR(ReallocHasSameContent, 2048) {
   int *ptr1 = reinterpret_cast<int *>(allocator.allocate(ALLOC_SIZE));
   *ptr1 = 42;
   LIBC_NAMESPACE::memcpy(data1, ptr1, ALLOC_SIZE);
-  int *ptr2 = reinterpret_cast<int *>(allocator.realloc(ptr1, kNewAllocSize));
+  int *ptr2 = reinterpret_cast<int *>(allocator.realloc(ptr1, ALLOC_SIZE * 2));
   LIBC_NAMESPACE::memcpy(data2, ptr2, ALLOC_SIZE);
 
   ASSERT_NE(ptr1, static_cast<int *>(nullptr));
@@ -153,33 +143,24 @@ TEST_FOR_EACH_ALLOCATOR(ReallocHasSameContent, 2048) {
 }
 
 TEST_FOR_EACH_ALLOCATOR(ReturnsNullReallocFreedPointer, 2048) {
-  constexpr size_t ALLOC_SIZE = 512;
-  constexpr size_t kNewAllocSize = 256;
-
-  void *ptr1 = allocator.allocate(ALLOC_SIZE);
+  void *ptr1 = allocator.allocate(512);
   allocator.free(ptr1);
-  void *ptr2 = allocator.realloc(ptr1, kNewAllocSize);
+  void *ptr2 = allocator.realloc(ptr1, 256);
 
   EXPECT_EQ(static_cast<void *>(nullptr), ptr2);
 }
 
 TEST_FOR_EACH_ALLOCATOR(ReallocSmallerSize, 2048) {
-  constexpr size_t ALLOC_SIZE = 512;
-  constexpr size_t kNewAllocSize = 256;
-
-  void *ptr1 = allocator.allocate(ALLOC_SIZE);
-  void *ptr2 = allocator.realloc(ptr1, kNewAllocSize);
+  void *ptr1 = allocator.allocate(512);
+  void *ptr2 = allocator.realloc(ptr1, 256);
 
   // For smaller sizes, realloc will not shrink the block.
   EXPECT_EQ(ptr1, ptr2);
 }
 
 TEST_FOR_EACH_ALLOCATOR(ReallocTooLarge, 2048) {
-  constexpr size_t ALLOC_SIZE = 512;
-  size_t kNewAllocSize = N * 2; // Large enough to fail.
-
-  void *ptr1 = allocator.allocate(ALLOC_SIZE);
-  void *ptr2 = allocator.realloc(ptr1, kNewAllocSize);
+  void *ptr1 = allocator.allocate(512);
+  void *ptr2 = allocator.realloc(ptr1, N * 2); // Large enough to fail
 
   // realloc() will not invalidate the original pointer if realloc() fails
   EXPECT_NE(static_cast<void *>(nullptr), ptr1);
@@ -187,12 +168,10 @@ TEST_FOR_EACH_ALLOCATOR(ReallocTooLarge, 2048) {
 }
 
 TEST_FOR_EACH_ALLOCATOR(CanCalloc, 2048) {
-  constexpr size_t ALLOC_SIZE = 128;
-  constexpr size_t NUM = 4;
-  constexpr int size = NUM * ALLOC_SIZE;
+  constexpr int size = 4 * 128;
   constexpr byte zero{0};
 
-  byte *ptr1 = reinterpret_cast<byte *>(allocator.calloc(NUM, ALLOC_SIZE));
+  byte *ptr1 = reinterpret_cast<byte *>(allocator.calloc(4, 128));
 
   // calloc'd content is zero.
   for (int i = 0; i < size; i++) {
@@ -201,12 +180,10 @@ TEST_FOR_EACH_ALLOCATOR(CanCalloc, 2048) {
 }
 
 TEST_FOR_EACH_ALLOCATOR(CanCallocWeirdSize, 2048) {
-  constexpr size_t ALLOC_SIZE = 143;
-  constexpr size_t NUM = 3;
-  constexpr int size = NUM * ALLOC_SIZE;
+  constexpr int size = 3 * 143;
   constexpr byte zero{0};
 
-  byte *ptr1 = reinterpret_cast<byte *>(allocator.calloc(NUM, ALLOC_SIZE));
+  byte *ptr1 = reinterpret_cast<byte *>(allocator.calloc(3, 143));
 
   // calloc'd content is zero.
   for (int i = 0; i < size; i++) {
@@ -215,8 +192,7 @@ TEST_FOR_EACH_ALLOCATOR(CanCallocWeirdSize, 2048) {
 }
 
 TEST_FOR_EACH_ALLOCATOR(CallocTooLarge, 2048) {
-  size_t ALLOC_SIZE = N + 1;
-  EXPECT_EQ(allocator.calloc(1, ALLOC_SIZE), static_cast<void *>(nullptr));
+  EXPECT_EQ(allocator.calloc(1, N + 1), static_cast<void *>(nullptr));
 }
 
 TEST_FOR_EACH_ALLOCATOR(AllocateZero, 2048) {
@@ -241,16 +217,14 @@ TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
 
 // This test is not part of the TEST_FOR_EACH_ALLOCATOR since we want to
 // explicitly ensure that the buffer can still return aligned allocations even
-// if the underlying buffer is at most aligned to the Block alignment. This
-// is so we can check that we can still get aligned allocations even if the
-// underlying buffer is not aligned to the alignments we request.
-TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockAligned) {
-  constexpr size_t BUFFER_SIZE = 4096;
-  constexpr size_t BUFFER_ALIGNMENT = alignof(Block) * 2;
-  alignas(BUFFER_ALIGNMENT) byte buf[BUFFER_SIZE] = {byte(0)};
-
-  // Ensure the underlying buffer is at most aligned to the block type.
-  FreeListHeap allocator(span<byte>(buf).subspan(alignof(Block)));
+// if the underlying buffer is unaligned. This is so we can check that we can
+// still get aligned allocations even if the underlying buffer is not aligned to
+// the alignments we request.
+TEST(LlvmLibcFreeListHeap, AlignedAllocUnalignedBuffer) {
+  byte buf[4096] = {byte(0)};
+
+  // Ensure the underlying buffer is poorly aligned.
+  FreeListHeap allocator(span<byte>(buf).subspan(1));
 
   constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
   constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
diff --git a/libc/test/src/__support/freelist_malloc_test.cpp b/libc/test/src/__support/freelist_malloc_test.cpp
index 793e2498304fb9..5f8515b109124b 100644
--- a/libc/test/src/__support/freelist_malloc_test.cpp
+++ b/libc/test/src/__support/freelist_malloc_test.cpp
@@ -19,35 +19,30 @@ using LIBC_NAMESPACE::FreeListHeap;
 using LIBC_NAMESPACE::FreeListHeapBuffer;
 
 TEST(LlvmLibcFreeListMalloc, Malloc) {
-  constexpr size_t kAllocSize = 256;
-  constexpr size_t kCallocNum = 4;
-  constexpr size_t kCallocSize = 64;
-
-  void *ptr1 = LIBC_NAMESPACE::malloc(kAllocSize);
+  void *ptr1 = LIBC_NAMESPACE::malloc(256);
   auto *block = Block::from_usable_space(ptr1);
-  EXPECT_GE(block->inner_size(), kAllocSize);
+  EXPECT_GE(block->inner_size(), size_t{256});
 
   LIBC_NAMESPACE::free(ptr1);
   ASSERT_NE(block->next(), static_cast<Block *>(nullptr));
   ASSERT_EQ(block->next()->next(), static_cast<Block *>(nullptr));
   size_t heap_size = block->inner_size();
 
-  void *ptr2 = LIBC_NAMESPACE::calloc(kCallocNum, kCallocSize);
+  void *ptr2 = LIBC_NAMESPACE::calloc(4, 64);
   ASSERT_EQ(ptr2, ptr1);
-  EXPECT_GE(block->inner_size(), kCallocNum * kCallocSize);
+  EXPECT_GE(block->inner_size(), size_t{4 * 64});
 
-  for (size_t i = 0; i < kCallocNum * kCallocSize; ++i)
+  for (size_t i = 0; i < 4 * 64; ++i)
     EXPECT_EQ(reinterpret_cast<uint8_t *>(ptr2)[i], uint8_t(0));
 
   LIBC_NAMESPACE::free(ptr2);
   EXPECT_EQ(block->inner_size(), heap_size);
 
-  constexpr size_t ALIGN = kAllocSize;
-  void *ptr3 = LIBC_NAMESPACE::aligned_alloc(ALIGN, kAllocSize);
+  void *ptr3 = LIBC_NAMESPACE::aligned_alloc(256, 256);
   EXPECT_NE(ptr3, static_cast<void *>(nullptr));
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % ALIGN, size_t(0));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % 256, size_t(0));
   auto *aligned_block = reinterpret_cast<Block *>(ptr3);
-  EXPECT_GE(aligned_block->inner_size(), kAllocSize);
+  EXPECT_GE(aligned_block->inner_size(), size_t{256});
 
   LIBC_NAMESPACE::free(ptr3);
   EXPECT_EQ(block->inner_size(), heap_size);



More information about the libc-commits mailing list