[libc-commits] [libc] [libc][stdlib] Add Block class (PR #94407)

via libc-commits libc-commits at lists.llvm.org
Fri Jun 7 12:01:31 PDT 2024


https://github.com/PiJoules updated https://github.com/llvm/llvm-project/pull/94407

>From a7f3a70509011a890799b35dc661e4d8a463d714 Mon Sep 17 00:00:00 2001
From: Leonard Chan <leonardchan at google.com>
Date: Tue, 4 Jun 2024 15:44:06 -0700
Subject: [PATCH] [libc][stdlib] Add Block class

A block represents a chunk of memory used by the freelist allocator. It
contains header information denoting the usable space and pointers as
offsets to the next and previous block.

On its own, this doesn't do much. This is a part of
https://github.com/llvm/llvm-project/pull/94270 to land in smaller
patches.

This is a subset of pigweed's freelist allocator implementation.
---
 libc/src/stdlib/CMakeLists.txt      |  12 +
 libc/src/stdlib/block.h             | 479 +++++++++++++++++++++++
 libc/test/src/stdlib/CMakeLists.txt |  13 +
 libc/test/src/stdlib/block_test.cpp | 566 ++++++++++++++++++++++++++++
 4 files changed, 1070 insertions(+)
 create mode 100644 libc/src/stdlib/block.h
 create mode 100644 libc/test/src/stdlib/block_test.cpp

diff --git a/libc/src/stdlib/CMakeLists.txt b/libc/src/stdlib/CMakeLists.txt
index f0091ad367c0a..afb2d6d91cba4 100644
--- a/libc/src/stdlib/CMakeLists.txt
+++ b/libc/src/stdlib/CMakeLists.txt
@@ -380,6 +380,18 @@ elseif(LIBC_TARGET_OS_IS_GPU)
     aligned_alloc
   )
 else()
+  add_header_library(
+    block
+    HDRS
+      block.h
+    DEPENDS
+      libc.src.__support.CPP.algorithm
+      libc.src.__support.CPP.limits
+      libc.src.__support.CPP.new
+      libc.src.__support.CPP.optional
+      libc.src.__support.CPP.span
+      libc.src.__support.CPP.type_traits
+  )
   add_entrypoint_external(
     malloc
   )
diff --git a/libc/src/stdlib/block.h b/libc/src/stdlib/block.h
new file mode 100644
index 0000000000000..0b5c1aa446db5
--- /dev/null
+++ b/libc/src/stdlib/block.h
@@ -0,0 +1,479 @@
+//===-- Implementation header for a block of memory -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_BLOCK_H
+#define LLVM_LIBC_SRC_STDLIB_BLOCK_H
+
+#include "src/__support/CPP/algorithm.h"
+#include "src/__support/CPP/cstddef.h"
+#include "src/__support/CPP/limits.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/CPP/span.h"
+#include "src/__support/CPP/type_traits.h"
+
+#include <stdint.h>
+
+namespace LIBC_NAMESPACE {
+
+namespace internal {
+// Types of corrupted blocks, and functions to crash with an error message
+// corresponding to each type.
+enum BlockStatus {
+  kValid,
+  kMisaligned,
+  kPrevMismatched,
+  kNextMismatched,
+};
+} // namespace internal
+
+/// Returns the value rounded down to the nearest multiple of alignment.
+///
+/// @warning  `alignment` must be nonzero; a zero alignment divides by zero.
+LIBC_INLINE constexpr size_t AlignDown(size_t value, size_t alignment) {
+  // Equivalent to `value - (value % alignment)`. The truncating divide makes
+  // the product at most `value`, so the multiply cannot actually overflow;
+  // the builtin is presumably used for defensive well-defined arithmetic --
+  // TODO confirm intent.
+  __builtin_mul_overflow(value / alignment, alignment, &value);
+  return value;
+}
+
+/// Returns the value rounded down to the nearest multiple of alignment.
+///
+/// Pointer overload of `AlignDown`; the pointer is round-tripped through an
+/// integer to perform the alignment arithmetic.
+template <typename T>
+LIBC_INLINE constexpr T *AlignDown(T *value, size_t alignment) {
+  return reinterpret_cast<T *>(
+      AlignDown(reinterpret_cast<size_t>(value), alignment));
+}
+
+/// Returns the value rounded up to the nearest multiple of alignment.
+///
+/// @warning  `alignment` must be nonzero; a zero alignment divides by zero
+///           in `AlignDown`.
+LIBC_INLINE constexpr size_t AlignUp(size_t value, size_t alignment) {
+  // NOTE(review): if `value + alignment - 1` wraps, the wrapped sum is
+  // silently aligned down to a small value -- confirm callers never pass
+  // values near SIZE_MAX.
+  __builtin_add_overflow(value, alignment - 1, &value);
+  return AlignDown(value, alignment);
+}
+
+/// Returns the value rounded up to the nearest multiple of alignment.
+///
+/// Pointer overload of `AlignUp`; the pointer is round-tripped through an
+/// integer to perform the alignment arithmetic.
+template <typename T>
+LIBC_INLINE constexpr T *AlignUp(T *value, size_t alignment) {
+  return reinterpret_cast<T *>(
+      AlignUp(reinterpret_cast<size_t>(value), alignment));
+}
+
+using ByteSpan = cpp::span<LIBC_NAMESPACE::cpp::byte>;
+using cpp::optional;
+
+/// Memory region with links to adjacent blocks.
+///
+/// The blocks do not encode their size directly. Instead, they encode offsets
+/// to the next and previous blocks using the type given by the `OffsetType`
+/// template parameter. The encoded offsets are simply the offsets divided by the
+/// minimum block alignment, `kAlignment`.
+///
+/// The `kAlignment` constant provided by the derived block is typically the
+/// minimum value of `alignof(OffsetType)`. Since the addressable range of a
+/// block is given by `std::numeric_limits<OffsetType>::max() *
+/// kAlignment`, it may be advantageous to set a higher alignment if it allows
+/// using a smaller offset type, even if this wastes some bytes in order to
+/// align block headers.
+///
+/// Blocks will always be aligned to a `kAlignment` boundary. Block sizes will
+/// always be rounded up to a multiple of `kAlignment`.
+///
+/// As an example, the diagram below represents two contiguous
+/// `Block<uint32_t, 8>`s. The indices indicate byte offsets:
+///
+/// @code{.unparsed}
+/// Block 1:
+/// +---------------------+------+--------------+
+/// | Header              | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | Prev     | Next     |      |              |
+/// | 0......3 | 4......7 | 8..9 | 10.......280 |
+/// | 00000000 | 00000046 | 8008 |  <app data>  |
+/// +----------+----------+------+--------------+
+/// Block 2:
+/// +---------------------+------+--------------+
+/// | Header              | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | Prev     | Next     |      |              |
+/// | 0......3 | 4......7 | 8..9 | 10......1056 |
+/// | 00000046 | 00000106 | 2008 | f7f7....f7f7 |
+/// +----------+----------+------+--------------+
+/// @endcode
+///
+/// The overall size of the block (e.g. 280 bytes) is given by its next offset
+/// multiplied by the alignment (e.g. 0x46 * 4). Also, the next offset of a
+/// block matches the previous offset of its next block. The first block in a
+/// list is denoted by having a previous offset of `0`.
+///
+/// @tparam   OffsetType  Unsigned integral type used to encode offsets. Larger
+///                       types can address more memory, but consume greater
+///                       overhead.
+/// @tparam   kAlign      Sets the overall alignment for blocks. Minimum is
+///                       `alignof(OffsetType)` (the default). Larger values can
+///                       address more memory, but consume greater overhead.
+template <typename OffsetType = uintptr_t, size_t kAlign = alignof(OffsetType)>
+class Block {
+public:
+  using offset_type = OffsetType;
+  static_assert(cpp::is_unsigned_v<offset_type>,
+                "offset type must be unsigned");
+
+  static constexpr size_t kAlignment = cpp::max(kAlign, alignof(offset_type));
+  static constexpr size_t kBlockOverhead = AlignUp(sizeof(Block), kAlignment);
+
+  // No copy or move.
+  Block(const Block &other) = delete;
+  Block &operator=(const Block &other) = delete;
+
+  /// Creates the first block for a given memory region.
+  static optional<Block *> Init(ByteSpan region);
+
+  /// @returns  A pointer to a `Block`, given a pointer to the start of the
+  ///           usable space inside the block.
+  ///
+  /// This is the inverse of `UsableSpace()`.
+  ///
+  /// @warning  This method does not do any checking; passing a random
+  ///           pointer will return a non-null pointer.
+  static Block *FromUsableSpace(void *usable_space) {
+    auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
+    return reinterpret_cast<Block *>(bytes - kBlockOverhead);
+  }
+  static const Block *FromUsableSpace(const void *usable_space) {
+    const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
+    return reinterpret_cast<const Block *>(bytes - kBlockOverhead);
+  }
+
+  /// @returns The total size of the block in bytes, including the header.
+  size_t OuterSize() const { return next_ * kAlignment; }
+
+  /// @returns The number of usable bytes inside the block.
+  size_t InnerSize() const { return OuterSize() - kBlockOverhead; }
+
+  /// @returns The number of bytes requested using AllocFirst or AllocLast.
+  size_t RequestedSize() const { return InnerSize() - padding_; }
+
+  /// @returns A pointer to the usable space inside this block.
+  cpp::byte *UsableSpace() {
+    return reinterpret_cast<cpp::byte *>(this) + kBlockOverhead;
+  }
+  const cpp::byte *UsableSpace() const {
+    return reinterpret_cast<const cpp::byte *>(this) + kBlockOverhead;
+  }
+
+  /// Marks the block as free and merges it with any free neighbors.
+  ///
+  /// This method is static in order to consume and replace the given block
+  /// pointer. If neither neighbor is free, the returned pointer will point to the
+  /// original block. Otherwise, it will point to the new, larger block created
+  /// by merging adjacent free blocks together.
+  static void Free(Block *&block);
+
+  /// Attempts to split this block.
+  ///
+  /// If successful, the block will have an inner size of `new_inner_size`,
+  /// rounded up to a `kAlignment` boundary. The remaining space will be
+  /// returned as a new block.
+  ///
+  /// This method may fail if the remaining space is too small to hold a new
+  /// block. If this method fails for any reason, the original block is
+  /// unmodified.
+  ///
+  /// This method is static in order to consume and replace the given block
+  /// pointer with a pointer to the new, smaller block.
+  static optional<Block *> Split(Block *&block, size_t new_inner_size);
+
+  /// Merges this block with the one that comes after it.
+  ///
+  /// This method is static in order to consume and replace the given block
+  /// pointer with a pointer to the new, larger block.
+  static bool MergeNext(Block *&block);
+
+  /// Fetches the block immediately after this one.
+  ///
+  /// For performance, this always returns a block pointer, even if the returned
+  /// pointer is invalid. The pointer is valid if and only if `Last()` is false.
+  ///
+  /// Typically, after calling `Init` callers may save a pointer past the end of
+  /// the list using `Next()`. This makes it easy to subsequently iterate over
+  /// the list:
+  /// @code{.cpp}
+  ///   auto result = Block<>::Init(byte_span);
+  ///   Block<>* begin = *result;
+  ///   Block<>* end = begin->Next();
+  ///   ...
+  ///   for (auto* block = begin; block != end; block = block->Next()) {
+  ///     // Do something with each block.
+  ///   }
+  /// @endcode
+  Block *Next() const;
+
+  /// @copydoc `Next`.
+  static Block *NextBlock(const Block *block) {
+    return block == nullptr ? nullptr : block->Next();
+  }
+
+  /// @returns The block immediately before this one, or a null pointer if this
+  /// is the first block.
+  Block *Prev() const;
+
+  /// @copydoc `Prev`.
+  static Block *PrevBlock(const Block *block) {
+    return block == nullptr ? nullptr : block->Prev();
+  }
+
+  /// Returns the current alignment of a block.
+  size_t Alignment() const { return Used() ? info_.alignment : 1; }
+
+  /// Indicates whether the block is in use.
+  ///
+  /// @returns `true` if the block is in use or `false` if not.
+  bool Used() const { return info_.used; }
+
+  /// Indicates whether this block is the last block or not (i.e. whether
+  /// `Next()` points to a valid block or not). This is needed because
+  /// `Next()` points to the end of this block, whether there is a valid
+  /// block there or not.
+  ///
+  /// @returns `true` if this is the last block or `false` if not.
+  bool Last() const { return info_.last; }
+
+  /// Marks this block as in use.
+  void MarkUsed() { info_.used = 1; }
+
+  /// Marks this block as free.
+  void MarkFree() { info_.used = 0; }
+
+  /// Marks this block as the last one in the chain.
+  void MarkLast() { info_.last = 1; }
+
+  /// Clears the last bit from this block, marking it as no longer the final
+  /// block in the chain (e.g. after a block is appended past it).
+  void ClearLast() { info_.last = 0; }
+
+  /// @brief Checks if a block is valid.
+  ///
+  /// @returns `true` if and only if the following conditions are met:
+  /// * The block is aligned.
+  /// * The prev/next fields match with the previous and next blocks.
+  bool IsValid() const { return CheckStatus() == internal::kValid; }
+
+private:
+  /// Consumes the block and returns as a span of bytes.
+  static ByteSpan AsBytes(Block *&&block);
+
+  /// Consumes the span of bytes and uses it to construct and return a block.
+  static Block *AsBlock(size_t prev_outer_size, ByteSpan bytes);
+
+  Block(size_t prev_outer_size, size_t outer_size);
+
+  /// Returns a `BlockStatus` that is either kValid or indicates the reason why
+  /// the block is invalid.
+  ///
+  /// If the block is invalid at multiple points, this function will only return
+  /// one of the reasons.
+  internal::BlockStatus CheckStatus() const;
+
+  /// Like `Split`, but assumes the caller has already checked the parameters to
+  /// ensure the split will succeed.
+  static Block *SplitImpl(Block *&block, size_t new_inner_size);
+
+  /// Offset (in increments of the minimum alignment) from this block to the
+  /// previous block. 0 if this is the first block.
+  offset_type prev_ = 0;
+
+  /// Offset (in increments of the minimum alignment) from this block to the
+  /// next block. Valid even if this is the last block, since it equals the
+  /// size of the block.
+  offset_type next_ = 0;
+
+  /// Information about the current state of the block:
+  /// * If the `used` flag is set, the block's usable memory has been allocated
+  ///   and is being used.
+  /// * If the `last` flag is set, the block does not have a next block.
+  /// * If the `used` flag is set, the alignment represents the requested value
+  ///   when the memory was allocated, which may be less strict than the actual
+  ///   alignment.
+  struct {
+    uint16_t used : 1;
+    uint16_t last : 1;
+    uint16_t alignment : 14;
+  } info_;
+
+  /// Number of bytes allocated beyond what was requested. This will be at most
+  /// the minimum alignment, i.e. `alignof(offset_type).`
+  uint16_t padding_ = 0;
+} __attribute__((packed, aligned(kAlign)));
+
+// Public template method implementations.
+
+/// Returns the largest subspan of `bytes` whose start and end addresses are
+/// both aligned to `alignment`, or an empty span if no such subspan exists.
+inline ByteSpan GetAlignedSubspan(ByteSpan bytes, size_t alignment) {
+  if (bytes.data() == nullptr)
+    return ByteSpan();
+
+  auto unaligned_start = reinterpret_cast<uintptr_t>(bytes.data());
+  auto aligned_start = AlignUp(unaligned_start, alignment);
+  auto unaligned_end = unaligned_start + bytes.size();
+  auto aligned_end = AlignDown(unaligned_end, alignment);
+
+  // No aligned bytes remain between the rounded bounds.
+  if (aligned_end <= aligned_start)
+    return ByteSpan();
+
+  return bytes.subspan(aligned_start - unaligned_start,
+                       aligned_end - aligned_start);
+}
+
+template <typename OffsetType, size_t kAlign>
+optional<Block<OffsetType, kAlign> *>
+Block<OffsetType, kAlign>::Init(ByteSpan region) {
+  // NOTE(review): `GetAlignedSubspan` returns a plain ByteSpan, so this
+  // optional is always engaged and `!result` is never true; an unusable
+  // (empty) span is only rejected by the size check below. Consider having
+  // `GetAlignedSubspan` return `optional<ByteSpan>`.
+  optional<ByteSpan> result = GetAlignedSubspan(region, kAlignment);
+  if (!result)
+    return {};
+
+  region = result.value();
+  // The region must at least hold one block header.
+  if (region.size() < kBlockOverhead)
+    return {};
+
+  // The whole region must be addressable by `offset_type` in units of
+  // `kAlignment`.
+  if (cpp::numeric_limits<OffsetType>::max() < region.size() / kAlignment)
+    return {};
+
+  Block *block = AsBlock(0, region);
+  block->MarkLast();
+  return block;
+}
+
+template <typename OffsetType, size_t kAlign>
+void Block<OffsetType, kAlign>::Free(Block *&block) {
+  if (block == nullptr)
+    return;
+
+  block->MarkFree();
+  Block *prev = block->Prev();
+
+  // If the previous block is also free, merging it absorbs `block`; repoint
+  // the caller's pointer at the merged (previous) block.
+  if (MergeNext(prev))
+    block = prev;
+
+  // Then try to absorb a free next block into `block`.
+  MergeNext(block);
+}
+
+template <typename OffsetType, size_t kAlign>
+optional<Block<OffsetType, kAlign> *>
+Block<OffsetType, kAlign>::Split(Block *&block, size_t new_inner_size) {
+  if (block == nullptr)
+    return {};
+
+  // Only free blocks may be split.
+  if (block->Used())
+    return {};
+
+  size_t old_inner_size = block->InnerSize();
+  new_inner_size = AlignUp(new_inner_size, kAlignment);
+  // The first block must fit inside the current usable space...
+  if (old_inner_size < new_inner_size)
+    return {};
+
+  // ...and the remainder must still be able to hold a block header.
+  if (old_inner_size - new_inner_size < kBlockOverhead)
+    return {};
+
+  return SplitImpl(block, new_inner_size);
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *
+Block<OffsetType, kAlign>::SplitImpl(Block *&block, size_t new_inner_size) {
+  size_t prev_outer_size = block->prev_ * kAlignment;
+  size_t outer_size1 = new_inner_size + kBlockOverhead;
+  bool is_last = block->Last();
+  // Deconstruct the block into raw bytes, then construct fresh block headers
+  // over the front of each half.
+  ByteSpan bytes = AsBytes(cpp::move(block));
+  Block *block1 = AsBlock(prev_outer_size, bytes.subspan(0, outer_size1));
+  Block *block2 = AsBlock(outer_size1, bytes.subspan(outer_size1));
+
+  if (is_last)
+    block2->MarkLast();
+  else
+    // Fix up the back-link of the block following the split pair.
+    block2->Next()->prev_ = block2->next_;
+
+  block = cpp::move(block1);
+  return block2;
+}
+
+template <typename OffsetType, size_t kAlign>
+bool Block<OffsetType, kAlign>::MergeNext(Block *&block) {
+  if (block == nullptr)
+    return false;
+
+  // The last block has no successor to merge with.
+  if (block->Last())
+    return false;
+
+  Block *next = block->Next();
+  // Both blocks must be free to merge.
+  if (block->Used() || next->Used())
+    return false;
+
+  size_t prev_outer_size = block->prev_ * kAlignment;
+  bool is_last = next->Last();
+  // Reclaim both blocks as raw bytes and construct a single header spanning
+  // the combined region.
+  ByteSpan prev_bytes = AsBytes(cpp::move(block));
+  ByteSpan next_bytes = AsBytes(cpp::move(next));
+  size_t outer_size = prev_bytes.size() + next_bytes.size();
+  cpp::byte *merged = ::new (prev_bytes.data()) cpp::byte[outer_size];
+  block = AsBlock(prev_outer_size, ByteSpan(merged, outer_size));
+
+  if (is_last)
+    block->MarkLast();
+  else
+    // Fix up the back-link of the block following the merged region.
+    block->Next()->prev_ = block->next_;
+
+  return true;
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::Next() const {
+  // Returns null for the last block (the declaration comment about always
+  // returning a block pointer does not match this implementation; the tests
+  // expect nullptr). Otherwise the next block starts immediately after this
+  // block's outer size.
+  uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + OuterSize();
+  return reinterpret_cast<Block *>(addr);
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::Prev() const {
+  // A stored offset of 0 marks the first block, which has no predecessor.
+  uintptr_t addr =
+      (prev_ == 0) ? 0
+                   : reinterpret_cast<uintptr_t>(this) - (prev_ * kAlignment);
+  return reinterpret_cast<Block *>(addr);
+}
+
+// Private template method implementations.
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign>::Block(size_t prev_outer_size, size_t outer_size) {
+  // Offsets are stored in units of `kAlignment`; both sizes are expected to
+  // already be multiples of it (callers pass aligned spans).
+  prev_ = prev_outer_size / kAlignment;
+  next_ = outer_size / kAlignment;
+  info_.used = 0;
+  info_.last = 0;
+  // NOTE(review): `alignment` is a 14-bit field; a kAlignment >= 2^14 would
+  // be silently truncated here -- confirm that is out of range in practice.
+  info_.alignment = kAlignment;
+}
+
+template <typename OffsetType, size_t kAlign>
+ByteSpan Block<OffsetType, kAlign>::AsBytes(Block *&&block) {
+  size_t block_size = block->OuterSize();
+  // Placement-new over the block's own storage ends the Block's lifetime and
+  // begins the lifetime of a byte array of the same size.
+  // NOTE(review): unqualified `new` here vs `::new` in MergeNext/AsBlock --
+  // presumably equivalent; confirm and make consistent.
+  cpp::byte *bytes = new (cpp::move(block)) cpp::byte[block_size];
+  return {bytes, block_size};
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *
+Block<OffsetType, kAlign>::AsBlock(size_t prev_outer_size, ByteSpan bytes) {
+  // Construct a block header in place at the start of the byte region; the
+  // block's outer size becomes the full span size.
+  return ::new (bytes.data()) Block(prev_outer_size, bytes.size());
+}
+
+template <typename OffsetType, size_t kAlign>
+internal::BlockStatus Block<OffsetType, kAlign>::CheckStatus() const {
+  // The block must start on a `kAlignment` boundary.
+  if (reinterpret_cast<uintptr_t>(this) % kAlignment != 0)
+    return internal::kMisaligned;
+
+  // A non-last block must sit before its successor, and that successor must
+  // link back to this block.
+  if (!Last() && (this >= Next() || this != Next()->Prev()))
+    return internal::kNextMismatched;
+
+  // A non-first block must sit after its predecessor, and that predecessor
+  // must link forward to this block.
+  if (Prev() && (this <= Prev() || this != Prev()->Next()))
+    return internal::kPrevMismatched;
+
+  return internal::kValid;
+}
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_BLOCK_H
diff --git a/libc/test/src/stdlib/CMakeLists.txt b/libc/test/src/stdlib/CMakeLists.txt
index 38488778c657c..f122cd56a6060 100644
--- a/libc/test/src/stdlib/CMakeLists.txt
+++ b/libc/test/src/stdlib/CMakeLists.txt
@@ -54,6 +54,19 @@ add_libc_test(
     libc.src.stdlib.atoll
 )
 
+add_libc_test(
+  block_test
+  SUITE
+    libc-stdlib-tests
+  SRCS
+    block_test.cpp
+  DEPENDS
+    libc.src.stdlib.block
+    libc.src.__support.CPP.array
+    libc.src.__support.CPP.span
+    libc.src.string.memcpy
+)
+
 add_fp_unittest(
   strtod_test
   SUITE
diff --git a/libc/test/src/stdlib/block_test.cpp b/libc/test/src/stdlib/block_test.cpp
new file mode 100644
index 0000000000000..5f730c24a5962
--- /dev/null
+++ b/libc/test/src/stdlib/block_test.cpp
@@ -0,0 +1,566 @@
+//===-- Unittests for a block of memory -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include <stddef.h>
+
+#include "src/stdlib/block.h"
+
+#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/span.h"
+#include "src/string/memcpy.h"
+#include "test/UnitTest/Test.h"
+
+// Block types.
+using LargeOffsetBlock = LIBC_NAMESPACE::Block<uint64_t>;
+using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;
+
+// For each of the block types above, we'd like to run the same tests since
+// they should work independently of the parameter sizes. Rather than re-writing
+// the same test for each case, let's instead create a custom test framework for
+// each test case that invokes the actual testing function for each block type.
+//
+// It's organized this way because the ASSERT/EXPECT macros only work within a
+// `Test` class due to those macros expanding to `test` methods.
+#define TEST_FOR_EACH_BLOCK_TYPE(TestCase)                                     \
+  class LlvmLibcBlockTest##TestCase : public LIBC_NAMESPACE::testing::Test {   \
+  public:                                                                      \
+    template <typename BlockType> void RunTest();                              \
+  };                                                                           \
+  TEST_F(LlvmLibcBlockTest##TestCase, TestCase) {                              \
+    RunTest<LargeOffsetBlock>();                                               \
+    RunTest<SmallOffsetBlock>();                                               \
+  }                                                                            \
+  template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()
+
+using LIBC_NAMESPACE::cpp::array;
+using LIBC_NAMESPACE::cpp::byte;
+using LIBC_NAMESPACE::cpp::span;
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCreateSingleAlignedBlock) {
+  constexpr size_t kN = 1024;
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  EXPECT_EQ(block->OuterSize(), kN);
+  EXPECT_EQ(block->InnerSize(), kN - BlockType::kBlockOverhead);
+  EXPECT_EQ(block->Prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(block->Next(), static_cast<BlockType *>(nullptr));
+  EXPECT_FALSE(block->Used());
+  EXPECT_TRUE(block->Last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCreateUnalignedSingleBlock) {
+  constexpr size_t kN = 1024;
+
+  // Force alignment, so we can un-force it below
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  span<byte> aligned(bytes);
+
+  auto result = BlockType::Init(aligned.subspan(1));
+  EXPECT_TRUE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotCreateTooSmallBlock) {
+  array<byte, 2> bytes;
+  auto result = BlockType::Init(bytes);
+  EXPECT_FALSE(result.has_value());
+}
+
+TEST(LlvmLibcBlockTest, CannotCreateTooLargeBlock) {
+  using BlockType = LIBC_NAMESPACE::Block<uint8_t>;
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  EXPECT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  auto *block1 = *result;
+
+  result = BlockType::Split(block1, kSplitN);
+  ASSERT_TRUE(result.has_value());
+
+  auto *block2 = *result;
+
+  EXPECT_EQ(block1->InnerSize(), kSplitN);
+  EXPECT_EQ(block1->OuterSize(), kSplitN + BlockType::kBlockOverhead);
+  EXPECT_FALSE(block1->Last());
+
+  EXPECT_EQ(block2->OuterSize(), kN - kSplitN - BlockType::kBlockOverhead);
+  EXPECT_FALSE(block2->Used());
+  EXPECT_TRUE(block2->Last());
+
+  EXPECT_EQ(block1->Next(), block2);
+  EXPECT_EQ(block2->Prev(), block1);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  // We should split at sizeof(BlockType) + kSplitN bytes. Then
+  // we need to round that up to an alignof(BlockType) boundary.
+  constexpr size_t kSplitN = 513;
+  uintptr_t split_addr = reinterpret_cast<uintptr_t>(block1) + kSplitN;
+  split_addr += alignof(BlockType) - (split_addr % alignof(BlockType));
+  uintptr_t split_len = split_addr - (uintptr_t)&bytes;
+
+  result = BlockType::Split(block1, kSplitN);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  EXPECT_EQ(block1->InnerSize(), split_len);
+  EXPECT_EQ(block1->OuterSize(), split_len + BlockType::kBlockOverhead);
+
+  EXPECT_EQ(block2->OuterSize(), kN - block1->OuterSize());
+  EXPECT_FALSE(block2->Used());
+
+  EXPECT_EQ(block1->Next(), block2);
+  EXPECT_EQ(block2->Prev(), block1);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
+  // Split once, then split the original block again to ensure that the
+  // pointers get rewired properly.
+  // I.e.
+  // [[             BLOCK 1            ]]
+  // block1->Split()
+  // [[       BLOCK1       ]][[ BLOCK2 ]]
+  // block1->Split()
+  // [[ BLOCK1 ]][[ BLOCK3 ]][[ BLOCK2 ]]
+
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::Split(block1, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  EXPECT_EQ(block1->Next(), block3);
+  EXPECT_EQ(block3->Prev(), block1);
+  EXPECT_EQ(block3->Next(), block2);
+  EXPECT_EQ(block2->Prev(), block3);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitTooSmallBlock) {
+  constexpr size_t kN = 64;
+  constexpr size_t kSplitN = kN + 1;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::Split(block, kSplitN);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitBlockWithoutHeaderSpace) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = kN - BlockType::kBlockOverhead - 1;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::Split(block, kSplitN);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitNull) {
+  BlockType *block = nullptr;
+  auto result = BlockType::Split(block, 1);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMakeBlockLargerInSplit) {
+  // Ensure that we can't ask for more space than the block actually has...
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::Split(block, block->InnerSize() + 1);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMakeSecondBlockLargerInSplit) {
+  // Ensure that the second block in split is at least of the size of header.
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::Split(block,
+                            block->InnerSize() - BlockType::kBlockOverhead + 1);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeFirstBlock) {
+  // This block does support splitting with zero payload size.
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::Split(block, 0);
+  ASSERT_TRUE(result.has_value());
+  EXPECT_EQ(block->InnerSize(), static_cast<size_t>(0));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeSecondBlock) {
+  // Likewise, the split block can be zero-width.
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result =
+      BlockType::Split(block1, block1->InnerSize() - BlockType::kBlockOverhead);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  EXPECT_EQ(block2->InnerSize(), static_cast<size_t>(0));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMarkBlockUsed) {
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  block->MarkUsed();
+  EXPECT_TRUE(block->Used());
+
+  // Size should be unaffected.
+  EXPECT_EQ(block->OuterSize(), kN);
+
+  block->MarkFree();
+  EXPECT_FALSE(block->Used());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitUsedBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  block->MarkUsed();
+  result = BlockType::Split(block, kSplitN);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
+  // Do the three way merge from "CanSplitMidBlock", and let's
+  // merge block 3 and 2
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+
+  result = BlockType::Split(block1, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  EXPECT_TRUE(BlockType::MergeNext(block3));
+
+  EXPECT_EQ(block1->Next(), block3);
+  EXPECT_EQ(block3->Prev(), block1);
+  EXPECT_EQ(block1->InnerSize(), kSplit2);
+  EXPECT_EQ(block3->OuterSize(), kN - block1->OuterSize());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeWithFirstOrLastBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  // Do a split, just to check that the checks on Next/Prev are different...
+  result = BlockType::Split(block1, kSplitN);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  EXPECT_FALSE(BlockType::MergeNext(block2));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeNull) {
+  BlockType *block = nullptr;
+  EXPECT_FALSE(BlockType::MergeNext(block));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeUsedBlock) {
+  // A block that is marked used must not be merged with its successor.
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  // Do a split, just to check that the checks on Next/Prev are different...
+  result = BlockType::Split(block, kSplitN);
+  ASSERT_TRUE(result.has_value());
+
+  block->MarkUsed();
+  EXPECT_FALSE(BlockType::MergeNext(block));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeSingleBlock) {
+  // Freeing the only block in the region clears its used flag; with no
+  // neighbors to merge, it still spans the entire region.
+  constexpr size_t kN = 1024;
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  block->MarkUsed();
+  BlockType::Free(block);
+  EXPECT_FALSE(block->Used());
+  EXPECT_EQ(block->OuterSize(), kN);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockWithoutMerging) {
+  // All three blocks are marked used, so freeing the middle one must not
+  // merge it with either neighbor: it keeps a previous block and is not last.
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::Split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  block1->MarkUsed();
+  block2->MarkUsed();
+  block3->MarkUsed();
+
+  BlockType::Free(block2);
+  EXPECT_FALSE(block2->Used());
+  EXPECT_NE(block2->Prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_FALSE(block2->Last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockAndMergeWithPrev) {
+  // block1 is left free; freeing block2 is expected to merge it backwards
+  // with block1 (Prev() becomes null, i.e. it becomes the first block), while
+  // used block3 keeps it from being last.
+  // NOTE(review): the EXPECTs below presumably rely on Free() updating the
+  // header reachable via the block2 pointer after the backward merge —
+  // confirm against Block::Free's implementation.
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::Split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  // Only block2 and block3 are used; block1 stays free for the merge.
+  block2->MarkUsed();
+  block3->MarkUsed();
+
+  BlockType::Free(block2);
+  EXPECT_FALSE(block2->Used());
+  EXPECT_EQ(block2->Prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_FALSE(block2->Last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockAndMergeWithNext) {
+  // The trailing block (from the second split) is left free; freeing block2
+  // is expected to merge it forward, making it the last block while the used
+  // block1 remains as its previous block.
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::Split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+
+  // Only block1 and block2 are used; the third block stays free.
+  block1->MarkUsed();
+  block2->MarkUsed();
+
+  BlockType::Free(block2);
+  EXPECT_FALSE(block2->Used());
+  EXPECT_NE(block2->Prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_TRUE(block2->Last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeUsedBlockAndMergeWithBoth) {
+  // Both neighbors are left free; freeing block2 is expected to merge in
+  // both directions, leaving it as the sole block (no previous block, and
+  // last in the region).
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::Split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+
+  // Only the middle block is used.
+  block2->MarkUsed();
+
+  BlockType::Free(block2);
+  EXPECT_FALSE(block2->Used());
+  EXPECT_EQ(block2->Prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_TRUE(block2->Last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCheckValidBlock) {
+  // Blocks produced by Init and Split with intact headers all report
+  // IsValid().
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::kAlignment) array<byte, kN> bytes;
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::Split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  EXPECT_TRUE(block1->IsValid());
+  EXPECT_TRUE(block2->IsValid());
+  EXPECT_TRUE(block3->IsValid());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCheckInvalidBlock) {
+  // Corrupting one block's header must make IsValid() fail for that block
+  // and the blocks adjacent to it.
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 128;
+  constexpr size_t kSplit2 = 384;
+  constexpr size_t kSplit3 = 256;
+
+  array<byte, kN> bytes{};
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::Split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::Split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  result = BlockType::Split(block3, kSplit3);
+  ASSERT_TRUE(result.has_value());
+
+  // Corrupt a Block header.
+  // This must not touch memory outside the original region, or the test may
+  // (correctly) abort when run with address sanitizer.
+  // To remain as agnostic to the internals of `Block` as possible, the test
+  // copies a smaller block's header to a larger block.
+  EXPECT_TRUE(block1->IsValid());
+  EXPECT_TRUE(block2->IsValid());
+  EXPECT_TRUE(block3->IsValid());
+  auto *src = reinterpret_cast<byte *>(block1);
+  auto *dst = reinterpret_cast<byte *>(block2);
+  LIBC_NAMESPACE::memcpy(dst, src, sizeof(BlockType));
+  EXPECT_FALSE(block1->IsValid());
+  EXPECT_FALSE(block2->IsValid());
+  EXPECT_FALSE(block3->IsValid());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanGetBlockFromUsableSpace) {
+  // FromUsableSpace must invert UsableSpace: recovering the block header
+  // from its payload pointer yields the original block.
+  constexpr size_t kN = 1024;
+
+  array<byte, kN> bytes{};
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  void *ptr = block1->UsableSpace();
+  BlockType *block2 = BlockType::FromUsableSpace(ptr);
+  EXPECT_EQ(block1, block2);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
+  // Same round-trip as CanGetBlockFromUsableSpace, but exercising the
+  // const overloads of UsableSpace/FromUsableSpace.
+  constexpr size_t kN = 1024;
+
+  array<byte, kN> bytes{};
+  auto result = BlockType::Init(bytes);
+  ASSERT_TRUE(result.has_value());
+  const BlockType *block1 = *result;
+
+  const void *ptr = block1->UsableSpace();
+  const BlockType *block2 = BlockType::FromUsableSpace(ptr);
+  EXPECT_EQ(block1, block2);
+}



More information about the libc-commits mailing list