[libc-commits] [libc] [WIP][libc] Add freelist malloc (PR #94270)
via libc-commits
libc-commits at lists.llvm.org
Mon Jun 3 18:00:48 PDT 2024
https://github.com/PiJoules updated https://github.com/llvm/llvm-project/pull/94270
From 911182aec30f1f89948abba52dcde9d772a9cd39 Mon Sep 17 00:00:00 2001
From: Leonard Chan <leonardchan at google.com>
Date: Fri, 31 May 2024 14:27:16 -0700
Subject: [PATCH] [WIP][libc] Add freelist malloc
---
libc/config/baremetal/riscv/entrypoints.txt | 1 +
libc/src/__support/CPP/new.h | 3 +
libc/src/__support/CPP/type_traits.h | 1 +
.../CPP/type_traits/aligned_storage.h | 27 +
libc/src/__support/fixedvector.h | 20 +
libc/src/stdlib/CMakeLists.txt | 14 +-
libc/src/stdlib/block.h | 509 ++++++++++++++++++
libc/src/stdlib/calloc.h | 20 +
libc/src/stdlib/free.h | 2 +-
libc/src/stdlib/freelist.cpp | 125 +++++
libc/src/stdlib/freelist.h | 224 ++++++++
libc/src/stdlib/freelist_heap.h | 208 +++++++
libc/src/stdlib/freelist_malloc.cpp | 42 ++
libc/src/sys/stat/linux/kernel_statx.h | 2 +-
libc/test/src/CMakeLists.txt | 2 +-
libc/test/src/stdlib/CMakeLists.txt | 30 +-
libc/test/src/stdlib/freelist_heap_test.cpp | 241 +++++++++
libc/test/src/stdlib/freelist_malloc_test.cpp | 56 ++
libc/test/src/stdlib/freelist_test.cpp | 172 ++++++
libc/test/src/stdlib/malloc_test.cpp | 4 +
20 files changed, 1684 insertions(+), 19 deletions(-)
create mode 100644 libc/src/__support/CPP/type_traits/aligned_storage.h
create mode 100644 libc/src/stdlib/block.h
create mode 100644 libc/src/stdlib/calloc.h
create mode 100644 libc/src/stdlib/freelist.cpp
create mode 100644 libc/src/stdlib/freelist.h
create mode 100644 libc/src/stdlib/freelist_heap.h
create mode 100644 libc/src/stdlib/freelist_malloc.cpp
create mode 100644 libc/test/src/stdlib/freelist_heap_test.cpp
create mode 100644 libc/test/src/stdlib/freelist_malloc_test.cpp
create mode 100644 libc/test/src/stdlib/freelist_test.cpp
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index b769b43f03a2c6..363a762909c3ad 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -170,6 +170,7 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.ldiv
libc.src.stdlib.llabs
libc.src.stdlib.lldiv
+ libc.src.stdlib.malloc
libc.src.stdlib.qsort
libc.src.stdlib.rand
libc.src.stdlib.srand
diff --git a/libc/src/__support/CPP/new.h b/libc/src/__support/CPP/new.h
index 6261dc1ffde6fa..7232265549573e 100644
--- a/libc/src/__support/CPP/new.h
+++ b/libc/src/__support/CPP/new.h
@@ -74,6 +74,9 @@ LIBC_INLINE void *operator new[](size_t size, std::align_val_t align,
return LIBC_NAMESPACE::AllocChecker::aligned_alloc(size, align, ac);
}
+LIBC_INLINE void *operator new(size_t, void *__p) { return __p; }
+LIBC_INLINE void *operator new[](size_t, void *__p) { return __p; }
+
// The ideal situation would be to define the various flavors of operator delete
// inline like we do with operator new above. However, since we need operator
// delete prototypes to match those specified by the C++ standard, we cannot
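These placement overloads are what let the allocator construct objects directly inside caller-provided buffers (see freelist_malloc.cpp below). A minimal sketch of the pattern, with an illustrative `Counter` type that is not part of the patch:

    struct Counter { int hits = 0; };
    alignas(Counter) unsigned char storage[sizeof(Counter)];
    Counter *c = new (storage) Counter();  // constructs in `storage`; no heap involved
    c->~Counter();                         // placement-allocated objects need manual destruction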
diff --git a/libc/src/__support/CPP/type_traits.h b/libc/src/__support/CPP/type_traits.h
index 1494aeb905e093..d50b6612656dbb 100644
--- a/libc/src/__support/CPP/type_traits.h
+++ b/libc/src/__support/CPP/type_traits.h
@@ -12,6 +12,7 @@
#include "src/__support/CPP/type_traits/add_lvalue_reference.h"
#include "src/__support/CPP/type_traits/add_pointer.h"
#include "src/__support/CPP/type_traits/add_rvalue_reference.h"
+#include "src/__support/CPP/type_traits/aligned_storage.h"
#include "src/__support/CPP/type_traits/bool_constant.h"
#include "src/__support/CPP/type_traits/conditional.h"
#include "src/__support/CPP/type_traits/decay.h"
diff --git a/libc/src/__support/CPP/type_traits/aligned_storage.h b/libc/src/__support/CPP/type_traits/aligned_storage.h
new file mode 100644
index 00000000000000..574b1146f6b2ad
--- /dev/null
+++ b/libc/src/__support/CPP/type_traits/aligned_storage.h
@@ -0,0 +1,27 @@
+//===-- aligned_storage type_traits --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_ALIGNED_STORAGE_H
+#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_ALIGNED_STORAGE_H
+
+#include <stddef.h> // size_t
+
+namespace LIBC_NAMESPACE::cpp {
+
+template <size_t Len, size_t Align> struct aligned_storage {
+ struct type {
+ alignas(Align) unsigned char data[Len];
+ };
+};
+
+template <size_t Len, size_t Align>
+using aligned_storage_t = typename aligned_storage<Len, Align>::type;
+
+} // namespace LIBC_NAMESPACE::cpp
+
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_ALIGNED_STORAGE_H
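This is a freestanding stand-in for `std::aligned_storage`. A short usage sketch, assuming an illustrative `State` type; the same pattern is used by freelist_malloc.cpp below to reserve storage for the global heap object:

    using LIBC_NAMESPACE::cpp::aligned_storage_t;

    struct State { long counters[4]; };
    // Uninitialized backing storage, correctly sized and aligned for a State.
    aligned_storage_t<sizeof(State), alignof(State)> storage;
    State *state = new (&storage) State{};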
diff --git a/libc/src/__support/fixedvector.h b/libc/src/__support/fixedvector.h
index 81747ee10067c4..e7b7304f05a208 100644
--- a/libc/src/__support/fixedvector.h
+++ b/libc/src/__support/fixedvector.h
@@ -24,6 +24,18 @@ template <typename T, size_t CAPACITY> class FixedVector {
public:
constexpr FixedVector() = default;
+ template <typename It> FixedVector(It begin, It end) {
+ for (; begin != end; ++begin) {
+ push_back(*begin);
+ }
+ }
+
+ FixedVector(size_t count, const T &value) {
+ for (size_t i = 0; i < count; ++i) {
+ push_back(value);
+ }
+ }
+
bool push_back(const T &obj) {
if (item_count == CAPACITY)
return false;
@@ -36,6 +48,9 @@ template <typename T, size_t CAPACITY> class FixedVector {
T &back() { return store[item_count - 1]; }
+ T &operator[](size_t idx) { return store[idx]; }
+ const T &operator[](size_t idx) const { return store[idx]; }
+
bool pop_back() {
if (item_count == 0)
return false;
@@ -44,6 +59,7 @@ template <typename T, size_t CAPACITY> class FixedVector {
}
bool empty() const { return item_count == 0; }
+ size_t size() const { return item_count; }
// Empties the store for all practical purposes.
void reset() { item_count = 0; }
@@ -63,6 +79,10 @@ template <typename T, size_t CAPACITY> class FixedVector {
return reverse_iterator{&store[item_count]};
}
LIBC_INLINE constexpr reverse_iterator rend() { return store.rend(); }
+
+ using iterator = typename cpp::array<T, CAPACITY>::iterator;
+ LIBC_INLINE constexpr iterator begin() { return store.begin(); }
+ LIBC_INLINE constexpr iterator end() { return iterator{&store[item_count]}; }
};
} // namespace LIBC_NAMESPACE
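The new constructors, `operator[]`, `size()`, and forward iterators are what the `FreeList` bucket arrays below rely on. A small sketch of the added surface:

    LIBC_NAMESPACE::FixedVector<int, 8> vec(3, 42);  // new (count, value) constructor
    vec[1] = 7;                                      // new operator[]
    int sum = 0;
    for (int v : vec)  // new begin()/end(); iteration stops at size(), not CAPACITY
      sum += v;        // sum == 42 + 7 + 42 == 91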
diff --git a/libc/src/stdlib/CMakeLists.txt b/libc/src/stdlib/CMakeLists.txt
index e0bff5198b590c..4da1fdb449413e 100644
--- a/libc/src/stdlib/CMakeLists.txt
+++ b/libc/src/stdlib/CMakeLists.txt
@@ -379,8 +379,20 @@ elseif(LIBC_TARGET_OS_IS_GPU)
aligned_alloc
)
else()
- add_entrypoint_external(
+ add_entrypoint_object(
malloc
+ SRCS
+ freelist_malloc.cpp
+ HDRS
+ malloc.h
+ DEPENDS
+ libc.src.__support.CPP.new
+ libc.src.__support.CPP.optional
+ libc.src.__support.CPP.span
+ libc.src.__support.CPP.type_traits
+ libc.src.__support.fixedvector
+ libc.src.string.memcpy
+ libc.src.string.memset
)
add_entrypoint_external(
free
diff --git a/libc/src/stdlib/block.h b/libc/src/stdlib/block.h
new file mode 100644
index 00000000000000..99dcf2a7bb1a8a
--- /dev/null
+++ b/libc/src/stdlib/block.h
@@ -0,0 +1,509 @@
+//===-- Implementation header for a block of memory -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_BLOCK_H
+#define LLVM_LIBC_SRC_STDLIB_BLOCK_H
+
+#include "src/__support/CPP/algorithm.h"
+#include "src/__support/CPP/cstddef.h"
+#include "src/__support/CPP/limits.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/CPP/span.h"
+#include "src/__support/CPP/type_traits.h"
+
+#include <stdint.h>
+
+namespace LIBC_NAMESPACE {
+
+namespace internal {
+// Types of corrupted blocks, and functions to crash with an error message
+// corresponding to each type.
+enum BlockStatus {
+ kValid,
+ kMisaligned,
+ kPrevMismatched,
+ kNextMismatched,
+};
+} // namespace internal
+
+/// Returns the value rounded down to the nearest multiple of alignment.
+constexpr size_t AlignDown(size_t value, size_t alignment) {
+ __builtin_mul_overflow(value / alignment, alignment, &value);
+ return value;
+}
+
+/// Returns the value rounded down to the nearest multiple of alignment.
+template <typename T> constexpr T *AlignDown(T *value, size_t alignment) {
+ return reinterpret_cast<T *>(
+ AlignDown(reinterpret_cast<size_t>(value), alignment));
+}
+
+/// Returns the value rounded up to the nearest multiple of alignment.
+constexpr size_t AlignUp(size_t value, size_t alignment) {
+ __builtin_add_overflow(value, alignment - 1, &value);
+ return AlignDown(value, alignment);
+}
+
+/// Returns the value rounded up to the nearest multiple of alignment.
+template <typename T> constexpr T *AlignUp(T *value, size_t alignment) {
+ return reinterpret_cast<T *>(
+ AlignUp(reinterpret_cast<size_t>(value), alignment));
+}
+
+using ByteSpan = cpp::span<LIBC_NAMESPACE::cpp::byte>;
+using cpp::optional;
+
+/// Memory region with links to adjacent blocks.
+///
+/// The blocks do not encode their size directly. Instead, they encode offsets
+/// to the next and previous blocks using the type given by the `OffsetType`
+/// template parameter. The encoded offsets are simply the offsets divided by the
+/// minimum block alignment, `kAlignment`.
+///
+/// The `kAlignment` constant is always at least `alignof(OffsetType)`. Since
+/// the addressable range of a block is given by
+/// `std::numeric_limits<OffsetType>::max() *
+/// kAlignment`, it may be advantageous to set a higher alignment if it allows
+/// using a smaller offset type, even if this wastes some bytes in order to
+/// align block headers.
+///
+/// Blocks will always be aligned to a `kAlignment` boundary. Block sizes will
+/// always be rounded up to a multiple of `kAlignment`.
+///
+/// As an example, the diagram below represents two contiguous
+/// `Block<uint32_t, 4>`s. The indices indicate byte offsets:
+///
+/// @code{.unparsed}
+/// Block 1:
+/// +---------------------+------+--------------+
+/// | Header | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | Prev | Next | | |
+/// | 0......3 | 4......7 | 8..9 | 10.......280 |
+/// | 00000000 | 00000046 | 8008 | <app data> |
+/// +----------+----------+------+--------------+
+/// Block 2:
+/// +---------------------+------+--------------+
+/// | Header | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | Prev | Next | | |
+/// | 0......3 | 4......7 | 8..9 | 10......1056 |
+/// | 00000046 | 00000106 | 6008 | f7f7....f7f7 |
+/// +----------+----------+------+--------------+
+/// @endcode
+///
+/// The overall size of the block (e.g. 280 bytes) is given by its next offset
+/// multiplied by the alignment (e.g. 0x46 * 4). Also, the next offset of a
+/// block matches the previous offset of its next block. The first block in a
+/// list is denoted by having a previous offset of `0`.
+///
+/// @tparam OffsetType Unsigned integral type used to encode offsets. Larger
+/// types can address more memory, but consume greater
+/// overhead.
+/// @tparam kAlign Sets the overall alignment for blocks. Minimum is
+/// `alignof(OffsetType)` (the default). Larger values can
+/// address more memory, but consume greater overhead.
+template <typename OffsetType = uintptr_t, size_t kAlign = alignof(OffsetType)>
+class Block {
+public:
+ using offset_type = OffsetType;
+ static_assert(cpp::is_unsigned_v<offset_type>,
+ "offset type must be unsigned");
+
+ static constexpr size_t kAlignment = cpp::max(kAlign, alignof(offset_type));
+ static constexpr size_t kBlockOverhead = AlignUp(sizeof(Block), kAlignment);
+
+ // No copy or move.
+ Block(const Block &other) = delete;
+ Block &operator=(const Block &other) = delete;
+
+ /// @brief Creates the first block for a given memory region.
+ ///
+  /// @returns A block representing the region on success, or an empty
+  /// `optional` if the region is null, too small to hold a block, or too
+  /// big to be addressed using `OffsetType`.
+ static optional<Block *> Init(ByteSpan region);
+
+ /// @returns A pointer to a `Block`, given a pointer to the start of the
+ /// usable space inside the block.
+ ///
+ /// This is the inverse of `UsableSpace()`.
+ ///
+ /// @warning This method does not do any checking; passing a random
+ /// pointer will return a non-null pointer.
+ static Block *FromUsableSpace(void *usable_space) {
+ auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
+ return reinterpret_cast<Block *>(bytes - kBlockOverhead);
+ }
+
+ /// @returns The total size of the block in bytes, including the header.
+ size_t OuterSize() const { return next_ * kAlignment; }
+
+ /// @returns The number of usable bytes inside the block.
+ size_t InnerSize() const { return OuterSize() - kBlockOverhead; }
+
+ /// @returns The number of bytes requested using AllocFirst, AllocLast, or
+ /// Resize.
+ size_t RequestedSize() const { return InnerSize() - padding_; }
+
+ /// @returns A pointer to the usable space inside this block.
+ cpp::byte *UsableSpace() {
+ return reinterpret_cast<cpp::byte *>(this) + kBlockOverhead;
+ }
+
+ /// Marks the block as free and merges it with any free neighbors.
+ ///
+ /// This method is static in order to consume and replace the given block
+  /// pointer. If neither neighbor is free, the returned pointer will point to the
+ /// original block. Otherwise, it will point to the new, larger block created
+ /// by merging adjacent free blocks together.
+ static void Free(Block *&block);
+
+ /// Attempts to split this block.
+ ///
+ /// If successful, the block will have an inner size of `new_inner_size`,
+ /// rounded up to a `kAlignment` boundary. The remaining space will be
+ /// returned as a new block.
+ ///
+ /// This method may fail if the remaining space is too small to hold a new
+ /// block. If this method fails for any reason, the original block is
+ /// unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block.
+ ///
+ /// TODO(b/326509341): Remove from the public API when FreeList is no longer
+ /// in use.
+ ///
+ /// @pre The block must not be in use.
+ ///
+  /// @returns The remaining space as a new block on success, or an empty
+  /// `optional` if the block is in use, the requested size is greater than
+  /// the current inner size, or the remaining space is too small to hold a
+  /// new block.
+ static optional<Block *> Split(Block *&block, size_t new_inner_size);
+
+ /// Merges this block with the one that comes after it.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, larger block.
+ static bool MergeNext(Block *&block);
+
+ /// Fetches the block immediately after this one.
+ ///
+ /// For performance, this always returns a block pointer, even if the returned
+ /// pointer is invalid. The pointer is valid if and only if `Last()` is false.
+ ///
+ /// Typically, after calling `Init` callers may save a pointer past the end of
+ /// the list using `Next()`. This makes it easy to subsequently iterate over
+ /// the list:
+ /// @code{.cpp}
+ /// auto result = Block<>::Init(byte_span);
+ /// Block<>* begin = *result;
+ /// Block<>* end = begin->Next();
+ /// ...
+ /// for (auto* block = begin; block != end; block = block->Next()) {
+  ///     // Do something with each block.
+ /// }
+ /// @endcode
+ Block *Next() const;
+
+ /// @copydoc `Next`.
+ static Block *NextBlock(const Block *block) {
+ return block == nullptr ? nullptr : block->Next();
+ }
+
+ /// @returns The block immediately before this one, or a null pointer if this
+ /// is the first block.
+ Block *Prev() const;
+
+ /// @copydoc `Prev`.
+ static Block *PrevBlock(const Block *block) {
+ return block == nullptr ? nullptr : block->Prev();
+ }
+
+ /// Returns the current alignment of a block.
+ size_t Alignment() const { return Used() ? info_.alignment : 1; }
+
+ /// Indicates whether the block is in use.
+ ///
+ /// @returns `true` if the block is in use or `false` if not.
+ bool Used() const { return info_.used; }
+
+ /// Indicates whether this block is the last block or not (i.e. whether
+ /// `Next()` points to a valid block or not). This is needed because
+ /// `Next()` points to the end of this block, whether there is a valid
+ /// block there or not.
+ ///
+  /// @returns `true` if this is the last block or `false` if not.
+ bool Last() const { return info_.last; }
+
+ /// Marks this block as in use.
+ void MarkUsed() { info_.used = 1; }
+
+ /// Marks this block as free.
+ void MarkFree() { info_.used = 0; }
+
+ /// Marks this block as the last one in the chain.
+ void MarkLast() { info_.last = 1; }
+
+ /// Clears the last bit from this block.
+  void ClearLast() { info_.last = 0; }
+
+ /// @brief Checks if a block is valid.
+ ///
+ /// @returns `true` if and only if the following conditions are met:
+ /// * The block is aligned.
+ /// * The prev/next fields match with the previous and next blocks.
+ /// * The poisoned bytes are not damaged (if poisoning is enabled).
+ bool IsValid() const { return CheckStatus() == internal::kValid; }
+
+private:
+  /// Consumes the block and returns it as a span of bytes.
+ static ByteSpan AsBytes(Block *&&block);
+
+ /// Consumes the span of bytes and uses it to construct and return a block.
+ static Block *AsBlock(size_t prev_outer_size, ByteSpan bytes);
+
+ Block(size_t prev_outer_size, size_t outer_size);
+
+ /// Returns a `BlockStatus` that is either kValid or indicates the reason why
+ /// the block is invalid.
+ ///
+ /// If the block is invalid at multiple points, this function will only return
+ /// one of the reasons.
+ internal::BlockStatus CheckStatus() const;
+
+  /// Like `Split`, but assumes the caller has already checked the parameters to
+ /// ensure the split will succeed.
+ static Block *SplitImpl(Block *&block, size_t new_inner_size);
+
+ /// Offset (in increments of the minimum alignment) from this block to the
+ /// previous block. 0 if this is the first block.
+ offset_type prev_ = 0;
+
+ /// Offset (in increments of the minimum alignment) from this block to the
+ /// next block. Valid even if this is the last block, since it equals the
+ /// size of the block.
+ offset_type next_ = 0;
+
+ /// Information about the current state of the block:
+ /// * If the `used` flag is set, the block's usable memory has been allocated
+ /// and is being used.
+ /// * If the `poisoned` flag is set and the `used` flag is clear, the block's
+ /// usable memory contains a poison pattern that will be checked when the
+ /// block is allocated.
+ /// * If the `last` flag is set, the block does not have a next block.
+ /// * If the `used` flag is set, the alignment represents the requested value
+ /// when the memory was allocated, which may be less strict than the actual
+ /// alignment.
+ struct {
+ uint16_t used : 1;
+ uint16_t poisoned : 1;
+ uint16_t last : 1;
+ uint16_t alignment : 13;
+ } info_;
+
+ /// Number of bytes allocated beyond what was requested. This will be at most
+  /// the minimum alignment, i.e. `alignof(offset_type)`.
+ uint16_t padding_ = 0;
+} __attribute__((packed, aligned(kAlign)));
+
+// Public template method implementations.
+
+inline ByteSpan GetAlignedSubspan(ByteSpan bytes, size_t alignment) {
+ if (bytes.data() == nullptr) {
+ return ByteSpan();
+ }
+ auto unaligned_start = reinterpret_cast<uintptr_t>(bytes.data());
+ auto aligned_start = AlignUp(unaligned_start, alignment);
+ auto unaligned_end = unaligned_start + bytes.size();
+ auto aligned_end = AlignDown(unaligned_end, alignment);
+ if (aligned_end <= aligned_start) {
+ return ByteSpan();
+ }
+ return bytes.subspan(aligned_start - unaligned_start,
+ aligned_end - aligned_start);
+}
+
+template <typename OffsetType, size_t kAlign>
+optional<Block<OffsetType, kAlign> *>
+Block<OffsetType, kAlign>::Init(ByteSpan region) {
+ optional<ByteSpan> result = GetAlignedSubspan(region, kAlignment);
+ if (!result) {
+ return {};
+ }
+ region = result.value();
+ if (region.size() < kBlockOverhead) {
+ return {};
+ }
+ if (cpp::numeric_limits<OffsetType>::max() < region.size() / kAlignment) {
+ return {};
+ }
+ Block *block = AsBlock(0, region);
+ block->MarkLast();
+ return block;
+}
+
+template <typename OffsetType, size_t kAlign>
+void Block<OffsetType, kAlign>::Free(Block *&block) {
+ if (block == nullptr) {
+ return;
+ }
+ block->MarkFree();
+ Block *prev = block->Prev();
+ if (MergeNext(prev)) {
+ block = prev;
+ }
+ MergeNext(block);
+}
+
+template <typename OffsetType, size_t kAlign>
+optional<Block<OffsetType, kAlign> *>
+Block<OffsetType, kAlign>::Split(Block *&block, size_t new_inner_size) {
+ if (block == nullptr) {
+ return {};
+ }
+ if (block->Used()) {
+ return {};
+ }
+ size_t old_inner_size = block->InnerSize();
+ new_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size < new_inner_size) {
+ return {};
+ }
+ if (old_inner_size - new_inner_size < kBlockOverhead) {
+ return {};
+ }
+ return SplitImpl(block, new_inner_size);
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *
+Block<OffsetType, kAlign>::SplitImpl(Block *&block, size_t new_inner_size) {
+ size_t prev_outer_size = block->prev_ * kAlignment;
+ size_t outer_size1 = new_inner_size + kBlockOverhead;
+ bool is_last = block->Last();
+ ByteSpan bytes = AsBytes(cpp::move(block));
+ Block *block1 = AsBlock(prev_outer_size, bytes.subspan(0, outer_size1));
+ Block *block2 = AsBlock(outer_size1, bytes.subspan(outer_size1));
+ if (is_last) {
+ block2->MarkLast();
+ } else {
+ block2->Next()->prev_ = block2->next_;
+ }
+ block = cpp::move(block1);
+ return block2;
+}
+
+template <typename OffsetType, size_t kAlign>
+bool Block<OffsetType, kAlign>::MergeNext(Block *&block) {
+ if (block == nullptr) {
+ return false;
+ }
+ if (block->Last()) {
+ return false;
+ }
+ Block *next = block->Next();
+ if (block->Used() || next->Used()) {
+ return false;
+ }
+ size_t prev_outer_size = block->prev_ * kAlignment;
+ bool is_last = next->Last();
+ ByteSpan prev_bytes = AsBytes(cpp::move(block));
+ ByteSpan next_bytes = AsBytes(cpp::move(next));
+ size_t outer_size = prev_bytes.size() + next_bytes.size();
+ cpp::byte *merged = ::new (prev_bytes.data()) cpp::byte[outer_size];
+ block = AsBlock(prev_outer_size, ByteSpan(merged, outer_size));
+ if (is_last) {
+ block->MarkLast();
+ } else {
+ block->Next()->prev_ = block->next_;
+ }
+ return true;
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::Next() const {
+ uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + OuterSize();
+ return reinterpret_cast<Block *>(addr);
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::Prev() const {
+ uintptr_t addr =
+ (prev_ == 0) ? 0
+ : reinterpret_cast<uintptr_t>(this) - (prev_ * kAlignment);
+ return reinterpret_cast<Block *>(addr);
+}
+
+// Private template method implementations.
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign>::Block(size_t prev_outer_size, size_t outer_size) {
+ prev_ = prev_outer_size / kAlignment;
+ next_ = outer_size / kAlignment;
+ info_.used = 0;
+ info_.poisoned = 0;
+ info_.last = 0;
+ info_.alignment = kAlignment;
+}
+
+template <typename OffsetType, size_t kAlign>
+ByteSpan Block<OffsetType, kAlign>::AsBytes(Block *&&block) {
+ size_t block_size = block->OuterSize();
+ cpp::byte *bytes = new (cpp::move(block)) cpp::byte[block_size];
+ return {bytes, block_size};
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *
+Block<OffsetType, kAlign>::AsBlock(size_t prev_outer_size, ByteSpan bytes) {
+ return ::new (bytes.data()) Block(prev_outer_size, bytes.size());
+}
+
+template <typename OffsetType, size_t kAlign>
+internal::BlockStatus Block<OffsetType, kAlign>::CheckStatus() const {
+ if (reinterpret_cast<uintptr_t>(this) % kAlignment != 0) {
+ return internal::kMisaligned;
+ }
+ if (!Last() && (this >= Next() || this != Next()->Prev())) {
+ return internal::kNextMismatched;
+ }
+ if (Prev() && (this <= Prev() || this != Prev()->Next())) {
+ return internal::kPrevMismatched;
+ }
+ return internal::kValid;
+}
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_BLOCK_H
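To make the `Block` API concrete, here is a sketch of the intended lifecycle; the buffer and split sizes are arbitrary:

    alignas(Block<>) cpp::byte buffer[1024];
    optional<Block<> *> result = Block<>::Init(buffer);  // one free block spanning the region
    Block<> *block = *result;

    // Carve off 128 usable bytes; the remainder is returned as a second block.
    optional<Block<> *> remainder = Block<>::Split(block, 128);

    block->MarkUsed();          // "allocate" the first block
    block->MarkFree();
    Block<>::MergeNext(block);  // coalesce; block spans the whole region again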
diff --git a/libc/src/stdlib/calloc.h b/libc/src/stdlib/calloc.h
new file mode 100644
index 00000000000000..06c9430a2e266e
--- /dev/null
+++ b/libc/src/stdlib/calloc.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for calloc ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdlib.h>
+
+#ifndef LLVM_LIBC_SRC_STDLIB_CALLOC_H
+#define LLVM_LIBC_SRC_STDLIB_CALLOC_H
+
+namespace LIBC_NAMESPACE {
+
+void *calloc(size_t num, size_t size);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_CALLOC_H
diff --git a/libc/src/stdlib/free.h b/libc/src/stdlib/free.h
index f802f1d192d810..b3970fd9677401 100644
--- a/libc/src/stdlib/free.h
+++ b/libc/src/stdlib/free.h
@@ -17,4 +17,4 @@ void free(void *ptr);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_STDLIB_LDIV_H
+#endif // LLVM_LIBC_SRC_STDLIB_FREE_H
diff --git a/libc/src/stdlib/freelist.cpp b/libc/src/stdlib/freelist.cpp
new file mode 100644
index 00000000000000..c65164c24a2e6c
--- /dev/null
+++ b/libc/src/stdlib/freelist.cpp
@@ -0,0 +1,125 @@
+#include "freelist.h"
+#include "src/__support/CPP/span.h"
+#include <stddef.h>
+
+using LIBC_NAMESPACE::cpp::span;
+
+namespace pw::allocator {
+
+template <size_t kNumBuckets>
+Status FreeList<kNumBuckets>::AddChunk(span<LIBC_NAMESPACE::cpp::byte> chunk) {
+ // Check that the size is enough to actually store what we need
+ if (chunk.size() < sizeof(FreeListNode)) {
+ return Status::OutOfRange();
+ }
+
+ union {
+ FreeListNode *node;
+ LIBC_NAMESPACE::cpp::byte *bytes;
+ } aliased;
+
+ aliased.bytes = chunk.data();
+
+ unsigned short chunk_ptr = FindChunkPtrForSize(chunk.size(), false);
+
+ // Add it to the correct list.
+ aliased.node->size = chunk.size();
+ aliased.node->next = chunks_[chunk_ptr];
+ chunks_[chunk_ptr] = aliased.node;
+
+ return OkStatus();
+}
+
+template <size_t kNumBuckets>
+span<LIBC_NAMESPACE::cpp::byte>
+FreeList<kNumBuckets>::FindChunk(size_t size) const {
+ if (size == 0) {
+ return span<LIBC_NAMESPACE::cpp::byte>();
+ }
+
+ unsigned short chunk_ptr = FindChunkPtrForSize(size, true);
+
+ // Check that there's data. This catches the case where we run off the
+ // end of the array
+ if (chunks_[chunk_ptr] == nullptr) {
+ return span<LIBC_NAMESPACE::cpp::byte>();
+ }
+
+ // Now iterate up the buckets, walking each list to find a good candidate
+ for (size_t i = chunk_ptr; i < chunks_.size(); i++) {
+ union {
+ FreeListNode *node;
+ LIBC_NAMESPACE::cpp::byte *data;
+ } aliased;
+ aliased.node = chunks_[static_cast<unsigned short>(i)];
+
+ while (aliased.node != nullptr) {
+ if (aliased.node->size >= size) {
+ return span<LIBC_NAMESPACE::cpp::byte>(aliased.data,
+ aliased.node->size);
+ }
+
+ aliased.node = aliased.node->next;
+ }
+ }
+
+ // If we get here, we've checked every block in every bucket. There's
+ // nothing that can support this allocation.
+ return span<LIBC_NAMESPACE::cpp::byte>();
+}
+
+template <size_t kNumBuckets>
+Status
+FreeList<kNumBuckets>::RemoveChunk(span<LIBC_NAMESPACE::cpp::byte> chunk) {
+ unsigned short chunk_ptr = FindChunkPtrForSize(chunk.size(), true);
+
+ // Walk that list, finding the chunk.
+ union {
+ FreeListNode *node;
+ LIBC_NAMESPACE::cpp::byte *data;
+ } aliased, aliased_next;
+
+ // Check head first.
+ if (chunks_[chunk_ptr] == nullptr) {
+ return Status::NotFound();
+ }
+
+ aliased.node = chunks_[chunk_ptr];
+ if (aliased.data == chunk.data()) {
+ chunks_[chunk_ptr] = aliased.node->next;
+
+ return OkStatus();
+ }
+
+ // No? Walk the nodes.
+ aliased.node = chunks_[chunk_ptr];
+
+ while (aliased.node->next != nullptr) {
+ aliased_next.node = aliased.node->next;
+ if (aliased_next.data == chunk.data()) {
+      // Found it; remove this node from the chain.
+ aliased.node->next = aliased_next.node->next;
+ return OkStatus();
+ }
+
+ aliased.node = aliased.node->next;
+ }
+
+ return Status::NotFound();
+}
+
+template <size_t kNumBuckets>
+unsigned short FreeList<kNumBuckets>::FindChunkPtrForSize(size_t size,
+ bool non_null) const {
+ unsigned short chunk_ptr = 0;
+ for (chunk_ptr = 0u; chunk_ptr < sizes_.size(); chunk_ptr++) {
+ if (sizes_[chunk_ptr] >= size &&
+ (!non_null || chunks_[chunk_ptr] != nullptr)) {
+ break;
+ }
+ }
+
+ return chunk_ptr;
+}
+
+} // namespace pw::allocator
diff --git a/libc/src/stdlib/freelist.h b/libc/src/stdlib/freelist.h
new file mode 100644
index 00000000000000..c51c6a6bee24f4
--- /dev/null
+++ b/libc/src/stdlib/freelist.h
@@ -0,0 +1,224 @@
+//===-- Interface for freelist_malloc -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_FREELIST_H
+#define LLVM_LIBC_SRC_STDLIB_FREELIST_H
+
+#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/cstddef.h"
+#include "src/__support/CPP/span.h"
+#include "src/__support/fixedvector.h"
+
+namespace LIBC_NAMESPACE {
+
+using cpp::span;
+
+/// Basic [freelist](https://en.wikipedia.org/wiki/Free_list) implementation
+/// for an allocator. This implementation buckets by chunk size, with a list
+/// of user-provided buckets. Each bucket is a linked list of storage chunks.
+/// Because this freelist uses the added chunks themselves as list nodes, there
+/// is a lower bound of `sizeof(FreeList::FreeListNode)` bytes for chunks which
+/// can be added to this freelist. There is also an implicit bucket for
+/// "everything else", for chunks which do not fit into a bucket.
+///
+/// Each added chunk will be added to the smallest bucket under which it fits.
+/// If it does not fit into any user-provided bucket, it will be added to the
+/// default bucket.
+///
+/// As an example, assume that the `FreeList` is configured with buckets of
+/// sizes {64, 128, 256, 512} bytes. The internal state may look like the
+/// following:
+///
+/// @code{.unparsed}
+/// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
+/// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
+/// bucket[2] (256B) --> NULL
+/// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
+/// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
+/// @endcode
+///
+/// Note that added chunks should be aligned to a 4-byte boundary.
+template <size_t kNumBuckets = 6> class FreeList {
+public:
+ // Remove copy/move ctors
+ FreeList(const FreeList &other) = delete;
+ FreeList(FreeList &&other) = delete;
+ FreeList &operator=(const FreeList &other) = delete;
+ FreeList &operator=(FreeList &&other) = delete;
+
+ /// Adds a chunk to this freelist.
+ ///
+  /// @returns `true` if the chunk was added successfully, or `false` if the
+  /// chunk is too small to store a `FreeListNode`.
+ bool AddChunk(cpp::span<cpp::byte> chunk);
+
+ /// Finds an eligible chunk for an allocation of size `size`.
+ ///
+ /// @note This returns the first allocation possible within a given bucket;
+ /// It does not currently optimize for finding the smallest chunk.
+ ///
+ /// @returns
+ /// * On success - A span representing the chunk.
+ /// * On failure (e.g. there were no chunks available for that allocation) -
+ /// A span with a size of 0.
+ cpp::span<cpp::byte> FindChunk(size_t size) const;
+
+ /// Removes a chunk from this freelist.
+ ///
+  /// @returns `true` if the chunk was removed successfully, or `false` if
+  /// the chunk could not be found in this freelist.
+ bool RemoveChunk(cpp::span<cpp::byte> chunk);
+
+private:
+ // For a given size, find which index into chunks_ the node should be written
+ // to.
+ unsigned short FindChunkPtrForSize(size_t size, bool non_null) const;
+
+ struct FreeListNode {
+ FreeListNode *next;
+ size_t size;
+ };
+
+public:
+ explicit FreeList(cpp::array<size_t, kNumBuckets> sizes)
+ : chunks_(kNumBuckets + 1, 0), sizes_(sizes.begin(), sizes.end()) {}
+
+ FixedVector<FreeList::FreeListNode *, kNumBuckets + 1> chunks_;
+ FixedVector<size_t, kNumBuckets> sizes_;
+};
+
+template <size_t kNumBuckets>
+bool FreeList<kNumBuckets>::AddChunk(span<cpp::byte> chunk) {
+ // Check that the size is enough to actually store what we need
+ if (chunk.size() < sizeof(FreeListNode)) {
+ return false;
+ }
+
+ union {
+ FreeListNode *node;
+ cpp::byte *bytes;
+ } aliased;
+
+ aliased.bytes = chunk.data();
+
+ unsigned short chunk_ptr = FindChunkPtrForSize(chunk.size(), false);
+
+ // Add it to the correct list.
+ aliased.node->size = chunk.size();
+ aliased.node->next = chunks_[chunk_ptr];
+ chunks_[chunk_ptr] = aliased.node;
+
+ return true;
+}
+
+template <size_t kNumBuckets>
+span<cpp::byte> FreeList<kNumBuckets>::FindChunk(size_t size) const {
+ if (size == 0) {
+ return span<cpp::byte>();
+ }
+
+ unsigned short chunk_ptr = FindChunkPtrForSize(size, true);
+
+ // Check that there's data. This catches the case where we run off the
+ // end of the array
+ if (chunks_[chunk_ptr] == nullptr) {
+ return span<cpp::byte>();
+ }
+
+ // Now iterate up the buckets, walking each list to find a good candidate
+ for (size_t i = chunk_ptr; i < chunks_.size(); i++) {
+ union {
+ FreeListNode *node;
+ cpp::byte *data;
+ } aliased;
+ aliased.node = chunks_[static_cast<unsigned short>(i)];
+
+ while (aliased.node != nullptr) {
+ if (aliased.node->size >= size) {
+ return span<cpp::byte>(aliased.data, aliased.node->size);
+ }
+
+ aliased.node = aliased.node->next;
+ }
+ }
+
+ // If we get here, we've checked every block in every bucket. There's
+ // nothing that can support this allocation.
+ return span<cpp::byte>();
+}
+
+template <size_t kNumBuckets>
+bool FreeList<kNumBuckets>::RemoveChunk(span<cpp::byte> chunk) {
+ unsigned short chunk_ptr = FindChunkPtrForSize(chunk.size(), true);
+
+ // Walk that list, finding the chunk.
+ union {
+ FreeListNode *node;
+ cpp::byte *data;
+ } aliased, aliased_next;
+
+ // Check head first.
+ if (chunks_[chunk_ptr] == nullptr) {
+ return false;
+ }
+
+ aliased.node = chunks_[chunk_ptr];
+ if (aliased.data == chunk.data()) {
+ chunks_[chunk_ptr] = aliased.node->next;
+ return true;
+ }
+
+ // No? Walk the nodes.
+ aliased.node = chunks_[chunk_ptr];
+
+ while (aliased.node->next != nullptr) {
+ aliased_next.node = aliased.node->next;
+ if (aliased_next.data == chunk.data()) {
+      // Found it; remove this node from the chain.
+ aliased.node->next = aliased_next.node->next;
+ return true;
+ }
+
+ aliased.node = aliased.node->next;
+ }
+
+ return false;
+}
+
+template <size_t kNumBuckets>
+unsigned short FreeList<kNumBuckets>::FindChunkPtrForSize(size_t size,
+ bool non_null) const {
+ unsigned short chunk_ptr = 0;
+ for (chunk_ptr = 0u; chunk_ptr < sizes_.size(); chunk_ptr++) {
+ if (sizes_[chunk_ptr] >= size &&
+ (!non_null || chunks_[chunk_ptr] != nullptr)) {
+ break;
+ }
+ }
+
+ return chunk_ptr;
+}
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_FREELIST_H
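For illustration, a sketch of the freelist in isolation, using bucket sizes matching `defaultBuckets` from freelist_heap.h below:

    cpp::array<size_t, 6> bucket_sizes{16, 32, 64, 128, 256, 512};
    FreeList<6> list(bucket_sizes);

    cpp::byte chunk[64];
    list.AddChunk(cpp::span<cpp::byte>(chunk, sizeof(chunk)));  // lands in the 64B bucket
    cpp::span<cpp::byte> found = list.FindChunk(48);            // returns the 64-byte chunk
    if (found.size() != 0)
      list.RemoveChunk(found);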
diff --git a/libc/src/stdlib/freelist_heap.h b/libc/src/stdlib/freelist_heap.h
new file mode 100644
index 00000000000000..3345a222f4a5ba
--- /dev/null
+++ b/libc/src/stdlib/freelist_heap.h
@@ -0,0 +1,208 @@
+//===-- Interface for freelist_heap ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_FREELIST_HEAP_H
+#define LLVM_LIBC_SRC_STDLIB_FREELIST_HEAP_H
+
+#include <stddef.h>
+
+#include "block.h"
+#include "freelist.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/CPP/span.h"
+#include "src/string/memcpy.h"
+#include "src/string/memset.h"
+
+namespace LIBC_NAMESPACE {
+
+void MallocInit(uint8_t *heap_low_addr, uint8_t *heap_high_addr);
+
+using cpp::optional;
+using cpp::span;
+
+static constexpr cpp::array<size_t, 6> defaultBuckets{16, 32, 64,
+ 128, 256, 512};
+
+template <size_t kNumBuckets = defaultBuckets.size()> class FreeListHeap {
+public:
+ using BlockType = Block<>;
+
+ template <size_t> friend class FreeListHeapBuffer;
+
+ struct HeapStats {
+ size_t total_bytes;
+ size_t bytes_allocated;
+ size_t cumulative_allocated;
+ size_t cumulative_freed;
+ size_t total_allocate_calls;
+ size_t total_free_calls;
+ };
+ FreeListHeap(span<cpp::byte> region);
+
+ void *Allocate(size_t size);
+ void Free(void *ptr);
+ void *Realloc(void *ptr, size_t size);
+ void *Calloc(size_t num, size_t size);
+
+ void LogHeapStats();
+ const HeapStats &heap_stats() const { return heap_stats_; }
+
+private:
+ span<cpp::byte> BlockToSpan(BlockType *block) {
+ return span<cpp::byte>(block->UsableSpace(), block->InnerSize());
+ }
+
+ void InvalidFreeCrash() { __builtin_trap(); }
+
+ span<cpp::byte> region_;
+ FreeList<kNumBuckets> freelist_;
+ HeapStats heap_stats_;
+};
+
+template <size_t kNumBuckets>
+FreeListHeap<kNumBuckets>::FreeListHeap(span<cpp::byte> region)
+ : freelist_(defaultBuckets), heap_stats_() {
+ auto result = BlockType::Init(region);
+ BlockType *block = *result;
+
+ freelist_.AddChunk(BlockToSpan(block));
+
+ region_ = region;
+ heap_stats_.total_bytes = region.size();
+}
+
+template <size_t kNumBuckets>
+void *FreeListHeap<kNumBuckets>::Allocate(size_t size) {
+ // Find a chunk in the freelist. Split it if needed, then return
+
+ auto chunk = freelist_.FindChunk(size);
+
+ if (chunk.data() == nullptr) {
+ return nullptr;
+ }
+ freelist_.RemoveChunk(chunk);
+
+ BlockType *chunk_block = BlockType::FromUsableSpace(chunk.data());
+
+ // Split that chunk. If there's a leftover chunk, add it to the freelist
+ optional<BlockType *> result = BlockType::Split(chunk_block, size);
+ if (result) {
+ freelist_.AddChunk(BlockToSpan(*result));
+ }
+
+ chunk_block->MarkUsed();
+
+ heap_stats_.bytes_allocated += size;
+ heap_stats_.cumulative_allocated += size;
+ heap_stats_.total_allocate_calls += 1;
+
+ return chunk_block->UsableSpace();
+}
+
+template <size_t kNumBuckets> void FreeListHeap<kNumBuckets>::Free(void *ptr) {
+ cpp::byte *bytes = static_cast<cpp::byte *>(ptr);
+
+ if (bytes < region_.data() || bytes >= region_.data() + region_.size()) {
+ InvalidFreeCrash();
+ return;
+ }
+
+ BlockType *chunk_block = BlockType::FromUsableSpace(bytes);
+
+ size_t size_freed = chunk_block->InnerSize();
+ // Ensure that the block is in-use
+ if (!chunk_block->Used()) {
+ InvalidFreeCrash();
+ return;
+ }
+ chunk_block->MarkFree();
+ // Can we combine with the left or right blocks?
+ BlockType *prev = chunk_block->Prev();
+ BlockType *next = nullptr;
+
+ if (!chunk_block->Last()) {
+ next = chunk_block->Next();
+ }
+
+ if (prev != nullptr && !prev->Used()) {
+ // Remove from freelist and merge
+ freelist_.RemoveChunk(BlockToSpan(prev));
+ chunk_block = chunk_block->Prev();
+ BlockType::MergeNext(chunk_block);
+ }
+
+ if (next != nullptr && !next->Used()) {
+ freelist_.RemoveChunk(BlockToSpan(next));
+ BlockType::MergeNext(chunk_block);
+ }
+ // Add back to the freelist
+ freelist_.AddChunk(BlockToSpan(chunk_block));
+
+ heap_stats_.bytes_allocated -= size_freed;
+ heap_stats_.cumulative_freed += size_freed;
+ heap_stats_.total_free_calls += 1;
+}
+
+// Follows the contract of the C standard realloc() function.
+// If ptr points to a block that was already freed, this returns nullptr.
+template <size_t kNumBuckets>
+void *FreeListHeap<kNumBuckets>::Realloc(void *ptr, size_t size) {
+ if (size == 0) {
+ Free(ptr);
+ return nullptr;
+ }
+
+ // If the pointer is nullptr, allocate a new memory.
+ if (ptr == nullptr) {
+ return Allocate(size);
+ }
+
+ cpp::byte *bytes = static_cast<cpp::byte *>(ptr);
+
+ // TODO(chenghanzh): Enhance with debug information for out-of-range and more.
+ if (bytes < region_.data() || bytes >= region_.data() + region_.size()) {
+ return nullptr;
+ }
+
+ BlockType *chunk_block = BlockType::FromUsableSpace(bytes);
+ if (!chunk_block->Used()) {
+ return nullptr;
+ }
+ size_t old_size = chunk_block->InnerSize();
+
+ // Do nothing and return ptr if the required memory size is smaller than
+ // the current size.
+ if (old_size >= size) {
+ return ptr;
+ }
+
+ void *new_ptr = Allocate(size);
+  // Don't invalidate ptr if Allocate(size) fails to initialize the memory.
+ if (new_ptr == nullptr) {
+ return nullptr;
+ }
+ memcpy(new_ptr, ptr, old_size);
+
+ Free(ptr);
+ return new_ptr;
+}
+
+template <size_t kNumBuckets>
+void *FreeListHeap<kNumBuckets>::Calloc(size_t num, size_t size) {
+ void *ptr = Allocate(num * size);
+ if (ptr != nullptr) {
+ memset(ptr, 0, num * size);
+ }
+ return ptr;
+}
+
+extern FreeListHeap<> *freelist_heap;
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_FREELIST_HEAP_H
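Putting the pieces together, a sketch of driving the heap directly over a static arena, in the same style as the unit tests below:

    alignas(FreeListHeap<>::BlockType) cpp::byte arena[2048];
    FreeListHeap<> heap(arena);

    void *p = heap.Allocate(128);  // splits a block off the arena
    p = heap.Realloc(p, 256);      // grows via Allocate + memcpy + Free
    void *q = heap.Calloc(4, 16);  // zeroed with memset
    heap.Free(p);
    heap.Free(q);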
diff --git a/libc/src/stdlib/freelist_malloc.cpp b/libc/src/stdlib/freelist_malloc.cpp
new file mode 100644
index 00000000000000..ee0c9aae771eb9
--- /dev/null
+++ b/libc/src/stdlib/freelist_malloc.cpp
@@ -0,0 +1,42 @@
+//===-- Implementation for freelist_malloc --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "freelist_heap.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/CPP/span.h"
+#include "src/__support/CPP/type_traits.h"
+#include "src/string/memcpy.h"
+#include "src/string/memset.h"
+
+#include <stddef.h>
+
+namespace LIBC_NAMESPACE {
+
+namespace {
+cpp::aligned_storage_t<sizeof(FreeListHeap<>), alignof(FreeListHeap<>)> buf;
+} // namespace
+
+FreeListHeap<> *freelist_heap;
+
+// Define the global heap variables.
+void MallocInit(uint8_t *heap_low_addr, uint8_t *heap_high_addr) {
+ cpp::span<LIBC_NAMESPACE::cpp::byte> allocator_freelist_raw_heap =
+ cpp::span<cpp::byte>(reinterpret_cast<cpp::byte *>(heap_low_addr),
+ heap_high_addr - heap_low_addr);
+ freelist_heap = new (&buf) FreeListHeap<>(allocator_freelist_raw_heap);
+}
+
+void *malloc(size_t size) { return freelist_heap->Allocate(size); }
+
+void free(void *ptr) { freelist_heap->Free(ptr); }
+
+void *calloc(size_t num, size_t size) {
+ return freelist_heap->Calloc(num, size);
+}
+
+} // namespace LIBC_NAMESPACE
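Note that these entrypoints are only usable after `MallocInit` has run. On a baremetal target, startup code would wire it up roughly as follows; the `__heap_start`/`__heap_end` symbols are hypothetical linker-script names, not defined by this patch:

    extern uint8_t __heap_start[];  // hypothetical: provided by a linker script
    extern uint8_t __heap_end[];    // hypothetical

    void init_runtime() {
      LIBC_NAMESPACE::MallocInit(__heap_start, __heap_end);
      // malloc/free/calloc are usable from here on.
    }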
diff --git a/libc/src/sys/stat/linux/kernel_statx.h b/libc/src/sys/stat/linux/kernel_statx.h
index 60969160b9ba4f..5b4b014487c928 100644
--- a/libc/src/sys/stat/linux/kernel_statx.h
+++ b/libc/src/sys/stat/linux/kernel_statx.h
@@ -73,7 +73,7 @@ LIBC_INLINE int statx(int dirfd, const char *__restrict path, int flags,
struct stat *__restrict statbuf) {
// We make a statx syscall and copy out the result into the |statbuf|.
::statx_buf xbuf;
- int ret = LIBC_NAMESPACE::syscall_impl<int>(SYS_statx, dirfd, path, flags,
+ int ret = LIBC_NAMESPACE::syscall_impl<int>(0, dirfd, path, flags,
::STATX_BASIC_STATS_MASK, &xbuf);
if (ret < 0)
return -ret;
diff --git a/libc/test/src/CMakeLists.txt b/libc/test/src/CMakeLists.txt
index a5e7a2a4dee725..935feb59ecdf6a 100644
--- a/libc/test/src/CMakeLists.txt
+++ b/libc/test/src/CMakeLists.txt
@@ -61,7 +61,7 @@ add_subdirectory(inttypes)
if(${LIBC_TARGET_OS} STREQUAL "linux")
add_subdirectory(fcntl)
add_subdirectory(sched)
- add_subdirectory(sys)
+ #add_subdirectory(sys)
add_subdirectory(termios)
add_subdirectory(unistd)
endif()
diff --git a/libc/test/src/stdlib/CMakeLists.txt b/libc/test/src/stdlib/CMakeLists.txt
index 6a7faedece380a..5e0e795b3ea1b2 100644
--- a/libc/test/src/stdlib/CMakeLists.txt
+++ b/libc/test/src/stdlib/CMakeLists.txt
@@ -386,19 +386,19 @@ if(LLVM_LIBC_FULL_BUILD)
libc.src.stdlib.quick_exit
)
- # Only the GPU has an in-tree 'malloc' implementation.
- if(LIBC_TARGET_OS_IS_GPU)
- add_libc_test(
- malloc_test
- HERMETIC_TEST_ONLY
- SUITE
- libc-stdlib-tests
- SRCS
- malloc_test.cpp
- DEPENDS
- libc.include.stdlib
- libc.src.stdlib.malloc
- libc.src.stdlib.free
- )
- endif()
+ add_libc_test(
+ malloc_test
+ SUITE
+ libc-stdlib-tests
+ SRCS
+ malloc_test.cpp
+ freelist_malloc_test.cpp
+ freelist_heap_test.cpp
+ freelist_test.cpp
+ DEPENDS
+ libc.include.stdlib
+ libc.src.string.memcmp
+ libc.src.stdlib.malloc
+ libc.src.stdlib.free
+ )
endif()
diff --git a/libc/test/src/stdlib/freelist_heap_test.cpp b/libc/test/src/stdlib/freelist_heap_test.cpp
new file mode 100644
index 00000000000000..cd792168f60b61
--- /dev/null
+++ b/libc/test/src/stdlib/freelist_heap_test.cpp
@@ -0,0 +1,241 @@
+//===-- Unittests for freelist_heap ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/span.h"
+#include "src/stdlib/freelist_heap.h"
+#include "src/string/memcmp.h"
+#include "src/string/memcpy.h"
+#include "test/UnitTest/Test.h"
+
+namespace LIBC_NAMESPACE {
+
+TEST(LlvmLibcFreeListHeap, CanAllocate) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 512;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr = allocator.Allocate(kAllocSize);
+
+ ASSERT_NE(ptr, static_cast<void *>(nullptr));
+ // In this case, the allocator should be returning us the start of the chunk.
+ EXPECT_EQ(ptr, static_cast<void *>(
+ &buf[0] + FreeListHeap<>::BlockType::kBlockOverhead));
+}
+
+TEST(LlvmLibcFreeListHeap, AllocationsDontOverlap) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 512;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr1 = allocator.Allocate(kAllocSize);
+ void *ptr2 = allocator.Allocate(kAllocSize);
+
+ ASSERT_NE(ptr1, static_cast<void *>(nullptr));
+ ASSERT_NE(ptr2, static_cast<void *>(nullptr));
+
+ uintptr_t ptr1_start = reinterpret_cast<uintptr_t>(ptr1);
+ uintptr_t ptr1_end = ptr1_start + kAllocSize;
+ uintptr_t ptr2_start = reinterpret_cast<uintptr_t>(ptr2);
+
+ EXPECT_GT(ptr2_start, ptr1_end);
+}
+
+TEST(LlvmLibcFreeListHeap, CanFreeAndRealloc) {
+  // There's not really a nice way to test that Free works, apart from trying
+  // to get that value back again.
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 512;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr1 = allocator.Allocate(kAllocSize);
+ allocator.Free(ptr1);
+ void *ptr2 = allocator.Allocate(kAllocSize);
+
+ EXPECT_EQ(ptr1, ptr2);
+}
+
+TEST(LlvmLibcFreeListHeap, ReturnsNullWhenAllocationTooLarge) {
+ constexpr size_t N = 2048;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ EXPECT_EQ(allocator.Allocate(N), static_cast<void *>(nullptr));
+}
+
+TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
+ constexpr size_t N = 2048;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ EXPECT_NE(allocator.Allocate(N - FreeListHeap<>::BlockType::kBlockOverhead),
+ static_cast<void *>(nullptr));
+ EXPECT_EQ(allocator.Allocate(1), static_cast<void *>(nullptr));
+}
+
+TEST(LlvmLibcFreeListHeap, ReturnedPointersAreAligned) {
+ constexpr size_t N = 2048;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr1 = allocator.Allocate(1);
+
+ // Should be aligned to native pointer alignment
+ uintptr_t ptr1_start = reinterpret_cast<uintptr_t>(ptr1);
+ size_t alignment = alignof(void *);
+
+ EXPECT_EQ(ptr1_start % alignment, static_cast<size_t>(0));
+
+ void *ptr2 = allocator.Allocate(1);
+ uintptr_t ptr2_start = reinterpret_cast<uintptr_t>(ptr2);
+
+ EXPECT_EQ(ptr2_start % alignment, static_cast<size_t>(0));
+}
+
+TEST(LlvmLibcFreeListHeap, CanRealloc) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 512;
+ constexpr size_t kNewAllocSize = 768;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(1)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr1 = allocator.Allocate(kAllocSize);
+ void *ptr2 = allocator.Realloc(ptr1, kNewAllocSize);
+
+ ASSERT_NE(ptr1, static_cast<void *>(nullptr));
+ ASSERT_NE(ptr2, static_cast<void *>(nullptr));
+}
+
+TEST(LlvmLibcFreeListHeap, ReallocHasSameContent) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = sizeof(int);
+ constexpr size_t kNewAllocSize = sizeof(int) * 2;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(1)};
+ // Data inside the allocated block.
+ cpp::byte data1[kAllocSize];
+ // Data inside the reallocated block.
+ cpp::byte data2[kAllocSize];
+
+ FreeListHeap<> allocator(buf);
+
+ int *ptr1 = reinterpret_cast<int *>(allocator.Allocate(kAllocSize));
+ *ptr1 = 42;
+ memcpy(data1, ptr1, kAllocSize);
+ int *ptr2 = reinterpret_cast<int *>(allocator.Realloc(ptr1, kNewAllocSize));
+ memcpy(data2, ptr2, kAllocSize);
+
+ ASSERT_NE(ptr1, static_cast<int *>(nullptr));
+ ASSERT_NE(ptr2, static_cast<int *>(nullptr));
+ // Verify that data inside the allocated and reallocated chunks are the same.
+ EXPECT_EQ(LIBC_NAMESPACE::memcmp(data1, data2, kAllocSize), 0);
+}
+
+TEST(LlvmLibcFreeListHeap, ReturnsNullReallocFreedPointer) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 512;
+ constexpr size_t kNewAllocSize = 256;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr1 = allocator.Allocate(kAllocSize);
+ allocator.Free(ptr1);
+ void *ptr2 = allocator.Realloc(ptr1, kNewAllocSize);
+
+ EXPECT_EQ(static_cast<void *>(nullptr), ptr2);
+}
+
+TEST(LlvmLibcFreeListHeap, ReallocSmallerSize) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 512;
+ constexpr size_t kNewAllocSize = 256;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr1 = allocator.Allocate(kAllocSize);
+ void *ptr2 = allocator.Realloc(ptr1, kNewAllocSize);
+
+ // For smaller sizes, Realloc will not shrink the block.
+ EXPECT_EQ(ptr1, ptr2);
+}
+
+TEST(LlvmLibcFreeListHeap, ReallocTooLarge) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 512;
+ constexpr size_t kNewAllocSize = 4096;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
+
+ FreeListHeap<> allocator(buf);
+
+ void *ptr1 = allocator.Allocate(kAllocSize);
+ void *ptr2 = allocator.Realloc(ptr1, kNewAllocSize);
+
+  // Realloc() will not invalidate the original pointer if Realloc() fails.
+ EXPECT_NE(static_cast<void *>(nullptr), ptr1);
+ EXPECT_EQ(static_cast<void *>(nullptr), ptr2);
+}
+
+TEST(LlvmLibcFreeListHeap, CanCalloc) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 128;
+ constexpr size_t kNum = 4;
+ constexpr int size = kNum * kAllocSize;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(1)};
+ constexpr cpp::byte zero{0};
+
+ FreeListHeap<> allocator(buf);
+
+ cpp::byte *ptr1 =
+ reinterpret_cast<cpp::byte *>(allocator.Calloc(kNum, kAllocSize));
+
+ // Calloc'd content is zero.
+ for (int i = 0; i < size; i++) {
+ EXPECT_EQ(ptr1[i], zero);
+ }
+}
+
+TEST(LlvmLibcFreeListHeap, CanCallocWeirdSize) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 143;
+ constexpr size_t kNum = 3;
+ constexpr int size = kNum * kAllocSize;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(132)};
+ constexpr cpp::byte zero{0};
+
+ FreeListHeap<> allocator(buf);
+
+ cpp::byte *ptr1 =
+ reinterpret_cast<cpp::byte *>(allocator.Calloc(kNum, kAllocSize));
+
+ // Calloc'd content is zero.
+ for (int i = 0; i < size; i++) {
+ EXPECT_EQ(ptr1[i], zero);
+ }
+}
+
+TEST(LlvmLibcFreeListHeap, CallocTooLarge) {
+ constexpr size_t N = 2048;
+ constexpr size_t kAllocSize = 2049;
+ alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(1)};
+
+ FreeListHeap<> allocator(buf);
+
+ EXPECT_EQ(allocator.Calloc(1, kAllocSize), static_cast<void *>(nullptr));
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/test/src/stdlib/freelist_malloc_test.cpp b/libc/test/src/stdlib/freelist_malloc_test.cpp
new file mode 100644
index 00000000000000..4844ed0127a32c
--- /dev/null
+++ b/libc/test/src/stdlib/freelist_malloc_test.cpp
@@ -0,0 +1,56 @@
+//===-- Unittests for freelist_malloc -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdlib/calloc.h"
+#include "src/stdlib/free.h"
+#include "src/stdlib/freelist_heap.h"
+#include "src/stdlib/malloc.h"
+#include "test/UnitTest/Test.h"
+
+using LIBC_NAMESPACE::freelist_heap;
+
+TEST(LlvmLibcFreeListMalloc, ReplacingMalloc) {
+ constexpr size_t kAllocSize = 256;
+ constexpr size_t kCallocNum = 4;
+ constexpr size_t kCallocSize = 64;
+
+ uint8_t kBuff[4096];
+ LIBC_NAMESPACE::MallocInit(kBuff, kBuff + sizeof(kBuff));
+
+ void *ptr1 = LIBC_NAMESPACE::malloc(kAllocSize);
+
+ const auto &freelist_heap_stats = freelist_heap->heap_stats();
+
+ ASSERT_NE(ptr1, static_cast<void *>(nullptr));
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated, kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed, size_t(0));
+
+ LIBC_NAMESPACE::free(ptr1);
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated, kAllocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed, kAllocSize);
+
+ void *ptr2 = LIBC_NAMESPACE::calloc(kCallocNum, kCallocSize);
+ ASSERT_NE(ptr2, static_cast<void *>(nullptr));
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, kCallocNum * kCallocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+ kAllocSize + kCallocNum * kCallocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed, kAllocSize);
+
+ for (size_t i = 0; i < kCallocNum * kCallocSize; ++i) {
+ EXPECT_EQ(reinterpret_cast<uint8_t *>(ptr2)[i], uint8_t(0));
+ }
+
+ LIBC_NAMESPACE::free(ptr2);
+ EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
+ EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
+ kAllocSize + kCallocNum * kCallocSize);
+ EXPECT_EQ(freelist_heap_stats.cumulative_freed,
+ kAllocSize + kCallocNum * kCallocSize);
+}
diff --git a/libc/test/src/stdlib/freelist_test.cpp b/libc/test/src/stdlib/freelist_test.cpp
new file mode 100644
index 00000000000000..57a71b3b80e286
--- /dev/null
+++ b/libc/test/src/stdlib/freelist_test.cpp
@@ -0,0 +1,172 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stddef.h>
+
+#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/span.h"
+#include "src/stdlib/freelist.h"
+#include "test/UnitTest/Test.h"
+
+using LIBC_NAMESPACE::FreeList;
+using LIBC_NAMESPACE::cpp::array;
+using LIBC_NAMESPACE::cpp::byte;
+using LIBC_NAMESPACE::cpp::span;
+
+static constexpr size_t SIZE = 8;
+static constexpr array<size_t, SIZE> example_sizes = {64, 128, 256, 512,
+ 1024, 2048, 4096, 8192};
+
+TEST(LlvmLibcFreeList, EmptyListHasNoMembers) {
+ FreeList<SIZE> list(example_sizes);
+
+ auto item = list.FindChunk(4);
+ EXPECT_EQ(item.size(), static_cast<size_t>(0));
+ item = list.FindChunk(128);
+ EXPECT_EQ(item.size(), static_cast<size_t>(0));
+}
+
+TEST(LlvmLibcFreeList, CanRetrieveAddedMember) {
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN = 512;
+
+ byte data[kN] = {byte(0)};
+
+ bool ok = list.AddChunk(span<byte>(data, kN));
+ EXPECT_TRUE(ok);
+
+ auto item = list.FindChunk(kN);
+ EXPECT_EQ(item.size(), kN);
+ EXPECT_EQ(item.data(), data);
+}
+
+TEST(LlvmLibcFreeList, CanRetrieveAddedMemberForSmallerSize) {
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN = 512;
+
+ byte data[kN] = {byte(0)};
+
+ ASSERT_TRUE(list.AddChunk(span<byte>(data, kN)));
+ auto item = list.FindChunk(kN / 2);
+ EXPECT_EQ(item.size(), kN);
+ EXPECT_EQ(item.data(), data);
+}
+
+TEST(LlvmLibcFreeList, CanRemoveItem) {
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN = 512;
+
+ byte data[kN] = {byte(0)};
+
+ ASSERT_TRUE(list.AddChunk(span<byte>(data, kN)));
+ EXPECT_TRUE(list.RemoveChunk(span<byte>(data, kN)));
+
+ auto item = list.FindChunk(kN);
+ EXPECT_EQ(item.size(), static_cast<size_t>(0));
+}
+
+TEST(LlvmLibcFreeList, FindReturnsSmallestChunk) {
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN1 = 512;
+ constexpr size_t kN2 = 1024;
+
+ byte data1[kN1] = {byte(0)};
+ byte data2[kN2] = {byte(0)};
+
+ ASSERT_TRUE(list.AddChunk(span<byte>(data1, kN1)));
+ ASSERT_TRUE(list.AddChunk(span<byte>(data2, kN2)));
+
+ auto chunk = list.FindChunk(kN1 / 2);
+ EXPECT_EQ(chunk.size(), kN1);
+ EXPECT_EQ(chunk.data(), data1);
+
+ chunk = list.FindChunk(kN1);
+ EXPECT_EQ(chunk.size(), kN1);
+ EXPECT_EQ(chunk.data(), data1);
+
+ chunk = list.FindChunk(kN1 + 1);
+ EXPECT_EQ(chunk.size(), kN2);
+ EXPECT_EQ(chunk.data(), data2);
+}
+
+TEST(LlvmLibcFreeList, FindReturnsCorrectChunkInSameBucket) {
+ // If we have two values in the same bucket, ensure that the allocation will
+ // pick an appropriately sized one.
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN1 = 512;
+ constexpr size_t kN2 = 257;
+
+ byte data1[kN1] = {byte(0)};
+ byte data2[kN2] = {byte(0)};
+
+ // List should now be 257 -> 512 -> NULL
+ ASSERT_TRUE(list.AddChunk(span<byte>(data1, kN1)));
+ ASSERT_TRUE(list.AddChunk(span<byte>(data2, kN2)));
+
+ auto chunk = list.FindChunk(kN2 + 1);
+ EXPECT_EQ(chunk.size(), kN1);
+}
+
+TEST(LlvmLibcFreeList, FindCanMoveUpThroughBuckets) {
+ // Ensure that finding a chunk will move up through buckets if no appropriate
+ // chunks were found in a given bucket
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN1 = 257;
+ constexpr size_t kN2 = 513;
+
+ byte data1[kN1] = {byte(0)};
+ byte data2[kN2] = {byte(0)};
+
+ // List should now be:
+ // bkt[3] (257 bytes up to 512 bytes) -> 257 -> NULL
+ // bkt[4] (513 bytes up to 1024 bytes) -> 513 -> NULL
+ ASSERT_TRUE(list.AddChunk(span<byte>(data1, kN1)));
+ ASSERT_TRUE(list.AddChunk(span<byte>(data2, kN2)));
+
+  // Request a 258 byte chunk. This should return the 513 byte one
+ auto chunk = list.FindChunk(kN1 + 1);
+ EXPECT_EQ(chunk.size(), kN2);
+}
+
+TEST(LlvmLibcFreeList, RemoveUnknownChunkReturnsNotFound) {
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN = 512;
+
+ byte data[kN] = {byte(0)};
+ byte data2[kN] = {byte(0)};
+
+ ASSERT_TRUE(list.AddChunk(span<byte>(data, kN)));
+ EXPECT_FALSE(list.RemoveChunk(span<byte>(data2, kN)));
+}
+
+TEST(LlvmLibcFreeList, CanStoreMultipleChunksPerBucket) {
+ FreeList<SIZE> list(example_sizes);
+ constexpr size_t kN = 512;
+
+ byte data1[kN] = {byte(0)};
+ byte data2[kN] = {byte(0)};
+
+ ASSERT_TRUE(list.AddChunk(span<byte>(data1, kN)));
+ ASSERT_TRUE(list.AddChunk(span<byte>(data2, kN)));
+
+ auto chunk1 = list.FindChunk(kN);
+ ASSERT_TRUE(list.RemoveChunk(chunk1));
+ auto chunk2 = list.FindChunk(kN);
+ ASSERT_TRUE(list.RemoveChunk(chunk2));
+
+ // Ordering of the chunks doesn't matter
+ EXPECT_TRUE(chunk1.data() != chunk2.data());
+ EXPECT_TRUE(chunk1.data() == data1 || chunk1.data() == data2);
+ EXPECT_TRUE(chunk2.data() == data1 || chunk2.data() == data2);
+}
diff --git a/libc/test/src/stdlib/malloc_test.cpp b/libc/test/src/stdlib/malloc_test.cpp
index d9023cf56d9fea..6fb07bec9f3368 100644
--- a/libc/test/src/stdlib/malloc_test.cpp
+++ b/libc/test/src/stdlib/malloc_test.cpp
@@ -7,10 +7,14 @@
//===----------------------------------------------------------------------===//
#include "src/stdlib/free.h"
+#include "src/stdlib/freelist_heap.h"
#include "src/stdlib/malloc.h"
#include "test/UnitTest/Test.h"
TEST(LlvmLibcMallocTest, Allocate) {
+ uint8_t kBuff[1024];
+ LIBC_NAMESPACE::MallocInit(kBuff, kBuff + sizeof(kBuff));
+
int *ptr = reinterpret_cast<int *>(LIBC_NAMESPACE::malloc(sizeof(int)));
EXPECT_NE(reinterpret_cast<void *>(ptr), static_cast<void *>(nullptr));
*ptr = 1;