[libc] [llvm] [libc][NFC] refactor Cortex `memcpy` code (PR #148204)
Guillaume Chatelet via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 16 00:58:20 PDT 2025
https://github.com/gchatelet updated https://github.com/llvm/llvm-project/pull/148204
From fcb2e5d1b05ff97a23df6e39104beaaea6d8a68e Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Fri, 11 Jul 2025 10:44:46 +0000
Subject: [PATCH 1/4] [libc] refactor Cortex memcpy code in preparation of
memset
---
libc/src/string/memory_utils/CMakeLists.txt | 1 +
libc/src/string/memory_utils/arm/common.h | 52 ++++++++
.../string/memory_utils/arm/inline_memcpy.h | 126 +++++++-----------
.../llvm-project-overlay/libc/BUILD.bazel | 1 +
4 files changed, 100 insertions(+), 80 deletions(-)
create mode 100644 libc/src/string/memory_utils/arm/common.h
diff --git a/libc/src/string/memory_utils/CMakeLists.txt b/libc/src/string/memory_utils/CMakeLists.txt
index a967247db53f4..633d9f12949d2 100644
--- a/libc/src/string/memory_utils/CMakeLists.txt
+++ b/libc/src/string/memory_utils/CMakeLists.txt
@@ -7,6 +7,7 @@ add_header_library(
aarch64/inline_memcpy.h
aarch64/inline_memmove.h
aarch64/inline_memset.h
+ arm/common.h
arm/inline_memcpy.h
generic/aligned_access.h
generic/byte_per_byte.h
diff --git a/libc/src/string/memory_utils/arm/common.h b/libc/src/string/memory_utils/arm/common.h
new file mode 100644
index 0000000000000..dafd4aaf02343
--- /dev/null
+++ b/libc/src/string/memory_utils/arm/common.h
@@ -0,0 +1,52 @@
+//===-- Common constants and defines for arm --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_COMMON_H
+#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_COMMON_H
+
+#include "src/__support/macros/attributes.h" // LIBC_INLINE_VAR
+#include "src/string/memory_utils/utils.h" // CPtr, Ptr, distance_to_align
+
+#include <stddef.h> // size_t
+
+// https://libc.llvm.org/compiler_support.html
+// Support for [[likely]] / [[unlikely]]
+// [X] GCC 12.2
+// [X] Clang 12
+// [ ] Clang 11
+#define LIBC_ATTR_LIKELY [[likely]]
+#define LIBC_ATTR_UNLIKELY [[unlikely]]
+
+#if defined(LIBC_COMPILER_IS_CLANG)
+#if LIBC_COMPILER_CLANG_VER < 1200
+#undef LIBC_ATTR_LIKELY
+#undef LIBC_ATTR_UNLIKELY
+#define LIBC_ATTR_LIKELY
+#define LIBC_ATTR_UNLIKELY
+#endif
+#endif
+
+namespace LIBC_NAMESPACE_DECL {
+
+LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
+
+enum class BumpSize : bool { kNo = false, kYes = true };
+enum class BlockOp : bool { kFull = false, kByWord = true };
+
+LIBC_INLINE auto misaligned(CPtr ptr) {
+ return distance_to_align_down<kWordSize>(ptr);
+}
+
+LIBC_INLINE CPtr bitwise_or(CPtr a, CPtr b) {
+ return cpp::bit_cast<CPtr>(cpp::bit_cast<uintptr_t>(a) |
+ cpp::bit_cast<uintptr_t>(b));
+}
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_COMMON_H
diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index 61efebe29b485..ecf938d9ba3a6 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -10,57 +10,35 @@
#include "src/__support/macros/attributes.h" // LIBC_INLINE
#include "src/__support/macros/optimization.h" // LIBC_LOOP_NOUNROLL
+#include "src/string/memory_utils/arm/common.h" // LIBC_ATTR_LIKELY, LIBC_ATTR_UNLIKELY
#include "src/string/memory_utils/utils.h" // memcpy_inline, distance_to_align
#include <stddef.h> // size_t
-// https://libc.llvm.org/compiler_support.html
-// Support for [[likely]] / [[unlikely]]
-// [X] GCC 12.2
-// [X] Clang 12
-// [ ] Clang 11
-#define LIBC_ATTR_LIKELY [[likely]]
-#define LIBC_ATTR_UNLIKELY [[unlikely]]
-
-#if defined(LIBC_COMPILER_IS_CLANG)
-#if LIBC_COMPILER_CLANG_VER < 1200
-#undef LIBC_ATTR_LIKELY
-#undef LIBC_ATTR_UNLIKELY
-#define LIBC_ATTR_LIKELY
-#define LIBC_ATTR_UNLIKELY
-#endif
-#endif
-
namespace LIBC_NAMESPACE_DECL {
namespace {
-LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
-
-enum Strategy {
- ForceWordLdStChain,
- AssumeWordAligned,
- AssumeUnaligned,
-};
+template <size_t bytes>
+LIBC_INLINE void copy_assume_aligned(void *dst, const void *src) {
+ constexpr size_t alignment = bytes > kWordSize ? kWordSize : bytes;
+ memcpy_inline<bytes>(assume_aligned<alignment>(dst),
+ assume_aligned<alignment>(src));
+}
-template <size_t bytes, Strategy strategy = AssumeUnaligned>
-LIBC_INLINE void copy_and_bump_pointers(Ptr &dst, CPtr &src) {
- if constexpr (strategy == AssumeUnaligned) {
- memcpy_inline<bytes>(assume_aligned<1>(dst), assume_aligned<1>(src));
- } else if constexpr (strategy == AssumeWordAligned) {
- static_assert(bytes >= kWordSize);
- memcpy_inline<bytes>(assume_aligned<kWordSize>(dst),
- assume_aligned<kWordSize>(src));
- } else if constexpr (strategy == ForceWordLdStChain) {
+template <size_t bytes, BlockOp block_op = BlockOp::kFull>
+LIBC_INLINE void copy_block_and_bump_pointers(Ptr &dst, CPtr &src) {
+ if constexpr (block_op == BlockOp::kFull) {
+ copy_assume_aligned<bytes>(dst, src);
+ } else {
// We restrict loads/stores to 4 bytes to prevent the use of load/store
- // multiple (LDM, STM) and load/store double (LDRD, STRD). First, they may
- // fault (see notes below) and second, they use more registers which in turn
- // adds push/pop instructions in the hot path.
- static_assert((bytes % kWordSize == 0) && (bytes >= kWordSize));
+ // multiple (LDM, STM) and load/store double (LDRD, STRD). First, they
+ // may fault (see notes below) and second, they use more registers which
+ // in turn adds push/pop instructions in the hot path.
+ static_assert(bytes >= kWordSize);
LIBC_LOOP_UNROLL
- for (size_t i = 0; i < bytes / kWordSize; ++i) {
- const size_t offset = i * kWordSize;
- memcpy_inline<kWordSize>(dst + offset, src + offset);
+ for (size_t offset = 0; offset < bytes; offset += kWordSize) {
+ copy_assume_aligned<kWordSize>(dst + offset, src + offset);
}
}
// In the 1, 2, 4 byte copy case, the compiler can fold pointer offsetting
@@ -72,30 +50,19 @@ LIBC_INLINE void copy_and_bump_pointers(Ptr &dst, CPtr &src) {
src += bytes;
}
-LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
- const size_t size) {
+template <size_t bytes, BlockOp block_op, BumpSize bump_size = BumpSize::kYes>
+LIBC_INLINE void consume_by_aligned_block(Ptr &dst, CPtr &src, size_t &size) {
LIBC_LOOP_NOUNROLL
- for (size_t i = 0; i < size; ++i)
- *dst++ = *src++;
-}
-
-template <size_t block_size, Strategy strategy>
-LIBC_INLINE void copy_blocks_and_update_args(Ptr &dst, CPtr &src,
- size_t &size) {
- LIBC_LOOP_NOUNROLL
- for (size_t i = 0; i < size / block_size; ++i)
- copy_and_bump_pointers<block_size, strategy>(dst, src);
- // Update `size` once at the end instead of once per iteration.
- size %= block_size;
-}
-
-LIBC_INLINE CPtr bitwise_or(CPtr a, CPtr b) {
- return cpp::bit_cast<CPtr>(cpp::bit_cast<uintptr_t>(a) |
- cpp::bit_cast<uintptr_t>(b));
+ for (size_t i = 0; i < size / bytes; ++i)
+ copy_block_and_bump_pointers<bytes, block_op>(dst, src);
+ if constexpr (bump_size == BumpSize::kYes) {
+ size %= bytes;
+ }
}
-LIBC_INLINE auto misaligned(CPtr a) {
- return distance_to_align_down<kWordSize>(a);
+LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
+ size_t size) {
+ consume_by_aligned_block<1, BlockOp::kFull, BumpSize::kNo>(dst, src, size);
}
} // namespace
@@ -125,20 +92,21 @@ LIBC_INLINE auto misaligned(CPtr a) {
if (src_alignment == 0)
LIBC_ATTR_LIKELY {
// Both `src` and `dst` are now word-aligned.
- copy_blocks_and_update_args<64, AssumeWordAligned>(dst, src, size);
- copy_blocks_and_update_args<16, AssumeWordAligned>(dst, src, size);
- copy_blocks_and_update_args<4, AssumeWordAligned>(dst, src, size);
+ consume_by_aligned_block<64, BlockOp::kFull>(dst, src, size);
+ consume_by_aligned_block<16, BlockOp::kFull>(dst, src, size);
+ consume_by_aligned_block<4, BlockOp::kFull>(dst, src, size);
}
else {
// `dst` is aligned but `src` is not.
LIBC_LOOP_NOUNROLL
while (size >= kWordSize) {
- // Recompose word from multiple loads depending on the alignment.
+ // Recompose word from multiple loads depending on the
+ // alignment.
const uint32_t value =
src_alignment == 2
? load_aligned<uint32_t, uint16_t, uint16_t>(src)
: load_aligned<uint32_t, uint8_t, uint16_t, uint8_t>(src);
- memcpy_inline<kWordSize>(assume_aligned<kWordSize>(dst), &value);
+ copy_assume_aligned<kWordSize>(dst, &value);
dst += kWordSize;
src += kWordSize;
size -= kWordSize;
@@ -169,31 +137,33 @@ LIBC_INLINE auto misaligned(CPtr a) {
if (size < 8)
LIBC_ATTR_UNLIKELY {
if (size & 1)
- copy_and_bump_pointers<1>(dst, src);
+ copy_block_and_bump_pointers<1>(dst, src);
if (size & 2)
- copy_and_bump_pointers<2>(dst, src);
+ copy_block_and_bump_pointers<2>(dst, src);
if (size & 4)
- copy_and_bump_pointers<4>(dst, src);
+ copy_block_and_bump_pointers<4>(dst, src);
return;
}
if (misaligned(src))
LIBC_ATTR_UNLIKELY {
const size_t offset = distance_to_align_up<kWordSize>(dst);
if (offset & 1)
- copy_and_bump_pointers<1>(dst, src);
+ copy_block_and_bump_pointers<1>(dst, src);
if (offset & 2)
- copy_and_bump_pointers<2>(dst, src);
+ copy_block_and_bump_pointers<2>(dst, src);
size -= offset;
}
}
- copy_blocks_and_update_args<64, ForceWordLdStChain>(dst, src, size);
- copy_blocks_and_update_args<16, ForceWordLdStChain>(dst, src, size);
- copy_blocks_and_update_args<4, AssumeUnaligned>(dst, src, size);
+ // `dst` and `src` are not necessarily both aligned at that point but this
+ // implementation assumes hardware support for unaligned loads and stores.
+ consume_by_aligned_block<64, BlockOp::kByWord>(dst, src, size);
+ consume_by_aligned_block<16, BlockOp::kByWord>(dst, src, size);
+ consume_by_aligned_block<4, BlockOp::kFull>(dst, src, size);
if (size & 1)
- copy_and_bump_pointers<1>(dst, src);
+ copy_block_and_bump_pointers<1>(dst, src);
if (size & 2)
LIBC_ATTR_UNLIKELY
- copy_and_bump_pointers<2>(dst, src);
+ copy_block_and_bump_pointers<2>(dst, src);
}
[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(void *__restrict dst_,
@@ -210,8 +180,4 @@ LIBC_INLINE auto misaligned(CPtr a) {
} // namespace LIBC_NAMESPACE_DECL
-// Cleanup local macros
-#undef LIBC_ATTR_LIKELY
-#undef LIBC_ATTR_UNLIKELY
-
#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index b13a909770e58..5fa6dc1ee04fa 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -4268,6 +4268,7 @@ libc_support_library(
"src/string/memory_utils/aarch64/inline_memcpy.h",
"src/string/memory_utils/aarch64/inline_memmove.h",
"src/string/memory_utils/aarch64/inline_memset.h",
+ "src/string/memory_utils/arm/common.h",
"src/string/memory_utils/arm/inline_memcpy.h",
"src/string/memory_utils/generic/aligned_access.h",
"src/string/memory_utils/generic/byte_per_byte.h",
From 3b59aa7abdbb4156e27778f98853a04970109b8c Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Fri, 11 Jul 2025 11:23:14 +0000
Subject: [PATCH 2/4] Handles `block_op == BlockOp::kByWord` explicitly
---
libc/src/string/memory_utils/arm/common.h | 4 ++--
libc/src/string/memory_utils/arm/inline_memcpy.h | 4 +++-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/libc/src/string/memory_utils/arm/common.h b/libc/src/string/memory_utils/arm/common.h
index dafd4aaf02343..9b0fae06232f7 100644
--- a/libc/src/string/memory_utils/arm/common.h
+++ b/libc/src/string/memory_utils/arm/common.h
@@ -35,8 +35,8 @@ namespace LIBC_NAMESPACE_DECL {
LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
-enum class BumpSize : bool { kNo = false, kYes = true };
-enum class BlockOp : bool { kFull = false, kByWord = true };
+enum class BumpSize { kNo, kYes };
+enum class BlockOp { kFull, kByWord };
LIBC_INLINE auto misaligned(CPtr ptr) {
return distance_to_align_down<kWordSize>(ptr);
diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index ecf938d9ba3a6..d515ca419c2ba 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -30,7 +30,7 @@ template <size_t bytes, BlockOp block_op = BlockOp::kFull>
LIBC_INLINE void copy_block_and_bump_pointers(Ptr &dst, CPtr &src) {
if constexpr (block_op == BlockOp::kFull) {
copy_assume_aligned<bytes>(dst, src);
- } else {
+ } else if constexpr (block_op == BlockOp::kByWord) {
// We restrict loads/stores to 4 bytes to prevent the use of load/store
// multiple (LDM, STM) and load/store double (LDRD, STRD). First, they
// may fault (see notes below) and second, they use more registers which
@@ -40,6 +40,8 @@ LIBC_INLINE void copy_block_and_bump_pointers(Ptr &dst, CPtr &src) {
for (size_t offset = 0; offset < bytes; offset += kWordSize) {
copy_assume_aligned<kWordSize>(dst + offset, src + offset);
}
+ } else {
+ static_assert(false, "Invalid BlockOp");
}
// In the 1, 2, 4 byte copy case, the compiler can fold pointer offsetting
// into the load/store instructions.
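For context on the `BlockOp::kByWord` branch this patch makes explicit: the intent is an unrolled sequence of single-word copies so that no individual access is wider than 4 bytes. A rough standalone sketch of that shape (hypothetical names, plain `memcpy` instead of `memcpy_inline`):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Illustrative only: copy `Bytes` (a whole number of 32-bit words) one word
// at a time. Each memcpy has a constant 4-byte size and lowers to single word
// loads/stores; whether adjacent pairs later get merged into LDRD/STRD
// depends on the aliasing facts available to the compiler (see the asm
// barrier discussed in PATCH 4/4).
template <size_t Bytes>
inline void copy_by_word(char *dst, const char *src) {
  static_assert(Bytes % sizeof(uint32_t) == 0, "whole words only");
  for (size_t offset = 0; offset < Bytes; offset += sizeof(uint32_t))
    memcpy(dst + offset, src + offset, sizeof(uint32_t));
}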
From 04a2946447650b4a3064aaef2934457c96048cce Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Tue, 15 Jul 2025 16:20:04 +0000
Subject: [PATCH 3/4] Improve design and documentation, this version performs
better than the previous one.
---
libc/src/string/memory_utils/arm/common.h | 2 +-
.../string/memory_utils/arm/inline_memcpy.h | 135 +++++++++++-------
2 files changed, 83 insertions(+), 54 deletions(-)
diff --git a/libc/src/string/memory_utils/arm/common.h b/libc/src/string/memory_utils/arm/common.h
index 9b0fae06232f7..155bc3481709e 100644
--- a/libc/src/string/memory_utils/arm/common.h
+++ b/libc/src/string/memory_utils/arm/common.h
@@ -35,7 +35,7 @@ namespace LIBC_NAMESPACE_DECL {
LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
-enum class BumpSize { kNo, kYes };
+enum class AssumeAccess { kUnknown, kAligned };
enum class BlockOp { kFull, kByWord };
LIBC_INLINE auto misaligned(CPtr ptr) {
diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index d515ca419c2ba..f9651251ddac5 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -5,6 +5,11 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+// The functions defined in this file give approximate code size. These sizes
+// assume the following configuration options:
+// - LIBC_CONF_KEEP_FRAME_POINTER = false
+// - LIBC_CONF_ENABLE_STRONG_STACK_PROTECTOR = false
+// - LIBC_ADD_NULL_CHECKS = false
#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
@@ -19,26 +24,34 @@ namespace LIBC_NAMESPACE_DECL {
namespace {
-template <size_t bytes>
-LIBC_INLINE void copy_assume_aligned(void *dst, const void *src) {
- constexpr size_t alignment = bytes > kWordSize ? kWordSize : bytes;
- memcpy_inline<bytes>(assume_aligned<alignment>(dst),
- assume_aligned<alignment>(src));
+// Performs a copy of `bytes` byte from `src` to `dst`. This function has the
+// semantics of `memcpy` where `src` and `dst` are `__restrict`. The compiler is
+// free to use whatever instruction is best for the size and assumed access.
+template <size_t bytes, AssumeAccess access>
+LIBC_INLINE void copy(void *dst, const void *src) {
+ if constexpr (access == AssumeAccess::kAligned) {
+ constexpr size_t alignment = bytes > kWordSize ? kWordSize : bytes;
+ memcpy_inline<bytes>(assume_aligned<alignment>(dst),
+ assume_aligned<alignment>(src));
+ } else if constexpr (access == AssumeAccess::kUnknown) {
+ memcpy_inline<bytes>(dst, src);
+ } else {
+ static_assert(false);
+ }
}
-template <size_t bytes, BlockOp block_op = BlockOp::kFull>
+template <size_t bytes, BlockOp block_op = BlockOp::kFull,
+ AssumeAccess access = AssumeAccess::kUnknown>
LIBC_INLINE void copy_block_and_bump_pointers(Ptr &dst, CPtr &src) {
if constexpr (block_op == BlockOp::kFull) {
- copy_assume_aligned<bytes>(dst, src);
+ copy<bytes, access>(dst, src);
} else if constexpr (block_op == BlockOp::kByWord) {
// We restrict loads/stores to 4 bytes to prevent the use of load/store
- // multiple (LDM, STM) and load/store double (LDRD, STRD). First, they
- // may fault (see notes below) and second, they use more registers which
- // in turn adds push/pop instructions in the hot path.
- static_assert(bytes >= kWordSize);
+ // multiple (LDM, STM) and load/store double (LDRD, STRD).
+ static_assert((bytes % kWordSize == 0) && (bytes >= kWordSize));
LIBC_LOOP_UNROLL
for (size_t offset = 0; offset < bytes; offset += kWordSize) {
- copy_assume_aligned<kWordSize>(dst + offset, src + offset);
+ copy<kWordSize, access>(dst + offset, src + offset);
}
} else {
static_assert(false, "Invalid BlockOp");
@@ -52,28 +65,27 @@ LIBC_INLINE void copy_block_and_bump_pointers(Ptr &dst, CPtr &src) {
src += bytes;
}
-template <size_t bytes, BlockOp block_op, BumpSize bump_size = BumpSize::kYes>
-LIBC_INLINE void consume_by_aligned_block(Ptr &dst, CPtr &src, size_t &size) {
+template <size_t bytes, BlockOp block_op, AssumeAccess access>
+LIBC_INLINE void consume_by_block(Ptr &dst, CPtr &src, size_t &size) {
LIBC_LOOP_NOUNROLL
for (size_t i = 0; i < size / bytes; ++i)
- copy_block_and_bump_pointers<bytes, block_op>(dst, src);
- if constexpr (bump_size == BumpSize::kYes) {
- size %= bytes;
- }
+ copy_block_and_bump_pointers<bytes, block_op, access>(dst, src);
+ size %= bytes;
}
-LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
- size_t size) {
- consume_by_aligned_block<1, BlockOp::kFull, BumpSize::kNo>(dst, src, size);
+[[maybe_unused]] LIBC_INLINE void
+copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
+ LIBC_LOOP_NOUNROLL
+ for (size_t i = 0; i < size; ++i)
+ *dst++ = *src++;
}
} // namespace
-// Implementation for Cortex-M0, M0+, M1.
-// Notes:
-// - It compiles down to 196 bytes, but 220 bytes when used through `memcpy`
-// that also needs to return the `dst` ptr.
-// - These cores do not allow for unaligned loads/stores.
+// Implementation for Cortex-M0, M0+, M1 cores that do not allow for unaligned
+// loads/stores. It compiles down to 208 bytes when used through `memcpy` that
+// also needs to return the `dst` ptr.
+// Note:
// - When `src` and `dst` are coaligned, we start by aligning them and perform
// bulk copies. We let the compiler know the pointers are aligned so it can
// use load/store multiple (LDM, STM). This significantly increases throughput
@@ -94,21 +106,29 @@ LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
if (src_alignment == 0)
LIBC_ATTR_LIKELY {
// Both `src` and `dst` are now word-aligned.
- consume_by_aligned_block<64, BlockOp::kFull>(dst, src, size);
- consume_by_aligned_block<16, BlockOp::kFull>(dst, src, size);
- consume_by_aligned_block<4, BlockOp::kFull>(dst, src, size);
+ // We first copy by blocks of 64 bytes, the compiler will use 4
+ // load/store multiple (LDM, STM), each of 4 words. This requires more
+ // registers so additional push/pop are needed but the speedup is worth
+ // it.
+ consume_by_block<64, BlockOp::kFull, AssumeAccess::kAligned>(dst, src,
+ size);
+ // Then we use blocks of 4 word load/store.
+ consume_by_block<16, BlockOp::kByWord, AssumeAccess::kAligned>(dst, src,
+ size);
+ // Then we use word by word copy.
+ consume_by_block<4, BlockOp::kByWord, AssumeAccess::kAligned>(dst, src,
+ size);
}
else {
// `dst` is aligned but `src` is not.
LIBC_LOOP_NOUNROLL
while (size >= kWordSize) {
- // Recompose word from multiple loads depending on the
- // alignment.
+ // Recompose word from multiple loads depending on the alignment.
const uint32_t value =
src_alignment == 2
? load_aligned<uint32_t, uint16_t, uint16_t>(src)
: load_aligned<uint32_t, uint8_t, uint16_t, uint8_t>(src);
- copy_assume_aligned<kWordSize>(dst, &value);
+ copy<kWordSize, AssumeAccess::kAligned>(dst, &value);
dst += kWordSize;
src += kWordSize;
size -= kWordSize;
@@ -121,17 +141,8 @@ LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
}
// Implementation for Cortex-M3, M4, M7, M23, M33, M35P, M52 with hardware
-// support for unaligned loads and stores.
-// Notes:
-// - It compiles down to 266 bytes.
-// - `dst` and `src` are not `__restrict` to prevent the compiler from
-// reordering loads/stores.
-// - We keep state variables to a strict minimum to keep everything in the free
-// registers and prevent costly push / pop.
-// - If unaligned single loads/stores to normal memory are supported, unaligned
-// accesses for load/store multiple (LDM, STM) and load/store double (LDRD,
-// STRD) instructions are generally not supported and will still fault so we
-// make sure to restrict unrolling to word loads/stores.
+// support for unaligned loads and stores. It compiles down to 272 bytes when
+// used through `memcpy` that also needs to return the `dst` ptr.
[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_mid_end(Ptr dst, CPtr src,
size_t size) {
if (misaligned(bitwise_or(src, dst)))
@@ -157,22 +168,40 @@ LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
}
}
// `dst` and `src` are not necessarily both aligned at that point but this
- // implementation assumes hardware support for unaligned loads and stores.
- consume_by_aligned_block<64, BlockOp::kByWord>(dst, src, size);
- consume_by_aligned_block<16, BlockOp::kByWord>(dst, src, size);
- consume_by_aligned_block<4, BlockOp::kFull>(dst, src, size);
+ // implementation assumes hardware support for unaligned loads and stores so
+ // it is still fast to perform unrolled word by word copy. Note that wider
+ // accesses through the use of load/store multiple (LDM, STM) and load/store
+ // double (LDRD, STRD) instructions are generally not supported and can fault.
+ // By forcing decomposition of 64 bytes copy into word by word copy, the
+ // compiler can use the first load to prefetch memory:
+ // ldr r3, [r1, #64]! <- prefetch next cache line
+ // str r3, [r0]
+ // ldr r3, [r1, #0x4]
+ // str r3, [r0, #0x4]
+ // ...
+ // ldr r3, [r1, #0x3c]
+ // str r3, [r0, #0x3c]
+ // This is a bit detrimental for sizes between 64 and 256 (less than 10%
+ // penalty) but the prefetch yields better throughput for larger copies.
+ consume_by_block<64, BlockOp::kByWord, AssumeAccess::kUnknown>(dst, src,
+ size);
+ consume_by_block<16, BlockOp::kByWord, AssumeAccess::kUnknown>(dst, src,
+ size);
+ consume_by_block<4, BlockOp::kByWord, AssumeAccess::kUnknown>(dst, src, size);
if (size & 1)
copy_block_and_bump_pointers<1>(dst, src);
if (size & 2)
- LIBC_ATTR_UNLIKELY
- copy_block_and_bump_pointers<2>(dst, src);
+ copy_block_and_bump_pointers<2>(dst, src);
}
-[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(void *__restrict dst_,
- const void *__restrict src_,
+[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(Ptr dst, CPtr src,
size_t size) {
- Ptr dst = cpp::bit_cast<Ptr>(dst_);
- CPtr src = cpp::bit_cast<CPtr>(src_);
+ // The compiler performs alias analysis and is able to prove that `dst` and
+ // `src` do not alias by propagating the `__restrict` keyword from the
+ // `memcpy` prototype. This allows the compiler to merge consecutive
+ // load/store (LDR, STR) instructions into load/store double (LDRD, STRD)
+ // instructions.
+ asm volatile("" : "+r"(dst), "+r"(src));
#ifdef __ARM_FEATURE_UNALIGNED
return inline_memcpy_arm_mid_end(dst, src, size);
#else
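As a side note on the `AssumeAccess::kAligned` path added in this patch: telling the compiler that both pointers are word-aligned is what allows it to pick wider instructions (e.g. load/store multiple) for a fixed-size copy. A minimal standalone sketch of the idea, assuming the GCC/Clang builtins rather than the libc `assume_aligned`/`memcpy_inline` helpers:

#include <stddef.h>
#include <stdint.h>

// Illustrative only: promise 4-byte alignment so the compiler may use word or
// load/store-multiple instructions for a fixed-size copy instead of byte
// accesses.
template <size_t Bytes>
inline void copy_word_aligned(void *dst, const void *src) {
  void *d = __builtin_assume_aligned(dst, alignof(uint32_t));
  const void *s = __builtin_assume_aligned(src, alignof(uint32_t));
  __builtin_memcpy(d, s, Bytes); // constant size, so it expands inline
}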
From b34235e630198b3d549d0244ded67006182a7370 Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Tue, 15 Jul 2025 16:45:55 +0000
Subject: [PATCH 4/4] Improve documentation
---
libc/src/string/memory_utils/arm/inline_memcpy.h | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index f9651251ddac5..30b99d41e0967 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -199,8 +199,10 @@ copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
// The compiler performs alias analysis and is able to prove that `dst` and
// `src` do not alias by propagating the `__restrict` keyword from the
// `memcpy` prototype. This allows the compiler to merge consecutive
- // load/store (LDR, STR) instructions into load/store double (LDRD, STRD)
- // instructions.
+ // load/store (LDR, STR) instructions generated in
+ // `copy_block_and_bump_pointers` with `BlockOp::kByWord` into load/store
+ double (LDRD, STRD) instructions; this is undesirable, so we prevent the
+ // compiler from inferring `__restrict` with the following line.
asm volatile("" : "+r"(dst), "+r"(src));
#ifdef __ARM_FEATURE_UNALIGNED
return inline_memcpy_arm_mid_end(dst, src, size);
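As a general illustration of the empty `asm volatile` idiom documented above (a sketch assuming the GCC/Clang extended inline-asm syntax): listing the pointers as read-write register operands emits no instructions, but it makes their values opaque to the optimizer, so deductions such as an inferred `__restrict` relationship do not propagate past the statement.

// Illustrative only: an empty asm statement with both pointers as read-write
// register operands. It generates no code, but the optimizer must assume the
// values may have changed, so facts it derived about them (for example an
// inferred `__restrict` relationship) do not survive past this point.
inline void hide_pointer_facts(void *&dst, const void *&src) {
  asm volatile("" : "+r"(dst), "+r"(src));
}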