[libc] [llvm] [libc] Improve memcpy for ARM Cortex-M supporting unaligned accesses. (PR #144872)

Guillaume Chatelet via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 25 01:44:43 PDT 2025


https://github.com/gchatelet updated https://github.com/llvm/llvm-project/pull/144872

From 757dcdc44a902f19b83eacc107e030de39ae2b95 Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Thu, 19 Jun 2025 10:56:50 +0000
Subject: [PATCH 1/4] [libc] Improve memcpy for ARM Cortex-M supporting
 unaligned accesses.

---
 libc/src/__support/macros/optimization.h      |   4 +-
 libc/src/string/memory_utils/CMakeLists.txt   |   1 +
 .../string/memory_utils/arm/inline_memcpy.h   | 127 ++++++++++++++++++
 libc/src/string/memory_utils/inline_memcpy.h  |   3 +
 libc/src/string/memory_utils/utils.h          |   2 +-
 .../llvm-project-overlay/libc/BUILD.bazel     |   1 +
 6 files changed, 136 insertions(+), 2 deletions(-)
 create mode 100644 libc/src/string/memory_utils/arm/inline_memcpy.h

diff --git a/libc/src/__support/macros/optimization.h b/libc/src/__support/macros/optimization.h
index 253843e5e37aa..f7133c94b405d 100644
--- a/libc/src/__support/macros/optimization.h
+++ b/libc/src/__support/macros/optimization.h
@@ -10,7 +10,7 @@
 #ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_OPTIMIZATION_H
 #define LLVM_LIBC_SRC___SUPPORT_MACROS_OPTIMIZATION_H
 
-#include "src/__support/macros/attributes.h"          // LIBC_INLINE
+#include "src/__support/macros/attributes.h" // LIBC_INLINE
 #include "src/__support/macros/config.h"
 #include "src/__support/macros/properties/compiler.h" // LIBC_COMPILER_IS_CLANG
 
@@ -30,8 +30,10 @@ LIBC_INLINE constexpr bool expects_bool_condition(T value, T expected) {
 
 #if defined(LIBC_COMPILER_IS_CLANG)
 #define LIBC_LOOP_NOUNROLL _Pragma("nounroll")
+#define LIBC_LOOP_UNROLL _Pragma("unroll")
 #elif defined(LIBC_COMPILER_IS_GCC)
 #define LIBC_LOOP_NOUNROLL _Pragma("GCC unroll 0")
+#define LIBC_LOOP_UNROLL _Pragma("GCC unroll 2048")
 #else
 #error "Unhandled compiler"
 #endif
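
The new LIBC_LOOP_UNROLL macro is the counterpart of the existing
LIBC_LOOP_NOUNROLL: it asks the compiler to fully unroll a loop whose trip
count is known at compile time. A minimal sketch of how such a macro is meant
to be used (the function and template parameter names here are illustrative,
not part of the patch):

    #include <stddef.h> // size_t
    #include <stdint.h> // uint32_t

    // With a constant trip count the pragma turns the loop into a
    // straight-line chain of word-sized copies.
    template <size_t kBytes>
    void copy_words(char *dst, const char *src) {
      LIBC_LOOP_UNROLL
      for (size_t i = 0; i < kBytes / sizeof(uint32_t); ++i)
        __builtin_memcpy(dst + i * sizeof(uint32_t),
                         src + i * sizeof(uint32_t), sizeof(uint32_t));
    }
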
diff --git a/libc/src/string/memory_utils/CMakeLists.txt b/libc/src/string/memory_utils/CMakeLists.txt
index 08c0b0d34d503..a967247db53f4 100644
--- a/libc/src/string/memory_utils/CMakeLists.txt
+++ b/libc/src/string/memory_utils/CMakeLists.txt
@@ -7,6 +7,7 @@ add_header_library(
     aarch64/inline_memcpy.h
     aarch64/inline_memmove.h
     aarch64/inline_memset.h
+    arm/inline_memcpy.h
     generic/aligned_access.h
     generic/byte_per_byte.h
     inline_bcmp.h
diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
new file mode 100644
index 0000000000000..be8bc6459b6c4
--- /dev/null
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -0,0 +1,127 @@
+#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
+#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
+
+#include "src/__support/macros/attributes.h"   // LIBC_INLINE
+#include "src/__support/macros/optimization.h" // LIBC_LOOP_NOUNROLL
+#include "src/string/memory_utils/utils.h" // memcpy_inline, distance_to_align
+
+#include <stddef.h> // size_t
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace {
+
+LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
+
+template <size_t bytes>
+LIBC_INLINE void copy_and_bump_pointers(Ptr &dst, CPtr &src) {
+  if constexpr (bytes == 1 || bytes == 2 || bytes == 4) {
+    memcpy_inline<bytes>(dst, src);
+  } else {
+    // We restrict loads/stores to 4 bytes to prevent the use of load/store
+    // multiple (LDM, STM) and load/store double (LDRD, STRD). First, they may
+    // fault (see notes below) and second, they use more registers, which in
+    // turn adds push/pop instructions in the hot path.
+    static_assert(bytes % kWordSize == 0);
+    LIBC_LOOP_UNROLL
+    for (size_t i = 0; i < bytes / kWordSize; ++i) {
+      const uintptr_t offset = i * kWordSize;
+      memcpy_inline<kWordSize>(dst + offset, src + offset);
+    }
+  }
+  // In the 1, 2, 4 byte copy case, the compiler can fold pointer offsetting
+  // into the load/store instructions.
+  // e.g.,
+  // ldrb  r3, [r1], #1
+  // strb  r3, [r0], #1
+  dst += bytes;
+  src += bytes;
+}
+
+template <size_t block_size>
+LIBC_INLINE void copy_blocks(Ptr &dst, CPtr &src, size_t &size) {
+  LIBC_LOOP_NOUNROLL
+  for (size_t i = 0; i < size / block_size; ++i)
+    copy_and_bump_pointers<block_size>(dst, src);
+  // Update `size` once at the end instead of once per iteration.
+  size %= block_size;
+}
+
+LIBC_INLINE CPtr bitwise_or(CPtr a, CPtr b) {
+  return cpp::bit_cast<CPtr>(cpp::bit_cast<uintptr_t>(a) |
+                             cpp::bit_cast<uintptr_t>(b));
+}
+
+LIBC_INLINE auto misaligned(CPtr a) {
+  return distance_to_align_down<kWordSize>(a);
+}
+
+} // namespace
+
+// Implementation for Cortex-M0, M0+, M1.
+// The implementation makes sure that all accesses are aligned.
+[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_low_end(Ptr dst, CPtr src,
+                                                            size_t size) {
+  // For now, dummy implementation that performs byte per byte copy.
+  LIBC_LOOP_NOUNROLL
+  for (size_t i = 0; i < size; ++i)
+    dst[i] = src[i];
+}
+
+// Implementation for Cortex-M3, M4, M7, M23, M33, M35P, M52 with hardware
+// support for unaligned loads and stores.
+// Notes:
+// - It compiles down to <300 bytes.
+// - `dst` and `src` are not `__restrict` to prevent the compiler from
+//   reordering loads/stores.
+// - We keep state variables to a strict minimum to keep everything in the free
+//   registers and prevent costly push / pop.
+// - Even when unaligned single loads/stores to normal memory are supported,
+//   load/store multiple (LDM, STM) and load/store double (LDRD, STRD)
+//   instructions generally still fault on unaligned accesses, so we
+//   make sure to restrict unrolling to word loads/stores.
+[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_mid_end(Ptr dst, CPtr src,
+                                                            size_t size) {
+  if (misaligned(bitwise_or(src, dst))) [[unlikely]] {
+    if (size < 8) [[unlikely]] {
+      if (size & 1)
+        copy_and_bump_pointers<1>(dst, src);
+      if (size & 2)
+        copy_and_bump_pointers<2>(dst, src);
+      if (size & 4)
+        copy_and_bump_pointers<4>(dst, src);
+      return;
+    }
+    if (misaligned(src)) [[unlikely]] {
+      const size_t offset = distance_to_align_up<kWordSize>(dst);
+      if (offset & 1)
+        copy_and_bump_pointers<1>(dst, src);
+      if (offset & 2)
+        copy_and_bump_pointers<2>(dst, src);
+      size -= offset;
+    }
+  }
+  copy_blocks<64>(dst, src, size);
+  copy_blocks<16>(dst, src, size);
+  copy_blocks<4>(dst, src, size);
+  if (size & 1)
+    copy_and_bump_pointers<1>(dst, src);
+  if (size & 2) [[unlikely]]
+    copy_and_bump_pointers<2>(dst, src);
+}
+
+[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(void *__restrict dst_,
+                                                    const void *__restrict src_,
+                                                    size_t size) {
+  Ptr dst = cpp::bit_cast<Ptr>(dst_);
+  CPtr src = cpp::bit_cast<CPtr>(src_);
+#ifdef __ARM_FEATURE_UNALIGNED
+  return inline_memcpy_arm_mid_end(dst, src, size);
+#else
+  return inline_memcpy_arm_low_end(dst, src, size);
+#endif
+}
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
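
For readers who do not have utils.h in front of them, a simplified sketch of
the alignment helpers this new file relies on (the real implementations live
in src/string/memory_utils/utils.h and are templated over the alignment; the
versions below assume a fixed 4-byte alignment):

    #include <stddef.h> // size_t
    #include <stdint.h> // uintptr_t

    // Bytes past the previous 4-byte boundary; 0 when already aligned.
    size_t distance_to_align_down_4(const void *p) {
      return reinterpret_cast<uintptr_t>(p) & 3;
    }
    // Bytes needed to reach the next 4-byte boundary; 0 when already aligned.
    size_t distance_to_align_up_4(const void *p) {
      return (4 - (reinterpret_cast<uintptr_t>(p) & 3)) & 3;
    }
    // Example: p == 0x1003 gives distance_to_align_down_4(p) == 3 and
    // distance_to_align_up_4(p) == 1.

The mid-end path above uses these to peel off a small head when `src` is
misaligned, then consumes 64-, 16- and 4-byte blocks, and finishes with the
remaining 0 to 3 bytes.
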
diff --git a/libc/src/string/memory_utils/inline_memcpy.h b/libc/src/string/memory_utils/inline_memcpy.h
index f98e55321a9b5..13975e6b3bd0e 100644
--- a/libc/src/string/memory_utils/inline_memcpy.h
+++ b/libc/src/string/memory_utils/inline_memcpy.h
@@ -22,6 +22,9 @@
 #include "src/string/memory_utils/x86_64/inline_memcpy.h"
 #define LIBC_SRC_STRING_MEMORY_UTILS_MEMCPY                                    \
   inline_memcpy_x86_maybe_interpose_repmovsb
+#elif defined(LIBC_TARGET_ARCH_IS_ARM)
+#include "src/string/memory_utils/arm/inline_memcpy.h"
+#define LIBC_SRC_STRING_MEMORY_UTILS_MEMCPY inline_memcpy_arm
 #elif defined(LIBC_TARGET_ARCH_IS_AARCH64)
 #include "src/string/memory_utils/aarch64/inline_memcpy.h"
 #define LIBC_SRC_STRING_MEMORY_UTILS_MEMCPY inline_memcpy_aarch64
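
For context, this macro is how the generic inline_memcpy entry point picks an
implementation; conceptually the dispatch boils down to something like the
following (a simplification of the surrounding file, not its exact code):

    // On ARM targets the macro now resolves to inline_memcpy_arm; x86-64 and
    // AArch64 keep their existing specialized implementations.
    LIBC_INLINE void inline_memcpy(void *__restrict dst,
                                   const void *__restrict src, size_t count) {
      LIBC_SRC_STRING_MEMORY_UTILS_MEMCPY(dst, src, count);
    }
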
diff --git a/libc/src/string/memory_utils/utils.h b/libc/src/string/memory_utils/utils.h
index bdf0b8652188b..c08608c87bb25 100644
--- a/libc/src/string/memory_utils/utils.h
+++ b/libc/src/string/memory_utils/utils.h
@@ -101,7 +101,7 @@ LIBC_INLINE void memcpy_inline(void *__restrict dst,
 }
 
 using Ptr = cpp::byte *;        // Pointer to raw data.
-using CPtr = const cpp::byte *; // Const pointer to raw data.
+using CPtr = const cpp::byte *; // Pointer to const raw data.
 
 // This type makes sure that we don't accidentally promote an integral type to
 // another one. It is only constructible from the exact T type.
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 8e629270c89d2..9e2828eaf098c 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -4218,6 +4218,7 @@ libc_support_library(
         "src/string/memory_utils/aarch64/inline_memcpy.h",
         "src/string/memory_utils/aarch64/inline_memmove.h",
         "src/string/memory_utils/aarch64/inline_memset.h",
+        "src/string/memory_utils/arm/inline_memcpy.h",
         "src/string/memory_utils/generic/aligned_access.h",
         "src/string/memory_utils/generic/byte_per_byte.h",
         "src/string/memory_utils/inline_bcmp.h",

From 82a60757c42f6169f40ef440f75c366da1bded22 Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Fri, 20 Jun 2025 06:40:18 +0000
Subject: [PATCH 2/4] Add copyright header.

---
 libc/src/string/memory_utils/arm/inline_memcpy.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index be8bc6459b6c4..910be959d927e 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -1,3 +1,10 @@
+//===-- Memcpy implementation for arm ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
 #ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
 #define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
 

From 25eac99c2038d63b9d9ed0faa6190a7e2c22dba2 Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Mon, 23 Jun 2025 13:48:28 +0000
Subject: [PATCH 3/4] Add an optimized `memcpy` version for Cortex M0 as well

---
 .../string/memory_utils/arm/inline_memcpy.h   | 92 +++++++++++++++----
 1 file changed, 74 insertions(+), 18 deletions(-)

diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index 910be959d927e..0a80774a5a4b9 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -20,19 +20,29 @@ namespace {
 
 LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
 
-template <size_t bytes>
+enum Strategy {
+  ForceWordLdStChain,
+  AssumeWordAligned,
+  AssumeUnaligned,
+};
+
+template <size_t bytes, Strategy strategy = AssumeUnaligned>
 LIBC_INLINE void copy_and_bump_pointers(Ptr &dst, CPtr &src) {
-  if constexpr (bytes == 1 || bytes == 2 || bytes == 4) {
-    memcpy_inline<bytes>(dst, src);
-  } else {
+  if constexpr (strategy == AssumeUnaligned) {
+    memcpy_inline<bytes>(assume_aligned<1>(dst), assume_aligned<1>(src));
+  } else if constexpr (strategy == AssumeWordAligned) {
+    static_assert(bytes >= kWordSize);
+    memcpy_inline<bytes>(assume_aligned<kWordSize>(dst),
+                         assume_aligned<kWordSize>(src));
+  } else if constexpr (strategy == ForceWordLdStChain) {
     // We restrict loads/stores to 4 bytes to prevent the use of load/store
     // multiple (LDM, STM) and load/store double (LDRD, STRD). First, they may
     // fault (see notes below) and second, they use more registers, which in
     // turn adds push/pop instructions in the hot path.
-    static_assert(bytes % kWordSize == 0);
+    static_assert((bytes % kWordSize == 0) && (bytes >= kWordSize));
     LIBC_LOOP_UNROLL
     for (size_t i = 0; i < bytes / kWordSize; ++i) {
-      const uintptr_t offset = i * kWordSize;
+      const size_t offset = i * kWordSize;
       memcpy_inline<kWordSize>(dst + offset, src + offset);
     }
   }
@@ -45,11 +55,19 @@ LIBC_INLINE void copy_and_bump_pointers(Ptr &dst, CPtr &src) {
   src += bytes;
 }
 
-template <size_t block_size>
-LIBC_INLINE void copy_blocks(Ptr &dst, CPtr &src, size_t &size) {
+LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
+                                              const size_t size) {
+  LIBC_LOOP_NOUNROLL
+  for (size_t i = 0; i < size; ++i)
+    *dst++ = *src++;
+}
+
+template <size_t block_size, Strategy strategy>
+LIBC_INLINE void copy_blocks_and_update_args(Ptr &dst, CPtr &src,
+                                             size_t &size) {
   LIBC_LOOP_NOUNROLL
   for (size_t i = 0; i < size / block_size; ++i)
-    copy_and_bump_pointers<block_size>(dst, src);
+    copy_and_bump_pointers<block_size, strategy>(dst, src);
   // Update `size` once at the end instead of once per iteration.
   size %= block_size;
 }
@@ -66,19 +84,57 @@ LIBC_INLINE auto misaligned(CPtr a) {
 } // namespace
 
 // Implementation for Cortex-M0, M0+, M1.
-// The implementation makes sure that all accesses are aligned.
+// Notes:
+// - It compiles down to 196 bytes, but 220 bytes when used through `memcpy`,
+//   which also needs to return the `dst` pointer.
+// - These cores do not allow for unaligned loads/stores.
+// - When `src` and `dst` are coaligned, we start by aligning them and perform
+//   bulk copies. We let the compiler know the pointers are aligned so it can
+//   use load/store multiple (LDM, STM). This significantly increases
+//   throughput, but it also requires more registers and push/pop instructions,
+//   which impacts latency for small copies.
+// - When `src` and `dst` are misaligned, we align `dst` and recompose words
+//   using multiple aligned loads. `load_aligned` takes care of endianness
+//   issues.
 [[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_low_end(Ptr dst, CPtr src,
                                                             size_t size) {
-  // For now, dummy implementation that performs byte per byte copy.
-  LIBC_LOOP_NOUNROLL
-  for (size_t i = 0; i < size; ++i)
-    dst[i] = src[i];
+  if (size >= 8) {
+    if (const size_t offset = distance_to_align_up<kWordSize>(dst))
+        [[unlikely]] {
+      copy_bytes_and_bump_pointers(dst, src, offset);
+      size -= offset;
+    }
+    const auto src_alignment = distance_to_align_down<kWordSize>(src);
+    if (src_alignment == 0) [[likely]] {
+      // Both `src` and `dst` are now word-aligned.
+      copy_blocks_and_update_args<64, AssumeWordAligned>(dst, src, size);
+      copy_blocks_and_update_args<16, AssumeWordAligned>(dst, src, size);
+      copy_blocks_and_update_args<4, AssumeWordAligned>(dst, src, size);
+    } else {
+      // `dst` is aligned but `src` is not.
+      LIBC_LOOP_NOUNROLL
+      while (size >= kWordSize) {
+        // Recompose word from multiple loads depending on the alignment.
+        const uint32_t value =
+            src_alignment == 2
+                ? load_aligned<uint32_t, uint16_t, uint16_t>(src)
+                : load_aligned<uint32_t, uint8_t, uint16_t, uint8_t>(src);
+        memcpy_inline<kWordSize>(assume_aligned<kWordSize>(dst), &value);
+        dst += kWordSize;
+        src += kWordSize;
+        size -= kWordSize;
+      }
+    }
+    // Up to 3 bytes may still need to be copied; they are handled by the
+    // byte-per-byte loop below.
+  }
+  copy_bytes_and_bump_pointers(dst, src, size);
 }
 
 // Implementation for Cortex-M3, M4, M7, M23, M33, M35P, M52 with hardware
 // support for unaligned loads and stores.
 // Notes:
-// - It compiles down to <300 bytes.
+// - It compiles down to 266 bytes.
 // - `dst` and `src` are not `__restrict` to prevent the compiler from
 //   reordering loads/stores.
 // - We keep state variables to a strict minimum to keep everything in the free
@@ -108,9 +164,9 @@ LIBC_INLINE auto misaligned(CPtr a) {
       size -= offset;
     }
   }
-  copy_blocks<64>(dst, src, size);
-  copy_blocks<16>(dst, src, size);
-  copy_blocks<4>(dst, src, size);
+  copy_blocks_and_update_args<64, ForceWordLdStChain>(dst, src, size);
+  copy_blocks_and_update_args<16, ForceWordLdStChain>(dst, src, size);
+  copy_blocks_and_update_args<4, AssumeUnaligned>(dst, src, size);
   if (size & 1)
     copy_and_bump_pointers<1>(dst, src);
   if (size & 2) [[unlikely]]

From 61b486e45372d1a6f094eb9e37f2a92713424853 Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet at google.com>
Date: Wed, 25 Jun 2025 08:44:23 +0000
Subject: [PATCH 4/4] Disable the use of `[[likely]]` / `[[unlikely]]`
 attributes for Clang 11

---
 .../string/memory_utils/arm/inline_memcpy.h   | 87 ++++++++++++-------
 1 file changed, 57 insertions(+), 30 deletions(-)

diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index 0a80774a5a4b9..61efebe29b485 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -14,6 +14,23 @@
 
 #include <stddef.h> // size_t
 
+// https://libc.llvm.org/compiler_support.html
+// Support for [[likely]] / [[unlikely]]
+//  [X] GCC 12.2
+//  [X] Clang 12
+//  [ ] Clang 11
+#define LIBC_ATTR_LIKELY [[likely]]
+#define LIBC_ATTR_UNLIKELY [[unlikely]]
+
+#if defined(LIBC_COMPILER_IS_CLANG)
+#if LIBC_COMPILER_CLANG_VER < 1200
+#undef LIBC_ATTR_LIKELY
+#undef LIBC_ATTR_UNLIKELY
+#define LIBC_ATTR_LIKELY
+#define LIBC_ATTR_UNLIKELY
+#endif
+#endif
+
 namespace LIBC_NAMESPACE_DECL {
 
 namespace {
@@ -100,17 +117,19 @@ LIBC_INLINE auto misaligned(CPtr a) {
                                                             size_t size) {
   if (size >= 8) {
     if (const size_t offset = distance_to_align_up<kWordSize>(dst))
-        [[unlikely]] {
-      copy_bytes_and_bump_pointers(dst, src, offset);
-      size -= offset;
-    }
+      LIBC_ATTR_UNLIKELY {
+        copy_bytes_and_bump_pointers(dst, src, offset);
+        size -= offset;
+      }
     const auto src_alignment = distance_to_align_down<kWordSize>(src);
-    if (src_alignment == 0) [[likely]] {
-      // Both `src` and `dst` are now word-aligned.
-      copy_blocks_and_update_args<64, AssumeWordAligned>(dst, src, size);
-      copy_blocks_and_update_args<16, AssumeWordAligned>(dst, src, size);
-      copy_blocks_and_update_args<4, AssumeWordAligned>(dst, src, size);
-    } else {
+    if (src_alignment == 0)
+      LIBC_ATTR_LIKELY {
+        // Both `src` and `dst` are now word-aligned.
+        copy_blocks_and_update_args<64, AssumeWordAligned>(dst, src, size);
+        copy_blocks_and_update_args<16, AssumeWordAligned>(dst, src, size);
+        copy_blocks_and_update_args<4, AssumeWordAligned>(dst, src, size);
+      }
+    else {
       // `dst` is aligned but `src` is not.
       LIBC_LOOP_NOUNROLL
       while (size >= kWordSize) {
@@ -145,32 +164,36 @@ LIBC_INLINE auto misaligned(CPtr a) {
 //   make sure to restrict unrolling to word loads/stores.
 [[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_mid_end(Ptr dst, CPtr src,
                                                             size_t size) {
-  if (misaligned(bitwise_or(src, dst))) [[unlikely]] {
-    if (size < 8) [[unlikely]] {
-      if (size & 1)
-        copy_and_bump_pointers<1>(dst, src);
-      if (size & 2)
-        copy_and_bump_pointers<2>(dst, src);
-      if (size & 4)
-        copy_and_bump_pointers<4>(dst, src);
-      return;
-    }
-    if (misaligned(src)) [[unlikely]] {
-      const size_t offset = distance_to_align_up<kWordSize>(dst);
-      if (offset & 1)
-        copy_and_bump_pointers<1>(dst, src);
-      if (offset & 2)
-        copy_and_bump_pointers<2>(dst, src);
-      size -= offset;
+  if (misaligned(bitwise_or(src, dst)))
+    LIBC_ATTR_UNLIKELY {
+      if (size < 8)
+        LIBC_ATTR_UNLIKELY {
+          if (size & 1)
+            copy_and_bump_pointers<1>(dst, src);
+          if (size & 2)
+            copy_and_bump_pointers<2>(dst, src);
+          if (size & 4)
+            copy_and_bump_pointers<4>(dst, src);
+          return;
+        }
+      if (misaligned(src))
+        LIBC_ATTR_UNLIKELY {
+          const size_t offset = distance_to_align_up<kWordSize>(dst);
+          if (offset & 1)
+            copy_and_bump_pointers<1>(dst, src);
+          if (offset & 2)
+            copy_and_bump_pointers<2>(dst, src);
+          size -= offset;
+        }
     }
-  }
   copy_blocks_and_update_args<64, ForceWordLdStChain>(dst, src, size);
   copy_blocks_and_update_args<16, ForceWordLdStChain>(dst, src, size);
   copy_blocks_and_update_args<4, AssumeUnaligned>(dst, src, size);
   if (size & 1)
     copy_and_bump_pointers<1>(dst, src);
-  if (size & 2) [[unlikely]]
-    copy_and_bump_pointers<2>(dst, src);
+  if (size & 2)
+    LIBC_ATTR_UNLIKELY
+  copy_and_bump_pointers<2>(dst, src);
 }
 
 [[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(void *__restrict dst_,
@@ -187,4 +210,8 @@ LIBC_INLINE auto misaligned(CPtr a) {
 
 } // namespace LIBC_NAMESPACE_DECL
 
+// Cleanup local macros
+#undef LIBC_ATTR_LIKELY
+#undef LIBC_ATTR_UNLIKELY
+
 #endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
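
The two LIBC_ATTR_* macros simply gate the standard C++20 branch-probability
attributes on compiler support: on Clang 12+ and GCC they expand to
[[likely]] / [[unlikely]], while on Clang 11 they expand to nothing and the
hint is dropped. A small illustration of the pattern used at the call sites
above (illustrative only):

    // On supported compilers the attribute annotates the compound statement
    // that follows the condition:
    if (size < 8)
      LIBC_ATTR_UNLIKELY { // expands to [[unlikely]] on Clang >= 12
        // rarely-taken small-copy path
      }
    // On Clang 11 the same source compiles to a plain, unannotated branch.
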


