[llvm] [MemCpyOpt] Calculate the offset value to forward `memcpy` (PR #87190)

Quentin Dian via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 31 05:45:46 PDT 2024


https://github.com/DianQK created https://github.com/llvm/llvm-project/pull/87190

Fixes #85560.

We can forward the `memcpy` as long as the actual memory location being copied has not been altered.

alive2: https://alive2.llvm.org/ce/z/q9JaHV
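
To illustrate, here is a rough before/after sketch of the transformation, mirroring the @forward_offset_memcpy test added below (the %src.fwd name is only for illustration; the pass emits an equivalent GEP):

  ; Before: the second memcpy reads through %src, which points 1 byte into
  ; %dep_dest, a buffer that was itself filled from %dep_src.
  %dep_dest = alloca [7 x i8], align 1
  %dest = alloca [7 x i8], align 1
  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 7, i1 false)
  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 6, i1 false)

  ; After: as long as the copied bytes are not modified between the two calls,
  ; the second memcpy can be forwarded to read from %dep_src at the same offset.
  %src.fwd = getelementptr inbounds i8, ptr %dep_src, i64 1
  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src.fwd, i64 6, i1 false)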

From 024faa7e7e6cd96e4faec9e4669326cc92091732 Mon Sep 17 00:00:00 2001
From: DianQK <dianqk at dianqk.net>
Date: Sun, 31 Mar 2024 17:58:52 +0800
Subject: [PATCH 1/2] Pre-commit test cases

---
 .../MemCpyOpt/memcpy-memcpy-offset.ll         | 176 ++++++++++++++++++
 1 file changed, 176 insertions(+)
 create mode 100644 llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll

diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll
new file mode 100644
index 00000000000000..565fd089631ca8
--- /dev/null
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s
+
+%buf = type [7 x i8]
+
+; We can forward `memcpy` because the copy locations are the same.
+define void @forward_offset(ptr %dep_src) {
+; CHECK-LABEL: define void @forward_offset(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 7, i1 false)
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  %dest = getelementptr inbounds i8, ptr %dep_src, i64 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 6, i1 false)
+  ret void
+}
+
+; We need to update the align value when forwarding.
+define void @forward_offset_align(ptr %dep_src) {
+; CHECK-LABEL: define void @forward_offset_align(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 2 [[DEP_SRC]], i64 7, i1 false)
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 2 %dep_src, i64 7, i1 false)
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  %dest = getelementptr inbounds i8, ptr %dep_src, i64 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 6, i1 false)
+  ret void
+}
+
+; We need to create a GEP instruction when forwarding.
+define void @forward_offset_with_gep(ptr %dep_src) {
+; CHECK-LABEL: define void @forward_offset_with_gep(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    [[DEP1:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 2
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP1]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 7, i1 false)
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  %dest = getelementptr inbounds i8, ptr %dep_src, i64 2
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 6, i1 false)
+  ret void
+}
+
+; Make sure we pass the right parameters when calling `memcpy`.
+define void @forward_offset_memcpy(ptr %dep_src) {
+; CHECK-LABEL: define void @forward_offset_memcpy(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    [[DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    call void @use(ptr [[DEST]])
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  %dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 7, i1 false)
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 6, i1 false)
+  call void @use(ptr %dest)
+  ret void
+}
+
+; Make sure we pass the right parameters when calling `memcpy.inline`.
+define void @forward_offset_memcpy_inline(ptr %dep_src) {
+; CHECK-LABEL: define void @forward_offset_memcpy_inline(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    [[DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    call void @use(ptr [[DEST]])
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  %dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 7, i1 false)
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 6, i1 false)
+  call void @use(ptr %dest)
+  ret void
+}
+
+; We cannot forward `memcpy` because the offset plus the copy size exceeds the size of the `memcpy` it depends on.
+define void @do_not_forward_oversize_offset(ptr %dep_src) {
+; CHECK-LABEL: define void @do_not_forward_oversize_offset(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 6, i1 false)
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 6, i1 false)
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  %dest = getelementptr inbounds i8, ptr %dep_src, i64 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 6, i1 false)
+  ret void
+}
+
+; We can forward `memcpy` because the write operation does not corrupt the location to be copied.
+define void @forward_offset_and_store(ptr %dep_src) {
+; CHECK-LABEL: define void @forward_offset_and_store(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
+; CHECK-NEXT:    store i8 1, ptr [[DEP_SRC]], align 1
+; CHECK-NEXT:    [[DEP_SRC_END:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 6
+; CHECK-NEXT:    store i8 1, ptr [[DEP_SRC_END]], align 1
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[SRC]], i64 5, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 7, i1 false)
+  store i8 1, ptr %dep_src, align 1
+  %dep_src_end = getelementptr inbounds i8, ptr %dep_src, i64 6
+  store i8 1, ptr %dep_src_end, align 1
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  %dest = getelementptr inbounds i8, ptr %dep_src, i64 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 5, i1 false)
+  ret void
+}
+
+; We cannot forward `memcpy` because the write operation alters the location to be copied.
+; Also, make sure we have removed the GEP instruction that was created temporarily.
+define void @do_not_forward_offset_and_store(ptr %dep_src) {
+; CHECK-LABEL: define void @do_not_forward_offset_and_store(
+; CHECK-SAME: ptr [[DEP_SRC:%.*]]) {
+; CHECK-NEXT:    [[DEP_DEST:%.*]] = alloca [7 x i8], align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
+; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    store i8 1, ptr [[DEP]], align 1
+; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
+; CHECK-NEXT:    [[DEST:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 2
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[SRC]], i64 5, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %dep_dest = alloca %buf, align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dep_dest, ptr align 1 %dep_src, i64 7, i1 false)
+  %dep_src_offset = getelementptr inbounds i8, ptr %dep_src, i64 1
+  store i8 1, ptr %dep_src_offset, align 1
+  %src = getelementptr inbounds i8, ptr %dep_dest, i64 1
+  %dest = getelementptr inbounds i8, ptr %dep_src, i64 2
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %src, i64 5, i1 false)
+  ret void
+}
+
+declare void @use(ptr)
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)

From 16454b7658974dfee96232b76881d75c8f7c1bbe Mon Sep 17 00:00:00 2001
From: DianQK <dianqk at dianqk.net>
Date: Sun, 31 Mar 2024 18:01:18 +0800
Subject: [PATCH 2/2] [MemCpyOpt] Calculate the offset value to forward
 `memcpy`

---
 .../lib/Transforms/Scalar/MemCpyOptimizer.cpp | 84 +++++++++++++------
 .../MemCpyOpt/memcpy-memcpy-offset.ll         | 15 ++--
 2 files changed, 69 insertions(+), 30 deletions(-)

diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 1036b8ae963a24..86c3f8b9bb335a 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1121,28 +1121,64 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
 bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                   MemCpyInst *MDep,
                                                   BatchAAResults &BAA) {
-  // We can only transforms memcpy's where the dest of one is the source of the
-  // other.
-  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
-    return false;
-
   // If dep instruction is reading from our current input, then it is a noop
-  // transfer and substituting the input won't change this instruction.  Just
-  // ignore the input and let someone else zap MDep.  This handles cases like:
+  // transfer and substituting the input won't change this instruction. Just
+  // ignore the input and let someone else zap MDep. This handles cases like:
   //    memcpy(a <- a)
   //    memcpy(b <- a)
   if (M->getSource() == MDep->getSource())
     return false;
 
-  // Second, the length of the memcpy's must be the same, or the preceding one
+  // We can only optimize non-volatile memcpy's.
+  if (MDep->isVolatile())
+    return false;
+
+  int64_t MForwardOffset = 0;
+  const DataLayout &DL = M->getModule()->getDataLayout();
+  // We can only transform memcpy's where the dest of one is the source of the
+  // other, or where the source is at a non-negative constant offset from it.
+  if (M->getSource() != MDep->getDest()) {
+    std::optional<int64_t> Offset =
+        M->getSource()->getPointerOffsetFrom(MDep->getDest(), DL);
+    if (!Offset || *Offset < 0)
+      return false;
+    MForwardOffset = *Offset;
+  }
+
+  // The length of the memcpy's must be the same, or the preceding one
   // must be larger than the following one.
-  if (MDep->getLength() != M->getLength()) {
+  if (MForwardOffset != 0 || (MDep->getLength() != M->getLength())) {
     auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
     auto *MLen = dyn_cast<ConstantInt>(M->getLength());
-    if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
+    if (!MDepLen || !MLen)
+      return false;
+    if (MDepLen->getZExtValue() < MLen->getZExtValue() + MForwardOffset)
       return false;
   }
 
+  IRBuilder<> Builder(M);
+  auto *CopySource = MDep->getRawSource();
+  MaybeAlign CopySourceAlign = MDep->getSourceAlign();
+  // We just need to calculate the actual size of the copy.
+  auto MCopyLoc = MemoryLocation::getForSource(MDep).getWithNewSize(
+      MemoryLocation::getForSource(M).Size);
+
+  // We need to update `MCopyLoc` if an offset exists.
+  if (MForwardOffset > 0) {
+  // The copy destination of `M` may itself serve as the copy source.
+    std::optional<int64_t> MDestOffset =
+        M->getRawDest()->getPointerOffsetFrom(MDep->getRawSource(), DL);
+    if (MDestOffset && *MDestOffset == MForwardOffset)
+      CopySource = M->getRawDest();
+    else
+      CopySource = Builder.CreateInBoundsPtrAdd(
+          CopySource, ConstantInt::get(Type::getInt64Ty(Builder.getContext()),
+                                       MForwardOffset));
+    MCopyLoc = MCopyLoc.getWithNewPtr(CopySource);
+    if (CopySourceAlign)
+      CopySourceAlign = commonAlignment(*CopySourceAlign, MForwardOffset);
+  }
+
   // Verify that the copied-from memory doesn't change in between the two
   // transfers.  For example, in:
   //    memcpy(a <- b)
@@ -1152,11 +1188,12 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
   //
   // TODO: If the code between M and MDep is transparent to the destination "c",
   // then we could still perform the xform by moving M up to the first memcpy.
-  // TODO: It would be sufficient to check the MDep source up to the memcpy
-  // size of M, rather than MDep.
-  if (writtenBetween(MSSA, BAA, MemoryLocation::getForSource(MDep),
-                     MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
+  if (writtenBetween(MSSA, BAA, MCopyLoc, MSSA->getMemoryAccess(MDep),
+                     MSSA->getMemoryAccess(M))) {
+    if (MForwardOffset > 0 && CopySource->use_empty())
+      cast<Instruction>(CopySource)->eraseFromParent();
     return false;
+  }
 
   // If the dest of the second might alias the source of the first, then the
   // source and dest might overlap. In addition, if the source of the first
@@ -1179,23 +1216,22 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
 
   // TODO: Is this worth it if we're creating a less aligned memcpy? For
   // example we could be moving from movaps -> movq on x86.
-  IRBuilder<> Builder(M);
   Instruction *NewM;
   if (UseMemMove)
-    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
-                                 MDep->getRawSource(), MDep->getSourceAlign(),
-                                 M->getLength(), M->isVolatile());
+    NewM =
+        Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(), CopySource,
+                              CopySourceAlign, M->getLength(), M->isVolatile());
   else if (isa<MemCpyInlineInst>(M)) {
     // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
     // never allowed since that would allow the latter to be lowered as a call
     // to an external function.
-    NewM = Builder.CreateMemCpyInline(
-        M->getRawDest(), M->getDestAlign(), MDep->getRawSource(),
-        MDep->getSourceAlign(), M->getLength(), M->isVolatile());
+    NewM = Builder.CreateMemCpyInline(M->getRawDest(), M->getDestAlign(),
+                                      CopySource, CopySourceAlign,
+                                      M->getLength(), M->isVolatile());
   } else
-    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
-                                MDep->getRawSource(), MDep->getSourceAlign(),
-                                M->getLength(), M->isVolatile());
+    NewM =
+        Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(), CopySource,
+                             CopySourceAlign, M->getLength(), M->isVolatile());
   NewM->copyMetadata(*M, LLVMContext::MD_DIAssignID);
 
   assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll
index 565fd089631ca8..abf051d55fc8b2 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll
@@ -11,7 +11,7 @@ define void @forward_offset(ptr %dep_src) {
 ; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
 ; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
 ; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[DEP]], i64 6, i1 false)
 ; CHECK-NEXT:    ret void
 ;
   %dep_dest = alloca %buf, align 1
@@ -30,7 +30,7 @@ define void @forward_offset_align(ptr %dep_src) {
 ; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 2 [[DEP_SRC]], i64 7, i1 false)
 ; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
 ; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[DEP]], i64 6, i1 false)
 ; CHECK-NEXT:    ret void
 ;
   %dep_dest = alloca %buf, align 1
@@ -49,7 +49,8 @@ define void @forward_offset_with_gep(ptr %dep_src) {
 ; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
 ; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
 ; CHECK-NEXT:    [[DEP1:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 2
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP1]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEP1]], ptr align 1 [[TMP1]], i64 6, i1 false)
 ; CHECK-NEXT:    ret void
 ;
   %dep_dest = alloca %buf, align 1
@@ -68,7 +69,8 @@ define void @forward_offset_memcpy(ptr %dep_src) {
 ; CHECK-NEXT:    [[DEST:%.*]] = alloca [7 x i8], align 1
 ; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
 ; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 6, i1 false)
 ; CHECK-NEXT:    call void @use(ptr [[DEST]])
 ; CHECK-NEXT:    ret void
 ;
@@ -89,7 +91,8 @@ define void @forward_offset_memcpy_inline(ptr %dep_src) {
 ; CHECK-NEXT:    [[DEST:%.*]] = alloca [7 x i8], align 1
 ; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[DEP_SRC]], i64 7, i1 false)
 ; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
-; CHECK-NEXT:    call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[SRC]], i64 6, i1 false)
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
+; CHECK-NEXT:    call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 6, i1 false)
 ; CHECK-NEXT:    call void @use(ptr [[DEST]])
 ; CHECK-NEXT:    ret void
 ;
@@ -132,7 +135,7 @@ define void @forward_offset_and_store(ptr %dep_src) {
 ; CHECK-NEXT:    store i8 1, ptr [[DEP_SRC_END]], align 1
 ; CHECK-NEXT:    [[SRC:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
 ; CHECK-NEXT:    [[DEP:%.*]] = getelementptr inbounds i8, ptr [[DEP_SRC]], i64 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[SRC]], i64 5, i1 false)
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEP]], ptr align 1 [[DEP]], i64 5, i1 false)
 ; CHECK-NEXT:    ret void
 ;
   %dep_dest = alloca %buf, align 1


