[llvm] [RISCV] Add test coverage for memmove (PR #120232)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 17 22:23:02 PST 2024


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/120232

From 90d2f7ef21c4c723756f68e95c174d3fb4be3109 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 17 Dec 2024 21:22:09 +0800
Subject: [PATCH] [RISCV] Add test coverage for memmove

---
 llvm/test/CodeGen/RISCV/memmove.ll | 671 +++++++++++++++++++++++++++++
 1 file changed, 671 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/memmove.ll

diff --git a/llvm/test/CodeGen/RISCV/memmove.ll b/llvm/test/CodeGen/RISCV/memmove.ll
new file mode 100644
index 00000000000000..9fefe38c219cdd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/memmove.ll
@@ -0,0 +1,671 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
+; RUN: llc < %s -mtriple=riscv64 \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
+
+; ----------------------------------------------------------------------
+; Fully unaligned cases
+
+define void @unaligned_memcpy0(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: unaligned_memcpy0:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: unaligned_memcpy0:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 0, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy1(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: unaligned_memcpy1:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lbu a1, 0(a1)
+; RV32-BOTH-NEXT:    sb a1, 0(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: unaligned_memcpy1:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lbu a1, 0(a1)
+; RV64-BOTH-NEXT:    sb a1, 0(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 1, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy2(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-LABEL: unaligned_memcpy2:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lbu a2, 0(a1)
+; RV32-NEXT:    lbu a1, 1(a1)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: unaligned_memcpy2:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lbu a2, 0(a1)
+; RV64-NEXT:    lbu a1, 1(a1)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: unaligned_memcpy2:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lh a1, 0(a1)
+; RV32-FAST-NEXT:    sh a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: unaligned_memcpy2:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lh a1, 0(a1)
+; RV64-FAST-NEXT:    sh a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 2, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy3(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-LABEL: unaligned_memcpy3:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lbu a2, 0(a1)
+; RV32-NEXT:    lbu a3, 1(a1)
+; RV32-NEXT:    lbu a1, 2(a1)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: unaligned_memcpy3:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lbu a2, 0(a1)
+; RV64-NEXT:    lbu a3, 1(a1)
+; RV64-NEXT:    lbu a1, 2(a1)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    sb a3, 1(a0)
+; RV64-NEXT:    sb a1, 2(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: unaligned_memcpy3:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lh a2, 0(a1)
+; RV32-FAST-NEXT:    lbu a1, 2(a1)
+; RV32-FAST-NEXT:    sh a2, 0(a0)
+; RV32-FAST-NEXT:    sb a1, 2(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: unaligned_memcpy3:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lh a2, 0(a1)
+; RV64-FAST-NEXT:    lbu a1, 2(a1)
+; RV64-FAST-NEXT:    sh a2, 0(a0)
+; RV64-FAST-NEXT:    sb a1, 2(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 3, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy4(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-LABEL: unaligned_memcpy4:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lbu a2, 0(a1)
+; RV32-NEXT:    lbu a3, 1(a1)
+; RV32-NEXT:    lbu a4, 2(a1)
+; RV32-NEXT:    lbu a1, 3(a1)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: unaligned_memcpy4:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lbu a2, 0(a1)
+; RV64-NEXT:    lbu a3, 1(a1)
+; RV64-NEXT:    lbu a4, 2(a1)
+; RV64-NEXT:    lbu a1, 3(a1)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    sb a3, 1(a0)
+; RV64-NEXT:    sb a4, 2(a0)
+; RV64-NEXT:    sb a1, 3(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: unaligned_memcpy4:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: unaligned_memcpy4:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lw a1, 0(a1)
+; RV64-FAST-NEXT:    sw a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 4, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy7(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-LABEL: unaligned_memcpy7:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lbu a2, 4(a1)
+; RV32-NEXT:    lbu a3, 5(a1)
+; RV32-NEXT:    lbu a4, 6(a1)
+; RV32-NEXT:    lbu a5, 0(a1)
+; RV32-NEXT:    lbu a6, 1(a1)
+; RV32-NEXT:    lbu a7, 2(a1)
+; RV32-NEXT:    lbu a1, 3(a1)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    sb a3, 5(a0)
+; RV32-NEXT:    sb a4, 6(a0)
+; RV32-NEXT:    sb a5, 0(a0)
+; RV32-NEXT:    sb a6, 1(a0)
+; RV32-NEXT:    sb a7, 2(a0)
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: unaligned_memcpy7:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lbu a2, 4(a1)
+; RV64-NEXT:    lbu a3, 5(a1)
+; RV64-NEXT:    lbu a4, 6(a1)
+; RV64-NEXT:    lbu a5, 0(a1)
+; RV64-NEXT:    lbu a6, 1(a1)
+; RV64-NEXT:    lbu a7, 2(a1)
+; RV64-NEXT:    lbu a1, 3(a1)
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    sb a3, 5(a0)
+; RV64-NEXT:    sb a4, 6(a0)
+; RV64-NEXT:    sb a5, 0(a0)
+; RV64-NEXT:    sb a6, 1(a0)
+; RV64-NEXT:    sb a7, 2(a0)
+; RV64-NEXT:    sb a1, 3(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: unaligned_memcpy7:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lw a2, 0(a1)
+; RV32-FAST-NEXT:    lh a3, 4(a1)
+; RV32-FAST-NEXT:    lbu a1, 6(a1)
+; RV32-FAST-NEXT:    sw a2, 0(a0)
+; RV32-FAST-NEXT:    sh a3, 4(a0)
+; RV32-FAST-NEXT:    sb a1, 6(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: unaligned_memcpy7:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lw a2, 0(a1)
+; RV64-FAST-NEXT:    lh a3, 4(a1)
+; RV64-FAST-NEXT:    lbu a1, 6(a1)
+; RV64-FAST-NEXT:    sw a2, 0(a0)
+; RV64-FAST-NEXT:    sh a3, 4(a0)
+; RV64-FAST-NEXT:    sb a1, 6(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 7, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy8(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-LABEL: unaligned_memcpy8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lbu a2, 0(a1)
+; RV32-NEXT:    lbu a3, 1(a1)
+; RV32-NEXT:    lbu a4, 2(a1)
+; RV32-NEXT:    lbu a5, 3(a1)
+; RV32-NEXT:    lbu a6, 4(a1)
+; RV32-NEXT:    lbu a7, 5(a1)
+; RV32-NEXT:    lbu t0, 6(a1)
+; RV32-NEXT:    lbu a1, 7(a1)
+; RV32-NEXT:    sb a6, 4(a0)
+; RV32-NEXT:    sb a7, 5(a0)
+; RV32-NEXT:    sb t0, 6(a0)
+; RV32-NEXT:    sb a1, 7(a0)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    sb a5, 3(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: unaligned_memcpy8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lbu a2, 0(a1)
+; RV64-NEXT:    lbu a3, 1(a1)
+; RV64-NEXT:    lbu a4, 2(a1)
+; RV64-NEXT:    lbu a5, 3(a1)
+; RV64-NEXT:    lbu a6, 4(a1)
+; RV64-NEXT:    lbu a7, 5(a1)
+; RV64-NEXT:    lbu t0, 6(a1)
+; RV64-NEXT:    lbu a1, 7(a1)
+; RV64-NEXT:    sb a6, 4(a0)
+; RV64-NEXT:    sb a7, 5(a0)
+; RV64-NEXT:    sb t0, 6(a0)
+; RV64-NEXT:    sb a1, 7(a0)
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    sb a3, 1(a0)
+; RV64-NEXT:    sb a4, 2(a0)
+; RV64-NEXT:    sb a5, 3(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: unaligned_memcpy8:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lw a2, 0(a1)
+; RV32-FAST-NEXT:    lw a1, 4(a1)
+; RV32-FAST-NEXT:    sw a2, 0(a0)
+; RV32-FAST-NEXT:    sw a1, 4(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: unaligned_memcpy8:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    ld a1, 0(a1)
+; RV64-FAST-NEXT:    sd a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 8, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy15(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-LABEL: unaligned_memcpy15:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    li a2, 15
+; RV32-NEXT:    tail memmove
+;
+; RV64-LABEL: unaligned_memcpy15:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    li a2, 15
+; RV64-NEXT:    tail memmove
+;
+; RV32-FAST-LABEL: unaligned_memcpy15:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lbu a2, 14(a1)
+; RV32-FAST-NEXT:    lw a3, 0(a1)
+; RV32-FAST-NEXT:    lw a4, 4(a1)
+; RV32-FAST-NEXT:    lw a5, 8(a1)
+; RV32-FAST-NEXT:    lh a1, 12(a1)
+; RV32-FAST-NEXT:    sb a2, 14(a0)
+; RV32-FAST-NEXT:    sw a3, 0(a0)
+; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    sw a5, 8(a0)
+; RV32-FAST-NEXT:    sh a1, 12(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: unaligned_memcpy15:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    ld a2, 0(a1)
+; RV64-FAST-NEXT:    lw a3, 8(a1)
+; RV64-FAST-NEXT:    lh a4, 12(a1)
+; RV64-FAST-NEXT:    lbu a1, 14(a1)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sw a3, 8(a0)
+; RV64-FAST-NEXT:    sh a4, 12(a0)
+; RV64-FAST-NEXT:    sb a1, 14(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 15, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy16(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-LABEL: unaligned_memcpy16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    tail memmove
+;
+; RV64-LABEL: unaligned_memcpy16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    li a2, 16
+; RV64-NEXT:    tail memmove
+;
+; RV32-FAST-LABEL: unaligned_memcpy16:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lw a2, 0(a1)
+; RV32-FAST-NEXT:    lw a3, 4(a1)
+; RV32-FAST-NEXT:    lw a4, 8(a1)
+; RV32-FAST-NEXT:    lw a1, 12(a1)
+; RV32-FAST-NEXT:    sw a2, 0(a0)
+; RV32-FAST-NEXT:    sw a3, 4(a0)
+; RV32-FAST-NEXT:    sw a4, 8(a0)
+; RV32-FAST-NEXT:    sw a1, 12(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: unaligned_memcpy16:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    ld a2, 0(a1)
+; RV64-FAST-NEXT:    ld a1, 8(a1)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sd a1, 8(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 16, i1 false)
+  ret void
+}
+
+define void @unaligned_memcpy31(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: unaligned_memcpy31:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    li a2, 31
+; RV32-BOTH-NEXT:    tail memmove
+;
+; RV64-LABEL: unaligned_memcpy31:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    li a2, 31
+; RV64-NEXT:    tail memmove
+;
+; RV64-FAST-LABEL: unaligned_memcpy31:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lh a2, 28(a1)
+; RV64-FAST-NEXT:    lbu a3, 30(a1)
+; RV64-FAST-NEXT:    ld a4, 0(a1)
+; RV64-FAST-NEXT:    ld a5, 8(a1)
+; RV64-FAST-NEXT:    ld a6, 16(a1)
+; RV64-FAST-NEXT:    lw a1, 24(a1)
+; RV64-FAST-NEXT:    sh a2, 28(a0)
+; RV64-FAST-NEXT:    sb a3, 30(a0)
+; RV64-FAST-NEXT:    sd a4, 0(a0)
+; RV64-FAST-NEXT:    sd a5, 8(a0)
+; RV64-FAST-NEXT:    sd a6, 16(a0)
+; RV64-FAST-NEXT:    sw a1, 24(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 31, i1 false)
+  ret void
+}
+
+; ----------------------------------------------------------------------
+; Fully aligned cases
+
+define void @aligned_memcpy0(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy0:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy0:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 0, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy1(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy1:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lbu a1, 0(a1)
+; RV32-BOTH-NEXT:    sb a1, 0(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy1:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lbu a1, 0(a1)
+; RV64-BOTH-NEXT:    sb a1, 0(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 1, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy2(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy2:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lh a1, 0(a1)
+; RV32-BOTH-NEXT:    sh a1, 0(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy2:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lh a1, 0(a1)
+; RV64-BOTH-NEXT:    sh a1, 0(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 2, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy3(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy3:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lh a2, 0(a1)
+; RV32-BOTH-NEXT:    lbu a1, 2(a1)
+; RV32-BOTH-NEXT:    sh a2, 0(a0)
+; RV32-BOTH-NEXT:    sb a1, 2(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy3:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lh a2, 0(a1)
+; RV64-BOTH-NEXT:    lbu a1, 2(a1)
+; RV64-BOTH-NEXT:    sh a2, 0(a0)
+; RV64-BOTH-NEXT:    sb a1, 2(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 3, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy4(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy4:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    sw a1, 0(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy4:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lw a1, 0(a1)
+; RV64-BOTH-NEXT:    sw a1, 0(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 4, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy7(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy7:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lh a3, 4(a1)
+; RV32-BOTH-NEXT:    lbu a1, 6(a1)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    sh a3, 4(a0)
+; RV32-BOTH-NEXT:    sb a1, 6(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy7:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lw a2, 0(a1)
+; RV64-BOTH-NEXT:    lh a3, 4(a1)
+; RV64-BOTH-NEXT:    lbu a1, 6(a1)
+; RV64-BOTH-NEXT:    sw a2, 0(a0)
+; RV64-BOTH-NEXT:    sh a3, 4(a0)
+; RV64-BOTH-NEXT:    sb a1, 6(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 7, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy8(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy8:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    lw a1, 4(a1)
+; RV32-BOTH-NEXT:    sw a1, 4(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy8:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    ld a1, 0(a1)
+; RV64-BOTH-NEXT:    sd a1, 0(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 8, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy15(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy15:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lh a4, 12(a1)
+; RV32-BOTH-NEXT:    lbu a5, 14(a1)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    lw a1, 4(a1)
+; RV32-BOTH-NEXT:    sw a1, 4(a0)
+; RV32-BOTH-NEXT:    sw a3, 8(a0)
+; RV32-BOTH-NEXT:    sh a4, 12(a0)
+; RV32-BOTH-NEXT:    sb a5, 14(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy15:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    ld a2, 0(a1)
+; RV64-BOTH-NEXT:    lw a3, 8(a1)
+; RV64-BOTH-NEXT:    lh a4, 12(a1)
+; RV64-BOTH-NEXT:    lbu a1, 14(a1)
+; RV64-BOTH-NEXT:    sd a2, 0(a0)
+; RV64-BOTH-NEXT:    sw a3, 8(a0)
+; RV64-BOTH-NEXT:    sh a4, 12(a0)
+; RV64-BOTH-NEXT:    sb a1, 14(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 15, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy16(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy16:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    lw a2, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 12(a1)
+; RV32-BOTH-NEXT:    sw a2, 4(a0)
+; RV32-BOTH-NEXT:    sw a3, 8(a0)
+; RV32-BOTH-NEXT:    sw a1, 12(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_memcpy16:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    ld a2, 0(a1)
+; RV64-BOTH-NEXT:    ld a1, 8(a1)
+; RV64-BOTH-NEXT:    sd a2, 0(a0)
+; RV64-BOTH-NEXT:    sd a1, 8(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 16, i1 false)
+  ret void
+}
+
+define void @aligned_memcpy31(ptr nocapture %dest, ptr %src) nounwind {
+; RV32-BOTH-LABEL: aligned_memcpy31:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    li a2, 31
+; RV32-BOTH-NEXT:    tail memmove
+;
+; RV64-BOTH-LABEL: aligned_memcpy31:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lh a2, 28(a1)
+; RV64-BOTH-NEXT:    lbu a3, 30(a1)
+; RV64-BOTH-NEXT:    ld a4, 0(a1)
+; RV64-BOTH-NEXT:    ld a5, 8(a1)
+; RV64-BOTH-NEXT:    ld a6, 16(a1)
+; RV64-BOTH-NEXT:    lw a1, 24(a1)
+; RV64-BOTH-NEXT:    sh a2, 28(a0)
+; RV64-BOTH-NEXT:    sb a3, 30(a0)
+; RV64-BOTH-NEXT:    sd a4, 0(a0)
+; RV64-BOTH-NEXT:    sd a5, 8(a0)
+; RV64-BOTH-NEXT:    sd a6, 16(a0)
+; RV64-BOTH-NEXT:    sw a1, 24(a0)
+; RV64-BOTH-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 31, i1 false)
+  ret void
+}
+
+; ------------------------------------------------------------------------
+; A few partially aligned cases
+
+
+define void @memcpy16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind {
+; RV32-BOTH-LABEL: memcpy16_align4:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lw a3, 4(a1)
+; RV32-BOTH-NEXT:    lw a4, 8(a1)
+; RV32-BOTH-NEXT:    lw a1, 12(a1)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    sw a3, 4(a0)
+; RV32-BOTH-NEXT:    sw a4, 8(a0)
+; RV32-BOTH-NEXT:    sw a1, 12(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-LABEL: memcpy16_align4:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lw a2, 0(a1)
+; RV64-NEXT:    lw a3, 4(a1)
+; RV64-NEXT:    lw a4, 8(a1)
+; RV64-NEXT:    lw a1, 12(a1)
+; RV64-NEXT:    sw a2, 0(a0)
+; RV64-NEXT:    sw a3, 4(a0)
+; RV64-NEXT:    sw a4, 8(a0)
+; RV64-NEXT:    sw a1, 12(a0)
+; RV64-NEXT:    ret
+;
+; RV64-FAST-LABEL: memcpy16_align4:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    ld a2, 0(a1)
+; RV64-FAST-NEXT:    ld a1, 8(a1)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sd a1, 8(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 16, i1 false)
+  ret void
+}
+
+define i32 @memcpy11_align8(ptr nocapture %dest, ptr %src) {
+; RV32-BOTH-LABEL: memcpy11_align8:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lh a3, 8(a1)
+; RV32-BOTH-NEXT:    lbu a4, 10(a1)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    lw a1, 4(a1)
+; RV32-BOTH-NEXT:    sw a1, 4(a0)
+; RV32-BOTH-NEXT:    sh a3, 8(a0)
+; RV32-BOTH-NEXT:    sb a4, 10(a0)
+; RV32-BOTH-NEXT:    li a0, 0
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: memcpy11_align8:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    ld a2, 0(a1)
+; RV64-BOTH-NEXT:    lh a3, 8(a1)
+; RV64-BOTH-NEXT:    lbu a1, 10(a1)
+; RV64-BOTH-NEXT:    sd a2, 0(a0)
+; RV64-BOTH-NEXT:    sh a3, 8(a0)
+; RV64-BOTH-NEXT:    sb a1, 10(a0)
+; RV64-BOTH-NEXT:    li a0, 0
+; RV64-BOTH-NEXT:    ret
+entry:
+  call void @llvm.memmove.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 11, i1 false)
+  ret i32 0
+}
+
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
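
The CHECK lines above are autogenerated, as the NOTE at the top of the test
states. Assuming a built llc is reachable on PATH, they can be regenerated
after applying the patch with something like:

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/RISCV/memmove.ll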
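
For context, a hypothetical C counterpart (illustrative only, not part of the
patch): Clang emits a memmove call with a constant length as the
@llvm.memmove intrinsic exercised here, and the backend then chooses, per
size and alignment, between inline expansion and the memmove libcall (the
"tail memmove" sequences above).

    #include <string.h>

    /* Clang lowers this constant-length call to
     * @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 15, i1 false),
     * essentially the IR tested in unaligned_memcpy15 above. */
    void copy15(void *dest, const void *src) {
      memmove(dest, src, 15);
    }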