[llvm] [RISCV] Add test coverage for memmove (PR #120232)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 17 05:30:03 PST 2024


https://github.com/wangpc-pp created https://github.com/llvm/llvm-project/pull/120232


From 211eace59f2f382c99542d3944c295bc7c659e46 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp@bytedance.com>
Date: Tue, 17 Dec 2024 21:22:09 +0800
Subject: [PATCH] [RISCV] Add test coverage for memmove

---
 llvm/test/CodeGen/RISCV/memmove.ll | 398 +++++++++++++++++++++++++++++
 1 file changed, 398 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/memmove.ll

diff --git a/llvm/test/CodeGen/RISCV/memmove.ll b/llvm/test/CodeGen/RISCV/memmove.ll
new file mode 100644
index 00000000000000..db9f0386707251
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/memmove.ll
@@ -0,0 +1,398 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
+; RUN: llc < %s -mtriple=riscv64 \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
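+; The *-FAST prefixes cover the +unaligned-scalar-mem configurations; the
+; *-BOTH prefixes hold checks shared by the base and fast variants of each XLEN.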
+%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
+
+@src = external dso_local global %struct.x
+@dst = external dso_local global %struct.x
+
+@.str1 = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, SOME STRING\00", align 1
+@.str2 = private unnamed_addr constant [36 x i8] c"DHRYSTONE PROGRAM, SOME STRING BLAH\00", align 1
+@.str3 = private unnamed_addr constant [24 x i8] c"DHRYSTONE PROGRAM, SOME\00", align 1
+@.str4 = private unnamed_addr constant [18 x i8] c"DHRYSTONE PROGR  \00", align 1
+@.str5 = private unnamed_addr constant [7 x i8] c"DHRYST\00", align 1
+@.str6 = private unnamed_addr constant [14 x i8] c"/tmp/rmXXXXXX\00", align 1
+@spool.splbuf = internal global [512 x i8] zeroinitializer, align 16
+
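+; An 11-byte memmove between 8-byte-aligned globals is expanded inline on all
+; configurations, using the widest naturally aligned loads and stores.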
+define i32 @t0() {
+; RV32-BOTH-LABEL: t0:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lui a0, %hi(src)
+; RV32-BOTH-NEXT:    lw a1, %lo(src)(a0)
+; RV32-BOTH-NEXT:    lui a2, %hi(dst)
+; RV32-BOTH-NEXT:    addi a0, a0, %lo(src)
+; RV32-BOTH-NEXT:    sw a1, %lo(dst)(a2)
+; RV32-BOTH-NEXT:    lw a1, 4(a0)
+; RV32-BOTH-NEXT:    lh a3, 8(a0)
+; RV32-BOTH-NEXT:    lbu a0, 10(a0)
+; RV32-BOTH-NEXT:    addi a2, a2, %lo(dst)
+; RV32-BOTH-NEXT:    sw a1, 4(a2)
+; RV32-BOTH-NEXT:    sh a3, 8(a2)
+; RV32-BOTH-NEXT:    sb a0, 10(a2)
+; RV32-BOTH-NEXT:    li a0, 0
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: t0:
+; RV64-BOTH:       # %bb.0: # %entry
+; RV64-BOTH-NEXT:    lui a0, %hi(src)
+; RV64-BOTH-NEXT:    lui a1, %hi(dst)
+; RV64-BOTH-NEXT:    ld a2, %lo(src)(a0)
+; RV64-BOTH-NEXT:    addi a0, a0, %lo(src)
+; RV64-BOTH-NEXT:    lh a3, 8(a0)
+; RV64-BOTH-NEXT:    lbu a0, 10(a0)
+; RV64-BOTH-NEXT:    sd a2, %lo(dst)(a1)
+; RV64-BOTH-NEXT:    addi a1, a1, %lo(dst)
+; RV64-BOTH-NEXT:    sh a3, 8(a1)
+; RV64-BOTH-NEXT:    sb a0, 10(a1)
+; RV64-BOTH-NEXT:    li a0, 0
+; RV64-BOTH-NEXT:    ret
+entry:
+  call void @llvm.memmove.p0.p0.i32(ptr align 8 @dst, ptr align 8 @src, i32 11, i1 false)
+  ret i32 0
+}
+
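+; 31 unaligned bytes: only RV64 with fast unaligned access expands this
+; inline; every other configuration emits a tail call to memmove.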
+define void @t1(ptr nocapture %C) nounwind {
+; RV32-BOTH-LABEL: t1:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lui a1, %hi(.L.str1)
+; RV32-BOTH-NEXT:    addi a1, a1, %lo(.L.str1)
+; RV32-BOTH-NEXT:    li a2, 31
+; RV32-BOTH-NEXT:    tail memmove
+;
+; RV64-LABEL: t1:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lui a1, %hi(.L.str1)
+; RV64-NEXT:    addi a1, a1, %lo(.L.str1)
+; RV64-NEXT:    li a2, 31
+; RV64-NEXT:    tail memmove
+;
+; RV64-FAST-LABEL: t1:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lui a1, %hi(.L.str1)
+; RV64-FAST-NEXT:    ld a2, %lo(.L.str1)(a1)
+; RV64-FAST-NEXT:    addi a1, a1, %lo(.L.str1)
+; RV64-FAST-NEXT:    lh a3, 28(a1)
+; RV64-FAST-NEXT:    lbu a4, 30(a1)
+; RV64-FAST-NEXT:    ld a5, 8(a1)
+; RV64-FAST-NEXT:    ld a6, 16(a1)
+; RV64-FAST-NEXT:    lw a1, 24(a1)
+; RV64-FAST-NEXT:    sh a3, 28(a0)
+; RV64-FAST-NEXT:    sb a4, 30(a0)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sd a5, 8(a0)
+; RV64-FAST-NEXT:    sd a6, 16(a0)
+; RV64-FAST-NEXT:    sw a1, 24(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %C, ptr @.str1, i64 31, i1 false)
+  ret void
+}
+
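+; 36 unaligned bytes: as with t1, only RV64 with fast unaligned access
+; expands this inline; the rest tail call memmove.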
+define void @t2(ptr nocapture %C) nounwind {
+; RV32-BOTH-LABEL: t2:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lui a1, %hi(.L.str2)
+; RV32-BOTH-NEXT:    addi a1, a1, %lo(.L.str2)
+; RV32-BOTH-NEXT:    li a2, 36
+; RV32-BOTH-NEXT:    tail memmove
+;
+; RV64-LABEL: t2:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lui a1, %hi(.L.str2)
+; RV64-NEXT:    addi a1, a1, %lo(.L.str2)
+; RV64-NEXT:    li a2, 36
+; RV64-NEXT:    tail memmove
+;
+; RV64-FAST-LABEL: t2:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lui a1, %hi(.L.str2)
+; RV64-FAST-NEXT:    ld a2, %lo(.L.str2)(a1)
+; RV64-FAST-NEXT:    addi a1, a1, %lo(.L.str2)
+; RV64-FAST-NEXT:    lw a3, 32(a1)
+; RV64-FAST-NEXT:    ld a4, 8(a1)
+; RV64-FAST-NEXT:    ld a5, 16(a1)
+; RV64-FAST-NEXT:    ld a1, 24(a1)
+; RV64-FAST-NEXT:    sw a3, 32(a0)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sd a4, 8(a0)
+; RV64-FAST-NEXT:    sd a5, 16(a0)
+; RV64-FAST-NEXT:    sd a1, 24(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %C, ptr @.str2, i64 36, i1 false)
+  ret void
+}
+
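+; 24 unaligned bytes fit the inline threshold once unaligned scalar access is
+; fast (both RV32-FAST and RV64-FAST); otherwise a tail call to memmove is emitted.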
+define void @t3(ptr nocapture %C) nounwind {
+; RV32-LABEL: t3:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lui a1, %hi(.L.str3)
+; RV32-NEXT:    addi a1, a1, %lo(.L.str3)
+; RV32-NEXT:    li a2, 24
+; RV32-NEXT:    tail memmove
+;
+; RV64-LABEL: t3:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lui a1, %hi(.L.str3)
+; RV64-NEXT:    addi a1, a1, %lo(.L.str3)
+; RV64-NEXT:    li a2, 24
+; RV64-NEXT:    tail memmove
+;
+; RV32-FAST-LABEL: t3:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lui a1, %hi(.L.str3)
+; RV32-FAST-NEXT:    lw a2, %lo(.L.str3)(a1)
+; RV32-FAST-NEXT:    addi a1, a1, %lo(.L.str3)
+; RV32-FAST-NEXT:    lw a3, 16(a1)
+; RV32-FAST-NEXT:    lw a4, 20(a1)
+; RV32-FAST-NEXT:    lw a5, 4(a1)
+; RV32-FAST-NEXT:    lw a6, 8(a1)
+; RV32-FAST-NEXT:    lw a1, 12(a1)
+; RV32-FAST-NEXT:    sw a3, 16(a0)
+; RV32-FAST-NEXT:    sw a4, 20(a0)
+; RV32-FAST-NEXT:    sw a2, 0(a0)
+; RV32-FAST-NEXT:    sw a5, 4(a0)
+; RV32-FAST-NEXT:    sw a6, 8(a0)
+; RV32-FAST-NEXT:    sw a1, 12(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: t3:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lui a1, %hi(.L.str3)
+; RV64-FAST-NEXT:    ld a2, %lo(.L.str3)(a1)
+; RV64-FAST-NEXT:    addi a1, a1, %lo(.L.str3)
+; RV64-FAST-NEXT:    ld a3, 8(a1)
+; RV64-FAST-NEXT:    ld a1, 16(a1)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sd a3, 8(a0)
+; RV64-FAST-NEXT:    sd a1, 16(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %C, ptr @.str3, i64 24, i1 false)
+  ret void
+}
+
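+; 18 unaligned bytes: inline expansion with fast unaligned access, a tail
+; call to memmove otherwise.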
+define void @t4(ptr nocapture %C) nounwind {
+; RV32-LABEL: t4:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lui a1, %hi(.L.str4)
+; RV32-NEXT:    addi a1, a1, %lo(.L.str4)
+; RV32-NEXT:    li a2, 18
+; RV32-NEXT:    tail memmove
+;
+; RV64-LABEL: t4:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lui a1, %hi(.L.str4)
+; RV64-NEXT:    addi a1, a1, %lo(.L.str4)
+; RV64-NEXT:    li a2, 18
+; RV64-NEXT:    tail memmove
+;
+; RV32-FAST-LABEL: t4:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lui a1, %hi(.L.str4)
+; RV32-FAST-NEXT:    lw a2, %lo(.L.str4)(a1)
+; RV32-FAST-NEXT:    addi a1, a1, %lo(.L.str4)
+; RV32-FAST-NEXT:    lh a3, 16(a1)
+; RV32-FAST-NEXT:    lw a4, 4(a1)
+; RV32-FAST-NEXT:    lw a5, 8(a1)
+; RV32-FAST-NEXT:    lw a1, 12(a1)
+; RV32-FAST-NEXT:    sh a3, 16(a0)
+; RV32-FAST-NEXT:    sw a2, 0(a0)
+; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    sw a5, 8(a0)
+; RV32-FAST-NEXT:    sw a1, 12(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: t4:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lui a1, %hi(.L.str4)
+; RV64-FAST-NEXT:    ld a2, %lo(.L.str4)(a1)
+; RV64-FAST-NEXT:    addi a1, a1, %lo(.L.str4)
+; RV64-FAST-NEXT:    ld a3, 8(a1)
+; RV64-FAST-NEXT:    lh a1, 16(a1)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sd a3, 8(a0)
+; RV64-FAST-NEXT:    sh a1, 16(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %C, ptr @.str4, i64 18, i1 false)
+  ret void
+}
+
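+; A 7-byte memmove is always expanded inline: byte-by-byte when unaligned
+; scalar access is slow, with word/halfword accesses when it is fast.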
+define void @t5(ptr nocapture %C) nounwind {
+; RV32-LABEL: t5:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lui a1, %hi(.L.str5)
+; RV32-NEXT:    addi a2, a1, %lo(.L.str5)
+; RV32-NEXT:    lbu a3, 1(a2)
+; RV32-NEXT:    lbu a4, 2(a2)
+; RV32-NEXT:    lbu a5, 3(a2)
+; RV32-NEXT:    lbu a6, 4(a2)
+; RV32-NEXT:    lbu a7, 5(a2)
+; RV32-NEXT:    lbu a2, 6(a2)
+; RV32-NEXT:    lbu a1, %lo(.L.str5)(a1)
+; RV32-NEXT:    sb a6, 4(a0)
+; RV32-NEXT:    sb a7, 5(a0)
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    sb a4, 2(a0)
+; RV32-NEXT:    sb a5, 3(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: t5:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lui a1, %hi(.L.str5)
+; RV64-NEXT:    addi a2, a1, %lo(.L.str5)
+; RV64-NEXT:    lbu a3, 1(a2)
+; RV64-NEXT:    lbu a4, 2(a2)
+; RV64-NEXT:    lbu a5, 3(a2)
+; RV64-NEXT:    lbu a6, 4(a2)
+; RV64-NEXT:    lbu a7, 5(a2)
+; RV64-NEXT:    lbu a2, 6(a2)
+; RV64-NEXT:    lbu a1, %lo(.L.str5)(a1)
+; RV64-NEXT:    sb a6, 4(a0)
+; RV64-NEXT:    sb a7, 5(a0)
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    sb a3, 1(a0)
+; RV64-NEXT:    sb a4, 2(a0)
+; RV64-NEXT:    sb a5, 3(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: t5:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lui a1, %hi(.L.str5)
+; RV32-FAST-NEXT:    lw a2, %lo(.L.str5)(a1)
+; RV32-FAST-NEXT:    addi a1, a1, %lo(.L.str5)
+; RV32-FAST-NEXT:    lh a3, 4(a1)
+; RV32-FAST-NEXT:    lbu a1, 6(a1)
+; RV32-FAST-NEXT:    sw a2, 0(a0)
+; RV32-FAST-NEXT:    sh a3, 4(a0)
+; RV32-FAST-NEXT:    sb a1, 6(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: t5:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lui a1, %hi(.L.str5)
+; RV64-FAST-NEXT:    lw a2, %lo(.L.str5)(a1)
+; RV64-FAST-NEXT:    addi a1, a1, %lo(.L.str5)
+; RV64-FAST-NEXT:    lh a3, 4(a1)
+; RV64-FAST-NEXT:    lbu a1, 6(a1)
+; RV64-FAST-NEXT:    sw a2, 0(a0)
+; RV64-FAST-NEXT:    sh a3, 4(a0)
+; RV64-FAST-NEXT:    sb a1, 6(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i64(ptr %C, ptr @.str5, i64 7, i1 false)
+  ret void
+}
+
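+; 14 bytes from a constant string into a global buffer: a library call to
+; memmove when unaligned scalar access is slow, inline expansion when it is fast.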
+define void @t6() nounwind {
+; RV32-LABEL: t6:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lui a0, %hi(spool.splbuf)
+; RV32-NEXT:    addi a0, a0, %lo(spool.splbuf)
+; RV32-NEXT:    lui a1, %hi(.L.str6)
+; RV32-NEXT:    addi a1, a1, %lo(.L.str6)
+; RV32-NEXT:    li a2, 14
+; RV32-NEXT:    call memmove
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: t6:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    lui a0, %hi(spool.splbuf)
+; RV64-NEXT:    addi a0, a0, %lo(spool.splbuf)
+; RV64-NEXT:    lui a1, %hi(.L.str6)
+; RV64-NEXT:    addi a1, a1, %lo(.L.str6)
+; RV64-NEXT:    li a2, 14
+; RV64-NEXT:    call memmove
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: t6:
+; RV32-FAST:       # %bb.0: # %entry
+; RV32-FAST-NEXT:    lui a0, %hi(.L.str6)
+; RV32-FAST-NEXT:    lui a1, %hi(spool.splbuf)
+; RV32-FAST-NEXT:    lw a2, %lo(.L.str6)(a0)
+; RV32-FAST-NEXT:    addi a0, a0, %lo(.L.str6)
+; RV32-FAST-NEXT:    lw a3, 4(a0)
+; RV32-FAST-NEXT:    lw a4, 8(a0)
+; RV32-FAST-NEXT:    lh a0, 12(a0)
+; RV32-FAST-NEXT:    sw a2, %lo(spool.splbuf)(a1)
+; RV32-FAST-NEXT:    sw a3, %lo(spool.splbuf+4)(a1)
+; RV32-FAST-NEXT:    sw a4, %lo(spool.splbuf+8)(a1)
+; RV32-FAST-NEXT:    sh a0, %lo(spool.splbuf+12)(a1)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: t6:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    lui a0, %hi(.L.str6)
+; RV64-FAST-NEXT:    ld a1, %lo(.L.str6)(a0)
+; RV64-FAST-NEXT:    addi a0, a0, %lo(.L.str6)
+; RV64-FAST-NEXT:    lw a2, 8(a0)
+; RV64-FAST-NEXT:    lh a0, 12(a0)
+; RV64-FAST-NEXT:    lui a3, %hi(spool.splbuf)
+; RV64-FAST-NEXT:    sd a1, %lo(spool.splbuf)(a3)
+; RV64-FAST-NEXT:    sw a2, %lo(spool.splbuf+8)(a3)
+; RV64-FAST-NEXT:    sh a0, %lo(spool.splbuf+12)(a3)
+; RV64-FAST-NEXT:    ret
+entry:
+  call void @llvm.memmove.p0.p0.i64(ptr @spool.splbuf, ptr @.str6, i64 14, i1 false)
+  ret void
+}
+
+%struct.Foo = type { i32, i32, i32, i32 }
+
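+; A 16-byte memmove between 4-byte-aligned pointers is always expanded
+; inline; RV64 with fast unaligned access can use doubleword accesses.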
+define void @t7(ptr nocapture %a, ptr nocapture %b) nounwind {
+; RV32-BOTH-LABEL: t7:
+; RV32-BOTH:       # %bb.0: # %entry
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lw a3, 4(a1)
+; RV32-BOTH-NEXT:    lw a4, 8(a1)
+; RV32-BOTH-NEXT:    lw a1, 12(a1)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    sw a3, 4(a0)
+; RV32-BOTH-NEXT:    sw a4, 8(a0)
+; RV32-BOTH-NEXT:    sw a1, 12(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-LABEL: t7:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    lw a2, 0(a1)
+; RV64-NEXT:    lw a3, 4(a1)
+; RV64-NEXT:    lw a4, 8(a1)
+; RV64-NEXT:    lw a1, 12(a1)
+; RV64-NEXT:    sw a2, 0(a0)
+; RV64-NEXT:    sw a3, 4(a0)
+; RV64-NEXT:    sw a4, 8(a0)
+; RV64-NEXT:    sw a1, 12(a0)
+; RV64-NEXT:    ret
+;
+; RV64-FAST-LABEL: t7:
+; RV64-FAST:       # %bb.0: # %entry
+; RV64-FAST-NEXT:    ld a2, 0(a1)
+; RV64-FAST-NEXT:    ld a1, 8(a1)
+; RV64-FAST-NEXT:    sd a2, 0(a0)
+; RV64-FAST-NEXT:    sd a1, 8(a0)
+; RV64-FAST-NEXT:    ret
+entry:
+  tail call void @llvm.memmove.p0.p0.i32(ptr align 4 %a, ptr align 4 %b, i32 16, i1 false)
+  ret void
+}
+
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind


