[llvm] f51a479 - [LoongArch] Pre-commit test for aligning stack objects passed to memory intrinsics. NFC

WANG Rui via llvm-commits llvm-commits@lists.llvm.org
Thu Aug 1 02:21:39 PDT 2024


Author: WANG Rui
Date: 2024-08-01T17:17:28+08:00
New Revision: f51a479520be75dec8117a0a6039604d8282ee38

URL: https://github.com/llvm/llvm-project/commit/f51a479520be75dec8117a0a6039604d8282ee38
DIFF: https://github.com/llvm/llvm-project/commit/f51a479520be75dec8117a0a6039604d8282ee38.diff

LOG: [LoongArch] Pre-commit test for aligning stack objects passed to memory intrinsics. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll b/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll
index 37afe7e3ed2ac..efe8dc779449b 100644
--- a/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll
+++ b/llvm/test/CodeGen/LoongArch/unaligned-memcpy-inline.ll
@@ -94,4 +94,152 @@ entry:
   ret void
 }
 
+@.str = private constant [22 x i8] c"preemption imbalance \00", align 1
+
+define void @t3() {
+; LA32-LABEL: t3:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -64
+; LA32-NEXT:    .cfi_def_cfa_offset 64
+; LA32-NEXT:    pcalau12i $a0, %pc_hi20(.L.str)
+; LA32-NEXT:    addi.w $a0, $a0, %pc_lo12(.L.str)
+; LA32-NEXT:    ld.b $a1, $a0, 21
+; LA32-NEXT:    ld.bu $a2, $a0, 20
+; LA32-NEXT:    slli.w $a1, $a1, 8
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    st.h $a1, $sp, 20
+; LA32-NEXT:    ld.bu $a1, $a0, 17
+; LA32-NEXT:    ld.bu $a2, $a0, 16
+; LA32-NEXT:    ld.bu $a3, $a0, 18
+; LA32-NEXT:    ld.bu $a4, $a0, 19
+; LA32-NEXT:    slli.w $a1, $a1, 8
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    slli.w $a2, $a3, 16
+; LA32-NEXT:    slli.w $a3, $a4, 24
+; LA32-NEXT:    or $a2, $a3, $a2
+; LA32-NEXT:    or $a1, $a2, $a1
+; LA32-NEXT:    st.w $a1, $sp, 16
+; LA32-NEXT:    ld.bu $a1, $a0, 13
+; LA32-NEXT:    ld.bu $a2, $a0, 12
+; LA32-NEXT:    ld.bu $a3, $a0, 14
+; LA32-NEXT:    ld.bu $a4, $a0, 15
+; LA32-NEXT:    slli.w $a1, $a1, 8
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    slli.w $a2, $a3, 16
+; LA32-NEXT:    slli.w $a3, $a4, 24
+; LA32-NEXT:    or $a2, $a3, $a2
+; LA32-NEXT:    or $a1, $a2, $a1
+; LA32-NEXT:    st.w $a1, $sp, 12
+; LA32-NEXT:    ld.bu $a1, $a0, 9
+; LA32-NEXT:    ld.bu $a2, $a0, 8
+; LA32-NEXT:    ld.bu $a3, $a0, 10
+; LA32-NEXT:    ld.bu $a4, $a0, 11
+; LA32-NEXT:    slli.w $a1, $a1, 8
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    slli.w $a2, $a3, 16
+; LA32-NEXT:    slli.w $a3, $a4, 24
+; LA32-NEXT:    or $a2, $a3, $a2
+; LA32-NEXT:    or $a1, $a2, $a1
+; LA32-NEXT:    st.w $a1, $sp, 8
+; LA32-NEXT:    ld.bu $a1, $a0, 5
+; LA32-NEXT:    ld.bu $a2, $a0, 4
+; LA32-NEXT:    ld.bu $a3, $a0, 6
+; LA32-NEXT:    ld.bu $a4, $a0, 7
+; LA32-NEXT:    slli.w $a1, $a1, 8
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    slli.w $a2, $a3, 16
+; LA32-NEXT:    slli.w $a3, $a4, 24
+; LA32-NEXT:    or $a2, $a3, $a2
+; LA32-NEXT:    or $a1, $a2, $a1
+; LA32-NEXT:    st.w $a1, $sp, 4
+; LA32-NEXT:    ld.bu $a1, $a0, 1
+; LA32-NEXT:    ld.bu $a2, $a0, 0
+; LA32-NEXT:    ld.bu $a3, $a0, 2
+; LA32-NEXT:    ld.bu $a0, $a0, 3
+; LA32-NEXT:    slli.w $a1, $a1, 8
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    slli.w $a2, $a3, 16
+; LA32-NEXT:    slli.w $a0, $a0, 24
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    st.w $a0, $sp, 0
+; LA32-NEXT:    addi.w $sp, $sp, 64
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: t3:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -64
+; LA64-NEXT:    .cfi_def_cfa_offset 64
+; LA64-NEXT:    pcalau12i $a0, %pc_hi20(.L.str)
+; LA64-NEXT:    addi.d $a0, $a0, %pc_lo12(.L.str)
+; LA64-NEXT:    ld.b $a1, $a0, 21
+; LA64-NEXT:    ld.bu $a2, $a0, 20
+; LA64-NEXT:    slli.d $a1, $a1, 8
+; LA64-NEXT:    or $a1, $a1, $a2
+; LA64-NEXT:    st.h $a1, $sp, 20
+; LA64-NEXT:    ld.bu $a1, $a0, 17
+; LA64-NEXT:    ld.bu $a2, $a0, 16
+; LA64-NEXT:    ld.bu $a3, $a0, 18
+; LA64-NEXT:    ld.b $a4, $a0, 19
+; LA64-NEXT:    slli.d $a1, $a1, 8
+; LA64-NEXT:    or $a1, $a1, $a2
+; LA64-NEXT:    slli.d $a2, $a3, 16
+; LA64-NEXT:    slli.d $a3, $a4, 24
+; LA64-NEXT:    or $a2, $a3, $a2
+; LA64-NEXT:    or $a1, $a2, $a1
+; LA64-NEXT:    st.w $a1, $sp, 16
+; LA64-NEXT:    ld.bu $a1, $a0, 9
+; LA64-NEXT:    ld.bu $a2, $a0, 8
+; LA64-NEXT:    ld.bu $a3, $a0, 10
+; LA64-NEXT:    ld.bu $a4, $a0, 11
+; LA64-NEXT:    slli.d $a1, $a1, 8
+; LA64-NEXT:    or $a1, $a1, $a2
+; LA64-NEXT:    slli.d $a2, $a3, 16
+; LA64-NEXT:    slli.d $a3, $a4, 24
+; LA64-NEXT:    or $a2, $a3, $a2
+; LA64-NEXT:    or $a1, $a2, $a1
+; LA64-NEXT:    ld.bu $a2, $a0, 13
+; LA64-NEXT:    ld.bu $a3, $a0, 12
+; LA64-NEXT:    ld.bu $a4, $a0, 14
+; LA64-NEXT:    ld.bu $a5, $a0, 15
+; LA64-NEXT:    slli.d $a2, $a2, 8
+; LA64-NEXT:    or $a2, $a2, $a3
+; LA64-NEXT:    slli.d $a3, $a4, 16
+; LA64-NEXT:    slli.d $a4, $a5, 24
+; LA64-NEXT:    or $a3, $a4, $a3
+; LA64-NEXT:    or $a2, $a3, $a2
+; LA64-NEXT:    slli.d $a2, $a2, 32
+; LA64-NEXT:    or $a1, $a2, $a1
+; LA64-NEXT:    st.d $a1, $sp, 8
+; LA64-NEXT:    ld.bu $a1, $a0, 1
+; LA64-NEXT:    ld.bu $a2, $a0, 0
+; LA64-NEXT:    ld.bu $a3, $a0, 2
+; LA64-NEXT:    ld.bu $a4, $a0, 3
+; LA64-NEXT:    slli.d $a1, $a1, 8
+; LA64-NEXT:    or $a1, $a1, $a2
+; LA64-NEXT:    slli.d $a2, $a3, 16
+; LA64-NEXT:    slli.d $a3, $a4, 24
+; LA64-NEXT:    or $a2, $a3, $a2
+; LA64-NEXT:    or $a1, $a2, $a1
+; LA64-NEXT:    ld.bu $a2, $a0, 5
+; LA64-NEXT:    ld.bu $a3, $a0, 4
+; LA64-NEXT:    ld.bu $a4, $a0, 6
+; LA64-NEXT:    ld.bu $a0, $a0, 7
+; LA64-NEXT:    slli.d $a2, $a2, 8
+; LA64-NEXT:    or $a2, $a2, $a3
+; LA64-NEXT:    slli.d $a3, $a4, 16
+; LA64-NEXT:    slli.d $a0, $a0, 24
+; LA64-NEXT:    or $a0, $a0, $a3
+; LA64-NEXT:    or $a0, $a0, $a2
+; LA64-NEXT:    slli.d $a0, $a0, 32
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    st.d $a0, $sp, 0
+; LA64-NEXT:    addi.d $sp, $sp, 64
+; LA64-NEXT:    ret
+entry:
+  %msgbuf = alloca [64 x i8], align 1
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %msgbuf, ptr align 1 @.str, i64 22, i1 false)
+  ret void
+}
+
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
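
For context, the new @t3 test reduces to a byte-aligned stack buffer that is
fully initialized by a memcpy from a constant string. Because both pointers
are only align 1 and unaligned accesses are not permitted here, the 22-byte
copy currently expands into the long ld.bu/st.b shuffle sequences checked
above. A follow-up change would be expected to raise the alignment of such
stack objects (for example via a shouldAlignPointerArgs-style hook, as the
ARM backend overrides); the sketch below is illustrative of that effect, not
the committed fix:

    ; Pattern exercised by @t3 (names as in the test). With align 1 and no
    ; unaligned access, the copy lowers to byte loads and stores.
    %msgbuf = alloca [64 x i8], align 1
    call void @llvm.memcpy.p0.p0.i64(ptr align 1 %msgbuf, ptr align 1 @.str, i64 22, i1 false)

    ; Hypothetical post-fix form: if the backend raises the alignment of the
    ; alloca (and of @.str), the same copy can instead use wider ld.w/st.w
    ; (LA32) or ld.d/st.d (LA64) accesses:
    %msgbuf.aligned = alloca [64 x i8], align 8
    call void @llvm.memcpy.p0.p0.i64(ptr align 8 %msgbuf.aligned, ptr align 8 @.str, i64 22, i1 false)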