[llvm] 3d6fc35 - [LoongArch] Pre-commit test for #76555. NFC

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 4 19:05:23 PST 2024


Author: wanglei
Date: 2024-01-05T10:57:40+08:00
New Revision: 3d6fc35b9071009c5ef37f879a12982c6a54db60

URL: https://github.com/llvm/llvm-project/commit/3d6fc35b9071009c5ef37f879a12982c6a54db60
DIFF: https://github.com/llvm/llvm-project/commit/3d6fc35b9071009c5ef37f879a12982c6a54db60.diff

LOG: [LoongArch] Pre-commit test for #76555. NFC

Added: 
    llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll b/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
new file mode 100644
index 00000000000000..150a935d7bf82e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
@@ -0,0 +1,172 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --code-model=medium --post-RA-scheduler=0 < %s \
+; RUN:     | FileCheck %s --check-prefix=MEDIUM_NO_SCH
+; RUN: llc --mtriple=loongarch64 --code-model=medium --post-RA-scheduler=1 < %s \
+; RUN:     | FileCheck %s --check-prefix=MEDIUM_SCH
+; RUN: llc --mtriple=loongarch64 --code-model=large --post-RA-scheduler=0 < %s \
+; RUN:     | FileCheck %s --check-prefix=LARGE_NO_SCH
+; RUN: llc --mtriple=loongarch64 --code-model=large --post-RA-scheduler=1 < %s \
+; RUN:     | FileCheck %s --check-prefix=LARGE_SCH
+
+;; FIXME: According to the description in psABI v2.30, the code sequences of
+;; the `PseudoLA*_LARGE` instructions and of the medium code model's function
+;; call must be kept adjacent.
+
+@g = dso_local global i64 zeroinitializer, align 4
+@G = global i64 zeroinitializer, align 4
+@gd = external thread_local global i64
+@ld = external thread_local(localdynamic) global i64
+@ie = external thread_local(initialexec) global i64
+
+declare ptr @bar(i64)
+
+define void @foo() nounwind {
+; MEDIUM_NO_SCH-LABEL: foo:
+; MEDIUM_NO_SCH:       # %bb.0:
+; MEDIUM_NO_SCH-NEXT:    addi.d $sp, $sp, -16
+; MEDIUM_NO_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %got_pc_lo12(G)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, 0
+; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
+; MEDIUM_NO_SCH-NEXT:    addi.d $a0, $a0, %pc_lo12(g)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, 0
+; MEDIUM_NO_SCH-NEXT:    ori $a0, $zero, 1
+; MEDIUM_NO_SCH-NEXT:    pcaddu18i $ra, %call36(bar)
+; MEDIUM_NO_SCH-NEXT:    jirl $ra, $ra, 0
+; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(gd)
+; MEDIUM_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ld)
+; MEDIUM_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ie)
+; MEDIUM_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; MEDIUM_NO_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; MEDIUM_NO_SCH-NEXT:    addi.d $sp, $sp, 16
+; MEDIUM_NO_SCH-NEXT:    ret
+;
+; MEDIUM_SCH-LABEL: foo:
+; MEDIUM_SCH:       # %bb.0:
+; MEDIUM_SCH-NEXT:    addi.d $sp, $sp, -16
+; MEDIUM_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; MEDIUM_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
+; MEDIUM_SCH-NEXT:    pcaddu18i $ra, %call36(bar)
+; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %got_pc_lo12(G)
+; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, 0
+; MEDIUM_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
+; MEDIUM_SCH-NEXT:    addi.d $a0, $a0, %pc_lo12(g)
+; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, 0
+; MEDIUM_SCH-NEXT:    ori $a0, $zero, 1
+; MEDIUM_SCH-NEXT:    jirl $ra, $ra, 0
+; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
+; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(gd)
+; MEDIUM_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
+; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ld)
+; MEDIUM_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
+; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ie)
+; MEDIUM_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; MEDIUM_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; MEDIUM_SCH-NEXT:    addi.d $sp, $sp, 16
+; MEDIUM_SCH-NEXT:    ret
+;
+; LARGE_NO_SCH-LABEL: foo:
+; LARGE_NO_SCH:       # %bb.0:
+; LARGE_NO_SCH-NEXT:    addi.d $sp, $sp, -16
+; LARGE_NO_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
+; LARGE_NO_SCH-NEXT:    addi.d $a1, $zero, %got_pc_lo12(G)
+; LARGE_NO_SCH-NEXT:    lu32i.d $a1, %got64_pc_lo20(G)
+; LARGE_NO_SCH-NEXT:    lu52i.d $a1, $a1, %got64_pc_hi12(G)
+; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_NO_SCH-NEXT:    ld.d $a0, $a0, 0
+; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
+; LARGE_NO_SCH-NEXT:    addi.d $a1, $zero, %pc_lo12(g)
+; LARGE_NO_SCH-NEXT:    lu32i.d $a1, %pc64_lo20(g)
+; LARGE_NO_SCH-NEXT:    lu52i.d $a1, $a1, %pc64_hi12(g)
+; LARGE_NO_SCH-NEXT:    add.d $a0, $a1, $a0
+; LARGE_NO_SCH-NEXT:    ld.d $a0, $a0, 0
+; LARGE_NO_SCH-NEXT:    ori $a0, $zero, 1
+; LARGE_NO_SCH-NEXT:    pcalau12i $a1, %got_pc_hi20(bar)
+; LARGE_NO_SCH-NEXT:    addi.d $ra, $zero, %got_pc_lo12(bar)
+; LARGE_NO_SCH-NEXT:    lu32i.d $ra, %got64_pc_lo20(bar)
+; LARGE_NO_SCH-NEXT:    lu52i.d $ra, $ra, %got64_pc_hi12(bar)
+; LARGE_NO_SCH-NEXT:    ldx.d $ra, $ra, $a1
+; LARGE_NO_SCH-NEXT:    jirl $ra, $ra, 0
+; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
+; LARGE_NO_SCH-NEXT:    addi.d $a1, $zero, %ie_pc_lo12(gd)
+; LARGE_NO_SCH-NEXT:    lu32i.d $a1, %ie64_pc_lo20(gd)
+; LARGE_NO_SCH-NEXT:    lu52i.d $a1, $a1, %ie64_pc_hi12(gd)
+; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
+; LARGE_NO_SCH-NEXT:    addi.d $a1, $zero, %ie_pc_lo12(ld)
+; LARGE_NO_SCH-NEXT:    lu32i.d $a1, %ie64_pc_lo20(ld)
+; LARGE_NO_SCH-NEXT:    lu52i.d $a1, $a1, %ie64_pc_hi12(ld)
+; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
+; LARGE_NO_SCH-NEXT:    addi.d $a1, $zero, %ie_pc_lo12(ie)
+; LARGE_NO_SCH-NEXT:    lu32i.d $a1, %ie64_pc_lo20(ie)
+; LARGE_NO_SCH-NEXT:    lu52i.d $a1, $a1, %ie64_pc_hi12(ie)
+; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; LARGE_NO_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LARGE_NO_SCH-NEXT:    addi.d $sp, $sp, 16
+; LARGE_NO_SCH-NEXT:    ret
+;
+; LARGE_SCH-LABEL: foo:
+; LARGE_SCH:       # %bb.0:
+; LARGE_SCH-NEXT:    addi.d $sp, $sp, -16
+; LARGE_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LARGE_SCH-NEXT:    addi.d $a1, $zero, %got_pc_lo12(G)
+; LARGE_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
+; LARGE_SCH-NEXT:    addi.d $ra, $zero, %got_pc_lo12(bar)
+; LARGE_SCH-NEXT:    lu32i.d $a1, %got64_pc_lo20(G)
+; LARGE_SCH-NEXT:    lu32i.d $ra, %got64_pc_lo20(bar)
+; LARGE_SCH-NEXT:    lu52i.d $a1, $a1, %got64_pc_hi12(G)
+; LARGE_SCH-NEXT:    lu52i.d $ra, $ra, %got64_pc_hi12(bar)
+; LARGE_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_SCH-NEXT:    addi.d $a1, $zero, %pc_lo12(g)
+; LARGE_SCH-NEXT:    lu32i.d $a1, %pc64_lo20(g)
+; LARGE_SCH-NEXT:    lu52i.d $a1, $a1, %pc64_hi12(g)
+; LARGE_SCH-NEXT:    ld.d $a0, $a0, 0
+; LARGE_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
+; LARGE_SCH-NEXT:    add.d $a0, $a1, $a0
+; LARGE_SCH-NEXT:    pcalau12i $a1, %got_pc_hi20(bar)
+; LARGE_SCH-NEXT:    ld.d $a0, $a0, 0
+; LARGE_SCH-NEXT:    ldx.d $ra, $ra, $a1
+; LARGE_SCH-NEXT:    ori $a0, $zero, 1
+; LARGE_SCH-NEXT:    jirl $ra, $ra, 0
+; LARGE_SCH-NEXT:    addi.d $a1, $zero, %ie_pc_lo12(gd)
+; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
+; LARGE_SCH-NEXT:    lu32i.d $a1, %ie64_pc_lo20(gd)
+; LARGE_SCH-NEXT:    lu52i.d $a1, $a1, %ie64_pc_hi12(gd)
+; LARGE_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_SCH-NEXT:    addi.d $a1, $zero, %ie_pc_lo12(ld)
+; LARGE_SCH-NEXT:    lu32i.d $a1, %ie64_pc_lo20(ld)
+; LARGE_SCH-NEXT:    lu52i.d $a1, $a1, %ie64_pc_hi12(ld)
+; LARGE_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
+; LARGE_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_SCH-NEXT:    addi.d $a1, $zero, %ie_pc_lo12(ie)
+; LARGE_SCH-NEXT:    lu32i.d $a1, %ie64_pc_lo20(ie)
+; LARGE_SCH-NEXT:    lu52i.d $a1, $a1, %ie64_pc_hi12(ie)
+; LARGE_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
+; LARGE_SCH-NEXT:    ldx.d $a0, $a1, $a0
+; LARGE_SCH-NEXT:    ldx.d $a0, $a0, $tp
+; LARGE_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LARGE_SCH-NEXT:    addi.d $sp, $sp, 16
+; LARGE_SCH-NEXT:    ret
+  %V = load volatile i64, ptr @G
+  %v = load volatile i64, ptr @g
+  call void @bar(i64 1)
+  %v_gd = load volatile i64, ptr @gd
+  %v_ld = load volatile i64, ptr @ld
+  %v_ie = load volatile i64, ptr @ie
+  ret void
+}
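
A note on the FIXME above: the check lines already illustrate the problem this
test pins down. With the post-RA scheduler disabled, the medium code model call
sequence stays contiguous, as the psABI describes it; a minimal sketch, taken
directly from the MEDIUM_NO_SCH output:

    pcaddu18i $ra, %call36(bar)   # PC-relative upper part of the offset to bar
    jirl      $ra, $ra, 0         # the call itself, adjacent to its pcaddu18i

With --post-RA-scheduler=1 (the MEDIUM_SCH and LARGE_SCH prefixes), the
scheduler hoists the pcaddu18i away from its jirl and interleaves the
pcalau12i/addi.d/lu32i.d/lu52i.d pieces of the PseudoLA*_LARGE expansions,
which is what the follow-up for #76555 is expected to restrict.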