[llvm] 566befc - [NFC][LoongArch] Pre-commit tests for hoisting of PseudoLA*

WANG Rui via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 6 21:54:44 PDT 2024


Author: WANG Rui
Date: 2024-06-07T12:55:06+08:00
New Revision: 566befca28f5df9490ad4fcce31042d92526ba1e

URL: https://github.com/llvm/llvm-project/commit/566befca28f5df9490ad4fcce31042d92526ba1e
DIFF: https://github.com/llvm/llvm-project/commit/566befca28f5df9490ad4fcce31042d92526ba1e.diff

LOG: [NFC][LoongArch] Pre-commit tests for hoisting of PseudoLA*

Added: 
    llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
new file mode 100644
index 0000000000000..30a19adcfdce4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
@@ -0,0 +1,458 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --relocation-model=pic \
+; RUN:   --verify-machineinstrs < %s | FileCheck -check-prefixes=LA32 %s
+; RUN: llc --mtriple=loongarch64 --relocation-model=pic \
+; RUN:   --verify-machineinstrs < %s | FileCheck -check-prefixes=LA64 %s
+; RUN: llc --mtriple=loongarch64 --relocation-model=pic --code-model=large \
+; RUN:   --verify-machineinstrs < %s | FileCheck -check-prefixes=LA64LARGE %s
+
+; Checks MachineLICM's handling of loop-invariant address generation pseudos
+; (PseudoLA*). In these pre-commit baselines, only the PC-relative and TLS
+; local-exec sequences are hoisted out of the loop; the GOT and other TLS
+; sequences are still materialized on every iteration.
+
+@l = protected global i32 0, align 4
+
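+; PCREL: the address materialization for 'l' (pcalau12i + addi, plus the
+; lu32i.d/lu52i.d sequence in the large code model) is already hoisted into
+; the preheader.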
+define void @test_la_pcrel(i32 signext %n) {
+; LA32-LABEL: test_la_pcrel:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    pcalau12i $a2, %pc_hi20(l)
+; LA32-NEXT:    addi.w $a2, $a2, %pc_lo12(l)
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB0_1: # %loop
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ld.w $a3, $a2, 0
+; LA32-NEXT:    addi.w $a1, $a1, 1
+; LA32-NEXT:    blt $a1, $a0, .LBB0_1
+; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_la_pcrel:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    pcalau12i $a2, %pc_hi20(l)
+; LA64-NEXT:    addi.d $a2, $a2, %pc_lo12(l)
+; LA64-NEXT:    .p2align 4, , 16
+; LA64-NEXT:  .LBB0_1: # %loop
+; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ld.w $a3, $a2, 0
+; LA64-NEXT:    addi.w $a1, $a1, 1
+; LA64-NEXT:    blt $a1, $a0, .LBB0_1
+; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ret
+;
+; LA64LARGE-LABEL: test_la_pcrel:
+; LA64LARGE:       # %bb.0: # %entry
+; LA64LARGE-NEXT:    move $a1, $zero
+; LA64LARGE-NEXT:    pcalau12i $a2, %pc_hi20(l)
+; LA64LARGE-NEXT:    addi.d $t8, $zero, %pc_lo12(l)
+; LA64LARGE-NEXT:    lu32i.d $t8, %pc64_lo20(l)
+; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %pc64_hi12(l)
+; LA64LARGE-NEXT:    add.d $a2, $t8, $a2
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB0_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    ld.w $a3, $a2, 0
+; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
+; LA64LARGE-NEXT:    blt $a1, $a0, .LBB0_1
+; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ret
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [ %inc, %loop ], [ 0, %entry ]
+  %0 = load volatile i32, ptr @l, align 4
+  %inc = add nuw nsw i32 %i, 1
+  %cmp = icmp slt i32 %inc, %n
+  br i1 %cmp, label %loop, label %ret
+
+ret:
+  ret void
+}
+
+@g = global i32 0, align 4
+
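+; GOT: the pcalau12i + GOT load for 'g' is loop-invariant, but is currently
+; re-emitted inside the loop body on every iteration.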
+define void @test_la_got(i32 signext %n) {
+; LA32-LABEL: test_la_got:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB1_1: # %loop
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
+; LA32-NEXT:    ld.w $a2, $a2, %got_pc_lo12(g)
+; LA32-NEXT:    ld.w $a2, $a2, 0
+; LA32-NEXT:    addi.w $a1, $a1, 1
+; LA32-NEXT:    blt $a1, $a0, .LBB1_1
+; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_la_got:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    .p2align 4, , 16
+; LA64-NEXT:  .LBB1_1: # %loop
+; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
+; LA64-NEXT:    ld.d $a2, $a2, %got_pc_lo12(g)
+; LA64-NEXT:    ld.w $a2, $a2, 0
+; LA64-NEXT:    addi.w $a1, $a1, 1
+; LA64-NEXT:    blt $a1, $a0, .LBB1_1
+; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ret
+;
+; LA64LARGE-LABEL: test_la_got:
+; LA64LARGE:       # %bb.0: # %entry
+; LA64LARGE-NEXT:    move $a1, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB1_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
+; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(g)
+; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(g)
+; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(g)
+; LA64LARGE-NEXT:    ldx.d $a2, $t8, $a2
+; LA64LARGE-NEXT:    ld.w $a2, $a2, 0
+; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
+; LA64LARGE-NEXT:    blt $a1, $a0, .LBB1_1
+; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ret
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [ %inc, %loop ], [ 0, %entry ]
+  %0 = load volatile i32, ptr @g, align 4
+  %inc = add nuw nsw i32 %i, 1
+  %cmp = icmp slt i32 %inc, %n
+  br i1 %cmp, label %loop, label %ret
+
+ret:
+  ret void
+}
+
+@ie = external thread_local(initialexec) global i32
+
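+; TLS initial-exec: the %ie_pc_hi20/%ie_pc_lo12 offset load likewise remains
+; inside the loop.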
+define void @test_la_tls_ie(i32 signext %n) {
+; LA32-LABEL: test_la_tls_ie:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB2_1: # %loop
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
+; LA32-NEXT:    ld.w $a2, $a2, %ie_pc_lo12(ie)
+; LA32-NEXT:    add.w $a2, $a2, $tp
+; LA32-NEXT:    ld.w $a2, $a2, 0
+; LA32-NEXT:    addi.w $a1, $a1, 1
+; LA32-NEXT:    blt $a1, $a0, .LBB2_1
+; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_la_tls_ie:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    .p2align 4, , 16
+; LA64-NEXT:  .LBB2_1: # %loop
+; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
+; LA64-NEXT:    ld.d $a2, $a2, %ie_pc_lo12(ie)
+; LA64-NEXT:    ldx.w $a2, $a2, $tp
+; LA64-NEXT:    addi.w $a1, $a1, 1
+; LA64-NEXT:    blt $a1, $a0, .LBB2_1
+; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ret
+;
+; LA64LARGE-LABEL: test_la_tls_ie:
+; LA64LARGE:       # %bb.0: # %entry
+; LA64LARGE-NEXT:    move $a1, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB2_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
+; LA64LARGE-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ie)
+; LA64LARGE-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ie)
+; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
+; LA64LARGE-NEXT:    ldx.d $a2, $t8, $a2
+; LA64LARGE-NEXT:    ldx.w $a2, $a2, $tp
+; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
+; LA64LARGE-NEXT:    blt $a1, $a0, .LBB2_1
+; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ret
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [ %inc, %loop ], [ 0, %entry ]
+  %0 = load volatile i32, ptr @ie, align 4
+  %inc = add nuw nsw i32 %i, 1
+  %cmp = icmp slt i32 %inc, %n
+  br i1 %cmp, label %loop, label %ret
+
+ret:
+  ret void
+}
+
+@ld = external thread_local(localdynamic) global i32
+
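+; TLS local-dynamic: the GOT-entry address setup feeds a __tls_get_addr call,
+; and the whole sequence stays in the loop.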
+define void @test_la_tls_ld(i32 signext %n) {
+; LA32-LABEL: test_la_tls_ld:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    .cfi_offset 22, -8
+; LA32-NEXT:    .cfi_offset 23, -12
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    move $s0, $zero
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB3_1: # %loop
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA32-NEXT:    addi.w $a0, $a0, %got_pc_lo12(ld)
+; LA32-NEXT:    bl %plt(__tls_get_addr)
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    addi.w $s0, $s0, 1
+; LA32-NEXT:    blt $s0, $fp, .LBB3_1
+; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_la_tls_ld:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    .cfi_def_cfa_offset 32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    .cfi_offset 1, -8
+; LA64-NEXT:    .cfi_offset 22, -16
+; LA64-NEXT:    .cfi_offset 23, -24
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    move $s0, $zero
+; LA64-NEXT:    .p2align 4, , 16
+; LA64-NEXT:  .LBB3_1: # %loop
+; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64-NEXT:    addi.d $a0, $a0, %got_pc_lo12(ld)
+; LA64-NEXT:    bl %plt(__tls_get_addr)
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    addi.w $s0, $s0, 1
+; LA64-NEXT:    blt $s0, $fp, .LBB3_1
+; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+;
+; LA64LARGE-LABEL: test_la_tls_ld:
+; LA64LARGE:       # %bb.0: # %entry
+; LA64LARGE-NEXT:    addi.d $sp, $sp, -32
+; LA64LARGE-NEXT:    .cfi_def_cfa_offset 32
+; LA64LARGE-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    .cfi_offset 1, -8
+; LA64LARGE-NEXT:    .cfi_offset 22, -16
+; LA64LARGE-NEXT:    .cfi_offset 23, -24
+; LA64LARGE-NEXT:    move $fp, $a0
+; LA64LARGE-NEXT:    move $s0, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB3_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(ld)
+; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(ld)
+; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(ld)
+; LA64LARGE-NEXT:    add.d $a0, $t8, $a0
+; LA64LARGE-NEXT:    pcalau12i $ra, %pc_hi20(__tls_get_addr)
+; LA64LARGE-NEXT:    addi.d $t8, $zero, %pc_lo12(__tls_get_addr)
+; LA64LARGE-NEXT:    lu32i.d $t8, %pc64_lo20(__tls_get_addr)
+; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %pc64_hi12(__tls_get_addr)
+; LA64LARGE-NEXT:    add.d $ra, $t8, $ra
+; LA64LARGE-NEXT:    jirl $ra, $ra, 0
+; LA64LARGE-NEXT:    ld.w $a0, $a0, 0
+; LA64LARGE-NEXT:    addi.w $s0, $s0, 1
+; LA64LARGE-NEXT:    blt $s0, $fp, .LBB3_1
+; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64LARGE-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64LARGE-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64LARGE-NEXT:    addi.d $sp, $sp, 32
+; LA64LARGE-NEXT:    ret
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [ %inc, %loop ], [ 0, %entry ]
+  %0 = load volatile i32, ptr @ld, align 4
+  %inc = add nuw nsw i32 %i, 1
+  %cmp = icmp slt i32 %inc, %n
+  br i1 %cmp, label %loop, label %ret
+
+ret:
+  ret void
+}
+
+@le = external thread_local(localexec) global i32
+
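+; TLS local-exec: the lu12i.w/ori offset computation (extended with
+; lu32i.d/lu52i.d in the large code model) is already hoisted out of the loop.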
+define void @test_la_tls_le(i32 signext %n) {
+; LA32-LABEL: test_la_tls_le:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    lu12i.w $a2, %le_hi20(le)
+; LA32-NEXT:    ori $a2, $a2, %le_lo12(le)
+; LA32-NEXT:    add.w $a2, $a2, $tp
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB4_1: # %loop
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    ld.w $a3, $a2, 0
+; LA32-NEXT:    addi.w $a1, $a1, 1
+; LA32-NEXT:    blt $a1, $a0, .LBB4_1
+; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_la_tls_le:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    lu12i.w $a2, %le_hi20(le)
+; LA64-NEXT:    ori $a2, $a2, %le_lo12(le)
+; LA64-NEXT:    .p2align 4, , 16
+; LA64-NEXT:  .LBB4_1: # %loop
+; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ldx.w $a3, $a2, $tp
+; LA64-NEXT:    addi.w $a1, $a1, 1
+; LA64-NEXT:    blt $a1, $a0, .LBB4_1
+; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ret
+;
+; LA64LARGE-LABEL: test_la_tls_le:
+; LA64LARGE:       # %bb.0: # %entry
+; LA64LARGE-NEXT:    move $a1, $zero
+; LA64LARGE-NEXT:    lu12i.w $a2, %le_hi20(le)
+; LA64LARGE-NEXT:    ori $a2, $a2, %le_lo12(le)
+; LA64LARGE-NEXT:    lu32i.d $a2, %le64_lo20(le)
+; LA64LARGE-NEXT:    lu52i.d $a2, $a2, %le64_hi12(le)
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB4_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    ldx.w $a3, $a2, $tp
+; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
+; LA64LARGE-NEXT:    blt $a1, $a0, .LBB4_1
+; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ret
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [ %inc, %loop ], [ 0, %entry ]
+  %0 = load volatile i32, ptr @le, align 4
+  %inc = add nuw nsw i32 %i, 1
+  %cmp = icmp slt i32 %inc, %n
+  br i1 %cmp, label %loop, label %ret
+
+ret:
+  ret void
+}
+
+@gd = external thread_local global i32
+
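+; TLS general-dynamic: as with local-dynamic, the __tls_get_addr call and its
+; address setup remain inside the loop.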
+define void @test_la_tls_gd(i32 signext %n) nounwind {
+; LA32-LABEL: test_la_tls_gd:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    move $s0, $zero
+; LA32-NEXT:    .p2align 4, , 16
+; LA32-NEXT:  .LBB5_1: # %loop
+; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA32-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
+; LA32-NEXT:    addi.w $a0, $a0, %got_pc_lo12(gd)
+; LA32-NEXT:    bl %plt(__tls_get_addr)
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    addi.w $s0, $s0, 1
+; LA32-NEXT:    blt $s0, $fp, .LBB5_1
+; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_la_tls_gd:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    move $s0, $zero
+; LA64-NEXT:    .p2align 4, , 16
+; LA64-NEXT:  .LBB5_1: # %loop
+; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
+; LA64-NEXT:    addi.d $a0, $a0, %got_pc_lo12(gd)
+; LA64-NEXT:    bl %plt(__tls_get_addr)
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    addi.w $s0, $s0, 1
+; LA64-NEXT:    blt $s0, $fp, .LBB5_1
+; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+;
+; LA64LARGE-LABEL: test_la_tls_gd:
+; LA64LARGE:       # %bb.0: # %entry
+; LA64LARGE-NEXT:    addi.d $sp, $sp, -32
+; LA64LARGE-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    move $fp, $a0
+; LA64LARGE-NEXT:    move $s0, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB5_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
+; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(gd)
+; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(gd)
+; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(gd)
+; LA64LARGE-NEXT:    add.d $a0, $t8, $a0
+; LA64LARGE-NEXT:    pcalau12i $ra, %pc_hi20(__tls_get_addr)
+; LA64LARGE-NEXT:    addi.d $t8, $zero, %pc_lo12(__tls_get_addr)
+; LA64LARGE-NEXT:    lu32i.d $t8, %pc64_lo20(__tls_get_addr)
+; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %pc64_hi12(__tls_get_addr)
+; LA64LARGE-NEXT:    add.d $ra, $t8, $ra
+; LA64LARGE-NEXT:    jirl $ra, $ra, 0
+; LA64LARGE-NEXT:    ld.w $a0, $a0, 0
+; LA64LARGE-NEXT:    addi.w $s0, $s0, 1
+; LA64LARGE-NEXT:    blt $s0, $fp, .LBB5_1
+; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64LARGE-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64LARGE-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64LARGE-NEXT:    addi.d $sp, $sp, 32
+; LA64LARGE-NEXT:    ret
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [ %inc, %loop ], [ 0, %entry ]
+  %0 = load volatile i32, ptr @gd, align 4
+  %inc = add nuw nsw i32 %i, 1
+  %cmp = icmp slt i32 %inc, %n
+  br i1 %cmp, label %loop, label %ret
+
+ret:
+  ret void
+}