[llvm] [LoongArch] Ensure PseudoLA* can be hoisted (PR #94723)

via llvm-commits <llvm-commits at lists.llvm.org>
Fri Jun 7 01:45:11 PDT 2024


https://github.com/heiher updated https://github.com/llvm/llvm-project/pull/94723

From 8e1a39d7f799a6ac0ba258cf4906323d8b483a32 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Wed, 5 Jun 2024 20:32:23 +0800
Subject: [PATCH] [LoongArch] Ensure PseudoLA* can be hoisted

Since the pseudos are marked mayLoad but carry no MMOs, isSafeToMove
conservatively returns false and MachineLICM cannot hoist them.
PseudoLA_TLS_{LD,GD} do not actually expand to loads, so stop marking
them as mayLoad to allow them to be hoisted. For the others, attach MMOs
during lowering to indicate they are GOT loads and can therefore be
freely moved.
---
 .../LoongArch/LoongArchISelLowering.cpp       |  60 ++++--
 .../Target/LoongArch/LoongArchISelLowering.h  |   2 +-
 .../Target/LoongArch/LoongArchInstrInfo.td    |  27 +--
 .../LoongArch/machinelicm-address-pseudos.ll  | 171 ++++++++++--------
 .../LoongArch/psabi-restricted-scheduling.ll  |  48 ++---
 5 files changed, 178 insertions(+), 130 deletions(-)
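
For context, the lowering changes boil down to attaching an invariant GOT-load
MachineMemOperand to the pseudo's MachineSDNode. A minimal sketch of that
pattern, factored into a hypothetical helper (attachGOTLoadMMO is an invented
name for illustration; the patch keeps this inline in getAddr() and
getStaticTLSAddr()):

// Minimal sketch: mark a pseudo's result as an invariant, dereferenceable
// load from the GOT so MachineLICM's isSafeToMove treats it as movable.
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Hypothetical helper name, used here only to show the pattern in isolation.
static void attachGOTLoadMMO(SelectionDAG &DAG, SDValue Load, EVT Ty) {
  MachineFunction &MF = DAG.getMachineFunction();
  // MOLoad | MODereferenceable | MOInvariant: a GOT entry is a plain load
  // from memory that never changes, so the load may be hoisted out of loops.
  MachineMemOperand *MemOp = MF.getMachineMemOperand(
      MachinePointerInfo::getGOT(MF),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
  DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
}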

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9d7e4636abac1..e957789d3a29c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -785,6 +785,7 @@ SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
   SDLoc DL(N);
   EVT Ty = getPointerTy(DAG.getDataLayout());
   SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+  SDValue Load;
 
   switch (M) {
   default:
@@ -796,33 +797,48 @@ SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
     // This is not actually used, but is necessary for successfully matching
     // the PseudoLA_*_LARGE nodes.
     SDValue Tmp = DAG.getConstant(0, DL, Ty);
-    if (IsLocal)
+    if (IsLocal) {
       // This generates the pattern (PseudoLA_PCREL_LARGE tmp sym), that
       // eventually becomes the desired 5-insn code sequence.
-      return SDValue(DAG.getMachineNode(LoongArch::PseudoLA_PCREL_LARGE, DL, Ty,
+      Load = SDValue(DAG.getMachineNode(LoongArch::PseudoLA_PCREL_LARGE, DL, Ty,
                                         Tmp, Addr),
                      0);
-
-    // This generates the pattern (PseudoLA_GOT_LARGE tmp sym), that eventually
-    // becomes the desired 5-insn code sequence.
-    return SDValue(
-        DAG.getMachineNode(LoongArch::PseudoLA_GOT_LARGE, DL, Ty, Tmp, Addr),
-        0);
+    } else {
+      // This generates the pattern (PseudoLA_GOT_LARGE tmp sym), that
+      // eventually becomes the desired 5-insn code sequence.
+      Load = SDValue(
+          DAG.getMachineNode(LoongArch::PseudoLA_GOT_LARGE, DL, Ty, Tmp, Addr),
+          0);
+    }
+    break;
   }
 
   case CodeModel::Small:
   case CodeModel::Medium:
-    if (IsLocal)
+    if (IsLocal) {
       // This generates the pattern (PseudoLA_PCREL sym), which expands to
       // (addi.w/d (pcalau12i %pc_hi20(sym)) %pc_lo12(sym)).
-      return SDValue(
+      Load = SDValue(
           DAG.getMachineNode(LoongArch::PseudoLA_PCREL, DL, Ty, Addr), 0);
+    } else {
+      // This generates the pattern (PseudoLA_GOT sym), which expands to (ld.w/d
+      // (pcalau12i %got_pc_hi20(sym)) %got_pc_lo12(sym)).
+      Load =
+          SDValue(DAG.getMachineNode(LoongArch::PseudoLA_GOT, DL, Ty, Addr), 0);
+    }
+  }
 
-    // This generates the pattern (PseudoLA_GOT sym), which expands to (ld.w/d
-    // (pcalau12i %got_pc_hi20(sym)) %got_pc_lo12(sym)).
-    return SDValue(DAG.getMachineNode(LoongArch::PseudoLA_GOT, DL, Ty, Addr),
-                   0);
+  if (!IsLocal) {
+    MachineFunction &MF = DAG.getMachineFunction();
+    MachineMemOperand *MemOp = MF.getMachineMemOperand(
+        MachinePointerInfo::getGOT(MF),
+        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+            MachineMemOperand::MOInvariant,
+        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+    DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
   }
+
+  return Load;
 }
 
 SDValue LoongArchTargetLowering::lowerBlockAddress(SDValue Op,
@@ -860,7 +876,7 @@ SDValue LoongArchTargetLowering::lowerGlobalAddress(SDValue Op,
 
 SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                                   SelectionDAG &DAG,
-                                                  unsigned Opc,
+                                                  unsigned Opc, bool UseGOT,
                                                   bool Large) const {
   SDLoc DL(N);
   EVT Ty = getPointerTy(DAG.getDataLayout());
@@ -873,6 +889,15 @@ SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
   SDValue Offset = Large
                        ? SDValue(DAG.getMachineNode(Opc, DL, Ty, Tmp, Addr), 0)
                        : SDValue(DAG.getMachineNode(Opc, DL, Ty, Addr), 0);
+  if (UseGOT) {
+    MachineFunction &MF = DAG.getMachineFunction();
+    MachineMemOperand *MemOp = MF.getMachineMemOperand(
+        MachinePointerInfo::getGOT(MF),
+        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+            MachineMemOperand::MOInvariant,
+        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+    DAG.setNodeMemRefs(cast<MachineSDNode>(Offset.getNode()), {MemOp});
+  }
 
   // Add the thread pointer.
   return DAG.getNode(ISD::ADD, DL, Ty, Offset,
@@ -972,13 +997,14 @@ LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
     return getStaticTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                   : LoongArch::PseudoLA_TLS_IE,
-                            Large);
+                            /*UseGOT=*/true, Large);
   case TLSModel::LocalExec:
     // This model is used when static linking as the TLS offsets are resolved
     // during program linking.
     //
     // This node doesn't need an extra argument for the large code model.
-    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE);
+    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
+                            /*UseGOT=*/false);
   }
 
   return getTLSDescAddr(N, DAG,
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 9328831a17a30..f4c57f80fdbe4 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -267,7 +267,7 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue getAddr(NodeTy *N, SelectionDAG &DAG, CodeModel::Model M,
                   bool IsLocal = true) const;
   SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
-                           unsigned Opc, bool Large = false) const;
+                           unsigned Opc, bool UseGOT, bool Large = false) const;
   SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                             unsigned Opc, bool Large = false) const;
   SDValue getTLSDescAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index f72f46e39e2a4..a1e21a7ef527b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -1580,13 +1580,26 @@ def PseudoLA_ABS_LARGE : Pseudo<(outs GPR:$dst),
                                 "la.abs", "$dst, $src">;
 def PseudoLA_PCREL : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.pcrel", "$dst, $src">;
-let Defs = [R20], Size = 20 in
+def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+                             "la.tls.ld", "$dst, $src">;
+def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+                             "la.tls.gd", "$dst, $src">;
+let Defs = [R20], Size = 20 in {
 def PseudoLA_PCREL_LARGE : Pseudo<(outs GPR:$dst),
                                   (ins GPR:$tmp, bare_symbol:$src), [],
                                   "la.pcrel", "$dst, $tmp, $src">,
                            Requires<[IsLA64]>;
 def PseudoLA_TLS_LE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                              "la.tls.le", "$dst, $src">;
+def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
+                                   (ins GPR:$tmp, bare_symbol:$src), [],
+                                   "la.tls.ld", "$dst, $tmp, $src">,
+                            Requires<[IsLA64]>;
+def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
+                                   (ins GPR:$tmp, bare_symbol:$src), [],
+                                   "la.tls.gd", "$dst, $tmp, $src">,
+                            Requires<[IsLA64]>;
+} // Defs = [R20], Size = 20
 }
 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
     isAsmParserOnly = 1 in {
@@ -1594,10 +1607,6 @@ def PseudoLA_GOT : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                           "la.got", "$dst, $src">;
 def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                              "la.tls.ie", "$dst, $src">;
-def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
-                             "la.tls.ld", "$dst, $src">;
-def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
-                             "la.tls.gd", "$dst, $src">;
 let Defs = [R20], Size = 20 in {
 def PseudoLA_GOT_LARGE : Pseudo<(outs GPR:$dst),
                                 (ins GPR:$tmp, bare_symbol:$src), [],
@@ -1607,14 +1616,6 @@ def PseudoLA_TLS_IE_LARGE : Pseudo<(outs GPR:$dst),
                                    (ins GPR:$tmp, bare_symbol:$src), [],
                                    "la.tls.ie", "$dst, $tmp, $src">,
                             Requires<[IsLA64]>;
-def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
-                                   (ins GPR:$tmp, bare_symbol:$src), [],
-                                   "la.tls.ld", "$dst, $tmp, $src">,
-                            Requires<[IsLA64]>;
-def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
-                                   (ins GPR:$tmp, bare_symbol:$src), [],
-                                   "la.tls.gd", "$dst, $tmp, $src">,
-                            Requires<[IsLA64]>;
 } // Defs = [R20], Size = 20
 }
 
diff --git a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
index cd308a1495914..ed1a24e82b4e4 100644
--- a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
+++ b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
@@ -74,46 +74,46 @@ ret:
 define void @test_la_got(i32 signext %n) {
 ; LA32-LABEL: test_la_got:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    pcalau12i $a1, %got_pc_hi20(g)
+; LA32-NEXT:    ld.w $a1, $a1, %got_pc_lo12(g)
+; LA32-NEXT:    move $a2, $zero
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB1_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
-; LA32-NEXT:    ld.w $a2, $a2, %got_pc_lo12(g)
-; LA32-NEXT:    ld.w $zero, $a2, 0
-; LA32-NEXT:    addi.w $a1, $a1, 1
-; LA32-NEXT:    blt $a1, $a0, .LBB1_1
+; LA32-NEXT:    ld.w $zero, $a1, 0
+; LA32-NEXT:    addi.w $a2, $a2, 1
+; LA32-NEXT:    blt $a2, $a0, .LBB1_1
 ; LA32-NEXT:  # %bb.2: # %ret
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_la_got:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    pcalau12i $a1, %got_pc_hi20(g)
+; LA64-NEXT:    ld.d $a1, $a1, %got_pc_lo12(g)
+; LA64-NEXT:    move $a2, $zero
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB1_1: # %loop
 ; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
-; LA64-NEXT:    ld.d $a2, $a2, %got_pc_lo12(g)
-; LA64-NEXT:    ld.w $zero, $a2, 0
-; LA64-NEXT:    addi.w $a1, $a1, 1
-; LA64-NEXT:    blt $a1, $a0, .LBB1_1
+; LA64-NEXT:    ld.w $zero, $a1, 0
+; LA64-NEXT:    addi.w $a2, $a2, 1
+; LA64-NEXT:    blt $a2, $a0, .LBB1_1
 ; LA64-NEXT:  # %bb.2: # %ret
 ; LA64-NEXT:    ret
 ;
 ; LA64LARGE-LABEL: test_la_got:
 ; LA64LARGE:       # %bb.0: # %entry
-; LA64LARGE-NEXT:    move $a1, $zero
-; LA64LARGE-NEXT:    .p2align 4, , 16
-; LA64LARGE-NEXT:  .LBB1_1: # %loop
-; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
+; LA64LARGE-NEXT:    pcalau12i $a1, %got_pc_hi20(g)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(g)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(g)
 ; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(g)
-; LA64LARGE-NEXT:    ldx.d $a2, $t8, $a2
-; LA64LARGE-NEXT:    ld.w $zero, $a2, 0
-; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
-; LA64LARGE-NEXT:    blt $a1, $a0, .LBB1_1
+; LA64LARGE-NEXT:    ldx.d $a1, $t8, $a1
+; LA64LARGE-NEXT:    move $a2, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB1_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    ld.w $zero, $a1, 0
+; LA64LARGE-NEXT:    addi.w $a2, $a2, 1
+; LA64LARGE-NEXT:    blt $a2, $a0, .LBB1_1
 ; LA64LARGE-NEXT:  # %bb.2: # %ret
 ; LA64LARGE-NEXT:    ret
 entry:
@@ -135,13 +135,13 @@ ret:
 define void @test_la_tls_ie(i32 signext %n) {
 ; LA32-LABEL: test_la_tls_ie:
 ; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    pcalau12i $a1, %ie_pc_hi20(ie)
+; LA32-NEXT:    ld.w $a2, $a1, %ie_pc_lo12(ie)
 ; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    add.w $a2, $a2, $tp
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB2_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
-; LA32-NEXT:    ld.w $a2, $a2, %ie_pc_lo12(ie)
-; LA32-NEXT:    add.w $a2, $a2, $tp
 ; LA32-NEXT:    ld.w $zero, $a2, 0
 ; LA32-NEXT:    addi.w $a1, $a1, 1
 ; LA32-NEXT:    blt $a1, $a0, .LBB2_1
@@ -150,32 +150,32 @@ define void @test_la_tls_ie(i32 signext %n) {
 ;
 ; LA64-LABEL: test_la_tls_ie:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    pcalau12i $a1, %ie_pc_hi20(ie)
+; LA64-NEXT:    ld.d $a1, $a1, %ie_pc_lo12(ie)
+; LA64-NEXT:    move $a2, $zero
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB2_1: # %loop
 ; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
-; LA64-NEXT:    ld.d $a2, $a2, %ie_pc_lo12(ie)
-; LA64-NEXT:    ldx.w $zero, $a2, $tp
-; LA64-NEXT:    addi.w $a1, $a1, 1
-; LA64-NEXT:    blt $a1, $a0, .LBB2_1
+; LA64-NEXT:    ldx.w $zero, $a1, $tp
+; LA64-NEXT:    addi.w $a2, $a2, 1
+; LA64-NEXT:    blt $a2, $a0, .LBB2_1
 ; LA64-NEXT:  # %bb.2: # %ret
 ; LA64-NEXT:    ret
 ;
 ; LA64LARGE-LABEL: test_la_tls_ie:
 ; LA64LARGE:       # %bb.0: # %entry
-; LA64LARGE-NEXT:    move $a1, $zero
-; LA64LARGE-NEXT:    .p2align 4, , 16
-; LA64LARGE-NEXT:  .LBB2_1: # %loop
-; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
+; LA64LARGE-NEXT:    pcalau12i $a1, %ie_pc_hi20(ie)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ie)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ie)
 ; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
-; LA64LARGE-NEXT:    ldx.d $a2, $t8, $a2
-; LA64LARGE-NEXT:    ldx.w $zero, $a2, $tp
-; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
-; LA64LARGE-NEXT:    blt $a1, $a0, .LBB2_1
+; LA64LARGE-NEXT:    ldx.d $a1, $t8, $a1
+; LA64LARGE-NEXT:    move $a2, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB2_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    ldx.w $zero, $a1, $tp
+; LA64LARGE-NEXT:    addi.w $a2, $a2, 1
+; LA64LARGE-NEXT:    blt $a2, $a0, .LBB2_1
 ; LA64LARGE-NEXT:  # %bb.2: # %ret
 ; LA64LARGE-NEXT:    ret
 entry:
@@ -202,21 +202,25 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 0 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    .cfi_offset 22, -8
 ; LA32-NEXT:    .cfi_offset 23, -12
+; LA32-NEXT:    .cfi_offset 24, -16
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $s0, $zero
+; LA32-NEXT:    move $s1, $zero
+; LA32-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA32-NEXT:    addi.w $s0, $a0, %got_pc_lo12(ld)
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB3_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
-; LA32-NEXT:    addi.w $a0, $a0, %got_pc_lo12(ld)
+; LA32-NEXT:    move $a0, $s0
 ; LA32-NEXT:    bl %plt(__tls_get_addr)
 ; LA32-NEXT:    ld.w $zero, $a0, 0
-; LA32-NEXT:    addi.w $s0, $s0, 1
-; LA32-NEXT:    blt $s0, $fp, .LBB3_1
+; LA32-NEXT:    addi.w $s1, $s1, 1
+; LA32-NEXT:    blt $s1, $fp, .LBB3_1
 ; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ld.w $s1, $sp, 0 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -230,21 +234,25 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
 ; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
 ; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s1, $sp, 0 # 8-byte Folded Spill
 ; LA64-NEXT:    .cfi_offset 1, -8
 ; LA64-NEXT:    .cfi_offset 22, -16
 ; LA64-NEXT:    .cfi_offset 23, -24
+; LA64-NEXT:    .cfi_offset 24, -32
 ; LA64-NEXT:    move $fp, $a0
-; LA64-NEXT:    move $s0, $zero
+; LA64-NEXT:    move $s1, $zero
+; LA64-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64-NEXT:    addi.d $s0, $a0, %got_pc_lo12(ld)
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB3_1: # %loop
 ; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
-; LA64-NEXT:    addi.d $a0, $a0, %got_pc_lo12(ld)
+; LA64-NEXT:    move $a0, $s0
 ; LA64-NEXT:    bl %plt(__tls_get_addr)
 ; LA64-NEXT:    ld.w $zero, $a0, 0
-; LA64-NEXT:    addi.w $s0, $s0, 1
-; LA64-NEXT:    blt $s0, $fp, .LBB3_1
+; LA64-NEXT:    addi.w $s1, $s1, 1
+; LA64-NEXT:    blt $s1, $fp, .LBB3_1
 ; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ld.d $s1, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -258,19 +266,22 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA64LARGE-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    st.d $s1, $sp, 0 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    .cfi_offset 1, -8
 ; LA64LARGE-NEXT:    .cfi_offset 22, -16
 ; LA64LARGE-NEXT:    .cfi_offset 23, -24
+; LA64LARGE-NEXT:    .cfi_offset 24, -32
 ; LA64LARGE-NEXT:    move $fp, $a0
-; LA64LARGE-NEXT:    move $s0, $zero
-; LA64LARGE-NEXT:    .p2align 4, , 16
-; LA64LARGE-NEXT:  .LBB3_1: # %loop
-; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64LARGE-NEXT:    move $s1, $zero
+; LA64LARGE-NEXT:    pcalau12i $s0, %ld_pc_hi20(ld)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(ld)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(ld)
 ; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(ld)
-; LA64LARGE-NEXT:    add.d $a0, $t8, $a0
+; LA64LARGE-NEXT:    add.d $s0, $t8, $s0
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB3_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    move $a0, $s0
 ; LA64LARGE-NEXT:    pcalau12i $ra, %pc_hi20(__tls_get_addr)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %pc_lo12(__tls_get_addr)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %pc64_lo20(__tls_get_addr)
@@ -278,9 +289,10 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA64LARGE-NEXT:    add.d $ra, $t8, $ra
 ; LA64LARGE-NEXT:    jirl $ra, $ra, 0
 ; LA64LARGE-NEXT:    ld.w $zero, $a0, 0
-; LA64LARGE-NEXT:    addi.w $s0, $s0, 1
-; LA64LARGE-NEXT:    blt $s0, $fp, .LBB3_1
+; LA64LARGE-NEXT:    addi.w $s1, $s1, 1
+; LA64LARGE-NEXT:    blt $s1, $fp, .LBB3_1
 ; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ld.d $s1, $sp, 0 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -370,18 +382,21 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 0 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $s0, $zero
+; LA32-NEXT:    move $s1, $zero
+; LA32-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
+; LA32-NEXT:    addi.w $s0, $a0, %got_pc_lo12(gd)
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB5_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
-; LA32-NEXT:    addi.w $a0, $a0, %got_pc_lo12(gd)
+; LA32-NEXT:    move $a0, $s0
 ; LA32-NEXT:    bl %plt(__tls_get_addr)
 ; LA32-NEXT:    ld.w $zero, $a0, 0
-; LA32-NEXT:    addi.w $s0, $s0, 1
-; LA32-NEXT:    blt $s0, $fp, .LBB5_1
+; LA32-NEXT:    addi.w $s1, $s1, 1
+; LA32-NEXT:    blt $s1, $fp, .LBB5_1
 ; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ld.w $s1, $sp, 0 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -394,18 +409,21 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
 ; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
 ; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s1, $sp, 0 # 8-byte Folded Spill
 ; LA64-NEXT:    move $fp, $a0
-; LA64-NEXT:    move $s0, $zero
+; LA64-NEXT:    move $s1, $zero
+; LA64-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
+; LA64-NEXT:    addi.d $s0, $a0, %got_pc_lo12(gd)
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB5_1: # %loop
 ; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
-; LA64-NEXT:    addi.d $a0, $a0, %got_pc_lo12(gd)
+; LA64-NEXT:    move $a0, $s0
 ; LA64-NEXT:    bl %plt(__tls_get_addr)
 ; LA64-NEXT:    ld.w $zero, $a0, 0
-; LA64-NEXT:    addi.w $s0, $s0, 1
-; LA64-NEXT:    blt $s0, $fp, .LBB5_1
+; LA64-NEXT:    addi.w $s1, $s1, 1
+; LA64-NEXT:    blt $s1, $fp, .LBB5_1
 ; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ld.d $s1, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -418,16 +436,18 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; LA64LARGE-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    st.d $s1, $sp, 0 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    move $fp, $a0
-; LA64LARGE-NEXT:    move $s0, $zero
-; LA64LARGE-NEXT:    .p2align 4, , 16
-; LA64LARGE-NEXT:  .LBB5_1: # %loop
-; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
+; LA64LARGE-NEXT:    move $s1, $zero
+; LA64LARGE-NEXT:    pcalau12i $s0, %gd_pc_hi20(gd)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(gd)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(gd)
 ; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(gd)
-; LA64LARGE-NEXT:    add.d $a0, $t8, $a0
+; LA64LARGE-NEXT:    add.d $s0, $t8, $s0
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB5_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    move $a0, $s0
 ; LA64LARGE-NEXT:    pcalau12i $ra, %pc_hi20(__tls_get_addr)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %pc_lo12(__tls_get_addr)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %pc64_lo20(__tls_get_addr)
@@ -435,9 +455,10 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; LA64LARGE-NEXT:    add.d $ra, $t8, $ra
 ; LA64LARGE-NEXT:    jirl $ra, $ra, 0
 ; LA64LARGE-NEXT:    ld.w $zero, $a0, 0
-; LA64LARGE-NEXT:    addi.w $s0, $s0, 1
-; LA64LARGE-NEXT:    blt $s0, $fp, .LBB5_1
+; LA64LARGE-NEXT:    addi.w $s1, $s1, 1
+; LA64LARGE-NEXT:    blt $s1, $fp, .LBB5_1
 ; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ld.d $s1, $sp, 0 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll b/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
index 678546f1ef535..3a8e39c7f2fca 100644
--- a/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
+++ b/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
@@ -36,13 +36,13 @@ define void @foo() nounwind {
 ; MEDIUM_NO_SCH-NEXT:    jirl $ra, $ra, 0
 ; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
 ; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(gd)
+; MEDIUM_NO_SCH-NEXT:    pcalau12i $a1, %ie_pc_hi20(ld)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a1, $a1, %ie_pc_lo12(ld)
+; MEDIUM_NO_SCH-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
+; MEDIUM_NO_SCH-NEXT:    ld.d $a2, $a2, %ie_pc_lo12(ie)
 ; MEDIUM_NO_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
-; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ld)
-; MEDIUM_NO_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
-; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ie)
-; MEDIUM_NO_SCH-NEXT:    ldx.d $zero, $a0, $tp
+; MEDIUM_NO_SCH-NEXT:    ldx.d $zero, $a1, $tp
+; MEDIUM_NO_SCH-NEXT:    ldx.d $zero, $a2, $tp
 ; MEDIUM_NO_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; MEDIUM_NO_SCH-NEXT:    addi.d $sp, $sp, 16
 ; MEDIUM_NO_SCH-NEXT:    ret
@@ -61,14 +61,14 @@ define void @foo() nounwind {
 ; MEDIUM_SCH-NEXT:    pcaddu18i $ra, %call36(bar)
 ; MEDIUM_SCH-NEXT:    jirl $ra, $ra, 0
 ; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
+; MEDIUM_SCH-NEXT:    pcalau12i $a1, %ie_pc_hi20(ld)
+; MEDIUM_SCH-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
 ; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(gd)
+; MEDIUM_SCH-NEXT:    ld.d $a1, $a1, %ie_pc_lo12(ld)
+; MEDIUM_SCH-NEXT:    ld.d $a2, $a2, %ie_pc_lo12(ie)
 ; MEDIUM_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
-; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ld)
-; MEDIUM_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
-; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ie)
-; MEDIUM_SCH-NEXT:    ldx.d $zero, $a0, $tp
+; MEDIUM_SCH-NEXT:    ldx.d $zero, $a1, $tp
+; MEDIUM_SCH-NEXT:    ldx.d $zero, $a2, $tp
 ; MEDIUM_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; MEDIUM_SCH-NEXT:    addi.d $sp, $sp, 16
 ; MEDIUM_SCH-NEXT:    ret
@@ -101,19 +101,19 @@ define void @foo() nounwind {
 ; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(gd)
 ; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(gd)
 ; LARGE_NO_SCH-NEXT:    ldx.d $a0, $t8, $a0
-; LARGE_NO_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
+; LARGE_NO_SCH-NEXT:    pcalau12i $a1, %ie_pc_hi20(ld)
 ; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ld)
 ; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ld)
 ; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ld)
-; LARGE_NO_SCH-NEXT:    ldx.d $a0, $t8, $a0
-; LARGE_NO_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
+; LARGE_NO_SCH-NEXT:    ldx.d $a1, $t8, $a1
+; LARGE_NO_SCH-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
 ; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ie)
 ; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ie)
 ; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
-; LARGE_NO_SCH-NEXT:    ldx.d $a0, $t8, $a0
+; LARGE_NO_SCH-NEXT:    ldx.d $a2, $t8, $a2
 ; LARGE_NO_SCH-NEXT:    ldx.d $zero, $a0, $tp
+; LARGE_NO_SCH-NEXT:    ldx.d $zero, $a1, $tp
+; LARGE_NO_SCH-NEXT:    ldx.d $zero, $a2, $tp
 ; LARGE_NO_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LARGE_NO_SCH-NEXT:    addi.d $sp, $sp, 16
 ; LARGE_NO_SCH-NEXT:    ret
@@ -146,19 +146,19 @@ define void @foo() nounwind {
 ; LARGE_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(gd)
 ; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(gd)
 ; LARGE_SCH-NEXT:    ldx.d $a0, $t8, $a0
-; LARGE_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
+; LARGE_SCH-NEXT:    pcalau12i $a1, %ie_pc_hi20(ld)
 ; LARGE_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ld)
 ; LARGE_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ld)
 ; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ld)
-; LARGE_SCH-NEXT:    ldx.d $a0, $t8, $a0
-; LARGE_SCH-NEXT:    ldx.d $zero, $a0, $tp
-; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
+; LARGE_SCH-NEXT:    ldx.d $a1, $t8, $a1
+; LARGE_SCH-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
 ; LARGE_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ie)
 ; LARGE_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ie)
 ; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
-; LARGE_SCH-NEXT:    ldx.d $a0, $t8, $a0
+; LARGE_SCH-NEXT:    ldx.d $a2, $t8, $a2
 ; LARGE_SCH-NEXT:    ldx.d $zero, $a0, $tp
+; LARGE_SCH-NEXT:    ldx.d $zero, $a1, $tp
+; LARGE_SCH-NEXT:    ldx.d $zero, $a2, $tp
 ; LARGE_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LARGE_SCH-NEXT:    addi.d $sp, $sp, 16
 ; LARGE_SCH-NEXT:    ret


