[llvm] [LoongArch] Ensure PseudoLA* can be hoisted (PR #94723)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 6 22:21:44 PDT 2024


llvmbot wrote:



@llvm/pr-subscribers-backend-loongarch

Author: hev (heiher)

Changes

Since we mark the pseudos as mayLoad but do not provide any MMOs, isSafeToMove conservatively returns false, stopping MachineLICM from hoisting the instructions. PseudoLA_TLS_{LD,GD} do not actually expand to a load, so stop marking them as mayLoad so they can be hoisted; for the others, add MMOs during lowering to indicate they are GOT loads and can therefore be freely moved.
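For reference, the core of the mechanism (condensed from the lowering changes in the diff below, using the same identifiers; not a standalone build unit) is attaching an invariant, dereferenceable GOT-load MachineMemOperand to the pseudo's MachineSDNode so MachineLICM can see the load is safe to hoist:

```cpp
// Sketch condensed from the patch: give the PseudoLA_* machine node a memory
// operand describing an invariant load from the GOT.  Ty, DAG and Load are as
// in LoongArchTargetLowering::getAddr in the diff.
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MemOp = MF.getMachineMemOperand(
    MachinePointerInfo::getGOT(MF),
    MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
        MachineMemOperand::MOInvariant,
    LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
// With an MMO present, MachineInstr::isSafeToMove no longer has to be
// conservative about the unknown memory access.
DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
```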

---

Patch is 28.48 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/94723.diff


5 Files Affected:

- (modified) llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp (+43-17) 
- (modified) llvm/lib/Target/LoongArch/LoongArchISelLowering.h (+1-1) 
- (modified) llvm/lib/Target/LoongArch/LoongArchInstrInfo.td (+14-13) 
- (modified) llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll (+97-76) 
- (modified) llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll (+24-24) 


``````````diff
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9d7e4636abac1..e957789d3a29c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -785,6 +785,7 @@ SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
   SDLoc DL(N);
   EVT Ty = getPointerTy(DAG.getDataLayout());
   SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+  SDValue Load;
 
   switch (M) {
   default:
@@ -796,33 +797,48 @@ SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
     // This is not actually used, but is necessary for successfully matching
     // the PseudoLA_*_LARGE nodes.
     SDValue Tmp = DAG.getConstant(0, DL, Ty);
-    if (IsLocal)
+    if (IsLocal) {
       // This generates the pattern (PseudoLA_PCREL_LARGE tmp sym), that
       // eventually becomes the desired 5-insn code sequence.
-      return SDValue(DAG.getMachineNode(LoongArch::PseudoLA_PCREL_LARGE, DL, Ty,
+      Load = SDValue(DAG.getMachineNode(LoongArch::PseudoLA_PCREL_LARGE, DL, Ty,
                                         Tmp, Addr),
                      0);
-
-    // This generates the pattern (PseudoLA_GOT_LARGE tmp sym), that eventually
-    // becomes the desired 5-insn code sequence.
-    return SDValue(
-        DAG.getMachineNode(LoongArch::PseudoLA_GOT_LARGE, DL, Ty, Tmp, Addr),
-        0);
+    } else {
+      // This generates the pattern (PseudoLA_GOT_LARGE tmp sym), that
+      // eventually becomes the desired 5-insn code sequence.
+      Load = SDValue(
+          DAG.getMachineNode(LoongArch::PseudoLA_GOT_LARGE, DL, Ty, Tmp, Addr),
+          0);
+    }
+    break;
   }
 
   case CodeModel::Small:
   case CodeModel::Medium:
-    if (IsLocal)
+    if (IsLocal) {
       // This generates the pattern (PseudoLA_PCREL sym), which expands to
       // (addi.w/d (pcalau12i %pc_hi20(sym)) %pc_lo12(sym)).
-      return SDValue(
+      Load = SDValue(
           DAG.getMachineNode(LoongArch::PseudoLA_PCREL, DL, Ty, Addr), 0);
+    } else {
+      // This generates the pattern (PseudoLA_GOT sym), which expands to (ld.w/d
+      // (pcalau12i %got_pc_hi20(sym)) %got_pc_lo12(sym)).
+      Load =
+          SDValue(DAG.getMachineNode(LoongArch::PseudoLA_GOT, DL, Ty, Addr), 0);
+    }
+  }
 
-    // This generates the pattern (PseudoLA_GOT sym), which expands to (ld.w/d
-    // (pcalau12i %got_pc_hi20(sym)) %got_pc_lo12(sym)).
-    return SDValue(DAG.getMachineNode(LoongArch::PseudoLA_GOT, DL, Ty, Addr),
-                   0);
+  if (!IsLocal) {
+    MachineFunction &MF = DAG.getMachineFunction();
+    MachineMemOperand *MemOp = MF.getMachineMemOperand(
+        MachinePointerInfo::getGOT(MF),
+        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+            MachineMemOperand::MOInvariant,
+        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+    DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
   }
+
+  return Load;
 }
 
 SDValue LoongArchTargetLowering::lowerBlockAddress(SDValue Op,
@@ -860,7 +876,7 @@ SDValue LoongArchTargetLowering::lowerGlobalAddress(SDValue Op,
 
 SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                                   SelectionDAG &DAG,
-                                                  unsigned Opc,
+                                                  unsigned Opc, bool UseGOT,
                                                   bool Large) const {
   SDLoc DL(N);
   EVT Ty = getPointerTy(DAG.getDataLayout());
@@ -873,6 +889,15 @@ SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
   SDValue Offset = Large
                        ? SDValue(DAG.getMachineNode(Opc, DL, Ty, Tmp, Addr), 0)
                        : SDValue(DAG.getMachineNode(Opc, DL, Ty, Addr), 0);
+  if (UseGOT) {
+    MachineFunction &MF = DAG.getMachineFunction();
+    MachineMemOperand *MemOp = MF.getMachineMemOperand(
+        MachinePointerInfo::getGOT(MF),
+        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+            MachineMemOperand::MOInvariant,
+        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+    DAG.setNodeMemRefs(cast<MachineSDNode>(Offset.getNode()), {MemOp});
+  }
 
   // Add the thread pointer.
   return DAG.getNode(ISD::ADD, DL, Ty, Offset,
@@ -972,13 +997,14 @@ LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
     return getStaticTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                   : LoongArch::PseudoLA_TLS_IE,
-                            Large);
+                            /*UseGOT=*/true, Large);
   case TLSModel::LocalExec:
     // This model is used when static linking as the TLS offsets are resolved
     // during program linking.
     //
     // This node doesn't need an extra argument for the large code model.
-    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE);
+    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
+                            /*UseGOT=*/false);
   }
 
   return getTLSDescAddr(N, DAG,
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 9328831a17a30..f4c57f80fdbe4 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -267,7 +267,7 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue getAddr(NodeTy *N, SelectionDAG &DAG, CodeModel::Model M,
                   bool IsLocal = true) const;
   SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
-                           unsigned Opc, bool Large = false) const;
+                           unsigned Opc, bool UseGOT, bool Large = false) const;
   SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                             unsigned Opc, bool Large = false) const;
   SDValue getTLSDescAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 66bd74e068b95..faac2bb15a1c5 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -1569,13 +1569,26 @@ def PseudoLA_ABS_LARGE : Pseudo<(outs GPR:$dst),
                                 "la.abs", "$dst, $src">;
 def PseudoLA_PCREL : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.pcrel", "$dst, $src">;
-let Defs = [R20], Size = 20 in
+def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+                             "la.tls.ld", "$dst, $src">;
+def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+                             "la.tls.gd", "$dst, $src">;
+let Defs = [R20], Size = 20 in {
 def PseudoLA_PCREL_LARGE : Pseudo<(outs GPR:$dst),
                                   (ins GPR:$tmp, bare_symbol:$src), [],
                                   "la.pcrel", "$dst, $tmp, $src">,
                            Requires<[IsLA64]>;
 def PseudoLA_TLS_LE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                              "la.tls.le", "$dst, $src">;
+def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
+                                   (ins GPR:$tmp, bare_symbol:$src), [],
+                                   "la.tls.ld", "$dst, $tmp, $src">,
+                            Requires<[IsLA64]>;
+def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
+                                   (ins GPR:$tmp, bare_symbol:$src), [],
+                                   "la.tls.gd", "$dst, $tmp, $src">,
+                            Requires<[IsLA64]>;
+} // Defs = [R20], Size = 20
 }
 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
     isAsmParserOnly = 1 in {
@@ -1583,10 +1596,6 @@ def PseudoLA_GOT : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                           "la.got", "$dst, $src">;
 def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                              "la.tls.ie", "$dst, $src">;
-def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
-                             "la.tls.ld", "$dst, $src">;
-def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
-                             "la.tls.gd", "$dst, $src">;
 let Defs = [R20], Size = 20 in {
 def PseudoLA_GOT_LARGE : Pseudo<(outs GPR:$dst),
                                 (ins GPR:$tmp, bare_symbol:$src), [],
@@ -1596,14 +1605,6 @@ def PseudoLA_TLS_IE_LARGE : Pseudo<(outs GPR:$dst),
                                    (ins GPR:$tmp, bare_symbol:$src), [],
                                    "la.tls.ie", "$dst, $tmp, $src">,
                             Requires<[IsLA64]>;
-def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
-                                   (ins GPR:$tmp, bare_symbol:$src), [],
-                                   "la.tls.ld", "$dst, $tmp, $src">,
-                            Requires<[IsLA64]>;
-def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
-                                   (ins GPR:$tmp, bare_symbol:$src), [],
-                                   "la.tls.gd", "$dst, $tmp, $src">,
-                            Requires<[IsLA64]>;
 } // Defs = [R20], Size = 20
 }
 
diff --git a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
index 30a19adcfdce4..d69a984ea6f07 100644
--- a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
+++ b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
@@ -74,46 +74,46 @@ ret:
 define void @test_la_got(i32 signext %n) {
 ; LA32-LABEL: test_la_got:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    pcalau12i $a1, %got_pc_hi20(g)
+; LA32-NEXT:    ld.w $a1, $a1, %got_pc_lo12(g)
+; LA32-NEXT:    move $a2, $zero
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB1_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
-; LA32-NEXT:    ld.w $a2, $a2, %got_pc_lo12(g)
-; LA32-NEXT:    ld.w $a2, $a2, 0
-; LA32-NEXT:    addi.w $a1, $a1, 1
-; LA32-NEXT:    blt $a1, $a0, .LBB1_1
+; LA32-NEXT:    ld.w $a3, $a1, 0
+; LA32-NEXT:    addi.w $a2, $a2, 1
+; LA32-NEXT:    blt $a2, $a0, .LBB1_1
 ; LA32-NEXT:  # %bb.2: # %ret
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_la_got:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    pcalau12i $a1, %got_pc_hi20(g)
+; LA64-NEXT:    ld.d $a1, $a1, %got_pc_lo12(g)
+; LA64-NEXT:    move $a2, $zero
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB1_1: # %loop
 ; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
-; LA64-NEXT:    ld.d $a2, $a2, %got_pc_lo12(g)
-; LA64-NEXT:    ld.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a1, $a1, 1
-; LA64-NEXT:    blt $a1, $a0, .LBB1_1
+; LA64-NEXT:    ld.w $a3, $a1, 0
+; LA64-NEXT:    addi.w $a2, $a2, 1
+; LA64-NEXT:    blt $a2, $a0, .LBB1_1
 ; LA64-NEXT:  # %bb.2: # %ret
 ; LA64-NEXT:    ret
 ;
 ; LA64LARGE-LABEL: test_la_got:
 ; LA64LARGE:       # %bb.0: # %entry
-; LA64LARGE-NEXT:    move $a1, $zero
-; LA64LARGE-NEXT:    .p2align 4, , 16
-; LA64LARGE-NEXT:  .LBB1_1: # %loop
-; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT:    pcalau12i $a2, %got_pc_hi20(g)
+; LA64LARGE-NEXT:    pcalau12i $a1, %got_pc_hi20(g)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(g)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(g)
 ; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(g)
-; LA64LARGE-NEXT:    ldx.d $a2, $t8, $a2
-; LA64LARGE-NEXT:    ld.w $a2, $a2, 0
-; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
-; LA64LARGE-NEXT:    blt $a1, $a0, .LBB1_1
+; LA64LARGE-NEXT:    ldx.d $a1, $t8, $a1
+; LA64LARGE-NEXT:    move $a2, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB1_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    ld.w $a3, $a1, 0
+; LA64LARGE-NEXT:    addi.w $a2, $a2, 1
+; LA64LARGE-NEXT:    blt $a2, $a0, .LBB1_1
 ; LA64LARGE-NEXT:  # %bb.2: # %ret
 ; LA64LARGE-NEXT:    ret
 entry:
@@ -135,14 +135,14 @@ ret:
 define void @test_la_tls_ie(i32 signext %n) {
 ; LA32-LABEL: test_la_tls_ie:
 ; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    pcalau12i $a1, %ie_pc_hi20(ie)
+; LA32-NEXT:    ld.w $a2, $a1, %ie_pc_lo12(ie)
 ; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    add.w $a2, $a2, $tp
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB2_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
-; LA32-NEXT:    ld.w $a2, $a2, %ie_pc_lo12(ie)
-; LA32-NEXT:    add.w $a2, $a2, $tp
-; LA32-NEXT:    ld.w $a2, $a2, 0
+; LA32-NEXT:    ld.w $a3, $a2, 0
 ; LA32-NEXT:    addi.w $a1, $a1, 1
 ; LA32-NEXT:    blt $a1, $a0, .LBB2_1
 ; LA32-NEXT:  # %bb.2: # %ret
@@ -150,32 +150,32 @@ define void @test_la_tls_ie(i32 signext %n) {
 ;
 ; LA64-LABEL: test_la_tls_ie:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    pcalau12i $a1, %ie_pc_hi20(ie)
+; LA64-NEXT:    ld.d $a1, $a1, %ie_pc_lo12(ie)
+; LA64-NEXT:    move $a2, $zero
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB2_1: # %loop
 ; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
-; LA64-NEXT:    ld.d $a2, $a2, %ie_pc_lo12(ie)
-; LA64-NEXT:    ldx.w $a2, $a2, $tp
-; LA64-NEXT:    addi.w $a1, $a1, 1
-; LA64-NEXT:    blt $a1, $a0, .LBB2_1
+; LA64-NEXT:    ldx.w $a3, $a1, $tp
+; LA64-NEXT:    addi.w $a2, $a2, 1
+; LA64-NEXT:    blt $a2, $a0, .LBB2_1
 ; LA64-NEXT:  # %bb.2: # %ret
 ; LA64-NEXT:    ret
 ;
 ; LA64LARGE-LABEL: test_la_tls_ie:
 ; LA64LARGE:       # %bb.0: # %entry
-; LA64LARGE-NEXT:    move $a1, $zero
-; LA64LARGE-NEXT:    .p2align 4, , 16
-; LA64LARGE-NEXT:  .LBB2_1: # %loop
-; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT:    pcalau12i $a2, %ie_pc_hi20(ie)
+; LA64LARGE-NEXT:    pcalau12i $a1, %ie_pc_hi20(ie)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ie)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ie)
 ; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
-; LA64LARGE-NEXT:    ldx.d $a2, $t8, $a2
-; LA64LARGE-NEXT:    ldx.w $a2, $a2, $tp
-; LA64LARGE-NEXT:    addi.w $a1, $a1, 1
-; LA64LARGE-NEXT:    blt $a1, $a0, .LBB2_1
+; LA64LARGE-NEXT:    ldx.d $a1, $t8, $a1
+; LA64LARGE-NEXT:    move $a2, $zero
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB2_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    ldx.w $a3, $a1, $tp
+; LA64LARGE-NEXT:    addi.w $a2, $a2, 1
+; LA64LARGE-NEXT:    blt $a2, $a0, .LBB2_1
 ; LA64LARGE-NEXT:  # %bb.2: # %ret
 ; LA64LARGE-NEXT:    ret
 entry:
@@ -202,21 +202,25 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 0 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    .cfi_offset 22, -8
 ; LA32-NEXT:    .cfi_offset 23, -12
+; LA32-NEXT:    .cfi_offset 24, -16
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $s0, $zero
+; LA32-NEXT:    move $s1, $zero
+; LA32-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA32-NEXT:    addi.w $s0, $a0, %got_pc_lo12(ld)
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB3_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
-; LA32-NEXT:    addi.w $a0, $a0, %got_pc_lo12(ld)
+; LA32-NEXT:    move $a0, $s0
 ; LA32-NEXT:    bl %plt(__tls_get_addr)
 ; LA32-NEXT:    ld.w $a0, $a0, 0
-; LA32-NEXT:    addi.w $s0, $s0, 1
-; LA32-NEXT:    blt $s0, $fp, .LBB3_1
+; LA32-NEXT:    addi.w $s1, $s1, 1
+; LA32-NEXT:    blt $s1, $fp, .LBB3_1
 ; LA32-NEXT:  # %bb.2: # %ret
+; LA32-NEXT:    ld.w $s1, $sp, 0 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -230,21 +234,25 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
 ; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
 ; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s1, $sp, 0 # 8-byte Folded Spill
 ; LA64-NEXT:    .cfi_offset 1, -8
 ; LA64-NEXT:    .cfi_offset 22, -16
 ; LA64-NEXT:    .cfi_offset 23, -24
+; LA64-NEXT:    .cfi_offset 24, -32
 ; LA64-NEXT:    move $fp, $a0
-; LA64-NEXT:    move $s0, $zero
+; LA64-NEXT:    move $s1, $zero
+; LA64-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64-NEXT:    addi.d $s0, $a0, %got_pc_lo12(ld)
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB3_1: # %loop
 ; LA64-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
-; LA64-NEXT:    addi.d $a0, $a0, %got_pc_lo12(ld)
+; LA64-NEXT:    move $a0, $s0
 ; LA64-NEXT:    bl %plt(__tls_get_addr)
 ; LA64-NEXT:    ld.w $a0, $a0, 0
-; LA64-NEXT:    addi.w $s0, $s0, 1
-; LA64-NEXT:    blt $s0, $fp, .LBB3_1
+; LA64-NEXT:    addi.w $s1, $s1, 1
+; LA64-NEXT:    blt $s1, $fp, .LBB3_1
 ; LA64-NEXT:  # %bb.2: # %ret
+; LA64-NEXT:    ld.d $s1, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -258,19 +266,22 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA64LARGE-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64LARGE-NEXT:    st.d $s1, $sp, 0 # 8-byte Folded Spill
 ; LA64LARGE-NEXT:    .cfi_offset 1, -8
 ; LA64LARGE-NEXT:    .cfi_offset 22, -16
 ; LA64LARGE-NEXT:    .cfi_offset 23, -24
+; LA64LARGE-NEXT:    .cfi_offset 24, -32
 ; LA64LARGE-NEXT:    move $fp, $a0
-; LA64LARGE-NEXT:    move $s0, $zero
-; LA64LARGE-NEXT:    .p2align 4, , 16
-; LA64LARGE-NEXT:  .LBB3_1: # %loop
-; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT:    pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64LARGE-NEXT:    move $s1, $zero
+; LA64LARGE-NEXT:    pcalau12i $s0, %ld_pc_hi20(ld)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %got_pc_lo12(ld)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %got64_pc_lo20(ld)
 ; LA64LARGE-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(ld)
-; LA64LARGE-NEXT:    add.d $a0, $t8, $a0
+; LA64LARGE-NEXT:    add.d $s0, $t8, $s0
+; LA64LARGE-NEXT:    .p2align 4, , 16
+; LA64LARGE-NEXT:  .LBB3_1: # %loop
+; LA64LARGE-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT:    move $a0, $s0
 ; LA64LARGE-NEXT:    pcalau12i $ra, %pc_hi20(__tls_get_addr)
 ; LA64LARGE-NEXT:    addi.d $t8, $zero, %pc_lo12(__tls_get_addr)
 ; LA64LARGE-NEXT:    lu32i.d $t8, %pc64_lo20(__tls_get_addr)
@@ -278,9 +289,10 @@ define void @test_la_tls_ld(i32 signext %n) {
 ; LA64LARGE-NEXT:    add.d $ra, $t8, $ra
 ; LA64LARGE-NEXT:    jirl $ra, $ra, 0
 ; LA64LARGE-NEXT:    ld.w $a0, $a0, 0
-; LA64LARGE-NEXT:    addi.w $s0, $s0, 1
-; LA64LARGE-NEXT:    blt $s0, $fp, .LBB3_1
+; LA64LARGE-NEXT:    addi.w $s1, $s1, 1
+; LA64LARGE-NEXT:    blt $s1, $fp, .LBB3_1
 ; LA64LARGE-NEXT:  # %bb.2: # %ret
+; LA64LARGE-NEXT:    ld.d $s1, $sp, 0 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
 ; LA64LARGE-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -370,18 +382,21 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 0 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $s0, $zero
+; LA32-NEXT:    move $s1, $zero
+; LA32-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
+; LA32-NEXT:    addi.w $s0, $a0, %got_pc_lo12(gd)
 ; LA32-NEXT:    .p2align 4, , 16
 ; LA32-NEXT:  .LBB5_1: # %loop
 ; LA32-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA32-NEXT:    pcalau12i $a0, %gd_pc_hi20(gd)
-; LA32-NEXT:    addi.w...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/94723

