[llvm] bfad875 - [LoongArch] Ensure PseudoLA* can be hoisted (#94723)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 25 03:53:20 PDT 2024
Author: hev
Date: 2024-06-25T18:53:17+08:00
New Revision: bfad8757159a3def7c07d4f5ef23ed76fafdf441
URL: https://github.com/llvm/llvm-project/commit/bfad8757159a3def7c07d4f5ef23ed76fafdf441
DIFF: https://github.com/llvm/llvm-project/commit/bfad8757159a3def7c07d4f5ef23ed76fafdf441.diff
LOG: [LoongArch] Ensure PseudoLA* can be hoisted (#94723)
Since we mark the pseudos as mayLoad but do not provide any MMOs,
isSafeToMove conservatively returns false, stopping MachineLICM from
hoisting the instructions. PseudoLA_TLS_{LD,GD} do not actually expand
to loads, so stop marking them as mayLoad to allow them to be hoisted,
and for the others make sure to add MMOs during lowering to indicate
they're GOT loads and thus can be freely moved.
Added:
Modified:
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
llvm/lib/Target/LoongArch/LoongArchISelLowering.h
llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 1721287dab4dd..f0a18b42f481e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -785,6 +785,7 @@ SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
SDLoc DL(N);
EVT Ty = getPointerTy(DAG.getDataLayout());
SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+ SDValue Load;
switch (M) {
default:
@@ -796,33 +797,49 @@ SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
// This is not actually used, but is necessary for successfully matching
// the PseudoLA_*_LARGE nodes.
SDValue Tmp = DAG.getConstant(0, DL, Ty);
- if (IsLocal)
+ if (IsLocal) {
// This generates the pattern (PseudoLA_PCREL_LARGE tmp sym), that
// eventually becomes the desired 5-insn code sequence.
- return SDValue(DAG.getMachineNode(LoongArch::PseudoLA_PCREL_LARGE, DL, Ty,
+ Load = SDValue(DAG.getMachineNode(LoongArch::PseudoLA_PCREL_LARGE, DL, Ty,
Tmp, Addr),
0);
-
- // This generates the pattern (PseudoLA_GOT_LARGE tmp sym), that eventually
- // becomes the desired 5-insn code sequence.
- return SDValue(
- DAG.getMachineNode(LoongArch::PseudoLA_GOT_LARGE, DL, Ty, Tmp, Addr),
- 0);
+ } else {
+ // This generates the pattern (PseudoLA_GOT_LARGE tmp sym), that
+ // eventually becomes the desired 5-insn code sequence.
+ Load = SDValue(
+ DAG.getMachineNode(LoongArch::PseudoLA_GOT_LARGE, DL, Ty, Tmp, Addr),
+ 0);
+ }
+ break;
}
case CodeModel::Small:
case CodeModel::Medium:
- if (IsLocal)
+ if (IsLocal) {
// This generates the pattern (PseudoLA_PCREL sym), which expands to
// (addi.w/d (pcalau12i %pc_hi20(sym)) %pc_lo12(sym)).
- return SDValue(
+ Load = SDValue(
DAG.getMachineNode(LoongArch::PseudoLA_PCREL, DL, Ty, Addr), 0);
+ } else {
+ // This generates the pattern (PseudoLA_GOT sym), which expands to (ld.w/d
+ // (pcalau12i %got_pc_hi20(sym)) %got_pc_lo12(sym)).
+ Load =
+ SDValue(DAG.getMachineNode(LoongArch::PseudoLA_GOT, DL, Ty, Addr), 0);
+ }
+ }
- // This generates the pattern (PseudoLA_GOT sym), which expands to (ld.w/d
- // (pcalau12i %got_pc_hi20(sym)) %got_pc_lo12(sym)).
- return SDValue(DAG.getMachineNode(LoongArch::PseudoLA_GOT, DL, Ty, Addr),
- 0);
+ if (!IsLocal) {
+ // Mark the load instruction as invariant to enable hoisting in MachineLICM.
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *MemOp = MF.getMachineMemOperand(
+ MachinePointerInfo::getGOT(MF),
+ MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+ MachineMemOperand::MOInvariant,
+ LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+ DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
}
+
+ return Load;
}
SDValue LoongArchTargetLowering::lowerBlockAddress(SDValue Op,
@@ -860,7 +877,7 @@ SDValue LoongArchTargetLowering::lowerGlobalAddress(SDValue Op,
SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
SelectionDAG &DAG,
- unsigned Opc,
+ unsigned Opc, bool UseGOT,
bool Large) const {
SDLoc DL(N);
EVT Ty = getPointerTy(DAG.getDataLayout());
@@ -873,6 +890,16 @@ SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
SDValue Offset = Large
? SDValue(DAG.getMachineNode(Opc, DL, Ty, Tmp, Addr), 0)
: SDValue(DAG.getMachineNode(Opc, DL, Ty, Addr), 0);
+ if (UseGOT) {
+ // Mark the load instruction as invariant to enable hoisting in MachineLICM.
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *MemOp = MF.getMachineMemOperand(
+ MachinePointerInfo::getGOT(MF),
+ MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+ MachineMemOperand::MOInvariant,
+ LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
+ DAG.setNodeMemRefs(cast<MachineSDNode>(Offset.getNode()), {MemOp});
+ }
// Add the thread pointer.
return DAG.getNode(ISD::ADD, DL, Ty, Offset,
@@ -976,13 +1003,14 @@ LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
return getStaticTLSAddr(N, DAG,
Large ? LoongArch::PseudoLA_TLS_IE_LARGE
: LoongArch::PseudoLA_TLS_IE,
- Large);
+ /*UseGOT=*/true, Large);
case TLSModel::LocalExec:
// This model is used when static linking as the TLS offsets are resolved
// during program linking.
//
// This node doesn't need an extra argument for the large code model.
- return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE);
+ return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
+ /*UseGOT=*/false);
}
return getTLSDescAddr(N, DAG,
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 9328831a17a30..f4c57f80fdbe4 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -267,7 +267,7 @@ class LoongArchTargetLowering : public TargetLowering {
SDValue getAddr(NodeTy *N, SelectionDAG &DAG, CodeModel::Model M,
bool IsLocal = true) const;
SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
- unsigned Opc, bool Large = false) const;
+ unsigned Opc, bool UseGOT, bool Large = false) const;
SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
unsigned Opc, bool Large = false) const;
SDValue getTLSDescAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index ff63af5dec4dc..67c3dfd3b2599 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -1582,13 +1582,26 @@ def PseudoLA_ABS_LARGE : Pseudo<(outs GPR:$dst),
"la.abs", "$dst, $src">;
def PseudoLA_PCREL : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
"la.pcrel", "$dst, $src">;
-let Defs = [R20], Size = 20 in
+def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+ "la.tls.ld", "$dst, $src">;
+def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+ "la.tls.gd", "$dst, $src">;
+let Defs = [R20], Size = 20 in {
def PseudoLA_PCREL_LARGE : Pseudo<(outs GPR:$dst),
(ins GPR:$tmp, bare_symbol:$src), [],
"la.pcrel", "$dst, $tmp, $src">,
Requires<[IsLA64]>;
def PseudoLA_TLS_LE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
"la.tls.le", "$dst, $src">;
+def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
+ (ins GPR:$tmp, bare_symbol:$src), [],
+ "la.tls.ld", "$dst, $tmp, $src">,
+ Requires<[IsLA64]>;
+def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
+ (ins GPR:$tmp, bare_symbol:$src), [],
+ "la.tls.gd", "$dst, $tmp, $src">,
+ Requires<[IsLA64]>;
+} // Defs = [R20], Size = 20
}
let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
isAsmParserOnly = 1 in {
@@ -1596,10 +1609,6 @@ def PseudoLA_GOT : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
"la.got", "$dst, $src">;
def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
"la.tls.ie", "$dst, $src">;
-def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
- "la.tls.ld", "$dst, $src">;
-def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
- "la.tls.gd", "$dst, $src">;
let Defs = [R20], Size = 20 in {
def PseudoLA_GOT_LARGE : Pseudo<(outs GPR:$dst),
(ins GPR:$tmp, bare_symbol:$src), [],
@@ -1609,14 +1618,6 @@ def PseudoLA_TLS_IE_LARGE : Pseudo<(outs GPR:$dst),
(ins GPR:$tmp, bare_symbol:$src), [],
"la.tls.ie", "$dst, $tmp, $src">,
Requires<[IsLA64]>;
-def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
- (ins GPR:$tmp, bare_symbol:$src), [],
- "la.tls.ld", "$dst, $tmp, $src">,
- Requires<[IsLA64]>;
-def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
- (ins GPR:$tmp, bare_symbol:$src), [],
- "la.tls.gd", "$dst, $tmp, $src">,
- Requires<[IsLA64]>;
} // Defs = [R20], Size = 20
}
diff --git a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
index cd308a1495914..ed1a24e82b4e4 100644
--- a/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
+++ b/llvm/test/CodeGen/LoongArch/machinelicm-address-pseudos.ll
@@ -74,46 +74,46 @@ ret:
define void @test_la_got(i32 signext %n) {
; LA32-LABEL: test_la_got:
; LA32: # %bb.0: # %entry
-; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: pcalau12i $a1, %got_pc_hi20(g)
+; LA32-NEXT: ld.w $a1, $a1, %got_pc_lo12(g)
+; LA32-NEXT: move $a2, $zero
; LA32-NEXT: .p2align 4, , 16
; LA32-NEXT: .LBB1_1: # %loop
; LA32-NEXT: # =>This Inner Loop Header: Depth=1
-; LA32-NEXT: pcalau12i $a2, %got_pc_hi20(g)
-; LA32-NEXT: ld.w $a2, $a2, %got_pc_lo12(g)
-; LA32-NEXT: ld.w $zero, $a2, 0
-; LA32-NEXT: addi.w $a1, $a1, 1
-; LA32-NEXT: blt $a1, $a0, .LBB1_1
+; LA32-NEXT: ld.w $zero, $a1, 0
+; LA32-NEXT: addi.w $a2, $a2, 1
+; LA32-NEXT: blt $a2, $a0, .LBB1_1
; LA32-NEXT: # %bb.2: # %ret
; LA32-NEXT: ret
;
; LA64-LABEL: test_la_got:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: move $a1, $zero
+; LA64-NEXT: pcalau12i $a1, %got_pc_hi20(g)
+; LA64-NEXT: ld.d $a1, $a1, %got_pc_lo12(g)
+; LA64-NEXT: move $a2, $zero
; LA64-NEXT: .p2align 4, , 16
; LA64-NEXT: .LBB1_1: # %loop
; LA64-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT: pcalau12i $a2, %got_pc_hi20(g)
-; LA64-NEXT: ld.d $a2, $a2, %got_pc_lo12(g)
-; LA64-NEXT: ld.w $zero, $a2, 0
-; LA64-NEXT: addi.w $a1, $a1, 1
-; LA64-NEXT: blt $a1, $a0, .LBB1_1
+; LA64-NEXT: ld.w $zero, $a1, 0
+; LA64-NEXT: addi.w $a2, $a2, 1
+; LA64-NEXT: blt $a2, $a0, .LBB1_1
; LA64-NEXT: # %bb.2: # %ret
; LA64-NEXT: ret
;
; LA64LARGE-LABEL: test_la_got:
; LA64LARGE: # %bb.0: # %entry
-; LA64LARGE-NEXT: move $a1, $zero
-; LA64LARGE-NEXT: .p2align 4, , 16
-; LA64LARGE-NEXT: .LBB1_1: # %loop
-; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT: pcalau12i $a2, %got_pc_hi20(g)
+; LA64LARGE-NEXT: pcalau12i $a1, %got_pc_hi20(g)
; LA64LARGE-NEXT: addi.d $t8, $zero, %got_pc_lo12(g)
; LA64LARGE-NEXT: lu32i.d $t8, %got64_pc_lo20(g)
; LA64LARGE-NEXT: lu52i.d $t8, $t8, %got64_pc_hi12(g)
-; LA64LARGE-NEXT: ldx.d $a2, $t8, $a2
-; LA64LARGE-NEXT: ld.w $zero, $a2, 0
-; LA64LARGE-NEXT: addi.w $a1, $a1, 1
-; LA64LARGE-NEXT: blt $a1, $a0, .LBB1_1
+; LA64LARGE-NEXT: ldx.d $a1, $t8, $a1
+; LA64LARGE-NEXT: move $a2, $zero
+; LA64LARGE-NEXT: .p2align 4, , 16
+; LA64LARGE-NEXT: .LBB1_1: # %loop
+; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT: ld.w $zero, $a1, 0
+; LA64LARGE-NEXT: addi.w $a2, $a2, 1
+; LA64LARGE-NEXT: blt $a2, $a0, .LBB1_1
; LA64LARGE-NEXT: # %bb.2: # %ret
; LA64LARGE-NEXT: ret
entry:
@@ -135,13 +135,13 @@ ret:
define void @test_la_tls_ie(i32 signext %n) {
; LA32-LABEL: test_la_tls_ie:
; LA32: # %bb.0: # %entry
+; LA32-NEXT: pcalau12i $a1, %ie_pc_hi20(ie)
+; LA32-NEXT: ld.w $a2, $a1, %ie_pc_lo12(ie)
; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: add.w $a2, $a2, $tp
; LA32-NEXT: .p2align 4, , 16
; LA32-NEXT: .LBB2_1: # %loop
; LA32-NEXT: # =>This Inner Loop Header: Depth=1
-; LA32-NEXT: pcalau12i $a2, %ie_pc_hi20(ie)
-; LA32-NEXT: ld.w $a2, $a2, %ie_pc_lo12(ie)
-; LA32-NEXT: add.w $a2, $a2, $tp
; LA32-NEXT: ld.w $zero, $a2, 0
; LA32-NEXT: addi.w $a1, $a1, 1
; LA32-NEXT: blt $a1, $a0, .LBB2_1
@@ -150,32 +150,32 @@ define void @test_la_tls_ie(i32 signext %n) {
;
; LA64-LABEL: test_la_tls_ie:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: move $a1, $zero
+; LA64-NEXT: pcalau12i $a1, %ie_pc_hi20(ie)
+; LA64-NEXT: ld.d $a1, $a1, %ie_pc_lo12(ie)
+; LA64-NEXT: move $a2, $zero
; LA64-NEXT: .p2align 4, , 16
; LA64-NEXT: .LBB2_1: # %loop
; LA64-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT: pcalau12i $a2, %ie_pc_hi20(ie)
-; LA64-NEXT: ld.d $a2, $a2, %ie_pc_lo12(ie)
-; LA64-NEXT: ldx.w $zero, $a2, $tp
-; LA64-NEXT: addi.w $a1, $a1, 1
-; LA64-NEXT: blt $a1, $a0, .LBB2_1
+; LA64-NEXT: ldx.w $zero, $a1, $tp
+; LA64-NEXT: addi.w $a2, $a2, 1
+; LA64-NEXT: blt $a2, $a0, .LBB2_1
; LA64-NEXT: # %bb.2: # %ret
; LA64-NEXT: ret
;
; LA64LARGE-LABEL: test_la_tls_ie:
; LA64LARGE: # %bb.0: # %entry
-; LA64LARGE-NEXT: move $a1, $zero
-; LA64LARGE-NEXT: .p2align 4, , 16
-; LA64LARGE-NEXT: .LBB2_1: # %loop
-; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT: pcalau12i $a2, %ie_pc_hi20(ie)
+; LA64LARGE-NEXT: pcalau12i $a1, %ie_pc_hi20(ie)
; LA64LARGE-NEXT: addi.d $t8, $zero, %ie_pc_lo12(ie)
; LA64LARGE-NEXT: lu32i.d $t8, %ie64_pc_lo20(ie)
; LA64LARGE-NEXT: lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
-; LA64LARGE-NEXT: ldx.d $a2, $t8, $a2
-; LA64LARGE-NEXT: ldx.w $zero, $a2, $tp
-; LA64LARGE-NEXT: addi.w $a1, $a1, 1
-; LA64LARGE-NEXT: blt $a1, $a0, .LBB2_1
+; LA64LARGE-NEXT: ldx.d $a1, $t8, $a1
+; LA64LARGE-NEXT: move $a2, $zero
+; LA64LARGE-NEXT: .p2align 4, , 16
+; LA64LARGE-NEXT: .LBB2_1: # %loop
+; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT: ldx.w $zero, $a1, $tp
+; LA64LARGE-NEXT: addi.w $a2, $a2, 1
+; LA64LARGE-NEXT: blt $a2, $a0, .LBB2_1
; LA64LARGE-NEXT: # %bb.2: # %ret
; LA64LARGE-NEXT: ret
entry:
@@ -202,21 +202,25 @@ define void @test_la_tls_ld(i32 signext %n) {
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s1, $sp, 0 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: .cfi_offset 22, -8
; LA32-NEXT: .cfi_offset 23, -12
+; LA32-NEXT: .cfi_offset 24, -16
; LA32-NEXT: move $fp, $a0
-; LA32-NEXT: move $s0, $zero
+; LA32-NEXT: move $s1, $zero
+; LA32-NEXT: pcalau12i $a0, %ld_pc_hi20(ld)
+; LA32-NEXT: addi.w $s0, $a0, %got_pc_lo12(ld)
; LA32-NEXT: .p2align 4, , 16
; LA32-NEXT: .LBB3_1: # %loop
; LA32-NEXT: # =>This Inner Loop Header: Depth=1
-; LA32-NEXT: pcalau12i $a0, %ld_pc_hi20(ld)
-; LA32-NEXT: addi.w $a0, $a0, %got_pc_lo12(ld)
+; LA32-NEXT: move $a0, $s0
; LA32-NEXT: bl %plt(__tls_get_addr)
; LA32-NEXT: ld.w $zero, $a0, 0
-; LA32-NEXT: addi.w $s0, $s0, 1
-; LA32-NEXT: blt $s0, $fp, .LBB3_1
+; LA32-NEXT: addi.w $s1, $s1, 1
+; LA32-NEXT: blt $s1, $fp, .LBB3_1
; LA32-NEXT: # %bb.2: # %ret
+; LA32-NEXT: ld.w $s1, $sp, 0 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 4 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -230,21 +234,25 @@ define void @test_la_tls_ld(i32 signext %n) {
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s1, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: .cfi_offset 1, -8
; LA64-NEXT: .cfi_offset 22, -16
; LA64-NEXT: .cfi_offset 23, -24
+; LA64-NEXT: .cfi_offset 24, -32
; LA64-NEXT: move $fp, $a0
-; LA64-NEXT: move $s0, $zero
+; LA64-NEXT: move $s1, $zero
+; LA64-NEXT: pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64-NEXT: addi.d $s0, $a0, %got_pc_lo12(ld)
; LA64-NEXT: .p2align 4, , 16
; LA64-NEXT: .LBB3_1: # %loop
; LA64-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT: pcalau12i $a0, %ld_pc_hi20(ld)
-; LA64-NEXT: addi.d $a0, $a0, %got_pc_lo12(ld)
+; LA64-NEXT: move $a0, $s0
; LA64-NEXT: bl %plt(__tls_get_addr)
; LA64-NEXT: ld.w $zero, $a0, 0
-; LA64-NEXT: addi.w $s0, $s0, 1
-; LA64-NEXT: blt $s0, $fp, .LBB3_1
+; LA64-NEXT: addi.w $s1, $s1, 1
+; LA64-NEXT: blt $s1, $fp, .LBB3_1
; LA64-NEXT: # %bb.2: # %ret
+; LA64-NEXT: ld.d $s1, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -258,19 +266,22 @@ define void @test_la_tls_ld(i32 signext %n) {
; LA64LARGE-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64LARGE-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64LARGE-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64LARGE-NEXT: st.d $s1, $sp, 0 # 8-byte Folded Spill
; LA64LARGE-NEXT: .cfi_offset 1, -8
; LA64LARGE-NEXT: .cfi_offset 22, -16
; LA64LARGE-NEXT: .cfi_offset 23, -24
+; LA64LARGE-NEXT: .cfi_offset 24, -32
; LA64LARGE-NEXT: move $fp, $a0
-; LA64LARGE-NEXT: move $s0, $zero
-; LA64LARGE-NEXT: .p2align 4, , 16
-; LA64LARGE-NEXT: .LBB3_1: # %loop
-; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT: pcalau12i $a0, %ld_pc_hi20(ld)
+; LA64LARGE-NEXT: move $s1, $zero
+; LA64LARGE-NEXT: pcalau12i $s0, %ld_pc_hi20(ld)
; LA64LARGE-NEXT: addi.d $t8, $zero, %got_pc_lo12(ld)
; LA64LARGE-NEXT: lu32i.d $t8, %got64_pc_lo20(ld)
; LA64LARGE-NEXT: lu52i.d $t8, $t8, %got64_pc_hi12(ld)
-; LA64LARGE-NEXT: add.d $a0, $t8, $a0
+; LA64LARGE-NEXT: add.d $s0, $t8, $s0
+; LA64LARGE-NEXT: .p2align 4, , 16
+; LA64LARGE-NEXT: .LBB3_1: # %loop
+; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT: move $a0, $s0
; LA64LARGE-NEXT: pcalau12i $ra, %pc_hi20(__tls_get_addr)
; LA64LARGE-NEXT: addi.d $t8, $zero, %pc_lo12(__tls_get_addr)
; LA64LARGE-NEXT: lu32i.d $t8, %pc64_lo20(__tls_get_addr)
@@ -278,9 +289,10 @@ define void @test_la_tls_ld(i32 signext %n) {
; LA64LARGE-NEXT: add.d $ra, $t8, $ra
; LA64LARGE-NEXT: jirl $ra, $ra, 0
; LA64LARGE-NEXT: ld.w $zero, $a0, 0
-; LA64LARGE-NEXT: addi.w $s0, $s0, 1
-; LA64LARGE-NEXT: blt $s0, $fp, .LBB3_1
+; LA64LARGE-NEXT: addi.w $s1, $s1, 1
+; LA64LARGE-NEXT: blt $s1, $fp, .LBB3_1
; LA64LARGE-NEXT: # %bb.2: # %ret
+; LA64LARGE-NEXT: ld.d $s1, $sp, 0 # 8-byte Folded Reload
; LA64LARGE-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64LARGE-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64LARGE-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -370,18 +382,21 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s1, $sp, 0 # 4-byte Folded Spill
; LA32-NEXT: move $fp, $a0
-; LA32-NEXT: move $s0, $zero
+; LA32-NEXT: move $s1, $zero
+; LA32-NEXT: pcalau12i $a0, %gd_pc_hi20(gd)
+; LA32-NEXT: addi.w $s0, $a0, %got_pc_lo12(gd)
; LA32-NEXT: .p2align 4, , 16
; LA32-NEXT: .LBB5_1: # %loop
; LA32-NEXT: # =>This Inner Loop Header: Depth=1
-; LA32-NEXT: pcalau12i $a0, %gd_pc_hi20(gd)
-; LA32-NEXT: addi.w $a0, $a0, %got_pc_lo12(gd)
+; LA32-NEXT: move $a0, $s0
; LA32-NEXT: bl %plt(__tls_get_addr)
; LA32-NEXT: ld.w $zero, $a0, 0
-; LA32-NEXT: addi.w $s0, $s0, 1
-; LA32-NEXT: blt $s0, $fp, .LBB5_1
+; LA32-NEXT: addi.w $s1, $s1, 1
+; LA32-NEXT: blt $s1, $fp, .LBB5_1
; LA32-NEXT: # %bb.2: # %ret
+; LA32-NEXT: ld.w $s1, $sp, 0 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 4 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -394,18 +409,21 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s1, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
-; LA64-NEXT: move $s0, $zero
+; LA64-NEXT: move $s1, $zero
+; LA64-NEXT: pcalau12i $a0, %gd_pc_hi20(gd)
+; LA64-NEXT: addi.d $s0, $a0, %got_pc_lo12(gd)
; LA64-NEXT: .p2align 4, , 16
; LA64-NEXT: .LBB5_1: # %loop
; LA64-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT: pcalau12i $a0, %gd_pc_hi20(gd)
-; LA64-NEXT: addi.d $a0, $a0, %got_pc_lo12(gd)
+; LA64-NEXT: move $a0, $s0
; LA64-NEXT: bl %plt(__tls_get_addr)
; LA64-NEXT: ld.w $zero, $a0, 0
-; LA64-NEXT: addi.w $s0, $s0, 1
-; LA64-NEXT: blt $s0, $fp, .LBB5_1
+; LA64-NEXT: addi.w $s1, $s1, 1
+; LA64-NEXT: blt $s1, $fp, .LBB5_1
; LA64-NEXT: # %bb.2: # %ret
+; LA64-NEXT: ld.d $s1, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
@@ -418,16 +436,18 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
; LA64LARGE-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64LARGE-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64LARGE-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64LARGE-NEXT: st.d $s1, $sp, 0 # 8-byte Folded Spill
; LA64LARGE-NEXT: move $fp, $a0
-; LA64LARGE-NEXT: move $s0, $zero
-; LA64LARGE-NEXT: .p2align 4, , 16
-; LA64LARGE-NEXT: .LBB5_1: # %loop
-; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
-; LA64LARGE-NEXT: pcalau12i $a0, %gd_pc_hi20(gd)
+; LA64LARGE-NEXT: move $s1, $zero
+; LA64LARGE-NEXT: pcalau12i $s0, %gd_pc_hi20(gd)
; LA64LARGE-NEXT: addi.d $t8, $zero, %got_pc_lo12(gd)
; LA64LARGE-NEXT: lu32i.d $t8, %got64_pc_lo20(gd)
; LA64LARGE-NEXT: lu52i.d $t8, $t8, %got64_pc_hi12(gd)
-; LA64LARGE-NEXT: add.d $a0, $t8, $a0
+; LA64LARGE-NEXT: add.d $s0, $t8, $s0
+; LA64LARGE-NEXT: .p2align 4, , 16
+; LA64LARGE-NEXT: .LBB5_1: # %loop
+; LA64LARGE-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64LARGE-NEXT: move $a0, $s0
; LA64LARGE-NEXT: pcalau12i $ra, %pc_hi20(__tls_get_addr)
; LA64LARGE-NEXT: addi.d $t8, $zero, %pc_lo12(__tls_get_addr)
; LA64LARGE-NEXT: lu32i.d $t8, %pc64_lo20(__tls_get_addr)
@@ -435,9 +455,10 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
; LA64LARGE-NEXT: add.d $ra, $t8, $ra
; LA64LARGE-NEXT: jirl $ra, $ra, 0
; LA64LARGE-NEXT: ld.w $zero, $a0, 0
-; LA64LARGE-NEXT: addi.w $s0, $s0, 1
-; LA64LARGE-NEXT: blt $s0, $fp, .LBB5_1
+; LA64LARGE-NEXT: addi.w $s1, $s1, 1
+; LA64LARGE-NEXT: blt $s1, $fp, .LBB5_1
; LA64LARGE-NEXT: # %bb.2: # %ret
+; LA64LARGE-NEXT: ld.d $s1, $sp, 0 # 8-byte Folded Reload
; LA64LARGE-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64LARGE-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64LARGE-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll b/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
index a6b04e9536d51..3e8ef59a75633 100644
--- a/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
+++ b/llvm/test/CodeGen/LoongArch/psabi-restricted-scheduling.ll
@@ -32,13 +32,13 @@ define void @foo() nounwind {
; MEDIUM_NO_SCH-NEXT: jirl $ra, $ra, 0
; MEDIUM_NO_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(gd)
; MEDIUM_NO_SCH-NEXT: ld.d $a0, $a0, %ie_pc_lo12(gd)
+; MEDIUM_NO_SCH-NEXT: pcalau12i $a1, %ie_pc_hi20(ld)
+; MEDIUM_NO_SCH-NEXT: ld.d $a1, $a1, %ie_pc_lo12(ld)
+; MEDIUM_NO_SCH-NEXT: pcalau12i $a2, %ie_pc_hi20(ie)
+; MEDIUM_NO_SCH-NEXT: ld.d $a2, $a2, %ie_pc_lo12(ie)
; MEDIUM_NO_SCH-NEXT: ldx.d $zero, $a0, $tp
-; MEDIUM_NO_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ld)
-; MEDIUM_NO_SCH-NEXT: ld.d $a0, $a0, %ie_pc_lo12(ld)
-; MEDIUM_NO_SCH-NEXT: ldx.d $zero, $a0, $tp
-; MEDIUM_NO_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ie)
-; MEDIUM_NO_SCH-NEXT: ld.d $a0, $a0, %ie_pc_lo12(ie)
-; MEDIUM_NO_SCH-NEXT: ldx.d $zero, $a0, $tp
+; MEDIUM_NO_SCH-NEXT: ldx.d $zero, $a1, $tp
+; MEDIUM_NO_SCH-NEXT: ldx.d $zero, $a2, $tp
; MEDIUM_NO_SCH-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; MEDIUM_NO_SCH-NEXT: addi.d $sp, $sp, 16
; MEDIUM_NO_SCH-NEXT: ret
@@ -57,14 +57,14 @@ define void @foo() nounwind {
; MEDIUM_SCH-NEXT: pcaddu18i $ra, %call36(bar)
; MEDIUM_SCH-NEXT: jirl $ra, $ra, 0
; MEDIUM_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(gd)
+; MEDIUM_SCH-NEXT: pcalau12i $a1, %ie_pc_hi20(ld)
+; MEDIUM_SCH-NEXT: pcalau12i $a2, %ie_pc_hi20(ie)
; MEDIUM_SCH-NEXT: ld.d $a0, $a0, %ie_pc_lo12(gd)
+; MEDIUM_SCH-NEXT: ld.d $a1, $a1, %ie_pc_lo12(ld)
+; MEDIUM_SCH-NEXT: ld.d $a2, $a2, %ie_pc_lo12(ie)
; MEDIUM_SCH-NEXT: ldx.d $zero, $a0, $tp
-; MEDIUM_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ld)
-; MEDIUM_SCH-NEXT: ld.d $a0, $a0, %ie_pc_lo12(ld)
-; MEDIUM_SCH-NEXT: ldx.d $zero, $a0, $tp
-; MEDIUM_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ie)
-; MEDIUM_SCH-NEXT: ld.d $a0, $a0, %ie_pc_lo12(ie)
-; MEDIUM_SCH-NEXT: ldx.d $zero, $a0, $tp
+; MEDIUM_SCH-NEXT: ldx.d $zero, $a1, $tp
+; MEDIUM_SCH-NEXT: ldx.d $zero, $a2, $tp
; MEDIUM_SCH-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; MEDIUM_SCH-NEXT: addi.d $sp, $sp, 16
; MEDIUM_SCH-NEXT: ret
@@ -97,19 +97,19 @@ define void @foo() nounwind {
; LARGE_NO_SCH-NEXT: lu32i.d $t8, %ie64_pc_lo20(gd)
; LARGE_NO_SCH-NEXT: lu52i.d $t8, $t8, %ie64_pc_hi12(gd)
; LARGE_NO_SCH-NEXT: ldx.d $a0, $t8, $a0
-; LARGE_NO_SCH-NEXT: ldx.d $zero, $a0, $tp
-; LARGE_NO_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ld)
+; LARGE_NO_SCH-NEXT: pcalau12i $a1, %ie_pc_hi20(ld)
; LARGE_NO_SCH-NEXT: addi.d $t8, $zero, %ie_pc_lo12(ld)
; LARGE_NO_SCH-NEXT: lu32i.d $t8, %ie64_pc_lo20(ld)
; LARGE_NO_SCH-NEXT: lu52i.d $t8, $t8, %ie64_pc_hi12(ld)
-; LARGE_NO_SCH-NEXT: ldx.d $a0, $t8, $a0
-; LARGE_NO_SCH-NEXT: ldx.d $zero, $a0, $tp
-; LARGE_NO_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ie)
+; LARGE_NO_SCH-NEXT: ldx.d $a1, $t8, $a1
+; LARGE_NO_SCH-NEXT: pcalau12i $a2, %ie_pc_hi20(ie)
; LARGE_NO_SCH-NEXT: addi.d $t8, $zero, %ie_pc_lo12(ie)
; LARGE_NO_SCH-NEXT: lu32i.d $t8, %ie64_pc_lo20(ie)
; LARGE_NO_SCH-NEXT: lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
-; LARGE_NO_SCH-NEXT: ldx.d $a0, $t8, $a0
+; LARGE_NO_SCH-NEXT: ldx.d $a2, $t8, $a2
; LARGE_NO_SCH-NEXT: ldx.d $zero, $a0, $tp
+; LARGE_NO_SCH-NEXT: ldx.d $zero, $a1, $tp
+; LARGE_NO_SCH-NEXT: ldx.d $zero, $a2, $tp
; LARGE_NO_SCH-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LARGE_NO_SCH-NEXT: addi.d $sp, $sp, 16
; LARGE_NO_SCH-NEXT: ret
@@ -142,19 +142,19 @@ define void @foo() nounwind {
; LARGE_SCH-NEXT: lu32i.d $t8, %ie64_pc_lo20(gd)
; LARGE_SCH-NEXT: lu52i.d $t8, $t8, %ie64_pc_hi12(gd)
; LARGE_SCH-NEXT: ldx.d $a0, $t8, $a0
-; LARGE_SCH-NEXT: ldx.d $zero, $a0, $tp
-; LARGE_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ld)
+; LARGE_SCH-NEXT: pcalau12i $a1, %ie_pc_hi20(ld)
; LARGE_SCH-NEXT: addi.d $t8, $zero, %ie_pc_lo12(ld)
; LARGE_SCH-NEXT: lu32i.d $t8, %ie64_pc_lo20(ld)
; LARGE_SCH-NEXT: lu52i.d $t8, $t8, %ie64_pc_hi12(ld)
-; LARGE_SCH-NEXT: ldx.d $a0, $t8, $a0
-; LARGE_SCH-NEXT: ldx.d $zero, $a0, $tp
-; LARGE_SCH-NEXT: pcalau12i $a0, %ie_pc_hi20(ie)
+; LARGE_SCH-NEXT: ldx.d $a1, $t8, $a1
+; LARGE_SCH-NEXT: pcalau12i $a2, %ie_pc_hi20(ie)
; LARGE_SCH-NEXT: addi.d $t8, $zero, %ie_pc_lo12(ie)
; LARGE_SCH-NEXT: lu32i.d $t8, %ie64_pc_lo20(ie)
; LARGE_SCH-NEXT: lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
-; LARGE_SCH-NEXT: ldx.d $a0, $t8, $a0
+; LARGE_SCH-NEXT: ldx.d $a2, $t8, $a2
; LARGE_SCH-NEXT: ldx.d $zero, $a0, $tp
+; LARGE_SCH-NEXT: ldx.d $zero, $a1, $tp
+; LARGE_SCH-NEXT: ldx.d $zero, $a2, $tp
; LARGE_SCH-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LARGE_SCH-NEXT: addi.d $sp, $sp, 16
; LARGE_SCH-NEXT: ret
More information about the llvm-commits
mailing list