[llvm] [LoongArch] Enable tail calls for sret and byval functions (PR #168506)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 12 01:30:41 PST 2026
https://github.com/heiher updated https://github.com/llvm/llvm-project/pull/168506
>From 7fa089a80803085ddda2fd874efdd94e44c9ec03 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Mon, 17 Nov 2025 17:55:22 +0800
Subject: [PATCH 1/5] [LoongArch] Enable tail calls for sret and byval
functions
Allow tail calls for functions returning via sret when the caller's sret
pointer can be reused. Also support tail calls for byval arguments.
The previous restriction requiring exact match of caller and callee
arguments is relaxed: tail calls are allowed as long as the callee
does not use more stack space than the caller.
---
.../LoongArch/LoongArchISelLowering.cpp | 127 +++-
.../Target/LoongArch/LoongArchISelLowering.h | 6 +
.../LoongArch/LoongArchMachineFunctionInfo.h | 14 +
llvm/test/CodeGen/LoongArch/musttail.ll | 566 ++++++++++++++++++
llvm/test/CodeGen/LoongArch/tail-calls.ll | 13 +-
5 files changed, 692 insertions(+), 34 deletions(-)
create mode 100644 llvm/test/CodeGen/LoongArch/musttail.ll
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index ba9d0682b26dd..7aceb1917af4e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -8162,6 +8162,7 @@ SDValue LoongArchTargetLowering::LowerFormalArguments(
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
+ auto *LoongArchFI = MF.getInfo<LoongArchMachineFunctionInfo>();
switch (CallConv) {
default:
@@ -8225,6 +8226,8 @@ SDValue LoongArchTargetLowering::LowerFormalArguments(
continue;
}
InVals.push_back(ArgValue);
+ if (Ins[InsIdx].Flags.isByVal())
+ LoongArchFI->addIncomingByValArgs(ArgValue);
}
if (IsVarArg) {
@@ -8233,7 +8236,6 @@ SDValue LoongArchTargetLowering::LowerFormalArguments(
const TargetRegisterClass *RC = &LoongArch::GPRRegClass;
MachineFrameInfo &MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
- auto *LoongArchFI = MF.getInfo<LoongArchMachineFunctionInfo>();
// Offset of the first variable argument from stack pointer, and size of
// the vararg save area. For now, the varargs save area is either zero or
@@ -8283,6 +8285,8 @@ SDValue LoongArchTargetLowering::LowerFormalArguments(
LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
}
+ LoongArchFI->setArgumentStackSize(CCInfo.getStackSize());
+
// All stores are grouped in one node to allow the matching between
// the size of Ins and InVals. This only happens for vararg functions.
if (!OutChains.empty()) {
@@ -8339,9 +8343,11 @@ bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
auto &Outs = CLI.Outs;
auto &Caller = MF.getFunction();
auto CallerCC = Caller.getCallingConv();
+ auto *LoongArchFI = MF.getInfo<LoongArchMachineFunctionInfo>();
- // Do not tail call opt if the stack is used to pass parameters.
- if (CCInfo.getStackSize() != 0)
+ // If the stack arguments for this call do not fit into our own save area then
+ // the call cannot be made tail.
+ if (CCInfo.getStackSize() > LoongArchFI->getArgumentStackSize())
return false;
// Do not tail call opt if any parameters need to be passed indirectly.
@@ -8353,13 +8359,18 @@ bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
// semantics.
auto IsCallerStructRet = Caller.hasStructRetAttr();
auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
- if (IsCallerStructRet || IsCalleeStructRet)
+ if (IsCallerStructRet != IsCalleeStructRet)
return false;
- // Do not tail call opt if either the callee or caller has a byval argument.
- for (auto &Arg : Outs)
- if (Arg.Flags.isByVal())
+ // Do not tail call opt if caller's and callee's byval arguments do not match.
+ for (unsigned i = 0, j = 0; i < Outs.size(); i++) {
+ if (!Outs[i].Flags.isByVal())
+ continue;
+ if (j++ >= LoongArchFI->getIncomingByValArgsSize())
+ return false;
+ if (LoongArchFI->getIncomingByValArgs(i).getValueType() != Outs[i].ArgVT)
return false;
+ }
// The callee has to preserve all registers the caller needs to preserve.
const LoongArchRegisterInfo *TRI = Subtarget.getRegisterInfo();
@@ -8369,9 +8380,47 @@ bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
}
+
+ // If the callee takes no arguments then go on to check the results of the
+ // call.
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
+ return false;
+
return true;
}
+SDValue LoongArchTargetLowering::addTokenForArgument(SDValue Chain,
+ SelectionDAG &DAG,
+ MachineFrameInfo &MFI,
+ int ClobberedFI) const {
+ SmallVector<SDValue, 8> ArgChains;
+ int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
+ int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
+
+ // Include the original chain at the beginning of the list. When this is
+ // used by target LowerCall hooks, this helps legalize find the
+ // CALLSEQ_BEGIN node.
+ ArgChains.push_back(Chain);
+
+ // Add a chain value for each stack argument corresponding
+ for (SDNode *U : DAG.getEntryNode().getNode()->users())
+ if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
+ if (FI->getIndex() < 0) {
+ int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
+ int64_t InLastByte = InFirstByte;
+ InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
+
+ if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
+ (FirstByte <= InFirstByte && InFirstByte <= LastByte))
+ ArgChains.push_back(SDValue(L, 1));
+ }
+
+ // Build a tokenfactor for all the chains.
+ return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
+}
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
return DAG.getDataLayout().getPrefTypeAlign(
VT.getTypeForEVT(*DAG.getContext()));
@@ -8396,6 +8445,7 @@ LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
bool &IsTailCall = CLI.IsTailCall;
MachineFunction &MF = DAG.getMachineFunction();
+ auto *LoongArchFI = MF.getInfo<LoongArchMachineFunctionInfo>();
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign> ArgLocs;
@@ -8421,7 +8471,7 @@ LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Create local copies for byval args.
SmallVector<SDValue> ByValArgs;
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ for (unsigned i = 0, j = 0, e = Outs.size(); i != e; ++i) {
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (!Flags.isByVal())
continue;
@@ -8429,17 +8479,27 @@ LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
SDValue Arg = OutVals[i];
unsigned Size = Flags.getByValSize();
Align Alignment = Flags.getNonZeroByValAlign();
-
- int FI =
- MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
- SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
SDValue SizeNode = DAG.getConstant(Size, DL, GRLenVT);
+ SDValue Dst;
- Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
- /*IsVolatile=*/false,
- /*AlwaysInline=*/false, /*CI=*/nullptr, std::nullopt,
- MachinePointerInfo(), MachinePointerInfo());
- ByValArgs.push_back(FIPtr);
+ if (IsTailCall) {
+ SDValue CallerArg = LoongArchFI->getIncomingByValArgs(j++);
+ if (isa<GlobalAddressSDNode>(Arg) || isa<ExternalSymbolSDNode>(Arg) ||
+ isa<FrameIndexSDNode>(Arg))
+ Dst = CallerArg;
+ } else {
+ int FI =
+ MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
+ Dst = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ }
+ if (Dst) {
+ Chain =
+ DAG.getMemcpy(Chain, DL, Dst, Arg, SizeNode, Alignment,
+ /*IsVolatile=*/false,
+ /*AlwaysInline=*/false, /*CI=*/nullptr, std::nullopt,
+ MachinePointerInfo(), MachinePointerInfo());
+ ByValArgs.push_back(Dst);
+ }
}
if (!IsTailCall)
@@ -8539,27 +8599,44 @@ LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
}
// Use local copy if it is a byval arg.
- if (Flags.isByVal())
- ArgValue = ByValArgs[j++];
+ if (Flags.isByVal()) {
+ if (!IsTailCall || (isa<GlobalAddressSDNode>(ArgValue) ||
+ isa<ExternalSymbolSDNode>(ArgValue) ||
+ isa<FrameIndexSDNode>(ArgValue)))
+ ArgValue = ByValArgs[j++];
+ }
if (VA.isRegLoc()) {
// Queue up the argument copies and emit them at the end.
RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
} else {
assert(VA.isMemLoc() && "Argument not register or memory");
- assert(!IsTailCall && "Tail call not allowed if stack is used "
- "for passing parameters");
+ SDValue DstAddr;
+ MachinePointerInfo DstInfo;
+ int32_t Offset = VA.getLocMemOffset();
// Work out the address of the stack slot.
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, DL, LoongArch::R3, PtrVT);
- SDValue Address =
- DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
- DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
+
+ if (IsTailCall) {
+ unsigned OpSize = (VA.getValVT().getSizeInBits() + 7) / 8;
+ int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
+ DstAddr = DAG.getFrameIndex(FI, PtrVT);
+ DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
+ // Make sure any stack arguments overlapping with where we're storing
+ // are loaded before this eventual operation. Otherwise they'll be
+ // clobbered.
+ Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
+ } else {
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
+ DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
+ DstInfo = MachinePointerInfo::getStack(MF, Offset);
+ }
// Emit the store.
MemOpChains.push_back(
- DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
+ DAG.getStore(Chain, DL, ArgValue, DstAddr, DstInfo));
}
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 0c09fb6afd2d1..889b4ba3277e7 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -263,6 +263,12 @@ class LoongArchTargetLowering : public TargetLowering {
CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
const SmallVectorImpl<CCValAssign> &ArgLocs) const;
+ /// Finds the incoming stack arguments which overlap the given fixed stack
+ /// object and incorporates their load into the current chain. This prevents
+ /// an upcoming store from clobbering the stack argument before it's used.
+ SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
+ MachineFrameInfo &MFI, int ClobberedFI) const;
+
bool softPromoteHalfType() const override { return true; }
bool
diff --git a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
index 904985c189dba..c8e03b0f08e00 100644
--- a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
@@ -32,10 +32,17 @@ class LoongArchMachineFunctionInfo : public MachineFunctionInfo {
/// Size of stack frame to save callee saved registers
unsigned CalleeSavedStackSize = 0;
+ /// ArgumentStackSize - amount of bytes on stack consumed by the arguments
+ /// being passed on the stack
+ unsigned ArgumentStackSize = 0;
+
/// FrameIndex of the spill slot when there is no scavenged register in
/// insertIndirectBranch.
int BranchRelaxationSpillFrameIndex = -1;
+ /// Incoming ByVal arguments
+ SmallVector<SDValue, 8> IncomingByValArgs;
+
/// Registers that have been sign extended from i32.
SmallVector<Register, 8> SExt32Registers;
@@ -63,6 +70,9 @@ class LoongArchMachineFunctionInfo : public MachineFunctionInfo {
unsigned getCalleeSavedStackSize() const { return CalleeSavedStackSize; }
void setCalleeSavedStackSize(unsigned Size) { CalleeSavedStackSize = Size; }
+ unsigned getArgumentStackSize() const { return ArgumentStackSize; }
+ void setArgumentStackSize(unsigned size) { ArgumentStackSize = size; }
+
int getBranchRelaxationSpillFrameIndex() {
return BranchRelaxationSpillFrameIndex;
}
@@ -70,6 +80,10 @@ class LoongArchMachineFunctionInfo : public MachineFunctionInfo {
BranchRelaxationSpillFrameIndex = Index;
}
+ void addIncomingByValArgs(SDValue Val) { IncomingByValArgs.push_back(Val); }
+ SDValue &getIncomingByValArgs(int Idx) { return IncomingByValArgs[Idx]; }
+ unsigned getIncomingByValArgsSize() { return IncomingByValArgs.size(); }
+
void addSExt32Register(Register Reg) { SExt32Registers.push_back(Reg); }
bool isSExt32Register(Register Reg) const {
diff --git a/llvm/test/CodeGen/LoongArch/musttail.ll b/llvm/test/CodeGen/LoongArch/musttail.ll
new file mode 100644
index 0000000000000..282917b03e527
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/musttail.ll
@@ -0,0 +1,566 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=loongarch32 %s -o - | FileCheck %s --check-prefix=LA32
+; RUN: llc -mtriple=loongarch64 %s -o - | FileCheck %s --check-prefix=LA64
+
+declare i32 @many_args_callee(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9)
+
+define i32 @many_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
+; LA32-LABEL: many_args_tail:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a0, $zero, 9
+; LA32-NEXT: st.w $a0, $sp, 4
+; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: ori $a2, $zero, 2
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: ori $a4, $zero, 4
+; LA32-NEXT: ori $a5, $zero, 5
+; LA32-NEXT: ori $a6, $zero, 6
+; LA32-NEXT: ori $a7, $zero, 7
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: move $a0, $zero
+; LA32-NEXT: b many_args_callee
+;
+; LA64-LABEL: many_args_tail:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a0, $zero, 9
+; LA64-NEXT: st.d $a0, $sp, 8
+; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: ori $a2, $zero, 2
+; LA64-NEXT: ori $a3, $zero, 3
+; LA64-NEXT: ori $a4, $zero, 4
+; LA64-NEXT: ori $a5, $zero, 5
+; LA64-NEXT: ori $a6, $zero, 6
+; LA64-NEXT: ori $a7, $zero, 7
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: move $a0, $zero
+; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
+; LA64-NEXT: jr $t8
+ %ret = tail call i32 @many_args_callee(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9)
+ ret i32 %ret
+}
+
+define i32 @many_args_musttail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
+; LA32-LABEL: many_args_musttail:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a0, $zero, 9
+; LA32-NEXT: st.w $a0, $sp, 4
+; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: ori $a2, $zero, 2
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: ori $a4, $zero, 4
+; LA32-NEXT: ori $a5, $zero, 5
+; LA32-NEXT: ori $a6, $zero, 6
+; LA32-NEXT: ori $a7, $zero, 7
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: move $a0, $zero
+; LA32-NEXT: b many_args_callee
+;
+; LA64-LABEL: many_args_musttail:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a0, $zero, 9
+; LA64-NEXT: st.d $a0, $sp, 8
+; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: ori $a2, $zero, 2
+; LA64-NEXT: ori $a3, $zero, 3
+; LA64-NEXT: ori $a4, $zero, 4
+; LA64-NEXT: ori $a5, $zero, 5
+; LA64-NEXT: ori $a6, $zero, 6
+; LA64-NEXT: ori $a7, $zero, 7
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: move $a0, $zero
+; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
+; LA64-NEXT: jr $t8
+ %ret = musttail call i32 @many_args_callee(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9)
+ ret i32 %ret
+}
+
+; This function has more arguments than its tail-callee. This isn't valid for
+; the musttail attribute, but can still be tail-called as a non-guaranteed
+; optimisation, because the outgoing arguments to @many_args_callee fit in the
+; stack space allocated by the caller of @more_args_tail.
+define i32 @more_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
+; LA32-LABEL: more_args_tail:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a0, $zero, 9
+; LA32-NEXT: st.w $a0, $sp, 4
+; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: ori $a2, $zero, 2
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: ori $a4, $zero, 4
+; LA32-NEXT: ori $a5, $zero, 5
+; LA32-NEXT: ori $a6, $zero, 6
+; LA32-NEXT: ori $a7, $zero, 7
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: move $a0, $zero
+; LA32-NEXT: b many_args_callee
+;
+; LA64-LABEL: more_args_tail:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a0, $zero, 9
+; LA64-NEXT: st.d $a0, $sp, 8
+; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: ori $a2, $zero, 2
+; LA64-NEXT: ori $a3, $zero, 3
+; LA64-NEXT: ori $a4, $zero, 4
+; LA64-NEXT: ori $a5, $zero, 5
+; LA64-NEXT: ori $a6, $zero, 6
+; LA64-NEXT: ori $a7, $zero, 7
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: move $a0, $zero
+; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
+; LA64-NEXT: jr $t8
+ %ret = tail call i32 @many_args_callee(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9)
+ ret i32 %ret
+}
+
+; Again, this isn't valid for musttail, but can be tail-called in practice
+; because the stack size is the same.
+define i32 @different_args_tail_32bit(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4) nounwind {
+; LA32-LABEL: different_args_tail_32bit:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a0, $zero, 9
+; LA32-NEXT: st.w $a0, $sp, 4
+; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: ori $a2, $zero, 2
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: ori $a4, $zero, 4
+; LA32-NEXT: ori $a5, $zero, 5
+; LA32-NEXT: ori $a6, $zero, 6
+; LA32-NEXT: ori $a7, $zero, 7
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: move $a0, $zero
+; LA32-NEXT: b many_args_callee
+;
+; LA64-LABEL: different_args_tail_32bit:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: ori $a0, $zero, 9
+; LA64-NEXT: st.d $a0, $sp, 8
+; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: ori $a2, $zero, 2
+; LA64-NEXT: ori $a3, $zero, 3
+; LA64-NEXT: ori $a4, $zero, 4
+; LA64-NEXT: ori $a5, $zero, 5
+; LA64-NEXT: ori $a6, $zero, 6
+; LA64-NEXT: ori $a7, $zero, 7
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: move $a0, $zero
+; LA64-NEXT: pcaddu18i $ra, %call36(many_args_callee)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: ret
+ %ret = tail call i32 @many_args_callee(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9)
+ ret i32 %ret
+}
+
+define i32 @different_args_tail_64bit(i128 %0, i128 %1, i128 %2, i128 %3, i128 %4) nounwind {
+; LA32-LABEL: different_args_tail_64bit:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: ori $a0, $zero, 9
+; LA32-NEXT: st.w $a0, $sp, 4
+; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: ori $a2, $zero, 2
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: ori $a4, $zero, 4
+; LA32-NEXT: ori $a5, $zero, 5
+; LA32-NEXT: ori $a6, $zero, 6
+; LA32-NEXT: ori $a7, $zero, 7
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: move $a0, $zero
+; LA32-NEXT: bl many_args_callee
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: different_args_tail_64bit:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a0, $zero, 9
+; LA64-NEXT: st.d $a0, $sp, 8
+; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: ori $a2, $zero, 2
+; LA64-NEXT: ori $a3, $zero, 3
+; LA64-NEXT: ori $a4, $zero, 4
+; LA64-NEXT: ori $a5, $zero, 5
+; LA64-NEXT: ori $a6, $zero, 6
+; LA64-NEXT: ori $a7, $zero, 7
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: move $a0, $zero
+; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
+; LA64-NEXT: jr $t8
+ %ret = tail call i32 @many_args_callee(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9)
+ ret i32 %ret
+}
+
+; Here, the caller requires less stack space for its arguments than the
+; callee, so it would not be valid to do a tail-call.
+define i32 @fewer_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4) nounwind {
+; LA32-LABEL: fewer_args_tail:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: ori $a0, $zero, 9
+; LA32-NEXT: st.w $a0, $sp, 4
+; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: ori $a2, $zero, 2
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: ori $a4, $zero, 4
+; LA32-NEXT: ori $a5, $zero, 5
+; LA32-NEXT: ori $a6, $zero, 6
+; LA32-NEXT: ori $a7, $zero, 7
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: move $a0, $zero
+; LA32-NEXT: bl many_args_callee
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: fewer_args_tail:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: ori $a0, $zero, 9
+; LA64-NEXT: st.d $a0, $sp, 8
+; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: ori $a2, $zero, 2
+; LA64-NEXT: ori $a3, $zero, 3
+; LA64-NEXT: ori $a4, $zero, 4
+; LA64-NEXT: ori $a5, $zero, 5
+; LA64-NEXT: ori $a6, $zero, 6
+; LA64-NEXT: ori $a7, $zero, 7
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: move $a0, $zero
+; LA64-NEXT: pcaddu18i $ra, %call36(many_args_callee)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: ret
+ %ret = tail call i32 @many_args_callee(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9)
+ ret i32 %ret
+}
+
+declare void @foo(i32, i32, i32, i32, i32, i32, i32, i32, i32)
+
+define void @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8) nounwind {
+; LA32-LABEL: bar:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: addi.w $sp, $sp, -48
+; LA32-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 40 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s0, $sp, 36 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s1, $sp, 32 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s2, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s3, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s4, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s5, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s6, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: move $fp, $a7
+; LA32-NEXT: move $s0, $a6
+; LA32-NEXT: move $s1, $a5
+; LA32-NEXT: move $s2, $a4
+; LA32-NEXT: move $s3, $a3
+; LA32-NEXT: move $s4, $a2
+; LA32-NEXT: move $s5, $a1
+; LA32-NEXT: move $s6, $a0
+; LA32-NEXT: ori $a0, $zero, 1
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: move $a0, $s6
+; LA32-NEXT: bl foo
+; LA32-NEXT: ori $a0, $zero, 2
+; LA32-NEXT: st.w $a0, $sp, 48
+; LA32-NEXT: move $a0, $s6
+; LA32-NEXT: move $a1, $s5
+; LA32-NEXT: move $a2, $s4
+; LA32-NEXT: move $a3, $s3
+; LA32-NEXT: move $a4, $s2
+; LA32-NEXT: move $a5, $s1
+; LA32-NEXT: move $a6, $s0
+; LA32-NEXT: move $a7, $fp
+; LA32-NEXT: ld.w $s6, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s5, $sp, 16 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s4, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s3, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s2, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s1, $sp, 32 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s0, $sp, 36 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $fp, $sp, 40 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 48
+; LA32-NEXT: b foo
+;
+; LA64-LABEL: bar:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: addi.d $sp, $sp, -96
+; LA64-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
+; LA64-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s0, $sp, 72 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s1, $sp, 64 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s2, $sp, 56 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s3, $sp, 48 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s4, $sp, 40 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s5, $sp, 32 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s6, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: move $fp, $a7
+; LA64-NEXT: move $s0, $a6
+; LA64-NEXT: move $s1, $a5
+; LA64-NEXT: move $s2, $a4
+; LA64-NEXT: move $s3, $a3
+; LA64-NEXT: move $s4, $a2
+; LA64-NEXT: move $s5, $a1
+; LA64-NEXT: move $s6, $a0
+; LA64-NEXT: ori $a0, $zero, 1
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: move $a0, $s6
+; LA64-NEXT: pcaddu18i $ra, %call36(foo)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: ori $a0, $zero, 2
+; LA64-NEXT: st.d $a0, $sp, 96
+; LA64-NEXT: move $a0, $s6
+; LA64-NEXT: move $a1, $s5
+; LA64-NEXT: move $a2, $s4
+; LA64-NEXT: move $a3, $s3
+; LA64-NEXT: move $a4, $s2
+; LA64-NEXT: move $a5, $s1
+; LA64-NEXT: move $a6, $s0
+; LA64-NEXT: move $a7, $fp
+; LA64-NEXT: ld.d $s6, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s5, $sp, 32 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s4, $sp, 40 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s3, $sp, 48 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s2, $sp, 56 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s1, $sp, 64 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s0, $sp, 72 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 96
+; LA64-NEXT: pcaddu18i $t8, %call36(foo)
+; LA64-NEXT: jr $t8
+entry:
+ call void @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 1)
+ musttail call void @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 2)
+ ret void
+}
+
+declare void @sret_callee(ptr sret({ double, double }) align 8)
+
+; Functions which return by sret can be tail-called because the incoming sret
+; pointer gets passed through to the callee.
+define void @sret_caller_tail(ptr sret({ double, double }) align 8 %result) {
+; LA32-LABEL: sret_caller_tail:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: b sret_callee
+;
+; LA64-LABEL: sret_caller_tail:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: pcaddu18i $t8, %call36(sret_callee)
+; LA64-NEXT: jr $t8
+entry:
+ tail call void @sret_callee(ptr sret({ double, double }) align 8 %result)
+ ret void
+}
+
+define void @sret_caller_musttail(ptr sret({ double, double }) align 8 %result) {
+; LA32-LABEL: sret_caller_musttail:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: b sret_callee
+;
+; LA64-LABEL: sret_caller_musttail:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: pcaddu18i $t8, %call36(sret_callee)
+; LA64-NEXT: jr $t8
+entry:
+ musttail call void @sret_callee(ptr sret({ double, double }) align 8 %result)
+ ret void
+}
+
+%twenty_bytes = type { [5 x i32] }
+declare void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4)
+
+; Functions with byval parameters can be tail-called, because the value is
+; actually passed in registers in the same way for the caller and callee.
+define void @large_caller(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
+; LA32-LABEL: large_caller:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: b large_callee
+;
+; LA64-LABEL: large_caller:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: pcaddu18i $t8, %call36(large_callee)
+; LA64-NEXT: jr $t8
+entry:
+ musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
+ ret void
+}
+
+; As above, but with some inline asm to test that the argument in $r4 is
+; re-loaded before the call.
+define void @large_caller_check_regs(%twenty_bytes* byval(%twenty_bytes) align 4 %a) nounwind {
+; LA32-LABEL: large_caller_check_regs:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: move $a1, $a0
+; LA32-NEXT: #APP
+; LA32-NEXT: #NO_APP
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: b large_callee
+;
+; LA64-LABEL: large_caller_check_regs:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: move $a1, $a0
+; LA64-NEXT: #APP
+; LA64-NEXT: #NO_APP
+; LA64-NEXT: move $a0, $a1
+; LA64-NEXT: pcaddu18i $t8, %call36(large_callee)
+; LA64-NEXT: jr $t8
+entry:
+ tail call void asm sideeffect "", "~{r4}"()
+ musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
+ ret void
+}
+
+; The IR for this one looks dodgy, because it has an alloca passed to a
+; musttail function, but it is passed as a byval argument, so will be copied
+; into the stack space allocated by @large_caller_new_value's caller, so is
+; valid.
+define void @large_caller_new_value(%twenty_bytes* byval(%twenty_bytes) align 4 %a) nounwind {
+; LA32-LABEL: large_caller_new_value:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $zero, $sp, 12
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: st.w $a1, $sp, 16
+; LA32-NEXT: ori $a2, $zero, 2
+; LA32-NEXT: st.w $a2, $sp, 20
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: st.w $a3, $sp, 24
+; LA32-NEXT: ori $a4, $zero, 4
+; LA32-NEXT: st.w $a4, $sp, 28
+; LA32-NEXT: st.w $a4, $a0, 16
+; LA32-NEXT: st.w $a3, $a0, 12
+; LA32-NEXT: st.w $a2, $a0, 8
+; LA32-NEXT: st.w $a1, $a0, 4
+; LA32-NEXT: st.w $zero, $a0, 0
+; LA32-NEXT: addi.w $sp, $sp, 32
+; LA32-NEXT: b large_callee
+;
+; LA64-LABEL: large_caller_new_value:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: ori $a1, $zero, 0
+; LA64-NEXT: lu32i.d $a1, 1
+; LA64-NEXT: st.d $a1, $sp, 12
+; LA64-NEXT: ori $a1, $zero, 2
+; LA64-NEXT: lu32i.d $a1, 3
+; LA64-NEXT: st.d $a1, $sp, 20
+; LA64-NEXT: ori $a1, $zero, 4
+; LA64-NEXT: st.w $a1, $sp, 28
+; LA64-NEXT: st.w $a1, $a0, 16
+; LA64-NEXT: vld $vr0, $sp, 12
+; LA64-NEXT: vst $vr0, $a0, 0
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: pcaddu18i $t8, %call36(large_callee)
+; LA64-NEXT: jr $t8
+entry:
+ %y = alloca %twenty_bytes, align 4
+ store i32 0, ptr %y, align 4
+ %0 = getelementptr inbounds i8, ptr %y, i32 4
+ store i32 1, ptr %0, align 4
+ %1 = getelementptr inbounds i8, ptr %y, i32 8
+ store i32 2, ptr %1, align 4
+ %2 = getelementptr inbounds i8, ptr %y, i32 12
+ store i32 3, ptr %2, align 4
+ %3 = getelementptr inbounds i8, ptr %y, i32 16
+ store i32 4, ptr %3, align 4
+ musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %y)
+ ret void
+}
+
+declare void @two_byvals_callee(%twenty_bytes* byval(%twenty_bytes) align 4, %twenty_bytes* byval(%twenty_bytes) align 4)
+define void @swap_byvals(%twenty_bytes* byval(%twenty_bytes) align 4 %a, %twenty_bytes* byval(%twenty_bytes) align 4 %b) {
+; LA32-LABEL: swap_byvals:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: move $a2, $a0
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: move $a1, $a2
+; LA32-NEXT: b two_byvals_callee
+;
+; LA64-LABEL: swap_byvals:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: move $a2, $a0
+; LA64-NEXT: move $a0, $a1
+; LA64-NEXT: move $a1, $a2
+; LA64-NEXT: pcaddu18i $t8, %call36(two_byvals_callee)
+; LA64-NEXT: jr $t8
+entry:
+ musttail call void @two_byvals_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %b, %twenty_bytes* byval(%twenty_bytes) align 4 %a)
+ ret void
+}
+
+; A forwarded byval arg, but in a different argument register, so it needs to
+; be moved between registers first. This can't be musttail because of the
+; different signatures, but is still tail-called as an optimisation.
+declare void @shift_byval_callee(%twenty_bytes* byval(%twenty_bytes) align 4)
+define void @shift_byval(i32 %a, %twenty_bytes* byval(%twenty_bytes) align 4 %b) {
+; LA32-LABEL: shift_byval:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: b shift_byval_callee
+;
+; LA64-LABEL: shift_byval:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: move $a0, $a1
+; LA64-NEXT: pcaddu18i $t8, %call36(shift_byval_callee)
+; LA64-NEXT: jr $t8
+entry:
+ tail call void @shift_byval_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %b)
+ ret void
+}
+
+; A global object passed to a byval argument, so it must be copied, but doesn't
+; need a stack temporary.
+ at large_global = external global %twenty_bytes
+define void @large_caller_from_global(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
+; LA32-LABEL: large_caller_from_global:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: pcalau12i $a1, %got_pc_hi20(large_global)
+; LA32-NEXT: ld.w $a1, $a1, %got_pc_lo12(large_global)
+; LA32-NEXT: ld.w $a2, $a1, 16
+; LA32-NEXT: st.w $a2, $a0, 16
+; LA32-NEXT: ld.w $a2, $a1, 12
+; LA32-NEXT: st.w $a2, $a0, 12
+; LA32-NEXT: ld.w $a2, $a1, 8
+; LA32-NEXT: st.w $a2, $a0, 8
+; LA32-NEXT: ld.w $a2, $a1, 4
+; LA32-NEXT: st.w $a2, $a0, 4
+; LA32-NEXT: ld.w $a1, $a1, 0
+; LA32-NEXT: st.w $a1, $a0, 0
+; LA32-NEXT: b large_callee
+;
+; LA64-LABEL: large_caller_from_global:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: pcalau12i $a1, %got_pc_hi20(large_global)
+; LA64-NEXT: ld.d $a1, $a1, %got_pc_lo12(large_global)
+; LA64-NEXT: ld.w $a2, $a1, 16
+; LA64-NEXT: st.w $a2, $a0, 16
+; LA64-NEXT: vld $vr0, $a1, 0
+; LA64-NEXT: vst $vr0, $a0, 0
+; LA64-NEXT: pcaddu18i $t8, %call36(large_callee)
+; LA64-NEXT: jr $t8
+entry:
+ musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 @large_global)
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/tail-calls.ll b/llvm/test/CodeGen/LoongArch/tail-calls.ll
index 533761c8a1c70..e14fbc2302cce 100644
--- a/llvm/test/CodeGen/LoongArch/tail-calls.ll
+++ b/llvm/test/CodeGen/LoongArch/tail-calls.ll
@@ -80,20 +80,15 @@ entry:
ret void
}
-;; Do not tail call optimize if stack is used to pass parameters.
+;; Perform tail call optimization if the callee's stack argument usage is <= the caller's.
declare i32 @callee_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g, i32 %h, i32 %i)
define i32 @caller_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g, i32 %h, i32 %i) nounwind {
; CHECK-LABEL: caller_args:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT: ld.d $t0, $sp, 16
+; CHECK-NEXT: ld.d $t0, $sp, 0
; CHECK-NEXT: st.d $t0, $sp, 0
-; CHECK-NEXT: pcaddu18i $ra, %call36(callee_args)
-; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 16
-; CHECK-NEXT: ret
+; CHECK-NEXT: pcaddu18i $t8, %call36(callee_args)
+; CHECK-NEXT: jr $t8
entry:
%r = tail call i32 @callee_args(i32 %a, i32 %b, i32 %c, i32 %dd, i32 %e, i32 %ff, i32 %g, i32 %h, i32 %i)
ret i32 %r
>From eb45efbd08ed83c4475a714ebb6b1c67c8118ff7 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Thu, 4 Dec 2025 19:31:27 +0800
Subject: [PATCH 2/5] Address weining's comments
---
llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
index c8e03b0f08e00..75db3365415e8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
@@ -32,8 +32,8 @@ class LoongArchMachineFunctionInfo : public MachineFunctionInfo {
/// Size of stack frame to save callee saved registers
unsigned CalleeSavedStackSize = 0;
- /// ArgumentStackSize - amount of bytes on stack consumed by the arguments
- /// being passed on the stack
+ /// Amount of bytes on stack consumed by the arguments being passed on
+ /// the stack
unsigned ArgumentStackSize = 0;
/// FrameIndex of the spill slot when there is no scavenged register in
>From 25e3c8f3a0534014fa36f50818e7e23e8a1d8c24 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Mon, 15 Dec 2025 20:07:12 +0800
Subject: [PATCH 3/5] Replace addTokenForArgument with
getStackArgumentTokenFactor
---
.../LoongArch/LoongArchISelLowering.cpp | 45 +++++------------
.../Target/LoongArch/LoongArchISelLowering.h | 6 ---
llvm/test/CodeGen/LoongArch/musttail.ll | 48 +++++++++----------
3 files changed, 35 insertions(+), 64 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 7aceb1917af4e..377ca44da2d9e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -8391,36 +8391,6 @@ bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
return true;
}
-SDValue LoongArchTargetLowering::addTokenForArgument(SDValue Chain,
- SelectionDAG &DAG,
- MachineFrameInfo &MFI,
- int ClobberedFI) const {
- SmallVector<SDValue, 8> ArgChains;
- int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
- int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
-
- // Include the original chain at the beginning of the list. When this is
- // used by target LowerCall hooks, this helps legalize find the
- // CALLSEQ_BEGIN node.
- ArgChains.push_back(Chain);
-
- // Add a chain value for each stack argument corresponding
- for (SDNode *U : DAG.getEntryNode().getNode()->users())
- if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
- if (FI->getIndex() < 0) {
- int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
- int64_t InLastByte = InFirstByte;
- InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
-
- if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
- (FirstByte <= InFirstByte && InFirstByte <= LastByte))
- ArgChains.push_back(SDValue(L, 1));
- }
-
- // Build a tokenfactor for all the chains.
- return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
-}
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
return DAG.getDataLayout().getPrefTypeAlign(
VT.getTypeForEVT(*DAG.getContext()));
@@ -8505,6 +8475,13 @@ LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (!IsTailCall)
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
+ // During a tail call, stores to the argument area must happen after all of
+ // the function's incoming arguments have been loaded because they may alias.
+ // This is done by folding in a TokenFactor from LowerFormalArguments, but
+ // there's no point in doing so repeatedly so this tracks whether that's
+ // happened yet.
+ bool AfterFormalArgLoads = false;
+
// Copy argument values to their designated locations.
SmallVector<std::pair<Register, SDValue>> RegsToPass;
SmallVector<SDValue> MemOpChains;
@@ -8624,10 +8601,10 @@ LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
DstAddr = DAG.getFrameIndex(FI, PtrVT);
DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
- // Make sure any stack arguments overlapping with where we're storing
- // are loaded before this eventual operation. Otherwise they'll be
- // clobbered.
- Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
+ if (!AfterFormalArgLoads) {
+ Chain = DAG.getStackArgumentTokenFactor(Chain);
+ AfterFormalArgLoads = true;
+ }
} else {
SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 889b4ba3277e7..0c09fb6afd2d1 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -263,12 +263,6 @@ class LoongArchTargetLowering : public TargetLowering {
CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
const SmallVectorImpl<CCValAssign> &ArgLocs) const;
- /// Finds the incoming stack arguments which overlap the given fixed stack
- /// object and incorporates their load into the current chain. This prevents
- /// an upcoming store from clobbering the stack argument before it's used.
- SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
- MachineFrameInfo &MFI, int ClobberedFI) const;
-
bool softPromoteHalfType() const override { return true; }
bool
diff --git a/llvm/test/CodeGen/LoongArch/musttail.ll b/llvm/test/CodeGen/LoongArch/musttail.ll
index 282917b03e527..4d9be2869fd9f 100644
--- a/llvm/test/CodeGen/LoongArch/musttail.ll
+++ b/llvm/test/CodeGen/LoongArch/musttail.ll
@@ -7,9 +7,9 @@ declare i32 @many_args_callee(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i3
define i32 @many_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
; LA32-LABEL: many_args_tail:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 9
-; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: ori $a0, $zero, 9
; LA32-NEXT: ori $a1, $zero, 1
; LA32-NEXT: ori $a2, $zero, 2
; LA32-NEXT: ori $a3, $zero, 3
@@ -17,15 +17,15 @@ define i32 @many_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %
; LA32-NEXT: ori $a5, $zero, 5
; LA32-NEXT: ori $a6, $zero, 6
; LA32-NEXT: ori $a7, $zero, 7
-; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: move $a0, $zero
; LA32-NEXT: b many_args_callee
;
; LA64-LABEL: many_args_tail:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 9
-; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: ori $a0, $zero, 9
; LA64-NEXT: ori $a1, $zero, 1
; LA64-NEXT: ori $a2, $zero, 2
; LA64-NEXT: ori $a3, $zero, 3
@@ -33,7 +33,7 @@ define i32 @many_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %
; LA64-NEXT: ori $a5, $zero, 5
; LA64-NEXT: ori $a6, $zero, 6
; LA64-NEXT: ori $a7, $zero, 7
-; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: move $a0, $zero
; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
; LA64-NEXT: jr $t8
@@ -44,9 +44,9 @@ define i32 @many_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %
define i32 @many_args_musttail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
; LA32-LABEL: many_args_musttail:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 9
-; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: ori $a0, $zero, 9
; LA32-NEXT: ori $a1, $zero, 1
; LA32-NEXT: ori $a2, $zero, 2
; LA32-NEXT: ori $a3, $zero, 3
@@ -54,15 +54,15 @@ define i32 @many_args_musttail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i
; LA32-NEXT: ori $a5, $zero, 5
; LA32-NEXT: ori $a6, $zero, 6
; LA32-NEXT: ori $a7, $zero, 7
-; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: move $a0, $zero
; LA32-NEXT: b many_args_callee
;
; LA64-LABEL: many_args_musttail:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 9
-; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: ori $a0, $zero, 9
; LA64-NEXT: ori $a1, $zero, 1
; LA64-NEXT: ori $a2, $zero, 2
; LA64-NEXT: ori $a3, $zero, 3
@@ -70,7 +70,7 @@ define i32 @many_args_musttail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i
; LA64-NEXT: ori $a5, $zero, 5
; LA64-NEXT: ori $a6, $zero, 6
; LA64-NEXT: ori $a7, $zero, 7
-; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: move $a0, $zero
; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
; LA64-NEXT: jr $t8
@@ -85,9 +85,9 @@ define i32 @many_args_musttail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i
define i32 @more_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
; LA32-LABEL: more_args_tail:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 9
-; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: ori $a0, $zero, 9
; LA32-NEXT: ori $a1, $zero, 1
; LA32-NEXT: ori $a2, $zero, 2
; LA32-NEXT: ori $a3, $zero, 3
@@ -95,15 +95,15 @@ define i32 @more_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %
; LA32-NEXT: ori $a5, $zero, 5
; LA32-NEXT: ori $a6, $zero, 6
; LA32-NEXT: ori $a7, $zero, 7
-; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: move $a0, $zero
; LA32-NEXT: b many_args_callee
;
; LA64-LABEL: more_args_tail:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 9
-; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: ori $a0, $zero, 9
; LA64-NEXT: ori $a1, $zero, 1
; LA64-NEXT: ori $a2, $zero, 2
; LA64-NEXT: ori $a3, $zero, 3
@@ -111,7 +111,7 @@ define i32 @more_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %
; LA64-NEXT: ori $a5, $zero, 5
; LA64-NEXT: ori $a6, $zero, 6
; LA64-NEXT: ori $a7, $zero, 7
-; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: move $a0, $zero
; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
; LA64-NEXT: jr $t8
@@ -124,9 +124,9 @@ define i32 @more_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %
define i32 @different_args_tail_32bit(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4) nounwind {
; LA32-LABEL: different_args_tail_32bit:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a0, $zero, 9
-; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: ori $a0, $zero, 8
+; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: ori $a0, $zero, 9
; LA32-NEXT: ori $a1, $zero, 1
; LA32-NEXT: ori $a2, $zero, 2
; LA32-NEXT: ori $a3, $zero, 3
@@ -134,7 +134,7 @@ define i32 @different_args_tail_32bit(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4) no
; LA32-NEXT: ori $a5, $zero, 5
; LA32-NEXT: ori $a6, $zero, 6
; LA32-NEXT: ori $a7, $zero, 7
-; LA32-NEXT: st.w $a0, $sp, 0
+; LA32-NEXT: st.w $a0, $sp, 4
; LA32-NEXT: move $a0, $zero
; LA32-NEXT: b many_args_callee
;
@@ -187,9 +187,9 @@ define i32 @different_args_tail_64bit(i128 %0, i128 %1, i128 %2, i128 %3, i128 %
;
; LA64-LABEL: different_args_tail_64bit:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a0, $zero, 9
-; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: ori $a0, $zero, 8
+; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: ori $a0, $zero, 9
; LA64-NEXT: ori $a1, $zero, 1
; LA64-NEXT: ori $a2, $zero, 2
; LA64-NEXT: ori $a3, $zero, 3
@@ -197,7 +197,7 @@ define i32 @different_args_tail_64bit(i128 %0, i128 %1, i128 %2, i128 %3, i128 %
; LA64-NEXT: ori $a5, $zero, 5
; LA64-NEXT: ori $a6, $zero, 6
; LA64-NEXT: ori $a7, $zero, 7
-; LA64-NEXT: st.d $a0, $sp, 0
+; LA64-NEXT: st.d $a0, $sp, 8
; LA64-NEXT: move $a0, $zero
; LA64-NEXT: pcaddu18i $t8, %call36(many_args_callee)
; LA64-NEXT: jr $t8
>From ea60b3414f9f1e45765d56c912cb1a32f1b1523d Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Mon, 15 Dec 2025 20:12:38 +0800
Subject: [PATCH 4/5] Use divideCeil
---
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 377ca44da2d9e..c54452b13898f 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -8597,7 +8597,7 @@ LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
StackPtr = DAG.getCopyFromReg(Chain, DL, LoongArch::R3, PtrVT);
if (IsTailCall) {
- unsigned OpSize = (VA.getValVT().getSizeInBits() + 7) / 8;
+ unsigned OpSize = divideCeil(VA.getValVT().getSizeInBits(), 8);
int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
DstAddr = DAG.getFrameIndex(FI, PtrVT);
DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
>From 747e4735166072d6438e5e8ba1ebb10a52f31dfa Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Fri, 19 Dec 2025 07:57:13 +0800
Subject: [PATCH 5/5] Update the incoming ByVal args signature in MFI
---
llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
index 75db3365415e8..4159b97bcf598 100644
--- a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
@@ -81,8 +81,8 @@ class LoongArchMachineFunctionInfo : public MachineFunctionInfo {
}
void addIncomingByValArgs(SDValue Val) { IncomingByValArgs.push_back(Val); }
- SDValue &getIncomingByValArgs(int Idx) { return IncomingByValArgs[Idx]; }
- unsigned getIncomingByValArgsSize() { return IncomingByValArgs.size(); }
+ SDValue getIncomingByValArgs(int Idx) { return IncomingByValArgs[Idx]; }
+ unsigned getIncomingByValArgsSize() const { return IncomingByValArgs.size(); }
void addSExt32Register(Register Reg) { SExt32Registers.push_back(Reg); }
More information about the llvm-commits
mailing list