[clang] [llvm] [ARM] Fix musttail calls (PR #109943)
Oliver Stannard via cfe-commits
cfe-commits at lists.llvm.org
Thu Sep 26 03:44:53 PDT 2024
https://github.com/ostannard updated https://github.com/llvm/llvm-project/pull/109943
>From f96d3a1a44ade11f5a9879b49bb3d5296c4b225e Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Thu, 9 May 2024 12:58:41 +0100
Subject: [PATCH 01/11] [ARM] Re-generate a test
---
llvm/test/CodeGen/ARM/fp-arg-shuffle.ll | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll b/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll
index 4996cc8ecbf022..36f5a4b30af409 100644
--- a/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll
+++ b/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll
@@ -1,8 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=arm-eabi -mattr=+neon -float-abi=soft %s -o - | FileCheck %s
; CHECK: function1
; CHECK-NOT: vmov
define double @function1(double %a, double %b, double %c, double %d, double %e, double %f) nounwind noinline ssp {
+; CHECK-LABEL: function1:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r11, lr}
+; CHECK-NEXT: push {r4, r5, r11, lr}
+; CHECK-NEXT: .pad #32
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: add lr, sp, #64
+; CHECK-NEXT: vldr d16, [sp, #56]
+; CHECK-NEXT: str r2, [sp, #16]
+; CHECK-NEXT: ldm lr, {r4, r5, r12, lr}
+; CHECK-NEXT: str r3, [sp, #20]
+; CHECK-NEXT: mov r3, r5
+; CHECK-NEXT: str r0, [sp, #24]
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: str r1, [sp, #28]
+; CHECK-NEXT: mov r1, lr
+; CHECK-NEXT: mov r2, r4
+; CHECK-NEXT: vldr d17, [sp, #48]
+; CHECK-NEXT: vstmia sp, {d16, d17}
+; CHECK-NEXT: bl function2
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: pop {r4, r5, r11, lr}
+; CHECK-NEXT: mov pc, lr
entry:
%call = tail call double @function2(double %f, double %e, double %d, double %c, double %b, double %a) nounwind
ret double %call
>From 5d8d80070c9e660d1fe59a1d02fba3f5bc1a0217 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Tue, 24 Sep 2024 10:46:47 +0100
Subject: [PATCH 02/11] [ARM] Fix comment typo
---
llvm/lib/Target/ARM/ARMISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a03928b618df03..7bc62969624e7a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2407,8 +2407,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
isTailCall = false;
// For both the non-secure calls and the returns from a CMSE entry function,
- // the function needs to do some extra work afte r the call, or before the
- // return, respectively, thus it cannot end with atail call
+ // the function needs to do some extra work after the call, or before the
+ // return, respectively, thus it cannot end with a tail call
if (isCmseNSCall || AFI->isCmseNSEntryFunction())
isTailCall = false;
>From 855d73d6a3852aa013576e8bf7fb679a955d9260 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Thu, 9 May 2024 13:00:46 +0100
Subject: [PATCH 03/11] [ARM] Add debug trace for tail-call optimisation
There are lots of reasons a call might not be eligible for tail-call
optimisation, this adds debug trace to help understand the compiler's
decisions here.
---
llvm/lib/Target/ARM/ARMISelLowering.cpp | 62 +++++++++++++++++++------
1 file changed, 47 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 7bc62969624e7a..b7d52a69cdb9ab 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3046,8 +3046,10 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
for (const CCValAssign &AL : ArgLocs)
if (AL.isRegLoc())
AddressRegisters.erase(AL.getLocReg());
- if (AddressRegisters.empty())
+ if (AddressRegisters.empty()) {
+ LLVM_DEBUG(dbgs() << "false (no reg to hold function pointer)\n");
return false;
+ }
}
// Look for obvious safe cases to perform tail call optimization that do not
@@ -3056,18 +3058,25 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
// Exception-handling functions need a special set of instructions to indicate
// a return to the hardware. Tail-calling another function would probably
// break this.
- if (CallerF.hasFnAttribute("interrupt"))
+ if (CallerF.hasFnAttribute("interrupt")) {
+ LLVM_DEBUG(dbgs() << "false (interrupt attribute)\n");
return false;
+ }
- if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt))
+ if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt)) {
+ LLVM_DEBUG(dbgs() << (CalleeCC == CallerCC ? "true" : "false")
+ << " (guaranteed tail-call CC)\n");
return CalleeCC == CallerCC;
+ }
// Also avoid sibcall optimization if either caller or callee uses struct
// return semantics.
bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
bool isCallerStructRet = MF.getFunction().hasStructRetAttr();
- if (isCalleeStructRet || isCallerStructRet)
+ if (isCalleeStructRet || isCallerStructRet) {
+ LLVM_DEBUG(dbgs() << "false (struct-ret)\n");
return false;
+ }
// Externally-defined functions with weak linkage should not be
// tail-called on ARM when the OS does not support dynamic
@@ -3080,8 +3089,10 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
const GlobalValue *GV = G->getGlobal();
const Triple &TT = getTargetMachine().getTargetTriple();
if (GV->hasExternalWeakLinkage() &&
- (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
+ (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) {
+ LLVM_DEBUG(dbgs() << "false (external weak linkage)\n");
return false;
+ }
}
// Check that the call results are passed in the same way.
@@ -3090,23 +3101,29 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
getEffectiveCallingConv(CalleeCC, isVarArg),
getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,
CCAssignFnForReturn(CalleeCC, isVarArg),
- CCAssignFnForReturn(CallerCC, CallerF.isVarArg())))
+ CCAssignFnForReturn(CallerCC, CallerF.isVarArg()))) {
+ LLVM_DEBUG(dbgs() << "false (incompatible results)\n");
return false;
+ }
// The callee has to preserve all registers the caller needs to preserve.
const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (CalleeCC != CallerCC) {
const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
- if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
+ if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) {
+ LLVM_DEBUG(dbgs() << "false (not all registers preserved)\n");
return false;
+ }
}
// If Caller's vararg or byval argument has been split between registers and
// stack, do not perform tail call, since part of the argument is in caller's
// local frame.
const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
- if (AFI_Caller->getArgRegsSaveSize())
+ if (AFI_Caller->getArgRegsSaveSize()) {
+ LLVM_DEBUG(dbgs() << "false (arg reg save area)\n");
return false;
+ }
// If the callee takes no arguments then go on to check the results of the
// call.
@@ -3124,36 +3141,51 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
EVT RegVT = VA.getLocVT();
SDValue Arg = OutVals[realArgIdx];
ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
- if (VA.getLocInfo() == CCValAssign::Indirect)
+ if (VA.getLocInfo() == CCValAssign::Indirect) {
+ LLVM_DEBUG(dbgs() << "false (indirect arg)\n");
return false;
+ }
if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
// f64 and vector types are split into multiple registers or
// register/stack-slot combinations. The types will not match
// the registers; give up on memory f64 refs until we figure
// out what to do about this.
- if (!VA.isRegLoc())
+ if (!VA.isRegLoc()) {
+ LLVM_DEBUG(dbgs() << "false (f64 not in register)\n");
return false;
- if (!ArgLocs[++i].isRegLoc())
+ }
+ if (!ArgLocs[++i].isRegLoc()) {
+ LLVM_DEBUG(dbgs() << "false (f64 not in register, second half)\n");
return false;
+ }
if (RegVT == MVT::v2f64) {
- if (!ArgLocs[++i].isRegLoc())
+ if (!ArgLocs[++i].isRegLoc()) {
+ LLVM_DEBUG(dbgs() << "false (v2f64 not in register)\n");
return false;
- if (!ArgLocs[++i].isRegLoc())
+ }
+ if (!ArgLocs[++i].isRegLoc()) {
+ LLVM_DEBUG(dbgs() << "false (v2f64 not in register, second half)\n");
return false;
+ }
}
} else if (!VA.isRegLoc()) {
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
- MFI, MRI, TII))
+ MFI, MRI, TII)) {
+ LLVM_DEBUG(dbgs() << "false (non-matching stack offset)\n");
return false;
+ }
}
}
}
const MachineRegisterInfo &MRI = MF.getRegInfo();
- if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
+ if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) {
+ LLVM_DEBUG(dbgs() << "false (parameters in CSRs do not match)\n");
return false;
+ }
}
+ LLVM_DEBUG(dbgs() << "true\n");
return true;
}
>From ebe25e8f611ea6e45ddc1e6cdcc0c3ed20ac2bf9 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Thu, 9 May 2024 13:11:33 +0100
Subject: [PATCH 04/11] [ARM] Tail-calls do not require caller and callee
arguments to match
The ARM backend was checking that the outgoing values for a tail-call
matched the incoming argument values of the caller. This isn't
necessary, because the caller can change the values in both registers
and the stack before doing the tail-call. The actual limitation is that
the callee can't need more stack space for it's arguments than the
caller does.
This is needed for code using the musttail attribute, as well as
enabling tail calls as an optimisation in more cases.
---
llvm/lib/Target/ARM/ARMISelLowering.cpp | 109 ++----------------
llvm/test/CodeGen/ARM/fp-arg-shuffle.ll | 29 +++--
llvm/test/CodeGen/ARM/fp16-vector-argument.ll | 41 +++----
llvm/test/CodeGen/ARM/musttail.ll | 97 ++++++++++++++++
4 files changed, 134 insertions(+), 142 deletions(-)
create mode 100644 llvm/test/CodeGen/ARM/musttail.ll
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index b7d52a69cdb9ab..f8d54dd849b211 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2961,50 +2961,6 @@ void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
Size = std::max<int>(Size - Excess, 0);
}
-/// MatchingStackOffset - Return true if the given stack call argument is
-/// already available in the same position (relatively) of the caller's
-/// incoming argument stack.
-static
-bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
- MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
- const TargetInstrInfo *TII) {
- unsigned Bytes = Arg.getValueSizeInBits() / 8;
- int FI = std::numeric_limits<int>::max();
- if (Arg.getOpcode() == ISD::CopyFromReg) {
- Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
- if (!VR.isVirtual())
- return false;
- MachineInstr *Def = MRI->getVRegDef(VR);
- if (!Def)
- return false;
- if (!Flags.isByVal()) {
- if (!TII->isLoadFromStackSlot(*Def, FI))
- return false;
- } else {
- return false;
- }
- } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
- if (Flags.isByVal())
- // ByVal argument is passed in as a pointer but it's now being
- // dereferenced. e.g.
- // define @foo(%struct.X* %A) {
- // tail call @bar(%struct.X* byval %A)
- // }
- return false;
- SDValue Ptr = Ld->getBasePtr();
- FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
- if (!FINode)
- return false;
- FI = FINode->getIndex();
- } else
- return false;
-
- assert(FI != std::numeric_limits<int>::max());
- if (!MFI.isFixedObjectIndex(FI))
- return false;
- return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
-}
-
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function. Note that this function also
@@ -3127,64 +3083,17 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
// If the callee takes no arguments then go on to check the results of the
// call.
- if (!Outs.empty()) {
- if (CCInfo.getStackSize()) {
- // Check if the arguments are already laid out in the right way as
- // the caller's fixed stack objects.
- MachineFrameInfo &MFI = MF.getFrameInfo();
- const MachineRegisterInfo *MRI = &MF.getRegInfo();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
- i != e;
- ++i, ++realArgIdx) {
- CCValAssign &VA = ArgLocs[i];
- EVT RegVT = VA.getLocVT();
- SDValue Arg = OutVals[realArgIdx];
- ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
- if (VA.getLocInfo() == CCValAssign::Indirect) {
- LLVM_DEBUG(dbgs() << "false (indirect arg)\n");
- return false;
- }
- if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
- // f64 and vector types are split into multiple registers or
- // register/stack-slot combinations. The types will not match
- // the registers; give up on memory f64 refs until we figure
- // out what to do about this.
- if (!VA.isRegLoc()) {
- LLVM_DEBUG(dbgs() << "false (f64 not in register)\n");
- return false;
- }
- if (!ArgLocs[++i].isRegLoc()) {
- LLVM_DEBUG(dbgs() << "false (f64 not in register, second half)\n");
- return false;
- }
- if (RegVT == MVT::v2f64) {
- if (!ArgLocs[++i].isRegLoc()) {
- LLVM_DEBUG(dbgs() << "false (v2f64 not in register)\n");
- return false;
- }
- if (!ArgLocs[++i].isRegLoc()) {
- LLVM_DEBUG(dbgs() << "false (v2f64 not in register, second half)\n");
- return false;
- }
- }
- } else if (!VA.isRegLoc()) {
- if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
- MFI, MRI, TII)) {
- LLVM_DEBUG(dbgs() << "false (non-matching stack offset)\n");
- return false;
- }
- }
- }
- }
-
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) {
- LLVM_DEBUG(dbgs() << "false (parameters in CSRs do not match)\n");
- return false;
- }
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) {
+ LLVM_DEBUG(dbgs() << "false (parameters in CSRs do not match)\n");
+ return false;
}
+ // If the stack arguments for this call do not fit into our own save area then
+ // the call cannot be made tail.
+ if (CCInfo.getStackSize() > AFI_Caller->getArgumentStackSize())
+ return false;
+
LLVM_DEBUG(dbgs() << "true\n");
return true;
}
diff --git a/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll b/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll
index 36f5a4b30af409..99c9602eee58bf 100644
--- a/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll
+++ b/llvm/test/CodeGen/ARM/fp-arg-shuffle.ll
@@ -8,25 +8,24 @@ define double @function1(double %a, double %b, double %c, double %d, double %e,
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: add lr, sp, #64
-; CHECK-NEXT: vldr d16, [sp, #56]
-; CHECK-NEXT: str r2, [sp, #16]
-; CHECK-NEXT: ldm lr, {r4, r5, r12, lr}
-; CHECK-NEXT: str r3, [sp, #20]
-; CHECK-NEXT: mov r3, r5
-; CHECK-NEXT: str r0, [sp, #24]
+; CHECK-NEXT: vldr d16, [sp, #40]
+; CHECK-NEXT: vldr d17, [sp, #32]
+; CHECK-NEXT: vmov r12, lr, d16
+; CHECK-NEXT: vldr d16, [sp, #16]
+; CHECK-NEXT: vmov r4, r5, d17
+; CHECK-NEXT: vldr d17, [sp, #24]
+; CHECK-NEXT: str r3, [sp, #36]
+; CHECK-NEXT: str r2, [sp, #32]
+; CHECK-NEXT: str r1, [sp, #44]
+; CHECK-NEXT: str r0, [sp, #40]
+; CHECK-NEXT: vstr d17, [sp, #16]
+; CHECK-NEXT: vstr d16, [sp, #24]
; CHECK-NEXT: mov r0, r12
-; CHECK-NEXT: str r1, [sp, #28]
; CHECK-NEXT: mov r1, lr
; CHECK-NEXT: mov r2, r4
-; CHECK-NEXT: vldr d17, [sp, #48]
-; CHECK-NEXT: vstmia sp, {d16, d17}
-; CHECK-NEXT: bl function2
-; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: mov r3, r5
; CHECK-NEXT: pop {r4, r5, r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: b function2
entry:
%call = tail call double @function2(double %f, double %e, double %d, double %c, double %b, double %a) nounwind
ret double %call
diff --git a/llvm/test/CodeGen/ARM/fp16-vector-argument.ll b/llvm/test/CodeGen/ARM/fp16-vector-argument.ll
index 6fc56967bc7aa9..65aff46658fd1d 100644
--- a/llvm/test/CodeGen/ARM/fp16-vector-argument.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vector-argument.ll
@@ -145,26 +145,21 @@ entry:
define void @many_args_test(double, float, i16, <4 x half>, <8 x half>, <8 x half>, <8 x half>) {
; SOFT-LABEL: many_args_test:
; SOFT: @ %bb.0: @ %entry
-; SOFT-NEXT: push {r11, lr}
-; SOFT-NEXT: sub sp, sp, #32
-; SOFT-NEXT: add r12, sp, #80
+; SOFT-NEXT: add r12, sp, #40
; SOFT-NEXT: vld1.64 {d16, d17}, [r12]
-; SOFT-NEXT: add r12, sp, #48
+; SOFT-NEXT: add r12, sp, #8
; SOFT-NEXT: vabs.f16 q8, q8
; SOFT-NEXT: vld1.64 {d18, d19}, [r12]
-; SOFT-NEXT: add r12, sp, #64
+; SOFT-NEXT: add r12, sp, #24
; SOFT-NEXT: vadd.f16 q8, q8, q9
; SOFT-NEXT: vld1.64 {d18, d19}, [r12]
; SOFT-NEXT: add r12, sp, #16
; SOFT-NEXT: vmul.f16 q8, q9, q8
; SOFT-NEXT: vst1.64 {d16, d17}, [r12]
-; SOFT-NEXT: mov r12, sp
-; SOFT-NEXT: vldr d16, [sp, #40]
-; SOFT-NEXT: vst1.16 {d16}, [r12:64]!
-; SOFT-NEXT: str r3, [r12]
-; SOFT-NEXT: bl use
-; SOFT-NEXT: add sp, sp, #32
-; SOFT-NEXT: pop {r11, pc}
+; SOFT-NEXT: vldr d16, [sp]
+; SOFT-NEXT: vstr d16, [sp]
+; SOFT-NEXT: str r3, [sp, #8]
+; SOFT-NEXT: b use
;
; HARD-LABEL: many_args_test:
; HARD: @ %bb.0: @ %entry
@@ -177,33 +172,25 @@ define void @many_args_test(double, float, i16, <4 x half>, <8 x half>, <8 x hal
;
; SOFTEB-LABEL: many_args_test:
; SOFTEB: @ %bb.0: @ %entry
-; SOFTEB-NEXT: .save {r11, lr}
-; SOFTEB-NEXT: push {r11, lr}
-; SOFTEB-NEXT: .pad #32
-; SOFTEB-NEXT: sub sp, sp, #32
-; SOFTEB-NEXT: add r12, sp, #80
-; SOFTEB-NEXT: mov lr, sp
+; SOFTEB-NEXT: add r12, sp, #40
; SOFTEB-NEXT: vld1.64 {d16, d17}, [r12]
-; SOFTEB-NEXT: add r12, sp, #48
+; SOFTEB-NEXT: add r12, sp, #8
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: vabs.f16 q8, q8
; SOFTEB-NEXT: vld1.64 {d18, d19}, [r12]
-; SOFTEB-NEXT: add r12, sp, #64
+; SOFTEB-NEXT: add r12, sp, #24
; SOFTEB-NEXT: vrev64.16 q9, q9
; SOFTEB-NEXT: vadd.f16 q8, q8, q9
; SOFTEB-NEXT: vld1.64 {d18, d19}, [r12]
; SOFTEB-NEXT: add r12, sp, #16
; SOFTEB-NEXT: vrev64.16 q9, q9
; SOFTEB-NEXT: vmul.f16 q8, q9, q8
-; SOFTEB-NEXT: vldr d18, [sp, #40]
-; SOFTEB-NEXT: vrev64.16 d18, d18
-; SOFTEB-NEXT: vst1.16 {d18}, [lr:64]!
-; SOFTEB-NEXT: str r3, [lr]
+; SOFTEB-NEXT: vldr d18, [sp]
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: vst1.64 {d16, d17}, [r12]
-; SOFTEB-NEXT: bl use
-; SOFTEB-NEXT: add sp, sp, #32
-; SOFTEB-NEXT: pop {r11, pc}
+; SOFTEB-NEXT: vstr d18, [sp]
+; SOFTEB-NEXT: str r3, [sp, #8]
+; SOFTEB-NEXT: b use
;
; HARDEB-LABEL: many_args_test:
; HARDEB: @ %bb.0: @ %entry
diff --git a/llvm/test/CodeGen/ARM/musttail.ll b/llvm/test/CodeGen/ARM/musttail.ll
new file mode 100644
index 00000000000000..622bea3f876351
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/musttail.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=armv7a-none-eabi %s -o - | FileCheck %s
+
+declare i32 @many_args_callee(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5)
+
+define i32 @many_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
+; CHECK-LABEL: many_args_tail:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r0, #5
+; CHECK-NEXT: mov r1, #2
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: mov r0, #6
+; CHECK-NEXT: str r0, [sp, #4]
+; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: mov r3, #4
+; CHECK-NEXT: b many_args_callee
+ %ret = tail call i32 @many_args_callee(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
+ ret i32 %ret
+}
+
+define i32 @many_args_musttail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
+; CHECK-LABEL: many_args_musttail:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r0, #5
+; CHECK-NEXT: mov r1, #2
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: mov r0, #6
+; CHECK-NEXT: str r0, [sp, #4]
+; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: mov r3, #4
+; CHECK-NEXT: b many_args_callee
+ %ret = musttail call i32 @many_args_callee(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
+ ret i32 %ret
+}
+
+; This function has more arguments than it's tail-callee. This isn't valid for
+; the musttail attribute, but can still be tail-called as a non-guaranteed
+; optimisation, because the outgoing arguments to @many_args_callee fit in the
+; stack space allocated by the caller of @more_args_tail.
+define i32 @more_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6) {
+; CHECK-LABEL: more_args_tail:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r0, #5
+; CHECK-NEXT: mov r1, #2
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: mov r0, #6
+; CHECK-NEXT: str r0, [sp, #4]
+; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: mov r3, #4
+; CHECK-NEXT: b many_args_callee
+ %ret = tail call i32 @many_args_callee(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
+ ret i32 %ret
+}
+
+; Again, this isn't valid for musttail, but can be tail-called in practice
+; because the stack size is the same.
+define i32 @different_args_tail(i64 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: different_args_tail:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r0, #5
+; CHECK-NEXT: mov r1, #2
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: mov r0, #6
+; CHECK-NEXT: str r0, [sp, #4]
+; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: mov r3, #4
+; CHECK-NEXT: b many_args_callee
+ %ret = tail call i32 @many_args_callee(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
+ ret i32 %ret
+}
+
+; Here, the caller requires less stack space for its arguments than the
+; callee, so it would not be valid to do a tail-call.
+define i32 @fewer_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4) {
+; CHECK-LABEL: fewer_args_tail:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: mov r1, #6
+; CHECK-NEXT: mov r0, #5
+; CHECK-NEXT: strd r0, r1, [sp]
+; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: mov r1, #2
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: mov r3, #4
+; CHECK-NEXT: bl many_args_callee
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: pop {r11, pc}
+ %ret = tail call i32 @many_args_callee(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
+ ret i32 %ret
+}
>From 2f4be47cebd3827f23b6cc4f82f18c950d726875 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Tue, 24 Sep 2024 17:47:01 +0100
Subject: [PATCH 05/11] [ARM] Allow functions with sret returns to be
tail-called
It is valid to tail-call a function which returns through an sret
argument, as long as we have an incoming sret pointer to pass on.
---
llvm/lib/Target/ARM/ARMISelLowering.cpp | 2 +-
llvm/test/CodeGen/ARM/musttail.ll | 22 ++++++++++++++++++++++
2 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index f8d54dd849b211..74b45c89ee7ea2 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3029,7 +3029,7 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
// return semantics.
bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
bool isCallerStructRet = MF.getFunction().hasStructRetAttr();
- if (isCalleeStructRet || isCallerStructRet) {
+ if (isCalleeStructRet != isCallerStructRet) {
LLVM_DEBUG(dbgs() << "false (struct-ret)\n");
return false;
}
diff --git a/llvm/test/CodeGen/ARM/musttail.ll b/llvm/test/CodeGen/ARM/musttail.ll
index 622bea3f876351..6db45aa9e6285a 100644
--- a/llvm/test/CodeGen/ARM/musttail.ll
+++ b/llvm/test/CodeGen/ARM/musttail.ll
@@ -95,3 +95,25 @@ define i32 @fewer_args_tail(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4) {
%ret = tail call i32 @many_args_callee(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
ret i32 %ret
}
+
+declare void @sret_callee(ptr sret({ double, double }) align 8)
+
+; Functions which return by sret can be tail-called because the incoming sret
+; pointer gets passed through to the callee.
+define void @sret_caller_tail(ptr sret({ double, double }) align 8 %result) {
+; CHECK-LABEL: sret_caller_tail:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b sret_callee
+entry:
+ tail call void @sret_callee(ptr sret({ double, double }) align 8 %result)
+ ret void
+}
+
+define void @sret_caller_musttail(ptr sret({ double, double }) align 8 %result) {
+; CHECK-LABEL: sret_caller_musttail:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b sret_callee
+entry:
+ musttail call void @sret_callee(ptr sret({ double, double }) align 8 %result)
+ ret void
+}
>From c2520c9f27a424f0f5afcddd8a8b71d8f76a3a4a Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Tue, 24 Sep 2024 11:07:19 +0100
Subject: [PATCH 06/11] [LangRef] Disallow accessing byval arguments from
tail-called functions
We already disallow accessing the callee's allocas from a tail-called
function, because their stack memory will have been de-allocated before
the tail call. I think this should apply to byval arguments too, as they
also occupy space in the caller's stack frame.
---
llvm/docs/LangRef.rst | 8 ++++----
llvm/test/CodeGen/ARM/struct_byval.ll | 19 -------------------
2 files changed, 4 insertions(+), 23 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 91c3e60bb0acb1..441a1998a04606 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -12658,10 +12658,10 @@ This instruction requires several arguments:
the return value of the callee is returned to the caller's caller, even
if a void return type is in use.
- Both markers imply that the callee does not access allocas from the caller.
- The ``tail`` marker additionally implies that the callee does not access
- varargs from the caller. Calls marked ``musttail`` must obey the following
- additional rules:
+ Both markers imply that the callee does not access allocas or ``byval``
+ arguments from the caller. The ``tail`` marker additionally implies that the
+ callee does not access varargs from the caller. Calls marked ``musttail``
+ must obey the following additional rules:
- The call must immediately precede a :ref:`ret <i_ret>` instruction,
or a pointer bitcast followed by a ret instruction.
diff --git a/llvm/test/CodeGen/ARM/struct_byval.ll b/llvm/test/CodeGen/ARM/struct_byval.ll
index 73a1b5ee33bca9..2bc4f9c816d539 100644
--- a/llvm/test/CodeGen/ARM/struct_byval.ll
+++ b/llvm/test/CodeGen/ARM/struct_byval.ll
@@ -63,25 +63,6 @@ declare i32 @e1(ptr nocapture byval(%struct.SmallStruct) %in) nounwind
declare i32 @e2(ptr nocapture byval(%struct.LargeStruct) %in) nounwind
declare i32 @e3(ptr nocapture byval(%struct.LargeStruct) align 16 %in) nounwind
-; rdar://12442472
-; We can't do tail call since address of s is passed to the callee and part of
-; s is in caller's local frame.
-define void @f3(ptr nocapture byval(%struct.SmallStruct) %s) nounwind optsize {
-; CHECK-LABEL: f3
-; CHECK: bl _consumestruct
-entry:
- tail call void @consumestruct(ptr %s, i32 80) optsize
- ret void
-}
-
-define void @f4(ptr nocapture byval(%struct.SmallStruct) %s) nounwind optsize {
-; CHECK-LABEL: f4
-; CHECK: bl _consumestruct
-entry:
- tail call void @consumestruct(ptr %s, i32 80) optsize
- ret void
-}
-
; We can do tail call here since s is in the incoming argument area.
define void @f5(i32 %a, i32 %b, i32 %c, i32 %d, ptr nocapture byval(%struct.SmallStruct) %s) nounwind optsize {
; CHECK-LABEL: f5
>From 467f28e7a146cb769cab3e4f00e6eae0e2c7268f Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Tue, 24 Sep 2024 11:25:17 +0100
Subject: [PATCH 07/11] [ARM] Allow tail calls with byval args
Byval arguments which are passed partially in registers get stored into
the local stack frame, but it is valid to tail-call them because the
part which gets spilled is always re-loaded into registers before doing
the tail-call, so it's OK for the spill area to be deallocated.
---
llvm/lib/Target/ARM/ARMISelLowering.cpp | 8 +-
.../ARM/2013-05-13-AAPCS-byval-padding.ll | 16 +-
.../ARM/2013-05-13-AAPCS-byval-padding2.ll | 13 +-
llvm/test/CodeGen/ARM/musttail.ll | 202 ++++++++++++++++++
4 files changed, 216 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 74b45c89ee7ea2..dfb401487e1ded 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3072,11 +3072,11 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
}
}
- // If Caller's vararg or byval argument has been split between registers and
- // stack, do not perform tail call, since part of the argument is in caller's
- // local frame.
+ // If Caller's vararg argument has been split between registers and stack, do
+ // not perform tail call, since part of the argument is in caller's local
+ // frame.
const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
- if (AFI_Caller->getArgRegsSaveSize()) {
+ if (CLI.IsVarArg && AFI_Caller->getArgRegsSaveSize()) {
LLVM_DEBUG(dbgs() << "false (arg reg save area)\n");
return false;
}
diff --git a/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll b/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll
index d8e22f4f5312ae..e186ae3a961502 100644
--- a/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll
+++ b/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll
@@ -12,17 +12,11 @@ define void @check227(
; arg1 --> SP+188
entry:
-
-;CHECK: sub sp, sp, #12
-;CHECK: push {r11, lr}
-;CHECK: sub sp, sp, #4
-;CHECK: add r0, sp, #12
-;CHECK: stm r0, {r1, r2, r3}
-;CHECK: ldr r0, [sp, #212]
-;CHECK: bl useInt
-;CHECK: add sp, sp, #4
-;CHECK: pop {r11, lr}
-;CHECK: add sp, sp, #12
+; CHECK: sub sp, sp, #12
+; CHECK: stm sp, {r1, r2, r3}
+; CHECK: ldr r0, [sp, #200]
+; CHECK: add sp, sp, #12
+; CHECK: b useInt
%0 = ptrtoint ptr %arg1 to i32
tail call void @useInt(i32 %0)
diff --git a/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll b/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll
index 0c5d22984b99e1..efdecce9ae723a 100644
--- a/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll
+++ b/llvm/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll
@@ -7,14 +7,11 @@
define void @foo(ptr byval(%struct4bytes) %p0, ; --> R0
ptr byval(%struct20bytes) %p1 ; --> R1,R2,R3, [SP+0 .. SP+8)
) {
-;CHECK: sub sp, sp, #16
-;CHECK: push {r11, lr}
-;CHECK: add r12, sp, #8
-;CHECK: stm r12, {r0, r1, r2, r3}
-;CHECK: add r0, sp, #12
-;CHECK: bl useInt
-;CHECK: pop {r11, lr}
-;CHECK: add sp, sp, #16
+;CHECK: sub sp, sp, #16
+;CHECK: stm sp, {r0, r1, r2, r3}
+;CHECK: add r0, sp, #4
+;CHECK: add sp, sp, #16
+;CHECK: b useInt
%1 = ptrtoint ptr %p1 to i32
tail call void @useInt(i32 %1)
diff --git a/llvm/test/CodeGen/ARM/musttail.ll b/llvm/test/CodeGen/ARM/musttail.ll
index 6db45aa9e6285a..c59e32c1503031 100644
--- a/llvm/test/CodeGen/ARM/musttail.ll
+++ b/llvm/test/CodeGen/ARM/musttail.ll
@@ -117,3 +117,205 @@ entry:
musttail call void @sret_callee(ptr sret({ double, double }) align 8 %result)
ret void
}
+
+%large_struct = type { [20 x i32] }
+declare void @large_callee(%large_struct* byval(%large_struct) align 4)
+
+; Functions with byval parameters can be tail-called, because the value is
+; actually passed in registers and the stack in the same way for the caller and
+; callee. Within @large_caller the first 16 bytes of the argument are spilled
+; to the local stack frame, but for the tail-call they are passed in r0-r3, so
+; it's safe to de-allocate that memory before the call. Most of the code
+; generated for this isn't needed, but that's a missed optimisation, not a
+; correctness issue.
+define void @large_caller(%large_struct* byval(%large_struct) align 4 %a) {
+; CHECK-LABEL: large_caller:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: stm r12, {r0, r1, r2, r3}
+; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: add lr, r12, #16
+; CHECK-NEXT: add r12, sp, #24
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: pop {r4, lr}
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: b large_callee
+entry:
+ musttail call void @large_callee(%large_struct* byval(%large_struct) align 4 %a)
+ ret void
+}
+
+; As above, but with some inline asm to test that the arguments in r0-r3 are
+; re-loaded before the call.
+define void @large_caller_check_regs(%large_struct* byval(%large_struct) align 4 %a) {
+; CHECK-LABEL: large_caller_check_regs:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: stm r12, {r0, r1, r2, r3}
+; CHECK-NEXT: @APP
+; CHECK-NEXT: @NO_APP
+; CHECK-NEXT: add r12, sp, #24
+; CHECK-NEXT: add r0, sp, #8
+; CHECK-NEXT: add lr, r0, #16
+; CHECK-NEXT: add r3, sp, #8
+; CHECK-NEXT: ldm r3, {r0, r1, r2, r3}
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [lr], #4
+; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: pop {r4, lr}
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: b large_callee
+entry:
+ tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3}"()
+ musttail call void @large_callee(%large_struct* byval(%large_struct) align 4 %a)
+ ret void
+}
+
+; The IR for this one looks dodgy, because it has an alloca passed to a
+; musttail function, but it is passed as a byval argument, so will be copied
+; into the stack space allocated by @large_caller_new_value's caller, so is
+; valid.
+define void @large_caller_new_value(%large_struct* byval(%large_struct) align 4 %a) {
+; CHECK-LABEL: large_caller_new_value:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .pad #96
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: add r12, sp, #80
+; CHECK-NEXT: stm r12, {r0, r1, r2, r3}
+; CHECK-NEXT: mov r0, #5
+; CHECK-NEXT: mov r3, #3
+; CHECK-NEXT: str r0, [sp, #20]
+; CHECK-NEXT: mov r0, #4
+; CHECK-NEXT: str r0, [sp, #16]
+; CHECK-NEXT: mov r0, #3
+; CHECK-NEXT: str r0, [sp, #12]
+; CHECK-NEXT: mov r0, #2
+; CHECK-NEXT: str r0, [sp, #8]
+; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: str r0, [sp, #4]
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: mov r0, sp
+; CHECK-NEXT: add r1, r0, #16
+; CHECK-NEXT: add r0, sp, #96
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: ldr r2, [r1], #4
+; CHECK-NEXT: str r2, [r0], #4
+; CHECK-NEXT: mov r1, #1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: mov r2, #2
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: b large_callee
+entry:
+ %y = alloca %large_struct, align 4
+ store i32 0, ptr %y, align 4
+ %0 = getelementptr inbounds i8, ptr %y, i32 4
+ store i32 1, ptr %0, align 4
+ %1 = getelementptr inbounds i8, ptr %y, i32 8
+ store i32 2, ptr %1, align 4
+ %2 = getelementptr inbounds i8, ptr %y, i32 12
+ store i32 3, ptr %2, align 4
+ %3 = getelementptr inbounds i8, ptr %y, i32 16
+ store i32 4, ptr %3, align 4
+ %4 = getelementptr inbounds i8, ptr %y, i32 20
+ store i32 5, ptr %4, align 4
+ musttail call void @large_callee(%large_struct* byval(%large_struct) align 4 %y)
+ ret void
+}
>From 5ec1238756677fc68d3a4081b3d58e930d24b9ac Mon Sep 17 00:00:00 2001
From: Kiran <kiran.sturt at arm.com>
Date: Mon, 19 Aug 2024 14:44:50 +0100
Subject: [PATCH 08/11] [Clang] Always forward sret parameters to musttail
calls
If a call using the musttail attribute returns its value through an
sret argument pointer, we must forward an incoming sret pointer to it,
instead of creating a new alloca. This is always possible because the
musttail attribute requires the caller and callee to have the same
argument and return types.
---
clang/lib/CodeGen/CGCall.cpp | 2 +-
clang/test/CodeGen/musttail-sret.cpp | 84 ++++++++++++++++++++++++++++
2 files changed, 85 insertions(+), 1 deletion(-)
create mode 100644 clang/test/CodeGen/musttail-sret.cpp
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 4ae981e4013e9c..ecb72c265d7088 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -5112,7 +5112,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
RawAddress SRetAlloca = RawAddress::invalid();
llvm::Value *UnusedReturnSizePtr = nullptr;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
- if (IsVirtualFunctionPointerThunk && RetAI.isIndirect()) {
+ if ((IsVirtualFunctionPointerThunk && RetAI.isIndirect()) || IsMustTail) {
SRetPtr = makeNaturalAddressForPointer(CurFn->arg_begin() +
IRFunctionArgs.getSRetArgNo(),
RetTy, CharUnits::fromQuantity(1));
diff --git a/clang/test/CodeGen/musttail-sret.cpp b/clang/test/CodeGen/musttail-sret.cpp
new file mode 100644
index 00000000000000..ca67c218cd67f6
--- /dev/null
+++ b/clang/test/CodeGen/musttail-sret.cpp
@@ -0,0 +1,84 @@
+// RUN: %clang_cc1 -triple=arm %s -emit-llvm -O3 -o - | FileCheck %s --check-prefix=CHECK-ARM
+// RUN: %clang_cc1 -triple=arm64 %s -emit-llvm -O3 -o - | FileCheck %s --check-prefix=CHECK-ARM64
+// RUN: %clang_cc1 -triple=i686 %s -emit-llvm -O3 -o - | FileCheck %s --check-prefix=CHECK-X86
+// RUN: %clang_cc1 -triple=x86_64 %s -emit-llvm -O3 -o - | FileCheck %s --check-prefix=CHECK-X64
+
+// Sret tests
+struct Big {
+ int a, b, c, d, e, f, g, h;
+};
+
+struct Big F1(signed short P0);
+
+struct Big F2(signed short P0) {
+ signed short P1 = 20391;
+ [[clang::musttail]] return F1(P1);
+}
+
+// CHECK-NOT: alloca
+// CHECK-ARM: musttail call arm_aapcscc void @_Z2F1s(ptr dead_on_unwind writable sret(%struct.Big) align 4 %agg.result, i16 noundef signext 20391)
+// CHECK-ARM64: musttail call void @_Z2F1s(ptr dead_on_unwind writable sret(%struct.Big) align 4 %agg.result, i16 noundef 20391)
+// CHECK-X86: musttail call void @_Z2F1s(ptr dead_on_unwind writable sret(%struct.Big) align 4 %agg.result, i16 noundef signext 20391)
+// CHECK-X64: musttail call void @_Z2F1s(ptr dead_on_unwind writable sret(%struct.Big) align 4 %agg.result, i16 noundef signext 20391)
+
+struct ReallyBig {
+ int a[100];
+};
+
+// Indirect sret tests
+// Function pointer for testing indirect musttail call.
+struct FunctionPointers {
+ ReallyBig (*F3)(int, int, int, int, float, double);
+ ReallyBig (*F4)(int, int, int, char, float, double);
+};
+
+struct ReallyBig F3(int P0, int P1, int P2, int P3, float P4, double P5);
+struct ReallyBig F4(int P0, int P1, int P2, char P3, float P4, double P5);
+
+static struct FunctionPointers FP = {F3, F4};
+
+struct ReallyBig F5 (int P0, int P1, int P2, int P3, float P4, double P5) {
+ [[clang::musttail]] return FP.F3(P0, P1, P2, P3, P4, P5);
+}
+
+// CHECK-NOT: alloca
+// CHECK-ARM: musttail call arm_aapcscc void @_Z2F3iiiifd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i32 noundef %P3, float noundef %P4, double noundef %P5)
+// CHECK-ARM64: musttail call void @_Z2F3iiiifd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i32 noundef %P3, float noundef %P4, double noundef %P5)
+// CHECK-X86: musttail call void @_Z2F3iiiifd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i32 noundef %P3, float noundef %P4, double noundef %P5)
+// CHECK-X64: musttail call void @_Z2F3iiiifd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i32 noundef %P3, float noundef %P4, double noundef %P5)
+
+struct ReallyBig F6 (int P0, int P1, int P2, char P3, float P4, double P5) {
+ [[clang::musttail]] return FP.F4(P0, P1, P2, P3, P4, P5);
+}
+
+// Complex and BitInt. Special cases for sret.
+// CHECK-NOT: alloca
+// CHECK-ARM: musttail call arm_aapcscc void @_Z2F4iiicfd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i8 noundef signext %P3, float noundef %P4, double noundef %P5)
+// CHECK-ARM64: musttail call void @_Z2F4iiicfd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i8 noundef %P3, float noundef %P4, double noundef %P5)
+// CHECK-X86: musttail call void @_Z2F4iiicfd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i8 noundef signext %P3, float noundef %P4, double noundef %P5)
+// CHECK-X64: musttail call void @_Z2F4iiicfd(ptr dead_on_unwind writable sret(%struct.ReallyBig) align 4 %agg.result, i32 noundef %P0, i32 noundef %P1, i32 noundef %P2, i8 noundef signext %P3, float noundef %P4, double noundef %P5)
+
+double _Complex F7(signed short P0);
+
+double _Complex F8(signed short P0) {
+ signed short P1 = 20391;
+ [[clang::musttail]] return F7(P1);
+}
+
+// CHECK-NOT: alloca
+// CHECK-ARM: musttail call arm_aapcscc void @_Z2F7s(ptr dead_on_unwind writable sret({ double, double }) align 8 %agg.result, i16 noundef signext 20391)
+// CHECK-ARM64: musttail call noundef { double, double } @_Z2F7s(i16 noundef 20391)
+// CHECK-X86: musttail call void @_Z2F7s(ptr dead_on_unwind writable sret({ double, double }) align 4 %agg.result, i16 noundef signext 20391)
+// CHECK-X64: musttail call noundef { double, double } @_Z2F7s(i16 noundef signext 20391)
+
+signed _BitInt(100) F9(float P0, float P1, double P2, char P3);
+
+signed _BitInt(100) F10(float P0, float P1, double P2, char P3) {
+ [[clang::musttail]] return F9(P0, P1, P2, P3);
+}
+
+// CHECK-NOT: alloca
+// CHECK-ARM: musttail call arm_aapcscc void @_Z2F9ffdc(ptr dead_on_unwind writable sret(i128) align 8 %agg.result, float noundef %P0, float noundef %P1, double noundef %P2, i8 noundef signext %P3)
+// CHECK-ARM64: musttail call noundef i100 @_Z2F9ffdc(float noundef %P0, float noundef %P1, double noundef %P2, i8 noundef %P3)
+// CHECK-X86: musttail call void @_Z2F9ffdc(ptr dead_on_unwind writable sret(i128) align 4 %agg.result, float noundef %P0, float noundef %P1, double noundef %P2, i8 noundef signext %P3)
+// CHECK-X64: musttail call noundef { i64, i64 } @_Z2F9ffdc(float noundef %P0, float noundef %P1, double noundef %P2, i8 noundef signext %P3)
\ No newline at end of file
>From 1246cdc91802ef74c39036dee1379371d80ad7ac Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Wed, 25 Sep 2024 10:56:40 +0100
Subject: [PATCH 09/11] fixup! [ARM] Add debug trace for tail-call optimisation
---
llvm/lib/Target/ARM/ARMISelLowering.cpp | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index dfb401487e1ded..65522802147dda 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3015,11 +3015,12 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
// a return to the hardware. Tail-calling another function would probably
// break this.
if (CallerF.hasFnAttribute("interrupt")) {
- LLVM_DEBUG(dbgs() << "false (interrupt attribute)\n");
+ LLVM_DEBUG(dbgs() << "false (interrupt attribute)\n");
return false;
}
- if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt)) {
+ if (canGuaranteeTCO(CalleeCC,
+ getTargetMachine().Options.GuaranteedTailCallOpt)) {
LLVM_DEBUG(dbgs() << (CalleeCC == CallerCC ? "true" : "false")
<< " (guaranteed tail-call CC)\n");
return CalleeCC == CallerCC;
@@ -3030,7 +3031,7 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
bool isCallerStructRet = MF.getFunction().hasStructRetAttr();
if (isCalleeStructRet != isCallerStructRet) {
- LLVM_DEBUG(dbgs() << "false (struct-ret)\n");
+ LLVM_DEBUG(dbgs() << "false (struct-ret)\n");
return false;
}
@@ -3045,7 +3046,8 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
const GlobalValue *GV = G->getGlobal();
const Triple &TT = getTargetMachine().getTargetTriple();
if (GV->hasExternalWeakLinkage() &&
- (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) {
+ (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
+ TT.isOSBinFormatMachO())) {
LLVM_DEBUG(dbgs() << "false (external weak linkage)\n");
return false;
}
>From f6edcce0acbc3bbdf1d473a3d6a550a457112a37 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Thu, 26 Sep 2024 11:19:43 +0100
Subject: [PATCH 10/11] Reduce size of struct in musttail test
---
llvm/test/CodeGen/ARM/musttail.ll | 154 +++++++-----------------------
1 file changed, 32 insertions(+), 122 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/musttail.ll b/llvm/test/CodeGen/ARM/musttail.ll
index c59e32c1503031..0ae425070e7bb9 100644
--- a/llvm/test/CodeGen/ARM/musttail.ll
+++ b/llvm/test/CodeGen/ARM/musttail.ll
@@ -118,8 +118,12 @@ entry:
ret void
}
-%large_struct = type { [20 x i32] }
-declare void @large_callee(%large_struct* byval(%large_struct) align 4)
+; Clang only uses byval for arguments of 65 bytes or larger, but we test with a
+; 20 byte struct to keep the tests more readable. This size was chosen to still
+; make sure that it will be split between registers and the stack, to test all
+; of the interesting code paths in the backend.
+%twenty_bytes = type { [5 x i32] }
+declare void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4)
; Functions with byval parameters can be tail-called, because the value is
; actually passed in registers and the stack in the same way for the caller and
@@ -128,7 +132,7 @@ declare void @large_callee(%large_struct* byval(%large_struct) align 4)
; it's safe to de-allocate that memory before the call. Most of the code
; generated for this isn't needed, but that's a missed optimisation, not a
; correctness issue.
-define void @large_caller(%large_struct* byval(%large_struct) align 4 %a) {
+define void @large_caller(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
; CHECK-LABEL: large_caller:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .pad #16
@@ -136,53 +140,23 @@ define void @large_caller(%large_struct* byval(%large_struct) align 4 %a) {
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: add lr, sp, #24
; CHECK-NEXT: stm r12, {r0, r1, r2, r3}
; CHECK-NEXT: add r12, sp, #8
-; CHECK-NEXT: add lr, r12, #16
-; CHECK-NEXT: add r12, sp, #24
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: add r12, r12, #16
+; CHECK-NEXT: ldr r4, [r12], #4
+; CHECK-NEXT: str r4, [lr], #4
; CHECK-NEXT: pop {r4, lr}
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: b large_callee
entry:
- musttail call void @large_callee(%large_struct* byval(%large_struct) align 4 %a)
+ musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
ret void
}
; As above, but with some inline asm to test that the arguments in r0-r3 are
; re-loaded before the call.
-define void @large_caller_check_regs(%large_struct* byval(%large_struct) align 4 %a) {
+define void @large_caller_check_regs(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
; CHECK-LABEL: large_caller_check_regs:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .pad #16
@@ -190,52 +164,22 @@ define void @large_caller_check_regs(%large_struct* byval(%large_struct) align 4
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: add lr, sp, #24
; CHECK-NEXT: stm r12, {r0, r1, r2, r3}
; CHECK-NEXT: @APP
; CHECK-NEXT: @NO_APP
-; CHECK-NEXT: add r12, sp, #24
-; CHECK-NEXT: add r0, sp, #8
-; CHECK-NEXT: add lr, r0, #16
; CHECK-NEXT: add r3, sp, #8
+; CHECK-NEXT: add r0, sp, #8
+; CHECK-NEXT: add r12, r0, #16
; CHECK-NEXT: ldm r3, {r0, r1, r2, r3}
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
-; CHECK-NEXT: ldr r4, [lr], #4
-; CHECK-NEXT: str r4, [r12], #4
+; CHECK-NEXT: ldr r4, [r12], #4
+; CHECK-NEXT: str r4, [lr], #4
; CHECK-NEXT: pop {r4, lr}
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: b large_callee
entry:
tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3}"()
- musttail call void @large_callee(%large_struct* byval(%large_struct) align 4 %a)
+ musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
ret void
}
@@ -243,17 +187,15 @@ entry:
; musttail function, but it is passed as a byval argument, so will be copied
; into the stack space allocated by @large_caller_new_value's caller, so is
; valid.
-define void @large_caller_new_value(%large_struct* byval(%large_struct) align 4 %a) {
+define void @large_caller_new_value(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
; CHECK-LABEL: large_caller_new_value:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: .pad #96
-; CHECK-NEXT: sub sp, sp, #96
-; CHECK-NEXT: add r12, sp, #80
+; CHECK-NEXT: .pad #36
+; CHECK-NEXT: sub sp, sp, #36
+; CHECK-NEXT: add r12, sp, #20
; CHECK-NEXT: stm r12, {r0, r1, r2, r3}
-; CHECK-NEXT: mov r0, #5
-; CHECK-NEXT: mov r3, #3
-; CHECK-NEXT: str r0, [sp, #20]
; CHECK-NEXT: mov r0, #4
+; CHECK-NEXT: add r1, sp, #36
; CHECK-NEXT: str r0, [sp, #16]
; CHECK-NEXT: mov r0, #3
; CHECK-NEXT: str r0, [sp, #12]
@@ -264,47 +206,17 @@ define void @large_caller_new_value(%large_struct* byval(%large_struct) align 4
; CHECK-NEXT: mov r0, #0
; CHECK-NEXT: str r0, [sp]
; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: add r1, r0, #16
-; CHECK-NEXT: add r0, sp, #96
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: ldr r2, [r1], #4
-; CHECK-NEXT: str r2, [r0], #4
-; CHECK-NEXT: mov r1, #1
+; CHECK-NEXT: add r0, r0, #16
+; CHECK-NEXT: mov r3, #3
+; CHECK-NEXT: ldr r2, [r0], #4
+; CHECK-NEXT: str r2, [r1], #4
; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: mov r1, #1
; CHECK-NEXT: mov r2, #2
-; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: add sp, sp, #36
; CHECK-NEXT: b large_callee
entry:
- %y = alloca %large_struct, align 4
+ %y = alloca %twenty_bytes, align 4
store i32 0, ptr %y, align 4
%0 = getelementptr inbounds i8, ptr %y, i32 4
store i32 1, ptr %0, align 4
@@ -314,8 +226,6 @@ entry:
store i32 3, ptr %2, align 4
%3 = getelementptr inbounds i8, ptr %y, i32 16
store i32 4, ptr %3, align 4
- %4 = getelementptr inbounds i8, ptr %y, i32 20
- store i32 5, ptr %4, align 4
- musttail call void @large_callee(%large_struct* byval(%large_struct) align 4 %y)
+ musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %y)
ret void
}
>From a24cccf8cb2f65113ca3891adff4b64e9c74a5ba Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Thu, 26 Sep 2024 11:33:24 +0100
Subject: [PATCH 11/11] Limit clang change to RetAI.isIndirect returns, add
comment
---
clang/lib/CodeGen/CGCall.cpp | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index ecb72c265d7088..8f45ba4575401b 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -5112,7 +5112,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
RawAddress SRetAlloca = RawAddress::invalid();
llvm::Value *UnusedReturnSizePtr = nullptr;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
- if ((IsVirtualFunctionPointerThunk && RetAI.isIndirect()) || IsMustTail) {
+ // For virtual function pointer thunks and musttail calls, we must always
+ // forward an incoming SRet pointer to the callee, because a local alloca
+ // would be de-allocated before the call. These cases both guarantee that
+ // there will be an incoming SRet argument of the correct type.
+ if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.isIndirect()) {
SRetPtr = makeNaturalAddressForPointer(CurFn->arg_begin() +
IRFunctionArgs.getSRetArgNo(),
RetTy, CharUnits::fromQuantity(1));
More information about the cfe-commits
mailing list