[llvm] x86: fix musttail sibcall miscompilation (PR #168956)

Folkert de Vries via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 25 11:09:12 PST 2025


https://github.com/folkertdev updated https://github.com/llvm/llvm-project/pull/168956

>From 2482d8f87147edc42710f1232a702ba386496ea0 Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Sat, 22 Nov 2025 16:43:06 +0100
Subject: [PATCH 01/12] for musttail, also use tail call code to forward
 arguments

---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 37d77728882b1..cce324e786de3 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2285,7 +2285,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
         if (ShadowReg)
           RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
       }
-    } else if (!IsSibcall && (!isTailCall || isByVal)) {
+    } else if (!IsSibcall && (!isTailCall || (isByVal && !IsMustTail))) {
       assert(VA.isMemLoc());
       if (!StackPtr.getNode())
         StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
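
For reference, the kind of IR this change affects (a minimal sketch mirroring
the test5 case added later in this series; function names are illustrative): a
musttail call that forwards an incoming byval argument, whose outgoing stack
slot overlaps the caller's own incoming argument area.

%struct.5xi32 = type { [5 x i32] }

declare i32 @callee(ptr byval(%struct.5xi32))

; The 20-byte struct is passed on the stack; for the musttail call the
; outgoing copy targets the same memory that holds the incoming argument.
define i32 @caller(ptr byval(%struct.5xi32) %p) {
  %r = musttail call i32 @callee(ptr byval(%struct.5xi32) %p)
  ret i32 %r
}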

>From e12da2ee1668210e91771e88dfbc37118de8086b Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Sat, 22 Nov 2025 17:21:37 +0100
Subject: [PATCH 02/12] add `X86TargetLowering::ByValNeedsCopyForTailCall`

---
 llvm/lib/Target/X86/X86ISelLowering.h       | 16 ++++++
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 55 +++++++++++++++++++++
 2 files changed, 71 insertions(+)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index e28b9c11a04cd..157642e14f68d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1067,6 +1067,19 @@ namespace llvm {
   //===--------------------------------------------------------------------===//
   //  X86 Implementation of the TargetLowering interface
   class X86TargetLowering final : public TargetLowering {
+    // Copying needed for an outgoing byval argument.
+    enum ByValCopyKind {
+      // Argument is already in the correct location, no copy needed.
+      NoCopy,
+      // Argument value is currently in the local stack frame, needs copying to
+      // outgoing argument area.
+      CopyOnce,
+      // Argument value is currently in the outgoing argument area, but not at
+      // the correct offset, so needs copying via a temporary in local stack
+      // space.
+      CopyViaTemp,
+    };
+
   public:
     explicit X86TargetLowering(const X86TargetMachine &TM,
                                const X86Subtarget &STI);
@@ -1775,6 +1788,9 @@ namespace llvm {
     SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
+    ByValCopyKind ByValNeedsCopyForTailCall(SelectionDAG &DAG, SDValue Src,
+                                            SDValue Dst,
+                                            ISD::ArgFlagsTy Flags) const;
     SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index cce324e786de3..2868a94f1e099 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2018,6 +2018,61 @@ SDValue X86TargetLowering::getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
 }
 
+// Returns the type of copying which is required to set up a byval argument to
+// a tail-called function. This isn't needed for non-tail calls, because they
+// always need the equivalent of CopyOnce, but tail calls sometimes need two
+// copies to avoid clobbering another argument (CopyViaTemp), and can
+// sometimes be optimised to zero copies when forwarding an argument from the
+// caller's caller (NoCopy).
+X86TargetLowering::ByValCopyKind X86TargetLowering::ByValNeedsCopyForTailCall(
+    SelectionDAG &DAG, SDValue Src, SDValue Dst, ISD::ArgFlagsTy Flags) const {
+  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+
+  // Globals are always safe to copy from.
+  if (isa<GlobalAddressSDNode>(Src) || isa<ExternalSymbolSDNode>(Src))
+    return CopyOnce;
+
+  // We can only analyse frame index nodes; conservatively assume that
+  // anything else needs a temporary.
+  auto *SrcFrameIdxNode = dyn_cast<FrameIndexSDNode>(Src);
+  auto *DstFrameIdxNode = dyn_cast<FrameIndexSDNode>(Dst);
+  if (!SrcFrameIdxNode || !DstFrameIdxNode)
+    return CopyViaTemp;
+
+  int SrcFI = SrcFrameIdxNode->getIndex();
+  int DstFI = DstFrameIdxNode->getIndex();
+  assert(MFI.isFixedObjectIndex(DstFI) &&
+         "byval passed in non-fixed stack slot");
+
+  int64_t SrcOffset = MFI.getObjectOffset(SrcFI);
+  int64_t DstOffset = MFI.getObjectOffset(DstFI);
+
+  // FIXME:
+
+  //  // If the source is in the local frame, then the copy to the argument
+  //  memory
+  //  // is always valid.
+  //  bool FixedSrc = MFI.isFixedObjectIndex(SrcFI);
+  //  if (!FixedSrc ||
+  //      (FixedSrc && SrcOffset < -(int64_t)AFI->getArgRegsSaveSize()))
+  //    return CopyOnce;
+
+  // In the case of byval arguments split between registers and the stack,
+  // computeAddrForCallArg returns a FrameIndex which corresponds only to the
+  // stack portion, but the Src SDValue will refer to the full value, including
+  // the local stack memory that the register portion gets stored into. We only
+  // need to compare them for equality, so normalise on the full value version.
+  uint64_t RegSize = Flags.getByValSize() - MFI.getObjectSize(DstFI);
+  DstOffset -= RegSize;
+
+  // If the value is already in the correct location, then no copying is
+  // needed. If not, then we need to copy via a temporary.
+  if (SrcOffset == DstOffset)
+    return NoCopy;
+  else
+    return CopyViaTemp;
+}
+
 SDValue
 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                              SmallVectorImpl<SDValue> &InVals) const {
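
To make the classification concrete, here are two IR sketches (variants of the
tests added later in this series; function names are illustrative). Forwarding
an incoming byval unchanged needs no copy at all, while swapping two incoming
byvals means each source overlaps the other's destination slot in the argument
area, so the values must be staged through local temporaries.

%s = type { [5 x i32] }

declare void @take2(ptr byval(%s), ptr byval(%s))

; NoCopy: %a and %b are already at their final stack offsets.
define void @forward(ptr byval(%s) %a, ptr byval(%s) %b) {
  musttail call void @take2(ptr byval(%s) %a, ptr byval(%s) %b)
  ret void
}

; CopyViaTemp: storing %b into the first slot would clobber %a (and vice
; versa), so each value is copied into the local frame first.
define void @swap(ptr byval(%s) %a, ptr byval(%s) %b) {
  musttail call void @take2(ptr byval(%s) %b, ptr byval(%s) %a)
  ret void
}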

>From 9b5a3214891aa0cbe604386fb818229429f7eb40 Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Sat, 22 Nov 2025 18:11:18 +0100
Subject: [PATCH 03/12] start using `ByValCopyKind`

---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 87 +++++++++++++++++++--
 1 file changed, 80 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 2868a94f1e099..9162f7b0389f2 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2201,6 +2201,74 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   unsigned NumBytesToPush = NumBytes;
   unsigned NumBytesToPop = NumBytes;
 
+  SDValue StackPtr;
+  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+  int RetAddrSize = 8;
+
+  // If we are doing a tail-call, any byval arguments will be written to stack
+  // space which was used for incoming arguments. If any of the values being used
+  // are incoming byval arguments to this function, then they might be
+  // overwritten by the stores of the outgoing arguments. To avoid this, we
+  // need to make a temporary copy of them in local stack space, then copy back
+  // to the argument area.
+  DenseMap<unsigned, SDValue> ByValTemporaries;
+  SDValue ByValTempChain;
+  if (isTailCall) {
+    SmallVector<SDValue, 8> ByValCopyChains;
+    for (const CCValAssign &VA : ArgLocs) {
+      unsigned ArgIdx = VA.getValNo();
+      SDValue Src = OutVals[ArgIdx];
+      ISD::ArgFlagsTy Flags = Outs[ArgIdx].Flags;
+
+      if (!Flags.isByVal())
+        continue;
+
+      auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+      if (!StackPtr.getNode())
+        StackPtr =
+            DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), PtrVT);
+
+      // Destination: where this byval should live in the callee's frame
+      // after the tail call.
+      int32_t Offset = VA.getLocMemOffset() + FPDiff + RetAddrSize;
+      SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
+                                DAG.getIntPtrConstant(Offset, dl));
+
+      ByValCopyKind Copy = ByValNeedsCopyForTailCall(DAG, Src, Dst, Flags);
+
+      if (Copy == NoCopy) {
+        // If the argument is already at the correct offset on the stack
+        // (because we are forwarding a byval argument from our caller), we
+        // don't need any copying.
+        continue;
+      } else if (Copy == CopyOnce) {
+        // If the argument is in our local stack frame, no other argument
+        // preparation can clobber it, so we can copy it to the final location
+        // later.
+        ByValTemporaries[ArgIdx] = Src;
+      } else {
+        assert(Copy == CopyViaTemp && "unexpected enum value");
+        // If we might be copying this argument from the outgoing argument
+        // stack area, we need to copy via a temporary in the local stack
+        // frame.
+        MachineFrameInfo &MFI = MF.getFrameInfo();
+        int TempFrameIdx = MFI.CreateStackObject(Flags.getByValSize(),
+                                                 Flags.getNonZeroByValAlign(),
+                                                 /*isSS=*/false);
+        SDValue Temp =
+            DAG.getFrameIndex(TempFrameIdx, getPointerTy(DAG.getDataLayout()));
+
+        SDValue CopyChain =
+            CreateCopyOfByValArgument(Src, Temp, Chain, Flags, DAG, dl);
+        ByValCopyChains.push_back(CopyChain);
+      }
+    }
+    if (!ByValCopyChains.empty())
+      ByValTempChain =
+          DAG.getNode(ISD::TokenFactor, dl, MVT::Other, ByValCopyChains);
+  }
+
   // If we have an inalloca argument, all stack space has already been allocated
   // for us and be right at the top of the stack.  We don't support multiple
   // arguments passed in memory when using inalloca.
@@ -2241,7 +2309,6 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 
   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
   SmallVector<SDValue, 8> MemOpChains;
-  SDValue StackPtr;
 
   // The next loop assumes that the locations are in the same order of the
   // input arguments.
@@ -2250,7 +2317,6 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 
   // Walk the register/memloc assignments, inserting copies/loads.  In the case
   // of tail call optimization arguments are handle later.
-  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
   for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
        ++I, ++OutIndex) {
     assert(OutIndex < Outs.size() && "Invalid Out index");
@@ -2459,13 +2525,20 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
 
       if (Flags.isByVal()) {
-        // Copy relative to framepointer.
+        SDValue ByValSrc;
+        bool NeedsStackCopy;
+        if (auto It = ByValTemporaries.find(OutsIndex);
+            It != ByValTemporaries.end()) {
+          ByValSrc = It->second;
+          NeedsStackCopy = true;
+        } else {
+          ByValSrc = Arg;
+          NeedsStackCopy = !isTailCall;
+        }
+
         SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
-        if (!StackPtr.getNode())
-          StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
-                                        getPointerTy(DAG.getDataLayout()));
         Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
-                             StackPtr, Source);
+                             ByValSrc, Source);
 
         MemOpChains2.push_back(
             CreateCopyOfByValArgument(Source, FIN, Chain, Flags, DAG, dl));
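
For contrast, a CopyOnce case (a sketch along the lines of the
large_caller_new_value test added later in this series; names are
illustrative): the source is a local alloca, which no outgoing-argument store
can clobber, so a single copy into the argument area suffices and no temporary
is needed.

%s = type { [5 x i32] }

declare void @callee(ptr byval(%s) align 4)

define void @fresh(ptr byval(%s) align 4 %a) {
  %y = alloca %s, align 4
  store i32 1, ptr %y, align 4
  ; %y lives in the local frame, so it is classified as CopyOnce.
  musttail call void @callee(ptr byval(%s) align 4 %y)
  ret void
}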

>From 83ec93f23564069a741c92410679fbd935fec67e Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Sat, 22 Nov 2025 18:43:56 +0100
Subject: [PATCH 04/12] fix byval arguments in tail calls

---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 9162f7b0389f2..479b186c2b8fb 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2493,6 +2493,10 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
     // would clobber.
     Chain = DAG.getStackArgumentTokenFactor(Chain);
 
+    if (ByValTempChain)
+      Chain =
+          DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain, ByValTempChain);
+
     SmallVector<SDValue, 8> MemOpChains2;
     SDValue FIN;
     int FI = 0;
@@ -2536,12 +2540,12 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
           NeedsStackCopy = !isTailCall;
         }
 
-        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
-        Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
-                             ByValSrc, Source);
+        auto PtrVT = getPointerTy(DAG.getDataLayout());
+        SDValue DstAddr = DAG.getFrameIndex(FI, PtrVT);
 
-        MemOpChains2.push_back(
-            CreateCopyOfByValArgument(Source, FIN, Chain, Flags, DAG, dl));
+        // Copy the struct contents from ByValSrc to DstAddr.
+        MemOpChains2.push_back(CreateCopyOfByValArgument(
+            ByValSrc, DstAddr, Chain, Flags, DAG, dl));
       } else {
         // Store relative to framepointer.
         MemOpChains2.push_back(DAG.getStore(

>From 4636011334cfea6c7953e83d47297ab21dd3e323 Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Thu, 20 Nov 2025 22:58:28 +0100
Subject: [PATCH 05/12] Treat a musttail sibcall like a sibcall

In particular, we don't need to bother with shuffling arguments around
on the stack: in the x86 backend, only calls that do not need to move
arguments on the stack are considered sibcalls.
---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp   | 20 +++--
 llvm/test/CodeGen/X86/musttail-struct.ll      | 88 +++++++++++++++++++
 llvm/test/CodeGen/X86/musttail-tailcc.ll      | 18 ----
 ...c-store-ret-address-aliasing-stack-slot.ll |  6 +-
 4 files changed, 102 insertions(+), 30 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/musttail-struct.ll

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 479b186c2b8fb..29a9b27132844 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2153,15 +2153,18 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       isTailCall = false;
   }
 
-  if (isTailCall && !IsMustTail) {
+  if (isTailCall) {
     // Check if it's really possible to do a tail call.
-    isTailCall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
-                                                   IsCalleePopSRet);
+    IsSibcall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
+                                                  IsCalleePopSRet);
+
+    if (!IsMustTail) {
+      isTailCall = IsSibcall;
 
-    // Sibcalls are automatically detected tailcalls which do not require
-    // ABI changes.
-    if (!IsGuaranteeTCO && isTailCall)
-      IsSibcall = true;
+      // Sibcalls are automatically detected tailcalls which do not require
+      // ABI changes.
+      IsSibcall = IsSibcall && !IsGuaranteeTCO;
+    }
 
     if (isTailCall)
       ++NumTailCalls;
@@ -2183,8 +2186,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
     NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
 
+  // A sibcall is ABI-compatible and does not need to adjust the stack pointer.
   int FPDiff = 0;
-  if (isTailCall &&
+  if (isTailCall && !IsSibcall &&
       shouldGuaranteeTCO(CallConv,
                          MF.getTarget().Options.GuaranteedTailCallOpt)) {
     // Lower arguments at fp - stackoffset + fpdiff.
diff --git a/llvm/test/CodeGen/X86/musttail-struct.ll b/llvm/test/CodeGen/X86/musttail-struct.ll
new file mode 100644
index 0000000000000..257ea8c2b81d5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/musttail-struct.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -x86-asm-syntax=intel | FileCheck %s
+; ; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s
+
+; Test correct handling of a musttail call with a byval struct argument.
+
+%struct.1xi32 = type { [1 x i32] }
+%struct.3xi32 = type { [3 x i32] }
+%struct.5xi32 = type { [5 x i32] }
+
+declare dso_local i32 @Func1(ptr byval(%struct.1xi32) %0)
+declare dso_local i32 @Func3(ptr byval(%struct.3xi32) %0)
+declare dso_local i32 @Func5(ptr byval(%struct.5xi32) %0)
+declare dso_local i32 @FuncManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7)
+
+define dso_local i32 @test1(ptr byval(%struct.1xi32) %0) {
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    jmp Func1 # TAILCALL
+  %r = musttail call i32 @Func1(ptr byval(%struct.1xi32) %0)
+  ret i32 %r
+}
+
+define dso_local i32 @test3(ptr byval(%struct.3xi32) %0) {
+; CHECK-LABEL: test3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    jmp Func3 # TAILCALL
+  %r = musttail call i32 @Func3(ptr byval(%struct.3xi32) %0)
+  ret i32 %r
+}
+
+; sizeof(%struct.5xi32) > 16, so in x64 this is passed on the stack.
+define dso_local i32 @test5(ptr byval(%struct.5xi32) %0) {
+; CHECK-LABEL: test5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    jmp Func5 # TAILCALL
+  %r = musttail call i32 @Func5(ptr byval(%struct.5xi32) %0)
+  ret i32 %r
+}
+
+; Test passing multiple arguments with different sizes on the stack. In x64
+; Linux the first 6 are passed in registers.
+define dso_local i32 @testManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7) {
+; CHECK-LABEL: testManyArgs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    jmp FuncManyArgs # TAILCALL
+  %r = musttail call i32 @FuncManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7)
+  ret i32 %r
+}
+
+define dso_local i32 @testRecursion(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7) {
+; CHECK-LABEL: testRecursion:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    jmp testRecursion # TAILCALL
+  %r = musttail call i32 @testRecursion(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7)
+  ret i32 %r
+}
+
+define dso_local i32 @swap(ptr byval(%struct.1xi32) %0, ptr byval(%struct.1xi32) %1) noinline {
+; CHECK-LABEL: swap:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mov eax, dword ptr [rsp + 8]
+; CHECK-NEXT:    add eax, dword ptr [rsp + 16]
+; CHECK-NEXT:    ret
+entry:
+  %a.ptr = getelementptr inbounds %struct.1xi32, ptr %0, i32 0, i32 0, i32 0
+  %a     = load i32, ptr %a.ptr, align 4
+  %b.ptr = getelementptr inbounds %struct.1xi32, ptr %1, i32 0, i32 0, i32 0
+  %b     = load i32, ptr %b.ptr, align 4
+  %sum   = add i32 %a, %b
+  ret i32 %sum
+}
+
+define dso_local i32 @swapByValArguments(ptr byval(%struct.1xi32) %0, ptr byval(%struct.1xi32) %1) {
+; CHECK-LABEL: swapArguments:
+; CHECK:       # %bb.0:
+
+; CHECK-NEXT:    mov eax, dword ptr [rsp + 8]
+; CHECK-NEXT:    mov dword ptr [rsp - 16], eax
+; CHECK-NEXT:    mov ecx, dword ptr [rsp + 16]
+; CHECK-NEXT:    mov dword ptr [rsp - 8], ecx
+
+; CHECK-NEXT:    mov dword ptr [rsp + 8], ecx
+; CHECK-NEXT:    mov dword ptr [rsp + 16], eax
+; CHECK-NEXT:    jmp swap # TAILCALL
+  %r = musttail call i32 @swap(ptr byval(%struct.1xi32) %1, ptr byval(%struct.1xi32) %0)
+  ret i32 %r
+}
diff --git a/llvm/test/CodeGen/X86/musttail-tailcc.ll b/llvm/test/CodeGen/X86/musttail-tailcc.ll
index fae698d53b927..f1ffbcb1142c5 100644
--- a/llvm/test/CodeGen/X86/musttail-tailcc.ll
+++ b/llvm/test/CodeGen/X86/musttail-tailcc.ll
@@ -55,15 +55,6 @@ define dso_local tailcc void @void_test(i32, i32, i32, i32) {
 ;
 ; X86-LABEL: void_test:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    .cfi_offset %esi, -8
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    jmp void_test # TAILCALL
   entry:
    musttail call tailcc void @void_test( i32 %0, i32 %1, i32 %2, i32 %3)
@@ -77,15 +68,6 @@ define dso_local tailcc i1 @i1test(i32, i32, i32, i32) {
 ;
 ; X86-LABEL: i1test:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    .cfi_offset %esi, -8
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    jmp i1test # TAILCALL
   entry:
   %4 = musttail call tailcc i1 @i1test( i32 %0, i32 %1, i32 %2, i32 %3)
diff --git a/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll b/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll
index cd669768705e5..b901d22f66392 100644
--- a/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll
+++ b/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
 ; RUN: llc %s -o - | FileCheck %s
 
 target triple = "x86_64-apple-macosx"
@@ -24,9 +25,7 @@ define swifttailcc void @test(ptr %0, ptr swiftasync %1, i64 %2, i64 %3, ptr %4,
 ; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %r15
 ; CHECK-NEXT:    callq _foo
 ; CHECK-NEXT:    movq %r14, (%rax)
-; CHECK-NEXT:    movl [[OFF:[0-9]+]](%rsp), %edx
-; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
-; CHECK-NEXT:    movq %rcx, [[OFF]](%rsp)
+; CHECK-NEXT:    movl {{[0-9]+}}(%rsp), %edx
 ; CHECK-NEXT:    movq %rax, %r14
 ; CHECK-NEXT:    movq %r13, %rdi
 ; CHECK-NEXT:    movq %r15, %rsi
@@ -34,7 +33,6 @@ define swifttailcc void @test(ptr %0, ptr swiftasync %1, i64 %2, i64 %3, ptr %4,
 ; CHECK-NEXT:    addq $8, %rsp
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    popq %r15
-; CHECK-NEXT:    addq $16, %rsp
 ; CHECK-NEXT:    jmp _tc_fn ## TAILCALL
 entry:
   %res = tail call ptr @foo()

>From 731bf99dfa4c146c7b1ae356d037a3b26a396fdb Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Sun, 23 Nov 2025 15:06:20 +0100
Subject: [PATCH 06/12] add byval arm tests

---
 llvm/test/CodeGen/X86/musttail-struct.ll | 222 +++++++++++++++++++++++
 1 file changed, 222 insertions(+)

diff --git a/llvm/test/CodeGen/X86/musttail-struct.ll b/llvm/test/CodeGen/X86/musttail-struct.ll
index 257ea8c2b81d5..ba74c35a1ee49 100644
--- a/llvm/test/CodeGen/X86/musttail-struct.ll
+++ b/llvm/test/CodeGen/X86/musttail-struct.ll
@@ -86,3 +86,225 @@ define dso_local i32 @swapByValArguments(ptr byval(%struct.1xi32) %0, ptr byval(
   %r = musttail call i32 @swap(ptr byval(%struct.1xi32) %1, ptr byval(%struct.1xi32) %0)
   ret i32 %r
 }
+
+; Clang only uses byval for arguments of 65 bytes or larger, but e.g. rustc
+; does use byval for smaller types. Here we use a 20 byte struct to keep
+; the tests more readable.
+%twenty_bytes = type { [5 x i32] }
+declare void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4)
+
+; Functions with byval parameters can be tail-called, because the value is
+; actually passed in registers and the stack in the same way for the caller and
+; callee. Within @large_caller the first 16 bytes of the argument are spilled
+; to the local stack frame, but for the tail-call they are passed in r0-r3, so
+; it's safe to de-allocate that memory before the call.
+; TODO: The SUB and STM instructions are unnecessary and could be optimised
+; out, but the behaviour of this is still correct.
+define void @large_caller(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
+; CHECK-LABEL: large_caller:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    stm sp!, {r0, r1, r2, r3}
+; CHECK-NEXT:    b large_callee
+entry:
+  musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
+  ret void
+}
+
+; As above, but with some inline asm to test that the arguments in r0-r3 are
+; re-loaded before the call.
+define void @large_caller_check_regs(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
+; CHECK-LABEL: large_caller_check_regs:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    stm sp, {r0, r1, r2, r3}
+; CHECK-NEXT:    @APP
+; CHECK-NEXT:    @NO_APP
+; CHECK-NEXT:    pop {r0, r1, r2, r3}
+; CHECK-NEXT:    b large_callee
+entry:
+  tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3}"()
+  musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
+  ret void
+}
+
+; The IR for this one looks dodgy, because it passes an alloca to a musttail
+; function, but since it is a byval argument it will be copied into the stack
+; space allocated by @large_caller_new_value's caller, so it is
+; valid.
+define void @large_caller_new_value(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
+; CHECK-LABEL: large_caller_new_value:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #36
+; CHECK-NEXT:    sub sp, sp, #36
+; CHECK-NEXT:    add r12, sp, #20
+; CHECK-NEXT:    stm r12, {r0, r1, r2, r3}
+; CHECK-NEXT:    mov r0, #4
+; CHECK-NEXT:    add r1, sp, #36
+; CHECK-NEXT:    str r0, [sp, #16]
+; CHECK-NEXT:    mov r0, #3
+; CHECK-NEXT:    str r0, [sp, #12]
+; CHECK-NEXT:    mov r0, #2
+; CHECK-NEXT:    str r0, [sp, #8]
+; CHECK-NEXT:    mov r0, #1
+; CHECK-NEXT:    str r0, [sp, #4]
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    str r0, [sp]
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    add r0, r0, #16
+; CHECK-NEXT:    mov r3, #3
+; CHECK-NEXT:    ldr r2, [r0], #4
+; CHECK-NEXT:    str r2, [r1], #4
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    mov r1, #1
+; CHECK-NEXT:    mov r2, #2
+; CHECK-NEXT:    add sp, sp, #36
+; CHECK-NEXT:    b large_callee
+entry:
+  %y = alloca %twenty_bytes, align 4
+  store i32 0, ptr %y, align 4
+  %0 = getelementptr inbounds i8, ptr %y, i32 4
+  store i32 1, ptr %0, align 4
+  %1 = getelementptr inbounds i8, ptr %y, i32 8
+  store i32 2, ptr %1, align 4
+  %2 = getelementptr inbounds i8, ptr %y, i32 12
+  store i32 3, ptr %2, align 4
+  %3 = getelementptr inbounds i8, ptr %y, i32 16
+  store i32 4, ptr %3, align 4
+  musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %y)
+  ret void
+}
+
+declare void @two_byvals_callee(%twenty_bytes* byval(%twenty_bytes) align 4, %twenty_bytes* byval(%twenty_bytes) align 4)
+define void @swap_byvals(%twenty_bytes* byval(%twenty_bytes) align 4 %a, %twenty_bytes* byval(%twenty_bytes) align 4 %b) {
+; CHECK-LABEL: swap_byvals:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    .pad #40
+; CHECK-NEXT:    sub sp, sp, #40
+; CHECK-NEXT:    add r12, sp, #56
+; CHECK-NEXT:    add lr, sp, #20
+; CHECK-NEXT:    stm r12, {r0, r1, r2, r3}
+; CHECK-NEXT:    add r0, sp, #56
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    mov r2, r12
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    add r3, sp, #20
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    add r4, sp, #76
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    add r0, sp, #76
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    mov r2, lr
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    str r1, [r2], #4
+; CHECK-NEXT:    ldm r3, {r0, r1, r2, r3}
+; CHECK-NEXT:    ldr r5, [r12], #4
+; CHECK-NEXT:    str r5, [r4], #4
+; CHECK-NEXT:    ldr r5, [r12], #4
+; CHECK-NEXT:    str r5, [r4], #4
+; CHECK-NEXT:    ldr r5, [r12], #4
+; CHECK-NEXT:    str r5, [r4], #4
+; CHECK-NEXT:    ldr r5, [r12], #4
+; CHECK-NEXT:    str r5, [r4], #4
+; CHECK-NEXT:    ldr r5, [r12], #4
+; CHECK-NEXT:    str r5, [r4], #4
+; CHECK-NEXT:    add r5, lr, #16
+; CHECK-NEXT:    add r12, sp, #72
+; CHECK-NEXT:    ldr r4, [r5], #4
+; CHECK-NEXT:    str r4, [r12], #4
+; CHECK-NEXT:    add sp, sp, #40
+; CHECK-NEXT:    pop {r4, r5, r11, lr}
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    b two_byvals_callee
+entry:
+  musttail call void @two_byvals_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %b, %twenty_bytes* byval(%twenty_bytes) align 4 %a)
+  ret void
+}
+
+; A forwarded byval arg, but at a different offset on the stack, so it needs to
+; be copied to the local stack frame first. This can't be musttail because of
+; the different signatures, but is still tail-called as an optimisation.
+declare void @shift_byval_callee(%twenty_bytes* byval(%twenty_bytes) align 4)
+define void @shift_byval(i32 %a, %twenty_bytes* byval(%twenty_bytes) align 4 %b) {
+; CHECK-LABEL: shift_byval:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #12
+; CHECK-NEXT:    sub sp, sp, #12
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .pad #20
+; CHECK-NEXT:    sub sp, sp, #20
+; CHECK-NEXT:    add r0, sp, #28
+; CHECK-NEXT:    add lr, sp, #40
+; CHECK-NEXT:    stm r0, {r1, r2, r3}
+; CHECK-NEXT:    add r0, sp, #28
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    ldr r2, [r0], #4
+; CHECK-NEXT:    add r12, r1, #16
+; CHECK-NEXT:    str r2, [r1], #4
+; CHECK-NEXT:    ldr r2, [r0], #4
+; CHECK-NEXT:    str r2, [r1], #4
+; CHECK-NEXT:    ldr r2, [r0], #4
+; CHECK-NEXT:    str r2, [r1], #4
+; CHECK-NEXT:    ldr r2, [r0], #4
+; CHECK-NEXT:    str r2, [r1], #4
+; CHECK-NEXT:    ldr r2, [r0], #4
+; CHECK-NEXT:    str r2, [r1], #4
+; CHECK-NEXT:    ldm sp, {r0, r1, r2, r3}
+; CHECK-NEXT:    ldr r4, [r12], #4
+; CHECK-NEXT:    str r4, [lr], #4
+; CHECK-NEXT:    add sp, sp, #20
+; CHECK-NEXT:    pop {r4, lr}
+; CHECK-NEXT:    add sp, sp, #12
+; CHECK-NEXT:    b shift_byval_callee
+entry:
+  tail call void @shift_byval_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %b)
+  ret void
+}
+
+; A global object passed as a byval argument, so it must be copied, but doesn't
+; need a stack temporary.
+ at large_global = external global %twenty_bytes
+define void @large_caller_from_global(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
+; CHECK-LABEL: large_caller_from_global:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    add r12, sp, #8
+; CHECK-NEXT:    add lr, sp, #24
+; CHECK-NEXT:    stm r12, {r0, r1, r2, r3}
+; CHECK-NEXT:    movw r3, :lower16:large_global
+; CHECK-NEXT:    movt r3, :upper16:large_global
+; CHECK-NEXT:    add r12, r3, #16
+; CHECK-NEXT:    ldm r3, {r0, r1, r2, r3}
+; CHECK-NEXT:    ldr r4, [r12], #4
+; CHECK-NEXT:    str r4, [lr], #4
+; CHECK-NEXT:    pop {r4, lr}
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    b large_callee
+entry:
+  musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 @large_global)
+  ret void
+}

>From 114e1365fa53596f8609d79e6f5e554d19cbe14e Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Sun, 23 Nov 2025 15:30:36 +0100
Subject: [PATCH 07/12] copy arm byval musttail tests

---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp |   4 +
 llvm/test/CodeGen/X86/musttail-struct.ll    | 217 +++++---------------
 2 files changed, 58 insertions(+), 163 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 29a9b27132844..9bb39099faba2 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2544,6 +2544,10 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
           NeedsStackCopy = !isTailCall;
         }
 
+        // FIXME: contrary to the arm backend, with the current logic we always
+        // seem to need a stack copy.
+        (void)NeedsStackCopy;
+
         auto PtrVT = getPointerTy(DAG.getDataLayout());
         SDValue DstAddr = DAG.getFrameIndex(FI, PtrVT);
 
diff --git a/llvm/test/CodeGen/X86/musttail-struct.ll b/llvm/test/CodeGen/X86/musttail-struct.ll
index ba74c35a1ee49..e6dcf0b701b4c 100644
--- a/llvm/test/CodeGen/X86/musttail-struct.ll
+++ b/llvm/test/CodeGen/X86/musttail-struct.ll
@@ -72,17 +72,17 @@ entry:
 }
 
 define dso_local i32 @swapByValArguments(ptr byval(%struct.1xi32) %0, ptr byval(%struct.1xi32) %1) {
-; CHECK-LABEL: swapArguments:
+; CHECK-LABEL: swapByValArguments:
 ; CHECK:       # %bb.0:
-
 ; CHECK-NEXT:    mov eax, dword ptr [rsp + 8]
 ; CHECK-NEXT:    mov dword ptr [rsp - 16], eax
 ; CHECK-NEXT:    mov ecx, dword ptr [rsp + 16]
 ; CHECK-NEXT:    mov dword ptr [rsp - 8], ecx
-
 ; CHECK-NEXT:    mov dword ptr [rsp + 8], ecx
 ; CHECK-NEXT:    mov dword ptr [rsp + 16], eax
 ; CHECK-NEXT:    jmp swap # TAILCALL
+
+
   %r = musttail call i32 @swap(ptr byval(%struct.1xi32) %1, ptr byval(%struct.1xi32) %0)
   ret i32 %r
 }
@@ -95,37 +95,12 @@ declare void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4)
 
 ; Functions with byval parameters can be tail-called, because the value is
 ; actually passed in registers and the stack in the same way for the caller and
-; callee. Within @large_caller the first 16 bytes of the argument are spilled
-; to the local stack frame, but for the tail-call they are passed in r0-r3, so
-; it's safe to de-allocate that memory before the call.
-; TODO: The SUB and STM instructions are unnecessary and could be optimised
-; out, but the behaviour of this is still correct.
+; callee. On x86 byval arguments are never (partially) passed via registers.
 define void @large_caller(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
 ; CHECK-LABEL: large_caller:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    stm sp!, {r0, r1, r2, r3}
-; CHECK-NEXT:    b large_callee
-entry:
-  musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
-  ret void
-}
-
-; As above, but with some inline asm to test that the arguments in r0-r3 are
-; re-loaded before the call.
-define void @large_caller_check_regs(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
-; CHECK-LABEL: large_caller_check_regs:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    stm sp, {r0, r1, r2, r3}
-; CHECK-NEXT:    @APP
-; CHECK-NEXT:    @NO_APP
-; CHECK-NEXT:    pop {r0, r1, r2, r3}
-; CHECK-NEXT:    b large_callee
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    jmp large_callee at PLT # TAILCALL
 entry:
-  tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3}"()
   musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %a)
   ret void
 }
@@ -136,32 +111,19 @@ entry:
 ; valid.
 define void @large_caller_new_value(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
 ; CHECK-LABEL: large_caller_new_value:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #36
-; CHECK-NEXT:    sub sp, sp, #36
-; CHECK-NEXT:    add r12, sp, #20
-; CHECK-NEXT:    stm r12, {r0, r1, r2, r3}
-; CHECK-NEXT:    mov r0, #4
-; CHECK-NEXT:    add r1, sp, #36
-; CHECK-NEXT:    str r0, [sp, #16]
-; CHECK-NEXT:    mov r0, #3
-; CHECK-NEXT:    str r0, [sp, #12]
-; CHECK-NEXT:    mov r0, #2
-; CHECK-NEXT:    str r0, [sp, #8]
-; CHECK-NEXT:    mov r0, #1
-; CHECK-NEXT:    str r0, [sp, #4]
-; CHECK-NEXT:    mov r0, #0
-; CHECK-NEXT:    str r0, [sp]
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    add r0, r0, #16
-; CHECK-NEXT:    mov r3, #3
-; CHECK-NEXT:    ldr r2, [r0], #4
-; CHECK-NEXT:    str r2, [r1], #4
-; CHECK-NEXT:    mov r0, #0
-; CHECK-NEXT:    mov r1, #1
-; CHECK-NEXT:    mov r2, #2
-; CHECK-NEXT:    add sp, sp, #36
-; CHECK-NEXT:    b large_callee
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movabs rax, 4294967296
+; CHECK-NEXT:    mov qword ptr [rsp - 20], rax
+; CHECK-NEXT:    movabs rcx, 12884901890
+; CHECK-NEXT:    mov qword ptr [rsp - 12], rcx
+; CHECK-NEXT:    mov dword ptr [rsp - 4], 4
+; CHECK-NEXT:    mov qword ptr [rsp - 40], rax
+; CHECK-NEXT:    mov qword ptr [rsp - 32], rcx
+; CHECK-NEXT:    mov qword ptr [rsp + 8], rax
+; CHECK-NEXT:    mov qword ptr [rsp + 16], rcx
+; CHECK-NEXT:    mov dword ptr [rsp - 24], 4
+; CHECK-NEXT:    mov dword ptr [rsp + 24], 4
+; CHECK-NEXT:    jmp large_callee at PLT # TAILCALL
 entry:
   %y = alloca %twenty_bytes, align 4
   store i32 0, ptr %y, align 4
@@ -180,103 +142,40 @@ entry:
 declare void @two_byvals_callee(%twenty_bytes* byval(%twenty_bytes) align 4, %twenty_bytes* byval(%twenty_bytes) align 4)
 define void @swap_byvals(%twenty_bytes* byval(%twenty_bytes) align 4 %a, %twenty_bytes* byval(%twenty_bytes) align 4 %b) {
 ; CHECK-LABEL: swap_byvals:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .save {r4, r5, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r11, lr}
-; CHECK-NEXT:    .pad #40
-; CHECK-NEXT:    sub sp, sp, #40
-; CHECK-NEXT:    add r12, sp, #56
-; CHECK-NEXT:    add lr, sp, #20
-; CHECK-NEXT:    stm r12, {r0, r1, r2, r3}
-; CHECK-NEXT:    add r0, sp, #56
-; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    mov r2, r12
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    add r3, sp, #20
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    add r4, sp, #76
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    add r0, sp, #76
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    mov r2, lr
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldr r1, [r0], #4
-; CHECK-NEXT:    str r1, [r2], #4
-; CHECK-NEXT:    ldm r3, {r0, r1, r2, r3}
-; CHECK-NEXT:    ldr r5, [r12], #4
-; CHECK-NEXT:    str r5, [r4], #4
-; CHECK-NEXT:    ldr r5, [r12], #4
-; CHECK-NEXT:    str r5, [r4], #4
-; CHECK-NEXT:    ldr r5, [r12], #4
-; CHECK-NEXT:    str r5, [r4], #4
-; CHECK-NEXT:    ldr r5, [r12], #4
-; CHECK-NEXT:    str r5, [r4], #4
-; CHECK-NEXT:    ldr r5, [r12], #4
-; CHECK-NEXT:    str r5, [r4], #4
-; CHECK-NEXT:    add r5, lr, #16
-; CHECK-NEXT:    add r12, sp, #72
-; CHECK-NEXT:    ldr r4, [r5], #4
-; CHECK-NEXT:    str r4, [r12], #4
-; CHECK-NEXT:    add sp, sp, #40
-; CHECK-NEXT:    pop {r4, r5, r11, lr}
-; CHECK-NEXT:    add sp, sp, #16
-; CHECK-NEXT:    b two_byvals_callee
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mov eax, dword ptr [rsp + 24]
+; CHECK-NEXT:    mov dword ptr [rsp - 8], eax
+; CHECK-NEXT:    movaps xmm0, xmmword ptr [rsp + 8]
+; CHECK-NEXT:    movaps xmmword ptr [rsp - 24], xmm0
+; CHECK-NEXT:    mov ecx, dword ptr [rsp + 48]
+; CHECK-NEXT:    mov dword ptr [rsp - 32], ecx
+; CHECK-NEXT:    mov rdx, qword ptr [rsp + 32]
+; CHECK-NEXT:    mov rsi, qword ptr [rsp + 40]
+; CHECK-NEXT:    mov qword ptr [rsp - 40], rsi
+; CHECK-NEXT:    mov qword ptr [rsp - 48], rdx
+; CHECK-NEXT:    mov qword ptr [rsp + 8], rdx
+; CHECK-NEXT:    mov qword ptr [rsp + 16], rsi
+; CHECK-NEXT:    mov dword ptr [rsp + 24], ecx
+; CHECK-NEXT:    mov rcx, qword ptr [rsp + 8]
+; CHECK-NEXT:    mov rdx, qword ptr [rsp + 16]
+; CHECK-NEXT:    mov qword ptr [rsp + 32], rcx
+; CHECK-NEXT:    mov qword ptr [rsp + 40], rdx
+; CHECK-NEXT:    mov dword ptr [rsp + 48], eax
+; CHECK-NEXT:    jmp two_byvals_callee at PLT # TAILCALL
 entry:
   musttail call void @two_byvals_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %b, %twenty_bytes* byval(%twenty_bytes) align 4 %a)
   ret void
 }
 
-; A forwarded byval arg, but at a different offset on the stack, so it needs to
-; be copied to the local stack frame first. This can't be musttail because of
-; the different signatures, but is still tail-called as an optimisation.
+; A forwarded byval arg, but at a different argument position. Because
+; x86 does not (partially) pass byval arguments in registers, the byval
+; arg is in the correct position already, so this is not a sibcall but
+; can be tail-call optimized.
 declare void @shift_byval_callee(%twenty_bytes* byval(%twenty_bytes) align 4)
 define void @shift_byval(i32 %a, %twenty_bytes* byval(%twenty_bytes) align 4 %b) {
 ; CHECK-LABEL: shift_byval:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #12
-; CHECK-NEXT:    sub sp, sp, #12
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    .pad #20
-; CHECK-NEXT:    sub sp, sp, #20
-; CHECK-NEXT:    add r0, sp, #28
-; CHECK-NEXT:    add lr, sp, #40
-; CHECK-NEXT:    stm r0, {r1, r2, r3}
-; CHECK-NEXT:    add r0, sp, #28
-; CHECK-NEXT:    mov r1, sp
-; CHECK-NEXT:    ldr r2, [r0], #4
-; CHECK-NEXT:    add r12, r1, #16
-; CHECK-NEXT:    str r2, [r1], #4
-; CHECK-NEXT:    ldr r2, [r0], #4
-; CHECK-NEXT:    str r2, [r1], #4
-; CHECK-NEXT:    ldr r2, [r0], #4
-; CHECK-NEXT:    str r2, [r1], #4
-; CHECK-NEXT:    ldr r2, [r0], #4
-; CHECK-NEXT:    str r2, [r1], #4
-; CHECK-NEXT:    ldr r2, [r0], #4
-; CHECK-NEXT:    str r2, [r1], #4
-; CHECK-NEXT:    ldm sp, {r0, r1, r2, r3}
-; CHECK-NEXT:    ldr r4, [r12], #4
-; CHECK-NEXT:    str r4, [lr], #4
-; CHECK-NEXT:    add sp, sp, #20
-; CHECK-NEXT:    pop {r4, lr}
-; CHECK-NEXT:    add sp, sp, #12
-; CHECK-NEXT:    b shift_byval_callee
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    jmp shift_byval_callee at PLT # TAILCALL
 entry:
   tail call void @shift_byval_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %b)
   ret void
@@ -287,23 +186,15 @@ entry:
 @large_global = external global %twenty_bytes
 define void @large_caller_from_global(%twenty_bytes* byval(%twenty_bytes) align 4 %a) {
 ; CHECK-LABEL: large_caller_from_global:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    add r12, sp, #8
-; CHECK-NEXT:    add lr, sp, #24
-; CHECK-NEXT:    stm r12, {r0, r1, r2, r3}
-; CHECK-NEXT:    movw r3, :lower16:large_global
-; CHECK-NEXT:    movt r3, :upper16:large_global
-; CHECK-NEXT:    add r12, r3, #16
-; CHECK-NEXT:    ldm r3, {r0, r1, r2, r3}
-; CHECK-NEXT:    ldr r4, [r12], #4
-; CHECK-NEXT:    str r4, [lr], #4
-; CHECK-NEXT:    pop {r4, lr}
-; CHECK-NEXT:    add sp, sp, #16
-; CHECK-NEXT:    b large_callee
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mov rax, qword ptr [rip + large_global at GOTPCREL]
+; CHECK-NEXT:    mov ecx, dword ptr [rax + 16]
+; CHECK-NEXT:    mov dword ptr [rsp + 24], ecx
+; CHECK-NEXT:    mov rcx, qword ptr [rax]
+; CHECK-NEXT:    mov rax, qword ptr [rax + 8]
+; CHECK-NEXT:    mov qword ptr [rsp + 16], rax
+; CHECK-NEXT:    mov qword ptr [rsp + 8], rcx
+; CHECK-NEXT:    jmp large_callee at PLT # TAILCALL
 entry:
   musttail call void @large_callee(%twenty_bytes* byval(%twenty_bytes) align 4 @large_global)
   ret void

>From ecba68c5973a44f974b3934fc78d2e48d4798cd5 Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Tue, 25 Nov 2025 12:18:22 +0100
Subject: [PATCH 08/12] uncomment CopyOnce return

---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 9bb39099faba2..9512c673a3172 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2047,15 +2047,12 @@ X86TargetLowering::ByValCopyKind X86TargetLowering::ByValNeedsCopyForTailCall(
   int64_t SrcOffset = MFI.getObjectOffset(SrcFI);
   int64_t DstOffset = MFI.getObjectOffset(DstFI);
 
-  // FIXME:
-
-  //  // If the source is in the local frame, then the copy to the argument
-  //  memory
-  //  // is always valid.
-  //  bool FixedSrc = MFI.isFixedObjectIndex(SrcFI);
-  //  if (!FixedSrc ||
-  //      (FixedSrc && SrcOffset < -(int64_t)AFI->getArgRegsSaveSize()))
-  //    return CopyOnce;
+
+  // If the source is in the local frame, then the copy to the argument
+  // memory is always valid.
+  bool FixedSrc = MFI.isFixedObjectIndex(SrcFI);
+  if (!FixedSrc || (FixedSrc && SrcOffset < 0))
+    return CopyOnce;
 
   // In the case of byval arguments split between registers and the stack,
   // computeAddrForCallArg returns a FrameIndex which corresponds only to the

>From c7eabbd79141dabf98301ccb3e5c6c8ec6a53a80 Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Tue, 25 Nov 2025 12:58:16 +0100
Subject: [PATCH 09/12] correct Dst location calculation

---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 6 +++---
 llvm/test/CodeGen/X86/musttail-inalloca.ll  | 2 ++
 llvm/test/CodeGen/X86/musttail-struct.ll    | 4 ----
 llvm/test/CodeGen/X86/sibcall.ll            | 9 +++++++--
 4 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 9512c673a3172..0d15ae3f2d1c1 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2047,7 +2047,6 @@ X86TargetLowering::ByValCopyKind X86TargetLowering::ByValNeedsCopyForTailCall(
   int64_t SrcOffset = MFI.getObjectOffset(SrcFI);
   int64_t DstOffset = MFI.getObjectOffset(DstFI);
 
-
   // If the source is in the local frame, then the copy to the argument
   // memory is always valid.
   bool FixedSrc = MFI.isFixedObjectIndex(SrcFI);
@@ -2233,8 +2232,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       // Destination: where this byval should live in the callee's frame
       // after the tail call.
       int32_t Offset = VA.getLocMemOffset() + FPDiff + RetAddrSize;
-      SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
-                                DAG.getIntPtrConstant(Offset, dl));
+      int Size = VA.getLocVT().getFixedSizeInBits() / 8;
+      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
+      SDValue Dst = DAG.getFrameIndex(FI, PtrVT);
 
       ByValCopyKind Copy = ByValNeedsCopyForTailCall(DAG, Src, Dst, Flags);
 
diff --git a/llvm/test/CodeGen/X86/musttail-inalloca.ll b/llvm/test/CodeGen/X86/musttail-inalloca.ll
index ab6159520d925..a673f816e4682 100644
--- a/llvm/test/CodeGen/X86/musttail-inalloca.ll
+++ b/llvm/test/CodeGen/X86/musttail-inalloca.ll
@@ -18,6 +18,7 @@ define dso_local x86_thiscallcc void @methodWithVtorDisp_thunk(ptr %0, ptr inall
 ; CHECK-LABEL: methodWithVtorDisp_thunk:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    subl $20, %esp
 ; CHECK-NEXT:    movl %ecx, %esi
 ; CHECK-NEXT:    subl -4(%ecx), %esi
 ; CHECK-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -25,6 +26,7 @@ define dso_local x86_thiscallcc void @methodWithVtorDisp_thunk(ptr %0, ptr inall
 ; CHECK-NEXT:    calll ___cyg_profile_func_exit
 ; CHECK-NEXT:    addl $8, %esp
 ; CHECK-NEXT:    movl %esi, %ecx
+; CHECK-NEXT:    addl $20, %esp
 ; CHECK-NEXT:    popl %esi
 ; CHECK-NEXT:    jmp _methodWithVtorDisp # TAILCALL
   %3 = getelementptr inbounds i8, ptr %0, i32 -4
diff --git a/llvm/test/CodeGen/X86/musttail-struct.ll b/llvm/test/CodeGen/X86/musttail-struct.ll
index e6dcf0b701b4c..3a746cde816b3 100644
--- a/llvm/test/CodeGen/X86/musttail-struct.ll
+++ b/llvm/test/CodeGen/X86/musttail-struct.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -x86-asm-syntax=intel | FileCheck %s
-; ; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s
 
 ; Test correct handling of a musttail call with a byval struct argument.
 
@@ -117,11 +116,8 @@ define void @large_caller_new_value(%twenty_bytes* byval(%twenty_bytes) align 4
 ; CHECK-NEXT:    movabs rcx, 12884901890
 ; CHECK-NEXT:    mov qword ptr [rsp - 12], rcx
 ; CHECK-NEXT:    mov dword ptr [rsp - 4], 4
-; CHECK-NEXT:    mov qword ptr [rsp - 40], rax
-; CHECK-NEXT:    mov qword ptr [rsp - 32], rcx
 ; CHECK-NEXT:    mov qword ptr [rsp + 8], rax
 ; CHECK-NEXT:    mov qword ptr [rsp + 16], rcx
-; CHECK-NEXT:    mov dword ptr [rsp - 24], 4
 ; CHECK-NEXT:    mov dword ptr [rsp + 24], 4
 ; CHECK-NEXT:    jmp large_callee at PLT # TAILCALL
 entry:
diff --git a/llvm/test/CodeGen/X86/sibcall.ll b/llvm/test/CodeGen/X86/sibcall.ll
index 2759a9883975e..d1137cac7d365 100644
--- a/llvm/test/CodeGen/X86/sibcall.ll
+++ b/llvm/test/CodeGen/X86/sibcall.ll
@@ -295,10 +295,15 @@ declare dso_local i32 @foo5(i32, i32, i32, i32, i32)
 define dso_local i32 @t12(i32 %x, i32 %y, ptr byval(%struct.t) align 4 %z) nounwind ssp {
 ; X86-LABEL: t12:
 ; X86:       # %bb.0: # %entry
+; X86-NEXT:    subl $20, %esp
 ; X86-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
-; X86-NEXT:    jne foo6 # TAILCALL
-; X86-NEXT:  # %bb.1: # %bb2
+; X86-NEXT:    je .LBB12_1
+; X86-NEXT:  # %bb.2: # %bb
+; X86-NEXT:    addl $20, %esp
+; X86-NEXT:    jmp foo6 # TAILCALL
+; X86-NEXT:  .LBB12_1: # %bb2
 ; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    addl $20, %esp
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t12:

>From dc1c03f02960be4c2fef77501b857b17efb8c4f2 Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Tue, 25 Nov 2025 19:17:25 +0100
Subject: [PATCH 10/12] error when musttail cannot tail

---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 70 +++++++--------------
 llvm/test/CodeGen/X86/musttail-struct.ll    | 30 ++++-----
 2 files changed, 35 insertions(+), 65 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 0d15ae3f2d1c1..e1318051711b2 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2053,14 +2053,6 @@ X86TargetLowering::ByValCopyKind X86TargetLowering::ByValNeedsCopyForTailCall(
   if (!FixedSrc || (FixedSrc && SrcOffset < 0))
     return CopyOnce;
 
-  // In the case of byval arguments split between registers and the stack,
-  // computeAddrForCallArg returns a FrameIndex which corresponds only to the
-  // stack portion, but the Src SDValue will refer to the full value, including
-  // the local stack memory that the register portion gets stored into. We only
-  // need to compare them for equality, so normalise on the full value version.
-  uint64_t RegSize = Flags.getByValSize() - MFI.getObjectSize(DstFI);
-  DstOffset -= RegSize;
-
   // If the value is already in the correct location, then no copying is
   // needed. If not, then we need to copy via a temporary.
   if (SrcOffset == DstOffset)
@@ -2154,19 +2146,15 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
     IsSibcall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
                                                   IsCalleePopSRet);
 
-    if (!IsMustTail) {
-      isTailCall = IsSibcall;
-
-      // Sibcalls are automatically detected tailcalls which do not require
-      // ABI changes.
-      IsSibcall = IsSibcall && !IsGuaranteeTCO;
-    }
+    if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt &&
+        CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail)
+      IsSibcall = true;
 
     if (isTailCall)
       ++NumTailCalls;
   }
 
-  if (IsMustTail && !isTailCall)
+  if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
     report_fatal_error("failed to perform tail call elimination on a call "
                        "site marked musttail");
 
@@ -2231,7 +2219,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 
       // Destination: where this byval should live in the callee’s frame
       // after the tail call.
-      int32_t Offset = VA.getLocMemOffset() + FPDiff + RetAddrSize;
+      int32_t Offset = VA.getLocMemOffset() + FPDiff;
       int Size = VA.getLocVT().getFixedSizeInBits() / 8;
       int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
       SDValue Dst = DAG.getFrameIndex(FI, PtrVT);
@@ -2265,6 +2253,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
         ByValCopyChains.push_back(CopyChain);
       }
     }
+
     if (!ByValCopyChains.empty())
       ByValTempChain =
           DAG.getNode(ISD::TokenFactor, dl, MVT::Other, ByValCopyChains);
@@ -2484,7 +2473,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // For tail calls lower the arguments to the 'real' stack slots.  Sibcalls
   // don't need this because the eligibility check rejects calls that require
   // shuffling arguments passed in memory.
-  if (!IsSibcall && isTailCall) {
+  if (isTailCall) {
     // Force all the incoming stack arguments to be loaded from the stack
     // before any new outgoing arguments or the return address are stored to the
     // stack, because the outgoing stack slots may alias the incoming argument
@@ -2543,14 +2532,15 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 
         // FIXME: contrary to the arm backend, with the current logic we always
         // seem to need a stack copy.
-        (void)NeedsStackCopy;
+        if (NeedsStackCopy) {
 
-        auto PtrVT = getPointerTy(DAG.getDataLayout());
-        SDValue DstAddr = DAG.getFrameIndex(FI, PtrVT);
+          auto PtrVT = getPointerTy(DAG.getDataLayout());
+          SDValue DstAddr = DAG.getFrameIndex(FI, PtrVT);
 
-        // Copy the struct contents from ByValSrc to DstAddr.
-        MemOpChains2.push_back(CreateCopyOfByValArgument(
-            ByValSrc, DstAddr, Chain, Flags, DAG, dl));
+          // Copy the struct contents from ByValSrc to DstAddr.
+          MemOpChains2.push_back(CreateCopyOfByValArgument(
+              ByValSrc, DstAddr, Chain, Flags, DAG, dl));
+        }
       } else {
         // Store relative to framepointer.
         MemOpChains2.push_back(DAG.getStore(
@@ -2951,9 +2941,10 @@ mayBeSRetTailCallCompatible(const TargetLowering::CallLoweringInfo &CLI,
 
 /// Check whether the call is eligible for tail call optimization. Targets
 /// that want to do tail call optimization should implement this function.
-/// Note that the x86 backend does not check musttail calls for eligibility! The
-/// rest of x86 tail call lowering must be prepared to forward arguments of any
-/// type.
+///
+/// Note that this function also processes musttail calls, so when this
+/// function returns false on a valid musttail call, a fatal backend error
+/// occurs.
 bool X86TargetLowering::IsEligibleForTailCallOptimization(
     TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
     SmallVectorImpl<CCValAssign> &ArgLocs, bool IsCalleePopSRet) const {
@@ -3080,26 +3071,6 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
   // If the callee takes no arguments then go on to check the results of the
   // call.
   if (!Outs.empty()) {
-    if (StackArgsSize > 0) {
-      // Check if the arguments are already laid out in the right way as
-      // the caller's fixed stack objects.
-      MachineFrameInfo &MFI = MF.getFrameInfo();
-      const MachineRegisterInfo *MRI = &MF.getRegInfo();
-      const X86InstrInfo *TII = Subtarget.getInstrInfo();
-      for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
-        const CCValAssign &VA = ArgLocs[I];
-        SDValue Arg = OutVals[I];
-        ISD::ArgFlagsTy Flags = Outs[I].Flags;
-        if (VA.getLocInfo() == CCValAssign::Indirect)
-          return false;
-        if (!VA.isRegLoc()) {
-          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
-                                   TII, VA))
-            return false;
-        }
-      }
-    }
-
     bool PositionIndependent = isPositionIndependent();
     // If the tailcall address may be in a register, then make sure it's
     // possible to register allocate for it. In 32-bit, the call address can
@@ -3137,6 +3108,11 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
       X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
                        MF.getTarget().Options.GuaranteedTailCallOpt);
 
+  // If the stack arguments for this call do not fit into the caller's own
+  // argument save area, then the call cannot be lowered as a tail call.
+  if (CCInfo.getStackSize() > FuncInfo->getArgumentStackSize())
+    return false;
+
   if (unsigned BytesToPop = FuncInfo->getBytesToPopOnReturn()) {
     // If we have bytes to pop, the callee must pop them.
     bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
diff --git a/llvm/test/CodeGen/X86/musttail-struct.ll b/llvm/test/CodeGen/X86/musttail-struct.ll
index 3a746cde816b3..23864cf5a6fe2 100644
--- a/llvm/test/CodeGen/X86/musttail-struct.ll
+++ b/llvm/test/CodeGen/X86/musttail-struct.ll
@@ -42,6 +42,8 @@ define dso_local i32 @test5(ptr byval(%struct.5xi32) %0) {
 define dso_local i32 @testManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7) {
 ; CHECK-LABEL: testManyArgs:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzx eax, byte ptr [rsp + 8]
+; CHECK-NEXT:    mov byte ptr [rsp + 8], al
 ; CHECK-NEXT:    jmp FuncManyArgs # TAILCALL
   %r = musttail call i32 @FuncManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7)
   ret i32 %r
@@ -50,6 +52,8 @@ define dso_local i32 @testManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %
 define dso_local i32 @testRecursion(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7) {
 ; CHECK-LABEL: testRecursion:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzx eax, byte ptr [rsp + 8]
+; CHECK-NEXT:    mov byte ptr [rsp + 8], al
 ; CHECK-NEXT:    jmp testRecursion # TAILCALL
   %r = musttail call i32 @testRecursion(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7)
   ret i32 %r
@@ -75,10 +79,8 @@ define dso_local i32 @swapByValArguments(ptr byval(%struct.1xi32) %0, ptr byval(
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mov eax, dword ptr [rsp + 8]
 ; CHECK-NEXT:    mov dword ptr [rsp - 16], eax
-; CHECK-NEXT:    mov ecx, dword ptr [rsp + 16]
-; CHECK-NEXT:    mov dword ptr [rsp - 8], ecx
-; CHECK-NEXT:    mov dword ptr [rsp + 8], ecx
-; CHECK-NEXT:    mov dword ptr [rsp + 16], eax
+; CHECK-NEXT:    mov eax, dword ptr [rsp + 16]
+; CHECK-NEXT:    mov dword ptr [rsp - 8], eax
 ; CHECK-NEXT:    jmp swap # TAILCALL
 
 
@@ -143,20 +145,12 @@ define void @swap_byvals(%twenty_bytes* byval(%twenty_bytes) align 4 %a, %twenty
 ; CHECK-NEXT:    mov dword ptr [rsp - 8], eax
 ; CHECK-NEXT:    movaps xmm0, xmmword ptr [rsp + 8]
 ; CHECK-NEXT:    movaps xmmword ptr [rsp - 24], xmm0
-; CHECK-NEXT:    mov ecx, dword ptr [rsp + 48]
-; CHECK-NEXT:    mov dword ptr [rsp - 32], ecx
-; CHECK-NEXT:    mov rdx, qword ptr [rsp + 32]
-; CHECK-NEXT:    mov rsi, qword ptr [rsp + 40]
-; CHECK-NEXT:    mov qword ptr [rsp - 40], rsi
-; CHECK-NEXT:    mov qword ptr [rsp - 48], rdx
-; CHECK-NEXT:    mov qword ptr [rsp + 8], rdx
-; CHECK-NEXT:    mov qword ptr [rsp + 16], rsi
-; CHECK-NEXT:    mov dword ptr [rsp + 24], ecx
-; CHECK-NEXT:    mov rcx, qword ptr [rsp + 8]
-; CHECK-NEXT:    mov rdx, qword ptr [rsp + 16]
-; CHECK-NEXT:    mov qword ptr [rsp + 32], rcx
-; CHECK-NEXT:    mov qword ptr [rsp + 40], rdx
-; CHECK-NEXT:    mov dword ptr [rsp + 48], eax
+; CHECK-NEXT:    mov eax, dword ptr [rsp + 48]
+; CHECK-NEXT:    mov dword ptr [rsp - 32], eax
+; CHECK-NEXT:    mov rax, qword ptr [rsp + 32]
+; CHECK-NEXT:    mov rcx, qword ptr [rsp + 40]
+; CHECK-NEXT:    mov qword ptr [rsp - 40], rcx
+; CHECK-NEXT:    mov qword ptr [rsp - 48], rax
 ; CHECK-NEXT:    jmp two_byvals_callee at PLT # TAILCALL
 entry:
   musttail call void @two_byvals_callee(%twenty_bytes* byval(%twenty_bytes) align 4 %b, %twenty_bytes* byval(%twenty_bytes) align 4 %a)
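
As a note on the new stack-size check in IsEligibleForTailCallOptimization
above, here is a hypothetical sketch (not part of the patch set, function
names made up) of the case it rejects: a caller whose incoming argument
area is smaller than the stack space the callee's arguments need, e.g. on
x86-64:

; Hypothetical illustration: @wide needs two stack slots for its last two
; arguments, while @narrow received none, so CCInfo.getStackSize() exceeds
; FuncInfo->getArgumentStackSize() and the call is not tail-call eligible.
declare void @wide(i64, i64, i64, i64, i64, i64, i64, i64)

define void @narrow(i64 %x) {
  tail call void @wide(i64 %x, i64 %x, i64 %x, i64 %x, i64 %x, i64 %x,
                       i64 %x, i64 %x)
  ret void
}

A plain tail call like this simply falls back to a normal call; a musttail
call failing the check would instead hit the fatal error mentioned in the
updated comment.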

>From b5351c4b215e385f54c1ca381410bbd96f0388fe Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Tue, 25 Nov 2025 19:36:25 +0100
Subject: [PATCH 11/12] prevent copying when an argument has a matching stack
 offset

this covers non-byval arguments that are passed via the stack because the
argument registers are full
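
A hypothetical sketch of the scenario (function names are made up, not from
the patch set): on x86-64 the seventh integer argument is passed on the
stack, and when it is forwarded unchanged it already sits at the matching
fixed stack offset, so MatchingStackOffset lets the lowering skip the store:

; Hypothetical illustration: %6 lands on the stack (the six integer argument
; registers hold %0-%5) and is forwarded at the same offset, so no copy into
; the outgoing argument area is needed before the tail call.
declare i32 @sink(i32, i32, i32, i32, i32, i32, i32)

define i32 @forward(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6) {
  %r = musttail call i32 @sink(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4,
                               i32 %5, i32 %6)
  ret i32 %r
}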
---
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 213 ++++++++++----------
 1 file changed, 110 insertions(+), 103 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index e1318051711b2..a281a02077991 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2018,6 +2018,103 @@ SDValue X86TargetLowering::getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
 }
 
+/// Return true if the given stack call argument is already available in the
+/// same position (relatively) of the caller's incoming argument stack.
+static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
+                                ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI,
+                                const MachineRegisterInfo *MRI,
+                                const X86InstrInfo *TII,
+                                const CCValAssign &VA) {
+  unsigned Bytes = Arg.getValueSizeInBits() / 8;
+
+  for (;;) {
+    // Look through nodes that don't alter the bits of the incoming value.
+    unsigned Op = Arg.getOpcode();
+    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST ||
+        Op == ISD::AssertZext) {
+      Arg = Arg.getOperand(0);
+      continue;
+    }
+    if (Op == ISD::TRUNCATE) {
+      const SDValue &TruncInput = Arg.getOperand(0);
+      if (TruncInput.getOpcode() == ISD::AssertZext &&
+          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
+              Arg.getValueType()) {
+        Arg = TruncInput.getOperand(0);
+        continue;
+      }
+    }
+    break;
+  }
+
+  int FI = INT_MAX;
+  if (Arg.getOpcode() == ISD::CopyFromReg) {
+    Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
+    if (!VR.isVirtual())
+      return false;
+    MachineInstr *Def = MRI->getVRegDef(VR);
+    if (!Def)
+      return false;
+    if (!Flags.isByVal()) {
+      if (!TII->isLoadFromStackSlot(*Def, FI))
+        return false;
+    } else {
+      unsigned Opcode = Def->getOpcode();
+      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
+           Opcode == X86::LEA64_32r) &&
+          Def->getOperand(1).isFI()) {
+        FI = Def->getOperand(1).getIndex();
+        Bytes = Flags.getByValSize();
+      } else
+        return false;
+    }
+  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
+    if (Flags.isByVal())
+      // ByVal argument is passed in as a pointer but it's now being
+      // dereferenced. e.g.
+      // define @foo(%struct.X* %A) {
+      //   tail call @bar(%struct.X* byval %A)
+      // }
+      return false;
+    SDValue Ptr = Ld->getBasePtr();
+    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
+    if (!FINode)
+      return false;
+    FI = FINode->getIndex();
+  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
+    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
+    FI = FINode->getIndex();
+    Bytes = Flags.getByValSize();
+  } else
+    return false;
+
+  assert(FI != INT_MAX);
+  if (!MFI.isFixedObjectIndex(FI))
+    return false;
+
+  if (Offset != MFI.getObjectOffset(FI))
+    return false;
+
+  // If this is not byval, check that the argument stack object is immutable.
+  // inalloca and argument copy elision can create mutable argument stack
+  // objects. Byval objects can be mutated, but a byval call intends to pass the
+  // mutated memory.
+  if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
+    return false;
+
+  if (VA.getLocVT().getFixedSizeInBits() >
+      Arg.getValueSizeInBits().getFixedValue()) {
+    // If the argument location is wider than the argument type, check that any
+    // extension flags match.
+    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
+        Flags.isSExt() != MFI.isObjectSExt(FI)) {
+      return false;
+    }
+  }
+
+  return Bytes == MFI.getObjectSize(FI);
+}
+
 // Returns the type of copying which is required to set up a byval argument to
 // a tail-called function. This isn't needed for non-tail calls, because they
 // always need the equivalent of CopyOnce, but tail-calls sometimes need two to
@@ -2191,7 +2288,6 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 
   SDValue StackPtr;
   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
-  int RetAddrSize = 8;
 
   // If we are doing a tail-call, any byval arguments will be written to stack
   // space which was used for incoming arguments. If any of the values being used
@@ -2530,8 +2626,6 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
           NeedsStackCopy = !isTailCall;
         }
 
-        // FIXME: contrary to the arm backend, with the current logic we always
-        // seem to need a stack copy.
         if (NeedsStackCopy) {
 
           auto PtrVT = getPointerTy(DAG.getDataLayout());
@@ -2542,10 +2636,19 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
               ByValSrc, DstAddr, Chain, Flags, DAG, dl));
         }
       } else {
-        // Store relative to framepointer.
-        MemOpChains2.push_back(DAG.getStore(
-            Chain, dl, Arg, FIN,
-            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
+        // Check whether the argument is already laid out in the right place
+        // among the caller's fixed stack objects.
+        MachineFrameInfo &MFI = MF.getFrameInfo();
+        const MachineRegisterInfo *MRI = &MF.getRegInfo();
+        const X86InstrInfo *TII = Subtarget.getInstrInfo();
+
+        if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
+                                 TII, VA)) {
+          // Store relative to framepointer.
+          MemOpChains2.push_back(DAG.getStore(
+              Chain, dl, Arg, FIN,
+              MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
+        }
       }
     }
 
@@ -2811,102 +2914,6 @@ X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
   return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
 }
 
-/// Return true if the given stack call argument is already available in the
-/// same position (relatively) of the caller's incoming argument stack.
-static
-bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
-                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
-                         const X86InstrInfo *TII, const CCValAssign &VA) {
-  unsigned Bytes = Arg.getValueSizeInBits() / 8;
-
-  for (;;) {
-    // Look through nodes that don't alter the bits of the incoming value.
-    unsigned Op = Arg.getOpcode();
-    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST ||
-        Op == ISD::AssertZext) {
-      Arg = Arg.getOperand(0);
-      continue;
-    }
-    if (Op == ISD::TRUNCATE) {
-      const SDValue &TruncInput = Arg.getOperand(0);
-      if (TruncInput.getOpcode() == ISD::AssertZext &&
-          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
-              Arg.getValueType()) {
-        Arg = TruncInput.getOperand(0);
-        continue;
-      }
-    }
-    break;
-  }
-
-  int FI = INT_MAX;
-  if (Arg.getOpcode() == ISD::CopyFromReg) {
-    Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
-    if (!VR.isVirtual())
-      return false;
-    MachineInstr *Def = MRI->getVRegDef(VR);
-    if (!Def)
-      return false;
-    if (!Flags.isByVal()) {
-      if (!TII->isLoadFromStackSlot(*Def, FI))
-        return false;
-    } else {
-      unsigned Opcode = Def->getOpcode();
-      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
-           Opcode == X86::LEA64_32r) &&
-          Def->getOperand(1).isFI()) {
-        FI = Def->getOperand(1).getIndex();
-        Bytes = Flags.getByValSize();
-      } else
-        return false;
-    }
-  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
-    if (Flags.isByVal())
-      // ByVal argument is passed in as a pointer but it's now being
-      // dereferenced. e.g.
-      // define @foo(%struct.X* %A) {
-      //   tail call @bar(%struct.X* byval %A)
-      // }
-      return false;
-    SDValue Ptr = Ld->getBasePtr();
-    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
-    if (!FINode)
-      return false;
-    FI = FINode->getIndex();
-  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
-    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
-    FI = FINode->getIndex();
-    Bytes = Flags.getByValSize();
-  } else
-    return false;
-
-  assert(FI != INT_MAX);
-  if (!MFI.isFixedObjectIndex(FI))
-    return false;
-
-  if (Offset != MFI.getObjectOffset(FI))
-    return false;
-
-  // If this is not byval, check that the argument stack object is immutable.
-  // inalloca and argument copy elision can create mutable argument stack
-  // objects. Byval objects can be mutated, but a byval call intends to pass the
-  // mutated memory.
-  if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
-    return false;
-
-  if (VA.getLocVT().getFixedSizeInBits() >
-      Arg.getValueSizeInBits().getFixedValue()) {
-    // If the argument location is wider than the argument type, check that any
-    // extension flags match.
-    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
-        Flags.isSExt() != MFI.isObjectSExt(FI)) {
-      return false;
-    }
-  }
-
-  return Bytes == MFI.getObjectSize(FI);
-}
-
 static bool
 mayBeSRetTailCallCompatible(const TargetLowering::CallLoweringInfo &CLI,
                             Register CallerSRetReg) {

>From 346a911e105521a80fe4ad7fdb70f2a4c75a3721 Mon Sep 17 00:00:00 2001
From: Folkert de Vries <folkert at folkertdev.nl>
Date: Tue, 25 Nov 2025 20:08:04 +0100
Subject: [PATCH 12/12] update tests

---
 llvm/test/CodeGen/X86/musttail-inalloca.ll |  2 -
 llvm/test/CodeGen/X86/musttail-struct.ll   |  4 --
 llvm/test/CodeGen/X86/musttail-tailcc.ll   | 18 +++++++++
 llvm/test/CodeGen/X86/musttail-varargs.ll  | 44 ++++++++++------------
 llvm/test/CodeGen/X86/musttail.ll          |  4 +-
 5 files changed, 40 insertions(+), 32 deletions(-)

diff --git a/llvm/test/CodeGen/X86/musttail-inalloca.ll b/llvm/test/CodeGen/X86/musttail-inalloca.ll
index a673f816e4682..ab6159520d925 100644
--- a/llvm/test/CodeGen/X86/musttail-inalloca.ll
+++ b/llvm/test/CodeGen/X86/musttail-inalloca.ll
@@ -18,7 +18,6 @@ define dso_local x86_thiscallcc void @methodWithVtorDisp_thunk(ptr %0, ptr inall
 ; CHECK-LABEL: methodWithVtorDisp_thunk:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushl %esi
-; CHECK-NEXT:    subl $20, %esp
 ; CHECK-NEXT:    movl %ecx, %esi
 ; CHECK-NEXT:    subl -4(%ecx), %esi
 ; CHECK-NEXT:    pushl {{[0-9]+}}(%esp)
@@ -26,7 +25,6 @@ define dso_local x86_thiscallcc void @methodWithVtorDisp_thunk(ptr %0, ptr inall
 ; CHECK-NEXT:    calll ___cyg_profile_func_exit
 ; CHECK-NEXT:    addl $8, %esp
 ; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    addl $20, %esp
 ; CHECK-NEXT:    popl %esi
 ; CHECK-NEXT:    jmp _methodWithVtorDisp # TAILCALL
   %3 = getelementptr inbounds i8, ptr %0, i32 -4
diff --git a/llvm/test/CodeGen/X86/musttail-struct.ll b/llvm/test/CodeGen/X86/musttail-struct.ll
index 23864cf5a6fe2..62ca16589b3ee 100644
--- a/llvm/test/CodeGen/X86/musttail-struct.ll
+++ b/llvm/test/CodeGen/X86/musttail-struct.ll
@@ -42,8 +42,6 @@ define dso_local i32 @test5(ptr byval(%struct.5xi32) %0) {
 define dso_local i32 @testManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7) {
 ; CHECK-LABEL: testManyArgs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movzx eax, byte ptr [rsp + 8]
-; CHECK-NEXT:    mov byte ptr [rsp + 8], al
 ; CHECK-NEXT:    jmp FuncManyArgs # TAILCALL
   %r = musttail call i32 @FuncManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7)
   ret i32 %r
@@ -52,8 +50,6 @@ define dso_local i32 @testManyArgs(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %
 define dso_local i32 @testRecursion(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7) {
 ; CHECK-LABEL: testRecursion:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movzx eax, byte ptr [rsp + 8]
-; CHECK-NEXT:    mov byte ptr [rsp + 8], al
 ; CHECK-NEXT:    jmp testRecursion # TAILCALL
   %r = musttail call i32 @testRecursion(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i8 %6, ptr byval(%struct.5xi32) %7)
   ret i32 %r
diff --git a/llvm/test/CodeGen/X86/musttail-tailcc.ll b/llvm/test/CodeGen/X86/musttail-tailcc.ll
index f1ffbcb1142c5..fae698d53b927 100644
--- a/llvm/test/CodeGen/X86/musttail-tailcc.ll
+++ b/llvm/test/CodeGen/X86/musttail-tailcc.ll
@@ -55,6 +55,15 @@ define dso_local tailcc void @void_test(i32, i32, i32, i32) {
 ;
 ; X86-LABEL: void_test:
 ; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %esi, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    jmp void_test # TAILCALL
   entry:
    musttail call tailcc void @void_test( i32 %0, i32 %1, i32 %2, i32 %3)
@@ -68,6 +77,15 @@ define dso_local tailcc i1 @i1test(i32, i32, i32, i32) {
 ;
 ; X86-LABEL: i1test:
 ; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %esi, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    jmp i1test # TAILCALL
   entry:
   %4 = musttail call tailcc i1 @i1test( i32 %0, i32 %1, i32 %2, i32 %3)
diff --git a/llvm/test/CodeGen/X86/musttail-varargs.ll b/llvm/test/CodeGen/X86/musttail-varargs.ll
index 65cd1edd92e31..c1ec7ccbde177 100644
--- a/llvm/test/CodeGen/X86/musttail-varargs.ll
+++ b/llvm/test/CodeGen/X86/musttail-varargs.ll
@@ -243,18 +243,15 @@ define void @f_thunk(ptr %this, ...) {
 ; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl %ebp
 ; X86-NOSSE-NEXT:    movl %esp, %ebp
-; X86-NOSSE-NEXT:    pushl %esi
 ; X86-NOSSE-NEXT:    andl $-16, %esp
 ; X86-NOSSE-NEXT:    subl $32, %esp
-; X86-NOSSE-NEXT:    movl 8(%ebp), %esi
-; X86-NOSSE-NEXT:    leal 12(%ebp), %eax
-; X86-NOSSE-NEXT:    movl %eax, (%esp)
-; X86-NOSSE-NEXT:    pushl %esi
+; X86-NOSSE-NEXT:    movl 8(%ebp), %eax
+; X86-NOSSE-NEXT:    leal 12(%ebp), %ecx
+; X86-NOSSE-NEXT:    movl %ecx, (%esp)
+; X86-NOSSE-NEXT:    pushl %eax
 ; X86-NOSSE-NEXT:    calll _get_f
 ; X86-NOSSE-NEXT:    addl $4, %esp
-; X86-NOSSE-NEXT:    movl %esi, 8(%ebp)
-; X86-NOSSE-NEXT:    leal -4(%ebp), %esp
-; X86-NOSSE-NEXT:    popl %esi
+; X86-NOSSE-NEXT:    movl %ebp, %esp
 ; X86-NOSSE-NEXT:    popl %ebp
 ; X86-NOSSE-NEXT:    jmpl *%eax # TAILCALL
 ;
@@ -262,24 +259,21 @@ define void @f_thunk(ptr %this, ...) {
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    pushl %ebp
 ; X86-SSE-NEXT:    movl %esp, %ebp
-; X86-SSE-NEXT:    pushl %esi
 ; X86-SSE-NEXT:    andl $-16, %esp
 ; X86-SSE-NEXT:    subl $80, %esp
 ; X86-SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
 ; X86-SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
 ; X86-SSE-NEXT:    movaps %xmm0, (%esp) # 16-byte Spill
-; X86-SSE-NEXT:    movl 8(%ebp), %esi
-; X86-SSE-NEXT:    leal 12(%ebp), %eax
-; X86-SSE-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-SSE-NEXT:    pushl %esi
+; X86-SSE-NEXT:    movl 8(%ebp), %eax
+; X86-SSE-NEXT:    leal 12(%ebp), %ecx
+; X86-SSE-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    pushl %eax
 ; X86-SSE-NEXT:    calll _get_f
 ; X86-SSE-NEXT:    addl $4, %esp
-; X86-SSE-NEXT:    movl %esi, 8(%ebp)
 ; X86-SSE-NEXT:    movaps (%esp), %xmm0 # 16-byte Reload
 ; X86-SSE-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
 ; X86-SSE-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-SSE-NEXT:    leal -4(%ebp), %esp
-; X86-SSE-NEXT:    popl %esi
+; X86-SSE-NEXT:    movl %ebp, %esp
 ; X86-SSE-NEXT:    popl %ebp
 ; X86-SSE-NEXT:    jmpl *%eax # TAILCALL
   %ap = alloca [4 x ptr], align 16
@@ -310,11 +304,14 @@ define void @g_thunk(ptr %fptr_i8, ...) {
 ; WINDOWS:       # %bb.0:
 ; WINDOWS-NEXT:    rex64 jmpq *%rcx # TAILCALL
 ;
-; X86-LABEL: g_thunk:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT:    jmpl *%eax # TAILCALL
+; X86-NOSSE-LABEL: g_thunk:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    jmpl *{{[0-9]+}}(%esp) # TAILCALL
+;
+; X86-SSE-LABEL: g_thunk:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    jmpl *%eax # TAILCALL
   musttail call void (ptr, ...) %fptr_i8(ptr %fptr_i8, ...)
   ret void
 }
@@ -374,10 +371,9 @@ define void @h_thunk(ptr %this, ...) {
 ; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; X86-NEXT:    jmpl *%ecx # TAILCALL
 ; X86-NEXT:  LBB2_2: # %else
-; X86-NEXT:    movl 8(%eax), %ecx
+; X86-NEXT:    movl 8(%eax), %eax
 ; X86-NEXT:    movl $42, _g
-; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT:    jmpl *%ecx # TAILCALL
+; X86-NEXT:    jmpl *%eax # TAILCALL
   %cond = load i1, ptr %this
   br i1 %cond, label %then, label %else
 
diff --git a/llvm/test/CodeGen/X86/musttail.ll b/llvm/test/CodeGen/X86/musttail.ll
index 9e02585a3ffdc..57c47ad683127 100644
--- a/llvm/test/CodeGen/X86/musttail.ll
+++ b/llvm/test/CodeGen/X86/musttail.ll
@@ -46,7 +46,7 @@ define i32 @t4(ptr %fn, i32 %n, i32 %r) {
 ; CHECK: decl %[[n:.*]]
 ; CHECK-DAG: movl %[[r]], {{[0-9]+}}(%esp)
 ; CHECK-DAG: movl %[[n]], {{[0-9]+}}(%esp)
-; CHECK: jmpl *%{{.*}}
+; CHECK: jmpl *{{.*}} # TAILCALL
 
 entry:
   %r1 = add i32 %r, 1
@@ -74,7 +74,7 @@ define i32 @t5(ptr %fn, i32 %n, i32 %r) alignstack(32) {
 ; CHECK: leal {{[-0-9]+}}(%ebp), %esp
 ; CHECK: popl %esi
 ; CHECK: popl %ebp
-; CHECK: jmpl *%{{.*}}
+; CHECK: jmpl *{{.*}} # TAILCALL
 
 entry:
   %a = alloca i8, i32 %n


