[llvm] [AArch64] Initial compiler support for SVE unwind on Windows. (PR #138609)

Eli Friedman via llvm-commits llvm-commits at lists.llvm.org
Mon May 12 22:14:42 PDT 2025


https://github.com/efriedma-quic updated https://github.com/llvm/llvm-project/pull/138609

>From 84f3c20abc45eec8b05ba633d99f5f38c428277f Mon Sep 17 00:00:00 2001
From: Eli Friedman <efriedma at quicinc.com>
Date: Tue, 25 Mar 2025 11:37:23 -0700
Subject: [PATCH 1/3] [AArch64] Initial compiler support for SVE unwind on
 Windows.

Most bits of this are straightforward: when we emit SVE instructions in
the prologue/epilogue, emit the corresponding SEH unwind opcodes.

The unfortunately nasty bit is the handling of the frame pointer in
functions that use the SVE calling convention.  If we have SVE
callee saves, and need to restore the stack pointer from the frame
pointer, it's impossible to encode callee saves that happen after the
frame pointer. So this patch rearranges the stack to put SVE callee
saves first.  This isn't really that complicated on its own, but it
leads to a lot of tricky conditionals (see FPAfterSVECalleeSaves).
---
 llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp |   26 +
 .../AArch64/AArch64CallingConvention.td       |    8 +
 .../Target/AArch64/AArch64FrameLowering.cpp   |  153 ++-
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  |   32 +-
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |    3 +
 .../Target/AArch64/AArch64RegisterInfo.cpp    |    7 +
 llvm/test/CodeGen/AArch64/win-sve.ll          | 1034 +++++++++++++++++
 7 files changed, 1233 insertions(+), 30 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/win-sve.ll

diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 870df4c387ca4..47bc44dab9d90 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -3294,6 +3294,32 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
                                      -MI->getOperand(2).getImm());
     return;
 
+  case AArch64::SEH_AllocZ:
+    assert(MI->getOperand(0).getImm() >= 0 &&
+           "AllocZ SEH opcode offset must be non-negative");
+    assert(MI->getOperand(0).getImm() <= 255 &&
+           "AllocZ SEH opcode offset must fit into 8 bits");
+    TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
+    return;
+
+  case AArch64::SEH_SaveZReg:
+    assert(MI->getOperand(1).getImm() >= 0 &&
+           "SaveZReg SEH opcode offset must be non-negative");
+    assert(MI->getOperand(1).getImm() <= 255 &&
+           "SaveZReg SEH opcode offset must fit into 8 bits");
+    TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
+                                MI->getOperand(1).getImm());
+    return;
+
+  case AArch64::SEH_SavePReg:
+    assert(MI->getOperand(1).getImm() >= 0 &&
+           "SavePReg SEH opcode offset must be non-negative");
+    assert(MI->getOperand(1).getImm() <= 255 &&
+           "SavePReg SEH opcode offset must fit into 8 bits");
+    TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
+                                MI->getOperand(1).getImm());
+    return;
+
   case AArch64::BLR:
   case AArch64::BR: {
     recordIfImportCall(MI);
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.td b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
index 7cca6d9bc6b9c..287bbbce95bd9 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
@@ -606,6 +606,9 @@ def CSR_Win_AArch64_Arm64EC_Thunk : CalleeSavedRegs<(add (sequence "Q%u", 6, 15)
 def CSR_AArch64_AAVPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                           X25, X26, X27, X28, LR, FP,
                                           (sequence "Q%u", 8, 23))>;
+def CSR_Win_AArch64_AAVPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
+                                              X25, X26, X27, X28, FP, LR,
+                                              (sequence "Q%u", 8, 23))>;
 
 // Functions taking SVE arguments or returning an SVE type
 // must (additionally) preserve full Z8-Z23 and predicate registers P4-P15
@@ -619,6 +622,11 @@ def CSR_Darwin_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "Z%u", 8, 23),
                                                         LR, FP, X19, X20, X21, X22,
                                                         X23, X24, X25, X26, X27, X28)>;
 
+def CSR_Win_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "P%u", 4, 11),
+                                                     (sequence "Z%u", 8, 23),
+                                                     X19, X20, X21, X22, X23, X24,
+                                                     X25, X26, X27, X28, FP, LR)>;
+
 // SME ABI support routines such as __arm_tpidr2_save/restore preserve most registers.
 def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
                           : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 78ac57e3e92a6..6b7e494b2c59b 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1200,7 +1200,25 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
 
   switch (Opc) {
   default:
-    llvm_unreachable("No SEH Opcode for this instruction");
+    report_fatal_error("No SEH Opcode for this instruction");
+  case AArch64::STR_ZXI:
+  case AArch64::LDR_ZXI: {
+    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
+    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveZReg))
+              .addImm(Reg0)
+              .addImm(Imm)
+              .setMIFlag(Flag);
+    break;
+  }
+  case AArch64::STR_PXI:
+  case AArch64::LDR_PXI: {
+    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
+    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SavePReg))
+              .addImm(Reg0)
+              .addImm(Imm)
+              .setMIFlag(Flag);
+    break;
+  }
   case AArch64::LDPDpost:
     Imm = -Imm;
     [[fallthrough]];
@@ -1592,6 +1610,9 @@ static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
   case AArch64::CMPNE_PPzZI_B:
     return I->getFlag(MachineInstr::FrameSetup) ||
            I->getFlag(MachineInstr::FrameDestroy);
+  case AArch64::SEH_SavePReg:
+  case AArch64::SEH_SaveZReg:
+    return true;
   }
 }
 
@@ -1874,12 +1895,48 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
   bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
   unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);
 
+  // Windows unwind can't represent the required stack adjustments if we have
+  // both SVE callee-saves and dynamic stack allocations, and the frame
+  // pointer is before the SVE spills.  The allocation of the frame pointer
+  // must be the last instruction in the prologue so the unwinder can restore
+  // the stack pointer correctly. (And there isn't any unwind opcode for
+  // `addvl sp, x29, -17`.)
+  //
+  // Because of this, we do spills in the opposite order on Windows: first SVE,
+  // then GPRs. The main side-effect of this is that it makes accessing
+  // parameters passed on the stack more expensive.
+  //
+  // We could consider rearranging the spills for simpler cases.
+  bool FPAfterSVECalleeSaves =
+      Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();
+
   auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
   // All of the remaining stack allocations are for locals.
   AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
   bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
   bool HomPrologEpilog = homogeneousPrologEpilog(MF);
-  if (CombineSPBump) {
+  if (FPAfterSVECalleeSaves) {
+    // If we're doing SVE saves first, we need to immediately allocate space
+    // for fixed objects, then space for the SVE callee saves.
+    //
+    // Windows unwind requires that the scalable size is a multiple of 16;
+    // that's handled when the callee-saved size is computed.
+    auto SaveSize =
+        StackOffset::getScalable(AFI->getSVECalleeSavedStackSize()) +
+        StackOffset::getFixed(FixedObject);
+    allocateStackSpace(MBB, MBBI, 0, SaveSize, NeedsWinCFI, &HasWinCFI,
+                       /*EmitCFI=*/false, StackOffset{},
+                       /*FollowupAllocs=*/true);
+    NumBytes -= FixedObject;
+
+    // Now allocate space for the GPR callee saves.
+    while (MBBI != End && IsSVECalleeSave(MBBI))
+      ++MBBI;
+    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
+        MBB, MBBI, DL, TII, -AFI->getCalleeSavedStackSize(), NeedsWinCFI,
+        &HasWinCFI, EmitAsyncCFI);
+    NumBytes -= AFI->getCalleeSavedStackSize();
+  } else if (CombineSPBump) {
     assert(!SVEStackSize && "Cannot combine SP bump with SVE");
     emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                     StackOffset::getFixed(-NumBytes), TII,
@@ -1982,6 +2039,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
           : 0;
 
   if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
+    if (AFI->getSVECalleeSavedStackSize())
+      report_fatal_error("SVE callee saves not yet supported");
     uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
     if (NeedsWinCFI) {
       HasWinCFI = true;
@@ -2116,9 +2175,11 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                       << "\n");
     // Find callee save instructions in frame.
     CalleeSavesBegin = MBBI;
-    assert(IsSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction");
-    while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator())
-      ++MBBI;
+    if (!FPAfterSVECalleeSaves) {
+      assert(IsSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction");
+      while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator())
+        ++MBBI;
+    }
     CalleeSavesEnd = MBBI;
 
     SVECalleeSavesSize = StackOffset::getScalable(CalleeSavedSize);
@@ -2129,9 +2190,11 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
   StackOffset CFAOffset =
       StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes);
   StackOffset LocalsSize = SVELocalsSize + StackOffset::getFixed(NumBytes);
-  allocateStackSpace(MBB, CalleeSavesBegin, 0, SVECalleeSavesSize, false,
-                     nullptr, EmitAsyncCFI && !HasFP, CFAOffset,
-                     MFI.hasVarSizedObjects() || LocalsSize);
+  if (!FPAfterSVECalleeSaves) {
+    allocateStackSpace(MBB, CalleeSavesBegin, 0, SVECalleeSavesSize, false,
+                       nullptr, EmitAsyncCFI && !HasFP, CFAOffset,
+                       MFI.hasVarSizedObjects() || LocalsSize);
+  }
   CFAOffset += SVECalleeSavesSize;
 
   if (EmitAsyncCFI)
@@ -2303,10 +2366,16 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
     assert(AfterCSRPopSize == 0);
     return;
   }
+
+  bool FPAfterSVECalleeSaves =
+      Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize();
+
   bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
   // Assume we can't combine the last pop with the sp restore.
   bool CombineAfterCSRBump = false;
-  if (!CombineSPBump && PrologueSaveSize != 0) {
+  if (FPAfterSVECalleeSaves) {
+    AfterCSRPopSize = FixedObject;
+  } else if (!CombineSPBump && PrologueSaveSize != 0) {
     MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
     while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
            AArch64InstrInfo::isSEHInstruction(*Pop))
@@ -2339,7 +2408,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   while (LastPopI != Begin) {
     --LastPopI;
     if (!LastPopI->getFlag(MachineInstr::FrameDestroy) ||
-        IsSVECalleeSave(LastPopI)) {
+        (!FPAfterSVECalleeSaves && IsSVECalleeSave(LastPopI))) {
       ++LastPopI;
       break;
     } else if (CombineSPBump)
@@ -2415,6 +2484,9 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
   MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
   if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
+    if (FPAfterSVECalleeSaves)
+      RestoreEnd = MBB.getFirstTerminator();
+
     RestoreBegin = std::prev(RestoreEnd);
     while (RestoreBegin != MBB.begin() &&
            IsSVECalleeSave(std::prev(RestoreBegin)))
@@ -2430,7 +2502,31 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   }
 
   // Deallocate the SVE area.
-  if (SVEStackSize) {
+  if (FPAfterSVECalleeSaves) {
+    // If the callee-save area is before FP, restoring the FP implicitly
+    // deallocates non-callee-save SVE allocations.  Otherwise, deallocate
+    // them explicitly.
+    if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
+      emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
+                      DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
+                      NeedsWinCFI, &HasWinCFI);
+    }
+
+    // Deallocate callee-save non-SVE registers.
+    emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
+                    StackOffset::getFixed(AFI->getCalleeSavedStackSize()), TII,
+                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
+
+    // Deallocate fixed objects.
+    emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
+                    StackOffset::getFixed(FixedObject), TII,
+                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
+
+    // Deallocate callee-save SVE registers.
+    emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
+                    DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
+                    NeedsWinCFI, &HasWinCFI);
+  } else if (SVEStackSize) {
     // If we have stack realignment or variable sized objects on the stack,
     // restore the stack pointer from the frame pointer prior to SVE CSR
     // restoration.
@@ -2450,20 +2546,20 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
         emitFrameOffset(
             MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
             StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy,
-            false, false, nullptr, EmitCFI && !hasFP(MF),
+            false, NeedsWinCFI, &HasWinCFI, EmitCFI && !hasFP(MF),
             SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize));
         NumBytes = 0;
       }
 
       emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                       DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
-                      false, nullptr, EmitCFI && !hasFP(MF),
+                      NeedsWinCFI, &HasWinCFI, EmitCFI && !hasFP(MF),
                       SVEStackSize +
                           StackOffset::getFixed(NumBytes + PrologueSaveSize));
 
       emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
                       DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
-                      false, nullptr, EmitCFI && !hasFP(MF),
+                      NeedsWinCFI, &HasWinCFI, EmitCFI && !hasFP(MF),
                       DeallocateAfter +
                           StackOffset::getFixed(NumBytes + PrologueSaveSize));
     }
@@ -2757,10 +2853,27 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
   }
 
   StackOffset ScalableOffset = {};
-  if (UseFP && !(isFixed || isCSR))
-    ScalableOffset = -SVEStackSize;
-  if (!UseFP && (isFixed || isCSR))
-    ScalableOffset = SVEStackSize;
+  bool FPAfterSVECalleeSaves =
+      isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
+  if (FPAfterSVECalleeSaves) {
+    // In this stack layout, the FP is in between the callee saves and other
+    // SVE allocations.
+    StackOffset SVECalleeSavedStack =
+        StackOffset::getScalable(AFI->getSVECalleeSavedStackSize());
+    if (UseFP) {
+      if (!(isFixed || isCSR))
+        ScalableOffset = SVECalleeSavedStack - SVEStackSize;
+      else
+        ScalableOffset = SVECalleeSavedStack;
+    } else if (!UseFP && (isFixed || isCSR)) {
+      ScalableOffset = SVEStackSize;
+    }
+  } else {
+    if (UseFP && !(isFixed || isCSR))
+      ScalableOffset = -SVEStackSize;
+    if (!UseFP && (isFixed || isCSR))
+      ScalableOffset = SVEStackSize;
+  }
 
   if (UseFP) {
     FrameReg = RegInfo->getFrameRegister(MF);
@@ -2934,7 +3047,9 @@ static void computeCalleeSaveRegisterPairs(
     RegInc = -1;
     FirstReg = Count - 1;
   }
-  int ScalableByteOffset = AFI->getSVECalleeSavedStackSize();
+  bool FPAfterSVECalleeSaves = IsWindows && AFI->getSVECalleeSavedStackSize();
+  int ScalableByteOffset =
+      FPAfterSVECalleeSaves ? 0 : AFI->getSVECalleeSavedStackSize();
   bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace();
   Register LastReg = 0;
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 1a13adc300d2b..c1ac18fb09180 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1176,6 +1176,9 @@ bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
     case AArch64::SEH_PACSignLR:
     case AArch64::SEH_SaveAnyRegQP:
     case AArch64::SEH_SaveAnyRegQPX:
+    case AArch64::SEH_AllocZ:
+    case AArch64::SEH_SaveZReg:
+    case AArch64::SEH_SavePReg:
       return true;
   }
 }
@@ -5988,10 +5991,16 @@ static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
     }
 
     if (NeedsWinCFI) {
-      assert(Sign == 1 && "SEH directives should always have a positive sign");
       int Imm = (int)(ThisVal << LocalShiftSize);
-      if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
-          (SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
+      if (VScale != 1 && DestReg == AArch64::SP) {
+        if (HasWinCFI)
+          *HasWinCFI = true;
+        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AllocZ))
+            .addImm(ThisVal)
+            .setMIFlag(Flag);
+      } else if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
+                 (SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
+        assert(VScale == 1 && "Expected non-scalable operation");
         if (HasWinCFI)
           *HasWinCFI = true;
         if (Imm == 0)
@@ -6003,6 +6012,7 @@ static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
         assert(Offset == 0 && "Expected remaining offset to be zero to "
                               "emit a single SEH directive");
       } else if (DestReg == AArch64::SP) {
+        assert(VScale == 1 && "Expected non-scalable operation");
         if (HasWinCFI)
           *HasWinCFI = true;
         assert(SrcReg == AArch64::SP && "Unexpected SrcReg for SEH_StackAlloc");
@@ -6057,14 +6067,14 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
 
   assert(!(SetNZCV && (NumPredicateVectors || NumDataVectors)) &&
          "SetNZCV not supported with SVE vectors");
-  assert(!(NeedsWinCFI && (NumPredicateVectors || NumDataVectors)) &&
-         "WinCFI not supported with SVE vectors");
+  assert(!(NeedsWinCFI && NumPredicateVectors) &&
+         "WinCFI can't allocate fractions of an SVE data vector");
 
   if (NumDataVectors) {
     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumDataVectors,
-                       UseSVL ? AArch64::ADDSVL_XXI : AArch64::ADDVL_XXI,
-                       TII, Flag, NeedsWinCFI, nullptr, EmitCFAOffset,
-                       CFAOffset, FrameReg);
+                       UseSVL ? AArch64::ADDSVL_XXI : AArch64::ADDVL_XXI, TII,
+                       Flag, NeedsWinCFI, HasWinCFI, EmitCFAOffset, CFAOffset,
+                       FrameReg);
     CFAOffset += StackOffset::getScalable(-NumDataVectors * 16);
     SrcReg = DestReg;
   }
@@ -6072,9 +6082,9 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
   if (NumPredicateVectors) {
     assert(DestReg != AArch64::SP && "Unaligned access to SP");
     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumPredicateVectors,
-                       UseSVL ? AArch64::ADDSPL_XXI : AArch64::ADDPL_XXI,
-                       TII, Flag, NeedsWinCFI, nullptr, EmitCFAOffset,
-                       CFAOffset, FrameReg);
+                       UseSVL ? AArch64::ADDSPL_XXI : AArch64::ADDPL_XXI, TII,
+                       Flag, NeedsWinCFI, HasWinCFI, EmitCFAOffset, CFAOffset,
+                       FrameReg);
   }
 }
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 3962c7eba5833..6165a1ac3e079 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5408,6 +5408,9 @@ let isPseudo = 1 in {
   def SEH_PACSignLR : Pseudo<(outs), (ins), []>, Sched<[]>;
   def SEH_SaveAnyRegQP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
   def SEH_SaveAnyRegQPX : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
+  def SEH_AllocZ : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
+  def SEH_SaveZReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
+  def SEH_SavePReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
 }
 
 // Pseudo instructions for Windows EH
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 52b362875b4ef..e9a1b558b2dfe 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -98,6 +98,13 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
       return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
     if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
       return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
+    if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
+      return CSR_Win_AArch64_AAVPCS_SaveList;
+    if (MF->getFunction().getCallingConv() ==
+        CallingConv::AArch64_SVE_VectorCall)
+      return CSR_Win_AArch64_SVE_AAPCS_SaveList;
+    if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
+      return CSR_Win_AArch64_SVE_AAPCS_SaveList;
     return CSR_Win_AArch64_AAPCS_SaveList;
   }
   if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
diff --git a/llvm/test/CodeGen/AArch64/win-sve.ll b/llvm/test/CodeGen/AArch64/win-sve.ll
new file mode 100644
index 0000000000000..8e0a9ad170e84
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/win-sve.ll
@@ -0,0 +1,1034 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-windows-msvc -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-windows-msvc -mattr=+sve -filetype=obj < %s -o /dev/null
+
+declare void @g()
+define i32 @f(<vscale x 2 x i64> %x) {
+; CHECK-LABEL: f:
+; CHECK:       .seh_proc f
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr_x 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    bl g
+; CHECK-NEXT:    mov w0, #3 // =0x3
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    ldp x29, x30, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  call void @g()
+  ret i32 3
+}
+
+declare void @g2(ptr,ptr)
+define void @f2(i64 %n, <vscale x 2 x i64> %x) {
+; CHECK-LABEL: f2:
+; CHECK:       .seh_proc f2
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    str x19, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg_x x19, 32
+; CHECK-NEXT:    str x28, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    add x29, sp, #16
+; CHECK-NEXT:    .seh_add_fp 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    lsl x8, x0, #3
+; CHECK-NEXT:    mov x19, sp
+; CHECK-NEXT:    add x8, x8, #15
+; CHECK-NEXT:    lsr x15, x8, #4
+; CHECK-NEXT:    bl __chkstk
+; CHECK-NEXT:    sub x1, sp, x15, lsl #4
+; CHECK-NEXT:    mov sp, x1
+; CHECK-NEXT:    add x0, x19, #0
+; CHECK-NEXT:    bl g2
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    sub sp, x29, #16
+; CHECK-NEXT:    .seh_add_fp 16
+; CHECK-NEXT:    ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    ldr x28, [sp, #8] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    ldr x19, [sp] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x19, 0
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    .seh_stackalloc 32
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  %p1 = alloca [4 x i64]
+  %p2 = alloca i64, i64 %n
+  call void @g2(ptr %p1, ptr %p2)
+  ret void
+}
+
+declare void @g3(ptr,ptr)
+; f3: fixed-size locals only (two scalar allocas) alongside the full SVE
+; callee-save set. The SVE area (17 vector-lengths: p4-p11 and z8-z23) is
+; allocated and spilled first, then FPLR and a 16-byte local area. No
+; .seh_add_fp is emitted, so the unwind info stays SP-relative, and the
+; epilogue unwind codes mirror the prologue in reverse.
+define void @f3(i64 %n, <vscale x 2 x i64> %x) {
+; CHECK-LABEL: f3:
+; CHECK:       .seh_proc f3
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr_x 16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    add x0, sp, #8
+; CHECK-NEXT:    mov x1, sp
+; CHECK-NEXT:    bl g3
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldp x29, x30, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  %p1 = alloca i64
+  %p2 = alloca i64
+  call void @g3(ptr %p1, ptr %p2)
+  ret void
+}
+
+declare void @g4(ptr,ptr)
+; f4: like f3 but with one scalable (SVE) local in addition to a fixed-size
+; one. After the fixed 16-byte area, an extra "addvl sp, sp, #-1" /
+; .seh_allocz 1 allocates the scalable local; the epilogue deallocates it
+; first, keeping the unwind codes an exact reverse of the prologue.
+define void @f4(i64 %n, <vscale x 2 x i64> %x) {
+; CHECK-LABEL: f4:
+; CHECK:       .seh_proc f4
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr_x 16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    mov x0, sp
+; CHECK-NEXT:    add x1, sp, #16
+; CHECK-NEXT:    bl g4
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldp x29, x30, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  %p1 = alloca [2 x i64]
+  %p2 = alloca <vscale x 2 x i64>
+  call void @g4(ptr %p1, ptr %p2)
+  ret void
+}
+
+declare void @g5(ptr,ptr,ptr)
+; f5: fixed locals, an SVE local, and a dynamic alloca together, which forces
+; a frame pointer (add x29, sp, #16 / .seh_add_fp 16). The SVE callee-saves
+; are placed first on the stack so the epilogue can recover SP with
+; "sub sp, x29, #16" and still encode all the restores (the
+; FPAfterSVECalleeSaves layout from the commit message); the dynamic
+; allocation is probed via __chkstk.
+define void @f5(i64 %n, <vscale x 2 x i64> %x) {
+; CHECK-LABEL: f5:
+; CHECK:       .seh_proc f5
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    str x19, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg_x x19, 32
+; CHECK-NEXT:    str x28, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    add x29, sp, #16
+; CHECK-NEXT:    .seh_add_fp 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    sub sp, sp, #64
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    lsl x8, x0, #3
+; CHECK-NEXT:    mov x19, sp
+; CHECK-NEXT:    add x8, x8, #15
+; CHECK-NEXT:    lsr x15, x8, #4
+; CHECK-NEXT:    bl __chkstk
+; CHECK-NEXT:    sub x2, sp, x15, lsl #4
+; CHECK-NEXT:    mov sp, x2
+; CHECK-NEXT:    sub x1, x29, #16
+; CHECK-NEXT:    add x0, x19, #0
+; CHECK-NEXT:    addvl x1, x1, #-18
+; CHECK-NEXT:    bl g5
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    sub sp, x29, #16
+; CHECK-NEXT:    .seh_add_fp 16
+; CHECK-NEXT:    ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    ldr x28, [sp, #8] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    ldr x19, [sp] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x19, 0
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    .seh_stackalloc 32
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  %p1 = alloca [8 x i64]
+  %p2 = alloca <vscale x 2 x i64>
+  %p3 = alloca i64, i64 %n
+  call void @g5(ptr %p1, ptr %p2, ptr %p3)
+  ret void
+}
+
+declare dso_local i32 @__CxxFrameHandler3(...)
+declare void @g6(ptr, i64)
+declare void @cleanup()
+define void @f6(<vscale x 2 x i64> %x, i64 %n1, i64 %n2, i64 %n3, i64 %n4,
+; CHECK-LABEL: f6:
+; CHECK:       .Lfunc_begin0:
+; CHECK-NEXT:  .seh_proc f6
+; CHECK-NEXT:    .seh_handler __CxxFrameHandler3, @unwind, @except
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    str x19, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg_x x19, 32
+; CHECK-NEXT:    str x28, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    add x29, sp, #16
+; CHECK-NEXT:    .seh_add_fp 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    sub sp, sp, #64
+; CHECK-NEXT:    mov x0, #-2 // =0xfffffffffffffffe
+; CHECK-NEXT:    addvl x8, x29, #17
+; CHECK-NEXT:    mov x19, sp
+; CHECK-NEXT:    stur x0, [x8, #16]
+; CHECK-NEXT:    addvl x8, x29, #17
+; CHECK-NEXT:    ldr x1, [x8, #32]
+; CHECK-NEXT:  .Ltmp0:
+; CHECK-NEXT:    add x0, x19, #0
+; CHECK-NEXT:    bl g6
+; CHECK-NEXT:  .Ltmp1:
+; CHECK-NEXT:  // %bb.1: // %invoke.cont
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    add sp, sp, #64
+; CHECK-NEXT:    .seh_stackalloc 64
+; CHECK-NEXT:    ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    ldr x28, [sp, #8] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    ldr x19, [sp] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x19, 0
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    .seh_stackalloc 32
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .word $cppxdata$f6 at IMGREL
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
+; CHECK-NEXT:    .def "?dtor$2@?0?f6 at 4HA";
+; CHECK-NEXT:    .scl 3;
+; CHECK-NEXT:    .type 32;
+; CHECK-NEXT:    .endef
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  "?dtor$2@?0?f6 at 4HA":
+; CHECK-NEXT:  .seh_proc "?dtor$2@?0?f6 at 4HA"
+; CHECK-NEXT:  .LBB5_2: // %ehcleanup
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    str x19, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg_x x19, 32
+; CHECK-NEXT:    str x28, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    bl cleanup
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 16
+; CHECK-NEXT:    ldr x28, [sp, #8] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x28, 8
+; CHECK-NEXT:    ldr x19, [sp] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x19, 0
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    .seh_stackalloc 32
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+                i64 %n5, i64 %n6, i64 %n7, i64 %n8, i64 %n9) personality ptr @__CxxFrameHandler3 {
+  %p1 = alloca [8 x i64]
+  invoke void @g6(ptr %p1, i64 %n9) to label %invoke.cont unwind label %ehcleanup
+
+invoke.cont:
+  ret void
+
+ehcleanup:                                        ; preds = %entry
+  %c = cleanuppad within none []
+  call void @cleanup() [ "funclet"(token %c) ]
+  cleanupret from %c unwind to caller
+}
+
+declare void @g7(ptr)
+define void @f7(i64 %n) {
+; CHECK-LABEL: f7:
+; CHECK:       .seh_proc f7
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr_x 16
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    mov x0, sp
+; CHECK-NEXT:    bl g7
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr_x 16
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  %p = alloca <vscale x 2 x i64>
+  call void @g7(ptr %p)
+  ret void
+}
+
+define void @f8(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: f8:
+; CHECK:       .seh_proc f8
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 0
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg_x x30, 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    //APP
+; CHECK-NEXT:    //NO_APP
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    ldr x30, [sp] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x30, 0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 0
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  call void asm "", "~{d8}"()
+  ret void
+}

>From 7b1f63203a5a59afd4bacac2e130c3599763f328 Mon Sep 17 00:00:00 2001
From: Eli Friedman <efriedma at quicinc.com>
Date: Wed, 7 May 2025 18:08:13 -0700
Subject: [PATCH 2/3] Partially address review comments.

---
 .../Target/AArch64/AArch64FrameLowering.cpp   |  20 +-
 llvm/test/CodeGen/AArch64/win-sve.ll          | 202 +++++++++++++++++-
 2 files changed, 215 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 6b7e494b2c59b..05f1b67c42bd5 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2040,7 +2040,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
 
   if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
     if (AFI->getSVECalleeSavedStackSize())
-      report_fatal_error("SVE callee saves not yet supported");
+      report_fatal_error("SVE callee saves not yet supported with stack probing");
     uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
     if (NeedsWinCFI) {
       HasWinCFI = true;
@@ -2683,7 +2683,11 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
     return StackOffset::getFixed(ObjectOffset - getOffsetOfLocalArea());
 
   const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
+  bool FPAfterSVECalleeSaves =
+      isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
   if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+    if (FPAfterSVECalleeSaves && -ObjectOffset <= AFI->getSVECalleeSavedStackSize())
+      return StackOffset::get(0, ObjectOffset);
     return StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()),
                             ObjectOffset);
   }
@@ -2693,8 +2697,12 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
       !IsFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI));
 
   StackOffset ScalableOffset = {};
-  if (!IsFixed && !IsCSR)
+  if (!IsFixed && !IsCSR) {
     ScalableOffset = -SVEStackSize;
+  } else if (FPAfterSVECalleeSaves && IsCSR) {
+    ScalableOffset =
+        -StackOffset::getScalable(AFI->getSVECalleeSavedStackSize());
+  }
 
   return StackOffset::getFixed(ObjectOffset) + ScalableOffset;
 }
@@ -2832,6 +2840,9 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
       "In the presence of dynamic stack pointer realignment, "
       "non-argument/CSR objects cannot be accessed through the frame pointer");
 
+  bool FPAfterSVECalleeSaves =
+      isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
+
   if (isSVE) {
     StackOffset FPOffset =
         StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset);
@@ -2839,6 +2850,9 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
         SVEStackSize +
         StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(),
                          ObjectOffset);
+    if (FPAfterSVECalleeSaves) {
+      FPOffset += StackOffset::getScalable(AFI->getSVECalleeSavedStackSize());
+    }
     // Always use the FP for SVE spills if available and beneficial.
     if (hasFP(MF) && (SPOffset.getFixed() ||
                       FPOffset.getScalable() < SPOffset.getScalable() ||
@@ -2853,8 +2867,6 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
   }
 
   StackOffset ScalableOffset = {};
-  bool FPAfterSVECalleeSaves =
-      isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
   if (FPAfterSVECalleeSaves) {
     // In this stack layout, the FP is in between the callee saves and other
     // SVE allocations.
diff --git a/llvm/test/CodeGen/AArch64/win-sve.ll b/llvm/test/CodeGen/AArch64/win-sve.ll
index 8e0a9ad170e84..11c994a58149e 100644
--- a/llvm/test/CodeGen/AArch64/win-sve.ll
+++ b/llvm/test/CodeGen/AArch64/win-sve.ll
@@ -609,7 +609,7 @@ define void @f5(i64 %n, <vscale x 2 x i64> %x) {
 ; CHECK-NEXT:    mov sp, x2
 ; CHECK-NEXT:    sub x1, x29, #16
 ; CHECK-NEXT:    add x0, x19, #0
-; CHECK-NEXT:    addvl x1, x1, #-18
+; CHECK-NEXT:    addvl x1, x1, #-1
 ; CHECK-NEXT:    bl g5
 ; CHECK-NEXT:    .seh_startepilogue
 ; CHECK-NEXT:    sub sp, x29, #16
@@ -686,7 +686,7 @@ define void @f5(i64 %n, <vscale x 2 x i64> %x) {
 declare dso_local i32 @__CxxFrameHandler3(...)
 declare void @g6(ptr, i64)
 declare void @cleanup()
-define void @f6(<vscale x 2 x i64> %x, i64 %n1, i64 %n2, i64 %n3, i64 %n4,
+define void @f6(<vscale x 2 x i64> %x, [8 x i64] %pad, i64 %n9) personality ptr @__CxxFrameHandler3 {
 ; CHECK-LABEL: f6:
 ; CHECK:       .Lfunc_begin0:
 ; CHECK-NEXT:  .seh_proc f6
@@ -964,7 +964,6 @@ define void @f6(<vscale x 2 x i64> %x, i64 %n1, i64 %n2, i64 %n3, i64 %n4,
 ; CHECK-NEXT:    .seh_allocz 17
 ; CHECK-NEXT:    .seh_endepilogue
 ; CHECK-NEXT:    ret
-                i64 %n5, i64 %n6, i64 %n7, i64 %n8, i64 %n9) personality ptr @__CxxFrameHandler3 {
   %p1 = alloca [8 x i64]
   invoke void @g6(ptr %p1, i64 %n9) to label %invoke.cont unwind label %ehcleanup
 
@@ -1032,3 +1031,200 @@ define void @f8(<vscale x 2 x i64> %v) {
   call void asm "", "~{d8}"()
   ret void
 }
+
+define void @f9(<vscale x 2 x i64> %v, ...) {
+; CHECK-LABEL: f9:
+; CHECK:       .seh_proc f9
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #64
+; CHECK-NEXT:    .seh_stackalloc 64
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 0
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg_x x30, 16
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    add x9, sp, #8
+; CHECK-NEXT:    str x2, [x8, #32]
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    str x0, [x8, #16]
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    str x1, [x8, #24]
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    str x3, [x8, #40]
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    str x4, [x8, #48]
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    str x5, [x8, #56]
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    str x6, [x8, #64]
+; CHECK-NEXT:    addvl x8, sp, #1
+; CHECK-NEXT:    str x7, [x8, #72]
+; CHECK-NEXT:    add x8, sp, #16
+; CHECK-NEXT:    addvl x8, x8, #1
+; CHECK-NEXT:    str x8, [sp, #8]
+; CHECK-NEXT:    //APP
+; CHECK-NEXT:    //NO_APP
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    ldr x30, [sp] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x30, 0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 0
+; CHECK-NEXT:    add sp, sp, #64
+; CHECK-NEXT:    .seh_stackalloc 64
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    add sp, sp, #64
+; CHECK-NEXT:    .seh_stackalloc 64
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  %va_list = alloca ptr
+  call void @llvm.va_start.p0(ptr %va_list)
+  call void asm "", "r,~{d8},~{memory}"(ptr %va_list)
+  ret void
+}
+
+declare void @g10(ptr,ptr)
+define void @f10(i64 %n, <vscale x 2 x i64> %x) "frame-pointer"="all" {
+; CHECK-LABEL: f10:
+; CHECK:       .seh_proc f10
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    addvl sp, sp, #-17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    str p4, [sp] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    str p5, [sp, #1, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    str p6, [sp, #2, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    str p7, [sp, #3, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    str p8, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    str p9, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    str p10, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    str p11, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    str z10, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    str z11, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    str z12, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    str z14, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    str z17, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    str z18, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    str z19, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    str z20, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    str z21, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    str z22, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    str z23, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    str x28, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    .seh_save_reg_x x28, 32
+; CHECK-NEXT:    stp x29, x30, [sp, #8] // 16-byte Folded Spill
+; CHECK-NEXT:    .seh_save_fplr 8
+; CHECK-NEXT:    add x29, sp, #8
+; CHECK-NEXT:    .seh_add_fp 8
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    sub x1, x29, #8
+; CHECK-NEXT:    mov x0, sp
+; CHECK-NEXT:    addvl x1, x1, #-1
+; CHECK-NEXT:    bl g10
+; CHECK-NEXT:    .seh_startepilogue
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    .seh_allocz 1
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    .seh_stackalloc 16
+; CHECK-NEXT:    ldp x29, x30, [sp, #8] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_fplr 8
+; CHECK-NEXT:    ldr x28, [sp] // 8-byte Folded Reload
+; CHECK-NEXT:    .seh_save_reg x28, 0
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    .seh_stackalloc 32
+; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z8, 1
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z9, 2
+; CHECK-NEXT:    ldr z10, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z10, 3
+; CHECK-NEXT:    ldr z11, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z11, 4
+; CHECK-NEXT:    ldr z12, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z12, 5
+; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z13, 6
+; CHECK-NEXT:    ldr z14, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z14, 7
+; CHECK-NEXT:    ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z15, 8
+; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z16, 9
+; CHECK-NEXT:    ldr z17, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z17, 10
+; CHECK-NEXT:    ldr z18, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z18, 11
+; CHECK-NEXT:    ldr z19, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z19, 12
+; CHECK-NEXT:    ldr z20, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z20, 13
+; CHECK-NEXT:    ldr z21, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z21, 14
+; CHECK-NEXT:    ldr z22, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z22, 15
+; CHECK-NEXT:    ldr z23, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    .seh_save_zreg z23, 16
+; CHECK-NEXT:    ldr p4, [sp] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p4, 0
+; CHECK-NEXT:    ldr p5, [sp, #1, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p5, 1
+; CHECK-NEXT:    ldr p6, [sp, #2, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p6, 2
+; CHECK-NEXT:    ldr p7, [sp, #3, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p7, 3
+; CHECK-NEXT:    ldr p8, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p8, 4
+; CHECK-NEXT:    ldr p9, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p9, 5
+; CHECK-NEXT:    ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p10, 6
+; CHECK-NEXT:    ldr p11, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    .seh_save_preg p11, 7
+; CHECK-NEXT:    addvl sp, sp, #17
+; CHECK-NEXT:    .seh_allocz 17
+; CHECK-NEXT:    .seh_endepilogue
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .seh_endfunclet
+; CHECK-NEXT:    .seh_endproc
+  %p1 = alloca [2 x i64]
+  %p2 = alloca <vscale x 2 x i64>
+  call void @g10(ptr %p1, ptr %p2)
+  ret void
+}

>From b1d8fff2feb4cd11ad804d038f62e83112797f1b Mon Sep 17 00:00:00 2001
From: Eli Friedman <efriedma at quicinc.com>
Date: Mon, 12 May 2025 22:02:33 -0700
Subject: [PATCH 3/3] More review comment fixes.

---
 .../Target/AArch64/AArch64FrameLowering.cpp   |  8 +++-
 .../AArch64/sve-stack-frame-layout-win.ll     | 40 +++++++++++++++++++
 2 files changed, 46 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-stack-frame-layout-win.ll

diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 05f1b67c42bd5..49b0d877c5e98 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2040,7 +2040,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
 
   if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
     if (AFI->getSVECalleeSavedStackSize())
-      report_fatal_error("SVE callee saves not yet supported with stack probing");
+      report_fatal_error(
+          "SVE callee saves not yet supported with stack probing");
     uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
     if (NeedsWinCFI) {
       HasWinCFI = true;
@@ -2686,7 +2687,8 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
   bool FPAfterSVECalleeSaves =
       isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
   if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
-    if (FPAfterSVECalleeSaves && -ObjectOffset <= AFI->getSVECalleeSavedStackSize())
+    if (FPAfterSVECalleeSaves &&
+        -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
       return StackOffset::get(0, ObjectOffset);
     return StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()),
                             ObjectOffset);
@@ -2844,6 +2846,8 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
       isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
 
   if (isSVE) {
+    assert(-ObjectOffset > (int64_t)AFI->getSVECalleeSavedStackSize() &&
+          "Math isn't correct for CSRs with FPAfterSVECalleeSaves");
     StackOffset FPOffset =
         StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset);
     StackOffset SPOffset =
diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout-win.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout-win.ll
new file mode 100644
index 0000000000000..3e226c64c2f74
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout-win.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mtriple=aarch64-windows-msvc -mattr=+sve -pass-remarks-analysis=stack-frame-layout < %s 2>&1 -o /dev/null | FileCheck %s
+
+; CHECK: Function: f10
+; CHECK: Offset: [SP+0-16 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-48 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP+0-258 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP+0-260 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP+0-262 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP+0-264 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP+0-266 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP+0-268 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP+0-270 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP+0-272 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK: Offset: [SP-16-272 x vscale], Type: Spill, Align: 16, Size: 8
+; CHECK: Offset: [SP-24-272 x vscale], Type: Spill, Align: 8, Size: 8
+; CHECK: Offset: [SP-32-272 x vscale], Type: Spill, Align: 8, Size: 8
+; CHECK: Offset: [SP-32-288 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK: Offset: [SP-48-288 x vscale], Type: Variable, Align: 8, Size: 16
+
+declare void @g10(ptr,ptr)
+define void @f10(i64 %n, <vscale x 2 x i64> %x) "frame-pointer"="all" {
+  %p1 = alloca [2 x i64]
+  %p2 = alloca <vscale x 2 x i64>
+  call void @g10(ptr %p1, ptr %p2)
+  ret void
+}



More information about the llvm-commits mailing list