[llvm] 85c53c7 - Revert "[AArch64] Async unwind - function prologues"

Hans Wennborg via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 4 08:36:50 PST 2022


Author: Hans Wennborg
Date: 2022-03-04T17:36:26+01:00
New Revision: 85c53c7092176e5f611fe620867eb2b82a30c80d

URL: https://github.com/llvm/llvm-project/commit/85c53c7092176e5f611fe620867eb2b82a30c80d
DIFF: https://github.com/llvm/llvm-project/commit/85c53c7092176e5f611fe620867eb2b82a30c80d.diff

LOG: Revert "[AArch64] Async unwind - function prologues"

It caused builds to assert with:

  (StackSize == 0 && "We already have the CFA offset!"),
  function generateCompactUnwindEncoding, file AArch64AsmBackend.cpp, line 624.

when targeting iOS. See the comment on the code review for a reproducer.

> This patch rearranges emission of CFI instructions, so the resulting
> DWARF and `.eh_frame` information is precise at every instruction.
>
> The current state is that the unwind info is emitted only after the
> function prologue. This is fine for synchronous (e.g. C++) exceptions,
> but the information is generally incorrect when the program counter is
> at an instruction in the prologue or the epilogue, for example:
>
> ```
> stp     x29, x30, [sp, #-16]!           // 16-byte Folded Spill
> mov     x29, sp
> .cfi_def_cfa w29, 16
> ...
> ```
>
> after the `stp` is executed, the (initial) rule still says the CFA is at
> `sp`, even though `sp` has already been moved down by 16 bytes.
>
> Correct unwind info could look like:
> ```
> stp     x29, x30, [sp, #-16]!           // 16-byte Folded Spill
> .cfi_def_cfa_offset 16
> mov     x29, sp
> .cfi_def_cfa w29, 16
> ...
> ```
>
> Having this information precise at every instruction is useful for
> sampling profilers that want to take a stack backtrace. The end goal
> (towards which this patch is just a step) is to have fully working
> `-fasynchronous-unwind-tables`.
>
> Reviewed By: danielkiss, MaskRay
>
> Differential Revision: https://reviews.llvm.org/D111411

This reverts commit 32e8b550e5439c7e4aafa73894faffd5f25d0d05.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
    llvm/lib/Target/AArch64/AArch64FrameLowering.h
    llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
    llvm/lib/Target/AArch64/AArch64InstrInfo.h
    llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
    llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
    llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
    llvm/test/CodeGen/AArch64/aarch64-mops.ll
    llvm/test/CodeGen/AArch64/active_lane_mask.ll
    llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
    llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
    llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
    llvm/test/CodeGen/AArch64/arm64-fp128.ll
    llvm/test/CodeGen/AArch64/arm64-large-frame.ll
    llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
    llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
    llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
    llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
    llvm/test/CodeGen/AArch64/cmp-select-sign.ll
    llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
    llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
    llvm/test/CodeGen/AArch64/csr-split.ll
    llvm/test/CodeGen/AArch64/fastcc.ll
    llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
    llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
    llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
    llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
    llvm/test/CodeGen/AArch64/framelayout-sve.mir
    llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
    llvm/test/CodeGen/AArch64/isinf.ll
    llvm/test/CodeGen/AArch64/large-stack-cmp.ll
    llvm/test/CodeGen/AArch64/large-stack.ll
    llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
    llvm/test/CodeGen/AArch64/local_vars.ll
    llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
    llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll
    llvm/test/CodeGen/AArch64/neg-imm.ll
    llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
    llvm/test/CodeGen/AArch64/settag.ll
    llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
    llvm/test/CodeGen/AArch64/sibling-call.ll
    llvm/test/CodeGen/AArch64/split-vector-insert.ll
    llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
    llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
    llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
    llvm/test/CodeGen/AArch64/sve-alloca.ll
    llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
    llvm/test/CodeGen/AArch64/sve-insert-element.ll
    llvm/test/CodeGen/AArch64/sve-insert-vector.ll
    llvm/test/CodeGen/AArch64/sve-ldnf1.mir
    llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
    llvm/test/CodeGen/AArch64/sve-pred-arith.ll
    llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
    llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
    llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll
    llvm/test/CodeGen/AArch64/sve-trunc.ll
    llvm/test/CodeGen/AArch64/swifterror.ll
    llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
    llvm/test/CodeGen/AArch64/unwind-preserved.ll
    llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 0ddfa9715f90a..5057cab7e7e52 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -117,72 +117,6 @@
 //
 // FIXME: also explain the redzone concept.
 //
-// An example of the prologue:
-//
-//     .globl __foo
-//     .align 2
-//  __foo:
-// Ltmp0:
-//     .cfi_startproc
-//     .cfi_personality 155, ___gxx_personality_v0
-// Leh_func_begin:
-//     .cfi_lsda 16, Lexception33
-//
-//     stp  xa,bx, [sp, -#offset]!
-//     ...
-//     stp  x28, x27, [sp, #offset-32]
-//     stp  fp, lr, [sp, #offset-16]
-//     add  fp, sp, #offset - 16
-//     sub  sp, sp, #1360
-//
-// The Stack:
-//       +-------------------------------------------+
-// 10000 | ........ | ........ | ........ | ........ |
-// 10004 | ........ | ........ | ........ | ........ |
-//       +-------------------------------------------+
-// 10008 | ........ | ........ | ........ | ........ |
-// 1000c | ........ | ........ | ........ | ........ |
-//       +===========================================+
-// 10010 |                X28 Register               |
-// 10014 |                X28 Register               |
-//       +-------------------------------------------+
-// 10018 |                X27 Register               |
-// 1001c |                X27 Register               |
-//       +===========================================+
-// 10020 |                Frame Pointer              |
-// 10024 |                Frame Pointer              |
-//       +-------------------------------------------+
-// 10028 |                Link Register              |
-// 1002c |                Link Register              |
-//       +===========================================+
-// 10030 | ........ | ........ | ........ | ........ |
-// 10034 | ........ | ........ | ........ | ........ |
-//       +-------------------------------------------+
-// 10038 | ........ | ........ | ........ | ........ |
-// 1003c | ........ | ........ | ........ | ........ |
-//       +-------------------------------------------+
-//
-//     [sp] = 10030        ::    >>initial value<<
-//     sp = 10020          ::  stp fp, lr, [sp, #-16]!
-//     fp = sp == 10020    ::  mov fp, sp
-//     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
-//     sp == 10010         ::    >>final value<<
-//
-// The frame pointer (w29) points to address 10020. If we use an offset of
-// '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
-// for w27, and -32 for w28:
-//
-//  Ltmp1:
-//     .cfi_def_cfa w29, 16
-//  Ltmp2:
-//     .cfi_offset w30, -8
-//  Ltmp3:
-//     .cfi_offset w29, -16
-//  Ltmp4:
-//     .cfi_offset w27, -24
-//  Ltmp5:
-//     .cfi_offset w28, -32
-//
 //===----------------------------------------------------------------------===//
 
 #include "AArch64FrameLowering.h"
@@ -220,6 +154,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LEB128.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetMachine.h"
@@ -505,81 +440,140 @@ MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
   return MBB.erase(I);
 }
 
-void AArch64FrameLowering::emitCalleeSavedGPRLocations(
-    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
-  MachineFunction &MF = *MBB.getParent();
-  MachineFrameInfo &MFI = MF.getFrameInfo();
+// Convenience function to create a DWARF expression for
+//   Expr + NumBytes + NumVGScaledBytes * AArch64::VG
+static void appendVGScaledOffsetExpr(SmallVectorImpl<char> &Expr,
+                                     int NumBytes, int NumVGScaledBytes, unsigned VG,
+                                     llvm::raw_string_ostream &Comment) {
+  uint8_t buffer[16];
 
-  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
-  if (CSI.empty())
-    return;
+  if (NumBytes) {
+    Expr.push_back(dwarf::DW_OP_consts);
+    Expr.append(buffer, buffer + encodeSLEB128(NumBytes, buffer));
+    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+    Comment << (NumBytes < 0 ? " - " : " + ") << std::abs(NumBytes);
+  }
 
-  const TargetSubtargetInfo &STI = MF.getSubtarget();
-  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
-  const TargetInstrInfo &TII = *STI.getInstrInfo();
-  DebugLoc DL = MBB.findDebugLoc(MBBI);
+  if (NumVGScaledBytes) {
+    Expr.push_back((uint8_t)dwarf::DW_OP_consts);
+    Expr.append(buffer, buffer + encodeSLEB128(NumVGScaledBytes, buffer));
 
-  for (const auto &Info : CSI) {
-    if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector)
-      continue;
+    Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
+    Expr.append(buffer, buffer + encodeULEB128(VG, buffer));
+    Expr.push_back(0);
 
-    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
-    unsigned DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);
+    Expr.push_back((uint8_t)dwarf::DW_OP_mul);
+    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
 
-    int64_t Offset =
-        MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
-    unsigned CFIIndex = MF.addFrameInst(
-        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
-    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
-        .addCFIIndex(CFIIndex)
-        .setMIFlags(MachineInstr::FrameSetup);
+    Comment << (NumVGScaledBytes < 0 ? " - " : " + ")
+            << std::abs(NumVGScaledBytes) << " * VG";
   }
 }
 
-void AArch64FrameLowering::emitCalleeSavedSVELocations(
+// Creates an MCCFIInstruction:
+//    { DW_CFA_def_cfa_expression, ULEB128 (sizeof expr), expr }
+MCCFIInstruction AArch64FrameLowering::createDefCFAExpressionFromSP(
+    const TargetRegisterInfo &TRI, const StackOffset &OffsetFromSP) const {
+  int64_t NumBytes, NumVGScaledBytes;
+  AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(OffsetFromSP, NumBytes,
+                                                        NumVGScaledBytes);
+
+  std::string CommentBuffer = "sp";
+  llvm::raw_string_ostream Comment(CommentBuffer);
+
+  // Build up the expression (SP + NumBytes + NumVGScaledBytes * AArch64::VG)
+  SmallString<64> Expr;
+  Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + /*SP*/ 31));
+  Expr.push_back(0);
+  appendVGScaledOffsetExpr(Expr, NumBytes, NumVGScaledBytes,
+                           TRI.getDwarfRegNum(AArch64::VG, true), Comment);
+
+  // Wrap this into DW_CFA_def_cfa.
+  SmallString<64> DefCfaExpr;
+  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
+  uint8_t buffer[16];
+  DefCfaExpr.append(buffer,
+                    buffer + encodeULEB128(Expr.size(), buffer));
+  DefCfaExpr.append(Expr.str());
+  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(),
+                                        Comment.str());
+}
+
+MCCFIInstruction AArch64FrameLowering::createCfaOffset(
+    const TargetRegisterInfo &TRI, unsigned Reg,
+    const StackOffset &OffsetFromDefCFA) const {
+  int64_t NumBytes, NumVGScaledBytes;
+  AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
+      OffsetFromDefCFA, NumBytes, NumVGScaledBytes);
+
+  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
+
+  // Non-scalable offsets can use DW_CFA_offset directly.
+  if (!NumVGScaledBytes)
+    return MCCFIInstruction::createOffset(nullptr, DwarfReg, NumBytes);
+
+  std::string CommentBuffer;
+  llvm::raw_string_ostream Comment(CommentBuffer);
+  Comment << printReg(Reg, &TRI) << "  @ cfa";
+
+  // Build up expression (NumBytes + NumVGScaledBytes * AArch64::VG)
+  SmallString<64> OffsetExpr;
+  appendVGScaledOffsetExpr(OffsetExpr, NumBytes, NumVGScaledBytes,
+                           TRI.getDwarfRegNum(AArch64::VG, true), Comment);
+
+  // Wrap this into DW_CFA_expression
+  SmallString<64> CfaExpr;
+  CfaExpr.push_back(dwarf::DW_CFA_expression);
+  uint8_t buffer[16];
+  CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
+  CfaExpr.append(buffer, buffer + encodeULEB128(OffsetExpr.size(), buffer));
+  CfaExpr.append(OffsetExpr.str());
+
+  return MCCFIInstruction::createEscape(nullptr, CfaExpr.str(), Comment.str());
+}
+
+void AArch64FrameLowering::emitCalleeSavedFrameMoves(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
   MachineFunction &MF = *MBB.getParent();
   MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetSubtargetInfo &STI = MF.getSubtarget();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+  const TargetInstrInfo *TII = STI.getInstrInfo();
+  DebugLoc DL = MBB.findDebugLoc(MBBI);
 
   // Add callee saved registers to move list.
   const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
   if (CSI.empty())
     return;
 
-  const TargetSubtargetInfo &STI = MF.getSubtarget();
-  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
-  const TargetInstrInfo &TII = *STI.getInstrInfo();
-  DebugLoc DL = MBB.findDebugLoc(MBBI);
-  AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
-
   for (const auto &Info : CSI) {
-    if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
-      continue;
+    Register Reg = Info.getReg();
 
     // Not all unwinders may know about SVE registers, so assume the lowest
     // common demoninator.
-    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
-    unsigned Reg = Info.getReg();
-    if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
+    unsigned NewReg;
+    if (static_cast<const AArch64RegisterInfo *>(TRI)->regNeedsCFI(Reg, NewReg))
+      Reg = NewReg;
+    else
       continue;
 
-    StackOffset Offset =
-        StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) -
-        StackOffset::getFixed(AFI.getCalleeSavedStackSize(MFI));
-
-    unsigned CFIIndex = MF.addFrameInst(createCFAOffset(TRI, Reg, Offset));
-    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+    StackOffset Offset;
+    if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector) {
+      AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+      Offset =
+          StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) -
+          StackOffset::getFixed(AFI->getCalleeSavedStackSize(MFI));
+    } else {
+      Offset = StackOffset::getFixed(MFI.getObjectOffset(Info.getFrameIdx()) -
+                                     getOffsetOfLocalArea());
+    }
+    unsigned CFIIndex = MF.addFrameInst(createCfaOffset(*TRI, Reg, Offset));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
         .addCFIIndex(CFIIndex)
         .setMIFlags(MachineInstr::FrameSetup);
   }
 }
 
-void AArch64FrameLowering::emitCalleeSavedFrameMoves(
-    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
-  emitCalleeSavedGPRLocations(MBB, MBBI);
-  emitCalleeSavedSVELocations(MBB, MBBI);
-}
-
 // Find a scratch register that we can use at the start of the prologue to
 // re-align the stack pointer.  We avoid using callee-save registers since they
 // may appear to be free when this is called from canUseAsPrologue (during
@@ -887,7 +881,7 @@ static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
 static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
     const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
-    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, bool InProlog = true) {
+    bool NeedsWinCFI, bool *HasWinCFI, bool InProlog = true) {
   unsigned NewOpc;
   switch (MBBI->getOpcode()) {
   default:
@@ -946,15 +940,12 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
 
   // If the first store isn't right where we want SP then we can't fold the
   // update in so create a normal arithmetic instruction instead.
-  MachineFunction &MF = *MBB.getParent();
   if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
       CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
     emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                     StackOffset::getFixed(CSStackSizeInc), TII,
                     InProlog ? MachineInstr::FrameSetup
-                             : MachineInstr::FrameDestroy,
-                    false, false, nullptr, EmitCFI && InProlog);
-
+                             : MachineInstr::FrameDestroy);
     return std::prev(MBBI);
   }
 
@@ -985,14 +976,6 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
               InProlog ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy);
   }
 
-  if (EmitCFI && InProlog) {
-    unsigned CFIIndex = MF.addFrameInst(
-        MCCFIInstruction::cfiDefCfaOffset(nullptr, -CSStackSizeInc));
-    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
-        .addCFIIndex(CFIIndex)
-        .setMIFlags(MachineInstr::FrameSetup);
-  }
-
   return std::prev(MBB.erase(MBBI));
 }
 
@@ -1297,16 +1280,14 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
     assert(!SVEStackSize && "Cannot combine SP bump with SVE");
     emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                     StackOffset::getFixed(-NumBytes), TII,
-                    MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI,
-                    EmitCFI);
+                    MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
     NumBytes = 0;
   } else if (HomPrologEpilog) {
     // Stack has been already adjusted.
     NumBytes -= PrologueSaveSize;
   } else if (PrologueSaveSize != 0) {
     MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
-        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
-        EmitCFI);
+        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI);
     NumBytes -= PrologueSaveSize;
   }
   assert(NumBytes >= 0 && "Negative stack allocation size!?");
@@ -1360,27 +1341,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                       StackOffset::getFixed(FPOffset), TII,
                       MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
     }
-    if (EmitCFI) {
-      // Define the current CFA rule to use the provided FP.
-      const int OffsetToFirstCalleeSaveFromFP =
-          AFI->getCalleeSaveBaseToFrameRecordOffset() -
-          AFI->getCalleeSavedStackSize();
-      Register FramePtr = RegInfo->getFrameRegister(MF);
-      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
-      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
-          nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
-      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
-          .addCFIIndex(CFIIndex)
-          .setMIFlags(MachineInstr::FrameSetup);
-    }
   }
 
-  // Now emit the moves for whatever callee saved regs we have (including FP,
-  // LR if those are saved). Frame instructions for SVE register are emitted
-  // later, after the instruction which actually save SVE regs.
-  if (EmitCFI)
-    emitCalleeSavedGPRLocations(MBB, MBBI);
-
   if (windowsRequiresStackProbe(MF, NumBytes)) {
     uint64_t NumWords = NumBytes >> 4;
     if (NeedsWinCFI) {
@@ -1493,21 +1455,14 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
   }
 
   // Allocate space for the callee saves (if any).
-  emitFrameOffset(
-      MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP, -AllocateBefore, TII,
-      MachineInstr::FrameSetup, false, false, nullptr,
-      EmitCFI && !HasFP && AllocateBefore,
-      StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes));
-
-  if (EmitCFI)
-    emitCalleeSavedSVELocations(MBB, CalleeSavesEnd);
+  emitFrameOffset(MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP,
+                  -AllocateBefore, TII,
+                  MachineInstr::FrameSetup);
 
   // Finally allocate remaining SVE stack space.
   emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP,
-                  -AllocateAfter, TII, MachineInstr::FrameSetup, false, false,
-                  nullptr, EmitCFI && !HasFP && AllocateAfter,
-                  AllocateBefore + StackOffset::getFixed(
-                                       (int64_t)MFI.getStackSize() - NumBytes));
+                  -AllocateAfter, TII,
+                  MachineInstr::FrameSetup);
 
   // Allocate space for the rest of the frame.
   if (NumBytes) {
@@ -1522,17 +1477,14 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
     }
 
     // If we're a leaf function, try using the red zone.
-    if (!canUseRedZone(MF)) {
+    if (!canUseRedZone(MF))
       // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
       // the correct value here, as NumBytes also includes padding bytes,
       // which shouldn't be counted here.
-      emitFrameOffset(
-          MBB, MBBI, DL, scratchSPReg, AArch64::SP,
-          StackOffset::getFixed(-NumBytes), TII, MachineInstr::FrameSetup,
-          false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
-          SVEStackSize +
-              StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes));
-    }
+      emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP,
+                      StackOffset::getFixed(-NumBytes), TII,
+                      MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
+
     if (NeedsRealignment) {
       const unsigned NrBitsToZero = Log2(MFI.getMaxAlign());
       assert(NrBitsToZero > 1);
@@ -1599,6 +1551,109 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
       MBB.addLiveIn(AArch64::X1);
     }
   }
+
+  if (EmitCFI) {
+    // An example of the prologue:
+    //
+    //     .globl __foo
+    //     .align 2
+    //  __foo:
+    // Ltmp0:
+    //     .cfi_startproc
+    //     .cfi_personality 155, ___gxx_personality_v0
+    // Leh_func_begin:
+    //     .cfi_lsda 16, Lexception33
+    //
+    //     stp  xa,bx, [sp, -#offset]!
+    //     ...
+    //     stp  x28, x27, [sp, #offset-32]
+    //     stp  fp, lr, [sp, #offset-16]
+    //     add  fp, sp, #offset - 16
+    //     sub  sp, sp, #1360
+    //
+    // The Stack:
+    //       +-------------------------------------------+
+    // 10000 | ........ | ........ | ........ | ........ |
+    // 10004 | ........ | ........ | ........ | ........ |
+    //       +-------------------------------------------+
+    // 10008 | ........ | ........ | ........ | ........ |
+    // 1000c | ........ | ........ | ........ | ........ |
+    //       +===========================================+
+    // 10010 |                X28 Register               |
+    // 10014 |                X28 Register               |
+    //       +-------------------------------------------+
+    // 10018 |                X27 Register               |
+    // 1001c |                X27 Register               |
+    //       +===========================================+
+    // 10020 |                Frame Pointer              |
+    // 10024 |                Frame Pointer              |
+    //       +-------------------------------------------+
+    // 10028 |                Link Register              |
+    // 1002c |                Link Register              |
+    //       +===========================================+
+    // 10030 | ........ | ........ | ........ | ........ |
+    // 10034 | ........ | ........ | ........ | ........ |
+    //       +-------------------------------------------+
+    // 10038 | ........ | ........ | ........ | ........ |
+    // 1003c | ........ | ........ | ........ | ........ |
+    //       +-------------------------------------------+
+    //
+    //     [sp] = 10030        ::    >>initial value<<
+    //     sp = 10020          ::  stp fp, lr, [sp, #-16]!
+    //     fp = sp == 10020    ::  mov fp, sp
+    //     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
+    //     sp == 10010         ::    >>final value<<
+    //
+    // The frame pointer (w29) points to address 10020. If we use an offset of
+    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
+    // for w27, and -32 for w28:
+    //
+    //  Ltmp1:
+    //     .cfi_def_cfa w29, 16
+    //  Ltmp2:
+    //     .cfi_offset w30, -8
+    //  Ltmp3:
+    //     .cfi_offset w29, -16
+    //  Ltmp4:
+    //     .cfi_offset w27, -24
+    //  Ltmp5:
+    //     .cfi_offset w28, -32
+
+    if (HasFP) {
+      const int OffsetToFirstCalleeSaveFromFP =
+          AFI->getCalleeSaveBaseToFrameRecordOffset() -
+          AFI->getCalleeSavedStackSize();
+      Register FramePtr = RegInfo->getFrameRegister(MF);
+
+      // Define the current CFA rule to use the provided FP.
+      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
+      unsigned CFIIndex = MF.addFrameInst(
+          MCCFIInstruction::cfiDefCfa(nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
+      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex)
+          .setMIFlags(MachineInstr::FrameSetup);
+    } else {
+      unsigned CFIIndex;
+      if (SVEStackSize) {
+        const TargetSubtargetInfo &STI = MF.getSubtarget();
+        const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+        StackOffset TotalSize =
+            SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize());
+        CFIIndex = MF.addFrameInst(createDefCFAExpressionFromSP(TRI, TotalSize));
+      } else {
+        // Encode the stack size of the leaf function.
+        CFIIndex = MF.addFrameInst(
+            MCCFIInstruction::cfiDefCfaOffset(nullptr, MFI.getStackSize()));
+      }
+      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex)
+          .setMIFlags(MachineInstr::FrameSetup);
+    }
+
+    // Now emit the moves for whatever callee saved regs we have (including FP,
+    // LR if those are saved).
+    emitCalleeSavedFrameMoves(MBB, MBBI);
+  }
 }
 
 static void InsertReturnAddressAuth(MachineFunction &MF,
@@ -1751,9 +1806,8 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
     // allocate more stack for arguments (in space that an untimely interrupt
     // may clobber), convert it to a post-index ldp.
     if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0)
-      convertCalleeSaveRestoreToSPPrePostIncDec(MBB, Pop, DL, TII,
-                                                PrologueSaveSize, NeedsWinCFI,
-                                                &HasWinCFI, false, false);
+      convertCalleeSaveRestoreToSPPrePostIncDec(
+          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, false);
     else {
       // If not, make sure to emit an add after the last ldp.
       // We're doing this by transfering the size to be restored from the

diff  --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index 1eb13d0588345..31f57cbc49f2b 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -141,12 +141,13 @@ class AArch64FrameLowering : public TargetFrameLowering {
   int64_t assignSVEStackObjectOffsets(MachineFrameInfo &MF,
                                       int &MinCSFrameIndex,
                                       int &MaxCSFrameIndex) const;
+  MCCFIInstruction
+  createDefCFAExpressionFromSP(const TargetRegisterInfo &TRI,
+                               const StackOffset &OffsetFromSP) const;
+  MCCFIInstruction createCfaOffset(const TargetRegisterInfo &MRI, unsigned DwarfReg,
+                                   const StackOffset &OffsetFromDefCFA) const;
   bool shouldCombineCSRLocalStackBumpInEpilogue(MachineBasicBlock &MBB,
                                                 unsigned StackBumpBytes) const;
-  void emitCalleeSavedGPRLocations(MachineBasicBlock &MBB,
-                                   MachineBasicBlock::iterator MBBI) const;
-  void emitCalleeSavedSVELocations(MachineBasicBlock &MBB,
-                                   MachineBasicBlock::iterator MBBI) const;
 };
 
 } // End llvm namespace

diff  --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index f10e20f551819..74a2b7d9a3a86 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -42,7 +42,6 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/LEB128.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
@@ -4079,118 +4078,6 @@ void AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
   }
 }
 
-// Convenience function to create a DWARF expression for
-//   Expr + NumBytes + NumVGScaledBytes * AArch64::VG
-static void appendVGScaledOffsetExpr(SmallVectorImpl<char> &Expr, int NumBytes,
-                                     int NumVGScaledBytes, unsigned VG,
-                                     llvm::raw_string_ostream &Comment) {
-  uint8_t buffer[16];
-
-  if (NumBytes) {
-    Expr.push_back(dwarf::DW_OP_consts);
-    Expr.append(buffer, buffer + encodeSLEB128(NumBytes, buffer));
-    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
-    Comment << (NumBytes < 0 ? " - " : " + ") << std::abs(NumBytes);
-  }
-
-  if (NumVGScaledBytes) {
-    Expr.push_back((uint8_t)dwarf::DW_OP_consts);
-    Expr.append(buffer, buffer + encodeSLEB128(NumVGScaledBytes, buffer));
-
-    Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
-    Expr.append(buffer, buffer + encodeULEB128(VG, buffer));
-    Expr.push_back(0);
-
-    Expr.push_back((uint8_t)dwarf::DW_OP_mul);
-    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
-
-    Comment << (NumVGScaledBytes < 0 ? " - " : " + ")
-            << std::abs(NumVGScaledBytes) << " * VG";
-  }
-}
-
-// Creates an MCCFIInstruction:
-//    { DW_CFA_def_cfa_expression, ULEB128 (sizeof expr), expr }
-static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
-                                               unsigned Reg,
-                                               const StackOffset &Offset) {
-  int64_t NumBytes, NumVGScaledBytes;
-  AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(Offset, NumBytes,
-                                                        NumVGScaledBytes);
-  std::string CommentBuffer;
-  llvm::raw_string_ostream Comment(CommentBuffer);
-
-  if (Reg == AArch64::SP)
-    Comment << "sp";
-  else if (Reg == AArch64::FP)
-    Comment << "fp";
-  else
-    Comment << printReg(Reg, &TRI);
-
-  // Build up the expression (Reg + NumBytes + NumVGScaledBytes * AArch64::VG)
-  SmallString<64> Expr;
-  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
-  Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
-  Expr.push_back(0);
-  appendVGScaledOffsetExpr(Expr, NumBytes, NumVGScaledBytes,
-                           TRI.getDwarfRegNum(AArch64::VG, true), Comment);
-
-  // Wrap this into DW_CFA_def_cfa.
-  SmallString<64> DefCfaExpr;
-  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
-  uint8_t buffer[16];
-  DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
-  DefCfaExpr.append(Expr.str());
-  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(),
-                                        Comment.str());
-}
-
-MCCFIInstruction llvm::createDefCFA(const TargetRegisterInfo &TRI,
-                                    unsigned FrameReg, unsigned Reg,
-                                    const StackOffset &Offset) {
-  if (Offset.getScalable())
-    return createDefCFAExpression(TRI, Reg, Offset);
-
-  if (FrameReg == Reg)
-    return MCCFIInstruction::cfiDefCfaOffset(nullptr, int(Offset.getFixed()));
-
-  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
-  return MCCFIInstruction::cfiDefCfa(nullptr, DwarfReg, (int)Offset.getFixed());
-}
-
-MCCFIInstruction llvm::createCFAOffset(const TargetRegisterInfo &TRI,
-                                       unsigned Reg,
-                                       const StackOffset &OffsetFromDefCFA) {
-  int64_t NumBytes, NumVGScaledBytes;
-  AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
-      OffsetFromDefCFA, NumBytes, NumVGScaledBytes);
-
-  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
-
-  // Non-scalable offsets can use DW_CFA_offset directly.
-  if (!NumVGScaledBytes)
-    return MCCFIInstruction::createOffset(nullptr, DwarfReg, NumBytes);
-
-  std::string CommentBuffer;
-  llvm::raw_string_ostream Comment(CommentBuffer);
-  Comment << printReg(Reg, &TRI) << "  @ cfa";
-
-  // Build up expression (NumBytes + NumVGScaledBytes * AArch64::VG)
-  SmallString<64> OffsetExpr;
-  appendVGScaledOffsetExpr(OffsetExpr, NumBytes, NumVGScaledBytes,
-                           TRI.getDwarfRegNum(AArch64::VG, true), Comment);
-
-  // Wrap this into DW_CFA_expression
-  SmallString<64> CfaExpr;
-  CfaExpr.push_back(dwarf::DW_CFA_expression);
-  uint8_t buffer[16];
-  CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
-  CfaExpr.append(buffer, buffer + encodeULEB128(OffsetExpr.size(), buffer));
-  CfaExpr.append(OffsetExpr.str());
-
-  return MCCFIInstruction::createEscape(nullptr, CfaExpr.str(), Comment.str());
-}
-
 // Helper function to emit a frame offset adjustment from a given
 // pointer (SrcReg), stored into DestReg. This function is explicit
 // in that it requires the opcode.
@@ -4200,8 +4087,7 @@ static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
                                unsigned SrcReg, int64_t Offset, unsigned Opc,
                                const TargetInstrInfo *TII,
                                MachineInstr::MIFlag Flag, bool NeedsWinCFI,
-                               bool *HasWinCFI, bool EmitCFAOffset,
-                               StackOffset CFAOffset, unsigned FrameReg) {
+                               bool *HasWinCFI) {
   int Sign = 1;
   unsigned MaxEncoding, ShiftSize;
   switch (Opc) {
@@ -4226,13 +4112,6 @@ static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
     llvm_unreachable("Unsupported opcode");
   }
 
-  // `Offset` can be in bytes or in "scalable bytes".
-  int VScale = 1;
-  if (Opc == AArch64::ADDVL_XXI)
-    VScale = 16;
-  else if (Opc == AArch64::ADDPL_XXI)
-    VScale = 2;
-
   // FIXME: If the offset won't fit in 24-bits, compute the offset into a
   // scratch register.  If DestReg is a virtual register, use it as the
   // scratch register; otherwise, create a new virtual register (to be
@@ -4270,26 +4149,6 @@ static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
           AArch64_AM::getShifterImm(AArch64_AM::LSL, LocalShiftSize));
     MBI = MBI.setMIFlag(Flag);
 
-    auto Change =
-        VScale == 1
-            ? StackOffset::getFixed(ThisVal << LocalShiftSize)
-            : StackOffset::getScalable(VScale * (ThisVal << LocalShiftSize));
-    if (Sign == -1 || Opc == AArch64::SUBXri || Opc == AArch64::SUBSXri)
-      CFAOffset += Change;
-    else
-      CFAOffset -= Change;
-    if (EmitCFAOffset && DestReg == TmpReg) {
-      MachineFunction &MF = *MBB.getParent();
-      const TargetSubtargetInfo &STI = MF.getSubtarget();
-      const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
-
-      unsigned CFIIndex =
-          MF.addFrameInst(createDefCFA(TRI, FrameReg, DestReg, CFAOffset));
-      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
-          .addCFIIndex(CFIIndex)
-          .setMIFlags(Flag);
-    }
-
     if (NeedsWinCFI) {
       assert(Sign == 1 && "SEH directives should always have a positive sign");
       int Imm = (int)(ThisVal << LocalShiftSize);
@@ -4326,9 +4185,7 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                            unsigned DestReg, unsigned SrcReg,
                            StackOffset Offset, const TargetInstrInfo *TII,
                            MachineInstr::MIFlag Flag, bool SetNZCV,
-                           bool NeedsWinCFI, bool *HasWinCFI,
-                           bool EmitCFAOffset, StackOffset CFAOffset,
-                           unsigned FrameReg) {
+                           bool NeedsWinCFI, bool *HasWinCFI) {
   int64_t Bytes, NumPredicateVectors, NumDataVectors;
   AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
       Offset, Bytes, NumPredicateVectors, NumDataVectors);
@@ -4343,13 +4200,8 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
       Opc = SetNZCV ? AArch64::SUBSXri : AArch64::SUBXri;
     }
     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, Bytes, Opc, TII, Flag,
-                       NeedsWinCFI, HasWinCFI, EmitCFAOffset, CFAOffset,
-                       FrameReg);
-    CFAOffset += (Opc == AArch64::ADDXri || Opc == AArch64::ADDSXri)
-                     ? StackOffset::getFixed(-Bytes)
-                     : StackOffset::getFixed(Bytes);
+                       NeedsWinCFI, HasWinCFI);
     SrcReg = DestReg;
-    FrameReg = DestReg;
   }
 
   assert(!(SetNZCV && (NumPredicateVectors || NumDataVectors)) &&
@@ -4359,17 +4211,14 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
 
   if (NumDataVectors) {
     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumDataVectors,
-                       AArch64::ADDVL_XXI, TII, Flag, NeedsWinCFI, nullptr,
-                       EmitCFAOffset, CFAOffset, FrameReg);
-    CFAOffset += StackOffset::getScalable(-NumDataVectors * 16);
+                       AArch64::ADDVL_XXI, TII, Flag, NeedsWinCFI, nullptr);
     SrcReg = DestReg;
   }
 
   if (NumPredicateVectors) {
     assert(DestReg != AArch64::SP && "Unaligned access to SP");
     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumPredicateVectors,
-                       AArch64::ADDPL_XXI, TII, Flag, NeedsWinCFI, nullptr,
-                       EmitCFAOffset, CFAOffset, FrameReg);
+                       AArch64::ADDPL_XXI, TII, Flag, NeedsWinCFI, nullptr);
   }
 }
 

diff  --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index eca96c79d2821..55b1813f0b301 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -395,11 +395,6 @@ bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                      const MachineInstr &UseMI,
                                      const TargetRegisterInfo *TRI);
 
-MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
-                              unsigned Reg, const StackOffset &Offset);
-MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
-                                 const StackOffset &OffsetFromDefCFA);
-
 /// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
 /// plus Offset.  This is intended to be used from within the prolog/epilog
 /// insertion (PEI) pass, where a virtual scratch register may be allocated
@@ -409,9 +404,7 @@ void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                      StackOffset Offset, const TargetInstrInfo *TII,
                      MachineInstr::MIFlag = MachineInstr::NoFlags,
                      bool SetNZCV = false, bool NeedsWinCFI = false,
-                     bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
-                     StackOffset InitialOffset = {},
-                     unsigned FrameReg = AArch64::SP);
+                     bool *HasWinCFI = nullptr);
 
 /// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
 /// FP. Return false if the offset could not be handled directly in MI, and

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
index 042264793c2c3..30eb6922af525 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
@@ -7,8 +7,8 @@ define void @call_byval_i32(i32* %incoming) {
 ; CHECK-LABEL: call_byval_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    str w8, [sp]
@@ -26,7 +26,6 @@ define void @call_byval_a64i32([64 x i32]* %incoming) {
 ; CHECK-LABEL: call_byval_a64i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #288
-; CHECK-NEXT:    .cfi_def_cfa_offset 288
 ; CHECK-NEXT:    stp x29, x30, [sp, #256] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x28, [sp, #272] // 8-byte Folded Spill
 ; CHECK-NEXT:    add x29, sp, #256

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
index 8cb0d5401b7de..44c0854ea03d8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
@@ -31,13 +31,13 @@ define i32 @test_musttail_variadic_spill(i32 %arg0, ...) {
 ; CHECK-LABEL: test_musttail_variadic_spill:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #224
-; CHECK-NEXT:    .cfi_def_cfa_offset 224
 ; CHECK-NEXT:    stp x28, x27, [sp, #128] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x26, x25, [sp, #144] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #160] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #176] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #192] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #208] ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 224
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_offset w19, -24
@@ -103,13 +103,13 @@ define void @f_thunk(i8* %this, ...) {
 ; CHECK-LABEL: f_thunk:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #256
-; CHECK-NEXT:    .cfi_def_cfa_offset 256
 ; CHECK-NEXT:    stp x28, x27, [sp, #160] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x26, x25, [sp, #176] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #192] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #208] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #224] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #240] ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 256
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_offset w19, -24

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
index 2f8b6273ca9ff..cdb55d9c15c77 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
@@ -99,9 +99,9 @@ entry:
 ; CHECK: .cfi_startproc
 ;   Check that used callee-saved registers are saved
 ; CHECK: sub	sp, sp, #32
-; CHECK: .cfi_def_cfa_offset 32
 ; CHECK: stp	x30, x19, [sp, #16]
 ;   Check correctness of cfi pseudo-instructions
+; CHECK: .cfi_def_cfa_offset 32
 ; CHECK: .cfi_offset w19, -8
 ; CHECK: .cfi_offset w30, -16
 ;   Check correct access to arguments passed on the stack, through stack pointer
@@ -118,7 +118,6 @@ entry:
 ; CHECK-MACHO: .cfi_startproc
 ;   Check that used callee-saved registers are saved
 ; CHECK-MACHO: sub	sp, sp, #48
-; CHECK-MACHO:.cfi_def_cfa_offset 48
 ; CHECK-MACHO: stp	x20, x19, [sp, #16]
 ;   Check that the frame pointer is created:
 ; CHECK-MACHO: stp	x29, x30, [sp, #32]
@@ -182,18 +181,17 @@ entry:
 ; CHECK: .cfi_startproc
 ;   Check that used callee-saved registers are saved
 ; CHECK: stp	x29, x30, [sp, #-32]!
-; CHECK: .cfi_def_cfa_offset 32
 ;   Check that the frame pointer is created:
 ; CHECK: str	x19, [sp, #16]
 ; CHECK: mov	x29, sp
+;   Check the dynamic realignment of the stack pointer to a 128-byte boundary
+; CHECK: sub	x9, sp, #96
+; CHECK: and	sp, x9, #0xffffffffffffff80
 ;   Check correctness of cfi pseudo-instructions
 ; CHECK: .cfi_def_cfa w29, 32
 ; CHECK: .cfi_offset w19, -16
 ; CHECK: .cfi_offset w30, -24
 ; CHECK: .cfi_offset w29, -32
-;   Check the dynamic realignment of the stack pointer to a 128-byte boundary
-; CHECK: sub	x9, sp, #96
-; CHECK: and	sp, x9, #0xffffffffffffff80
 ;   Check correct access to arguments passed on the stack, through frame pointer
 ; CHECK: ldr	d[[DARG:[0-9]+]], [x29, #56]
 ; CHECK: ldr	w[[IARG:[0-9]+]], [x29, #40]
@@ -211,19 +209,18 @@ entry:
 ; CHECK-MACHO: .cfi_startproc
 ;   Check that used callee-saved registers are saved
 ; CHECK-MACHO: stp	x20, x19, [sp, #-32]!
-; CHECK-MACHO: .cfi_def_cfa_offset 32
 ;   Check that the frame pointer is created:
 ; CHECK-MACHO: stp	x29, x30, [sp, #16]
 ; CHECK-MACHO: add	x29, sp, #16
+;   Check the dynamic realignment of the stack pointer to a 128-byte boundary
+; CHECK-MACHO: sub	x9, sp, #96
+; CHECK-MACHO: and	sp, x9, #0xffffffffffffff80
 ;   Check correctness of cfi pseudo-instructions
 ; CHECK-MACHO: .cfi_def_cfa w29, 16
 ; CHECK-MACHO: .cfi_offset w30, -8
 ; CHECK-MACHO: .cfi_offset w29, -16
 ; CHECK-MACHO: .cfi_offset w19, -24
 ; CHECK-MACHO: .cfi_offset w20, -32
-;   Check the dynamic realignment of the stack pointer to a 128-byte boundary
-; CHECK-MACHO: sub	x9, sp, #96
-; CHECK-MACHO: and	sp, x9, #0xffffffffffffff80
 ;   Check correct access to arguments passed on the stack, through frame pointer
 ; CHECK-MACHO: ldr	d[[DARG:[0-9]+]], [x29, #32]
 ; CHECK-MACHO: ldr	w[[IARG:[0-9]+]], [x29, #20]
@@ -288,19 +285,18 @@ entry:
 ; CHECK: .cfi_startproc
 ;   Check that used callee-saved registers are saved
 ; CHECK: stp	x29, x30, [sp, #-32]!
-; CHECK: .cfi_def_cfa_offset 32
 ;   Check that the frame pointer is created:
 ; CHECK: stp	x20, x19, [sp, #16]
 ; CHECK: mov	x29, sp
+;   Check that space is reserved on the stack for the local variable,
+;   rounded up to a multiple of 16 to keep the stack pointer 16-byte aligned.
+; CHECK: sub	sp, sp, #16
 ;   Check correctness of cfi pseudo-instructions
 ; CHECK: .cfi_def_cfa w29, 32
 ; CHECK: .cfi_offset w19, -8
 ; CHECK: .cfi_offset w20, -16
 ; CHECK: .cfi_offset w30, -24
 ; CHECK: .cfi_offset w29, -32
-;   Check that space is reserved on the stack for the local variable,
-;   rounded up to a multiple of 16 to keep the stack pointer 16-byte aligned.
-; CHECK: sub	sp, sp, #16
 ;   Check correct access to arguments passed on the stack, through frame pointer
 ; CHECK: ldr	w[[IARG:[0-9]+]], [x29, #40]
 ; CHECK: ldr	d[[DARG:[0-9]+]], [x29, #56]
@@ -390,18 +386,10 @@ entry:
 ; CHECK: .cfi_startproc
 ;   Check that used callee-saved registers are saved
 ; CHECK: stp	x29, x30, [sp, #-48]!
-; CHECK: .cfi_def_cfa_offset 48
 ; CHECK: str	x21, [sp, #16]
 ; CHECK: stp	x20, x19, [sp, #32]
 ;   Check that the frame pointer is created:
 ; CHECK: mov	x29, sp
-;   Check correctness of cfi pseudo-instructions
-; CHECK: .cfi_def_cfa w29, 48
-; CHECK: .cfi_offset w19, -8
-; CHECK: .cfi_offset w20, -16
-; CHECK: .cfi_offset w21, -32
-; CHECK: .cfi_offset w30, -40
-; CHECK: .cfi_offset w29, -48
 ;   Check that the stack pointer gets re-aligned to 128
 ;   bytes & the base pointer (x19) gets initialized to
 ;   this 128-byte aligned area for local variables &
@@ -409,6 +397,13 @@ entry:
 ; CHECK: sub	x9, sp, #80
 ; CHECK: and	sp, x9, #0xffffffffffffff80
 ; CHECK: mov    x19, sp
+;   Check correctness of cfi pseudo-instructions
+; CHECK: .cfi_def_cfa w29, 48
+; CHECK: .cfi_offset w19, -8
+; CHECK: .cfi_offset w20, -16
+; CHECK: .cfi_offset w21, -32
+; CHECK: .cfi_offset w30, -40
+; CHECK: .cfi_offset w29, -48
 ;   Check correct access to arguments passed on the stack, through frame pointer
 ; CHECK: ldr	w[[IARG:[0-9]+]], [x29, #56]
 ; CHECK: ldr	d[[DARG:[0-9]+]], [x29, #72]
@@ -437,11 +432,17 @@ entry:
 ; CHECK-MACHO: .cfi_startproc
 ;   Check that used callee-saved registers are saved
 ; CHECK-MACHO: stp	x22, x21, [sp, #-48]!
-; CHECK-MACHO: .cfi_def_cfa_offset 48
 ; CHECK-MACHO: stp	x20, x19, [sp, #16]
 ;   Check that the frame pointer is created:
 ; CHECK-MACHO: stp	x29, x30, [sp, #32]
 ; CHECK-MACHO: add	x29, sp, #32
+;   Check that the stack pointer gets re-aligned to 128
+;   bytes & the base pointer (x19) gets initialized to
+;   this 128-byte aligned area for local variables &
+;   spill slots
+; CHECK-MACHO: sub	x9, sp, #80
+; CHECK-MACHO: and	sp, x9, #0xffffffffffffff80
+; CHECK-MACHO: mov    x19, sp
 ;   Check correctness of cfi pseudo-instructions
 ; CHECK-MACHO: .cfi_def_cfa w29, 16
 ; CHECK-MACHO: .cfi_offset w30, -8
@@ -450,13 +451,6 @@ entry:
 ; CHECK-MACHO: .cfi_offset w20, -32
 ; CHECK-MACHO: .cfi_offset w21, -40
 ; CHECK-MACHO: .cfi_offset w22, -48
-;   Check that the stack pointer gets re-aligned to 128
-;   bytes & the base pointer (x19) gets initialized to
-;   this 128-byte aligned area for local variables &
-;   spill slots
-; CHECK-MACHO: sub	x9, sp, #80
-; CHECK-MACHO: and	sp, x9, #0xffffffffffffff80
-; CHECK-MACHO: mov    x19, sp
 ;   Check correct access to arguments passed on the stack, through frame pointer
 ; CHECK-MACHO: ldr	w[[IARG:[0-9]+]], [x29, #20]
 ; CHECK-MACHO: ldr	d[[DARG:[0-9]+]], [x29, #32]

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
index 1b1ae0853d5f3..5c6de3a728d98 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
@@ -9,35 +9,34 @@ declare void @fn(i8*, i8*)
 define void @consecutive() {
 ; CHECK-MOPS-LABEL: consecutive:
 ; CHECK-MOPS:       // %bb.0: // %entry
-; CHECK-MOPS-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-MOPS-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-MOPS-NEXT:    stp	x29, x30, [sp, #-16]!           // 16-byte Folded Spill
+; CHECK-MOPS-NEXT:    sub	sp, sp, #2016
+; CHECK-MOPS-NEXT:    .cfi_def_cfa_offset 2032
 ; CHECK-MOPS-NEXT:    .cfi_offset w30, -8
 ; CHECK-MOPS-NEXT:    .cfi_offset w29, -16
-; CHECK-MOPS-NEXT:    sub sp, sp, #2016
-; CHECK-MOPS-NEXT:    .cfi_def_cfa_offset 2032
-; CHECK-MOPS-NEXT:    mov w8, #1000
-; CHECK-MOPS-NEXT:    add x9, sp, #8
-; CHECK-MOPS-NEXT:    adrp x10, .LCPI0_0
-; CHECK-MOPS-NEXT:    adrp x11, .LCPI0_1
-; CHECK-MOPS-NEXT:    mov w12, #6424
-; CHECK-MOPS-NEXT:    mov w13, #7452
-; CHECK-MOPS-NEXT:    setp [x9]!, x8!, xzr
-; CHECK-MOPS-NEXT:    setm [x9]!, x8!, xzr
-; CHECK-MOPS-NEXT:    sete [x9]!, x8!, xzr
-; CHECK-MOPS-NEXT:    movk w12, #6938, lsl #16
-; CHECK-MOPS-NEXT:    ldr q0, [x10, :lo12:.LCPI0_0]
-; CHECK-MOPS-NEXT:    mov w8, #30
-; CHECK-MOPS-NEXT:    ldr d1, [x11, :lo12:.LCPI0_1]
-; CHECK-MOPS-NEXT:    add x0, sp, #1008
-; CHECK-MOPS-NEXT:    add x1, sp, #8
-; CHECK-MOPS-NEXT:    str w12, [sp, #1032]
-; CHECK-MOPS-NEXT:    strh w13, [sp, #1036]
-; CHECK-MOPS-NEXT:    str q0, [sp, #1008]
-; CHECK-MOPS-NEXT:    str d1, [sp, #1024]
-; CHECK-MOPS-NEXT:    strb w8, [sp, #1038]
-; CHECK-MOPS-NEXT:    bl fn
-; CHECK-MOPS-NEXT:    add sp, sp, #2016
-; CHECK-MOPS-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-MOPS-NEXT:    mov	w8, #1000
+; CHECK-MOPS-NEXT:    add	x9, sp, #8
+; CHECK-MOPS-NEXT:    adrp	x10, .LCPI0_0
+; CHECK-MOPS-NEXT:    adrp	x11, .LCPI0_1
+; CHECK-MOPS-NEXT:    mov	w12, #6424
+; CHECK-MOPS-NEXT:    mov	w13, #7452
+; CHECK-MOPS-NEXT:    setp	[x9]!, x8!, xzr
+; CHECK-MOPS-NEXT:    setm	[x9]!, x8!, xzr
+; CHECK-MOPS-NEXT:    sete	[x9]!, x8!, xzr
+; CHECK-MOPS-NEXT:    movk	w12, #6938, lsl #16
+; CHECK-MOPS-NEXT:    ldr	q0, [x10, :lo12:.LCPI0_0]
+; CHECK-MOPS-NEXT:    mov	w8, #30
+; CHECK-MOPS-NEXT:    ldr	d1, [x11, :lo12:.LCPI0_1]
+; CHECK-MOPS-NEXT:    add	x0, sp, #1008
+; CHECK-MOPS-NEXT:    add	x1, sp, #8
+; CHECK-MOPS-NEXT:    str	w12, [sp, #1032]
+; CHECK-MOPS-NEXT:    strh	w13, [sp, #1036]
+; CHECK-MOPS-NEXT:    str	q0, [sp, #1008]
+; CHECK-MOPS-NEXT:    str	d1, [sp, #1024]
+; CHECK-MOPS-NEXT:    strb	w8, [sp, #1038]
+; CHECK-MOPS-NEXT:    bl	fn
+; CHECK-MOPS-NEXT:    add	sp, sp, #2016
+; CHECK-MOPS-NEXT:    ldp	x29, x30, [sp], #16             // 16-byte Folded Reload
 ; CHECK-MOPS-NEXT:    ret
 entry:
   %buf_from = alloca [1000 x i8], align 16

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-mops.ll b/llvm/test/CodeGen/AArch64/aarch64-mops.ll
index 5a880d3e59aeb..847cfd26d1731 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mops.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mops.ll
@@ -690,8 +690,8 @@ define void @memset_size(i8* %dst, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    sub sp, sp, #32
-; GISel-WITHOUT-MOPS-O0-NEXT:    .cfi_def_cfa_offset 32
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; GISel-WITHOUT-MOPS-O0-NEXT:    .cfi_def_cfa_offset 32
 ; GISel-WITHOUT-MOPS-O0-NEXT:    .cfi_offset w30, -16
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x1, [sp, #8] // 8-byte Folded Spill
 ; GISel-WITHOUT-MOPS-O0-NEXT:    mov w1, w2
@@ -759,8 +759,8 @@ define void @memset_size_volatile(i8* %dst, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    sub sp, sp, #32
-; GISel-WITHOUT-MOPS-O0-NEXT:    .cfi_def_cfa_offset 32
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; GISel-WITHOUT-MOPS-O0-NEXT:    .cfi_def_cfa_offset 32
 ; GISel-WITHOUT-MOPS-O0-NEXT:    .cfi_offset w30, -16
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x1, [sp, #8] // 8-byte Folded Spill
 ; GISel-WITHOUT-MOPS-O0-NEXT:    mov w1, w2

diff  --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
index 4ec31ee82e8ce..11bcb783cb5cd 100644
--- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll
+++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
@@ -155,10 +155,11 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
 ; CHECK-LABEL: lane_mask_nxv32i1_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    mov z3.s, w0
 ; CHECK-NEXT:    mov z1.d, z0.d
@@ -178,7 +179,6 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
 ; CHECK-NEXT:    cmphi p3.s, p0/z, z4.s, z5.s
 ; CHECK-NEXT:    uqadd z5.s, z6.s, z3.s
 ; CHECK-NEXT:    incw z1.s, all, mul #4
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    cmphi p4.s, p0/z, z4.s, z5.s
 ; CHECK-NEXT:    uqadd z0.s, z0.s, z3.s
 ; CHECK-NEXT:    uqadd z1.s, z1.s, z3.s
@@ -190,7 +190,6 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
 ; CHECK-NEXT:    cmphi p4.s, p0/z, z4.s, z1.s
 ; CHECK-NEXT:    uqadd z0.s, z2.s, z3.s
 ; CHECK-NEXT:    uqadd z1.s, z6.s, z3.s
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    cmphi p5.s, p0/z, z4.s, z0.s
 ; CHECK-NEXT:    cmphi p0.s, p0/z, z4.s, z1.s
 ; CHECK-NEXT:    uzp1 p3.h, p3.h, p4.h
@@ -210,10 +209,13 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i64(i64 %index, i64 %TC) {
 ; CHECK-LABEL: lane_mask_nxv32i1_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    mov z3.d, x0
 ; CHECK-NEXT:    mov z1.d, z0.d
@@ -239,16 +241,13 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i64(i64 %index, i64 %TC) {
 ; CHECK-NEXT:    uqadd z6.d, z7.d, z3.d
 ; CHECK-NEXT:    mov z25.d, z2.d
 ; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    mov z26.d, z5.d
 ; CHECK-NEXT:    cmphi p4.d, p0/z, z4.d, z6.d
 ; CHECK-NEXT:    uqadd z6.d, z24.d, z3.d
 ; CHECK-NEXT:    incd z25.d, all, mul #4
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    cmphi p5.d, p0/z, z4.d, z6.d
 ; CHECK-NEXT:    uqadd z6.d, z25.d, z3.d
 ; CHECK-NEXT:    incd z26.d, all, mul #4
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    cmphi p6.d, p0/z, z4.d, z6.d
 ; CHECK-NEXT:    uqadd z6.d, z26.d, z3.d
 ; CHECK-NEXT:    uzp1 p2.s, p2.s, p3.s
@@ -281,7 +280,6 @@ define <vscale x 32 x i1> @lane_mask_nxv32i1_i64(i64 %index, i64 %TC) {
 ; CHECK-NEXT:    cmphi p6.d, p0/z, z4.d, z1.d
 ; CHECK-NEXT:    uqadd z0.d, z25.d, z3.d
 ; CHECK-NEXT:    uqadd z1.d, z26.d, z3.d
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    cmphi p7.d, p0/z, z4.d, z0.d
 ; CHECK-NEXT:    cmphi p0.d, p0/z, z4.d, z1.d
 ; CHECK-NEXT:    uzp1 p5.s, p5.s, p6.s

diff --git a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
index 0509e2f5c06f0..81b7991a7f86e 100644
--- a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
+++ b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
@@ -50,8 +50,8 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_add_const_add_const_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -127,8 +127,8 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_add_const_sub_const_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -206,8 +206,8 @@ define <4 x i32> @vec_add_const_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_add_const_const_sub_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -283,8 +283,8 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_sub_const_add_const_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -360,8 +360,8 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_sub_const_sub_const_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -439,8 +439,8 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_sub_const_const_sub_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
@@ -519,8 +519,8 @@ define <4 x i32> @vec_const_sub_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_const_sub_add_const_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -599,8 +599,8 @@ define <4 x i32> @vec_const_sub_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_const_sub_sub_const_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -678,8 +678,8 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_const_sub_const_sub_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
 ; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s

diff --git a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
index 4b94de4c7a49b..3a3b635af544f 100644
--- a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
+++ b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
@@ -452,8 +452,8 @@ define void @caller_in_memory() {
 ; CHECK-LABEL: caller_in_memory:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    add x8, sp, #8
 ; CHECK-NEXT:    bl return_in_memory
@@ -496,8 +496,8 @@ define void @argument_in_memory() {
 ; CHECK-LABEL: argument_in_memory:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    adrp x8, in_memory_store
 ; CHECK-NEXT:    add x8, x8, :lo12:in_memory_store

diff --git a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
index 3cee66c2d29ff..abccb90e3b4ef 100644
--- a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
@@ -72,7 +72,6 @@ define dso_local void @callee() {
 ; CHECK-SAVED-X18: str x18, [sp
 
 ; CHECK-SAVED-ALL: str x18, [sp
-; CHECK-SAVED-ALL-NEXT: .cfi_def_cfa_offset
 ; CHECK-SAVED-ALL-NEXT: stp x15, x14, [sp
 ; CHECK-SAVED-ALL-NEXT: stp x13, x12, [sp
 ; CHECK-SAVED-ALL-NEXT: stp x11, x10, [sp

diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 19ac1f0e53308..a255b9aa73516 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -75,8 +75,8 @@ define dso_local void @test_fptosi() {
 ; CHECK-LABEL: test_fptosi:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    adrp x8, lhs
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:lhs]
@@ -106,8 +106,8 @@ define dso_local void @test_fptoui() {
 ; CHECK-LABEL: test_fptoui:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    adrp x8, lhs
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:lhs]
@@ -247,8 +247,8 @@ define dso_local i1 @test_setcc3() {
 ; CHECK-LABEL: test_setcc3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    adrp x8, lhs
@@ -336,8 +336,8 @@ define dso_local void @test_round() {
 ; CHECK-LABEL: test_round:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    adrp x8, lhs
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:lhs]

diff --git a/llvm/test/CodeGen/AArch64/arm64-large-frame.ll b/llvm/test/CodeGen/AArch64/arm64-large-frame.ll
index 059827c5242a1..897d6e5862d35 100644
--- a/llvm/test/CodeGen/AArch64/arm64-large-frame.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-large-frame.ll
@@ -11,14 +11,12 @@ define void @test_bigframe() {
   %var2 = alloca i8, i32 16
   %var3 = alloca i8, i32 20000000
 
-; CHECK:      sub sp, sp, #4095, lsl #12          // =16773120
-; CHECK-NEXT: .cfi_def_cfa_offset 16773136
-; CHECK-NEXT: sub sp, sp, #4095, lsl #12          // =16773120
-; CHECK-NEXT: .cfi_def_cfa_offset 33546256
-; CHECK-NEXT: sub sp, sp, #1575, lsl #12          // =6451200
-; CHECK-NEXT: .cfi_def_cfa_offset 39997456
-; CHECK-NEXT: sub sp, sp, #2576
-; CHECK-NEXT: .cfi_def_cfa_offset 40000032
+; CHECK: sub sp, sp, #4095, lsl #12
+; CHECK: sub sp, sp, #4095, lsl #12
+; CHECK: sub sp, sp, #1575, lsl #12
+; CHECK: sub sp, sp, #2576
+; CHECK: .cfi_def_cfa_offset 40000032
+
 
 ; CHECK: add [[TMP:x[0-9]+]], sp, #4095, lsl #12
 ; CHECK: add [[TMP1:x[0-9]+]], [[TMP]], #787, lsl #12
@@ -54,11 +52,8 @@ define void @test_mediumframe() {
   %var1 = alloca i8, i32 1000000
   %var2 = alloca i8, i32 16
   %var3 = alloca i8, i32 1000000
-
-; CHECK:      sub sp, sp, #488, lsl #12           // =1998848
-; CHECK-NEXT: .cfi_def_cfa_offset 1998864
+; CHECK: sub sp, sp, #488, lsl #12
 ; CHECK-NEXT: sub sp, sp, #1168
-; CHECK-NEXT: .cfi_def_cfa_offset 2000032
 
   store volatile i8* %var1, i8** @addr
 ; CHECK: add     [[VAR1ADDR:x[0-9]+]], sp, #244, lsl #12

diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
index 1d59193f4d915..c1f4c4540c200 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -98,8 +98,8 @@ define void @bzero_12_stack() {
 ; CHECK-LABEL: bzero_12_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    str wzr, [sp, #8]
@@ -119,8 +119,8 @@ define void @bzero_16_stack() {
 ; CHECK-LABEL: bzero_16_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp xzr, x30, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    str xzr, [sp]
@@ -139,8 +139,8 @@ define void @bzero_20_stack() {
 ; CHECK-LABEL: bzero_20_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    add x0, sp, #8
 ; CHECK-NEXT:    stp xzr, xzr, [sp, #8]
@@ -160,8 +160,8 @@ define void @bzero_26_stack() {
 ; CHECK-LABEL: bzero_26_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    stp xzr, xzr, [sp]
@@ -182,8 +182,8 @@ define void @bzero_32_stack() {
 ; CHECK-LABEL: bzero_32_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov x0, sp
@@ -203,8 +203,8 @@ define void @bzero_40_stack() {
 ; CHECK-LABEL: bzero_40_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov x0, sp
@@ -225,8 +225,8 @@ define void @bzero_64_stack() {
 ; CHECK-LABEL: bzero_64_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov x0, sp
@@ -247,8 +247,8 @@ define void @bzero_72_stack() {
 ; CHECK-LABEL: bzero_72_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov x0, sp
@@ -270,8 +270,8 @@ define void @bzero_128_stack() {
 ; CHECK-LABEL: bzero_128_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #144
-; CHECK-NEXT:    .cfi_def_cfa_offset 144
 ; CHECK-NEXT:    str x30, [sp, #128] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 144
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov x0, sp
@@ -294,8 +294,8 @@ define void @bzero_256_stack() {
 ; CHECK-LABEL: bzero_256_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #272
-; CHECK-NEXT:    .cfi_def_cfa_offset 272
 ; CHECK-NEXT:    stp x29, x30, [sp, #256] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 272
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -360,8 +360,8 @@ define void @memset_12_stack() {
 ; CHECK-LABEL: memset_12_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov x8, #-6148914691236517206
 ; CHECK-NEXT:    mov x0, sp
@@ -403,8 +403,8 @@ define void @memset_20_stack() {
 ; CHECK-LABEL: memset_20_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov x8, #-6148914691236517206
 ; CHECK-NEXT:    add x0, sp, #8
@@ -425,8 +425,8 @@ define void @memset_26_stack() {
 ; CHECK-LABEL: memset_26_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov x8, #-6148914691236517206
 ; CHECK-NEXT:    mov x0, sp
@@ -448,8 +448,8 @@ define void @memset_32_stack() {
 ; CHECK-LABEL: memset_32_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.16b, #170
 ; CHECK-NEXT:    mov x0, sp
@@ -469,8 +469,8 @@ define void @memset_40_stack() {
 ; CHECK-LABEL: memset_40_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.16b, #170
 ; CHECK-NEXT:    mov x8, #-6148914691236517206
@@ -492,8 +492,8 @@ define void @memset_64_stack() {
 ; CHECK-LABEL: memset_64_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.16b, #170
 ; CHECK-NEXT:    mov x0, sp
@@ -514,8 +514,8 @@ define void @memset_72_stack() {
 ; CHECK-LABEL: memset_72_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.16b, #170
 ; CHECK-NEXT:    mov x8, #-6148914691236517206
@@ -538,8 +538,8 @@ define void @memset_128_stack() {
 ; CHECK-LABEL: memset_128_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #144
-; CHECK-NEXT:    .cfi_def_cfa_offset 144
 ; CHECK-NEXT:    str x30, [sp, #128] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 144
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.16b, #170
 ; CHECK-NEXT:    mov x0, sp
@@ -562,8 +562,8 @@ define void @memset_256_stack() {
 ; CHECK-LABEL: memset_256_stack:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #272
-; CHECK-NEXT:    .cfi_def_cfa_offset 272
 ; CHECK-NEXT:    stp x29, x30, [sp, #256] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 272
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    movi v0.16b, #170

diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
index 7cfd698167bde..cb5dd63ac8f70 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
@@ -807,10 +807,10 @@ define <16 x i8> @srem16x8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK-LABEL: srem16x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    stp x24, x23, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1171,10 +1171,10 @@ define <16 x i8> @urem16x8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK-LABEL: urem16x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    stp x24, x23, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1469,8 +1469,8 @@ define <2 x float> @frem2f32(<2 x float> %A, <2 x float> %B) {
 ; CHECK-LABEL: frem2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -1498,8 +1498,8 @@ define <4 x float> @frem4f32(<4 x float> %A, <4 x float> %B) {
 ; CHECK-LABEL: frem4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    stp q0, q1, [sp, #16] // 32-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
@@ -1554,8 +1554,8 @@ define <2 x double> @frem2d64(<2 x double> %A, <2 x double> %B) {
 ; CHECK-LABEL: frem2d64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
 ; CHECK-NEXT:    mov d0, v0.d[1]

diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
index 0111d841450df..b64f2455908e0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
@@ -27,7 +27,6 @@ entry:
 ;
 ; CHECK-LABEL: caller_meta_leaf
 ; CHECK:       sub sp, sp, #48
-; CHECK-NEXT: .cfi_def_cfa_offset 48
 ; CHECK-NEXT:  stp x29, x30, [sp, #32]
 ; CHECK-NEXT:  add x29, sp, #32
 ; CHECK:       Ltmp

diff --git a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index a35569d2dd175..3ad1f964162b7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -13,7 +13,6 @@ define i32 @foo(i32 %a, i32 %b) {
 ; ENABLE-NEXT:    b.ge LBB0_2
 ; ENABLE-NEXT:  ; %bb.1: ; %true
 ; ENABLE-NEXT:    sub sp, sp, #32
-; ENABLE-NEXT:    .cfi_def_cfa_offset 32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -31,7 +30,6 @@ define i32 @foo(i32 %a, i32 %b) {
 ; DISABLE-LABEL: foo:
 ; DISABLE:       ; %bb.0:
 ; DISABLE-NEXT:    sub sp, sp, #32
-; DISABLE-NEXT:    .cfi_def_cfa_offset 32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -74,7 +72,6 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    cbz w0, LBB1_4
 ; ENABLE-NEXT:  ; %bb.1: ; %for.body.preheader
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -102,7 +99,6 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE-LABEL: freqSaveAndRestoreOutsideLoop:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -163,7 +159,6 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; ENABLE-LABEL: freqSaveAndRestoreOutsideLoop2:
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -188,7 +183,6 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; DISABLE-LABEL: freqSaveAndRestoreOutsideLoop2:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -233,7 +227,6 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    cbz w0, LBB3_4
 ; ENABLE-NEXT:  ; %bb.1: ; %for.body.preheader
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -262,7 +255,6 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE-LABEL: loopInfoSaveOutsideLoop:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -321,20 +313,14 @@ declare void @somethingElse(...)
 
 ; Check with a more complex case that we do not have restore within the loop and
 ; save outside.
-define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind uwtable {
+define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
 ; ENABLE-LABEL: loopInfoRestoreOutsideLoop:
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    cbz w0, LBB4_4
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #16
-; ENABLE-NEXT:    .cfi_def_cfa w29, 16
-; ENABLE-NEXT:    .cfi_offset w30, -8
-; ENABLE-NEXT:    .cfi_offset w29, -16
-; ENABLE-NEXT:    .cfi_offset w19, -24
-; ENABLE-NEXT:    .cfi_offset w20, -32
 ; ENABLE-NEXT:    bl _somethingElse
 ; ENABLE-NEXT:    mov w19, wzr
 ; ENABLE-NEXT:    mov w20, #10
@@ -356,14 +342,8 @@ define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind uwtable {
 ; DISABLE-LABEL: loopInfoRestoreOutsideLoop:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #16
-; DISABLE-NEXT:    .cfi_def_cfa w29, 16
-; DISABLE-NEXT:    .cfi_offset w30, -8
-; DISABLE-NEXT:    .cfi_offset w29, -16
-; DISABLE-NEXT:    .cfi_offset w19, -24
-; DISABLE-NEXT:    .cfi_offset w20, -32
 ; DISABLE-NEXT:    cbz w0, LBB4_4
 ; DISABLE-NEXT:  ; %bb.1: ; %if.then
 ; DISABLE-NEXT:    bl _somethingElse
@@ -430,13 +410,12 @@ entry:
 }
 
 ; Check that we handle variadic function correctly.
-define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind uwtable {
+define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind {
 ; ENABLE-LABEL: variadicFunc:
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    cbz w0, LBB6_4
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
 ; ENABLE-NEXT:    sub sp, sp, #16
-; ENABLE-NEXT:    .cfi_def_cfa_offset 16
 ; ENABLE-NEXT:    add x8, sp, #16
 ; ENABLE-NEXT:    cmp w1, #1
 ; ENABLE-NEXT:    str x8, [sp, #8]
@@ -461,7 +440,6 @@ define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind uwtable {
 ; DISABLE-LABEL: variadicFunc:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    sub sp, sp, #16
-; DISABLE-NEXT:    .cfi_def_cfa_offset 16
 ; DISABLE-NEXT:    cbz w0, LBB6_4
 ; DISABLE-NEXT:  ; %bb.1: ; %if.then
 ; DISABLE-NEXT:    add x8, sp, #16
@@ -601,7 +579,6 @@ define i32 @callVariadicFunc(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    cbz w0, LBB8_2
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
 ; ENABLE-NEXT:    sub sp, sp, #64
-; ENABLE-NEXT:    .cfi_def_cfa_offset 64
 ; ENABLE-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #48
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -623,7 +600,6 @@ define i32 @callVariadicFunc(i32 %cond, i32 %N) {
 ; DISABLE-LABEL: callVariadicFunc:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    sub sp, sp, #64
-; DISABLE-NEXT:    .cfi_def_cfa_offset 64
 ; DISABLE-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #48
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -678,7 +654,6 @@ define i32 @noreturn(i8 signext %bad_thing) {
 ; ENABLE-NEXT:    ret
 ; ENABLE-NEXT:  LBB9_2: ; %if.abort
 ; ENABLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 16
 ; ENABLE-NEXT:    mov x29, sp
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
@@ -688,7 +663,6 @@ define i32 @noreturn(i8 signext %bad_thing) {
 ; DISABLE-LABEL: noreturn:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 16
 ; DISABLE-NEXT:    mov x29, sp
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
@@ -728,7 +702,6 @@ define void @infiniteloop() {
 ; ENABLE-LABEL: infiniteloop:
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -756,7 +729,6 @@ define void @infiniteloop() {
 ; DISABLE-LABEL: infiniteloop:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -803,7 +775,6 @@ define void @infiniteloop2() {
 ; ENABLE-LABEL: infiniteloop2:
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -837,7 +808,6 @@ define void @infiniteloop2() {
 ; DISABLE-LABEL: infiniteloop2:
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
@@ -976,13 +946,12 @@ define i32 @stack_realign(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) {
 ; ENABLE-LABEL: stack_realign:
 ; ENABLE:       ; %bb.0:
 ; ENABLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 16
 ; ENABLE-NEXT:    mov x29, sp
+; ENABLE-NEXT:    sub x9, sp, #16
+; ENABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
-; ENABLE-NEXT:    sub x9, sp, #16
-; ENABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; ENABLE-NEXT:    lsl w8, w0, w1
 ; ENABLE-NEXT:    lsl w9, w1, w0
 ; ENABLE-NEXT:    cmp w0, w1
@@ -999,13 +968,12 @@ define i32 @stack_realign(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) {
 ; DISABLE-LABEL: stack_realign:
 ; DISABLE:       ; %bb.0:
 ; DISABLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 16
 ; DISABLE-NEXT:    mov x29, sp
+; DISABLE-NEXT:    sub x9, sp, #16
+; DISABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
-; DISABLE-NEXT:    sub x9, sp, #16
-; DISABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; DISABLE-NEXT:    lsl w8, w0, w1
 ; DISABLE-NEXT:    lsl w9, w1, w0
 ; DISABLE-NEXT:    cmp w0, w1
@@ -1045,13 +1013,14 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; ENABLE-LABEL: stack_realign2:
 ; ENABLE:       ; %bb.0:
 ; ENABLE-NEXT:    stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
-; ENABLE-NEXT:    .cfi_def_cfa_offset 96
 ; ENABLE-NEXT:    stp x26, x25, [sp, #16] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x24, x23, [sp, #32] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x20, x19, [sp, #64] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #80] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    add x29, sp, #80
+; ENABLE-NEXT:    sub x9, sp, #32
+; ENABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
@@ -1065,8 +1034,6 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; ENABLE-NEXT:    .cfi_offset w26, -80
 ; ENABLE-NEXT:    .cfi_offset w27, -88
 ; ENABLE-NEXT:    .cfi_offset w28, -96
-; ENABLE-NEXT:    sub x9, sp, #32
-; ENABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; ENABLE-NEXT:    add w8, w1, w0
 ; ENABLE-NEXT:    lsl w9, w0, w1
 ; ENABLE-NEXT:    lsl w10, w1, w0
@@ -1105,13 +1072,14 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; DISABLE-LABEL: stack_realign2:
 ; DISABLE:       ; %bb.0:
 ; DISABLE-NEXT:    stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
-; DISABLE-NEXT:    .cfi_def_cfa_offset 96
 ; DISABLE-NEXT:    stp x26, x25, [sp, #16] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x24, x23, [sp, #32] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x20, x19, [sp, #64] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #80] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    add x29, sp, #80
+; DISABLE-NEXT:    sub x9, sp, #32
+; DISABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -1125,8 +1093,6 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; DISABLE-NEXT:    .cfi_offset w26, -80
 ; DISABLE-NEXT:    .cfi_offset w27, -88
 ; DISABLE-NEXT:    .cfi_offset w28, -96
-; DISABLE-NEXT:    sub x9, sp, #32
-; DISABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; DISABLE-NEXT:    add w8, w1, w0
 ; DISABLE-NEXT:    lsl w9, w0, w1
 ; DISABLE-NEXT:    lsl w10, w1, w0

diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index 1506c68a447a9..abf9469c45ef5 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -173,8 +173,8 @@ define <4 x i32> @sign_4xi32_multi_use(<4 x i32> %a) {
 ; CHECK-LABEL: sign_4xi32_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.2d, #0xffffffffffffffff
 ; CHECK-NEXT:    cmlt v2.4s, v0.4s, #0

diff --git a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
index 8fbac25d4f483..7232e7cb72416 100644
--- a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
+++ b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
@@ -25,8 +25,8 @@ define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
 ; OUTLINE-ATOMICS-LABEL: test_return:
 ; OUTLINE-ATOMICS:       ; %bb.0:
 ; OUTLINE-ATOMICS-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 32
 ; OUTLINE-ATOMICS-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 32
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w30, -8
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w29, -16
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w19, -24
@@ -75,8 +75,8 @@ define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
 ; OUTLINE-ATOMICS-LABEL: test_return_bool:
 ; OUTLINE-ATOMICS:       ; %bb.0:
 ; OUTLINE-ATOMICS-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 32
 ; OUTLINE-ATOMICS-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 32
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w30, -8
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w29, -16
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w19, -24
@@ -120,8 +120,8 @@ define void @test_conditional(i32* %p, i32 %oldval, i32 %newval) {
 ; OUTLINE-ATOMICS-LABEL: test_conditional:
 ; OUTLINE-ATOMICS:       ; %bb.0:
 ; OUTLINE-ATOMICS-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 32
 ; OUTLINE-ATOMICS-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 32
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w30, -8
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w29, -16
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w19, -24
@@ -166,9 +166,9 @@ define i1 @test_conditional2(i32 %a, i32 %b, i32* %c) {
 ; CHECK-LABEL: test_conditional2:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_offset w19, -24
@@ -222,9 +222,9 @@ define i1 @test_conditional2(i32 %a, i32 %b, i32* %c) {
 ; OUTLINE-ATOMICS-LABEL: test_conditional2:
 ; OUTLINE-ATOMICS:       ; %bb.0: ; %entry
 ; OUTLINE-ATOMICS-NEXT:    stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
-; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 48
 ; OUTLINE-ATOMICS-NEXT:    stp x20, x19, [sp, #16] ; 16-byte Folded Spill
 ; OUTLINE-ATOMICS-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; OUTLINE-ATOMICS-NEXT:    .cfi_def_cfa_offset 48
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w30, -8
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w29, -16
 ; OUTLINE-ATOMICS-NEXT:    .cfi_offset w19, -24

diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index c8320daeb4226..018905e47fd41 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -416,9 +416,9 @@ define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdC
 ; CHECK-LABEL: combine_non_adjacent_cmp_br:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-48]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    stp x22, x21, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -474,8 +474,8 @@ define i32 @do_nothing_if_resultant_opcodes_would_differ() #0 {
 ; CHECK-LABEL: do_nothing_if_resultant_opcodes_would_differ:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -642,8 +642,8 @@ define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) {
 ; CHECK-LABEL: fcmpri:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    .cfi_offset b8, -32

diff --git a/llvm/test/CodeGen/AArch64/csr-split.ll b/llvm/test/CodeGen/AArch64/csr-split.ll
index f45455006fb53..e54ccd45ec571 100644
--- a/llvm/test/CodeGen/AArch64/csr-split.ll
+++ b/llvm/test/CodeGen/AArch64/csr-split.ll
@@ -30,8 +30,8 @@ define dso_local signext i32 @test1(i32* %b) local_unnamed_addr  {
 ; CHECK-APPLE-LABEL: test1:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
 ; CHECK-APPLE-NEXT:    .cfi_offset w29, -16
 ; CHECK-APPLE-NEXT:    .cfi_offset w19, -24
@@ -102,8 +102,8 @@ define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr  {
 ; CHECK-APPLE-LABEL: test2:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
 ; CHECK-APPLE-NEXT:    .cfi_offset w29, -16
 ; CHECK-APPLE-NEXT:    .cfi_offset w19, -24
@@ -155,8 +155,8 @@ define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_ad
 ; CHECK-LABEL: test3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -176,8 +176,8 @@ define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_ad
 ; CHECK-APPLE-LABEL: test3:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
 ; CHECK-APPLE-NEXT:    .cfi_offset w29, -16
 ; CHECK-APPLE-NEXT:    .cfi_offset w19, -24

diff --git a/llvm/test/CodeGen/AArch64/fastcc.ll b/llvm/test/CodeGen/AArch64/fastcc.ll
index 8ae7c2dc2526b..56a35b9eb1a69 100644
--- a/llvm/test/CodeGen/AArch64/fastcc.ll
+++ b/llvm/test/CodeGen/AArch64/fastcc.ll
@@ -13,7 +13,6 @@ define fastcc void @func_stack0() {
 
 ; CHECK-TAIL-LABEL: func_stack0:
 ; CHECK-TAIL: sub sp, sp, #48
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset 48
 ; CHECK-TAIL-NEXT: stp x29, x30, [sp, #32]
 ; CHECK-TAIL-NEXT: add x29, sp, #32
 ; CHECK-TAIL: str w{{[0-9]+}}, [sp]
@@ -190,7 +189,6 @@ define fastcc void @func_stack32_leaf([8 x i64], i128 %stacked0, i128 %stacked1)
 define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %stacked1) {
 ; CHECK-LABEL: func_stack32_leaf_local:
 ; CHECK: sub     sp, sp, #32
-; CHECK-NEXT:  .cfi_def_cfa_offset 32
 ; CHECK-NEXT: str     x20, [sp, #16]
 ; CHECK: nop
 ; CHECK-NEXT: //NO_APP
@@ -200,7 +198,6 @@ define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %sta
 
 ; CHECK-TAIL-LABEL: func_stack32_leaf_local:
 ; CHECK-TAIL: sub     sp, sp, #32
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-TAIL-NEXT: str     x20, [sp, #16]
 ; CHECK-TAIL: nop
 ; CHECK-TAIL-NEXT: //NO_APP

diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index fab520135fc92..5d06f762a7710 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -353,9 +353,9 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-LABEL: utest_f64i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -390,9 +390,9 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-LABEL: ustest_f64i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -465,9 +465,9 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-LABEL: utest_f32i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -503,9 +503,9 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-LABEL: ustest_f32i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -591,9 +591,9 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
 ; CHECK-LABEL: utesth_f16i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -629,9 +629,9 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-LABEL: ustest_f16i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -1002,9 +1002,9 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-LABEL: utest_f64i64_mm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -1042,9 +1042,9 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-LABEL: ustest_f64i64_mm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -1107,9 +1107,9 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-LABEL: utest_f32i64_mm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -1148,9 +1148,9 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-LABEL: ustest_f32i64_mm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -1226,9 +1226,9 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-LABEL: utesth_f16i64_mm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -1267,9 +1267,9 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-LABEL: ustest_f16i64_mm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32

diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
index 55d4abc962fc2..ebfe8e4a20d08 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
@@ -248,8 +248,8 @@ define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v1f128_v1i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    adrp x8, .LCPI14_0
@@ -286,10 +286,10 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v2f128_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #112
-; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -355,10 +355,10 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v3f128_v3i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #128
-; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #96] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -445,10 +445,10 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v4f128_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #144
-; CHECK-NEXT:    .cfi_def_cfa_offset 144
 ; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #112] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #128] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 144
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -808,12 +808,12 @@ define <2 x i100> @test_signed_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-LABEL: test_signed_v2f32_v2i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -874,12 +874,12 @@ define <2 x i128> @test_signed_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-LABEL: test_signed_v2f32_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1080,7 +1080,6 @@ define <4 x i100> @test_signed_v4f32_v4i100(<4 x float> %f) {
 ; CHECK-LABEL: test_signed_v4f32_v4i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #128
-; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    str d10, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #40] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #56] // 8-byte Folded Spill
@@ -1088,6 +1087,7 @@ define <4 x i100> @test_signed_v4f32_v4i100(<4 x float> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #96] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1184,7 +1184,6 @@ define <4 x i128> @test_signed_v4f32_v4i128(<4 x float> %f) {
 ; CHECK-LABEL: test_signed_v4f32_v4i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #128
-; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    str d10, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #40] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #56] // 8-byte Folded Spill
@@ -1192,6 +1191,7 @@ define <4 x i128> @test_signed_v4f32_v4i128(<4 x float> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #96] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1466,12 +1466,12 @@ define <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-LABEL: test_signed_v2f64_v2i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1532,12 +1532,12 @@ define <2 x i128> @test_signed_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-LABEL: test_signed_v2f64_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1832,7 +1832,6 @@ define <4 x i100> @test_signed_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-LABEL: test_signed_v4f16_v4i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #112
-; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -1840,6 +1839,7 @@ define <4 x i100> @test_signed_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1939,7 +1939,6 @@ define <4 x i128> @test_signed_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-LABEL: test_signed_v4f16_v4i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #112
-; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -1947,6 +1946,7 @@ define <4 x i128> @test_signed_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -2580,7 +2580,6 @@ define <8 x i100> @test_signed_v8f16_v8i100(<8 x half> %f) {
 ; CHECK-LABEL: test_signed_v8f16_v8i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #192
-; CHECK-NEXT:    .cfi_def_cfa_offset 192
 ; CHECK-NEXT:    str d10, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #96] // 16-byte Folded Spill
@@ -2589,6 +2588,7 @@ define <8 x i100> @test_signed_v8f16_v8i100(<8 x half> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #144] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #160] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #176] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 192
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -2789,7 +2789,6 @@ define <8 x i128> @test_signed_v8f16_v8i128(<8 x half> %f) {
 ; CHECK-LABEL: test_signed_v8f16_v8i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #192
-; CHECK-NEXT:    .cfi_def_cfa_offset 192
 ; CHECK-NEXT:    str d10, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #96] // 16-byte Folded Spill
@@ -2798,6 +2797,7 @@ define <8 x i128> @test_signed_v8f16_v8i128(<8 x half> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #144] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #160] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #176] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 192
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24

diff  --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index 2298e31d7fba7..cbb8b8a51126f 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -248,8 +248,8 @@ define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v1f128_v1i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    adrp x8, .LCPI14_0
@@ -279,9 +279,9 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v2f128_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -332,9 +332,9 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v3f128_v3i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #112
-; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -400,9 +400,9 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v4f128_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #128
-; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -722,10 +722,10 @@ define <2 x i100> @test_unsigned_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v2f32_v2i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x30, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -774,10 +774,10 @@ define <2 x i128> @test_unsigned_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v2f32_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -947,12 +947,12 @@ define <4 x i100> @test_unsigned_v4f32_v4i100(<4 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v4f32_v4i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #112
-; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    stp d9, d8, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x30, x25, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1031,12 +1031,12 @@ define <4 x i128> @test_unsigned_v4f32_v4i128(<4 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v4f32_v4i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #112
-; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    stp d9, d8, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1262,10 +1262,10 @@ define <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-LABEL: test_unsigned_v2f64_v2i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x30, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1313,10 +1313,10 @@ define <2 x i128> @test_unsigned_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-LABEL: test_unsigned_v2f64_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -1564,12 +1564,12 @@ define <4 x i100> @test_unsigned_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v4f16_v4i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x30, x25, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -1651,12 +1651,12 @@ define <4 x i128> @test_unsigned_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v4f16_v4i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -2162,7 +2162,6 @@ define <8 x i100> @test_unsigned_v8f16_v8i100(<8 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v8f16_v8i100:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #176
-; CHECK-NEXT:    .cfi_def_cfa_offset 176
 ; CHECK-NEXT:    stp d9, d8, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x28, x27, [sp, #96] // 16-byte Folded Spill
@@ -2170,6 +2169,7 @@ define <8 x i100> @test_unsigned_v8f16_v8i100(<8 x half> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #128] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #144] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #160] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 176
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -2339,7 +2339,6 @@ define <8 x i128> @test_unsigned_v8f16_v8i128(<8 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v8f16_v8i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #176
-; CHECK-NEXT:    .cfi_def_cfa_offset 176
 ; CHECK-NEXT:    stp d9, d8, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x28, x27, [sp, #96] // 16-byte Folded Spill
@@ -2347,6 +2346,7 @@ define <8 x i128> @test_unsigned_v8f16_v8i128(<8 x half> %f) {
 ; CHECK-NEXT:    stp x24, x23, [sp, #128] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #144] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #160] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 176
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24

diff  --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
index baf3ab724c9c4..61f2e37963abc 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
@@ -6,15 +6,13 @@
   ; CHECK-LABEL: fix_restorepoint_p4:
   ; CHECK:       // %bb.0: // %entry
   ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-  ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-  ; CHECK-NEXT:    .cfi_offset w29, -16
   ; CHECK-NEXT:    addvl sp, sp, #-2
-  ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
   ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
   ; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-  ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
   ; CHECK-NEXT:    addvl sp, sp, #-1
   ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+  ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+  ; CHECK-NEXT:    .cfi_offset w29, -16
   ; CHECK-NEXT:    // implicit-def: $z8
   ; CHECK-NEXT:    // implicit-def: $p4
   ; CHECK-NEXT:    addvl sp, sp, #1

diff  --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 5dba474446fe3..b3d17cf17bbe5 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -57,12 +57,9 @@
 
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
+# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2
 # CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0
@@ -70,16 +67,11 @@
 # CHECK-NEXT: RET_ReallyLR
 
 # ASM-LABEL: test_allocate_sve:
-# ASM:       .cfi_def_cfa_offset 16
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG
+# ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +16
-# UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
-# UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-
+# UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
 name:            test_allocate_sve
 stack:
   - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 }
@@ -103,13 +95,10 @@ body:             |
 
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: frame-setup STPXi killed $x21, killed $x20, $sp, 2
-# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
+# CHECK-COUNT-4: frame-setup CFI_INSTRUCTION
 #
 # CHECK-NEXT: $x20 = IMPLICIT_DEF
 # CHECK-NEXT: $x21 = IMPLICIT_DEF
@@ -120,19 +109,15 @@ body:             |
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_allocate_sve_gpr_callee_saves:
-# ASM:       .cfi_def_cfa_offset 32
-# ASM:       .cfi_offset w20, -8
+# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG
+# ASM-NEXT:  .cfi_offset w20, -8
 # ASM-NEXT:  .cfi_offset w21, -16
 # ASM-NEXT:  .cfi_offset w29, -32
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG
 #
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +32
-# UNWINDINFO:       DW_CFA_offset: reg20 -8
+# UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT:  DW_CFA_offset: reg20 -8
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg21 -16
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -32
-# UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 name:            test_allocate_sve_gpr_callee_saves
 stack:
   - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 }
@@ -157,23 +142,20 @@ body:             |
 
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0
-# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2
 # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0
 # CHECK-NEXT: $sp = ANDXri killed $[[TMP]]
+# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDXri $fp, 0, 0
 # CHECK-NEXT: $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_allocate_sve_gpr_realigned:
-# ASM:       .cfi_def_cfa_offset 16
 # ASM:       .cfi_def_cfa w29, 16
 # ASM-NEXT:  .cfi_offset w30, -8
 # ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +16
 # UNWINDINFO:       DW_CFA_def_cfa: reg29 +16
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg30 -8
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
@@ -202,11 +184,9 @@ body:             |
 
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16
-# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
+# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 
 # CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 16
 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 2
@@ -221,15 +201,11 @@ body:             |
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL:  test_address_sve:
-# ASM:       .cfi_def_cfa_offset 16
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG
+# ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +16
-# UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
-# UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
 
 name:            test_address_sve
 frameInfo:
@@ -266,11 +242,10 @@ body:             |
 
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0
-# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3
 # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
+# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 
 # CHECK-NEXT: STR_ZXI $z0, $fp, -1
 # CHECK-NEXT: STR_ZXI $z1, $fp, -2
@@ -282,12 +257,10 @@ body:             |
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_address_sve_fp:
-# ASM:       .cfi_def_cfa_offset 16
 # ASM:       .cfi_def_cfa w29, 16
 # ASM-NEXT:  .cfi_offset w30, -8
 # ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO:      DW_CFA_def_cfa: reg29 +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
@@ -325,10 +298,9 @@ body:             |
 
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16
-# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
+# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 
 # CHECK:      $[[TMP:x[0-9]+]] = ADDVL_XXI $sp, 1
 # CHECK-NEXT: $x0 = LDRXui killed $[[TMP]], 4
@@ -339,15 +311,11 @@ body:             |
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_stack_arg_sve:
-# ASM:       .cfi_def_cfa_offset 16
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
+# ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +16
-# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 
 name:             test_stack_arg_sve
 fixedStack:
@@ -380,25 +348,16 @@ body:             |
 
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16
-# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
+# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 
 # CHECK-NEXT: $[[TMP2:x[0-9]+]] = ADDVL_XXI $sp, 1
 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP2]], 255
@@ -419,29 +378,11 @@ body:             |
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_address_sve_out_of_range:
-# ASM:       .cfi_def_cfa_offset 16
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 512 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG
 # ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG
+# ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +16
-# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +256, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +512, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +768, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1024, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1280, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1536, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1792, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2048, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 name:            test_address_sve_out_of_range
 frameInfo:
   maxAlignment:  16
@@ -478,13 +419,11 @@ body:             |
 # CHECK: RET_ReallyLR
 #
 # ASM-LABEL: test_address_gpr_vla:
-# ASM:       .cfi_def_cfa_offset 32
 # ASM:       .cfi_def_cfa w29, 32
 # ASM-NEXT:  .cfi_offset w19, -16
 # ASM-NEXT:  .cfi_offset w30, -24
 # ASM-NEXT:  .cfi_offset w29, -32
 #
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +32
 # UNWINDINFO:      DW_CFA_def_cfa: reg29 +32
 # UNWINDINFO-NEXT: DW_CFA_offset: reg19 -16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -24
@@ -506,14 +445,12 @@ body:             |
 ---
 ...
 # CHECK-LABEL: name: save_restore_pregs_sve
-# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK: frame-setup STR_PXI killed $p6, $sp, 5
 # CHECK: frame-setup STR_PXI killed $p5, $sp, 6
 # CHECK: frame-setup STR_PXI killed $p4, $sp, 7
 # CHECK: $sp = frame-setup SUBXri $sp, 32, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
+# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 
 # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0
 # CHECK: $p6 = frame-destroy LDR_PXI $sp, 5
@@ -523,15 +460,11 @@ body:             |
 # CHECK: RET_ReallyLR
 #
 # ASM-LABEL: save_restore_pregs_sve:
-# ASM:       .cfi_def_cfa_offset 16
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG
+# ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:         DW_CFA_def_cfa_offset: +16
-# UNWINDINFO:         DW_CFA_offset: reg29 -16
-# UNWINDINFO:         DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:         DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT:    DW_CFA_offset: reg29 -16
 name: save_restore_pregs_sve
 stack:
   - { id: 0, stack-id: default, size: 32, alignment: 16 }
@@ -547,15 +480,12 @@ body:             |
 ...
 # CHECK-LABEL: name: save_restore_zregs_sve
 # CHECK:      $sp = frame-setup STRXpre killed $fp, $sp, -16
-# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0
 # CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1
 # CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2
-# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 32, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
+# CHECK-COUNT-5: frame-setup CFI_INSTRUCTION
 
 # CHECK:      $sp  = frame-destroy ADDXri $sp, 32, 0
 # CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
@@ -566,21 +496,16 @@ body:             |
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: save_restore_zregs_sve:
-# ASM:       .cfi_def_cfa_offset 16
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
-# ASM:       .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8  @ cfa - 16 - 8 * VG
+# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG
+# ASM-NEXT:  .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8  @ cfa - 16 - 8 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9  @ cfa - 16 - 16 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10  @ cfa - 16 - 24 * VG
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG
 
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +16
-# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 
 name: save_restore_zregs_sve
 stack:
@@ -603,11 +528,8 @@ body:             |
 #
 # CHECK-LABEL: name: save_restore_sve
 # CHECK: $sp = frame-setup STPXpre killed ${{[a-z0-9]+}}, killed $x21, $sp, -4
-# CHECK: frame-setup CFI_INSTRUCTION
 # CHECK: frame-setup STPXi killed $x20, killed $x19, $sp, 2
-# CHECK-COUNT-4: frame-setup CFI_INSTRUCTION
 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -18
-# CHECK: frame-setup CFI_INSTRUCTION
 # CHECK: frame-setup STR_PXI killed $p15, $sp, 4
 # CHECK: frame-setup STR_PXI killed $p14, $sp, 5
 # CHECK: frame-setup STR_PXI killed $p5, $sp, 14
@@ -616,11 +538,9 @@ body:             |
 # CHECK: frame-setup STR_ZXI killed $z22, $sp, 3
 # CHECK: frame-setup STR_ZXI killed $z9, $sp, 16
 # CHECK: frame-setup STR_ZXI killed $z8, $sp, 17
-# CHECK-COUNT-8: frame-setup CFI_INSTRUCTION
 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK: frame-setup CFI_INSTRUCTION
 # CHECK: $sp = frame-setup SUBXri $sp, 32, 0
-# CHECK: frame-setup CFI_INSTRUCTION
+# CHECK-COUNT-13: frame-setup CFI_INSTRUCTION
 
 # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0
 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1
@@ -638,13 +558,8 @@ body:             |
 # CHECK: RET_ReallyLR
 #
 # ASM-LABEL: save_restore_sve:
-# ASM:       .cfi_def_cfa_offset 32
-# ASM:       .cfi_offset w19, -8
-# ASM-NEXT:  .cfi_offset w20, -16
-# ASM-NEXT:  .cfi_offset w21, -24
-# ASM-NEXT:  .cfi_offset w29, -32
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG 
-# ASM:       .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG
+# ASM:       .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG
+# ASM-NEXT:  .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG
@@ -652,16 +567,13 @@ body:             |
 # ASM-NEXT:  .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG
-# ASM:       .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG
+# ASM-NEXT:  .cfi_offset w19, -8
+# ASM-NEXT:  .cfi_offset w20, -16
+# ASM-NEXT:  .cfi_offset w21, -24
+# ASM-NEXT:  .cfi_offset w29, -32
 #
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +32
-# UNWINDINFO:      DW_CFA_offset: reg19 -8
-# UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16
-# UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24
-# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
@@ -669,8 +581,10 @@ body:             |
 # UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_offset: reg19 -8
+# UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32
 
 name: save_restore_sve
 stack:
@@ -711,9 +625,7 @@ body:             |
 #
 # CHECK-LABEL: name: save_restore_sve_realign
 # CHECK:      $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0
-# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -18
 # CHECK-NEXT: STR_PXI killed $p15, $sp, 4
 # CHECK-NEXT: STR_PXI killed $p14, $sp, 5
@@ -723,10 +635,10 @@ body:             |
 # CHECK-NEXT: STR_ZXI killed $z22, $sp, 3
 # CHECK:      STR_ZXI killed $z9, $sp, 16
 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 17
-# CHECK-COUNT-8: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1
 # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0
 # CHECK-NEXT: $sp = ANDXri killed $[[TMP]]
+# CHECK-COUNT-11: frame-setup CFI_INSTRUCTION
 
 # CHECK:      $sp = frame-destroy ADDVL_XXI $fp, -18
 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 4
@@ -742,11 +654,8 @@ body:             |
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: save_restore_sve_realign:
-# ASM:       .cfi_def_cfa_offset 16
 # ASM:       .cfi_def_cfa w29, 16
-# ASM-NEXT:  .cfi_offset w30, -8
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8  @ cfa - 16 - 8 * VG
+# ASM-NEXT:  .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8  @ cfa - 16 - 8 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9  @ cfa - 16 - 16 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10  @ cfa - 16 - 24 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11  @ cfa - 16 - 32 * VG
@@ -754,12 +663,11 @@ body:             |
 # ASM-NEXT:  .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13  @ cfa - 16 - 48 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14  @ cfa - 16 - 56 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15  @ cfa - 16 - 64 * VG
+# ASM-NEXT:  .cfi_offset w30, -8
+# ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO:      DW_CFA_def_cfa: reg29 +16
-# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
-# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO:      DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
@@ -767,6 +675,8 @@ body:             |
 # UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 name: save_restore_sve_realign
 stack:
   - { id: 0, stack-id: scalable-vector, size: 16, alignment: 16 }
@@ -835,29 +745,21 @@ body:             |
 #
 # CHECK:      bb.0.entry:
 # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16
-# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: STR_PXI killed $p15, $sp, 6
 # CHECK-NEXT: STR_PXI killed $p4, $sp, 7
 # CHECK-NEXT: STR_ZXI killed $z23, $sp, 1
 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -7
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION
-#
+# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION
 # ASM-LABEL: frame_layout:
-# ASM:       .cfi_def_cfa_offset 16
-# ASM-NEXT:  .cfi_offset w29, -16
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
-# ASM:       .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8  @ cfa - 16 - 8 * VG
 # ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG
+# ASM-NEXT:  .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8  @ cfa - 16 - 8 * VG
+# ASM-NEXT:  .cfi_offset w29, -16
 #
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +16
-# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 name: frame_layout
 stack:
   - { id: 0, type: default,    size:  32, alignment: 16, stack-id: scalable-vector }

diff  --git a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
index fff3c1b34ce33..e87c98eb556de 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
+++ b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
@@ -27,7 +27,6 @@ entry:
 
 ; CHECK-LABEL: b:
 ; CHECK:       str     d8, [sp, #-32]!
-; CHECK-NEXT:  .cfi_def_cfa_offset 32
 ; CHECK-NEXT:  stp     x29, x30, [sp, #8]
 ; CHECK-NEXT:  str     x19, [sp, #24]
 ; CHECK-NEXT:  add     x29, sp, #8

diff  --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll
index 8741b2381032d..059a9033ddc54 100644
--- a/llvm/test/CodeGen/AArch64/isinf.ll
+++ b/llvm/test/CodeGen/AArch64/isinf.ll
@@ -59,8 +59,8 @@ define i32 @replace_isinf_call_f128(fp128 %x) {
 ; CHECK-LABEL: replace_isinf_call_f128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    str q0, [sp]
 ; CHECK-NEXT:    ldrb w8, [sp, #15]

diff  --git a/llvm/test/CodeGen/AArch64/large-stack-cmp.ll b/llvm/test/CodeGen/AArch64/large-stack-cmp.ll
index e19c8531fd4d5..3f5ff01b55afc 100644
--- a/llvm/test/CodeGen/AArch64/large-stack-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/large-stack-cmp.ll
@@ -5,16 +5,14 @@ define void @foo() {
 ; CHECK-LABEL: foo:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    stp x28, x27, [sp, #-32]! ; 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #1, lsl #12 ; =4096
+; CHECK-NEXT:    sub sp, sp, #80
+; CHECK-NEXT:    .cfi_def_cfa_offset 4208
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_offset w27, -24
 ; CHECK-NEXT:    .cfi_offset w28, -32
-; CHECK-NEXT:    sub sp, sp, #1, lsl #12 ; =4096
-; CHECK-NEXT:    .cfi_def_cfa_offset 4128
-; CHECK-NEXT:    sub sp, sp, #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 4208
 ; CHECK-NEXT:    adds x8, sp, #1, lsl #12 ; =4096
 ; CHECK-NEXT:    cmn x8, #32
 ; CHECK-NEXT:    b.eq LBB0_2

diff  --git a/llvm/test/CodeGen/AArch64/large-stack.ll b/llvm/test/CodeGen/AArch64/large-stack.ll
index 23ef121bcc616..60cac6302fd7f 100644
--- a/llvm/test/CodeGen/AArch64/large-stack.ll
+++ b/llvm/test/CodeGen/AArch64/large-stack.ll
@@ -24,10 +24,8 @@ declare dso_local i32 @printf(i8*, ...)
 attributes #0 = { noinline optnone "frame-pointer"="all" }
 
 ; CHECK:                  stp	x[[SPILL_REG1:[0-9]+]], x[[SPILL_REG2:[0-9]+]], [sp, #-[[SPILL_OFFSET1:[0-9]+]]]
-; CHECK-NEXT:             .cfi_def_cfa_offset [[SPILL_OFFSET1]]
 ; CHECK-NEXT:             str	x[[SPILL_REG3:[0-9]+]], [sp, #[[SPILL_OFFSET2:[0-9]+]]]
 ; CHECK-NEXT:             mov	x[[FRAME:[0-9]+]], sp
-; CHECK-NEXT:             .cfi_def_cfa w[[FRAME]], [[SPILL_OFFSET1]]
 ; CHECK-COUNT-128:        sub	sp, sp, #[[STACK1:[0-9]+]], lsl #12
 ; CHECK-NEXT:             sub	sp, sp, #[[STACK2:[0-9]+]], lsl #12
 ; CHECK-NEXT:             sub	sp, sp, #[[STACK3:[0-9]+]]

diff  --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
index 4a9aa44ec839b..e8fbf89d9588f 100644
--- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
@@ -13,8 +13,8 @@ define i32 @main() local_unnamed_addr #1 {
 ; CHECK-LABEL: main:
 ; CHECK:       // %bb.0: // %for.body.lr.ph.i.i.i.i.i.i63
 ; CHECK-NEXT:    sub sp, sp, #112
-; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl _Z5setupv
 ; CHECK-NEXT:    movi v0.4s, #1

diff  --git a/llvm/test/CodeGen/AArch64/local_vars.ll b/llvm/test/CodeGen/AArch64/local_vars.ll
index 335482e4aec8f..cf5bdac552794 100644
--- a/llvm/test/CodeGen/AArch64/local_vars.ll
+++ b/llvm/test/CodeGen/AArch64/local_vars.ll
@@ -30,11 +30,7 @@ define void @trivial_fp_func() {
 
 ; CHECK-WITHFP-ARM64-LABEL: trivial_fp_func:
 ; CHECK-WITHFP-ARM64: stp x29, x30, [sp, #-16]!
-; CHECK-WITHFP-ARM64-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-WITHFP-ARM64-NEXT: mov x29, sp
-; CHECK-WITHFP-ARM64-NEXT: .cfi_def_cfa w29, 16
-; CHECK-WITHFP-ARM64-NEXT: .cfi_offset w30, -8
-; CHECK-WITHFP-ARM64-NEXT: .cfi_offset w29, -16
 
 ; Dont't really care, but it would be a Bad Thing if this came after the epilogue.
 ; CHECK-WITHFP-ARM64: bl foo

diff  --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
index 3af1c6c5ca461..70f7ef5561dce 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -9,8 +9,8 @@ define i32 @sink_load_and_copy(i32 %n) {
 ; CHECK-LABEL: sink_load_and_copy:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -63,8 +63,8 @@ define i32 @cant_sink_successive_call(i32 %n) {
 ; CHECK-LABEL: cant_sink_successive_call:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24
@@ -120,8 +120,8 @@ define i32 @cant_sink_successive_store(i32* nocapture readnone %store, i32 %n) {
 ; CHECK-LABEL: cant_sink_successive_store:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll b/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll
index 2348c22929d15..2ea906fa47ba9 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll
@@ -20,7 +20,6 @@ define void @_Z2f1v() minsize {
 ; NOOMIT-LABEL: _Z2f1v:
 ; NOOMIT:       // %bb.0: // %entry
 ; NOOMIT-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; NOOMIT-NEXT:    .cfi_def_cfa_offset 16
 ; NOOMIT-NEXT:    mov x29, sp
 ; NOOMIT-NEXT:    .cfi_def_cfa w29, 16
 ; NOOMIT-NEXT:    .cfi_offset w30, -8
@@ -47,7 +46,6 @@ define void @_Z2f2v() minsize {
 ; NOOMIT-LABEL: _Z2f2v:
 ; NOOMIT:       // %bb.0: // %entry
 ; NOOMIT-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; NOOMIT-NEXT:   .cfi_def_cfa_offset 16
 ; NOOMIT-NEXT:    mov x29, sp
 ; NOOMIT-NEXT:    .cfi_def_cfa w29, 16
 ; NOOMIT-NEXT:    .cfi_offset w30, -8

diff  --git a/llvm/test/CodeGen/AArch64/neg-imm.ll b/llvm/test/CodeGen/AArch64/neg-imm.ll
index 345713d4659f4..baf1463058664 100644
--- a/llvm/test/CodeGen/AArch64/neg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/neg-imm.ll
@@ -9,8 +9,8 @@ define void @test(i32 %px) {
 ; CHECK-LABEL: test:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32

diff  --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index 706520449a7bb..75c0355965ebc 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -9,11 +9,11 @@ define dso_local void @run_test() local_unnamed_addr #0 {
 ; CHECK-LABEL: run_test:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    stp d15, d14, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp d13, d12, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp d11, d10, [sp, #64] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset b8, -8
 ; CHECK-NEXT:    .cfi_offset b9, -16
 ; CHECK-NEXT:    .cfi_offset b10, -24
@@ -176,7 +176,6 @@ define dso_local void @run_test() local_unnamed_addr #0 {
 ; CHECK-NEXT:    str q0, [x8, #496]
 ; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
-; CH`ECK-NEXT:    .cfi_offset b9, -16
 entry:
   br label %for.cond1.preheader
 

diff  --git a/llvm/test/CodeGen/AArch64/settag.ll b/llvm/test/CodeGen/AArch64/settag.ll
index 004c32ec20288..b2f328297f806 100644
--- a/llvm/test/CodeGen/AArch64/settag.ll
+++ b/llvm/test/CodeGen/AArch64/settag.ll
@@ -148,8 +148,8 @@ define void @stg_alloca17() {
 ; CHECK-LABEL: stg_alloca17:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #288
-; CHECK-NEXT:    .cfi_def_cfa_offset 288
 ; CHECK-NEXT:    str x29, [sp, #272] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 288
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #256
 ; CHECK-NEXT:  .LBB11_1: // %entry

diff  --git a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
index 1c08079ca5526..3fc0c22c658bb 100644
--- a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
+++ b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
@@ -78,7 +78,6 @@ declare void @llvm.stackrestore(i8*)
 ; CHECK-LABEL: f
 
 ; CHECK:      stp x29, x30, [sp, #-16]!
-; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: mov x29, sp
 
 ; VLA allocation

diff  --git a/llvm/test/CodeGen/AArch64/sibling-call.ll b/llvm/test/CodeGen/AArch64/sibling-call.ll
index 7fcee6c3d1ade..4e37b55009b44 100644
--- a/llvm/test/CodeGen/AArch64/sibling-call.ll
+++ b/llvm/test/CodeGen/AArch64/sibling-call.ll
@@ -26,8 +26,8 @@ define dso_local void @caller_to8_from0() {
 ; CHECK-LABEL: caller_to8_from0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov w8, #42
 ; CHECK-NEXT:    str x8, [sp]
@@ -58,8 +58,8 @@ define dso_local void @caller_to16_from8([8 x i64], i64 %a) {
 ; CHECK-LABEL: caller_to16_from8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl callee_stack16
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
index 37e672567103e..aa3d9fb8cf6e7 100644
--- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll
+++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
@@ -17,10 +17,9 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
 ; CHECK-LABEL: test_nxv2i64_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-3
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cntd x8
 ; CHECK-NEXT:    mov w9, #2
 ; CHECK-NEXT:    sub x8, x8, #2
@@ -71,10 +70,9 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
 ; CHECK-LABEL: test_nxv2f64_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-3
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cntd x8
 ; CHECK-NEXT:    mov w9, #2
 ; CHECK-NEXT:    sub x8, x8, #2

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
index bb3397efbefa5..9307f075f7e8d 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
@@ -10,7 +10,6 @@ define i32 @test_stack_guard_remat2() ssp {
 ; CHECK-LABEL: test_stack_guard_remat2:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-NEXT:    add x29, sp, #48
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
index 8dd4138b0fb6b..2abed4fdc1256 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
@@ -42,12 +42,11 @@ define dso_local void @foo(i64 %t) local_unnamed_addr #0 {
 ; CHECK-LABEL:   foo:
 ; CHECK:         // %bb.0: // %entry
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    mrs x8, SP_EL0
 ; CHECK-NEXT:    lsl x9, x0, #2
 ; CHECK-NO-OFFSET:       ldr x8, [x8]

diff  --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
index ddd39369f733b..5eded3032a91e 100644
--- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
@@ -161,8 +161,8 @@ define i1 @test_cross_bb(i32 addrspace(1)* %a, i1 %external_cond) gc "statepoint
 ; CHECK-LABEL: test_cross_bb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
@@ -205,8 +205,8 @@ define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-exampl
 ; CHECK-LABEL: test_attributes:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov x18, xzr
 ; CHECK-NEXT:    ldr q0, [sp, #48]

diff  --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll
index 181b7a2854d9f..d46aad0509fda 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll
@@ -9,14 +9,8 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
 ; CHECK-NEXT:    tbz w0, #0, .LBB0_2
 ; CHECK-NEXT:  // %bb.1: // %if.then
 ; CHECK-NEXT:    stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x28, x19, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov x29, sp
-; CHECK-NEXT:    .cfi_def_cfa w29, 32
-; CHECK-NEXT:    .cfi_offset w19, -8
-; CHECK-NEXT:    .cfi_offset w28, -16
-; CHECK-NEXT:    .cfi_offset w30, -24
-; CHECK-NEXT:    .cfi_offset w29, -32
 ; CHECK-NEXT:    addvl sp, sp, #-18
 ; CHECK-NEXT:    str p15, [sp, #4, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p14, [sp, #5, mul vl] // 2-byte Folded Spill
@@ -46,6 +40,8 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
 ; CHECK-NEXT:    str z10, [sp, #15, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    str z9, [sp, #16, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    mov x19, sp
+; CHECK-NEXT:    .cfi_def_cfa w29, 32
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG
@@ -54,10 +50,13 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w28, -16
+; CHECK-NEXT:    .cfi_offset w30, -24
+; CHECK-NEXT:    .cfi_offset w29, -32
 ; CHECK-NEXT:    rdvl x9, #2
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    add x9, x9, #15
-; CHECK-NEXT:    mov x19, sp
 ; CHECK-NEXT:    and x9, x9, #0xfffffffffffffff0
 ; CHECK-NEXT:    sub x8, x8, x9
 ; CHECK-NEXT:    and x0, x8, #0xffffffffffffffe0

diff  --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
index 4e07f56f6263f..cd2bf99400bfe 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
@@ -60,18 +60,17 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) {
 ; CHECK-LABEL: extract_nxv14i1_nxv28i1_14:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    punpkhi p2.h, p1.b
 ; CHECK-NEXT:    punpklo p1.h, p1.b
 ; CHECK-NEXT:    punpklo p2.h, p2.b
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:    punpkhi p3.h, p1.b
 ; CHECK-NEXT:    punpkhi p4.h, p2.b
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    punpklo p1.h, p1.b
 ; CHECK-NEXT:    punpklo p2.h, p2.b
 ; CHECK-NEXT:    punpkhi p5.h, p3.b

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
index 191e9edc8d84f..1fea072f0e374 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
@@ -48,13 +48,12 @@ define void @select_v32i8(<32 x i8>* %a, <32 x i8>* %b, <32 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48 
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0 
 ; CHECK-NEXT:    ldr w8, [x2]
 ; CHECK-NEXT:    ptrue p0.b, vl32
 ; CHECK-NEXT:    ptrue p1.b
@@ -144,13 +143,12 @@ define void @select_v64i8(<64 x i8>* %a, <64 x i8>* %b, <64 x i1>* %c) #0 {
 ; VBITS_GE_512-LABEL: select_v64i8:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
 ; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
 ; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_512-NEXT:    sub x9, sp, #112 
-; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0 
 ; VBITS_GE_512-NEXT:    ldr x8, [x2]
 ; VBITS_GE_512-NEXT:    ptrue p0.b, vl64
 ; VBITS_GE_512-NEXT:    ptrue p1.b
@@ -304,13 +302,12 @@ define void @select_v128i8(<128 x i8>* %a, <128 x i8>* %b, <128 x i1>* %c) #0 {
 ; VBITS_GE_1024-LABEL: select_v128i8:
 ; VBITS_GE_1024:       // %bb.0:
 ; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
 ; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
 ; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_1024-NEXT:    sub x9, sp, #240 
-; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80 
 ; VBITS_GE_1024-NEXT:    ldr x8, [x2, #8]
 ; VBITS_GE_1024-NEXT:    ptrue p0.b, vl128
 ; VBITS_GE_1024-NEXT:    ptrue p1.b
@@ -593,13 +590,12 @@ define void @select_v256i8(<256 x i8>* %a, <256 x i8>* %b, <256 x i1>* %c) #0 {
 ; VBITS_GE_2048-LABEL: select_v256i8:
 ; VBITS_GE_2048:       // %bb.0:
 ; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_2048-NEXT:    sub x9, sp, #496
 ; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
 ; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_2048-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_2048-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_2048-NEXT:    sub x9, sp, #496 
-; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00 
 ; VBITS_GE_2048-NEXT:    ldr x8, [x2, #24]
 ; VBITS_GE_2048-NEXT:    ptrue p0.b, vl256
 ; VBITS_GE_2048-NEXT:    ptrue p1.b
@@ -1165,13 +1161,12 @@ define void @select_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48 
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0 
 ; CHECK-NEXT:    ldrh w8, [x2]
 ; CHECK-NEXT:    ptrue p0.h, vl16
 ; CHECK-NEXT:    ptrue p1.h
@@ -1229,13 +1224,12 @@ define void @select_v32i16(<32 x i16>* %a, <32 x i16>* %b, <32 x i1>* %c) #0 {
 ; VBITS_GE_512-LABEL: select_v32i16:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
 ; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
 ; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_512-NEXT:    sub x9, sp, #112 
-; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0 
 ; VBITS_GE_512-NEXT:    ldr w8, [x2]
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
 ; VBITS_GE_512-NEXT:    ptrue p1.h
@@ -1325,13 +1319,12 @@ define void @select_v64i16(<64 x i16>* %a, <64 x i16>* %b, <64 x i1>* %c) #0 {
 ; VBITS_GE_1024-LABEL: select_v64i16:
 ; VBITS_GE_1024:       // %bb.0:
 ; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
 ; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
 ; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_1024-NEXT:    sub x9, sp, #240 
-; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80 
 ; VBITS_GE_1024-NEXT:    ldr x8, [x2]
 ; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
 ; VBITS_GE_1024-NEXT:    ptrue p1.h
@@ -1485,13 +1478,12 @@ define void @select_v128i16(<128 x i16>* %a, <128 x i16>* %b, <128 x i1>* %c) #0
 ; VBITS_GE_2048-LABEL: select_v128i16:
 ; VBITS_GE_2048:       // %bb.0:
 ; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_2048-NEXT:    sub x9, sp, #496
 ; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
 ; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_2048-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_2048-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_2048-NEXT:    sub x9, sp, #496 
-; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00 
 ; VBITS_GE_2048-NEXT:    ldr x8, [x2, #8]
 ; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
 ; VBITS_GE_2048-NEXT:    ptrue p1.h
@@ -1799,13 +1791,12 @@ define void @select_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48 
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0 
 ; CHECK-NEXT:    ldrb w8, [x2]
 ; CHECK-NEXT:    ptrue p0.s, vl8
 ; CHECK-NEXT:    ptrue p1.s
@@ -1843,13 +1834,12 @@ define void @select_v16i32(<16 x i32>* %a, <16 x i32>* %b, <16 x i1>* %c) #0 {
 ; VBITS_GE_512-LABEL: select_v16i32:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
 ; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
 ; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_512-NEXT:    sub x9, sp, #112 
-; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0 
 ; VBITS_GE_512-NEXT:    ldrh w8, [x2]
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
 ; VBITS_GE_512-NEXT:    ptrue p1.s
@@ -1899,13 +1889,12 @@ define void @select_v32i32(<32 x i32>* %a, <32 x i32>* %b, <32 x i1>* %c) #0 {
 ; VBITS_GE_1024-LABEL: select_v32i32:
 ; VBITS_GE_1024:       // %bb.0:
 ; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
 ; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
 ; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_1024-NEXT:    sub x9, sp, #240 
-; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80 
 ; VBITS_GE_1024-NEXT:    ldr w8, [x2]
 ; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
 ; VBITS_GE_1024-NEXT:    ptrue p1.s
@@ -1979,13 +1968,12 @@ define void @select_v64i32(<64 x i32>* %a, <64 x i32>* %b, <64 x i1>* %c) #0 {
 ; VBITS_GE_2048-LABEL: select_v64i32:
 ; VBITS_GE_2048:       // %bb.0:
 ; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_2048-NEXT:    sub x9, sp, #496
 ; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
 ; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_2048-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_2048-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_2048-NEXT:    sub x9, sp, #496 
-; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00 
 ; VBITS_GE_2048-NEXT:    ldr x8, [x2]
 ; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
 ; VBITS_GE_2048-NEXT:    ptrue p1.s
@@ -2133,13 +2121,12 @@ define void @select_v4i64(<4 x i64>* %a, <4 x i64>* %b, <4 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48 
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0 
 ; CHECK-NEXT:    ldrb w8, [x2]
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    ptrue p1.d
@@ -2174,13 +2161,12 @@ define void @select_v8i64(<8 x i64>* %a, <8 x i64>* %b, <8 x i1>* %c) #0 {
 ; VBITS_GE_512-LABEL: select_v8i64:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
 ; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
 ; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_512-NEXT:    sub x9, sp, #112 
-; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0 
 ; VBITS_GE_512-NEXT:    ldrb w8, [x2]
 ; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
 ; VBITS_GE_512-NEXT:    ptrue p1.d
@@ -2225,13 +2211,12 @@ define void @select_v16i64(<16 x i64>* %a, <16 x i64>* %b, <16 x i1>* %c) #0 {
 ; VBITS_GE_1024-LABEL: select_v16i64:
 ; VBITS_GE_1024:       // %bb.0:
 ; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
 ; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
 ; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_1024-NEXT:    sub x9, sp, #240 
-; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80 
 ; VBITS_GE_1024-NEXT:    ldrh w8, [x2]
 ; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
 ; VBITS_GE_1024-NEXT:    ptrue p1.d
@@ -2296,13 +2281,12 @@ define void @select_v32i64(<32 x i64>* %a, <32 x i64>* %b, <32 x i1>* %c) #0 {
 ; VBITS_GE_2048-LABEL: select_v32i64:
 ; VBITS_GE_2048:       // %bb.0:
 ; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_GE_2048-NEXT:    sub x9, sp, #496
 ; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
 ; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_GE_2048-NEXT:    .cfi_offset w30, -8
 ; VBITS_GE_2048-NEXT:    .cfi_offset w29, -16
-; VBITS_GE_2048-NEXT:    sub x9, sp, #496 
-; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00 
 ; VBITS_GE_2048-NEXT:    ldr w8, [x2]
 ; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
 ; VBITS_GE_2048-NEXT:    ptrue p1.d

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
index 4156689233fc2..313051ac3da3a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
@@ -195,13 +195,12 @@ define void @test_rev_elts_fail(<4 x i64>* %a) #1 {
 ; CHECK-LABEL: test_rev_elts_fail:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    mov z1.d, z0.d[2]
@@ -232,13 +231,12 @@ define void @test_revv8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: test_revv8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    ptrue p0.s, vl8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    mov w8, v0.s[1]
@@ -349,13 +347,12 @@ define void @test_rev_fail(<16 x i16>* %a) #1 {
 ; CHECK-LABEL: test_rev_fail:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    mov z1.h, z0.h[8]
@@ -414,13 +411,12 @@ define void @test_revv8i16v8i16(<8 x i16>* %a, <8 x i16>* %b, <16 x i16>* %c) #1
 ; CHECK-LABEL: test_revv8i16v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    orr x9, x8, #0x1e

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
index e497ad2b5b3b1..2bf85201321ac 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
@@ -132,13 +132,12 @@ define void @zip_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
 ; VBITS_EQ_512-LABEL: zip_v4f64:
 ; VBITS_EQ_512:       // %bb.0:
 ; VBITS_EQ_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_512-NEXT:    .cfi_def_cfa_offset 16
+; VBITS_EQ_512-NEXT:    sub x9, sp, #48
 ; VBITS_EQ_512-NEXT:    mov x29, sp
+; VBITS_EQ_512-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; VBITS_EQ_512-NEXT:    .cfi_def_cfa w29, 16
 ; VBITS_EQ_512-NEXT:    .cfi_offset w30, -8
 ; VBITS_EQ_512-NEXT:    .cfi_offset w29, -16
-; VBITS_EQ_512-NEXT:    sub x9, sp, #48
-; VBITS_EQ_512-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; VBITS_EQ_512-NEXT:    ptrue p0.d, vl4
 ; VBITS_EQ_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; VBITS_EQ_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
@@ -649,13 +648,12 @@ define void @zip_vscale2_4(<4 x double>* %a, <4 x double>* %b) #2 {
 ; CHECK-LABEL: zip_vscale2_4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
index 45b33f5acae7a..7643da08ff71b 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
@@ -932,13 +932,12 @@ define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: shuffle_ext_invalid:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    sub x9, sp, #48
 ; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    sub x9, sp, #48
-; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]

diff  --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
index f08bb15f84e99..9116a45224e1a 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -500,10 +500,9 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val,
 ; CHECK-LABEL: test_predicate_insert_32xi1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #-1
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    sxtw x9, w1

diff  --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index d717585d4e480..b0b8dd15780cc 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -190,10 +190,9 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16
 ; CHECK-LABEL: insert_v2i64_nxv16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
@@ -232,10 +231,9 @@ define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
 ; CHECK-LABEL: insert_v2i64_nxv16i64_lo2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    str q0, [sp, #16]

diff  --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
index 7d979ffb1883b..f0df5c9e6893a 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
@@ -44,10 +44,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2)
@@ -100,10 +99,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2)
@@ -156,10 +154,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
     ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
@@ -225,10 +222,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
     ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4

diff  --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
index 7e30d69b5ca45..a5701a6709284 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
@@ -44,10 +44,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, 7 :: (load (s16) from %ir.object)
     ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, 7 :: (load (s32) from %ir.object, align 8)
@@ -84,10 +83,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, -8 :: (load (s16) from %ir.object)
     ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, -8 :: (load (s32) from %ir.object)
@@ -124,10 +122,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
     ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
@@ -172,10 +169,9 @@ body:             |
     ; CHECK: liveins: $p0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
-    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
     ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+    ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
     ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2)
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1

diff  --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
index dc20a123a15a2..1fb42e2d98be6 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
@@ -50,20 +50,19 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1>
 ; CHECK-LABEL: add_nxv64i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue p8.b
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ldr p4, [x1]
 ; CHECK-NEXT:    ldr p5, [x0]
 ; CHECK-NEXT:    ldr p6, [x3]
 ; CHECK-NEXT:    ldr p7, [x2]
+; CHECK-NEXT:    ptrue p8.b
 ; CHECK-NEXT:    eor p0.b, p8/z, p0.b, p5.b
 ; CHECK-NEXT:    eor p1.b, p8/z, p1.b, p4.b
 ; CHECK-NEXT:    eor p2.b, p8/z, p2.b, p7.b
@@ -131,20 +130,19 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1>
 ; CHECK-LABEL: sub_nxv64i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue p8.b
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    ldr p4, [x1]
 ; CHECK-NEXT:    ldr p5, [x0]
 ; CHECK-NEXT:    ldr p6, [x3]
 ; CHECK-NEXT:    ldr p7, [x2]
+; CHECK-NEXT:    ptrue p8.b
 ; CHECK-NEXT:    eor p0.b, p8/z, p0.b, p5.b
 ; CHECK-NEXT:    eor p1.b, p8/z, p1.b, p4.b
 ; CHECK-NEXT:    eor p2.b, p8/z, p2.b, p7.b

diff  --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
index 1b401ee3da8a0..accbb533bd8f5 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
@@ -20,10 +20,9 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) {
 ; CHECK-LABEL: split_extract_32i8_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #-1
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
@@ -46,10 +45,9 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) {
 ; CHECK-LABEL: split_extract_16i16_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #-1
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
@@ -72,10 +70,9 @@ define i32 @split_extract_8i32_idx(<vscale x 8 x i32> %a, i32 %idx) {
 ; CHECK-LABEL: split_extract_8i32_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cnth x8
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
@@ -98,10 +95,9 @@ define i64 @split_extract_8i64_idx(<vscale x 8 x i64> %a, i32 %idx) {
 ; CHECK-LABEL: split_extract_8i64_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cnth x8
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
@@ -146,10 +142,9 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) {
 ; CHECK-LABEL: split_extract_16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #-1
 ; CHECK-NEXT:    mov w9, #128
 ; CHECK-NEXT:    ptrue p0.h
@@ -171,10 +166,9 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) {
 ; CHECK-LABEL: split_extract_16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #-1
 ; CHECK-NEXT:    mov w9, #34464
 ; CHECK-NEXT:    movk w9, #1, lsl #16
@@ -199,10 +193,9 @@ define i64 @split_extract_4i64(<vscale x 4 x i64> %a) {
 ; CHECK-LABEL: split_extract_4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cntw x8
 ; CHECK-NEXT:    mov w9, #10
 ; CHECK-NEXT:    sub x8, x8, #1

diff  --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
index 2e4c960c2c4f3..0465da7e7093d 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
@@ -20,10 +20,9 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt,
 ; CHECK-LABEL: split_insert_32i8_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #-1
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    ptrue p0.b
@@ -46,10 +45,9 @@ define <vscale x 8 x float> @split_insert_8f32_idx(<vscale x 8 x float> %a, floa
 ; CHECK-LABEL: split_insert_8f32_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cnth x8
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    sub x8, x8, #1
@@ -72,10 +70,9 @@ define <vscale x 8 x i64> @split_insert_8i64_idx(<vscale x 8 x i64> %a, i64 %elt
 ; CHECK-LABEL: split_insert_8i64_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cnth x8
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    sub x8, x8, #1
@@ -135,10 +132,9 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt)
 ; CHECK-LABEL: split_insert_32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-4
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    mov x8, #-1
 ; CHECK-NEXT:    mov w9, #128
 ; CHECK-NEXT:    ptrue p0.h
@@ -166,10 +162,9 @@ define <vscale x 8 x i32> @split_insert_8i32(<vscale x 8 x i32> %a, i32 %elt) {
 ; CHECK-LABEL: split_insert_8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cnth x8
 ; CHECK-NEXT:    mov w9, #16960
 ; CHECK-NEXT:    movk w9, #15, lsl #16

diff  --git a/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll b/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll
index cf91ae9c50539..e0d235dfd5b43 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll
@@ -20,13 +20,12 @@ define i1 @andv_nxv64i1(<vscale x 64 x i1> %a) {
 ; CHECK-LABEL: andv_nxv64i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    and p1.b, p1/z, p1.b, p3.b
 ; CHECK-NEXT:    and p0.b, p0/z, p0.b, p2.b
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue p4.b
 ; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
 ; CHECK-NEXT:    not p0.b, p4/z, p0.b

diff  --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll
index b3c2aad3e9057..cbc7063a1a229 100644
--- a/llvm/test/CodeGen/AArch64/sve-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll
@@ -110,16 +110,16 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) {
 ; CHECK-LABEL: trunc_i64toi1_split3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    and z7.d, z7.d, #0x1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    and z6.d, z6.d, #0x1
 ; CHECK-NEXT:    and z5.d, z5.d, #0x1
 ; CHECK-NEXT:    and z4.d, z4.d, #0x1
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    cmpne p1.d, p0/z, z7.d, #0
 ; CHECK-NEXT:    cmpne p2.d, p0/z, z6.d, #0
 ; CHECK-NEXT:    cmpne p3.d, p0/z, z5.d, #0
@@ -128,7 +128,6 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) {
 ; CHECK-NEXT:    and z2.d, z2.d, #0x1
 ; CHECK-NEXT:    and z1.d, z1.d, #0x1
 ; CHECK-NEXT:    and z0.d, z0.d, #0x1
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
 ; CHECK-NEXT:    uzp1 p2.s, p4.s, p3.s
 ; CHECK-NEXT:    cmpne p3.d, p0/z, z3.d, #0

diff  --git a/llvm/test/CodeGen/AArch64/swifterror.ll b/llvm/test/CodeGen/AArch64/swifterror.ll
index c51db5f37aeb3..729a587a3f995 100644
--- a/llvm/test/CodeGen/AArch64/swifterror.ll
+++ b/llvm/test/CodeGen/AArch64/swifterror.ll
@@ -14,7 +14,6 @@ define float @foo(%swift_error** swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: foo:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-APPLE-NEXT:    mov x29, sp
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
@@ -31,7 +30,6 @@ define float @foo(%swift_error** swifterror %error_ptr_ref) {
 ; CHECK-O0-AARCH64-LABEL: foo:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-O0-AARCH64-NEXT:    mov x29, sp
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_offset w30, -8
@@ -75,7 +73,6 @@ define float @caller(i8* %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #64
-; CHECK-APPLE-AARCH64-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-APPLE-AARCH64-NEXT:    stp x22, x21, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x20, x19, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
@@ -107,7 +104,6 @@ define float @caller(i8* %error_ref) {
 ; CHECK-O0-AARCH64-LABEL: caller:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #64
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-O0-AARCH64-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #48
@@ -139,7 +135,6 @@ define float @caller(i8* %error_ref) {
 ; CHECK-APPLE-ARM64_32-LABEL: caller:
 ; CHECK-APPLE-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-APPLE-ARM64_32-NEXT:    sub sp, sp, #64
-; CHECK-APPLE-ARM64_32-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x22, x21, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x20, x19, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
@@ -171,9 +166,9 @@ define float @caller(i8* %error_ref) {
 ; CHECK-O0-ARM64_32-LABEL: caller:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #64
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-O0-ARM64_32-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -8
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w29, -16
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w21, -24
@@ -224,7 +219,6 @@ define float @caller2(i8* %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller2:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #80
-; CHECK-APPLE-AARCH64-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-APPLE-AARCH64-NEXT:    stp d9, d8, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x20, x19, [sp, #48] ; 16-byte Folded Spill
@@ -267,7 +261,6 @@ define float @caller2(i8* %error_ref) {
 ; CHECK-O0-AARCH64-LABEL: caller2:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #64
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-O0-AARCH64-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #48
@@ -308,7 +301,6 @@ define float @caller2(i8* %error_ref) {
 ; CHECK-APPLE-ARM64_32-LABEL: caller2:
 ; CHECK-APPLE-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-APPLE-ARM64_32-NEXT:    sub sp, sp, #80
-; CHECK-APPLE-ARM64_32-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-APPLE-ARM64_32-NEXT:    stp d9, d8, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x20, x19, [sp, #48] ; 16-byte Folded Spill
@@ -351,9 +343,9 @@ define float @caller2(i8* %error_ref) {
 ; CHECK-O0-ARM64_32-LABEL: caller2:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #64
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-O0-ARM64_32-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -8
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w29, -16
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w21, -24
@@ -419,7 +411,6 @@ define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) {
 ; CHECK-APPLE-LABEL: foo_if:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-APPLE-NEXT:    mov x29, sp
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
@@ -442,7 +433,6 @@ define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) {
 ; CHECK-O0-AARCH64-LABEL: foo_if:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #32
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -471,8 +461,8 @@ define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) {
 ; CHECK-O0-ARM64_32-LABEL: foo_if:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #32
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-ARM64_32-NEXT:    str x30, [sp, #16] ; 8-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -16
 ; CHECK-O0-ARM64_32-NEXT:    str x21, [sp, #8] ; 8-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    cbz w0, LBB3_2
@@ -518,7 +508,6 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
 ; CHECK-APPLE-LABEL: foo_loop:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp d9, d8, [sp, #-48]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-APPLE-NEXT:    stp x20, x19, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    add x29, sp, #32
@@ -559,7 +548,6 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
 ; CHECK-O0-AARCH64-LABEL: foo_loop:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #48
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #32
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -603,8 +591,8 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
 ; CHECK-O0-ARM64_32-LABEL: foo_loop:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #48
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-ARM64_32-NEXT:    str x30, [sp, #32] ; 8-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -16
 ; CHECK-O0-ARM64_32-NEXT:    str s0, [sp, #16] ; 4-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    str w0, [sp, #20] ; 4-byte Folded Spill
@@ -679,7 +667,6 @@ define void @foo_sret(%struct.S* sret(%struct.S) %agg.result, i32 %val1, %swift_
 ; CHECK-APPLE-LABEL: foo_sret:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    add x29, sp, #16
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
@@ -702,7 +689,6 @@ define void @foo_sret(%struct.S* sret(%struct.S) %agg.result, i32 %val1, %swift_
 ; CHECK-O0-AARCH64-LABEL: foo_sret:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #32
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -727,8 +713,8 @@ define void @foo_sret(%struct.S* sret(%struct.S) %agg.result, i32 %val1, %swift_
 ; CHECK-O0-ARM64_32-LABEL: foo_sret:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #32
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-ARM64_32-NEXT:    str x30, [sp, #16] ; 8-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -16
 ; CHECK-O0-ARM64_32-NEXT:    str w0, [sp, #12] ; 4-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    str x8, [sp] ; 8-byte Folded Spill
@@ -764,7 +750,6 @@ define float @caller3(i8* %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller3:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #80
-; CHECK-APPLE-AARCH64-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-APPLE-AARCH64-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x20, x19, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
@@ -798,7 +783,6 @@ define float @caller3(i8* %error_ref) {
 ; CHECK-O0-AARCH64-LABEL: caller3:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #80
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-O0-AARCH64-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #64
@@ -832,7 +816,6 @@ define float @caller3(i8* %error_ref) {
 ; CHECK-APPLE-ARM64_32-LABEL: caller3:
 ; CHECK-APPLE-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-APPLE-ARM64_32-NEXT:    sub sp, sp, #80
-; CHECK-APPLE-ARM64_32-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x20, x19, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
@@ -866,9 +849,9 @@ define float @caller3(i8* %error_ref) {
 ; CHECK-O0-ARM64_32-LABEL: caller3:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #80
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-O0-ARM64_32-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -8
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w29, -16
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w21, -24
@@ -927,7 +910,6 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
 ; CHECK-APPLE-AARCH64-LABEL: foo_vararg:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #48
-; CHECK-APPLE-AARCH64-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-APPLE-AARCH64-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    add x29, sp, #32
 ; CHECK-APPLE-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -954,7 +936,6 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
 ; CHECK-O0-AARCH64-LABEL: foo_vararg:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #48
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #32
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -991,7 +972,6 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
 ; CHECK-APPLE-ARM64_32-LABEL: foo_vararg:
 ; CHECK-APPLE-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-APPLE-ARM64_32-NEXT:    sub sp, sp, #48
-; CHECK-APPLE-ARM64_32-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    add x29, sp, #32
 ; CHECK-APPLE-ARM64_32-NEXT:    .cfi_def_cfa w29, 16
@@ -1023,8 +1003,8 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
 ; CHECK-O0-ARM64_32-LABEL: foo_vararg:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #48
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-ARM64_32-NEXT:    str x30, [sp, #32] ; 8-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -16
 ; CHECK-O0-ARM64_32-NEXT:    mov w8, #16
 ; CHECK-O0-ARM64_32-NEXT:    mov w0, w8
@@ -1095,7 +1075,6 @@ define float @caller4(i8* %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller4:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #96
-; CHECK-APPLE-AARCH64-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-APPLE-AARCH64-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x20, x19, [sp, #64] ; 16-byte Folded Spill
 ; CHECK-APPLE-AARCH64-NEXT:    stp x29, x30, [sp, #80] ; 16-byte Folded Spill
@@ -1134,7 +1113,6 @@ define float @caller4(i8* %error_ref) {
 ; CHECK-O0-AARCH64-LABEL: caller4:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #96
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-O0-AARCH64-NEXT:    stp x22, x21, [sp, #64] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #80] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #80
@@ -1182,7 +1160,6 @@ define float @caller4(i8* %error_ref) {
 ; CHECK-APPLE-ARM64_32-LABEL: caller4:
 ; CHECK-APPLE-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-APPLE-ARM64_32-NEXT:    sub sp, sp, #80
-; CHECK-APPLE-ARM64_32-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x20, x19, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-APPLE-ARM64_32-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
@@ -1223,9 +1200,9 @@ define float @caller4(i8* %error_ref) {
 ; CHECK-O0-ARM64_32-LABEL: caller4:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #80
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-O0-ARM64_32-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 80
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -8
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w29, -16
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w21, -24
@@ -1302,7 +1279,6 @@ define float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: tailcallswifterror:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-APPLE-NEXT:    mov x29, sp
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
@@ -1314,7 +1290,6 @@ define float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref) {
 ; CHECK-O0-AARCH64-LABEL: tailcallswifterror:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-O0-AARCH64-NEXT:    mov x29, sp
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_offset w30, -8
@@ -1339,7 +1314,6 @@ define swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %erro
 ; CHECK-APPLE-LABEL: tailcallswifterror_swiftcc:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-APPLE-NEXT:    mov x29, sp
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
@@ -1351,7 +1325,6 @@ define swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %erro
 ; CHECK-O0-AARCH64-LABEL: tailcallswifterror_swiftcc:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-O0-AARCH64-NEXT:    mov x29, sp
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_offset w30, -8
@@ -1377,7 +1350,6 @@ define swiftcc void @swifterror_clobber(%swift_error** nocapture swifterror %err
 ; CHECK-APPLE-LABEL: swifterror_clobber:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-APPLE-NEXT:    mov x29, sp
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-APPLE-NEXT:    .cfi_offset w30, -8
@@ -1393,7 +1365,6 @@ define swiftcc void @swifterror_clobber(%swift_error** nocapture swifterror %err
 ; CHECK-O0-AARCH64-LABEL: swifterror_clobber:
 ; CHECK-O0-AARCH64:       ; %bb.0:
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #32
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -1427,7 +1398,6 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
 ; CHECK-APPLE-LABEL: swifterror_reg_clobber:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    stp x22, x21, [sp, #-32]! ; 16-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    add x29, sp, #16
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
@@ -1445,7 +1415,6 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
 ; CHECK-O0-AARCH64-LABEL: swifterror_reg_clobber:
 ; CHECK-O0-AARCH64:       ; %bb.0:
 ; CHECK-O0-AARCH64-NEXT:    stp x22, x21, [sp, #-32]! ; 16-byte Folded Spill
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -1479,7 +1448,6 @@ define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* s
 ; CHECK-APPLE-LABEL: params_in_reg:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    sub sp, sp, #112
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-APPLE-NEXT:    stp x21, x28, [sp, #8] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    stp x27, x26, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    stp x25, x24, [sp, #48] ; 16-byte Folded Spill
@@ -1541,7 +1509,6 @@ define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* s
 ; CHECK-O0-AARCH64-LABEL: params_in_reg:
 ; CHECK-O0-AARCH64:       ; %bb.0:
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #128
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-O0-AARCH64-NEXT:    str x20, [sp, #96] ; 8-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #112] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #112
@@ -1599,8 +1566,8 @@ define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* s
 ; CHECK-O0-ARM64_32-LABEL: params_in_reg:
 ; CHECK-O0-ARM64_32:       ; %bb.0:
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #112
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-O0-ARM64_32-NEXT:    stp x20, x30, [sp, #96] ; 16-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -8
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w20, -16
 ; CHECK-O0-ARM64_32-NEXT:    str x21, [sp, #80] ; 8-byte Folded Spill
@@ -1660,7 +1627,6 @@ define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_
 ; CHECK-APPLE-LABEL: params_and_return_in_reg:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    sub sp, sp, #128
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 128
 ; CHECK-APPLE-NEXT:    stp x20, x28, [sp, #24] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    stp x27, x26, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    stp x25, x24, [sp, #64] ; 16-byte Folded Spill
@@ -1751,7 +1717,6 @@ define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_
 ; CHECK-O0-AARCH64-LABEL: params_and_return_in_reg:
 ; CHECK-O0-AARCH64:       ; %bb.0:
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #272
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 272
 ; CHECK-O0-AARCH64-NEXT:    stp x28, x20, [sp, #240] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #256] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #256
@@ -1859,9 +1824,9 @@ define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_
 ; CHECK-O0-ARM64_32-LABEL: params_and_return_in_reg:
 ; CHECK-O0-ARM64_32:       ; %bb.0:
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #272
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 272
 ; CHECK-O0-ARM64_32-NEXT:    str x28, [sp, #240] ; 8-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    stp x20, x30, [sp, #256] ; 16-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 272
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -8
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w20, -16
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w28, -32
@@ -1978,7 +1943,6 @@ define swiftcc void @tailcall_from_swifterror(%swift_error** swifterror %error_p
 ; CHECK-APPLE-LABEL: tailcall_from_swifterror:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    add x29, sp, #16
 ; CHECK-APPLE-NEXT:    .cfi_def_cfa w29, 16
@@ -1996,7 +1960,6 @@ define swiftcc void @tailcall_from_swifterror(%swift_error** swifterror %error_p
 ; CHECK-O0-AARCH64-LABEL: tailcall_from_swifterror:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #32
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #16
 ; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa w29, 16
@@ -2013,8 +1976,8 @@ define swiftcc void @tailcall_from_swifterror(%swift_error** swifterror %error_p
 ; CHECK-O0-ARM64_32-LABEL: tailcall_from_swifterror:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #32
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-ARM64_32-NEXT:    str x30, [sp, #16] ; 8-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -16
 ; CHECK-O0-ARM64_32-NEXT:    str x21, [sp, #8] ; 8-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    mov x0, xzr
@@ -2035,7 +1998,6 @@ define swiftcc %swift_error* @testAssign(i8* %error_ref) {
 ; CHECK-APPLE-LABEL: testAssign:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    sub sp, sp, #48
-; CHECK-APPLE-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-APPLE-NEXT:    stp x22, x21, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-APPLE-NEXT:    add x29, sp, #32
@@ -2055,7 +2017,6 @@ define swiftcc %swift_error* @testAssign(i8* %error_ref) {
 ; CHECK-O0-AARCH64-LABEL: testAssign:
 ; CHECK-O0-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-O0-AARCH64-NEXT:    sub sp, sp, #48
-; CHECK-O0-AARCH64-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-AARCH64-NEXT:    stp x22, x21, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-O0-AARCH64-NEXT:    add x29, sp, #32
@@ -2078,9 +2039,9 @@ define swiftcc %swift_error* @testAssign(i8* %error_ref) {
 ; CHECK-O0-ARM64_32-LABEL: testAssign:
 ; CHECK-O0-ARM64_32:       ; %bb.0: ; %entry
 ; CHECK-O0-ARM64_32-NEXT:    sub sp, sp, #48
-; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-ARM64_32-NEXT:    stp x22, x21, [sp, #16] ; 16-byte Folded Spill
 ; CHECK-O0-ARM64_32-NEXT:    stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; CHECK-O0-ARM64_32-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w30, -8
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w29, -16
 ; CHECK-O0-ARM64_32-NEXT:    .cfi_offset w21, -24

diff  --git a/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir b/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
index 79d7f4a420056..276ba08834a0a 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
@@ -53,7 +53,6 @@ body:             |
   ; CHECK:   successors: %bb.1, %bb.2
   ; CHECK:   liveins: $q0, $q22, $q23, $q20, $q21, $q18, $q19, $q16, $q17, $q14, $q15, $q12, $q13, $q10, $q11, $q8, $q9, $lr, $fp
   ; CHECK:   $sp = frame-setup SUBXri $sp, 304, 0
-  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 304
   ; CHECK:   frame-setup STPQi killed $q23, killed $q22, $sp, 2 :: (store (s128) into %stack.19), (store (s128) into %stack.18)
   ; CHECK:   frame-setup STPQi killed $q21, killed $q20, $sp, 4 :: (store (s128) into %stack.17), (store (s128) into %stack.16)
   ; CHECK:   frame-setup STPQi killed $q19, killed $q18, $sp, 6 :: (store (s128) into %stack.15), (store (s128) into %stack.14)
@@ -63,6 +62,7 @@ body:             |
   ; CHECK:   frame-setup STPQi killed $q11, killed $q10, $sp, 14 :: (store (s128) into %stack.7), (store (s128) into %stack.6)
   ; CHECK:   frame-setup STPQi killed $q9, killed $q8, $sp, 16 :: (store (s128) into %stack.5), (store (s128) into %stack.4)
   ; CHECK:   frame-setup STPXi killed $fp, killed $lr, $sp, 36 :: (store (s64) into %stack.3), (store (s64) into %stack.2)
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 304
   ; CHECK:   frame-setup CFI_INSTRUCTION offset $w30, -8
   ; CHECK:   frame-setup CFI_INSTRUCTION offset $w29, -16
   ; CHECK:   frame-setup CFI_INSTRUCTION offset $b8, -32

diff  --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index a5eb53c554e61..3e177043321ee 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -9,11 +9,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) pe
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w30, -8
-; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    addvl sp, sp, #-18
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
 ; CHECK-NEXT:    str p15, [sp, #4, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p14, [sp, #5, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -42,6 +38,8 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) pe
 ; CHECK-NEXT:    str z10, [sp, #15, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    str z9, [sp, #16, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-2
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
@@ -50,8 +48,8 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) pe
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
 ; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
-; CHECK-NEXT:    addvl sp, sp, #-2
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    str z0, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    bl may_throw_sve
@@ -133,11 +131,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) pe
 ; GISEL-NEXT:    .cfi_startproc
 ; GISEL-NEXT:  // %bb.0:
 ; GISEL-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; GISEL-NEXT:    .cfi_def_cfa_offset 16
-; GISEL-NEXT:    .cfi_offset w30, -8
-; GISEL-NEXT:    .cfi_offset w29, -16
 ; GISEL-NEXT:    addvl sp, sp, #-18
-; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
 ; GISEL-NEXT:    str p15, [sp, #4, mul vl] // 2-byte Folded Spill
 ; GISEL-NEXT:    str p14, [sp, #5, mul vl] // 2-byte Folded Spill
 ; GISEL-NEXT:    str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -166,6 +160,8 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) pe
 ; GISEL-NEXT:    str z10, [sp, #15, mul vl] // 16-byte Folded Spill
 ; GISEL-NEXT:    str z9, [sp, #16, mul vl] // 16-byte Folded Spill
 ; GISEL-NEXT:    str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    addvl sp, sp, #-2
+; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
 ; GISEL-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
 ; GISEL-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
 ; GISEL-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
@@ -174,8 +170,8 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) pe
 ; GISEL-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
 ; GISEL-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
 ; GISEL-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
-; GISEL-NEXT:    addvl sp, sp, #-2
-; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
+; GISEL-NEXT:    .cfi_offset w30, -8
+; GISEL-NEXT:    .cfi_offset w29, -16
 ; GISEL-NEXT:    str z0, [sp] // 16-byte Folded Spill
 ; GISEL-NEXT:  .Ltmp0:
 ; GISEL-NEXT:    bl may_throw_sve
@@ -269,7 +265,6 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #304
-; CHECK-NEXT:    .cfi_def_cfa_offset 304
 ; CHECK-NEXT:    stp q23, q22, [sp, #32] // 32-byte Folded Spill
 ; CHECK-NEXT:    stp q21, q20, [sp, #64] // 32-byte Folded Spill
 ; CHECK-NEXT:    stp q19, q18, [sp, #96] // 32-byte Folded Spill
@@ -279,6 +274,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK-NEXT:    stp q11, q10, [sp, #224] // 32-byte Folded Spill
 ; CHECK-NEXT:    stp q9, q8, [sp, #256] // 32-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #288] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 304
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    .cfi_offset b8, -32
@@ -336,7 +332,6 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; GISEL-NEXT:    .cfi_startproc
 ; GISEL-NEXT:  // %bb.0:
 ; GISEL-NEXT:    sub sp, sp, #304
-; GISEL-NEXT:    .cfi_def_cfa_offset 304
 ; GISEL-NEXT:    stp q23, q22, [sp, #32] // 32-byte Folded Spill
 ; GISEL-NEXT:    stp q21, q20, [sp, #64] // 32-byte Folded Spill
 ; GISEL-NEXT:    stp q19, q18, [sp, #96] // 32-byte Folded Spill
@@ -346,6 +341,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; GISEL-NEXT:    stp q11, q10, [sp, #224] // 32-byte Folded Spill
 ; GISEL-NEXT:    stp q9, q8, [sp, #256] // 32-byte Folded Spill
 ; GISEL-NEXT:    stp x29, x30, [sp, #288] // 16-byte Folded Spill
+; GISEL-NEXT:    .cfi_def_cfa_offset 304
 ; GISEL-NEXT:    .cfi_offset w30, -8
 ; GISEL-NEXT:    .cfi_offset w29, -16
 ; GISEL-NEXT:    .cfi_offset b8, -32

diff  --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
index 2bc064d986f99..97e877211b120 100644
--- a/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
@@ -142,8 +142,8 @@ define void @test4(i32 %n) personality i32 (...)* @__FrameHandler {
 ; CHECK-NEXT:    .cfi_lsda 0, .Lexception0
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w21, -24

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
index df33af22e1a75..8b22834bc595c 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
@@ -65,7 +65,6 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-LABEL: check_boundaries:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
@@ -98,7 +97,6 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-LABEL: main:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
index 9765cbace729a..4267291471c1d 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
@@ -6,7 +6,6 @@ define dso_local i32 @check_boundaries() #0 {
 ; CHECK-LABEL: check_boundaries:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
@@ -75,7 +74,6 @@ define dso_local i32 @main() #0 {
 ; CHECK-LABEL: main:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
