[llvm] 63c9aca - Revert "[AArch64] Async unwind - function epilogues"

Momchil Velikov via llvm-commits <llvm-commits at lists.llvm.org>
Wed Mar 2 07:02:07 PST 2022


Author: Momchil Velikov
Date: 2022-03-02T15:01:57Z
New Revision: 63c9aca12afad6745173861f5315ee96fbec72ed

URL: https://github.com/llvm/llvm-project/commit/63c9aca12afad6745173861f5315ee96fbec72ed
DIFF: https://github.com/llvm/llvm-project/commit/63c9aca12afad6745173861f5315ee96fbec72ed.diff

LOG: Revert "[AArch64] Async unwind - function epilogues"

This reverts commit 74319d67943a4fbef36e81f54273549ce4962f84.

It causes test failures that look like an infinite loop in asan/hwasan
unwinding.
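
The patch being reverted taught the AArch64 epilogue emitter to describe
frame destruction to the unwinder. A representative epilogue, taken from
the byval-call.ll expectations further down, shows the directives this
revert removes (comments added here for illustration):

    ldr x30, [sp, #16]      // reload the link register
    add sp, sp, #32         // deallocate the frame
    .cfi_def_cfa_offset 0   // the CFA is the incoming SP again
    .cfi_restore w30        // x30 holds its on-entry value again
    ret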

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
    llvm/lib/Target/AArch64/AArch64FrameLowering.h
    llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
    llvm/lib/Target/AArch64/AArch64InstrInfo.h
    llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
    llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
    llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
    llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
    llvm/test/CodeGen/AArch64/cond-br-tuning.ll
    llvm/test/CodeGen/AArch64/csr-split.ll
    llvm/test/CodeGen/AArch64/emutls.ll
    llvm/test/CodeGen/AArch64/fastcc.ll
    llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
    llvm/test/CodeGen/AArch64/framelayout-sve.mir
    llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
    llvm/test/CodeGen/AArch64/large-stack.ll
    llvm/test/CodeGen/AArch64/local_vars.ll
    llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
    llvm/test/CodeGen/AArch64/merge-store-dependency.ll
    llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
    llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
    llvm/test/CodeGen/AArch64/settag.ll
    llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
    llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
    llvm/test/CodeGen/AArch64/split-vector-insert.ll
    llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
    llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
    llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
    llvm/test/CodeGen/AArch64/sve-insert-element.ll
    llvm/test/CodeGen/AArch64/sve-insert-vector.ll
    llvm/test/CodeGen/AArch64/sve-ldnf1.mir
    llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
    llvm/test/CodeGen/AArch64/sve-pred-arith.ll
    llvm/test/CodeGen/AArch64/sve-varargs.ll
    llvm/test/CodeGen/AArch64/swifttail-call.ll
    llvm/test/CodeGen/AArch64/tail-call.ll
    llvm/test/CodeGen/AArch64/tailcc-tail-call.ll
    llvm/test/CodeGen/AArch64/unwind-preserved.ll
    llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index e7d9743198b66..d5c77e3afe74f 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -580,49 +580,6 @@ void AArch64FrameLowering::emitCalleeSavedFrameMoves(
   emitCalleeSavedSVELocations(MBB, MBBI);
 }
 
-static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator MBBI,
-                                    bool SVE) {
-  MachineFunction &MF = *MBB.getParent();
-  MachineFrameInfo &MFI = MF.getFrameInfo();
-
-  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
-  if (CSI.empty())
-    return;
-
-  const TargetSubtargetInfo &STI = MF.getSubtarget();
-  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
-  const TargetInstrInfo &TII = *STI.getInstrInfo();
-  DebugLoc DL = MBB.findDebugLoc(MBBI);
-
-  for (const auto &Info : CSI) {
-    if (SVE !=
-        (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
-      continue;
-
-    unsigned Reg = Info.getReg();
-    if (SVE &&
-        !static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
-      continue;
-
-    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
-        nullptr, TRI.getDwarfRegNum(Info.getReg(), true)));
-    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
-        .addCFIIndex(CFIIndex)
-        .setMIFlags(MachineInstr::FrameDestroy);
-  }
-}
-
-void AArch64FrameLowering::emitCalleeSavedGPRRestores(
-    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
-  emitCalleeSavedRestores(MBB, MBBI, false);
-}
-
-void AArch64FrameLowering::emitCalleeSavedSVERestores(
-    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
-  emitCalleeSavedRestores(MBB, MBBI, true);
-}
-
 // Find a scratch register that we can use at the start of the prologue to
 // re-align the stack pointer.  We avoid using callee-save registers since they
 // may appear to be free when this is called from canUseAsPrologue (during
@@ -930,9 +887,7 @@ static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
 static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
     const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
-    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI,
-    MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup,
-    int CFAOffset = 0) {
+    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, bool InProlog = true) {
   unsigned NewOpc;
   switch (MBBI->getOpcode()) {
   default:
@@ -995,9 +950,10 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
   if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
       CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
     emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
-                    StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag,
-                    false, false, nullptr, EmitCFI,
-                    StackOffset::getFixed(CFAOffset));
+                    StackOffset::getFixed(CSStackSizeInc), TII,
+                    InProlog ? MachineInstr::FrameSetup
+                             : MachineInstr::FrameDestroy,
+                    false, false, nullptr, EmitCFI && InProlog);
 
     return std::prev(MBBI);
   }
@@ -1025,15 +981,16 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
   // Generate a new SEH code that corresponds to the new instruction.
   if (NeedsWinCFI) {
     *HasWinCFI = true;
-    InsertSEH(*MIB, *TII, FrameFlag);
+    InsertSEH(*MIB, *TII,
+              InProlog ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy);
   }
 
-  if (EmitCFI) {
+  if (EmitCFI && InProlog) {
     unsigned CFIIndex = MF.addFrameInst(
-        MCCFIInstruction::cfiDefCfaOffset(nullptr, CFAOffset - CSStackSizeInc));
+        MCCFIInstruction::cfiDefCfaOffset(nullptr, -CSStackSizeInc));
     BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
         .addCFIIndex(CFIIndex)
-        .setMIFlags(FrameFlag);
+        .setMIFlags(MachineInstr::FrameSetup);
   }
 
   return std::prev(MBB.erase(MBBI));
@@ -1172,14 +1129,6 @@ static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII,
       .addReg(AArch64::X18)
       .addImm(-8)
       .setMIFlag(MachineInstr::FrameDestroy);
-
-  if (MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo()) {
-    unsigned CFIIndex =
-        MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, 18));
-    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
-        .addCFIIndex(CFIIndex)
-        .setMIFlags(MachineInstr::FrameDestroy);
-  }
 }
 
 void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
@@ -1215,6 +1164,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                 MFnI.needsDwarfUnwindInfo());
 
   if (MFnI.shouldSignReturnAddress()) {
+
     unsigned PACI;
     if (MFnI.shouldSignWithBKey()) {
       BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITBKEY))
@@ -1700,7 +1650,6 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
   DebugLoc DL;
   bool NeedsWinCFI = needsWinCFI(MF);
-  bool EmitCFI = MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo();
   bool HasWinCFI = false;
   bool IsFunclet = false;
   auto WinCFI = make_scope_exit([&]() { assert(HasWinCFI == MF.hasWinCFI()); });
@@ -1710,12 +1659,9 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
     IsFunclet = isFuncletReturnInstr(*MBBI);
   }
 
-  auto FinishingTouches = make_scope_exit([&]() {
-    InsertReturnAddressAuth(MF, MBB);
+  auto ShadowStackEpilogue = make_scope_exit([&]() {
     if (needsShadowCallStackPrologueEpilogue(MF))
       emitShadowCallStackEpilogue(*TII, MF, MBB, MBB.getFirstTerminator(), DL);
-    if (EmitCFI)
-      emitCalleeSavedGPRRestores(MBB, MBB.getFirstTerminator());
   });
 
   int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF)
@@ -1730,6 +1676,36 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   // How much of the stack used by incoming arguments this function is expected
   // to restore in this particular epilogue.
   int64_t ArgumentStackToRestore = getArgumentStackToRestore(MF, MBB);
+
+  // The stack frame should be like below,
+  //
+  //      ----------------------                     ---
+  //      |                    |                      |
+  //      | BytesInStackArgArea|              CalleeArgStackSize
+  //      | (NumReusableBytes) |                (of tail call)
+  //      |                    |                     ---
+  //      |                    |                      |
+  //      ---------------------|        ---           |
+  //      |                    |         |            |
+  //      |   CalleeSavedReg   |         |            |
+  //      | (CalleeSavedStackSize)|      |            |
+  //      |                    |         |            |
+  //      ---------------------|         |         NumBytes
+  //      |                    |     StackSize  (StackAdjustUp)
+  //      |   LocalStackSize   |         |            |
+  //      | (covering callee   |         |            |
+  //      |       args)        |         |            |
+  //      |                    |         |            |
+  //      ----------------------        ---          ---
+  //
+  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
+  //             = StackSize + ArgumentPopSize
+  //
+  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
+  // it as the 2nd argument of AArch64ISD::TC_RETURN.
+
+  auto Cleanup = make_scope_exit([&] { InsertReturnAddressAuth(MF, MBB); });
+
   bool IsWin64 =
       Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
   unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);
@@ -1764,11 +1740,9 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
   // Assume we can't combine the last pop with the sp restore.
 
-  bool CombineAfterCSRBump = false;
   if (!CombineSPBump && PrologueSaveSize != 0) {
     MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
-    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
-           AArch64InstrInfo::isSEHInstruction(*Pop))
+    while (AArch64InstrInfo::isSEHInstruction(*Pop))
       Pop = std::prev(Pop);
     // Converting the last ldp to a post-index ldp is valid only if the last
     // ldp's offset is 0.
@@ -1776,17 +1750,16 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
     // If the offset is 0 and the AfterCSR pop is not actually trying to
     // allocate more stack for arguments (in space that an untimely interrupt
     // may clobber), convert it to a post-index ldp.
-    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
-      convertCalleeSaveRestoreToSPPrePostIncDec(
-          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
-          MachineInstr::FrameDestroy, PrologueSaveSize);
-    } else {
+    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0)
+      convertCalleeSaveRestoreToSPPrePostIncDec(MBB, Pop, DL, TII,
+                                                PrologueSaveSize, NeedsWinCFI,
+                                                &HasWinCFI, false, false);
+    else {
       // If not, make sure to emit an add after the last ldp.
       // We're doing this by transferring the size to be restored from the
       // adjustment *before* the CSR pops to the adjustment *after* the CSR
       // pops.
       AfterCSRPopSize += PrologueSaveSize;
-      CombineAfterCSRBump = true;
     }
   }
 
@@ -1835,22 +1808,10 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   // If there is a single SP update, insert it before the ret and we're done.
   if (CombineSPBump) {
     assert(!SVEStackSize && "Cannot combine SP bump with SVE");
-
-    // When we are about to restore the CSRs, the CFA register is SP again.
-    if (EmitCFI && hasFP(MF)) {
-      const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo();
-      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
-      unsigned CFIIndex =
-          MF.addFrameInst(MCCFIInstruction::cfiDefCfa(nullptr, Reg, NumBytes));
-      BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
-          .addCFIIndex(CFIIndex)
-          .setMIFlags(MachineInstr::FrameDestroy);
-    }
-
     emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                     StackOffset::getFixed(NumBytes + (int64_t)AfterCSRPopSize),
                     TII, MachineInstr::FrameDestroy, false, NeedsWinCFI,
-                    &HasWinCFI, EmitCFI, StackOffset::getFixed(NumBytes));
+                    &HasWinCFI);
     if (HasWinCFI)
       BuildMI(MBB, MBB.getFirstTerminator(), DL,
               TII->get(AArch64::SEH_EpilogEnd))
@@ -1883,40 +1844,29 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
   // Deallocate the SVE area.
   if (SVEStackSize) {
     if (AFI->isStackRealigned()) {
-      if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
+      if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize())
         // Set SP to start of SVE callee-save area from which they can
         // be reloaded. The code below will deallocate the stack space
         // by moving FP -> SP.
         emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
                         StackOffset::getScalable(-CalleeSavedSize), TII,
                         MachineInstr::FrameDestroy);
-      }
     } else {
       if (AFI->getSVECalleeSavedStackSize()) {
         // Deallocate the non-SVE locals first before we can deallocate (and
         // restore callee saves) from the SVE area.
-        emitFrameOffset(
-            MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
-            StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy,
-            false, false, nullptr, EmitCFI && !hasFP(MF),
-            SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize));
+        emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
+                        StackOffset::getFixed(NumBytes), TII,
+                        MachineInstr::FrameDestroy);
         NumBytes = 0;
       }
 
       emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
-                      DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
-                      false, nullptr, EmitCFI && !hasFP(MF),
-                      SVEStackSize +
-                          StackOffset::getFixed(NumBytes + PrologueSaveSize));
+                      DeallocateBefore, TII, MachineInstr::FrameDestroy);
 
       emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
-                      DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
-                      false, nullptr, EmitCFI && !hasFP(MF),
-                      DeallocateAfter +
-                          StackOffset::getFixed(NumBytes + PrologueSaveSize));
+                      DeallocateAfter, TII, MachineInstr::FrameDestroy);
     }
-    if (EmitCFI)
-      emitCalleeSavedSVERestores(MBB, RestoreEnd);
   }
 
   if (!hasFP(MF)) {
@@ -1926,21 +1876,14 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
     if (RedZone && AfterCSRPopSize == 0)
       return;
 
-    // Pop the local variables off the stack. If there are no callee-saved
-    // registers, it means we are actually positioned at the terminator and can
-    // combine stack increment for the locals and the stack increment for
-    // callee-popped arguments into (possibly) a single instruction and be done.
     bool NoCalleeSaveRestore = PrologueSaveSize == 0;
     int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
     if (NoCalleeSaveRestore)
       StackRestoreBytes += AfterCSRPopSize;
 
-    emitFrameOffset(
-        MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
-        StackOffset::getFixed(StackRestoreBytes), TII,
-        MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI, EmitCFI,
-        StackOffset::getFixed((RedZone ? 0 : NumBytes) + PrologueSaveSize));
-
+    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
+                    StackOffset::getFixed(StackRestoreBytes), TII,
+                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
     // If we were able to combine the local stack pop with the argument pop,
     // then we're done.
     if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
@@ -1969,17 +1912,6 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                     StackOffset::getFixed(NumBytes), TII,
                     MachineInstr::FrameDestroy, false, NeedsWinCFI);
 
-  // When we are about to restore the CSRs, the CFA register is SP again.
-  if (EmitCFI && hasFP(MF)) {
-    const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo();
-    unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
-    unsigned CFIIndex = MF.addFrameInst(
-        MCCFIInstruction::cfiDefCfa(nullptr, Reg, PrologueSaveSize));
-    BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
-        .addCFIIndex(CFIIndex)
-        .setMIFlags(MachineInstr::FrameDestroy);
-  }
-
   // This must be placed after the callee-save restore code because that code
   // assumes the SP is at the same location as it was after the callee-save save
   // code in the prologue.
@@ -1987,11 +1919,9 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
     assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                   "interrupt may have clobbered");
 
-    emitFrameOffset(
-        MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
-        StackOffset::getFixed(AfterCSRPopSize), TII, MachineInstr::FrameDestroy,
-        false, NeedsWinCFI, &HasWinCFI, EmitCFI,
-        StackOffset::getFixed(CombineAfterCSRBump ? PrologueSaveSize : 0));
+    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
+                    StackOffset::getFixed(AfterCSRPopSize), TII,
+                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
   }
   if (HasWinCFI)
     BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
@@ -2687,7 +2617,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
         MachineMemOperand::MOLoad, Size, Alignment));
     if (NeedsWinCFI)
       InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
-
     return MIB->getIterator();
   };
 

diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index 592b85ce24f64..1eb13d0588345 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -147,10 +147,6 @@ class AArch64FrameLowering : public TargetFrameLowering {
                                    MachineBasicBlock::iterator MBBI) const;
   void emitCalleeSavedSVELocations(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI) const;
-  void emitCalleeSavedGPRRestores(MachineBasicBlock &MBB,
-                                  MachineBasicBlock::iterator MBBI) const;
-  void emitCalleeSavedSVERestores(MachineBasicBlock &MBB,
-                                  MachineBasicBlock::iterator MBBI) const;
 };
 
 } // End llvm namespace

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index d9ad1313536c8..ec8ecf4c3746f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -4139,12 +4139,11 @@ static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
 
 MCCFIInstruction llvm::createDefCFA(const TargetRegisterInfo &TRI,
                                     unsigned FrameReg, unsigned Reg,
-                                    const StackOffset &Offset,
-                                    bool LastAdjustmentWasScalable) {
+                                    const StackOffset &Offset) {
   if (Offset.getScalable())
     return createDefCFAExpression(TRI, Reg, Offset);
 
-  if (FrameReg == Reg && !LastAdjustmentWasScalable)
+  if (FrameReg == Reg)
     return MCCFIInstruction::cfiDefCfaOffset(nullptr, int(Offset.getFixed()));
 
   unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
@@ -4276,8 +4275,8 @@ static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
       const TargetSubtargetInfo &STI = MF.getSubtarget();
       const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
 
-      unsigned CFIIndex = MF.addFrameInst(
-          createDefCFA(TRI, FrameReg, DestReg, CFAOffset, VScale != 1));
+      unsigned CFIIndex =
+          MF.addFrameInst(createDefCFA(TRI, FrameReg, DestReg, CFAOffset));
       BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
           .addCFIIndex(CFIIndex)
           .setMIFlags(Flag);

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 337372644c014..eca96c79d2821 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -396,8 +396,7 @@ bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                      const TargetRegisterInfo *TRI);
 
 MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
-                              unsigned Reg, const StackOffset &Offset,
-                              bool LastAdjustmentWasScalable = true);
+                              unsigned Reg, const StackOffset &Offset);
 MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                                  const StackOffset &OffsetFromDefCFA);
 

diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
index a7ff0a3ef1c99..16b6cd3aa0081 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
@@ -15,7 +15,6 @@
 
 #include "AArch64MachineFunctionInfo.h"
 #include "AArch64InstrInfo.h"
-#include "AArch64Subtarget.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Metadata.h"
@@ -127,19 +126,9 @@ bool AArch64FunctionInfo::needsDwarfUnwindInfo() const {
 }
 
 bool AArch64FunctionInfo::needsAsyncDwarfUnwindInfo() const {
-  if (!NeedsAsyncDwarfUnwindInfo.hasValue()) {
-    const Function &F = MF.getFunction();
-    NeedsAsyncDwarfUnwindInfo =
-        needsDwarfUnwindInfo() && F.getUWTableKind() == UWTableKind::Async &&
-        !MF.getSubtarget<AArch64Subtarget>()
-             .isTargetMachO() && // TODO: async unwind info not representable in
-                                 // the compact format(?).
-        !F.hasMinSize(); // TODO: this is to prevent epilogue unwind info
-                         // from being emitted for homogeneous epilogues,
-                         // outlined functions, and functions outlined from.
-                         // Alternatively, we could disable those
-                         // optimisations. Or even better, add async unwind
-                         // support to them!
-  }
-  return NeedsAsyncDwarfUnwindInfo.getValue();
+  if (!NeedsDwarfAsyncUnwindInfo.hasValue())
+    NeedsDwarfAsyncUnwindInfo =
+        needsDwarfUnwindInfo() &&
+        MF.getFunction().getUWTableKind() == UWTableKind::Async;
+  return NeedsDwarfAsyncUnwindInfo.getValue();
 }

diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index daf474c60bdd7..1248d15a3bd56 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -177,7 +177,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
   mutable Optional<bool> NeedsDwarfUnwindInfo;
 
   /// True if the function needs asynchronous unwind information.
-  mutable Optional<bool> NeedsAsyncDwarfUnwindInfo;
+  mutable Optional<bool> NeedsDwarfAsyncUnwindInfo;
 
 public:
   explicit AArch64FunctionInfo(MachineFunction &MF);

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
index 85dfab130ae32..042264793c2c3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
@@ -3,7 +3,7 @@
 
 declare void @byval_i32(i32* byval(i32) %ptr)
 
-define void @call_byval_i32(i32* %incoming) uwtable {
+define void @call_byval_i32(i32* %incoming) {
 ; CHECK-LABEL: call_byval_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -15,8 +15,6 @@ define void @call_byval_i32(i32* %incoming) uwtable {
 ; CHECK-NEXT:    bl byval_i32
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
   call void @byval_i32(i32* byval(i32) %incoming)
   ret void
@@ -24,7 +22,7 @@ define void @call_byval_i32(i32* %incoming) uwtable {
 
 declare void @byval_a64i32([64 x i32]* byval([64 x i32]) %ptr)
 
-define void @call_byval_a64i32([64 x i32]* %incoming) uwtable {
+define void @call_byval_a64i32([64 x i32]* %incoming) {
 ; CHECK-LABEL: call_byval_a64i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #288
@@ -69,14 +67,9 @@ define void @call_byval_a64i32([64 x i32]* %incoming) uwtable {
 ; CHECK-NEXT:    ldr q0, [x0, #240]
 ; CHECK-NEXT:    str q0, [sp, #240]
 ; CHECK-NEXT:    bl byval_a64i32
-; CHECK-NEXT:    .cfi_def_cfa wsp, 288
 ; CHECK-NEXT:    ldp x29, x30, [sp, #256] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x28, [sp, #272] // 8-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #288
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w28
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   call void @byval_a64i32([64 x i32]* byval([64 x i32]) %incoming)
   ret void

diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index fad479ff91cb9..c8320daeb4226 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -444,12 +444,6 @@ define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdC
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x22, x21, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp], #48 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w20
-; CHECK-NEXT:    .cfi_restore w21
-; CHECK-NEXT:    .cfi_restore w22
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 entry:
   %size = getelementptr inbounds %struct.Struct, %struct.Struct* %hdCall, i64 0, i32 0
@@ -519,10 +513,6 @@ define i32 @do_nothing_if_resultant_opcodes_would_differ() #0 {
 ; CHECK-NEXT:  .LBB7_8: // %return
 ; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w20
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
@@ -599,16 +589,10 @@ define i32 @do_nothing_if_compares_can_not_be_adjusted_to_each_other() #0 {
 ; CHECK-NEXT:  // %bb.5:
 ; CHECK-NEXT:    mov w0, #123
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB8_6: // %if.end
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
@@ -654,7 +638,7 @@ return:                                           ; preds = %if.end, %land.lhs.t
 ; fcmp d8, #0.0
 ; b.gt .LBB0_5
 
-define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) #0 {
+define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) {
 ; CHECK-LABEL: fcmpri:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
@@ -692,10 +676,6 @@ define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) #0 {
 ; CHECK-NEXT:  .LBB9_4: // %return
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr d8, [sp], #32 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore b8
 ; CHECK-NEXT:    ret
 
 ; CHECK-LABEL-DAG: .LBB9_3
@@ -734,7 +714,7 @@ return:                                           ; preds = %land.lhs.true, %con
   ret i32 %retval.0
 }
 
-define void @cmp_shifted(i32 %in, i32 %lhs, i32 %rhs) #0 {
+define void @cmp_shifted(i32 %in, i32 %lhs, i32 %rhs) {
 ; CHECK-LABEL: cmp_shifted:
 ; CHECK:       // %bb.0: // %common.ret
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -748,8 +728,6 @@ define void @cmp_shifted(i32 %in, i32 %lhs, i32 %rhs) #0 {
 ; CHECK-NEXT:    csel w0, w9, w8, ge
 ; CHECK-NEXT:    bl zoo
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ; [...]
 
@@ -852,5 +830,3 @@ declare double @yoo(i32)
 declare i32 @xoo(i32, i32)
 
 declare i32 @woo(double, double)
-
-attributes #0 = { uwtable }
\ No newline at end of file

diff --git a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
index 7d4a1c04b6dcc..5a21840c71590 100644
--- a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
+++ b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
@@ -180,7 +180,7 @@ declare void @foo()
 declare void @bar(i32)
 
 ; Don't transform since the call will clobber the NZCV bits.
-define void @test_call_clobber(i32 %unused, i32 %a) uwtable {
+define void @test_call_clobber(i32 %unused, i32 %a) {
 ; CHECK-LABEL: test_call_clobber:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -193,9 +193,6 @@ define void @test_call_clobber(i32 %unused, i32 %a) uwtable {
 ; CHECK-NEXT:    cbnz w19, .LBB9_2
 ; CHECK-NEXT:  // %bb.1: // %if.end
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB9_2: // %if.then
 ; CHECK-NEXT:    bl foo

diff --git a/llvm/test/CodeGen/AArch64/csr-split.ll b/llvm/test/CodeGen/AArch64/csr-split.ll
index 2c07c60670439..f45455006fb53 100644
--- a/llvm/test/CodeGen/AArch64/csr-split.ll
+++ b/llvm/test/CodeGen/AArch64/csr-split.ll
@@ -6,7 +6,7 @@
 
 @a = common dso_local local_unnamed_addr global i32 0, align 4
 
-define dso_local signext i32 @test1(i32* %b) local_unnamed_addr uwtable  {
+define dso_local signext i32 @test1(i32* %b) local_unnamed_addr  {
 ; CHECK-LABEL: test1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -19,18 +19,12 @@ define dso_local signext i32 @test1(i32* %b) local_unnamed_addr uwtable  {
 ; CHECK-NEXT:    b.eq .LBB0_2
 ; CHECK-NEXT:  // %bb.1: // %if.end
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_2: // %if.then
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    bl callVoid
 ; CHECK-NEXT:    mov x0, x19
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    b callNonVoid
 ;
 ; CHECK-APPLE-LABEL: test1:
@@ -81,7 +75,7 @@ declare signext i32 @callVoid(...) local_unnamed_addr
 
 declare signext i32 @callNonVoid(i32*) local_unnamed_addr
 
-define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr uwtable  {
+define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr  {
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -99,16 +93,10 @@ define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr uwtable  {
 ; CHECK-NEXT:    bl callVoid
 ; CHECK-NEXT:    mov x0, x19
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    b callNonVoid
 ; CHECK-NEXT:  .LBB1_3: // %return
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-APPLE-LABEL: test2:
@@ -163,7 +151,7 @@ return:                                           ; preds = %if.end, %entry, %if
 }
 
 
-define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_addr uwtable  {
+define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_addr  {
 ; CHECK-LABEL: test3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
@@ -183,10 +171,6 @@ define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_ad
 ; CHECK-NEXT:    mov x0, x19
 ; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w20
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-APPLE-LABEL: test3:

diff --git a/llvm/test/CodeGen/AArch64/emutls.ll b/llvm/test/CodeGen/AArch64/emutls.ll
index 4fdc99d0b7e01..5048f71b1591b 100644
--- a/llvm/test/CodeGen/AArch64/emutls.ll
+++ b/llvm/test/CodeGen/AArch64/emutls.ll
@@ -9,13 +9,12 @@
 @my_emutls_v_xyz = external global i8*, align 4
 declare i8* @my_emutls_get_address(i8*)
 
-define i32 @my_get_xyz() uwtable {
+define i32 @my_get_xyz() {
 ; ARM64-LABEL: my_get_xyz:
 ; ARM64:        adrp x0, :got:my_emutls_v_xyz
 ; ARM64-NEXT:   ldr x0, [x0, :got_lo12:my_emutls_v_xyz]
 ; ARM64-NEXT:   bl my_emutls_get_address
 ; ARM64-NEXT:   ldr  w0, [x0]
-; ARM64-NEXT: 	.cfi_def_cfa wsp, 16
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:
@@ -33,13 +32,12 @@ entry:
 @s1 = thread_local global i16 15
 @b1 = thread_local global i8 0
 
-define i32 @f1() uwtable {
+define i32 @f1() {
 ; ARM64-LABEL: f1:
 ; ARM64:        adrp x0, :got:__emutls_v.i1
 ; ARM64-NEXT:   ldr x0, [x0, :got_lo12:__emutls_v.i1]
 ; ARM64-NEXT:   bl __emutls_get_address
 ; ARM64-NEXT:   ldr  w0, [x0]
-; ARM64-NEXT: 	.cfi_def_cfa wsp, 16
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:
@@ -47,12 +45,11 @@ entry:
   ret i32 %tmp1
 }
 
-define i32* @f2() uwtable {
+define i32* @f2() {
 ; ARM64-LABEL: f2:
 ; ARM64:        adrp x0, :got:__emutls_v.i1
 ; ARM64-NEXT:   ldr x0, [x0, :got_lo12:__emutls_v.i1]
 ; ARM64-NEXT:   bl __emutls_get_address
-; ARM64-NEXT: 	.cfi_def_cfa wsp, 16
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:
@@ -71,12 +68,11 @@ entry:
   ret i32 %tmp1
 }
 
-define i32* @f6() uwtable {
+define i32* @f6() {
 ; ARM64-LABEL: f6:
 ; ARM64:        adrp x0, __emutls_v.i3
 ; ARM64:        add x0, x0, :lo12:__emutls_v.i3
 ; ARM64-NEXT:   bl __emutls_get_address
-; ARM64-NEXT: 	.cfi_def_cfa wsp, 16
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:

diff --git a/llvm/test/CodeGen/AArch64/fastcc.ll b/llvm/test/CodeGen/AArch64/fastcc.ll
index c499d6833d6da..8ae7c2dc2526b 100644
--- a/llvm/test/CodeGen/AArch64/fastcc.ll
+++ b/llvm/test/CodeGen/AArch64/fastcc.ll
@@ -5,7 +5,7 @@
 ; Without tailcallopt fastcc still means the caller cleans up the
 ; stack, so try to make sure this is respected.
 
-define fastcc void @func_stack0() uwtable {
+define fastcc void @func_stack0() {
 ; CHECK-LABEL: func_stack0:
 ; CHECK: sub sp, sp, #48
 ; CHECK: add x29, sp, #32
@@ -49,21 +49,15 @@ define fastcc void @func_stack0() uwtable {
   ret void
 ; CHECK: ldp     x29, x30, [sp, #32]
 ; CHECK-NEXT: add sp, sp, #48
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w30
-; CHECK-NEXT: .cfi_restore w29
 ; CHECK-NEXT: ret
 
 
 ; CHECK-TAIL: ldp     x29, x30, [sp, #32]
 ; CHECK-TAIL-NEXT: add sp, sp, #48
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset 0
-; CHECK-TAIL-NEXT: .cfi_restore w30
-; CHECK-TAIL-NEXT: .cfi_restore w29
 ; CHECK-TAIL-NEXT: ret
 }
 
-define fastcc void @func_stack8([8 x i64], i32 %stacked) uwtable {
+define fastcc void @func_stack8([8 x i64], i32 %stacked) {
 ; CHECK-LABEL: func_stack8:
 ; CHECK: sub sp, sp, #48
 ; CHECK: stp x29, x30, [sp, #32]
@@ -106,24 +100,17 @@ define fastcc void @func_stack8([8 x i64], i32 %stacked) uwtable {
 ; CHECK-TAIL-NOT: sub sp, sp
 
   ret void
-; CHECK-NEXT: .cfi_def_cfa wsp, 48
 ; CHECK-NEXT: ldp     x29, x30, [sp, #32]
-; CHECK-NEXT: add sp, sp, #48
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w30
-; CHECK-NEXT: .cfi_restore w29
+; CHECK: add sp, sp, #48
 ; CHECK-NEXT: ret
 
 
 ; CHECK-TAIL: ldp     x29, x30, [sp, #32]
 ; CHECK-TAIL-NEXT: add     sp, sp, #64
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset -16
-; CHECK-TAIL-NEXT: .cfi_restore w30
-; CHECK-TAIL-NEXT: .cfi_restore w29
 ; CHECK-TAIL-NEXT: ret
 }
 
-define fastcc void @func_stack32([8 x i64], i128 %stacked0, i128 %stacked1) uwtable {
+define fastcc void @func_stack32([8 x i64], i128 %stacked0, i128 %stacked1) {
 ; CHECK-LABEL: func_stack32:
 ; CHECK: add x29, sp, #32
 
@@ -159,31 +146,22 @@ define fastcc void @func_stack32([8 x i64], i128 %stacked0, i128 %stacked1) uwta
 ; CHECK-TAIL-NOT: sub sp, sp
 
   ret void
-; CHECK:      .cfi_def_cfa wsp, 48
-; CHECK-NEXT: ldp x29, x30, [sp, #32]
+; CHECK: ldp     x29, x30, [sp, #32]
 ; CHECK-NEXT: add sp, sp, #48
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w30
-; CHECK-NEXT: .cfi_restore w29
 ; CHECK-NEXT: ret
 
 ; CHECK-TAIL: ldp     x29, x30, [sp, #32]
 ; CHECK-TAIL-NEXT: add     sp, sp, #80
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset -32
-; CHECK-TAIL-NEXT: .cfi_restore w30
-; CHECK-TAIL-NEXT: .cfi_restore w29
 ; CHECK-TAIL-NEXT: ret
 }
 
 ; Check that arg stack pop is done after callee-save restore when no frame pointer is used.
-define fastcc void @func_stack32_leaf([8 x i64], i128 %stacked0, i128 %stacked1) uwtable {
+define fastcc void @func_stack32_leaf([8 x i64], i128 %stacked0, i128 %stacked1) {
 ; CHECK-LABEL: func_stack32_leaf:
 ; CHECK: str     x20, [sp, #-16]!
 ; CHECK: nop
 ; CHECK-NEXT: //NO_APP
 ; CHECK-NEXT: ldr     x20, [sp], #16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w20
 ; CHECK-NEXT: ret
 
 ; CHECK-TAIL-LABEL: func_stack32_leaf:
@@ -191,10 +169,7 @@ define fastcc void @func_stack32_leaf([8 x i64], i128 %stacked0, i128 %stacked1)
 ; CHECK-TAIL: nop
 ; CHECK-TAIL-NEXT: //NO_APP
 ; CHECK-TAIL-NEXT: ldr     x20, [sp], #16
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset 0
-; CHECK-TAIL-NEXT: add	sp, sp, #32
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset -32
-; CHECK-TAIL-NEXT: .cfi_restore w20
+; CHECK-TAIL-NEXT: add     sp, sp, #32
 ; CHECK-TAIL-NEXT: ret
 
 ; CHECK-TAIL-RZ-LABEL: func_stack32_leaf:
@@ -203,10 +178,7 @@ define fastcc void @func_stack32_leaf([8 x i64], i128 %stacked0, i128 %stacked1)
 ; CHECK-TAIL-RZ: nop
 ; CHECK-TAIL-RZ-NEXT: //NO_APP
 ; CHECK-TAIL-RZ-NEXT: ldr     x20, [sp], #16
-; CHECK-TAIL-RZ-NEXT: .cfi_def_cfa_offset 0
-; CHECK-TAIL-RZ-NEXT: add	sp, sp, #32
-; CHECK-TAIL-RZ-NEXT: .cfi_def_cfa_offset -32
-; CHECK-TAIL-RZ-NEXT: .cfi_restore w20
+; CHECK-TAIL-RZ-NEXT: add     sp, sp, #32
 ; CHECK-TAIL-RZ-NEXT: ret
 
   ; Make sure there is a callee-save register to save/restore.
@@ -215,7 +187,7 @@ define fastcc void @func_stack32_leaf([8 x i64], i128 %stacked0, i128 %stacked1)
 }
 
 ; Check that arg stack pop is done after callee-save restore when no frame pointer is used.
-define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %stacked1) uwtable {
+define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %stacked1) {
 ; CHECK-LABEL: func_stack32_leaf_local:
 ; CHECK: sub     sp, sp, #32
 ; CHECK-NEXT:  .cfi_def_cfa_offset 32
@@ -224,8 +196,6 @@ define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %sta
 ; CHECK-NEXT: //NO_APP
 ; CHECK-NEXT: ldr     x20, [sp, #16]
 ; CHECK-NEXT: add     sp, sp, #32
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w20
 ; CHECK-NEXT: ret
 
 ; CHECK-TAIL-LABEL: func_stack32_leaf_local:
@@ -236,8 +206,6 @@ define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %sta
 ; CHECK-TAIL-NEXT: //NO_APP
 ; CHECK-TAIL-NEXT: ldr     x20, [sp, #16]
 ; CHECK-TAIL-NEXT: add     sp, sp, #64
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset -32
-; CHECK-TAIL-NEXT: .cfi_restore w20
 ; CHECK-TAIL-NEXT: ret
 
 ; CHECK-TAIL-RZ-LABEL: func_stack32_leaf_local:
@@ -246,10 +214,7 @@ define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %sta
 ; CHECK-TAIL-RZ: nop
 ; CHECK-TAIL-RZ-NEXT: //NO_APP
 ; CHECK-TAIL-RZ-NEXT: ldr     x20, [sp], #16
-; CHECK-TAIL-RZ-NEXT: .cfi_def_cfa_offset 0
-; CHECK-TAIL-RZ-NEXT: add	sp, sp, #32
-; CHECK-TAIL-RZ-NEXT: .cfi_def_cfa_offset -32
-; CHECK-TAIL-RZ-NEXT: .cfi_restore w20
+; CHECK-TAIL-RZ-NEXT: add     sp, sp, #32
 ; CHECK-TAIL-RZ-NEXT: ret
 
   %val0 = alloca [2 x i64], align 8
@@ -260,24 +225,19 @@ define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %sta
 }
 
 ; Check that arg stack pop is done after callee-save restore when no frame pointer is used.
-define fastcc void @func_stack32_leaf_local_nocs([8 x i64], i128 %stacked0, i128 %stacked1) uwtable {
+define fastcc void @func_stack32_leaf_local_nocs([8 x i64], i128 %stacked0, i128 %stacked1) {
 ; CHECK-LABEL: func_stack32_leaf_local_nocs:
 ; CHECK: sub     sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add	sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK: add     sp, sp, #16
 ; CHECK-NEXT: ret
 
 ; CHECK-TAIL-LABEL: func_stack32_leaf_local_nocs:
 ; CHECK-TAIL: sub     sp, sp, #16
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset 16
-; CHECK-TAIL-NEXT: add	sp, sp, #48
-; CHECK-TAIL-NEXT: .cfi_def_cfa_offset -32
+; CHECK-TAIL: add     sp, sp, #48
 ; CHECK-TAIL-NEXT: ret
 
 ; CHECK-TAIL-RZ-LABEL: func_stack32_leaf_local_nocs:
 ; CHECK-TAIL-RZ: add     sp, sp, #32
-; CHECK-TAIL-RZ-NEXT: .cfi_def_cfa_offset -32
 ; CHECK-TAIL-RZ-NEXT: ret
 
   %val0 = alloca [2 x i64], align 8

diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
index 7d7b3ace8a915..baf3ab724c9c4 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mattr=+sve -mtriple=aarch64-none-linux-gnu -start-before=prologepilog %s -o - | FileCheck %s
 
 --- |
-  define aarch64_sve_vector_pcs void @fix_restorepoint_p4() uwtable { entry: unreachable }
+  define aarch64_sve_vector_pcs void @fix_restorepoint_p4() { entry: unreachable }
   ; CHECK-LABEL: fix_restorepoint_p4:
   ; CHECK:       // %bb.0: // %entry
   ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -18,15 +18,10 @@
   ; CHECK-NEXT:    // implicit-def: $z8
   ; CHECK-NEXT:    // implicit-def: $p4
   ; CHECK-NEXT:    addvl sp, sp, #1
-  ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
   ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
   ; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
   ; CHECK-NEXT:    addvl sp, sp, #2
-  ; CHECK-NEXT:    .cfi_def_cfa wsp, 16
-  ; CHECK-NEXT:    .cfi_restore z8
   ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-  ; CHECK-NEXT:    .cfi_def_cfa_offset 0
-  ; CHECK-NEXT:    .cfi_restore w29
   ; CHECK-NEXT:    ret
 ...
 name: fix_restorepoint_p4

diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 34bbbaaf1277a..5dba474446fe3 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -27,20 +27,20 @@
 #
 --- |
 
-  define void @test_allocate_sve() uwtable { entry: unreachable }
-  define void @test_allocate_sve_gpr_callee_saves() uwtable { entry: unreachable }
-  define void @test_allocate_sve_gpr_realigned() uwtable { entry: unreachable }
-  define void @test_address_sve() uwtable { entry: unreachable }
-  define void @test_address_sve_fp() uwtable { entry: unreachable }
-  define void @test_stack_arg_sve() uwtable { entry: unreachable }
-  define void @test_address_sve_out_of_range() uwtable { entry: unreachable }
-  define void @test_address_gpr_vla() uwtable { entry: unreachable }
-  define aarch64_sve_vector_pcs void @save_restore_pregs_sve() uwtable { entry: unreachable }
-  define aarch64_sve_vector_pcs void @save_restore_zregs_sve() uwtable { entry: unreachable }
-  define aarch64_sve_vector_pcs void @save_restore_sve() uwtable { entry: unreachable }
-  define aarch64_sve_vector_pcs void @save_restore_sve_realign() uwtable { entry: unreachable }
-  define aarch64_sve_vector_pcs void @frame_layout() uwtable { entry: unreachable }
-  define void @fp_relative_index_with_float_save() uwtable { entry: unreachable }
+  define void @test_allocate_sve() { entry: unreachable }
+  define void @test_allocate_sve_gpr_callee_saves() { entry: unreachable }
+  define void @test_allocate_sve_gpr_realigned() { entry: unreachable }
+  define void @test_address_sve() { entry: unreachable }
+  define void @test_address_sve_fp() { entry: unreachable }
+  define void @test_stack_arg_sve() { entry: unreachable }
+  define void @test_address_sve_out_of_range() { entry: unreachable }
+  define void @test_address_gpr_vla() { entry: unreachable }
+  define aarch64_sve_vector_pcs void @save_restore_pregs_sve() { entry: unreachable }
+  define aarch64_sve_vector_pcs void @save_restore_zregs_sve() { entry: unreachable }
+  define aarch64_sve_vector_pcs void @save_restore_sve() { entry: unreachable }
+  define aarch64_sve_vector_pcs void @save_restore_sve_realign() { entry: unreachable }
+  define aarch64_sve_vector_pcs void @frame_layout() { entry: unreachable }
+  define void @fp_relative_index_with_float_save() { entry: unreachable }
 
 ...
 # +----------+
@@ -65,12 +65,8 @@
 # CHECK-NEXT: frame-setup CFI_INSTRUCTION
 
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 
 # ASM-LABEL: test_allocate_sve:
@@ -78,21 +74,11 @@
 # ASM-NEXT:  .cfi_offset w29, -16
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG
-# ASM:       .cfi_def_cfa wsp, 32
-# ASM:       .cfi_def_cfa_offset 16
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w29
-
 #
 # UNWINDINFO:       DW_CFA_def_cfa_offset: +16
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:       DW_CFA_def_cfa: reg31 +32
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +16
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT:  DW_CFA_restore: reg29
-
 
 name:            test_allocate_sve
 stack:
@@ -128,12 +114,9 @@ body:             |
 # CHECK-NEXT: $x20 = IMPLICIT_DEF
 # CHECK-NEXT: $x21 = IMPLICIT_DEF
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $x21, $x20 = frame-destroy LDPXi $sp, 2
 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 32
-# CHECK-COUNT-4: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_allocate_sve_gpr_callee_saves:
@@ -143,14 +126,6 @@ body:             |
 # ASM-NEXT:  .cfi_offset w29, -32
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG
-#
-# ASM:       .cfi_def_cfa wsp, 48
-# ASM:       .cfi_def_cfa_offset 32
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w20
-# ASM-NEXT:  .cfi_restore w21
-# ASM-NEXT:  .cfi_restore w29
-
 #
 # UNWINDINFO:       DW_CFA_def_cfa_offset: +32
 # UNWINDINFO:       DW_CFA_offset: reg20 -8
@@ -158,14 +133,6 @@ body:             |
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -32
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:       DW_CFA_def_cfa: reg31 +48
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +32
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT:  DW_CFA_restore: reg20
-# UNWINDINFO-NEXT:  DW_CFA_restore: reg21
-# UNWINDINFO-NEXT:  DW_CFA_restore: reg29
-
 name:            test_allocate_sve_gpr_callee_saves
 stack:
   - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 }
@@ -197,9 +164,7 @@ body:             |
 # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0
 # CHECK-NEXT: $sp = ANDXri killed $[[TMP]]
 # CHECK-NEXT: $sp = frame-destroy ADDXri $fp, 0, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2
-# CHECK-COUNT-3: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_allocate_sve_gpr_realigned:
@@ -207,23 +172,11 @@ body:             |
 # ASM:       .cfi_def_cfa w29, 16
 # ASM-NEXT:  .cfi_offset w30, -8
 # ASM-NEXT:  .cfi_offset w29, -16
-#
-# ASM:        .cfi_def_cfa wsp, 16
-# ASM:        .cfi_def_cfa_offset 0
-# ASM-NEXT:   .cfi_restore w30
-# ASM-NEXT:   .cfi_restore w29
-
 #
 # UNWINDINFO:       DW_CFA_def_cfa_offset: +16
 # UNWINDINFO:       DW_CFA_def_cfa: reg29 +16
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg30 -8
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
-#
-# UNWINDINFO:       DW_CFA_def_cfa: reg31 +16
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT:  DW_CFA_restore: reg30
-# UNWINDINFO-NEXT:  DW_CFA_restore: reg29
-
 
 name:            test_allocate_sve_gpr_realigned
 stack:
@@ -263,12 +216,8 @@ body:             |
 # CHECK-NEXT: STR_PXI $p0, killed $[[TMP]], 7
 
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL:  test_address_sve:
@@ -277,21 +226,10 @@ body:             |
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG
 #
-# ASM:       .cfi_def_cfa wsp, 32
-# ASM:       .cfi_def_cfa_offset 16
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w29
-#
 # UNWINDINFO:       DW_CFA_def_cfa_offset: +16
 # UNWINDINFO-NEXT:  DW_CFA_offset: reg29 -16
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:       DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:       DW_CFA_def_cfa: reg31 +32
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +16
-# UNWINDINFO:       DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT:  DW_CFA_restore: reg29
-
 
 name:            test_address_sve
 frameInfo:
@@ -340,9 +278,7 @@ body:             |
 
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3
 # CHECK:      $sp = frame-destroy ADDXri $sp, 16, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2
-# CHECK-COUNT-3: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_address_sve_fp:
@@ -351,21 +287,10 @@ body:             |
 # ASM-NEXT:  .cfi_offset w30, -8
 # ASM-NEXT:  .cfi_offset w29, -16
 #
-# ASM:       .cfi_def_cfa wsp, 16
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w30
-# ASM-NEXT:  .cfi_restore w29
-#
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO:      DW_CFA_def_cfa: reg29 +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-#
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +16
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg30
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
-
 name:            test_address_sve_fp
 frameInfo:
   maxAlignment:  16
@@ -409,12 +334,8 @@ body:             |
 # CHECK-NEXT: $x0 = LDRXui killed $[[TMP]], 4
 
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_stack_arg_sve:
@@ -423,20 +344,10 @@ body:             |
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
 #
-# ASM:       .cfi_def_cfa wsp, 32
-# ASM:       .cfi_def_cfa_offset 16
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w29
-
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +32
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +16
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
 
 name:             test_stack_arg_sve
 fixedStack:
@@ -496,26 +407,15 @@ body:             |
 # CHECK-NEXT: STR_PXI $p0, killed $[[TMP2]], 255
 
 # CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 9
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: test_address_sve_out_of_range:
@@ -531,18 +431,6 @@ body:             |
 # ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG
 # ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG
 #
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
-# ASM:        .cfi_def_cfa wsp, 16
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w29
-
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +256, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
@@ -554,19 +442,6 @@ body:             |
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1792, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2048, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1808, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1560, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1312, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1064, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +816, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +568, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +320, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +72, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +16
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
-
 name:            test_address_sve_out_of_range
 frameInfo:
   maxAlignment:  16
@@ -609,24 +484,11 @@ body:             |
 # ASM-NEXT:  .cfi_offset w30, -24
 # ASM-NEXT:  .cfi_offset w29, -32
 #
-# ASM:       .cfi_def_cfa wsp, 32
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w19
-# ASM-NEXT:  .cfi_restore w30
-# ASM-NEXT:  .cfi_restore w29
-
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +32
 # UNWINDINFO:      DW_CFA_def_cfa: reg29 +32
 # UNWINDINFO-NEXT: DW_CFA_offset: reg19 -16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -24
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32
-#
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +32
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg19
-# UNWINDINFO-NEXT: DW_CFA_restore: reg30
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
-
 name:            test_address_gpr_vla
 frameInfo:
   maxAlignment:  16
@@ -666,21 +528,10 @@ body:             |
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG
 #
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
-# ASM:       .cfi_def_cfa wsp, 16
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w29
-
 # UNWINDINFO:         DW_CFA_def_cfa_offset: +16
 # UNWINDINFO:         DW_CFA_offset: reg29 -16
 # UNWINDINFO:         DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:         DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:         DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:         DW_CFA_def_cfa: reg31 +16
-# UNWINDINFO:         DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT:    DW_CFA_restore: reg29
-
 name: save_restore_pregs_sve
 stack:
   - { id: 0, stack-id: default, size: 32, alignment: 16 }
@@ -707,15 +558,11 @@ body:             |
 # CHECK-NEXT: frame-setup CFI_INSTRUCTION
 
 # CHECK:      $sp  = frame-destroy ADDXri $sp, 32, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
 # CHECK-NEXT: $z9  = frame-destroy LDR_ZXI $sp, 1
 # CHECK-NEXT: $z8  = frame-destroy LDR_ZXI $sp, 2
 # CHECK-NEXT: $sp  = frame-destroy ADDVL_XXI $sp, 3
-# CHECK-COUNT-4: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $fp = frame-destroy LDRXpost $sp, 16
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: save_restore_zregs_sve:
@@ -726,14 +573,6 @@ body:             |
 # ASM-NEXT:  .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9  @ cfa - 16 - 16 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10  @ cfa - 16 - 24 * VG
 # ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG
-#
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
-# ASM:       .cfi_def_cfa wsp, 16
-# ASM-NEXT:  .cfi_restore z8
-# ASM-NEXT:  .cfi_restore z9
-# ASM-NEXT:  .cfi_restore z10
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w29
 
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
@@ -742,14 +581,6 @@ body:             |
 # UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +16
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg106
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
 
 name: save_restore_zregs_sve
 stack:
@@ -792,9 +623,7 @@ body:             |
 # CHECK: frame-setup CFI_INSTRUCTION
 
 # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK: $p15 = frame-destroy LDR_PXI $sp, 4
 # CHECK: $p14 = frame-destroy LDR_PXI $sp, 5
 # CHECK: $p5 = frame-destroy LDR_PXI $sp, 14
@@ -804,10 +633,8 @@ body:             |
 # CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16
 # CHECK: $z8 = frame-destroy LDR_ZXI $sp, 17
 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 18
-# CHECK-COUNT-9: frame-destroy CFI_INSTRUCTION
 # CHECK: $x20, $x19 = frame-destroy LDPXi $sp, 2
 # CHECK: $sp, ${{[a-z0-9]+}}, $x21 = frame-destroy LDPXpost $sp, 4
-# CHECK-COUNT-5: frame-destroy CFI_INSTRUCTION
 # CHECK: RET_ReallyLR
 #
 # ASM-LABEL: save_restore_sve:
@@ -816,7 +643,7 @@ body:             |
 # ASM-NEXT:  .cfi_offset w20, -16
 # ASM-NEXT:  .cfi_offset w21, -24
 # ASM-NEXT:  .cfi_offset w29, -32
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG
+# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG 
 # ASM:       .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG
@@ -828,23 +655,6 @@ body:             |
 # ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG
 # ASM:       .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG
 #
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG
-# ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG
-# ASM:       .cfi_def_cfa wsp, 32
-# ASM-NEXT:  .cfi_restore z8
-# ASM-NEXT:  .cfi_restore z9
-# ASM-NEXT:  .cfi_restore z10
-# ASM-NEXT:  .cfi_restore z11
-# ASM-NEXT:  .cfi_restore z12
-# ASM-NEXT:  .cfi_restore z13
-# ASM-NEXT:  .cfi_restore z14
-# ASM-NEXT:  .cfi_restore z15
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w19
-# ASM-NEXT:  .cfi_restore w20
-# ASM-NEXT:  .cfi_restore w21
-# ASM-NEXT:  .cfi_restore w29
-
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +32
 # UNWINDINFO:      DW_CFA_offset: reg19 -8
 # UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16
@@ -861,23 +671,6 @@ body:             |
 # UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +32
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg106
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg107
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg108
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg109
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg110
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg111
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg19
-# UNWINDINFO-NEXT: DW_CFA_restore: reg20
-# UNWINDINFO-NEXT: DW_CFA_restore: reg21
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
 
 name: save_restore_sve
 stack:
@@ -944,11 +737,8 @@ body:             |
 # CHECK-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 3
 # CHECK:      $z9 = frame-destroy LDR_ZXI $sp, 16
 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 17
-# CHECK-COUNT-8: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-destroy ADDXri $fp, 0, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2
-# CHECK-COUNT-3: frame-destroy CFI_INSTRUCTION
 # CHECK-NEXT: RET_ReallyLR
 #
 # ASM-LABEL: save_restore_sve_realign:
@@ -965,19 +755,6 @@ body:             |
 # ASM-NEXT:  .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14  @ cfa - 16 - 56 * VG
 # ASM-NEXT:  .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15  @ cfa - 16 - 64 * VG
 #
-# ASM:       .cfi_restore z8
-# ASM-NEXT:  .cfi_restore z9
-# ASM-NEXT:  .cfi_restore z10
-# ASM-NEXT:  .cfi_restore z11
-# ASM-NEXT:  .cfi_restore z12
-# ASM-NEXT:  .cfi_restore z13
-# ASM-NEXT:  .cfi_restore z14
-# ASM-NEXT:  .cfi_restore z15
-# ASM:       .cfi_def_cfa wsp, 16
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w30
-# ASM-NEXT:  .cfi_restore w29
-#
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO:      DW_CFA_def_cfa: reg29 +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
@@ -990,20 +767,6 @@ body:             |
 # UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:      DW_CFA_restore_extended: reg104
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg106
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg107
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg108
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg109
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg110
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg111
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +16
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg30
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
-
 name: save_restore_sve_realign
 stack:
   - { id: 0, stack-id: scalable-vector, size: 16, alignment: 16 }
@@ -1082,19 +845,6 @@ body:             |
 # CHECK-NEXT: frame-setup CFI_INSTRUCTION
 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -7
 # CHECK-NEXT: frame-setup CFI_INSTRUCTION
-
-# CHECK:      $sp = frame-destroy ADDVL_XXI $sp, 7
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6
-# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7
-# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1
-# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
-# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.10)
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION
 #
 # ASM-LABEL: frame_layout:
 # ASM:       .cfi_def_cfa_offset 16
@@ -1103,24 +853,11 @@ body:             |
 # ASM:       .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8  @ cfa - 16 - 8 * VG
 # ASM:       .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG
 #
-# ASM:       .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
-# ASM:       .cfi_def_cfa wsp, 16
-# ASM-NEXT:  .cfi_restore z8
-# ASM:       .cfi_def_cfa_offset 0
-# ASM-NEXT:  .cfi_restore w29
-
 # UNWINDINFO:      DW_CFA_def_cfa_offset: +16
 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
 # UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
-# UNWINDINFO:      DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO:      DW_CFA_def_cfa: reg31 +16
-# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
-# UNWINDINFO:      DW_CFA_def_cfa_offset: +0
-# UNWINDINFO-NEXT: DW_CFA_restore: reg29
-
 name: frame_layout
 stack:
   - { id: 0, type: default,    size:  32, alignment: 16, stack-id: scalable-vector }

diff  --git a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
index e68710652a509..fff3c1b34ce33 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
+++ b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
@@ -11,7 +11,7 @@ target triple = "aarch64-unknown-linux-gnu"
 @a = global i64 0, align 4
 
 
-define i64 @b() uwtable {
+define i64 @b() {
 entry:
   %call = tail call i64 @d()
   %0 = alloca i8, i64 ptrtoint (i64 ()* @d to i64), align 16
@@ -33,15 +33,9 @@ entry:
 ; CHECK-NEXT:  add     x29, sp, #8
 
 ; CHECK:       sub     sp, x29, #8
-; CHECK-NEXT:  .cfi_def_cfa wsp, 32
 ; CHECK-NEXT:  ldp     x29, x30, [sp, #8]
 ; CHECK-NEXT:  ldr     x19, [sp, #24]
 ; CHECK-NEXT:  ldr     d8, [sp], #32
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w19
-; CHECK-NEXT: .cfi_restore w30
-; CHECK-NEXT: .cfi_restore w29
-; CHECK-NEXT: .cfi_restore b8
 ; CHECK-NEXT:  ret
 
 declare i64 @d()

diff  --git a/llvm/test/CodeGen/AArch64/large-stack.ll b/llvm/test/CodeGen/AArch64/large-stack.ll
index b0e83ec2b6374..23ef121bcc616 100644
--- a/llvm/test/CodeGen/AArch64/large-stack.ll
+++ b/llvm/test/CodeGen/AArch64/large-stack.ll
@@ -21,7 +21,7 @@ entry:
 
 declare dso_local i32 @printf(i8*, ...)
 
-attributes #0 = { noinline optnone "frame-pointer"="all" uwtable }
+attributes #0 = { noinline optnone "frame-pointer"="all" }
 
 ; CHECK:                  stp	x[[SPILL_REG1:[0-9]+]], x[[SPILL_REG2:[0-9]+]], [sp, #-[[SPILL_OFFSET1:[0-9]+]]]
 ; CHECK-NEXT:             .cfi_def_cfa_offset [[SPILL_OFFSET1]]
@@ -48,10 +48,5 @@ attributes #0 = { noinline optnone "frame-pointer"="all" uwtable }
 ; CHECK-COUNT-128:        add	sp, sp, #[[STACK1]], lsl #12
 ; CHECK-NEXT:             add	sp, sp, #[[STACK2]], lsl #12
 ; CHECK-NEXT:             add	sp, sp, #[[STACK3]]
-; CHECK-NEXT:	            .cfi_def_cfa wsp, [[SPILL_OFFSET1]]
 ; CHECK-NEXT:             ldr	x[[SPILL_REG3]], [sp, #[[SPILL_OFFSET2]]]
 ; CHECK-NEXT:             ldp	x[[SPILL_REG1]], x[[SPILL_REG2]], [sp], #[[SPILL_OFFSET1]]
-; CHECK-NEXT:           	.cfi_def_cfa_offset 0
-; CHECK-NEXT:           	.cfi_restore w[[SPILL_REG3]]
-; CHECK-NEXT:           	.cfi_restore w[[SPILL_REG2]]
-; CHECK-NEXT:           	.cfi_restore w[[SPILL_REG1]]

diff  --git a/llvm/test/CodeGen/AArch64/local_vars.ll b/llvm/test/CodeGen/AArch64/local_vars.ll
index 0a53ce5609467..335482e4aec8f 100644
--- a/llvm/test/CodeGen/AArch64/local_vars.ll
+++ b/llvm/test/CodeGen/AArch64/local_vars.ll
@@ -23,7 +23,7 @@ define void @trivial_func() nounwind {
   ret void
 }
 
-define void @trivial_fp_func() uwtable {
+define void @trivial_fp_func() {
 ; CHECK-LABEL: trivial_fp_func:
 ; CHECK: str x30, [sp, #-16]!
 ; CHECK-NOT: mov x29, sp
@@ -43,15 +43,9 @@ define void @trivial_fp_func() uwtable {
   ret void
 
 ; CHECK: ldr x30, [sp], #16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w30
 ; CHECK-NEXT: ret
 
-; CHECK-WITHFP-ARM64:      .cfi_def_cfa wsp, 16
-; CHECK-WITHFP-ARM64-NEXT: ldp x29, x30, [sp], #16
-; CHECK-WITHFP-ARM64-NEXT: .cfi_def_cfa_offset 0
-; CHECK-WITHFP-ARM64-NEXT: .cfi_restore w30
-; CHECK-WITHFP-ARM64-NEXT: .cfi_restore w29
+; CHECK-WITHFP-ARM64: ldp x29, x30, [sp], #16
 ; CHECK-WITHFP-ARM64-NEXT: ret
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll b/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
index 5adefc7cb7a33..06bfe9fd31268 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
@@ -102,7 +102,7 @@ define void @bar() #0 !dbg !27 {
   ret void
 }
 
-attributes #0 = { noredzone nounwind ssp uwtable minsize "frame-pointer"="none" "target-cpu"="cyclone" }
+attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="none" "target-cpu"="cyclone" }
 
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!3, !4, !5, !6}

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
index c3b3060308c25..3c4eff39c60b9 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
@@ -65,7 +65,7 @@ entry:
   ret i32 %add
 }
 
-attributes #0 = { "sign-return-address"="non-leaf" minsize }
+attributes #0 = { "sign-return-address"="non-leaf" }
 
 ; CHECK-NOT:        OUTLINED_FUNCTION_{{.*}}
 ; CHECK-NOT:         .cfi_b_key_frame

diff  --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
index d6dd07d3d6dd5..3a768d0e3f9b4 100644
--- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -6,7 +6,7 @@
 @gv0 = internal unnamed_addr global i32 0, align 4
 @gv1 = internal unnamed_addr global %struct1** null, align 8
 
-define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) uwtable {
+define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg)  {
 ;CHECK-LABEL: test
 ; A53-LABEL: test:
 ; A53:       // %bb.0: // %entry
@@ -45,9 +45,6 @@ define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg
 ; A53-NEXT:    adrp x8, gv1
 ; A53-NEXT:    str x0, [x8, :lo12:gv1]
 ; A53-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; A53-NEXT:    .cfi_def_cfa_offset 0
-; A53-NEXT:    .cfi_restore w19
-; A53-NEXT:    .cfi_restore w30
 ; A53-NEXT:    ret
 ; A53-NEXT:  .LBB0_4: // %while.body.i.split
 ; A53-NEXT:    // =>This Inner Loop Header: Depth=1

diff  --git a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
index 3833f8bcccf16..4b98449f869eb 100644
--- a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
+++ b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
@@ -10,7 +10,7 @@ target triple = "arm64--"
 ; formed in SelectionDAG, optimizeCondBranch() only triggers if the and
 ; instruction is in a 
diff erent block than the conditional jump.
 
-define void @func() uwtable {
+define void @func() {
 ; CHECK-LABEL: func:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -24,16 +24,12 @@ define void @func() uwtable {
 ; CHECK-NEXT:    ldr w8, [x8]
 ; CHECK-NEXT:    and w0, w8, #0x100
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    cbz w0, .LBB0_5
 ; CHECK-NEXT:  .LBB0_3: // %common.ret.sink.split
 ; CHECK-NEXT:    b extfunc
 ; CHECK-NEXT:  .LBB0_4: // %b2
 ; CHECK-NEXT:    bl extfunc
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    cbnz w0, .LBB0_3
 ; CHECK-NEXT:  .LBB0_5: // %common.ret
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index e0758e6e6bbcd..706520449a7bb 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -5,7 +5,7 @@
 @B = external dso_local local_unnamed_addr global [8 x [8 x i64]], align 8
 @C = external dso_local local_unnamed_addr global [8 x [8 x i64]], align 8
 
-define dso_local void @run_test() local_unnamed_addr uwtable {
+define dso_local void @run_test() local_unnamed_addr #0 {
 ; CHECK-LABEL: run_test:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #96
@@ -175,15 +175,6 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
 ; CHECK-NEXT:    stp q4, q3, [x8, #432]
 ; CHECK-NEXT:    str q0, [x8, #496]
 ; CHECK-NEXT:    add sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore b8
-; CHECK-NEXT:    .cfi_restore b9
-; CHECK-NEXT:    .cfi_restore b10
-; CHECK-NEXT:    .cfi_restore b11
-; CHECK-NEXT:    .cfi_restore b12
-; CHECK-NEXT:    .cfi_restore b13
-; CHECK-NEXT:    .cfi_restore b14
-; CHECK-NEXT:    .cfi_restore b15
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .cfi_offset b9, -16
 entry:

diff  --git a/llvm/test/CodeGen/AArch64/settag.ll b/llvm/test/CodeGen/AArch64/settag.ll
index 1525249a17134..004c32ec20288 100644
--- a/llvm/test/CodeGen/AArch64/settag.ll
+++ b/llvm/test/CodeGen/AArch64/settag.ll
@@ -116,13 +116,12 @@ entry:
   ret void
 }
 
-define void @stg_alloca1() uwtable {
+define void @stg_alloca1() {
 ; CHECK-LABEL: stg_alloca1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    stg sp, [sp], #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i8, i32 16, align 16
@@ -130,7 +129,7 @@ entry:
   ret void
 }
 
-define void @stg_alloca5() uwtable {
+define void @stg_alloca5() {
 ; CHECK-LABEL: stg_alloca5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #80
@@ -138,7 +137,6 @@ define void @stg_alloca5() uwtable {
 ; CHECK-NEXT:    st2g sp, [sp, #32]
 ; CHECK-NEXT:    stg sp, [sp, #64]
 ; CHECK-NEXT:    st2g sp, [sp], #80
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i8, i32 80, align 16
@@ -146,7 +144,7 @@ entry:
   ret void
 }
 
-define void @stg_alloca17() uwtable {
+define void @stg_alloca17() {
 ; CHECK-LABEL: stg_alloca17:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #288
@@ -161,10 +159,7 @@ define void @stg_alloca17() uwtable {
 ; CHECK-NEXT:    cbnz x8, .LBB11_1
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    stg sp, [sp], #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i8, i32 272, align 16

diff  --git a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
index 6a120d9e4d13e..1c08079ca5526 100644
--- a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
+++ b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
@@ -15,7 +15,7 @@
 ;
 ; RUN: llc -mtriple aarch64-linux %s -o - | FileCheck %s
 
-define dso_local void @f(i32 %n, i32* nocapture %x) uwtable {
+define dso_local void @f(i32 %n, i32* nocapture %x) {
 entry:
   %cmp = icmp slt i32 %n, 0
   br i1 %cmp, label %return, label %if.end
@@ -80,10 +80,6 @@ declare void @llvm.stackrestore(i8*)
 ; CHECK:      stp x29, x30, [sp, #-16]!
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: mov x29, sp
-; CHECK-NEXT: .cfi_def_cfa w29, 16
-; CHECK-NEXT: .cfi_offset w30, -8
-; CHECK-NEXT: .cfi_offset w29, -16
-
 
 ; VLA allocation
 ; CHECK: mov [[X2:x[0-9]+]], sp
@@ -97,9 +93,4 @@ declare void @llvm.stackrestore(i8*)
 ; CHECK:      mov sp, [[SAVE]]
 ; Epilogue
 ; CHECK-NEXT: mov sp, x29
-; CHECK-NEXT: .cfi_def_cfa wsp, 16
 ; CHECK-NEXT: ldp x29, x30, [sp], #16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w30
-; CHECK-NEXT: .cfi_restore w29
-; CHECK-NEXT:  ret

diff  --git a/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll b/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
index 447c6d165985a..4343d65d55a4d 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
@@ -138,7 +138,7 @@ entry:
   ret <2 x double> %vld1_lane
 }
 
-define i32 @deadload() speculative_load_hardening uwtable {
+define i32 @deadload() speculative_load_hardening {
 entry:
 ; CHECK-LABEL: deadload
 ; CHECK:       cmp     sp, #0
@@ -147,7 +147,6 @@ entry:
 ; CHECK-NEXT:  .cfi_def_cfa_offset 16
 ; CHECK-NEXT:  ldr     w8, [sp, #12]
 ; CHECK-NEXT:  add     sp, sp, #16
-; CHECK-NEXT:  .cfi_def_cfa_offset 0
 ; CHECK-NEXT:  mov     [[TMPREG:x[0-9]+]], sp
 ; CHECK-NEXT:  and     [[TMPREG]], [[TMPREG]], x16
 ; CHECK-NEXT:  mov     sp, [[TMPREG]]

diff  --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
index a7fd27757ae2b..37e672567103e 100644
--- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll
+++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
@@ -3,7 +3,7 @@
 ; REQUIRES: asserts
 
 target triple = "aarch64-unknown-linux-gnu"
-attributes #0 = {"target-features"="+sve" uwtable}
+attributes #0 = {"target-features"="+sve"}
 
 declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64>, <8 x i64>, i64)
 declare <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64)
@@ -52,10 +52,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
 ; CHECK-NEXT:    str q4, [x9, x8]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #3
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:   .cfi_def_cfa_offset 0
-; CHECK-NEXT:   .cfi_restore w29
 ; CHECK-NEXT:    ret
 
 
@@ -109,10 +106,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
 ; CHECK-NEXT:    str q4, [x9, x8]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #3
-; CHECK-NEXT:   .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:   .cfi_def_cfa_offset 0
-; CHECK-NEXT:   .cfi_restore w29
 ; CHECK-NEXT:    ret
 
 

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
index 7f3b363071186..8dd4138b0fb6b 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
@@ -81,11 +81,7 @@ define dso_local void @foo(i64 %t) local_unnamed_addr #0 {
 ; CHECK-NEXT:    b.ne .LBB0_2
 ; CHECK-NEXT:  // %bb.1: // %entry
 ; CHECK-NEXT:    mov sp, x29
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:   .cfi_def_cfa_offset 0
-; CHECK-NEXT:   .cfi_restore w30
-; CHECK-NEXT:   .cfi_restore w29
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_2: // %entry
 ; CHECK-NEXT:    bl __stack_chk_fail
@@ -100,7 +96,7 @@ declare void @baz(i32*)
 
 ; CHECK-BAD-OFFSET: LLVM ERROR: Unable to encode Stack Protector Guard Offset
 
-attributes #0 = { sspstrong uwtable }
+attributes #0 = { sspstrong }
 !llvm.module.flags = !{!1, !2, !3}
 
 !1 = !{i32 2, !"stack-protector-guard", !"sysreg"}

diff  --git a/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll b/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
index bfcb71e62e8bd..526d9a5ad8fa7 100644
--- a/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
+++ b/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
@@ -7,7 +7,7 @@
 declare %T_IN_BLOCK @return_in_block()
 @in_block_store = dso_local global %T_IN_BLOCK zeroinitializer, align 8
 
-define void @test_default() uwtable {
+define void @test_default() {
 ; CHECK-LABEL: test_default:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -23,15 +23,13 @@ define void @test_default() uwtable {
 ; CHECK-NEXT:    str d4, [x8, #32]
 ; CHECK-NEXT:    str d5, [x8, #40]
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
   %1 = call %T_IN_BLOCK @return_in_block()
   store %T_IN_BLOCK %1, %T_IN_BLOCK* @in_block_store
   ret void
 }
 
-define void @test_minsize() minsize uwtable {
+define void @test_minsize() minsize {
 ; CHECK-LABEL: test_minsize:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -50,7 +48,7 @@ define void @test_minsize() minsize uwtable {
   ret void
 }
 
-define void @test_optsize() optsize uwtable {
+define void @test_optsize() optsize {
 ; CHECK-LABEL: test_optsize:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -63,8 +61,6 @@ define void @test_optsize() optsize uwtable {
 ; CHECK-NEXT:    stp d2, d3, [x8, #16]
 ; CHECK-NEXT:    stp d4, d5, [x8, #32]
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
   %1 = call %T_IN_BLOCK @return_in_block()
   store %T_IN_BLOCK %1, %T_IN_BLOCK* @in_block_store

diff  --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
index 191968a5c423a..4e07f56f6263f 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
@@ -56,7 +56,7 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_0(<vscale x 28 x i1> %in) {
   ret <vscale x 14 x i1> %res
 }
 
-define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) uwtable {
+define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) {
 ; CHECK-LABEL: extract_nxv14i1_nxv28i1_14:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -90,10 +90,7 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) uw
 ; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
 ; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %res = call <vscale x 14 x i1> @llvm.experimental.vector.extract.nxv14i1.nxv28i1(<vscale x 28 x i1> %in, i64 14)
   ret <vscale x 14 x i1> %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
index f863eee2b8654..d56d57225d0ef 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
 ; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -D#VBYTES=32
 ; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -D#VBYTES=32
@@ -23,14 +22,6 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; Don't use SVE for 64-bit vectors.
 define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v4f16:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w0, #0x1
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    dup v2.4h, w8
-; NO_SVE-NEXT:    bif v0.8b, v1.8b, v2.8b
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w0, #0x1
@@ -44,14 +35,6 @@ define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, i1 %mask) #0 {
 
 ; Don't use SVE for 128-bit vectors.
 define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v8f16:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w0, #0x1
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    dup v2.8h, w8
-; NO_SVE-NEXT:    bif v0.16b, v1.16b, v2.16b
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w0, #0x1
@@ -64,20 +47,6 @@ define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, i1 %mask) #0 {
 }
 
 define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v16f16:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0, #16]
-; NO_SVE-NEXT:    ldr q2, [x1]
-; NO_SVE-NEXT:    ldr q3, [x1, #16]
-; NO_SVE-NEXT:    dup v4.8h, w8
-; NO_SVE-NEXT:    bif v0.16b, v2.16b, v4.16b
-; NO_SVE-NEXT:    bif v1.16b, v3.16b, v4.16b
-; NO_SVE-NEXT:    stp q0, q1, [x0]
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w2, #0x1
@@ -99,27 +68,6 @@ define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, i1 %mask) #0 {
 }
 
 define void @select_v32f16(<32 x half>* %a, <32 x half>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v32f16:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #48]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0]
-; NO_SVE-NEXT:    ldr q2, [x0, #16]
-; NO_SVE-NEXT:    ldr q3, [x0, #32]
-; NO_SVE-NEXT:    ldr q4, [x1, #48]
-; NO_SVE-NEXT:    dup v6.8h, w8
-; NO_SVE-NEXT:    ldr q5, [x1]
-; NO_SVE-NEXT:    ldr q7, [x1, #16]
-; NO_SVE-NEXT:    ldr q16, [x1, #32]
-; NO_SVE-NEXT:    bif v1.16b, v5.16b, v6.16b
-; NO_SVE-NEXT:    bif v2.16b, v7.16b, v6.16b
-; NO_SVE-NEXT:    bif v0.16b, v4.16b, v6.16b
-; NO_SVE-NEXT:    bif v3.16b, v16.16b, v6.16b
-; NO_SVE-NEXT:    stp q1, q2, [x0]
-; NO_SVE-NEXT:    stp q3, q0, [x0, #32]
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_512-LABEL: select_v32f16:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    and w8, w2, #0x1
@@ -141,44 +89,6 @@ define void @select_v32f16(<32 x half>* %a, <32 x half>* %b, i1 %mask) #0 {
 }
 
 define void @select_v64f16(<64 x half>* %a, <64 x half>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v64f16:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #16]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0]
-; NO_SVE-NEXT:    ldr q2, [x0, #48]
-; NO_SVE-NEXT:    ldr q3, [x0, #32]
-; NO_SVE-NEXT:    ldr q4, [x0, #80]
-; NO_SVE-NEXT:    dup v21.8h, w8
-; NO_SVE-NEXT:    ldr q5, [x0, #64]
-; NO_SVE-NEXT:    ldr q6, [x0, #112]
-; NO_SVE-NEXT:    ldr q7, [x0, #96]
-; NO_SVE-NEXT:    ldr q16, [x1, #16]
-; NO_SVE-NEXT:    ldr q17, [x1]
-; NO_SVE-NEXT:    ldr q18, [x1, #48]
-; NO_SVE-NEXT:    ldr q19, [x1, #32]
-; NO_SVE-NEXT:    bif v0.16b, v16.16b, v21.16b
-; NO_SVE-NEXT:    ldr q20, [x1, #80]
-; NO_SVE-NEXT:    bif v1.16b, v17.16b, v21.16b
-; NO_SVE-NEXT:    ldr q16, [x1, #64]
-; NO_SVE-NEXT:    bif v2.16b, v18.16b, v21.16b
-; NO_SVE-NEXT:    ldr q17, [x1, #112]
-; NO_SVE-NEXT:    bif v3.16b, v19.16b, v21.16b
-; NO_SVE-NEXT:    ldr q18, [x1, #96]
-; NO_SVE-NEXT:    bif v4.16b, v20.16b, v21.16b
-; NO_SVE-NEXT:    stp q1, q0, [x0]
-; NO_SVE-NEXT:    mov v0.16b, v21.16b
-; NO_SVE-NEXT:    mov v1.16b, v21.16b
-; NO_SVE-NEXT:    stp q3, q2, [x0, #32]
-; NO_SVE-NEXT:    mov v2.16b, v21.16b
-; NO_SVE-NEXT:    bsl v0.16b, v5.16b, v16.16b
-; NO_SVE-NEXT:    bsl v1.16b, v6.16b, v17.16b
-; NO_SVE-NEXT:    bsl v2.16b, v7.16b, v18.16b
-; NO_SVE-NEXT:    stp q0, q4, [x0, #64]
-; NO_SVE-NEXT:    stp q2, q1, [x0, #96]
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_1024-LABEL: select_v64f16:
 ; VBITS_GE_1024:       // %bb.0:
 ; VBITS_GE_1024-NEXT:    and w8, w2, #0x1
@@ -200,89 +110,6 @@ define void @select_v64f16(<64 x half>* %a, <64 x half>* %b, i1 %mask) #0 {
 }
 
 define void @select_v128f16(<128 x half>* %a, <128 x half>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v128f16:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    stp d11, d10, [sp, #-32]! // 16-byte Folded Spill
-; NO_SVE-NEXT:    .cfi_def_cfa_offset 32
-; NO_SVE-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; NO_SVE-NEXT:    .cfi_offset b8, -8
-; NO_SVE-NEXT:    .cfi_offset b9, -16
-; NO_SVE-NEXT:    .cfi_offset b10, -24
-; NO_SVE-NEXT:    .cfi_offset b11, -32
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #240]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0, #224]
-; NO_SVE-NEXT:    ldr q2, [x0, #208]
-; NO_SVE-NEXT:    ldr q3, [x0, #192]
-; NO_SVE-NEXT:    ldr q4, [x0, #176]
-; NO_SVE-NEXT:    dup v8.8h, w8
-; NO_SVE-NEXT:    ldr q5, [x0, #160]
-; NO_SVE-NEXT:    ldr q6, [x0, #144]
-; NO_SVE-NEXT:    ldr q7, [x0, #128]
-; NO_SVE-NEXT:    ldr q16, [x0, #112]
-; NO_SVE-NEXT:    ldr q17, [x0, #96]
-; NO_SVE-NEXT:    ldr q18, [x0, #80]
-; NO_SVE-NEXT:    ldr q19, [x0, #64]
-; NO_SVE-NEXT:    ldr q20, [x0, #48]
-; NO_SVE-NEXT:    ldr q21, [x0, #32]
-; NO_SVE-NEXT:    ldr q22, [x0, #16]
-; NO_SVE-NEXT:    ldr q23, [x0]
-; NO_SVE-NEXT:    ldr q24, [x1, #240]
-; NO_SVE-NEXT:    ldr q25, [x1, #224]
-; NO_SVE-NEXT:    ldr q26, [x1, #208]
-; NO_SVE-NEXT:    ldr q27, [x1, #192]
-; NO_SVE-NEXT:    bif v0.16b, v24.16b, v8.16b
-; NO_SVE-NEXT:    ldr q28, [x1, #176]
-; NO_SVE-NEXT:    bif v1.16b, v25.16b, v8.16b
-; NO_SVE-NEXT:    ldr q29, [x1, #160]
-; NO_SVE-NEXT:    bif v2.16b, v26.16b, v8.16b
-; NO_SVE-NEXT:    ldr q30, [x1, #144]
-; NO_SVE-NEXT:    bif v3.16b, v27.16b, v8.16b
-; NO_SVE-NEXT:    ldr q31, [x1, #128]
-; NO_SVE-NEXT:    ldr q9, [x1, #112]
-; NO_SVE-NEXT:    ldr q10, [x1, #96]
-; NO_SVE-NEXT:    bif v4.16b, v28.16b, v8.16b
-; NO_SVE-NEXT:    ldr q28, [x1, #80]
-; NO_SVE-NEXT:    ldr q24, [x1, #64]
-; NO_SVE-NEXT:    ldr q25, [x1, #48]
-; NO_SVE-NEXT:    ldr q26, [x1, #32]
-; NO_SVE-NEXT:    ldr q27, [x1, #16]
-; NO_SVE-NEXT:    ldr q11, [x1]
-; NO_SVE-NEXT:    stp q1, q0, [x0, #224]
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    stp q3, q2, [x0, #192]
-; NO_SVE-NEXT:    mov v1.16b, v8.16b
-; NO_SVE-NEXT:    mov v2.16b, v8.16b
-; NO_SVE-NEXT:    bsl v0.16b, v5.16b, v29.16b
-; NO_SVE-NEXT:    bsl v1.16b, v6.16b, v30.16b
-; NO_SVE-NEXT:    bsl v2.16b, v7.16b, v31.16b
-; NO_SVE-NEXT:    mov v3.16b, v8.16b
-; NO_SVE-NEXT:    stp q0, q4, [x0, #160]
-; NO_SVE-NEXT:    mov v4.16b, v8.16b
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    stp q2, q1, [x0, #128]
-; NO_SVE-NEXT:    mov v1.16b, v8.16b
-; NO_SVE-NEXT:    bsl v3.16b, v16.16b, v9.16b
-; NO_SVE-NEXT:    bsl v4.16b, v17.16b, v10.16b
-; NO_SVE-NEXT:    bsl v0.16b, v18.16b, v28.16b
-; NO_SVE-NEXT:    bsl v1.16b, v19.16b, v24.16b
-; NO_SVE-NEXT:    mov v2.16b, v8.16b
-; NO_SVE-NEXT:    stp q4, q3, [x0, #96]
-; NO_SVE-NEXT:    mov v3.16b, v8.16b
-; NO_SVE-NEXT:    mov v4.16b, v8.16b
-; NO_SVE-NEXT:    stp q1, q0, [x0, #64]
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    bsl v2.16b, v20.16b, v25.16b
-; NO_SVE-NEXT:    bsl v3.16b, v21.16b, v26.16b
-; NO_SVE-NEXT:    bsl v4.16b, v22.16b, v27.16b
-; NO_SVE-NEXT:    bsl v0.16b, v23.16b, v11.16b
-; NO_SVE-NEXT:    ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; NO_SVE-NEXT:    stp q3, q2, [x0, #32]
-; NO_SVE-NEXT:    stp q0, q4, [x0]
-; NO_SVE-NEXT:    ldp d11, d10, [sp], #32 // 16-byte Folded Reload
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_2048-LABEL: select_v128f16:
 ; VBITS_GE_2048:       // %bb.0:
 ; VBITS_GE_2048-NEXT:    and w8, w2, #0x1
@@ -305,14 +132,6 @@ define void @select_v128f16(<128 x half>* %a, <128 x half>* %b, i1 %mask) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v2f32:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w0, #0x1
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    dup v2.2s, w8
-; NO_SVE-NEXT:    bif v0.8b, v1.8b, v2.8b
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w0, #0x1
@@ -326,14 +145,6 @@ define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, i1 %mask) #
 
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v4f32:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w0, #0x1
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    dup v2.4s, w8
-; NO_SVE-NEXT:    bif v0.16b, v1.16b, v2.16b
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w0, #0x1
@@ -346,20 +157,6 @@ define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, i1 %mask) #
 }
 
 define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v8f32:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0, #16]
-; NO_SVE-NEXT:    ldr q2, [x1]
-; NO_SVE-NEXT:    ldr q3, [x1, #16]
-; NO_SVE-NEXT:    dup v4.4s, w8
-; NO_SVE-NEXT:    bif v0.16b, v2.16b, v4.16b
-; NO_SVE-NEXT:    bif v1.16b, v3.16b, v4.16b
-; NO_SVE-NEXT:    stp q0, q1, [x0]
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w2, #0x1
@@ -381,27 +178,6 @@ define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, i1 %mask) #0 {
 }
 
 define void @select_v16f32(<16 x float>* %a, <16 x float>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v16f32:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #48]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0]
-; NO_SVE-NEXT:    ldr q2, [x0, #16]
-; NO_SVE-NEXT:    ldr q3, [x0, #32]
-; NO_SVE-NEXT:    ldr q4, [x1, #48]
-; NO_SVE-NEXT:    dup v6.4s, w8
-; NO_SVE-NEXT:    ldr q5, [x1]
-; NO_SVE-NEXT:    ldr q7, [x1, #16]
-; NO_SVE-NEXT:    ldr q16, [x1, #32]
-; NO_SVE-NEXT:    bif v1.16b, v5.16b, v6.16b
-; NO_SVE-NEXT:    bif v2.16b, v7.16b, v6.16b
-; NO_SVE-NEXT:    bif v0.16b, v4.16b, v6.16b
-; NO_SVE-NEXT:    bif v3.16b, v16.16b, v6.16b
-; NO_SVE-NEXT:    stp q1, q2, [x0]
-; NO_SVE-NEXT:    stp q3, q0, [x0, #32]
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_512-LABEL: select_v16f32:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    and w8, w2, #0x1
@@ -423,44 +199,6 @@ define void @select_v16f32(<16 x float>* %a, <16 x float>* %b, i1 %mask) #0 {
 }
 
 define void @select_v32f32(<32 x float>* %a, <32 x float>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v32f32:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #16]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0]
-; NO_SVE-NEXT:    ldr q2, [x0, #48]
-; NO_SVE-NEXT:    ldr q3, [x0, #32]
-; NO_SVE-NEXT:    ldr q4, [x0, #80]
-; NO_SVE-NEXT:    dup v21.4s, w8
-; NO_SVE-NEXT:    ldr q5, [x0, #64]
-; NO_SVE-NEXT:    ldr q6, [x0, #112]
-; NO_SVE-NEXT:    ldr q7, [x0, #96]
-; NO_SVE-NEXT:    ldr q16, [x1, #16]
-; NO_SVE-NEXT:    ldr q17, [x1]
-; NO_SVE-NEXT:    ldr q18, [x1, #48]
-; NO_SVE-NEXT:    ldr q19, [x1, #32]
-; NO_SVE-NEXT:    bif v0.16b, v16.16b, v21.16b
-; NO_SVE-NEXT:    ldr q20, [x1, #80]
-; NO_SVE-NEXT:    bif v1.16b, v17.16b, v21.16b
-; NO_SVE-NEXT:    ldr q16, [x1, #64]
-; NO_SVE-NEXT:    bif v2.16b, v18.16b, v21.16b
-; NO_SVE-NEXT:    ldr q17, [x1, #112]
-; NO_SVE-NEXT:    bif v3.16b, v19.16b, v21.16b
-; NO_SVE-NEXT:    ldr q18, [x1, #96]
-; NO_SVE-NEXT:    bif v4.16b, v20.16b, v21.16b
-; NO_SVE-NEXT:    stp q1, q0, [x0]
-; NO_SVE-NEXT:    mov v0.16b, v21.16b
-; NO_SVE-NEXT:    mov v1.16b, v21.16b
-; NO_SVE-NEXT:    stp q3, q2, [x0, #32]
-; NO_SVE-NEXT:    mov v2.16b, v21.16b
-; NO_SVE-NEXT:    bsl v0.16b, v5.16b, v16.16b
-; NO_SVE-NEXT:    bsl v1.16b, v6.16b, v17.16b
-; NO_SVE-NEXT:    bsl v2.16b, v7.16b, v18.16b
-; NO_SVE-NEXT:    stp q0, q4, [x0, #64]
-; NO_SVE-NEXT:    stp q2, q1, [x0, #96]
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_1024-LABEL: select_v32f32:
 ; VBITS_GE_1024:       // %bb.0:
 ; VBITS_GE_1024-NEXT:    and w8, w2, #0x1
@@ -482,89 +220,6 @@ define void @select_v32f32(<32 x float>* %a, <32 x float>* %b, i1 %mask) #0 {
 }
 
 define void @select_v64f32(<64 x float>* %a, <64 x float>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v64f32:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    stp d11, d10, [sp, #-32]! // 16-byte Folded Spill
-; NO_SVE-NEXT:    .cfi_def_cfa_offset 32
-; NO_SVE-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; NO_SVE-NEXT:    .cfi_offset b8, -8
-; NO_SVE-NEXT:    .cfi_offset b9, -16
-; NO_SVE-NEXT:    .cfi_offset b10, -24
-; NO_SVE-NEXT:    .cfi_offset b11, -32
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #240]
-; NO_SVE-NEXT:    csetm w8, ne
-; NO_SVE-NEXT:    ldr q1, [x0, #224]
-; NO_SVE-NEXT:    ldr q2, [x0, #208]
-; NO_SVE-NEXT:    ldr q3, [x0, #192]
-; NO_SVE-NEXT:    ldr q4, [x0, #176]
-; NO_SVE-NEXT:    dup v8.4s, w8
-; NO_SVE-NEXT:    ldr q5, [x0, #160]
-; NO_SVE-NEXT:    ldr q6, [x0, #144]
-; NO_SVE-NEXT:    ldr q7, [x0, #128]
-; NO_SVE-NEXT:    ldr q16, [x0, #112]
-; NO_SVE-NEXT:    ldr q17, [x0, #96]
-; NO_SVE-NEXT:    ldr q18, [x0, #80]
-; NO_SVE-NEXT:    ldr q19, [x0, #64]
-; NO_SVE-NEXT:    ldr q20, [x0, #48]
-; NO_SVE-NEXT:    ldr q21, [x0, #32]
-; NO_SVE-NEXT:    ldr q22, [x0, #16]
-; NO_SVE-NEXT:    ldr q23, [x0]
-; NO_SVE-NEXT:    ldr q24, [x1, #240]
-; NO_SVE-NEXT:    ldr q25, [x1, #224]
-; NO_SVE-NEXT:    ldr q26, [x1, #208]
-; NO_SVE-NEXT:    ldr q27, [x1, #192]
-; NO_SVE-NEXT:    bif v0.16b, v24.16b, v8.16b
-; NO_SVE-NEXT:    ldr q28, [x1, #176]
-; NO_SVE-NEXT:    bif v1.16b, v25.16b, v8.16b
-; NO_SVE-NEXT:    ldr q29, [x1, #160]
-; NO_SVE-NEXT:    bif v2.16b, v26.16b, v8.16b
-; NO_SVE-NEXT:    ldr q30, [x1, #144]
-; NO_SVE-NEXT:    bif v3.16b, v27.16b, v8.16b
-; NO_SVE-NEXT:    ldr q31, [x1, #128]
-; NO_SVE-NEXT:    ldr q9, [x1, #112]
-; NO_SVE-NEXT:    ldr q10, [x1, #96]
-; NO_SVE-NEXT:    bif v4.16b, v28.16b, v8.16b
-; NO_SVE-NEXT:    ldr q28, [x1, #80]
-; NO_SVE-NEXT:    ldr q24, [x1, #64]
-; NO_SVE-NEXT:    ldr q25, [x1, #48]
-; NO_SVE-NEXT:    ldr q26, [x1, #32]
-; NO_SVE-NEXT:    ldr q27, [x1, #16]
-; NO_SVE-NEXT:    ldr q11, [x1]
-; NO_SVE-NEXT:    stp q1, q0, [x0, #224]
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    stp q3, q2, [x0, #192]
-; NO_SVE-NEXT:    mov v1.16b, v8.16b
-; NO_SVE-NEXT:    mov v2.16b, v8.16b
-; NO_SVE-NEXT:    bsl v0.16b, v5.16b, v29.16b
-; NO_SVE-NEXT:    bsl v1.16b, v6.16b, v30.16b
-; NO_SVE-NEXT:    bsl v2.16b, v7.16b, v31.16b
-; NO_SVE-NEXT:    mov v3.16b, v8.16b
-; NO_SVE-NEXT:    stp q0, q4, [x0, #160]
-; NO_SVE-NEXT:    mov v4.16b, v8.16b
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    stp q2, q1, [x0, #128]
-; NO_SVE-NEXT:    mov v1.16b, v8.16b
-; NO_SVE-NEXT:    bsl v3.16b, v16.16b, v9.16b
-; NO_SVE-NEXT:    bsl v4.16b, v17.16b, v10.16b
-; NO_SVE-NEXT:    bsl v0.16b, v18.16b, v28.16b
-; NO_SVE-NEXT:    bsl v1.16b, v19.16b, v24.16b
-; NO_SVE-NEXT:    mov v2.16b, v8.16b
-; NO_SVE-NEXT:    stp q4, q3, [x0, #96]
-; NO_SVE-NEXT:    mov v3.16b, v8.16b
-; NO_SVE-NEXT:    mov v4.16b, v8.16b
-; NO_SVE-NEXT:    stp q1, q0, [x0, #64]
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    bsl v2.16b, v20.16b, v25.16b
-; NO_SVE-NEXT:    bsl v3.16b, v21.16b, v26.16b
-; NO_SVE-NEXT:    bsl v4.16b, v22.16b, v27.16b
-; NO_SVE-NEXT:    bsl v0.16b, v23.16b, v11.16b
-; NO_SVE-NEXT:    ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; NO_SVE-NEXT:    stp q3, q2, [x0, #32]
-; NO_SVE-NEXT:    stp q0, q4, [x0]
-; NO_SVE-NEXT:    ldp d11, d10, [sp], #32 // 16-byte Folded Reload
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_2048-LABEL: select_v64f32:
 ; VBITS_GE_2048:       // %bb.0:
 ; VBITS_GE_2048-NEXT:    and w8, w2, #0x1
@@ -587,14 +242,6 @@ define void @select_v64f32(<64 x float>* %a, <64 x float>* %b, i1 %mask) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v1f64:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w0, #0x1
-; NO_SVE-NEXT:    csetm x8, ne
-; NO_SVE-NEXT:    fmov d2, x8
-; NO_SVE-NEXT:    bif v0.8b, v1.8b, v2.8b
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w0, #0x1
@@ -608,14 +255,6 @@ define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, i1 %mask
 
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v2f64:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w0, #0x1
-; NO_SVE-NEXT:    csetm x8, ne
-; NO_SVE-NEXT:    dup v2.2d, x8
-; NO_SVE-NEXT:    bif v0.16b, v1.16b, v2.16b
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w0, #0x1
@@ -628,20 +267,6 @@ define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, i1 %mask
 }
 
 define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v4f64:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0]
-; NO_SVE-NEXT:    csetm x8, ne
-; NO_SVE-NEXT:    ldr q1, [x0, #16]
-; NO_SVE-NEXT:    ldr q2, [x1]
-; NO_SVE-NEXT:    ldr q3, [x1, #16]
-; NO_SVE-NEXT:    dup v4.2d, x8
-; NO_SVE-NEXT:    bif v0.16b, v2.16b, v4.16b
-; NO_SVE-NEXT:    bif v1.16b, v3.16b, v4.16b
-; NO_SVE-NEXT:    stp q0, q1, [x0]
-; NO_SVE-NEXT:    ret
-;
 ; CHECK-LABEL: select_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w2, #0x1
@@ -663,27 +288,6 @@ define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, i1 %mask) #0 {
 }
 
 define void @select_v8f64(<8 x double>* %a, <8 x double>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v8f64:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #48]
-; NO_SVE-NEXT:    csetm x8, ne
-; NO_SVE-NEXT:    ldr q1, [x0]
-; NO_SVE-NEXT:    ldr q2, [x0, #16]
-; NO_SVE-NEXT:    ldr q3, [x0, #32]
-; NO_SVE-NEXT:    ldr q4, [x1, #48]
-; NO_SVE-NEXT:    dup v6.2d, x8
-; NO_SVE-NEXT:    ldr q5, [x1]
-; NO_SVE-NEXT:    ldr q7, [x1, #16]
-; NO_SVE-NEXT:    ldr q16, [x1, #32]
-; NO_SVE-NEXT:    bif v1.16b, v5.16b, v6.16b
-; NO_SVE-NEXT:    bif v2.16b, v7.16b, v6.16b
-; NO_SVE-NEXT:    bif v0.16b, v4.16b, v6.16b
-; NO_SVE-NEXT:    bif v3.16b, v16.16b, v6.16b
-; NO_SVE-NEXT:    stp q1, q2, [x0]
-; NO_SVE-NEXT:    stp q3, q0, [x0, #32]
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_512-LABEL: select_v8f64:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    and w8, w2, #0x1
@@ -705,44 +309,6 @@ define void @select_v8f64(<8 x double>* %a, <8 x double>* %b, i1 %mask) #0 {
 }
 
 define void @select_v16f64(<16 x double>* %a, <16 x double>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v16f64:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #16]
-; NO_SVE-NEXT:    csetm x8, ne
-; NO_SVE-NEXT:    ldr q1, [x0]
-; NO_SVE-NEXT:    ldr q2, [x0, #48]
-; NO_SVE-NEXT:    ldr q3, [x0, #32]
-; NO_SVE-NEXT:    ldr q4, [x0, #80]
-; NO_SVE-NEXT:    dup v21.2d, x8
-; NO_SVE-NEXT:    ldr q5, [x0, #64]
-; NO_SVE-NEXT:    ldr q6, [x0, #112]
-; NO_SVE-NEXT:    ldr q7, [x0, #96]
-; NO_SVE-NEXT:    ldr q16, [x1, #16]
-; NO_SVE-NEXT:    ldr q17, [x1]
-; NO_SVE-NEXT:    ldr q18, [x1, #48]
-; NO_SVE-NEXT:    ldr q19, [x1, #32]
-; NO_SVE-NEXT:    bif v0.16b, v16.16b, v21.16b
-; NO_SVE-NEXT:    ldr q20, [x1, #80]
-; NO_SVE-NEXT:    bif v1.16b, v17.16b, v21.16b
-; NO_SVE-NEXT:    ldr q16, [x1, #64]
-; NO_SVE-NEXT:    bif v2.16b, v18.16b, v21.16b
-; NO_SVE-NEXT:    ldr q17, [x1, #112]
-; NO_SVE-NEXT:    bif v3.16b, v19.16b, v21.16b
-; NO_SVE-NEXT:    ldr q18, [x1, #96]
-; NO_SVE-NEXT:    bif v4.16b, v20.16b, v21.16b
-; NO_SVE-NEXT:    stp q1, q0, [x0]
-; NO_SVE-NEXT:    mov v0.16b, v21.16b
-; NO_SVE-NEXT:    mov v1.16b, v21.16b
-; NO_SVE-NEXT:    stp q3, q2, [x0, #32]
-; NO_SVE-NEXT:    mov v2.16b, v21.16b
-; NO_SVE-NEXT:    bsl v0.16b, v5.16b, v16.16b
-; NO_SVE-NEXT:    bsl v1.16b, v6.16b, v17.16b
-; NO_SVE-NEXT:    bsl v2.16b, v7.16b, v18.16b
-; NO_SVE-NEXT:    stp q0, q4, [x0, #64]
-; NO_SVE-NEXT:    stp q2, q1, [x0, #96]
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_1024-LABEL: select_v16f64:
 ; VBITS_GE_1024:       // %bb.0:
 ; VBITS_GE_1024-NEXT:    and w8, w2, #0x1
@@ -764,89 +330,6 @@ define void @select_v16f64(<16 x double>* %a, <16 x double>* %b, i1 %mask) #0 {
 }
 
 define void @select_v32f64(<32 x double>* %a, <32 x double>* %b, i1 %mask) #0 {
-; NO_SVE-LABEL: select_v32f64:
-; NO_SVE:       // %bb.0:
-; NO_SVE-NEXT:    stp d11, d10, [sp, #-32]! // 16-byte Folded Spill
-; NO_SVE-NEXT:    .cfi_def_cfa_offset 32
-; NO_SVE-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; NO_SVE-NEXT:    .cfi_offset b8, -8
-; NO_SVE-NEXT:    .cfi_offset b9, -16
-; NO_SVE-NEXT:    .cfi_offset b10, -24
-; NO_SVE-NEXT:    .cfi_offset b11, -32
-; NO_SVE-NEXT:    tst w2, #0x1
-; NO_SVE-NEXT:    ldr q0, [x0, #240]
-; NO_SVE-NEXT:    csetm x8, ne
-; NO_SVE-NEXT:    ldr q1, [x0, #224]
-; NO_SVE-NEXT:    ldr q2, [x0, #208]
-; NO_SVE-NEXT:    ldr q3, [x0, #192]
-; NO_SVE-NEXT:    ldr q4, [x0, #176]
-; NO_SVE-NEXT:    dup v8.2d, x8
-; NO_SVE-NEXT:    ldr q5, [x0, #160]
-; NO_SVE-NEXT:    ldr q6, [x0, #144]
-; NO_SVE-NEXT:    ldr q7, [x0, #128]
-; NO_SVE-NEXT:    ldr q16, [x0, #112]
-; NO_SVE-NEXT:    ldr q17, [x0, #96]
-; NO_SVE-NEXT:    ldr q18, [x0, #80]
-; NO_SVE-NEXT:    ldr q19, [x0, #64]
-; NO_SVE-NEXT:    ldr q20, [x0, #48]
-; NO_SVE-NEXT:    ldr q21, [x0, #32]
-; NO_SVE-NEXT:    ldr q22, [x0, #16]
-; NO_SVE-NEXT:    ldr q23, [x0]
-; NO_SVE-NEXT:    ldr q24, [x1, #240]
-; NO_SVE-NEXT:    ldr q25, [x1, #224]
-; NO_SVE-NEXT:    ldr q26, [x1, #208]
-; NO_SVE-NEXT:    ldr q27, [x1, #192]
-; NO_SVE-NEXT:    bif v0.16b, v24.16b, v8.16b
-; NO_SVE-NEXT:    ldr q28, [x1, #176]
-; NO_SVE-NEXT:    bif v1.16b, v25.16b, v8.16b
-; NO_SVE-NEXT:    ldr q29, [x1, #160]
-; NO_SVE-NEXT:    bif v2.16b, v26.16b, v8.16b
-; NO_SVE-NEXT:    ldr q30, [x1, #144]
-; NO_SVE-NEXT:    bif v3.16b, v27.16b, v8.16b
-; NO_SVE-NEXT:    ldr q31, [x1, #128]
-; NO_SVE-NEXT:    ldr q9, [x1, #112]
-; NO_SVE-NEXT:    ldr q10, [x1, #96]
-; NO_SVE-NEXT:    bif v4.16b, v28.16b, v8.16b
-; NO_SVE-NEXT:    ldr q28, [x1, #80]
-; NO_SVE-NEXT:    ldr q24, [x1, #64]
-; NO_SVE-NEXT:    ldr q25, [x1, #48]
-; NO_SVE-NEXT:    ldr q26, [x1, #32]
-; NO_SVE-NEXT:    ldr q27, [x1, #16]
-; NO_SVE-NEXT:    ldr q11, [x1]
-; NO_SVE-NEXT:    stp q1, q0, [x0, #224]
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    stp q3, q2, [x0, #192]
-; NO_SVE-NEXT:    mov v1.16b, v8.16b
-; NO_SVE-NEXT:    mov v2.16b, v8.16b
-; NO_SVE-NEXT:    bsl v0.16b, v5.16b, v29.16b
-; NO_SVE-NEXT:    bsl v1.16b, v6.16b, v30.16b
-; NO_SVE-NEXT:    bsl v2.16b, v7.16b, v31.16b
-; NO_SVE-NEXT:    mov v3.16b, v8.16b
-; NO_SVE-NEXT:    stp q0, q4, [x0, #160]
-; NO_SVE-NEXT:    mov v4.16b, v8.16b
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    stp q2, q1, [x0, #128]
-; NO_SVE-NEXT:    mov v1.16b, v8.16b
-; NO_SVE-NEXT:    bsl v3.16b, v16.16b, v9.16b
-; NO_SVE-NEXT:    bsl v4.16b, v17.16b, v10.16b
-; NO_SVE-NEXT:    bsl v0.16b, v18.16b, v28.16b
-; NO_SVE-NEXT:    bsl v1.16b, v19.16b, v24.16b
-; NO_SVE-NEXT:    mov v2.16b, v8.16b
-; NO_SVE-NEXT:    stp q4, q3, [x0, #96]
-; NO_SVE-NEXT:    mov v3.16b, v8.16b
-; NO_SVE-NEXT:    mov v4.16b, v8.16b
-; NO_SVE-NEXT:    stp q1, q0, [x0, #64]
-; NO_SVE-NEXT:    mov v0.16b, v8.16b
-; NO_SVE-NEXT:    bsl v2.16b, v20.16b, v25.16b
-; NO_SVE-NEXT:    bsl v3.16b, v21.16b, v26.16b
-; NO_SVE-NEXT:    bsl v4.16b, v22.16b, v27.16b
-; NO_SVE-NEXT:    bsl v0.16b, v23.16b, v11.16b
-; NO_SVE-NEXT:    ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; NO_SVE-NEXT:    stp q3, q2, [x0, #32]
-; NO_SVE-NEXT:    stp q0, q4, [x0]
-; NO_SVE-NEXT:    ldp d11, d10, [sp], #32 // 16-byte Folded Reload
-; NO_SVE-NEXT:    ret
-;
 ; VBITS_GE_2048-LABEL: select_v32f64:
 ; VBITS_GE_2048:       // %bb.0:
 ; VBITS_GE_2048-NEXT:    and w8, w2, #0x1

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
index a4f012f7af349..9d89649808542 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
@@ -312,4 +312,4 @@ define void @select_v32f64(<32 x double>* %a, <32 x double>* %b) #0 {
   ret void
 }
 
-attributes #0 = { "target-features"="+sve" uwtable }
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
index d8c81b804ada6..191e9edc8d84f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
@@ -130,11 +130,7 @@ define void @select_v32i8(<32 x i8>* %a, <32 x i8>* %b, <32 x i1>* %c) #0 {
 ; CHECK-NEXT:    sel z0.b, p1, z1.b, z2.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    mov sp, x29
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i8>, <32 x i8>* %a
@@ -294,11 +290,7 @@ define void @select_v64i8(<64 x i8>* %a, <64 x i8>* %b, <64 x i1>* %c) #0 {
 ; VBITS_GE_512-NEXT:    sel z0.b, p1, z1.b, z2.b
 ; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x0]
 ; VBITS_GE_512-NEXT:    mov sp, x29
-; VBITS_GE_512-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_512-NEXT:    .cfi_restore w30
-; VBITS_GE_512-NEXT:    .cfi_restore w29
 ; VBITS_GE_512-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x i8>, <64 x i8>* %a
@@ -587,11 +579,7 @@ define void @select_v128i8(<128 x i8>* %a, <128 x i8>* %b, <128 x i1>* %c) #0 {
 ; VBITS_GE_1024-NEXT:    sel z0.b, p1, z1.b, z2.b
 ; VBITS_GE_1024-NEXT:    st1b { z0.b }, p0, [x0]
 ; VBITS_GE_1024-NEXT:    mov sp, x29
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_1024-NEXT:    .cfi_restore w30
-; VBITS_GE_1024-NEXT:    .cfi_restore w29
 ; VBITS_GE_1024-NEXT:    ret
   %mask = load <128 x i1>, <128 x i1>* %c
   %op1 = load <128 x i8>, <128 x i8>* %a
@@ -1138,11 +1126,7 @@ define void @select_v256i8(<256 x i8>* %a, <256 x i8>* %b, <256 x i1>* %c) #0 {
 ; VBITS_GE_2048-NEXT:    sel z0.b, p1, z1.b, z2.b
 ; VBITS_GE_2048-NEXT:    st1b { z0.b }, p0, [x0]
 ; VBITS_GE_2048-NEXT:    mov sp, x29
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_2048-NEXT:    .cfi_restore w30
-; VBITS_GE_2048-NEXT:    .cfi_restore w29
 ; VBITS_GE_2048-NEXT:    ret
   %mask = load <256 x i1>, <256 x i1>* %c
   %op1 = load <256 x i8>, <256 x i8>* %a
@@ -1231,11 +1215,7 @@ define void @select_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %c) #0 {
 ; CHECK-NEXT:    sel z0.h, p1, z1.h, z2.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    mov sp, x29
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x i16>, <16 x i16>* %a
@@ -1331,11 +1311,7 @@ define void @select_v32i16(<32 x i16>* %a, <32 x i16>* %b, <32 x i1>* %c) #0 {
 ; VBITS_GE_512-NEXT:    sel z0.h, p1, z1.h, z2.h
 ; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_512-NEXT:    mov sp, x29
-; VBITS_GE_512-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_512-NEXT:    .cfi_restore w30
-; VBITS_GE_512-NEXT:    .cfi_restore w29
 ; VBITS_GE_512-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i16>, <32 x i16>* %a
@@ -1495,11 +1471,7 @@ define void @select_v64i16(<64 x i16>* %a, <64 x i16>* %b, <64 x i1>* %c) #0 {
 ; VBITS_GE_1024-NEXT:    sel z0.h, p1, z1.h, z2.h
 ; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_1024-NEXT:    mov sp, x29
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_1024-NEXT:    .cfi_restore w30
-; VBITS_GE_1024-NEXT:    .cfi_restore w29
 ; VBITS_GE_1024-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x i16>, <64 x i16>* %a
@@ -1788,11 +1760,7 @@ define void @select_v128i16(<128 x i16>* %a, <128 x i16>* %b, <128 x i1>* %c) #0
 ; VBITS_GE_2048-NEXT:    sel z0.h, p1, z1.h, z2.h
 ; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_2048-NEXT:    mov sp, x29
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_2048-NEXT:    .cfi_restore w30
-; VBITS_GE_2048-NEXT:    .cfi_restore w29
 ; VBITS_GE_2048-NEXT:    ret
   %mask = load <128 x i1>, <128 x i1>* %c
   %op1 = load <128 x i16>, <128 x i16>* %a
@@ -1861,11 +1829,7 @@ define void @select_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %c) #0 {
 ; CHECK-NEXT:    sel z0.s, p1, z1.s, z2.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    mov sp, x29
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x i32>, <8 x i32>* %a
@@ -1921,11 +1885,7 @@ define void @select_v16i32(<16 x i32>* %a, <16 x i32>* %b, <16 x i1>* %c) #0 {
 ; VBITS_GE_512-NEXT:    sel z0.s, p1, z1.s, z2.s
 ; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_512-NEXT:    mov sp, x29
-; VBITS_GE_512-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_512-NEXT:    .cfi_restore w30
-; VBITS_GE_512-NEXT:    .cfi_restore w29
 ; VBITS_GE_512-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x i32>, <16 x i32>* %a
@@ -2005,11 +1965,7 @@ define void @select_v32i32(<32 x i32>* %a, <32 x i32>* %b, <32 x i1>* %c) #0 {
 ; VBITS_GE_1024-NEXT:    sel z0.s, p1, z1.s, z2.s
 ; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_1024-NEXT:    mov sp, x29
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_1024-NEXT:    .cfi_restore w30
-; VBITS_GE_1024-NEXT:    .cfi_restore w29
 ; VBITS_GE_1024-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i32>, <32 x i32>* %a
@@ -2137,11 +2093,7 @@ define void @select_v64i32(<64 x i32>* %a, <64 x i32>* %b, <64 x i1>* %c) #0 {
 ; VBITS_GE_2048-NEXT:    sel z0.s, p1, z1.s, z2.s
 ; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_2048-NEXT:    mov sp, x29
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_2048-NEXT:    .cfi_restore w30
-; VBITS_GE_2048-NEXT:    .cfi_restore w29
 ; VBITS_GE_2048-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x i32>, <64 x i32>* %a
@@ -2208,11 +2160,7 @@ define void @select_v4i64(<4 x i64>* %a, <4 x i64>* %b, <4 x i1>* %c) #0 {
 ; CHECK-NEXT:    sel z0.d, p1, z1.d, z2.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    mov sp, x29
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %mask = load <4 x i1>, <4 x i1>* %c
   %op1 = load <4 x i64>, <4 x i64>* %a
@@ -2263,11 +2211,7 @@ define void @select_v8i64(<8 x i64>* %a, <8 x i64>* %b, <8 x i1>* %c) #0 {
 ; VBITS_GE_512-NEXT:    sel z0.d, p1, z1.d, z2.d
 ; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_512-NEXT:    mov sp, x29
-; VBITS_GE_512-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_512-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_512-NEXT:    .cfi_restore w30
-; VBITS_GE_512-NEXT:    .cfi_restore w29
 ; VBITS_GE_512-NEXT:    ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x i64>, <8 x i64>* %a
@@ -2338,11 +2282,7 @@ define void @select_v16i64(<16 x i64>* %a, <16 x i64>* %b, <16 x i1>* %c) #0 {
 ; VBITS_GE_1024-NEXT:    sel z0.d, p1, z1.d, z2.d
 ; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_1024-NEXT:    mov sp, x29
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_1024-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_1024-NEXT:    .cfi_restore w30
-; VBITS_GE_1024-NEXT:    .cfi_restore w29
 ; VBITS_GE_1024-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x i64>, <16 x i64>* %a
@@ -2484,11 +2424,7 @@ define void @select_v32i64(<32 x i64>* %a, <32 x i64>* %b, <32 x i1>* %c) #0 {
 ; VBITS_GE_2048-NEXT:    sel z0.d, p1, z1.d, z2.d
 ; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_2048-NEXT:    mov sp, x29
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa wsp, 16
 ; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; VBITS_GE_2048-NEXT:    .cfi_def_cfa_offset 0
-; VBITS_GE_2048-NEXT:    .cfi_restore w30
-; VBITS_GE_2048-NEXT:    .cfi_restore w29
 ; VBITS_GE_2048-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i64>, <32 x i64>* %a
@@ -2498,4 +2434,4 @@ define void @select_v32i64(<32 x i64>* %a, <32 x i64>* %b, <32 x i1>* %c) #0 {
   ret void
 }
 
-attributes #0 = { "target-features"="+sve" uwtable }
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
index 0e2b2dd6ebf3c..45b33f5acae7a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
@@ -950,11 +950,7 @@ define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    mov sp, x29
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %op2 = load <4 x double>, <4 x double>* %b
@@ -963,4 +959,4 @@ define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 {
   ret void
 }
 
-attributes #0 = { "target-features"="+sve" uwtable }
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
index 49951be72a3ca..f08bb15f84e99 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -496,7 +496,7 @@ define <vscale x 16 x i1> @test_predicate_insert_16xi1(<vscale x 16 x i1> %val,
   ret <vscale x 16 x i1> %res
 }
 
-define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val, i1 %elt, i32 %idx) uwtable {
+define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val, i1 %elt, i32 %idx) {
 ; CHECK-LABEL: test_predicate_insert_32xi1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -524,10 +524,7 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val,
 ; CHECK-NEXT:    cmpne p0.b, p1/z, z0.b, #0
 ; CHECK-NEXT:    cmpne p1.b, p1/z, z1.b, #0
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %res = insertelement <vscale x 32 x i1> %val, i1 %elt, i32 %idx
   ret <vscale x 32 x i1> %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index 9cfbbf5dfb6bd..d717585d4e480 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -186,7 +186,7 @@ define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x
   ret void
 }
 
-define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16 x i64>* %out) uwtable {
+define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16 x i64>* %out) {
 ; CHECK-LABEL: insert_v2i64_nxv16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -207,10 +207,7 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #4
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %v0 = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
   %v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
@@ -231,7 +228,7 @@ define void @insert_v2i64_nxv16i64_lo0(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
   ret void
 }
 
-define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %out) uwtable {
+define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %out) {
 ; CHECK-LABEL: insert_v2i64_nxv16i64_lo2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -247,10 +244,7 @@ define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x1, #1, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %sv = load <2 x i64>, <2 x i64>* %psv
   %v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)

diff  --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
index 991d997f3317a..7d979ffb1883b 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
@@ -4,26 +4,26 @@
 # Test that prologepilog works for each of the LDNF1 instructions for stack-based objects.
 #
 --- |
-  define void @testcase_positive_offset() uwtable {
+  define void @testcase_positive_offset() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset 63 * readsize
     ret void
   }
-  define void @testcase_negative_offset() uwtable {
+  define void @testcase_negative_offset() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset 63 * readsize
     ret void
   }
 
-  define void @testcase_positive_offset_out_of_range() uwtable {
+  define void @testcase_positive_offset_out_of_range() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset 64 * readsize
     ret void
   }
-  define void @testcase_negative_offset_out_of_range() uwtable {
+  define void @testcase_negative_offset_out_of_range() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset -1 * readsize
@@ -65,10 +65,7 @@ body:             |
     ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, 7 :: (load (s32) from %ir.object, align 8)
     ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, 7 :: (load (s64) from %ir.object)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNF1B_IMM renamable $p0, %stack.1.object, 7 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNF1B_H_IMM renamable $p0, %stack.1.object, 7 :: (load 1 from %ir.object, align 2)
@@ -124,10 +121,7 @@ body:             |
     ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, -8 :: (load (s32) from %ir.object, align 8)
     ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, -8 :: (load (s64) from %ir.object)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNF1B_IMM renamable $p0, %stack.1.object, -8 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNF1B_H_IMM renamable $p0, %stack.1.object, -8 :: (load 1 from %ir.object, align 2)
@@ -197,10 +191,7 @@ body:             |
     ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
     ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, 7 :: (load (s32) from %ir.object, align 8)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNF1B_IMM renamable $p0, %stack.1.object, 8 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNF1B_H_IMM renamable $p0, %stack.1.object, 8 :: (load 1 from %ir.object, align 2)
@@ -269,10 +260,7 @@ body:             |
     ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4
     ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, -8 :: (load (s32) from %ir.object, align 8)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNF1B_IMM renamable $p0, %stack.1.object, -9 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNF1B_H_IMM renamable $p0, %stack.1.object, -9 :: (load 1 from %ir.object, align 2)

diff  --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
index 1352b9ddcacdf..7e30d69b5ca45 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
@@ -4,26 +4,26 @@
 # Test that prologepilog works for each of the LDNT1/STNT1 instructions for stack-based objects.
 #
 --- |
-  define void @testcase_positive_offset() uwtable {
+  define void @testcase_positive_offset() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset 7 * readsize
     ret void
   }
-  define void @testcase_negative_offset() uwtable {
+  define void @testcase_negative_offset() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset -8 * readsize
     ret void
   }
 
-  define void @testcase_positive_offset_out_of_range() uwtable {
+  define void @testcase_positive_offset_out_of_range() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset 8 * readsize
     ret void
   }
-  define void @testcase_negative_offset_out_of_range() uwtable {
+  define void @testcase_negative_offset_out_of_range() {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
     ; Reads from %object at offset -9 * readsize
@@ -57,10 +57,7 @@ body:             |
     ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s32) into %ir.object, align 8)
     ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s64) into %ir.object)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNT1B_ZRI renamable $p0, %stack.1.object, 7 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNT1H_ZRI renamable $p0, %stack.1.object, 7 :: (load 2 from %ir.object, align 2)
@@ -100,10 +97,7 @@ body:             |
     ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s32) into %ir.object, align 8)
     ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s64) into %ir.object)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNT1B_ZRI renamable $p0, %stack.1.object, -8 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNT1H_ZRI renamable $p0, %stack.1.object, -8 :: (load 2 from %ir.object, align 2)
@@ -151,10 +145,7 @@ body:             |
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
     ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s64) into %ir.object)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNT1B_ZRI renamable $p0, %stack.1.object, 8 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNT1H_ZRI renamable $p0, %stack.1.object, 8 :: (load 2 from %ir.object, align 2)
@@ -202,10 +193,7 @@ body:             |
     ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
     ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s64) into %ir.object)
     ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
     ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
-    ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
     ; CHECK-NEXT: RET_ReallyLR implicit $z0
     renamable $z0 = LDNT1B_ZRI renamable $p0, %stack.1.object, -9 :: (load 1 from %ir.object, align 2)
     renamable $z0 = LDNT1H_ZRI renamable $p0, %stack.1.object, -9 :: (load 2 from %ir.object, align 2)

diff  --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
index d53dba17dd969..dc20a123a15a2 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
@@ -46,7 +46,7 @@ define <vscale x 2 x i1> @add_nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b)
 
 ; ILLEGAL ADDS
 
-define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) uwtable {
+define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
 ; CHECK-LABEL: add_nxv64i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -74,10 +74,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1>
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %res = add <vscale x 64 x i1> %a, %b
   ret <vscale x 64 x i1> %res;
@@ -130,7 +127,7 @@ define <vscale x 2 x i1> @sub_xv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b)
 ; ILLEGAL SUBS
 
 
-define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) uwtable {
+define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
 ; CHECK-LABEL: sub_nxv64i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -158,10 +155,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1>
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %res = sub <vscale x 64 x i1> %a, %b
   ret <vscale x 64 x i1> %res;

diff  --git a/llvm/test/CodeGen/AArch64/sve-varargs.ll b/llvm/test/CodeGen/AArch64/sve-varargs.ll
index 4ba5ad8a1008a..af37e0f77591d 100644
--- a/llvm/test/CodeGen/AArch64/sve-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/sve-varargs.ll
@@ -5,7 +5,7 @@ declare i32 @sve_printf(i8*, <vscale x 4 x i32>, ...)
 
 @.str_1 = internal constant [6 x i8] c"boo!\0A\00"
 
-define void @foo(<vscale x 4 x i32> %x) uwtable {
+define void @foo(<vscale x 4 x i32> %x) {
 ; CHECK-LABEL: foo:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -15,8 +15,6 @@ define void @foo(<vscale x 4 x i32> %x) uwtable {
 ; CHECK-NEXT:    add x0, x0, :lo12:.str_1
 ; CHECK-NEXT:    bl sve_printf
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
   %f = getelementptr [6 x i8], [6 x i8]* @.str_1, i64 0, i64 0
   call i32 (i8*, <vscale x 4 x i32>, ...) @sve_printf(i8* %f, <vscale x 4 x i32> %x)

diff  --git a/llvm/test/CodeGen/AArch64/swifttail-call.ll b/llvm/test/CodeGen/AArch64/swifttail-call.ll
index 5b612ab40008a..64109958a2f0a 100644
--- a/llvm/test/CodeGen/AArch64/swifttail-call.ll
+++ b/llvm/test/CodeGen/AArch64/swifttail-call.ll
@@ -16,18 +16,17 @@ define swifttailcc void @caller_to0_from0() nounwind {
 ; COMMON-NEXT: b callee_stack0
 }
 
-define swifttailcc void @caller_to0_from8([8 x i64], i64) #0 {
+define swifttailcc void @caller_to0_from8([8 x i64], i64) {
 ; COMMON-LABEL: caller_to0_from8:
 
   musttail call swifttailcc void @callee_stack0()
   ret void
 
 ; COMMON: add sp, sp, #16
-; COMMON-NEXT: .cfi_def_cfa_offset -16
 ; COMMON-NEXT: b callee_stack0
 }
 
-define swifttailcc void @caller_to8_from0() #0 {
+define swifttailcc void @caller_to8_from0() {
 ; COMMON-LABEL: caller_to8_from0:
 
 ; Key point is that the "42" should go #16 below incoming stack
@@ -39,7 +38,7 @@ define swifttailcc void @caller_to8_from0() #0 {
 ; COMMON-NEXT: b callee_stack8
 }
 
-define swifttailcc void @caller_to8_from8([8 x i64], i64 %a) #0 {
+define swifttailcc void @caller_to8_from8([8 x i64], i64 %a) {
 ; COMMON-LABEL: caller_to8_from8:
 ; COMMON-NOT: sub sp,
 
@@ -51,7 +50,7 @@ define swifttailcc void @caller_to8_from8([8 x i64], i64 %a) #0 {
 ; COMMON-NEXT: b callee_stack8
 }
 
-define swifttailcc void @caller_to16_from8([8 x i64], i64 %a) #0 {
+define swifttailcc void @caller_to16_from8([8 x i64], i64 %a) {
 ; COMMON-LABEL: caller_to16_from8:
 ; COMMON-NOT: sub sp,
 
@@ -66,7 +65,7 @@ define swifttailcc void @caller_to16_from8([8 x i64], i64 %a) #0 {
 }
 
 
-define swifttailcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) #0 {
+define swifttailcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) {
 ; COMMON-LABEL: caller_to8_from24:
 ; COMMON-NOT: sub sp,
 
@@ -75,12 +74,11 @@ define swifttailcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) #0
   ret void
 
 ; COMMON: str {{x[0-9]+}}, [sp, #16]!
-; COMMON-NEXT: .cfi_def_cfa_offset -16
 ; COMMON-NEXT: b callee_stack8
 }
 
 
-define swifttailcc void @caller_to16_from16([8 x i64], i64 %a, i64 %b) #0 {
+define swifttailcc void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
 ; COMMON-LABEL: caller_to16_from16:
 ; COMMON-NOT: sub sp,
 
@@ -107,7 +105,7 @@ define swifttailcc void @disable_tail_calls() nounwind "disable-tail-calls"="tru
 
 ; Weakly-referenced extern functions cannot be tail-called, as AAELF does
 ; not define the behaviour of branch instructions to undefined weak symbols.
-define swifttailcc void @caller_weak() #0 {
+define swifttailcc void @caller_weak() {
 ; COMMON-LABEL: caller_weak:
 ; COMMON: bl callee_weak
   tail call void @callee_weak()
@@ -116,7 +114,7 @@ define swifttailcc void @caller_weak() #0 {
 
 declare { [2 x float] } @get_vec2()
 
-define { [3 x float] } @test_add_elem() #0 {
+define { [3 x float] } @test_add_elem() {
 ; SDAG-LABEL: test_add_elem:
 ; SDAG: bl get_vec2
 ; SDAG: fmov s2, #1.0
@@ -140,7 +138,7 @@ define { [3 x float] } @test_add_elem() #0 {
 }
 
 declare double @get_double()
-define { double, [2 x double] } @test_mismatched_insert() #0 {
+define { double, [2 x double] } @test_mismatched_insert() {
 ; COMMON-LABEL: test_mismatched_insert:
 ; COMMON: bl get_double
 ; COMMON: bl get_double
@@ -158,7 +156,7 @@ define { double, [2 x double] } @test_mismatched_insert() #0 {
   ret { double, [2 x double] } %res.012
 }
 
-define void @fromC_totail() #0 {
+define void @fromC_totail() {
 ; COMMON-LABEL: fromC_totail:
 ; COMMON: sub sp, sp, #48
 
@@ -176,7 +174,7 @@ define void @fromC_totail() #0 {
   ret void
 }
 
-define void @fromC_totail_noreservedframe(i32 %len) #0 {
+define void @fromC_totail_noreservedframe(i32 %len) {
 ; COMMON-LABEL: fromC_totail_noreservedframe:
 ; COMMON: stp x29, x30, [sp, #-48]!
 
@@ -200,7 +198,7 @@ define void @fromC_totail_noreservedframe(i32 %len) #0 {
 
 declare void @Ccallee_stack8([8 x i64], i64)
 
-define swifttailcc void @fromtail_toC() #0 {
+define swifttailcc void @fromtail_toC() {
 ; COMMON-LABEL: fromtail_toC:
 ; COMMON: sub sp, sp, #32
 
@@ -222,7 +220,7 @@ define swifttailcc void @fromtail_toC() #0 {
 }
 
 declare swifttailcc i8* @SwiftSelf(i8 * swiftasync %context, i8* swiftself %closure)
-define swiftcc i8* @CallSwiftSelf(i8* swiftself %closure, i8* %context) #0 {
+define swiftcc i8* @CallSwiftSelf(i8* swiftself %closure, i8* %context) {
 ; CHECK-LABEL: CallSwiftSelf:
 ; CHECK: stp x20
   ;call void asm "","~{r13}"() ; We get a push r13 but why not with the call
@@ -230,5 +228,3 @@ define swiftcc i8* @CallSwiftSelf(i8* swiftself %closure, i8* %context) #0 {
   %res = call swifttailcc i8* @SwiftSelf(i8 * swiftasync %context, i8* swiftself %closure)
   ret i8* %res
 }
-
-attributes #0 = { uwtable }
\ No newline at end of file

diff  --git a/llvm/test/CodeGen/AArch64/tail-call.ll b/llvm/test/CodeGen/AArch64/tail-call.ll
index 83943801344b6..537754f4526b7 100644
--- a/llvm/test/CodeGen/AArch64/tail-call.ll
+++ b/llvm/test/CodeGen/AArch64/tail-call.ll
@@ -16,18 +16,17 @@ define fastcc void @caller_to0_from0() nounwind {
 ; COMMON-NEXT: b callee_stack0
 }
 
-define fastcc void @caller_to0_from8([8 x i64], i64) #0 {
+define fastcc void @caller_to0_from8([8 x i64], i64) {
 ; COMMON-LABEL: caller_to0_from8:
 
   tail call fastcc void @callee_stack0()
   ret void
 
 ; COMMON: add sp, sp, #16
-; COMMON: .cfi_def_cfa_offset -16
 ; COMMON-NEXT: b callee_stack0
 }
 
-define fastcc void @caller_to8_from0() #0 {
+define fastcc void @caller_to8_from0() {
 ; COMMON-LABEL: caller_to8_from0:
 
 ; Key point is that the "42" should go #16 below incoming stack
@@ -39,7 +38,7 @@ define fastcc void @caller_to8_from0() #0 {
 ; COMMON-NEXT: b callee_stack8
 }
 
-define fastcc void @caller_to8_from8([8 x i64], i64 %a) #0 {
+define fastcc void @caller_to8_from8([8 x i64], i64 %a) {
 ; COMMON-LABEL: caller_to8_from8:
 ; COMMON-NOT: sub sp,
 
@@ -51,7 +50,7 @@ define fastcc void @caller_to8_from8([8 x i64], i64 %a) #0 {
 ; COMMON-NEXT: b callee_stack8
 }
 
-define fastcc void @caller_to16_from8([8 x i64], i64 %a) #0 {
+define fastcc void @caller_to16_from8([8 x i64], i64 %a) {
 ; COMMON-LABEL: caller_to16_from8:
 ; COMMON-NOT: sub sp,
 
@@ -66,7 +65,7 @@ define fastcc void @caller_to16_from8([8 x i64], i64 %a) #0 {
 }
 
 
-define fastcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) #0 {
+define fastcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) {
 ; COMMON-LABEL: caller_to8_from24:
 ; COMMON-NOT: sub sp,
 
@@ -75,12 +74,11 @@ define fastcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) #0 {
   ret void
 
 ; COMMON: str {{x[0-9]+}}, [sp, #16]!
-; COMMON: .cfi_def_cfa_offset -16
 ; COMMON-NEXT: b callee_stack8
 }
 
 
-define fastcc void @caller_to16_from16([8 x i64], i64 %a, i64 %b) #0 {
+define fastcc void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
 ; COMMON-LABEL: caller_to16_from16:
 ; COMMON-NOT: sub sp,
 
@@ -107,7 +105,7 @@ define fastcc void @disable_tail_calls() nounwind "disable-tail-calls"="true" {
 
 ; Weakly-referenced extern functions cannot be tail-called, as AAELF does
 ; not define the behaviour of branch instructions to undefined weak symbols.
-define fastcc void @caller_weak() #0 {
+define fastcc void @caller_weak() {
 ; COMMON-LABEL: caller_weak:
 ; COMMON: bl callee_weak
   tail call void @callee_weak()
@@ -116,7 +114,7 @@ define fastcc void @caller_weak() #0 {
 
 declare { [2 x float] } @get_vec2()
 
-define { [3 x float] } @test_add_elem() #0 {
+define { [3 x float] } @test_add_elem() {
 ; SDAG-LABEL: test_add_elem:
 ; SDAG: bl get_vec2
 ; SDAG: fmov s2, #1.0
@@ -140,7 +138,7 @@ define { [3 x float] } @test_add_elem() #0 {
 }
 
 declare double @get_double()
-define { double, [2 x double] } @test_mismatched_insert() #0 {
+define { double, [2 x double] } @test_mismatched_insert() {
 ; COMMON-LABEL: test_mismatched_insert:
 ; COMMON: bl get_double
 ; COMMON: bl get_double
@@ -157,5 +155,3 @@ define { double, [2 x double] } @test_mismatched_insert() #0 {
 
   ret { double, [2 x double] } %res.012
 }
-
-attributes #0 = { uwtable }
\ No newline at end of file

diff  --git a/llvm/test/CodeGen/AArch64/tailcc-tail-call.ll b/llvm/test/CodeGen/AArch64/tailcc-tail-call.ll
index a1482ac5d53cb..57b9f8e1f72d8 100644
--- a/llvm/test/CodeGen/AArch64/tailcc-tail-call.ll
+++ b/llvm/test/CodeGen/AArch64/tailcc-tail-call.ll
@@ -16,18 +16,17 @@ define tailcc void @caller_to0_from0() nounwind {
 ; COMMON-NEXT: b callee_stack0
 }
 
-define tailcc void @caller_to0_from8([8 x i64], i64) #0 {
+define tailcc void @caller_to0_from8([8 x i64], i64) {
 ; COMMON-LABEL: caller_to0_from8:
 
   tail call tailcc void @callee_stack0()
   ret void
 
 ; COMMON: add sp, sp, #16
-; COMMON: .cfi_def_cfa_offset -16
 ; COMMON-NEXT: b callee_stack0
 }
 
-define tailcc void @caller_to8_from0() "frame-pointer"="all" uwtable {
+define tailcc void @caller_to8_from0() "frame-pointer"="all" {
 ; COMMON-LABEL: caller_to8_from0:
 
 ; Key point is that the "42" should go #16 below incoming stack
@@ -41,13 +40,10 @@ define tailcc void @caller_to8_from0() "frame-pointer"="all" uwtable {
   ; from an interrupt if the kernel does not honour a red-zone, and a larger
   ; call could well overflow the red zone even if it is present.
 ; COMMON-NOT: sub sp,
-; COMMON-NEXT: .cfi_def_cfa_offset 16
-; COMMON-NEXT: .cfi_restore w30
-; COMMON-NEXT: .cfi_restore w29
 ; COMMON-NEXT: b callee_stack8
 }
 
-define tailcc void @caller_to8_from8([8 x i64], i64 %a) #0 {
+define tailcc void @caller_to8_from8([8 x i64], i64 %a) {
 ; COMMON-LABEL: caller_to8_from8:
 ; COMMON-NOT: sub sp,
 
@@ -59,7 +55,7 @@ define tailcc void @caller_to8_from8([8 x i64], i64 %a) #0 {
 ; COMMON-NEXT: b callee_stack8
 }
 
-define tailcc void @caller_to16_from8([8 x i64], i64 %a) #0 {
+define tailcc void @caller_to16_from8([8 x i64], i64 %a) {
 ; COMMON-LABEL: caller_to16_from8:
 ; COMMON-NOT: sub sp,
 
@@ -74,7 +70,7 @@ define tailcc void @caller_to16_from8([8 x i64], i64 %a) #0 {
 }
 
 
-define tailcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) #0 {
+define tailcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) {
 ; COMMON-LABEL: caller_to8_from24:
 ; COMMON-NOT: sub sp,
 
@@ -83,12 +79,11 @@ define tailcc void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) #0 {
   ret void
 
 ; COMMON: str {{x[0-9]+}}, [sp, #16]!
-; COMMON-NEXT: .cfi_def_cfa_offset -16
 ; COMMON-NEXT: b callee_stack8
 }
 
 
-define tailcc void @caller_to16_from16([8 x i64], i64 %a, i64 %b) #0 {
+define tailcc void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
 ; COMMON-LABEL: caller_to16_from16:
 ; COMMON-NOT: sub sp,
 
@@ -115,7 +110,7 @@ define tailcc void @disable_tail_calls() nounwind "disable-tail-calls"="true" {
 
 ; Weakly-referenced extern functions cannot be tail-called, as AAELF does
 ; not define the behaviour of branch instructions to undefined weak symbols.
-define tailcc void @caller_weak() #0 {
+define tailcc void @caller_weak() {
 ; COMMON-LABEL: caller_weak:
 ; COMMON: bl callee_weak
   tail call void @callee_weak()
@@ -124,7 +119,7 @@ define tailcc void @caller_weak() #0 {
 
 declare { [2 x float] } @get_vec2()
 
-define { [3 x float] } @test_add_elem() #0 {
+define { [3 x float] } @test_add_elem() {
 ; SDAG-LABEL: test_add_elem:
 ; SDAG: bl get_vec2
 ; SDAG: fmov s2, #1.0
@@ -148,7 +143,7 @@ define { [3 x float] } @test_add_elem() #0 {
 }
 
 declare double @get_double()
-define { double, [2 x double] } @test_mismatched_insert() #0 {
+define { double, [2 x double] } @test_mismatched_insert() {
 ; COMMON-LABEL: test_mismatched_insert:
 ; COMMON: bl get_double
 ; COMMON: bl get_double
@@ -166,7 +161,7 @@ define { double, [2 x double] } @test_mismatched_insert() #0 {
   ret { double, [2 x double] } %res.012
 }
 
-define void @fromC_totail() #0 {
+define void @fromC_totail() {
 ; COMMON-LABEL: fromC_totail:
 ; COMMON: sub sp, sp, #32
 
@@ -184,7 +179,7 @@ define void @fromC_totail() #0 {
   ret void
 }
 
-define void @fromC_totail_noreservedframe(i32 %len) #0 {
+define void @fromC_totail_noreservedframe(i32 %len) {
 ; COMMON-LABEL: fromC_totail_noreservedframe:
 ; COMMON: stp x29, x30, [sp, #-32]!
 
@@ -208,7 +203,7 @@ define void @fromC_totail_noreservedframe(i32 %len) #0 {
 
 declare void @Ccallee_stack8([8 x i64], i64)
 
-define tailcc void @fromtail_toC() #0 {
+define tailcc void @fromtail_toC() {
 ; COMMON-LABEL: fromtail_toC:
 ; COMMON: sub sp, sp, #32
 
@@ -228,5 +223,3 @@ define tailcc void @fromtail_toC() #0 {
   call void @Ccallee_stack8([8 x i64] undef, i64 42)
   ret void
 }
-
-attributes #0 = { uwtable }
\ No newline at end of file

diff  --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index f28bff6f79738..a5eb53c554e61 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -O0 -global-isel=1 -global-isel-abort=0 < %s | FileCheck %s --check-prefix=GISEL
 
 ; Test that z0 is saved/restored, as the unwinder may only retain the low 64bits (d0).
-define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uwtable personality i8 0 {
+define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) personality i8 0 {
 ; CHECK-LABEL: invoke_callee_may_throw_sve:
 ; CHECK:       .Lfunc_begin0:
 ; CHECK-NEXT:    .cfi_startproc
@@ -61,7 +61,6 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; CHECK-NEXT:  .LBB0_1: // %.Lcontinue
 ; CHECK-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
@@ -91,25 +90,12 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
-; CHECK-NEXT:    .cfi_restore z8
-; CHECK-NEXT:    .cfi_restore z9
-; CHECK-NEXT:    .cfi_restore z10
-; CHECK-NEXT:    .cfi_restore z11
-; CHECK-NEXT:    .cfi_restore z12
-; CHECK-NEXT:    .cfi_restore z13
-; CHECK-NEXT:    .cfi_restore z14
-; CHECK-NEXT:    .cfi_restore z15
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_2: // %.Lunwind
 ; CHECK-NEXT:  .Ltmp2:
 ; CHECK-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #2
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
 ; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
@@ -139,19 +125,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    addvl sp, sp, #18
-; CHECK-NEXT:    .cfi_def_cfa wsp, 16
-; CHECK-NEXT:    .cfi_restore z8
-; CHECK-NEXT:    .cfi_restore z9
-; CHECK-NEXT:    .cfi_restore z10
-; CHECK-NEXT:    .cfi_restore z11
-; CHECK-NEXT:    .cfi_restore z12
-; CHECK-NEXT:    .cfi_restore z13
-; CHECK-NEXT:    .cfi_restore z14
-; CHECK-NEXT:    .cfi_restore z15
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: invoke_callee_may_throw_sve:
@@ -211,7 +185,6 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; GISEL-NEXT:  .LBB0_1: // %.Lcontinue
 ; GISEL-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #2
-; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
 ; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
@@ -241,25 +214,12 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #18
-; GISEL-NEXT:    .cfi_def_cfa wsp, 16
-; GISEL-NEXT:    .cfi_restore z8
-; GISEL-NEXT:    .cfi_restore z9
-; GISEL-NEXT:    .cfi_restore z10
-; GISEL-NEXT:    .cfi_restore z11
-; GISEL-NEXT:    .cfi_restore z12
-; GISEL-NEXT:    .cfi_restore z13
-; GISEL-NEXT:    .cfi_restore z14
-; GISEL-NEXT:    .cfi_restore z15
 ; GISEL-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; GISEL-NEXT:    .cfi_def_cfa_offset 0
-; GISEL-NEXT:    .cfi_restore w30
-; GISEL-NEXT:    .cfi_restore w29
 ; GISEL-NEXT:    ret
 ; GISEL-NEXT:  .LBB0_2: // %.Lunwind
 ; GISEL-NEXT:  .Ltmp2:
 ; GISEL-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #2
-; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
 ; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
 ; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
@@ -289,19 +249,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 ; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
 ; GISEL-NEXT:    addvl sp, sp, #18
-; GISEL-NEXT:    .cfi_def_cfa wsp, 16
-; GISEL-NEXT:    .cfi_restore z8
-; GISEL-NEXT:    .cfi_restore z9
-; GISEL-NEXT:    .cfi_restore z10
-; GISEL-NEXT:    .cfi_restore z11
-; GISEL-NEXT:    .cfi_restore z12
-; GISEL-NEXT:    .cfi_restore z13
-; GISEL-NEXT:    .cfi_restore z14
-; GISEL-NEXT:    .cfi_restore z15
 ; GISEL-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; GISEL-NEXT:    .cfi_def_cfa_offset 0
-; GISEL-NEXT:    .cfi_restore w30
-; GISEL-NEXT:    .cfi_restore w29
 ; GISEL-NEXT:    ret
   %result = invoke <vscale x 4 x i32> @may_throw_sve(<vscale x 4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
 .Lcontinue:
@@ -315,7 +263,7 @@ declare <vscale x 4 x i32> @may_throw_sve(<vscale x 4 x i32> %v);
 
 
 ; Test that q0 is saved/restored, as the unwinder may only retain the low 64bits (d0).
-define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) uwtable personality i8 0 {
+define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) personality i8 0 {
 ; CHECK-LABEL: invoke_callee_may_throw_neon:
 ; CHECK:       .Lfunc_begin1:
 ; CHECK-NEXT:    .cfi_startproc
@@ -367,25 +315,6 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; CHECK-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #304
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
-; CHECK-NEXT:    .cfi_restore b8
-; CHECK-NEXT:    .cfi_restore b9
-; CHECK-NEXT:    .cfi_restore b10
-; CHECK-NEXT:    .cfi_restore b11
-; CHECK-NEXT:    .cfi_restore b12
-; CHECK-NEXT:    .cfi_restore b13
-; CHECK-NEXT:    .cfi_restore b14
-; CHECK-NEXT:    .cfi_restore b15
-; CHECK-NEXT:    .cfi_restore b16
-; CHECK-NEXT:    .cfi_restore b17
-; CHECK-NEXT:    .cfi_restore b18
-; CHECK-NEXT:    .cfi_restore b19
-; CHECK-NEXT:    .cfi_restore b20
-; CHECK-NEXT:    .cfi_restore b21
-; CHECK-NEXT:    .cfi_restore b22
-; CHECK-NEXT:    .cfi_restore b23
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_2: // %.Lunwind
 ; CHECK-NEXT:  .Ltmp5:
@@ -400,25 +329,6 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; CHECK-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #304
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
-; CHECK-NEXT:    .cfi_restore b8
-; CHECK-NEXT:    .cfi_restore b9
-; CHECK-NEXT:    .cfi_restore b10
-; CHECK-NEXT:    .cfi_restore b11
-; CHECK-NEXT:    .cfi_restore b12
-; CHECK-NEXT:    .cfi_restore b13
-; CHECK-NEXT:    .cfi_restore b14
-; CHECK-NEXT:    .cfi_restore b15
-; CHECK-NEXT:    .cfi_restore b16
-; CHECK-NEXT:    .cfi_restore b17
-; CHECK-NEXT:    .cfi_restore b18
-; CHECK-NEXT:    .cfi_restore b19
-; CHECK-NEXT:    .cfi_restore b20
-; CHECK-NEXT:    .cfi_restore b21
-; CHECK-NEXT:    .cfi_restore b22
-; CHECK-NEXT:    .cfi_restore b23
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: invoke_callee_may_throw_neon:
@@ -472,25 +382,6 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; GISEL-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; GISEL-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
 ; GISEL-NEXT:    add sp, sp, #304
-; GISEL-NEXT:    .cfi_def_cfa_offset 0
-; GISEL-NEXT:    .cfi_restore w30
-; GISEL-NEXT:    .cfi_restore w29
-; GISEL-NEXT:    .cfi_restore b8
-; GISEL-NEXT:    .cfi_restore b9
-; GISEL-NEXT:    .cfi_restore b10
-; GISEL-NEXT:    .cfi_restore b11
-; GISEL-NEXT:    .cfi_restore b12
-; GISEL-NEXT:    .cfi_restore b13
-; GISEL-NEXT:    .cfi_restore b14
-; GISEL-NEXT:    .cfi_restore b15
-; GISEL-NEXT:    .cfi_restore b16
-; GISEL-NEXT:    .cfi_restore b17
-; GISEL-NEXT:    .cfi_restore b18
-; GISEL-NEXT:    .cfi_restore b19
-; GISEL-NEXT:    .cfi_restore b20
-; GISEL-NEXT:    .cfi_restore b21
-; GISEL-NEXT:    .cfi_restore b22
-; GISEL-NEXT:    .cfi_restore b23
 ; GISEL-NEXT:    ret
 ; GISEL-NEXT:  .LBB1_2: // %.Lunwind
 ; GISEL-NEXT:  .Ltmp5:
@@ -505,25 +396,6 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; GISEL-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; GISEL-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
 ; GISEL-NEXT:    add sp, sp, #304
-; GISEL-NEXT:    .cfi_def_cfa_offset 0
-; GISEL-NEXT:    .cfi_restore w30
-; GISEL-NEXT:    .cfi_restore w29
-; GISEL-NEXT:    .cfi_restore b8
-; GISEL-NEXT:    .cfi_restore b9
-; GISEL-NEXT:    .cfi_restore b10
-; GISEL-NEXT:    .cfi_restore b11
-; GISEL-NEXT:    .cfi_restore b12
-; GISEL-NEXT:    .cfi_restore b13
-; GISEL-NEXT:    .cfi_restore b14
-; GISEL-NEXT:    .cfi_restore b15
-; GISEL-NEXT:    .cfi_restore b16
-; GISEL-NEXT:    .cfi_restore b17
-; GISEL-NEXT:    .cfi_restore b18
-; GISEL-NEXT:    .cfi_restore b19
-; GISEL-NEXT:    .cfi_restore b20
-; GISEL-NEXT:    .cfi_restore b21
-; GISEL-NEXT:    .cfi_restore b22
-; GISEL-NEXT:    .cfi_restore b23
 ; GISEL-NEXT:    ret
   %result = invoke aarch64_vector_pcs <4 x i32> @may_throw_neon(<4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
 .Lcontinue:

diff  --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
index 6870d72a04f6a..2bc064d986f99 100644
--- a/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll
@@ -134,7 +134,7 @@ while_end:
 declare %struct_type* @foo()
 declare void @foo2()
 
-define void @test4(i32 %n) uwtable personality i32 (...)* @__FrameHandler {
+define void @test4(i32 %n) personality i32 (...)* @__FrameHandler {
 ; CHECK-LABEL: test4:
 ; CHECK:       .Lfunc_begin0:
 ; CHECK-NEXT:    .cfi_startproc
@@ -171,11 +171,6 @@ define void @test4(i32 %n) uwtable personality i32 (...)* @__FrameHandler {
 ; CHECK-NEXT:  .LBB3_4: // %while_end
 ; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x30, x21, [sp], #32 // 16-byte Folded Reload
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w19
-; CHECK-NEXT:    .cfi_restore w20
-; CHECK-NEXT:    .cfi_restore w21
-; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB3_5: // %cleanup
 ; CHECK-NEXT:  .Ltmp2:

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
index 92e3485929d48..df33af22e1a75 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
@@ -90,13 +90,9 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    bl OUTLINED_FUNCTION_0
 ; CHECK-NEXT:  .LBB0_5:
-; CHECK-NEXT:    mov w0, wzr
-; CHECK-NEXT:    .cfi_def_cfa wsp, 48
 ; CHECK-NEXT:    ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    add sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-LABEL: main:
@@ -121,13 +117,9 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    stp w10, w8, [x29, #-12]
-; CHECK-NEXT:    stp w9, w11, [sp, #12]
-; CHECK-NEXT:    .cfi_def_cfa wsp, 48
 ; CHECK-NEXT:    ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    stp w9, w11, [sp, #12]
 ; CHECK-NEXT:    add sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-LABEL: OUTLINED_FUNCTION_0:

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
index c59b24fbd697a..9765cbace729a 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
@@ -31,13 +31,9 @@ define dso_local i32 @check_boundaries() #0 {
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    bl OUTLINED_FUNCTION_0
 ; CHECK-NEXT:  .LBB0_5:
-; CHECK-NEXT:    mov w0, wzr
-; CHECK-NEXT:    .cfi_def_cfa wsp, 48
 ; CHECK-NEXT:    ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    add sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
@@ -98,13 +94,9 @@ define dso_local i32 @main() #0 {
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    stp w10, w8, [x29, #-12]
-; CHECK-NEXT:    stp w9, w11, [sp, #12]
-; CHECK-NEXT:    .cfi_def_cfa wsp, 48
 ; CHECK-NEXT:    ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    stp w9, w11, [sp, #12]
 ; CHECK-NEXT:    add sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    .cfi_restore w30
-; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4

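As an illustrative sketch (not part of the commit itself): the directives deleted throughout the hunks above are what the reverted patch emitted after each epilogue instruction, so that asynchronous unwinding remains accurate while the frame is being torn down. For a frame that saved only x29/x30, an annotated epilogue would look roughly like:

    ldp x29, x30, [sp], #16    // reload FP/LR and pop the frame in one step
    .cfi_def_cfa_offset 0      // SP is back at its value on function entry
    .cfi_restore w30           // x30 (LR) again holds the caller's value
    .cfi_restore w29           // x29 (FP) again holds the caller's value
    ret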

More information about the llvm-commits mailing list